// © 2017 - 2022 Raptor Engineering, LLC
//
// Released under the terms of the LGPL v3+
// See the LICENSE file for full details

// LPC (Low Pin Count) bus slave interface.
//
// Decodes LPC cycles from the host -- ISA I/O cycles, TPM cycles, and
// (optionally, under ENABLE_FIRMWARE_MEMORY_CYCLES) firmware memory cycles --
// and services them either locally (two virtual 16550-style UARTs and an
// IPMI BT control/status register set implemented in this module) or by
// handing the transaction to an external CPU/backend via the address/data
// handshake ports below.  Also implements a SERIRQ (serialized IRQ)
// transmitter on the lpc_irq_* pins.
module lpc_slave_interface(
	// Backend (CPU) transaction interface
	output wire [27:0] address,          // decoded address: low 16 bits for I/O cycles, full 28 for FW cycles
	input wire [7:0] tx_data,            // data supplied by the backend for host reads
	output reg [7:0] rx_data,            // data byte received from a host write
	output reg tpm_cycle,                // cycle was opened with the TPM START codeword
	output reg firmware_cycle,           // cycle is an LPC firmware memory cycle
	input wire continue,                 // NOTE(review): 'continue' is a reserved keyword in
	                                     // SystemVerilog; this compiles as Verilog-2001 only.
	input wire data_ack,
	input wire transfer_error,
	input wire exception_ack,
	output reg address_ready,            // asserted when 'address' is valid for the backend
	output reg data_ready,               // asserted when 'rx_data' is valid for the backend
	output reg data_ready_cont,
	output reg continue_cont,
	output reg [2:0] exception,          // exception flags raised to the backend
	output wire data_direction,          // 0 == read from slave, 1 == write to slave

	// SERIRQ request interface from the backend
	input wire [16:0] irq_request,       // one request bit per SERIRQ frame (0-16)
	input wire irq_tx_ready,
	output reg irq_tx_queued,

	// LPC firmware-cycle transfer buffer ports (external dual-port RAMs)
	input wire [8:0] lpc_fw_input_xfer_write_addr,
	input wire [7:0] lpc_fw_input_xfer_write_data,
	input wire lpc_fw_input_xfer_write_clk,
	input wire lpc_fw_input_xfer_write_wren,
	input wire [8:0] lpc_fw_output_xfer_read_addr,
	output wire [7:0] lpc_fw_output_xfer_read_data,
	input wire lpc_fw_output_xfer_read_clk,

	// IPMI BT transfer buffer ports (external dual-port RAMs)
	input wire [8:0] ipmi_bt_input_xfer_write_addr,
	input wire [7:0] ipmi_bt_input_xfer_write_data,
	input wire ipmi_bt_input_xfer_write_clk,
	input wire ipmi_bt_input_xfer_write_wren,
	input wire [8:0] ipmi_bt_output_xfer_read_addr,
	output wire [7:0] ipmi_bt_output_xfer_read_data,
	input wire ipmi_bt_output_xfer_read_clk,

	// Six I/O port ranges forwarded to the backend (inclusive bounds)
	input wire [15:0] range1_start,
	input wire [15:0] range1_end,
	input wire [15:0] range2_start,
	input wire [15:0] range2_end,
	input wire [15:0] range3_start,
	input wire [15:0] range3_end,
	input wire [15:0] range4_start,
	input wire [15:0] range4_end,
	input wire [15:0] range5_start,
	input wire [15:0] range5_end,
	input wire [15:0] range6_start,
	input wire [15:0] range6_end,

	// Virtual UART 1 (16550-style register set serviced in this module)
	input wire enable_vuart1,
	output wire [31:0] vuart1_status_register,
	input wire [31:0] vuart1_control_register,
	output wire vuart1_assert_b2h_break_clear,
	output wire vuart1_tx_fifo_reset,
	output wire vuart1_tx_fifo_wren,
	output wire [7:0] vuart1_tx_fifo_data,
	input wire vuart1_tx_fifo_full,
	input wire vuart1_tx_fifo_almost_full,
input wire vuart1_tx_fifo_empty, output wire vuart1_rx_fifo_reset, output wire vuart1_rx_fifo_rpop, input wire [7:0] vuart1_rx_fifo_data, input wire vuart1_rx_fifo_empty, input wire vuart1_rx_fifo_almost_empty, input wire vuart1_rx_fifo_full, input wire [3:0] vuart1_rx_data_available_count, input wire enable_vuart2, output wire [31:0] vuart2_status_register, input wire [31:0] vuart2_control_register, output wire vuart2_assert_b2h_break_clear, output wire vuart2_tx_fifo_reset, output wire vuart2_tx_fifo_wren, output wire [7:0] vuart2_tx_fifo_data, input wire vuart2_tx_fifo_full, input wire vuart2_tx_fifo_almost_full, input wire vuart2_tx_fifo_empty, output wire vuart2_rx_fifo_reset, output wire vuart2_rx_fifo_rpop, input wire [7:0] vuart2_rx_fifo_data, input wire vuart2_rx_fifo_empty, input wire vuart2_rx_fifo_almost_empty, input wire vuart2_rx_fifo_full, input wire [3:0] vuart2_rx_data_available_count, input wire enable_ipmi_bt, input wire ipmi_bt_alt_irq, input wire [15:0] ipmi_bt_port_base_address, output wire ipmi_bt_bmc_to_host_ctl_sms_ack, output wire ipmi_bt_bmc_to_host_ctl_attn_ack, output wire ipmi_bt_host_to_bmc_ctl_attn_req, output wire ipmi_bt_host_to_bmc_ctl_oem0_req, output wire ipmi_bt_irq_ack, output wire ipmi_bt_irq_bmc_reset, output wire ipmi_bt_host_to_bmc_ctl_h_busy, output wire ipmi_bt_irq_enable, input wire ipmi_bt_bmc_to_host_ctl_sms_req, input wire ipmi_bt_bmc_to_host_ctl_attn_req, input wire ipmi_bt_bmc_to_host_ctl_sms_ack_cont, input wire ipmi_bt_bmc_to_host_ctl_attn_ack_cont, input wire ipmi_bt_host_to_bmc_ctl_attn_req_cont, input wire ipmi_bt_host_to_bmc_ctl_oem0_req_cont, input wire ipmi_bt_irq_ack_cont, input wire ipmi_bt_irq_bmc_reset_cont, input wire ipmi_bt_bmc_to_host_ctl_b_busy, input wire ipmi_bt_irq_req, output wire [3:0] fw_idsel, output wire [3:0] fw_msize, output wire [15:0] debug_port, output reg [3:0] lpc_data_out, // These three signals must have I/O output register enabled in top level SB_IO or equivalent input wire [3:0] 
lpc_data_in, output reg lpc_data_direction, // 0 == tristate (input), 1 == driven (output) output reg lpc_irq_out, input wire lpc_irq_in, output wire lpc_irq_direction, // 0 == tristate (input), 1 == driven (output) input wire lpc_frame_n, input wire lpc_reset_n, input wire lpc_clock ); parameter VUART1_BASE_ADDRESS = 16'h03f8; parameter VUART1_IRQ = 4; parameter VUART2_BASE_ADDRESS = 16'h02f8; parameter VUART2_IRQ = 3; parameter IPMI_BT_IRQ = 10; parameter IPMI_BT_ALT_IRQ = 11; parameter LPC_CODEWORD_ISA_START = 4'b0000; parameter LPC_CODEWORD_FWR_START = 4'b1101; parameter LPC_CODEWORD_FWW_START = 4'b1110; parameter LPC_CODEWORD_TPM_START = 4'b0101; parameter LPC_CODEWORD_SYNC_READY = 4'b0000; parameter LPC_CODEWORD_SYNC_SWAIT = 4'b0101; parameter LPC_CODEWORD_SYNC_LWAIT = 4'b0110; parameter LPC_CODEWORD_SYNC_ERROR = 4'b1010; parameter LPC_CODEWORD_TURNAROUND = 4'b1111; parameter LPC_CYCLE_TYPE_IO = 2'b00; parameter LPC_RX_TRANSFER_STATE_IDLE = 0; parameter LPC_RX_TRANSFER_STATE_TR01 = 1; parameter LPC_RX_TRANSFER_STATE_TR02 = 2; parameter LPC_RX_TRANSFER_STATE_TR03 = 3; parameter LPC_RX_TRANSFER_STATE_TR04 = 4; parameter LPC_RX_TRANSFER_STATE_TR05 = 5; parameter LPC_RX_TRANSFER_STATE_TR06 = 6; parameter LPC_RX_TRANSFER_STATE_TR07 = 7; parameter LPC_RX_TRANSFER_STATE_TR08 = 8; parameter LPC_RX_TRANSFER_STATE_TR09 = 9; parameter LPC_RX_TRANSFER_STATE_FR01 = 10; parameter LPC_RX_TRANSFER_STATE_FR02 = 11; parameter LPC_RX_TRANSFER_STATE_FR03 = 12; parameter LPC_RX_TRANSFER_STATE_FR04 = 13; parameter LPC_RX_TRANSFER_STATE_FR05 = 14; parameter LPC_RX_TRANSFER_STATE_FR06 = 15; parameter LPC_RX_TRANSFER_STATE_FR07 = 16; parameter LPC_RX_TRANSFER_STATE_FR08 = 17; parameter LPC_RX_TRANSFER_STATE_FR09 = 18; parameter LPC_RX_TRANSFER_STATE_FR10 = 19; parameter LPC_RX_TRANSFER_STATE_IW01 = 20; parameter LPC_TX_TRANSFER_STATE_IDLE = 0; parameter LPC_TX_TRANSFER_STATE_TR01 = 1; parameter LPC_TX_TRANSFER_STATE_TR02 = 2; parameter LPC_TX_TRANSFER_STATE_TR03 = 3; parameter 
LPC_TX_TRANSFER_STATE_TR04 = 4; parameter LPC_TX_TRANSFER_STATE_TR05 = 5; parameter LPC_TX_TRANSFER_STATE_TR06 = 6; parameter LPC_TX_TRANSFER_STATE_TR07 = 7; parameter LPC_TX_TRANSFER_STATE_TR08 = 8; parameter LPC_TX_TRANSFER_STATE_TR09 = 9; parameter LPC_TX_TRANSFER_STATE_TR10 = 10; parameter LPC_TX_TRANSFER_STATE_TR11 = 11; parameter LPC_TX_TRANSFER_STATE_FR01 = 12; parameter LPC_TX_TRANSFER_STATE_FR02 = 13; parameter LPC_TX_TRANSFER_STATE_FR03 = 14; parameter LPC_TX_TRANSFER_STATE_FR04 = 15; parameter LPC_TX_TRANSFER_STATE_FR05 = 16; parameter LPC_SERIRQ_STATE_IDLE = 0; parameter LPC_SERIRQ_STATE_TR01 = 1; parameter LPC_SERIRQ_STATE_TR02 = 2; parameter LPC_SERIRQ_STATE_TR03 = 3; parameter LPC_SERIRQ_STATE_TR04 = 4; reg [4:0] rx_transfer_state = 0; reg [4:0] tx_transfer_state = 0; reg [2:0] serirq_state = 0; reg start_tx_cycle = 0; reg abort_tx_cycle = 0; reg tx_cycle_done = 0; reg lpc_frame_n_prev = 1; reg [1:0] cycle_type = 0; reg cycle_direction; // 0 == read, 1 == write reg [27:0] io_address = 0; // Lower 16 bits I/O cycles only, full 28 bits used for FW cycles reg [3:0] fw_cycle_idsel = 0; reg [3:0] fw_cycle_msize = 0; reg vuart1_cycle = 0; reg vuart2_cycle = 0; reg ipmi_bt_cycle = 0; reg range_select_cycle = 0; reg [3:0] vuart1_ier = 0; wire [7:0] vuart1_iir; reg [7:0] vuart1_lcr = 0; reg [4:0] vuart1_mcr = 0; wire [7:0] vuart1_lsr; reg [7:0] vuart1_msr = 0; reg [7:0] vuart1_scr = 0; reg [7:0] vuart1_dll = 0; reg [7:0] vuart1_dlm = 0; reg [2:0] vuart1_interrupt_id = 0; reg vuart1_interrupt_pending = 0; reg vuart1_iir_read_tx_empty_assert = 0; reg vuart1_lsr_read_assert = 0; reg vuart1_rx_break_irq_pending = 0; reg vuart1_rx_break_request_prev = 0; reg vuart1_tx_fifo_empty_prev = 0; reg vuart1_tx_fifo_empty_irq_pending = 0; reg vuart1_fifos_enabled = 0; reg [1:0] vuart1_rcvr_trigger = 0; reg vuart1_assert_b2h_break_clear_reg = 0; reg [8:0] vuart1_rx_fifo_read_timeout_counter = 0; reg vuart1_rx_data_queue_contents_read_timeout = 0; reg 
vuart1_rx_data_queue_contents_past_trigger = 0; assign vuart1_iir[7] = vuart1_fifos_enabled; assign vuart1_iir[6] = vuart1_fifos_enabled; assign vuart1_iir[5:4] = 0; assign vuart1_iir[3:1] = vuart1_interrupt_id; assign vuart1_iir[0] = !vuart1_interrupt_pending; assign vuart1_lsr[7] = 0; assign vuart1_lsr[6] = vuart1_tx_fifo_empty; assign vuart1_lsr[5] = vuart1_tx_fifo_empty; assign vuart1_lsr[4] = 0; // BREAK is implemented via an external signal from the BMC, ORed over this bit assign vuart1_lsr[3] = 0; assign vuart1_lsr[2] = 0; assign vuart1_lsr[1] = 0; assign vuart1_lsr[0] = !vuart1_rx_fifo_empty; assign vuart1_assert_b2h_break_clear = vuart1_assert_b2h_break_clear_reg; reg vuart1_tx_fifo_reset_reg = 0; reg vuart1_tx_fifo_wren_reg = 0; reg [7:0] vuart1_tx_fifo_data_reg = 0; reg vuart1_rx_fifo_reset_reg = 0; reg vuart1_rx_fifo_rpop_reg = 0; assign vuart1_tx_fifo_reset = vuart1_tx_fifo_reset_reg; assign vuart1_tx_fifo_wren = vuart1_tx_fifo_wren_reg; assign vuart1_tx_fifo_data = vuart1_tx_fifo_data_reg; assign vuart1_rx_fifo_reset = vuart1_rx_fifo_reset_reg; assign vuart1_rx_fifo_rpop = vuart1_rx_fifo_rpop_reg; reg [3:0] vuart2_ier = 0; wire [7:0] vuart2_iir; reg [7:0] vuart2_lcr = 0; reg [4:0] vuart2_mcr = 0; wire [7:0] vuart2_lsr; reg [7:0] vuart2_msr = 0; reg [7:0] vuart2_scr = 0; reg [7:0] vuart2_dll = 0; reg [7:0] vuart2_dlm = 0; reg [2:0] vuart2_interrupt_id = 0; reg vuart2_interrupt_pending = 0; reg vuart2_iir_read_tx_empty_assert = 0; reg vuart2_lsr_read_assert = 0; reg vuart2_rx_break_irq_pending = 0; reg vuart2_rx_break_request_prev = 0; reg vuart2_tx_fifo_empty_prev = 0; reg vuart2_tx_fifo_empty_irq_pending = 0; reg vuart2_fifos_enabled = 0; reg [1:0] vuart2_rcvr_trigger = 0; reg vuart2_assert_b2h_break_clear_reg = 0; reg [8:0] vuart2_rx_fifo_read_timeout_counter = 0; reg vuart2_rx_data_queue_contents_read_timeout = 0; reg vuart2_rx_data_queue_contents_past_trigger = 0; assign vuart2_iir[7] = vuart2_fifos_enabled; assign vuart2_iir[6] = 
vuart2_fifos_enabled; assign vuart2_iir[5:4] = 0; assign vuart2_iir[3:1] = vuart2_interrupt_id; assign vuart2_iir[0] = !vuart2_interrupt_pending; assign vuart2_lsr[7] = 0; assign vuart2_lsr[6] = vuart2_tx_fifo_empty; assign vuart2_lsr[5] = vuart2_tx_fifo_empty; assign vuart2_lsr[4] = 0; // BREAK is implemented via an external signal from the BMC, ORed over this bit assign vuart2_lsr[3] = 0; assign vuart2_lsr[2] = 0; assign vuart2_lsr[1] = 0; assign vuart2_lsr[0] = !vuart2_rx_fifo_empty; assign vuart2_assert_b2h_break_clear = vuart2_assert_b2h_break_clear_reg; reg vuart2_tx_fifo_reset_reg = 0; reg vuart2_tx_fifo_wren_reg = 0; reg [7:0] vuart2_tx_fifo_data_reg = 0; reg vuart2_rx_fifo_reset_reg = 0; reg vuart2_rx_fifo_rpop_reg = 0; assign vuart2_tx_fifo_reset = vuart2_tx_fifo_reset_reg; assign vuart2_tx_fifo_wren = vuart2_tx_fifo_wren_reg; assign vuart2_tx_fifo_data = vuart2_tx_fifo_data_reg; assign vuart2_rx_fifo_reset = vuart2_rx_fifo_reset_reg; assign vuart2_rx_fifo_rpop = vuart2_rx_fifo_rpop_reg; assign vuart1_status_register = {16'h00, vuart1_fifos_enabled, 1'b0, vuart1_rcvr_trigger, vuart1_mcr, vuart1_lcr}; assign vuart2_status_register = {16'h00, vuart2_fifos_enabled, 1'b0, vuart2_rcvr_trigger, vuart2_mcr, vuart2_lcr}; reg [16:0] active_irq_request = 0; reg [3:0] irq_delay_counter = 0; reg [4:0] irq_frame_number = 0; reg lpc_irq_in_prev_1 = 1; reg lpc_irq_in_prev_2 = 1; reg lpc_irq_in_prev_3 = 1; reg irq_tx_ready_prev = 0; reg irq_quiet_mode = 0; reg lpc_irq_direction_reg = 0; reg lpc_slave_write_complete = 0; assign address = io_address; assign data_direction = cycle_direction; assign fw_idsel = fw_cycle_idsel; assign fw_msize = fw_cycle_msize; `ifdef LPC_SLAVE_DEBUG // Debug port assign debug_port[3:0] = lpc_data_in; assign debug_port[4] = lpc_frame_n; assign debug_port[5] = lpc_reset_n; assign debug_port[6] = cycle_direction; assign debug_port[7] = lpc_clock; // assign debug_port[11:8] = rx_transfer_state[3:0]; // assign debug_port[9:8] = 
// (tail of the commented-out debug assignment split across the original
//  flattened source lines)
// rx_transfer_state[1:0];
// assign debug_port[11] = vuart1_cycle;
// assign debug_port[10] = ipmi_bt_cycle;
// assign debug_port[15:12] = tx_transfer_state[3:0];
// assign debug_port[15:8] = lpc_fw_input_xfer_read_data;
assign debug_port[12] = lpc_irq_in;
assign debug_port[11] = lpc_irq_direction;
assign debug_port[10:8] = serirq_state[2:0];
// assign debug_port[12:11] = irq_delay_counter[1:0];
// assign debug_port[12] = lpc_irq_out;
// assign debug_port[14:13] = irq_frame_number[1:0];
// assign debug_port[14:12] = serirq_state;
// assign debug_port[15] = irq_quiet_mode;
// assign debug_port[8] = 0;
// assign debug_port[9] = firmware_cycle;
// assign debug_port[10] = data_ready;
// assign debug_port[11] = address_ready;
// assign debug_port[15:12] = fw_cycle_msize;
`else
// Debug disabled -- park the debug port at a constant
assign debug_port = 16'h0000;
`endif

// Signals resampled on lpc_clock to cross from other clock domains
reg tx_cycle_done_reg_rx = 0;
reg [16:0] irq_request_reg = 0;
reg irq_tx_ready_reg = 0;

// Pointers and data for the firmware-cycle transfer buffers
reg [8:0] lpc_fw_input_xfer_read_addr;
wire [7:0] lpc_fw_input_xfer_read_data;
reg [8:0] lpc_fw_output_xfer_write_addr;
reg [7:0] lpc_fw_output_xfer_write_data;
reg lpc_fw_output_xfer_write_wren;

// Pointers and data for the IPMI BT transfer buffers
reg [8:0] ipmi_bt_input_xfer_read_addr;
wire [7:0] ipmi_bt_input_xfer_read_data;
reg [8:0] ipmi_bt_output_xfer_write_addr;
reg [7:0] ipmi_bt_output_xfer_write_data;
reg ipmi_bt_output_xfer_write_wren;

// Firmware-cycle burst position counters
reg [8:0] fw_cycle_rx_nibble_counter;
reg [7:0] fw_cycle_tx_byte_counter;

// Handshake flags / data for cycles serviced locally (VUART / IPMI BT)
// instead of by the external backend
reg rx_special_data_ack = 0;
reg rx_special_continue = 0;
reg [7:0] special_tx_data = 0;

// Registered versions of the IPMI BT control outputs
reg ipmi_bt_bmc_to_host_ctl_sms_ack_reg = 0;
reg ipmi_bt_bmc_to_host_ctl_attn_ack_reg = 0;
reg ipmi_bt_host_to_bmc_ctl_attn_req_reg = 0;
reg ipmi_bt_host_to_bmc_ctl_oem0_req_reg = 0;
reg ipmi_bt_irq_ack_reg = 0;
reg ipmi_bt_irq_bmc_reset_reg = 0;
reg ipmi_bt_host_to_bmc_ctl_h_busy_reg = 0;
// NOTE(review): ipmi_bt_irq_enable is declared 'output wire' in the ANSI port
// list above, but is redeclared here as a 'reg' (and assigned with '<=' in
// the RX state machine below).  That is a conflicting duplicate declaration;
// the port should most likely be 'output reg ipmi_bt_irq_enable' with this
// line removed.  Confirm against the toolchain / upstream source.
reg ipmi_bt_irq_enable = 0;

assign ipmi_bt_bmc_to_host_ctl_sms_ack = ipmi_bt_bmc_to_host_ctl_sms_ack_reg;
assign ipmi_bt_bmc_to_host_ctl_attn_ack = ipmi_bt_bmc_to_host_ctl_attn_ack_reg;
assign ipmi_bt_host_to_bmc_ctl_attn_req =
ipmi_bt_host_to_bmc_ctl_attn_req_reg;
assign ipmi_bt_host_to_bmc_ctl_oem0_req = ipmi_bt_host_to_bmc_ctl_oem0_req_reg;
assign ipmi_bt_irq_ack = ipmi_bt_irq_ack_reg;
assign ipmi_bt_irq_bmc_reset = ipmi_bt_irq_bmc_reset_reg;
assign ipmi_bt_host_to_bmc_ctl_h_busy = ipmi_bt_host_to_bmc_ctl_h_busy_reg;
assign lpc_irq_direction = lpc_irq_direction_reg;

// Merge the internally generated VUART and IPMI BT interrupt sources into the
// 17-bit SERIRQ frame request vector alongside the backend's irq_request.
wire [16:0] vuart_irq_request_overlay;
assign vuart_irq_request_overlay = (vuart2_interrupt_pending << VUART2_IRQ) | (vuart1_interrupt_pending << VUART1_IRQ);
wire [16:0] ipmi_bt_irq_request_overlay;
assign ipmi_bt_irq_request_overlay = (ipmi_bt_alt_irq)?(ipmi_bt_irq_req << IPMI_BT_ALT_IRQ):(ipmi_bt_irq_req << IPMI_BT_IRQ);

// SERIRQ (serialized IRQ) transmit engine.
// Supports both continuous mode (host drives the start frame) and quiet mode
// (this slave pulses the line low to request a transfer); the active mode is
// re-learned from the sampled stop-frame width in state TR04.
always @(posedge lpc_clock) begin
	// Avoid logic glitches due to these signals crossing clock domains
	irq_request_reg <= irq_request;
	irq_tx_ready_reg <= irq_tx_ready;

	if (!lpc_reset_n) begin
		irq_quiet_mode <= 0;
		irq_tx_queued <= 0;
		lpc_irq_in_prev_1 <= 1;
		lpc_irq_in_prev_2 <= 1;
		lpc_irq_in_prev_3 <= 1;
		lpc_irq_out <= 1;
		lpc_irq_direction_reg <= 0;
		serirq_state <= LPC_SERIRQ_STATE_IDLE;
	end else begin
		case (serirq_state)
			LPC_SERIRQ_STATE_IDLE: begin
				if (irq_quiet_mode && irq_tx_ready_reg && !irq_tx_ready_prev) begin
					// Quiet mode and a fresh backend request: latch all pending
					// IRQ sources and pulse the line low to start a transfer.
					active_irq_request <= active_irq_request | irq_request_reg | vuart_irq_request_overlay | ipmi_bt_irq_request_overlay;
					irq_tx_queued <= 1;
					irq_delay_counter <= 0;
					// Initiate quiet mode transfer
					lpc_irq_out <= 0;
					lpc_irq_direction_reg <= 1;
					serirq_state <= LPC_SERIRQ_STATE_TR01;
				end else begin
					// Detect potential start signal from host
					// This can occur in either quiet or continuous mode
					if (!lpc_irq_in) begin
						// Require the line to be sampled low for > 2 clocks
						// before treating it as a genuine start frame.
						if (irq_delay_counter > 2) begin
							// Latch current IRQ requests
							active_irq_request <= active_irq_request | irq_request_reg | vuart_irq_request_overlay | ipmi_bt_irq_request_overlay;
							serirq_state <= LPC_SERIRQ_STATE_TR02;
						end else begin
							irq_delay_counter <= irq_delay_counter + 1;
						end
					end else begin
						irq_delay_counter <= 0;
					end
				end
			end
			LPC_SERIRQ_STATE_TR01: begin
				// Tristate bus (direction 0 releases the pin; the host now
				// takes over driving the start frame)
				lpc_irq_out <= 0;
				lpc_irq_direction_reg <= 0;
				serirq_state <= LPC_SERIRQ_STATE_TR02;
			end
			LPC_SERIRQ_STATE_TR02: begin
				// Wait for completion of start signal from host
				if (lpc_irq_in) begin
					// IRQ0 needs to be asserted nearly immediately after the end of the start pulse
					// if it is to be asserted at all. Handle IRQ0 start pulse assertion here, as the
					// heavy pipelining of the IRQ transmitter will not allow a short enough delay to
					// launch IRQ0 in the next state...
					if (active_irq_request[0]) begin
						// Drive IRQ assert for IRQ0
						lpc_irq_out <= 0;
						lpc_irq_direction_reg <= 1;
					end
					irq_delay_counter <= 1;
					irq_frame_number <= 0;
					serirq_state <= LPC_SERIRQ_STATE_TR03;
				end
			end
			LPC_SERIRQ_STATE_TR03: begin
				// Transmit the 17 IRQ frames; irq_delay_counter steps through
				// the 3 clocks of each frame (assert / recover / turnaround).
				if (irq_frame_number < 17) begin
					if (irq_delay_counter == 0) begin
						if (active_irq_request[irq_frame_number]) begin
							// Drive IRQ assert
							lpc_irq_out <= 0;
							lpc_irq_direction_reg <= 1;
						end
					end else if (irq_delay_counter == 1) begin
						if (active_irq_request[irq_frame_number]) begin
							// Drive line back high to prepare for TAR cycle.
							// This avoids the line floating low / undetermined for an extended period of time
							// after we stop driving it; i.e. not relying solely on pullup resistor response.
							lpc_irq_out <= 1;
							lpc_irq_direction_reg <= 1;
						end
					end else begin
						// Turnaround clock: release the bus
						lpc_irq_out <= 1;
						lpc_irq_direction_reg <= 0;
					end
				end else begin
					lpc_irq_out <= 1;
					serirq_state <= LPC_SERIRQ_STATE_TR04;
				end
				if (irq_delay_counter > 1) begin
					irq_frame_number <= irq_frame_number + 1;
					irq_delay_counter <= 0;
				end else begin
					irq_delay_counter <= irq_delay_counter + 1;
				end
			end
			LPC_SERIRQ_STATE_TR04: begin
				// Wait for rising edge
				if (!lpc_irq_in_prev_1 && lpc_irq_in) begin
					// Stop-frame width selects the next mode: a low pulse wide
					// enough to fill all three synchronizer samples means the
					// host selected continuous mode, otherwise quiet mode.
					if (!lpc_irq_in_prev_3 && !lpc_irq_in_prev_2 && !lpc_irq_in_prev_1) begin
						irq_quiet_mode <= 0;
					end else begin
						irq_quiet_mode <= 1;
					end
					active_irq_request <= 0;
					serirq_state <= LPC_SERIRQ_STATE_IDLE;
				end
				// Ensure bus is tristated
				lpc_irq_direction_reg <= 0;
			end
			default: begin
				// Should never reach this state
				serirq_state <= LPC_SERIRQ_STATE_IDLE;
			end
		endcase
	end

	// Delayed history of the IRQ input line, used above for edge detection
	// and stop-frame width measurement.
	lpc_irq_in_prev_1 <= lpc_irq_in;
	lpc_irq_in_prev_2 <= lpc_irq_in_prev_1;
	lpc_irq_in_prev_3 <= lpc_irq_in_prev_2;
	irq_tx_ready_prev <= irq_tx_ready_reg;

	// Drop the queued flag once the backend deasserts its ready strobe
	// mid-transfer.
	if ((serirq_state != LPC_SERIRQ_STATE_IDLE) && !irq_tx_ready_reg) begin
		irq_tx_queued <= 0;
	end
end

// LPC receive / cycle-decode state machine (body continues beyond this chunk).
always @(posedge lpc_clock) begin
	// Avoid logic glitches due to this signal crossing clock domains
	// NOTE(review): blocking assignment ('=') inside a clocked block; '<='
	// would be the conventional form for a synchronizer stage -- confirm the
	// blocking (same-cycle visibility) semantics are intentional here.
	tx_cycle_done_reg_rx = tx_cycle_done;

	if (!lpc_reset_n) begin
		// Reset receiver and locally-serviced peripheral state
		rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE;
		lpc_data_direction <= 0;
		abort_tx_cycle <= 1;
		rx_special_data_ack <= 0;
		rx_special_continue <= 0;
		vuart1_lcr <= 0;
		vuart1_fifos_enabled <= 0;
		vuart1_interrupt_pending <= 0;
		vuart1_iir_read_tx_empty_assert <= 0;
		vuart1_tx_fifo_empty_irq_pending <= 0;
		vuart1_lsr_read_assert <= 0;
		vuart1_rx_break_irq_pending <= 0;
		vuart1_rx_break_request_prev <= 0;
		vuart1_interrupt_id <= 0;
		vuart1_rcvr_trigger <= 0;
		vuart1_tx_fifo_reset_reg <= 0;
		vuart1_rx_fifo_reset_reg <= 0;
		vuart1_rx_fifo_rpop_reg <= 0;
		vuart1_rx_data_queue_contents_read_timeout <= 0;
		vuart1_rx_data_queue_contents_past_trigger <= 0;
		vuart2_lcr <= 0;
		vuart2_fifos_enabled <= 0;
		vuart2_interrupt_pending <= 0;
		vuart2_iir_read_tx_empty_assert <= 0;
vuart2_tx_fifo_empty_irq_pending <= 0; vuart2_lsr_read_assert <= 0; vuart2_rx_break_irq_pending <= 0; vuart2_rx_break_request_prev <= 0; vuart2_interrupt_id <= 0; vuart2_rcvr_trigger <= 0; vuart2_tx_fifo_reset_reg <= 0; vuart2_rx_fifo_reset_reg <= 0; vuart2_rx_fifo_rpop_reg <= 0; vuart2_rx_data_queue_contents_read_timeout <= 0; vuart2_rx_data_queue_contents_past_trigger <= 0; ipmi_bt_bmc_to_host_ctl_sms_ack_reg <= 0; ipmi_bt_bmc_to_host_ctl_attn_ack_reg <= 0; ipmi_bt_host_to_bmc_ctl_attn_req_reg <= 0; ipmi_bt_host_to_bmc_ctl_oem0_req_reg <= 0; ipmi_bt_irq_ack_reg <= 0; ipmi_bt_irq_bmc_reset_reg <= 0; ipmi_bt_host_to_bmc_ctl_h_busy_reg <= 0; ipmi_bt_irq_enable <= 0; // Signal exception to CPU if (!exception_ack) begin exception[1] <= 1; end end else begin if (!lpc_frame_n) begin if ((rx_transfer_state == LPC_RX_TRANSFER_STATE_IDLE) || (rx_transfer_state == LPC_RX_TRANSFER_STATE_TR01)) begin cycle_type <= 0; io_address <= 0; data_ready <= 0; address_ready <= 0; vuart1_cycle <= 0; vuart2_cycle <= 0; ipmi_bt_cycle <= 0; range_select_cycle <= 0; abort_tx_cycle <= 1; if (lpc_data_in == LPC_CODEWORD_ISA_START) begin cycle_direction <= 0; tpm_cycle <= 0; firmware_cycle <= 0; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR01; end else begin if (lpc_data_in == LPC_CODEWORD_TPM_START) begin cycle_direction <= 0; tpm_cycle <= 1; firmware_cycle <= 0; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR01; end else begin if ((lpc_data_in == LPC_CODEWORD_FWR_START) || (lpc_data_in == LPC_CODEWORD_FWW_START)) begin `ifdef ENABLE_FIRMWARE_MEMORY_CYCLES tpm_cycle <= 0; firmware_cycle <= 1; if (lpc_data_in == LPC_CODEWORD_FWW_START) begin cycle_direction <= 1; end else begin cycle_direction <= 0; end rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR01; `else cycle_direction <= 0; tpm_cycle <= 0; firmware_cycle <= 0; rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; `endif end else begin rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; end end end end else begin if (!lpc_frame_n_prev) begin // Host 
requested active cycle abort lpc_data_direction <= 0; abort_tx_cycle <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; // Signal exception to CPU if (!exception_ack) begin exception[0] <= 1; end end end end else begin case (rx_transfer_state) LPC_RX_TRANSFER_STATE_IDLE: begin // Idle state cycle_type <= 0; cycle_direction <= 0; io_address <= 0; tpm_cycle <= 0; firmware_cycle <= 0; data_ready <= 0; address_ready <= 0; vuart1_cycle <= 0; vuart2_cycle <= 0; ipmi_bt_cycle <= 0; range_select_cycle <= 0; rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; abort_tx_cycle <= 1; lpc_data_direction <= 0; end LPC_RX_TRANSFER_STATE_TR01: begin // Receive cycle type and direction cycle_type <= lpc_data_in[3:2]; cycle_direction <= lpc_data_in[1]; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR02; end LPC_RX_TRANSFER_STATE_TR02: begin if (cycle_type == LPC_CYCLE_TYPE_IO) begin // Receive I/O address -- nibble 1 io_address[15:12] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR03; end else begin // Cycle type not handled by this peripheral, return to idle rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; end abort_tx_cycle <= 0; end LPC_RX_TRANSFER_STATE_TR03: begin // Receive I/O address -- nibble 2 io_address[11:8] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR04; end LPC_RX_TRANSFER_STATE_TR04: begin // Receive I/O address -- nibble 3 io_address[7:4] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR05; end LPC_RX_TRANSFER_STATE_TR05: begin // Receive I/O address -- nibble 4 io_address[3:0] <= lpc_data_in; // Preliminary target peripheral routing if (enable_vuart1 && ({io_address[15:4], lpc_data_in[3], 3'b000} == VUART1_BASE_ADDRESS)) begin vuart1_cycle <= 1; if (cycle_direction == 0) begin // Start driving LAD lines lpc_data_direction <= 1; end end if (enable_vuart2 && ({io_address[15:4], lpc_data_in[3], 3'b000} == VUART2_BASE_ADDRESS)) begin vuart2_cycle <= 1; if (cycle_direction == 0) begin // Start driving LAD lines lpc_data_direction 
<= 1; end end if (enable_ipmi_bt && ({io_address[15:4], lpc_data_in[3:2], 2'b00} == ipmi_bt_port_base_address)) begin ipmi_bt_cycle <= 1; if (cycle_direction == 0) begin // Start driving LAD lines lpc_data_direction <= 1; end end if ((({io_address[15:4], lpc_data_in[3:0]} >= range1_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range1_end)) || (({io_address[15:4], lpc_data_in[3:0]} >= range2_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range2_end)) || (({io_address[15:4], lpc_data_in[3:0]} >= range3_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range3_end)) || (({io_address[15:4], lpc_data_in[3:0]} >= range4_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range4_end)) || (({io_address[15:4], lpc_data_in[3:0]} >= range5_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range5_end)) || (({io_address[15:4], lpc_data_in[3:0]} >= range6_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range6_end)) ) begin range_select_cycle <= 1; if (cycle_direction == 0) begin // Start driving LAD lines lpc_data_direction <= 1; end end rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR06; end LPC_RX_TRANSFER_STATE_TR06: begin if (vuart1_cycle || vuart2_cycle || ipmi_bt_cycle || range_select_cycle || tpm_cycle) begin // TPM cycles are always decoded // Address handled by this peripheral if (cycle_direction == 1) begin // Receive I/O data -- nibble 1 rx_data[3:0] <= lpc_data_in; if (!vuart1_cycle && !vuart2_cycle && !ipmi_bt_cycle) begin address_ready <= 1; end rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR07; end else begin if (vuart1_cycle) begin case (io_address[2:0]) 0: begin if (vuart1_lcr[7]) begin special_tx_data <= vuart1_dll; end else begin if (!vuart1_rx_fifo_empty) begin special_tx_data <= vuart1_rx_fifo_data; vuart1_rx_fifo_rpop_reg <= 1; end else begin special_tx_data <= 8'hff; end end end 1: begin if (vuart1_lcr[7]) begin special_tx_data <= vuart1_dlm; end else begin special_tx_data <= {4'b0000, vuart1_ier}; end end 2: begin if (vuart1_interrupt_pending && 
(vuart1_interrupt_id == 3'b001)) begin vuart1_iir_read_tx_empty_assert <= 1; end special_tx_data <= vuart1_iir; end 3: special_tx_data <= vuart1_lcr; 4: special_tx_data <= {3'b111, vuart1_mcr}; 5: begin if (vuart1_control_register[0]) begin vuart1_assert_b2h_break_clear_reg <= 1; special_tx_data <= vuart1_lsr | 8'b00010000; end else begin special_tx_data <= vuart1_lsr; end vuart1_lsr_read_assert <= 1; end 6: special_tx_data <= vuart1_msr; 7: special_tx_data <= vuart1_scr; endcase rx_special_continue <= 1; end else if (vuart2_cycle) begin case (io_address[2:0]) 0: begin if (vuart2_lcr[7]) begin special_tx_data <= vuart2_dll; end else begin if (!vuart2_rx_fifo_empty) begin special_tx_data <= vuart2_rx_fifo_data; vuart2_rx_fifo_rpop_reg <= 1; end else begin special_tx_data <= 8'hff; end end end 1: begin if (vuart2_lcr[7]) begin special_tx_data <= vuart2_dlm; end else begin special_tx_data <= {4'b0000, vuart2_ier}; end end 2: begin if (vuart2_interrupt_pending && (vuart2_interrupt_id == 3'b001)) begin vuart2_iir_read_tx_empty_assert <= 1; end special_tx_data <= vuart2_iir; end 3: special_tx_data <= vuart2_lcr; 4: special_tx_data <= {3'b111, vuart2_mcr}; 5: begin if (vuart2_control_register[0]) begin vuart2_assert_b2h_break_clear_reg <= 1; special_tx_data <= vuart2_lsr | 8'b00010000; end else begin special_tx_data <= vuart2_lsr; end vuart2_lsr_read_assert <= 1; end 6: special_tx_data <= vuart2_msr; 7: special_tx_data <= vuart2_scr; endcase rx_special_continue <= 1; end else if (ipmi_bt_cycle) begin case (io_address[1:0]) 0: begin special_tx_data[7] <= ipmi_bt_bmc_to_host_ctl_b_busy; special_tx_data[6] <= ipmi_bt_host_to_bmc_ctl_h_busy_reg; special_tx_data[5] <= ipmi_bt_host_to_bmc_ctl_oem0_req_reg; special_tx_data[4] <= ipmi_bt_bmc_to_host_ctl_sms_req; special_tx_data[3] <= ipmi_bt_bmc_to_host_ctl_attn_req; special_tx_data[2] <= ipmi_bt_host_to_bmc_ctl_attn_req_reg; special_tx_data[1] <= 1'b0; special_tx_data[0] <= 1'b0; end 1: begin special_tx_data <= 
ipmi_bt_input_xfer_read_data; ipmi_bt_input_xfer_read_addr <= ipmi_bt_input_xfer_read_addr + 1; end 2: begin special_tx_data[7:2] = 6'b000000; special_tx_data[1] = ipmi_bt_irq_req; special_tx_data[0] = ipmi_bt_irq_enable; end endcase rx_special_continue <= 1; end else begin // Signal CPU that address is ready address_ready <= 1; end // Assert TX cycle start flag for > 1 clock start_tx_cycle <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; end end else begin // Address not handled by this peripheral, return to idle rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; abort_tx_cycle <= 1; lpc_data_direction <= 0; end end LPC_RX_TRANSFER_STATE_TR07: begin // Receive I/O data -- nibble 2 rx_data[7:4] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; // Start driving LAD lines lpc_data_direction <= 1; // Assert TX cycle start flag for > 1 clock start_tx_cycle <= 1; end LPC_RX_TRANSFER_STATE_TR08: begin if (vuart1_cycle) begin case (io_address[2:0]) 0: begin if (vuart1_lcr[7]) begin vuart1_dll <= rx_data; end else begin if (!vuart1_tx_fifo_full) begin vuart1_tx_fifo_data_reg <= rx_data; vuart1_tx_fifo_wren_reg <= 1; end end end 1: begin if (vuart1_lcr[7]) begin vuart1_dlm <= rx_data; end else begin vuart1_ier <= rx_data[3:0]; end end 2: begin // FIFO control vuart1_fifos_enabled <= rx_data[0]; if (rx_data[1]) begin vuart1_rx_fifo_reset_reg <= 1; end if (rx_data[2]) begin vuart1_tx_fifo_reset_reg <= 1; end vuart1_rcvr_trigger <= rx_data[7:6]; end 3: vuart1_lcr <= rx_data; 4: vuart1_mcr <= rx_data[4:0]; 6: vuart1_msr <= rx_data; 7: vuart1_scr <= rx_data; endcase rx_special_data_ack <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; end else if (vuart2_cycle) begin case (io_address[2:0]) 0: begin if (vuart2_lcr[7]) begin vuart2_dll <= rx_data; end else begin if (!vuart2_tx_fifo_full) begin vuart2_tx_fifo_data_reg <= rx_data; vuart2_tx_fifo_wren_reg <= 1; end end end 1: begin if (vuart2_lcr[7]) begin vuart2_dlm <= rx_data; end else begin vuart2_ier <= 
rx_data[3:0]; end end 2: begin // FIFO control vuart2_fifos_enabled <= rx_data[0]; if (rx_data[1]) begin vuart2_rx_fifo_reset_reg <= 1; end if (rx_data[2]) begin vuart2_tx_fifo_reset_reg <= 1; end vuart2_rcvr_trigger <= rx_data[7:6]; end 3: vuart2_lcr <= rx_data; 4: vuart2_mcr <= rx_data[4:0]; 6: vuart2_msr <= rx_data; 7: vuart2_scr <= rx_data; endcase rx_special_data_ack <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; end else if (ipmi_bt_cycle) begin case (io_address[1:0]) 0: begin if (rx_data[6]) begin if (ipmi_bt_host_to_bmc_ctl_h_busy_reg) begin ipmi_bt_host_to_bmc_ctl_h_busy_reg <= 1'b0; end else begin ipmi_bt_host_to_bmc_ctl_h_busy_reg <= 1'b1; end end if (rx_data[5]) begin ipmi_bt_host_to_bmc_ctl_oem0_req_reg <= 1'b1; end if (rx_data[4]) begin ipmi_bt_bmc_to_host_ctl_sms_ack_reg <= 1'b1; end if (rx_data[3]) begin ipmi_bt_bmc_to_host_ctl_attn_ack_reg <= 1'b1; end if (rx_data[2]) begin ipmi_bt_host_to_bmc_ctl_attn_req_reg <= 1'b1; end if (rx_data[1]) begin ipmi_bt_input_xfer_read_addr <= 0; end if (rx_data[0]) begin ipmi_bt_output_xfer_write_addr <= 0; ipmi_bt_output_xfer_write_wren <= 0; end end 1: begin ipmi_bt_output_xfer_write_data <= rx_data; ipmi_bt_output_xfer_write_wren <= 1; end 2: begin if (rx_data[7]) begin ipmi_bt_irq_bmc_reset_reg <= 1'b1; end if (rx_data[1]) begin ipmi_bt_irq_ack_reg <= 1'b1; end ipmi_bt_irq_enable <= rx_data[0]; end endcase lpc_slave_write_complete <= 0; rx_transfer_state <= LPC_RX_TRANSFER_STATE_IW01; end else begin // Signal CPU that address / data are ready address_ready <= 1; if (cycle_direction == 1) begin data_ready <= 1; end rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; end end LPC_RX_TRANSFER_STATE_IW01: begin if (!lpc_slave_write_complete) begin if (ipmi_bt_cycle) begin case (io_address[1:0]) 1: begin ipmi_bt_output_xfer_write_addr <= ipmi_bt_output_xfer_write_addr + 1; ipmi_bt_output_xfer_write_wren <= 0; lpc_slave_write_complete <= 1; end 2: begin // Handle synchronous IPMI BT IRQ reset handshake signals if 
(ipmi_bt_irq_bmc_reset_cont) begin ipmi_bt_irq_bmc_reset_reg <= 0; end // Do not continue write until slave has completed its reset cycle if (!ipmi_bt_irq_bmc_reset_reg) begin lpc_slave_write_complete <= 1; end end default: begin lpc_slave_write_complete <= 1; end endcase end else begin lpc_slave_write_complete <= 1; end end else begin rx_special_data_ack <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; end end LPC_RX_TRANSFER_STATE_TR09: begin // Clear special cycle flags if set if (data_ready_cont) begin rx_special_data_ack <= 0; end if (continue_cont) begin rx_special_continue <= 0; end // Reset VUART FIFO control signals vuart1_tx_fifo_wren_reg <= 0; vuart1_tx_fifo_reset_reg <= 0; vuart1_rx_fifo_rpop_reg <= 0; vuart1_rx_fifo_reset_reg <= 0; vuart2_tx_fifo_wren_reg <= 0; vuart2_tx_fifo_reset_reg <= 0; vuart2_rx_fifo_rpop_reg <= 0; vuart2_rx_fifo_reset_reg <= 0; // Wait for TX cycle to complete start_tx_cycle <= 0; if (tx_cycle_done_reg_rx) begin lpc_data_direction <= 0; rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; end end LPC_RX_TRANSFER_STATE_FR01: begin // Receive IDSEL field fw_cycle_idsel <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR02; end LPC_RX_TRANSFER_STATE_FR02: begin // Receive firmware cycle address -- nibble 1 io_address[27:24] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR03; abort_tx_cycle <= 0; end LPC_RX_TRANSFER_STATE_FR03: begin // Receive firmware cycle address -- nibble 2 io_address[23:20] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR04; end LPC_RX_TRANSFER_STATE_FR04: begin // Receive firmware cycle address -- nibble 3 io_address[19:16] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR05; end LPC_RX_TRANSFER_STATE_FR05: begin // Receive firmware cycle address -- nibble 4 io_address[15:12] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR06; end LPC_RX_TRANSFER_STATE_FR06: begin // Receive firmware cycle address -- nibble 5 io_address[11:8] <= lpc_data_in; 
rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR07; end LPC_RX_TRANSFER_STATE_FR07: begin // Receive firmware cycle address -- nibble 6 io_address[7:4] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR08; end LPC_RX_TRANSFER_STATE_FR08: begin // Receive firmware cycle address -- nibble 7 io_address[3:0] <= lpc_data_in; rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR09; end LPC_RX_TRANSFER_STATE_FR09: begin // Receive MSIZE field fw_cycle_msize <= lpc_data_in; // Handle data transfer if (cycle_direction == 1) begin rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR10; fw_cycle_rx_nibble_counter <= 0; end else begin // Start driving LAD lines lpc_data_direction <= 1; // Assert TX cycle start flag for > 1 clock start_tx_cycle <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; end end LPC_RX_TRANSFER_STATE_FR10: begin // Signal CPU that address is ready address_ready <= 1; // Receive data, LSN first if (!fw_cycle_rx_nibble_counter[0]) begin lpc_fw_output_xfer_write_addr <= fw_cycle_rx_nibble_counter[8:1]; lpc_fw_output_xfer_write_data[3:0] <= lpc_data_in; lpc_fw_output_xfer_write_wren <= 0; end else begin lpc_fw_output_xfer_write_data[7:4] <= lpc_data_in; lpc_fw_output_xfer_write_wren <= 1; end case (fw_cycle_msize) 4'b0000: begin if (fw_cycle_rx_nibble_counter == 0) begin // Start driving LAD lines // One cycle of delay is introduced by the register on the tristate control line, // so to avoid missed LWAIT at the LPC master output direction has to be set one // cycle "early"... lpc_data_direction <= 1; end else if (fw_cycle_rx_nibble_counter >= 1) begin // Assert TX cycle start flag for > 1 clock start_tx_cycle <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; end end 4'b0001: begin if (fw_cycle_rx_nibble_counter == 1) begin // Start driving LAD lines // One cycle of delay is introduced by the register on the tristate control line, // so to avoid missed LWAIT at the LPC master output direction has to be set one // cycle "early"... 
lpc_data_direction <= 1; end else if (fw_cycle_rx_nibble_counter >= 2) begin // Assert TX cycle start flag for > 1 clock start_tx_cycle <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; end end 4'b0010: begin if (fw_cycle_rx_nibble_counter == 7) begin // Start driving LAD lines // One cycle of delay is introduced by the register on the tristate control line, // so to avoid missed LWAIT at the LPC master output direction has to be set one // cycle "early"... lpc_data_direction <= 1; end else if (fw_cycle_rx_nibble_counter >= 8) begin // Assert TX cycle start flag for > 1 clock start_tx_cycle <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; end end 4'b0100: begin if (fw_cycle_rx_nibble_counter == 31) begin // Start driving LAD lines // One cycle of delay is introduced by the register on the tristate control line, // so to avoid missed LWAIT at the LPC master output direction has to be set one // cycle "early"... lpc_data_direction <= 1; end else if (fw_cycle_rx_nibble_counter >= 32) begin // Assert TX cycle start flag for > 1 clock start_tx_cycle <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; end end 4'b0111: begin if (fw_cycle_rx_nibble_counter == 255) begin // Start driving LAD lines // One cycle of delay is introduced by the register on the tristate control line, // so to avoid missed LWAIT at the LPC master output direction has to be set one // cycle "early"... lpc_data_direction <= 1; end else if (fw_cycle_rx_nibble_counter >= 256) begin // Assert TX cycle start flag for > 1 clock start_tx_cycle <= 1; rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; end end default: begin // Disallowed size codeword // Abort cycle and signal exception rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; // Signal exception to CPU if (!exception_ack) begin exception[2] <= 1; end end endcase fw_cycle_rx_nibble_counter <= fw_cycle_rx_nibble_counter + 1; end default: begin // Not reachable under normal operation! 
rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; end endcase if (rx_transfer_state != LPC_RX_TRANSFER_STATE_IW01) begin // Handle asynchronous IPMI BT interface handshake signals if (ipmi_bt_bmc_to_host_ctl_sms_ack_cont) begin ipmi_bt_bmc_to_host_ctl_sms_ack_reg <= 0; end if (ipmi_bt_bmc_to_host_ctl_attn_ack_cont) begin ipmi_bt_bmc_to_host_ctl_attn_ack_reg <= 0; end if (ipmi_bt_host_to_bmc_ctl_attn_req_cont) begin ipmi_bt_host_to_bmc_ctl_attn_req_reg <= 0; end if (ipmi_bt_host_to_bmc_ctl_oem0_req_cont) begin ipmi_bt_host_to_bmc_ctl_oem0_req_reg <= 0; end if (ipmi_bt_irq_ack_cont) begin ipmi_bt_irq_ack_reg <= 0; end end if (exception_ack) begin exception <= 0; end end end // VUART IRQ signalling handlers if (vuart1_rx_fifo_rpop_reg) begin vuart1_rx_fifo_read_timeout_counter <= 0; vuart1_rx_data_queue_contents_read_timeout <= 0; end else begin if (vuart1_rx_fifo_empty) begin vuart1_rx_fifo_read_timeout_counter <= 0; vuart1_rx_data_queue_contents_read_timeout <= 0; end else begin // NOTE // This deviates intentionally from the 16550 UART timeouts to keep overall logic simple // In a VUART situation we don't care that much about exact character timing, since we'll continue // to eat up bytes until the FIFOs are full. 
// Use 10us as a reasonable value for the timeout here (slightly longer than 1 character time at 115200 baud) if (vuart1_rx_fifo_read_timeout_counter > 333) begin vuart1_rx_data_queue_contents_read_timeout <= 1; end else begin vuart1_rx_fifo_read_timeout_counter <= vuart1_rx_fifo_read_timeout_counter + 1; end end end if (vuart2_rx_fifo_rpop_reg) begin vuart2_rx_fifo_read_timeout_counter <= 0; vuart2_rx_data_queue_contents_read_timeout <= 0; end else begin if (vuart2_rx_fifo_empty) begin vuart2_rx_fifo_read_timeout_counter <= 0; vuart2_rx_data_queue_contents_read_timeout <= 0; end else begin // NOTE // This deviates intentionally from the 16550 UART timeouts to keep overall logic simple // In a VUART situation we don't care that much about exact character timing, since we'll continue // to eat up bytes until the FIFOs are full. // Use 10us as a reasonable value for the timeout here (slightly longer than 1 character time at 115200 baud) if (vuart2_rx_fifo_read_timeout_counter > 333) begin vuart2_rx_data_queue_contents_read_timeout <= 1; end else begin vuart2_rx_fifo_read_timeout_counter <= vuart2_rx_fifo_read_timeout_counter + 1; end end end case (vuart1_rcvr_trigger) 2'b00: begin if ((vuart1_rx_data_available_count >= 1) || vuart1_rx_fifo_full) begin vuart1_rx_data_queue_contents_past_trigger <= 1; end else begin vuart1_rx_data_queue_contents_past_trigger <= 0; end end 2'b01: begin if ((vuart1_rx_data_available_count >= 4) || vuart1_rx_fifo_full) begin vuart1_rx_data_queue_contents_past_trigger <= 1; end else begin vuart1_rx_data_queue_contents_past_trigger <= 0; end end 2'b10: begin if ((vuart1_rx_data_available_count >= 8) || vuart1_rx_fifo_full) begin vuart1_rx_data_queue_contents_past_trigger <= 1; end else begin vuart1_rx_data_queue_contents_past_trigger <= 0; end end 2'b11: begin if ((vuart1_rx_data_available_count >= 14) || vuart1_rx_fifo_full) begin vuart1_rx_data_queue_contents_past_trigger <= 1; end else begin vuart1_rx_data_queue_contents_past_trigger <= 
0; end end endcase case (vuart2_rcvr_trigger) 2'b00: begin if ((vuart2_rx_data_available_count >= 1) || vuart2_rx_fifo_full) begin vuart2_rx_data_queue_contents_past_trigger <= 1; end else begin vuart2_rx_data_queue_contents_past_trigger <= 0; end end 2'b01: begin if ((vuart2_rx_data_available_count >= 4) || vuart2_rx_fifo_full) begin vuart2_rx_data_queue_contents_past_trigger <= 1; end else begin vuart2_rx_data_queue_contents_past_trigger <= 0; end end 2'b10: begin if ((vuart2_rx_data_available_count >= 8) || vuart2_rx_fifo_full) begin vuart2_rx_data_queue_contents_past_trigger <= 1; end else begin vuart2_rx_data_queue_contents_past_trigger <= 0; end end 2'b11: begin if ((vuart2_rx_data_available_count >= 14) || vuart2_rx_fifo_full) begin vuart2_rx_data_queue_contents_past_trigger <= 1; end else begin vuart2_rx_data_queue_contents_past_trigger <= 0; end end endcase if (vuart1_ier[2] && vuart1_rx_break_irq_pending) begin vuart1_interrupt_pending <= 1; vuart1_interrupt_id <= 3'b010; end else if (vuart1_ier[0] && vuart1_rx_data_queue_contents_past_trigger) begin vuart1_interrupt_pending <= 1; vuart1_interrupt_id <= 3'b010; end else if (vuart1_ier[0] && vuart1_rx_data_queue_contents_read_timeout) begin vuart1_interrupt_pending <= 1; vuart1_interrupt_id <= 3'b110; end else if (vuart1_ier[1] && vuart1_tx_fifo_empty_irq_pending) begin vuart1_interrupt_pending <= 1; vuart1_interrupt_id <= 3'b001; end else begin vuart1_interrupt_pending <= 0; vuart1_interrupt_id <= 3'b000; end if (vuart1_tx_fifo_wren_reg || vuart1_iir_read_tx_empty_assert) begin vuart1_tx_fifo_empty_irq_pending <= 0; end else begin if (vuart1_tx_fifo_empty && !vuart1_tx_fifo_empty_prev) begin vuart1_tx_fifo_empty_irq_pending <= 1; end end if (vuart1_lsr_read_assert || !vuart1_control_register[0]) begin vuart1_rx_break_irq_pending <= 0; end else begin if (vuart1_control_register[0] && !vuart1_rx_break_request_prev) begin vuart1_rx_break_irq_pending <= 1; end end if (vuart2_ier[2] && 
vuart2_rx_break_irq_pending) begin vuart2_interrupt_pending <= 1; vuart2_interrupt_id <= 3'b010; end else if (vuart2_ier[0] && vuart2_rx_data_queue_contents_past_trigger) begin vuart2_interrupt_pending <= 1; vuart2_interrupt_id <= 3'b010; end else if (vuart2_ier[0] && vuart2_rx_data_queue_contents_read_timeout) begin vuart2_interrupt_pending <= 1; vuart2_interrupt_id <= 3'b110; end else if (vuart2_ier[1] && vuart2_tx_fifo_empty_irq_pending) begin vuart2_interrupt_pending <= 1; vuart2_interrupt_id <= 3'b001; end else begin vuart2_interrupt_pending <= 0; vuart2_interrupt_id <= 3'b000; end if (vuart2_tx_fifo_wren_reg || vuart2_iir_read_tx_empty_assert) begin vuart2_tx_fifo_empty_irq_pending <= 0; end else begin if (vuart2_tx_fifo_empty && !vuart2_tx_fifo_empty_prev) begin vuart2_tx_fifo_empty_irq_pending <= 1; end end if (vuart2_lsr_read_assert || !vuart2_control_register[0]) begin vuart2_rx_break_irq_pending <= 0; end else begin if (vuart2_control_register[0] && !vuart2_rx_break_request_prev) begin vuart2_rx_break_irq_pending <= 1; end end if (vuart1_iir_read_tx_empty_assert) begin vuart1_iir_read_tx_empty_assert <= 0; end if (vuart2_iir_read_tx_empty_assert) begin vuart2_iir_read_tx_empty_assert <= 0; end if (!vuart1_control_register[0]) begin vuart1_assert_b2h_break_clear_reg <= 0; end if (!vuart2_control_register[0]) begin vuart2_assert_b2h_break_clear_reg <= 0; end if (vuart1_lsr_read_assert) begin vuart1_lsr_read_assert <= 0; end if (vuart2_lsr_read_assert) begin vuart2_lsr_read_assert <= 0; end vuart1_tx_fifo_empty_prev <= vuart1_tx_fifo_empty; vuart2_tx_fifo_empty_prev <= vuart2_tx_fifo_empty; vuart1_rx_break_request_prev <= vuart1_control_register[0]; vuart2_rx_break_request_prev <= vuart2_control_register[0]; lpc_frame_n_prev <= lpc_frame_n; end reg start_tx_cycle_reg_tx = 0; reg abort_tx_cycle_reg_tx = 0; reg data_ack_reg_tx = 0; reg continue_reg_tx = 0; reg special_data_ack_reg_tx = 0; reg special_continue_reg_tx = 0; reg read_is_special_tx = 0; reg [7:0] 
lpc_tx_data_buffer = 0;				// completes the "reg [7:0]" declaration begun on the previous line;
						// holds the byte latched from the FW input BRAM while its two nibbles go out
reg [7:0] cycle_completion_codeword = 0;	// SYNC codeword (READY or ERROR) latched for the cycle being completed

// LPC slave TX state machine (lpc_clock domain).
//
// Drives lpc_data_out for the slave-to-host phases of a cycle: LWAIT while the
// CPU side works, the SYNC codeword, the read-data nibbles (I/O or firmware
// cycles), and the closing turn-around. Entry is triggered by the RX state
// machine via start_tx_cycle; abort_tx_cycle forces a return to IDLE.
always @(posedge lpc_clock) begin
	// Avoid logic glitches due to these signals crossing clock domains
	// (each request/ack is re-registered once into the lpc_clock domain)
	start_tx_cycle_reg_tx <= start_tx_cycle;
	abort_tx_cycle_reg_tx <= abort_tx_cycle;
	data_ack_reg_tx <= data_ack;
	continue_reg_tx <= continue;
	special_data_ack_reg_tx <= rx_special_data_ack;
	special_continue_reg_tx <= rx_special_continue;

	if (abort_tx_cycle_reg_tx) begin
		tx_transfer_state <= LPC_TX_TRANSFER_STATE_IDLE;
	end else begin
		case (tx_transfer_state)
			LPC_TX_TRANSFER_STATE_IDLE: begin
				// Dispatch on cycle type:
				//   cycle_direction == 1 : host write  -> TR01 (SYNC-only completion)
				//   cycle_direction == 0 : host read   -> FR01 for firmware cycles,
				//                                         TR04 for everything else
				if (start_tx_cycle_reg_tx) begin
					if (cycle_direction == 1) begin
						tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR01;
					end else begin
						if (firmware_cycle) begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR01;
						end else begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR04;
						end
					end
				end
				data_ready_cont <= 0;
				continue_cont <= 0;
				tx_cycle_done <= 0;
				// Drive LWAIT by default
				lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
			end
			LPC_TX_TRANSFER_STATE_TR01: begin
				// Write cycle: wait for the CPU (data_ack) or the internal
				// special-cycle handler (special_data_ack) to accept the data,
				// then latch READY/ERROR as the SYNC codeword to emit.
				// Special cycles never report a transfer error.
				if (data_ack_reg_tx || special_data_ack_reg_tx) begin
					data_ready_cont <= 1;
					if (transfer_error && !special_data_ack_reg_tx) begin
						cycle_completion_codeword <= LPC_CODEWORD_SYNC_ERROR;
					end else begin
						cycle_completion_codeword <= LPC_CODEWORD_SYNC_READY;
					end
					tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR02;
				end
				// Drive LWAIT
				lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
			end
			LPC_TX_TRANSFER_STATE_TR02: begin
				// Handshake: wait for both ack sources to deassert before
				// dropping data_ready_cont and emitting SYNC.
				if (!data_ack_reg_tx && !special_data_ack_reg_tx) begin
					data_ready_cont <= 0;
					tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR03;
				end
				// Drive LWAIT
				lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
			end
			LPC_TX_TRANSFER_STATE_TR03: begin
				// Drive SYNC (write cycle carries no read data; go straight
				// to the turn-around states)
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
				lpc_data_out <= cycle_completion_codeword;
			end
			LPC_TX_TRANSFER_STATE_TR04: begin
				// Read cycle: wait for CPU continue (normal read) or the
				// internal special-cycle continue, latch READY/ERROR.
				if (continue_reg_tx || special_continue_reg_tx) begin
					continue_cont <= 1;
					if (transfer_error && !special_continue_reg_tx) begin
						cycle_completion_codeword <= LPC_CODEWORD_SYNC_ERROR;
					end else begin
						cycle_completion_codeword <= LPC_CODEWORD_SYNC_READY;
					end
					tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR05;
				end
				// Remember whether the data source for TR07/TR08 is the
				// special-cycle register (special_tx_data) or the CPU (tx_data)
				if (special_continue_reg_tx) begin
					read_is_special_tx <= 1;
				end else begin
					read_is_special_tx <= 0;
				end
				// Drive LWAIT
				lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
			end
			LPC_TX_TRANSFER_STATE_TR05: begin
				// Handshake: wait for both continue sources to deassert
				if (!continue_reg_tx && !special_continue_reg_tx) begin
					continue_cont <= 0;
					tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR06;
				end
				// Drive LWAIT
				lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
			end
			LPC_TX_TRANSFER_STATE_TR06: begin
				// Drive sync
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR07;
				lpc_data_out <= cycle_completion_codeword;
			end
			LPC_TX_TRANSFER_STATE_TR07: begin
				// Transmit first (low) nibble of I/O read data
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR08;
				if (read_is_special_tx) begin
					lpc_data_out <= special_tx_data[3:0];
				end else begin
					lpc_data_out <= tx_data[3:0];
				end
			end
			LPC_TX_TRANSFER_STATE_TR08: begin
				// Transmit second (high) nibble of I/O read data
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
				if (read_is_special_tx) begin
					lpc_data_out <= special_tx_data[7:4];
				end else begin
					lpc_data_out <= tx_data[7:4];
				end
			end
			LPC_TX_TRANSFER_STATE_TR09: begin
				// Drive turn-around cycle part 1
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR10;
				lpc_data_out <= LPC_CODEWORD_TURNAROUND;
			end
			LPC_TX_TRANSFER_STATE_TR10: begin
				// Drive turn-around cycle part 2; flag completion to the RX
				// state machine (held through TR11 so the flag is asserted
				// for more than one clock)
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR11;
				tx_cycle_done <= 1;
				lpc_data_out <= LPC_CODEWORD_TURNAROUND;
			end
			LPC_TX_TRANSFER_STATE_TR11: begin
				// Assert done flag for > 1 clock, then return to idle
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_IDLE;
				// Keep driving turn-around cycle during I/O direction switch
				lpc_data_out <= LPC_CODEWORD_TURNAROUND;
			end
			LPC_TX_TRANSFER_STATE_FR01: begin
				// Firmware read: wait for CPU continue (CPU has filled the
				// FW input BRAM)
				if (continue_reg_tx) begin
					continue_cont <= 1;
					tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR02;
				end
				// Drive LWAIT
				lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
			end
			LPC_TX_TRANSFER_STATE_FR02: begin
				// Handshake completion, then set up the BRAM transfer
				if (!continue_reg_tx) begin
					continue_cont <= 0;
					tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR03;
				end
				// Set up transfer
				lpc_fw_input_xfer_read_addr <= 0;
				fw_cycle_tx_byte_counter <= 0;
				// Drive LWAIT
				lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
			end
			LPC_TX_TRANSFER_STATE_FR03: begin
				// Drive sync (firmware reads always complete READY here)
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR04;
				lpc_data_out <= LPC_CODEWORD_SYNC_READY;
			end
			LPC_TX_TRANSFER_STATE_FR04: begin
				// Drive first nibble of the current FW data byte, buffer the
				// byte for FR05, and prefetch the next byte from BRAM
				// (read_addr <= old counter + 1 == the incremented counter)
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR05;
				lpc_tx_data_buffer <= lpc_fw_input_xfer_read_data;
				fw_cycle_tx_byte_counter <= fw_cycle_tx_byte_counter + 1;
				lpc_fw_input_xfer_read_addr <= fw_cycle_tx_byte_counter + 1;
				// Transmit first nibble of FW data byte
				lpc_data_out <= lpc_fw_input_xfer_read_data[3:0];
			end
			LPC_TX_TRANSFER_STATE_FR05: begin
				// Drive second nibble, then either loop to FR04 for the next
				// byte or finish via turn-around. By this state the counter
				// already equals the number of bytes sent (incremented in
				// FR04), so ">= N" ends the cycle after N bytes.
				case (fw_cycle_msize)
					4'b0000: begin
						// MSIZE 0: single byte -- always done
						tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
					end
					4'b0001: begin
						// NOTE(review): MSIZE 0b0001 denotes a 2-byte
						// firmware cycle in the LPC spec, but ">= 1" ends
						// the transfer after a single byte (compare the
						// ">= 4"/">= 16"/">= 128" pattern below). The RX
						// write path shows the same anomaly for this size.
						// Confirm whether 2-byte FW cycles are intended
						// to be supported.
						if (fw_cycle_tx_byte_counter >= 1) begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
						end else begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR04;
						end
					end
					4'b0010: begin
						// MSIZE 2: 4 bytes
						if (fw_cycle_tx_byte_counter >= 4) begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
						end else begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR04;
						end
					end
					4'b0100: begin
						// MSIZE 4: 16 bytes
						if (fw_cycle_tx_byte_counter >= 16) begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
						end else begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR04;
						end
					end
					4'b0111: begin
						// MSIZE 7: 128 bytes
						if (fw_cycle_tx_byte_counter >= 128) begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
						end else begin
							tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR04;
						end
					end
					default: begin
						// Disallowed size codeword
						// Abort cycle
						tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
					end
				endcase
				// Transmit second nibble of FW data byte
				lpc_data_out <= lpc_tx_data_buffer[7:4];
			end
			default: begin
				// Should never reach this point!
				// In case of a glitch into this state, drive LWAIT for one
				// cycle, then let TR09/TR10 run the turn-around in
				// preparation to unlock the bus...
				tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
				lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
			end
		endcase
	end
end

// 512x8 dual-clock BRAM: CPU writes firmware-cycle read data on its own write
// clock; the LPC TX state machine reads it in the lpc_clock domain (FR04).
pinyon_ram_wishbone #(
	.BLOCK_RAM_ADDR_BUS_WIDTH(9),
	.BLOCK_RAM_DATA_BUS_WIDTH(8)
) lpc_fw_cycle_input_xfer_bram(
	.wb_read_port_clk(lpc_clock),
	.wb_read_port_adr(lpc_fw_input_xfer_read_addr),
	.wb_read_port_dat_r(lpc_fw_input_xfer_read_data),
	.wb_write_port_clk(lpc_fw_input_xfer_write_clk),
	.wb_write_port_cyc(lpc_fw_input_xfer_write_wren),
	.wb_write_port_stb(lpc_fw_input_xfer_write_wren),
	.wb_write_port_we(lpc_fw_input_xfer_write_wren),
	.wb_write_port_adr(lpc_fw_input_xfer_write_addr),
	.wb_write_port_dat_w(lpc_fw_input_xfer_write_data)
);

// 512x8 dual-clock BRAM: the LPC RX state machine writes firmware-cycle write
// data in the lpc_clock domain; the CPU reads it on its own read clock.
pinyon_ram_wishbone #(
	.BLOCK_RAM_ADDR_BUS_WIDTH(9),
	.BLOCK_RAM_DATA_BUS_WIDTH(8)
) lpc_fw_cycle_output_xfer_bram(
	.wb_read_port_clk(lpc_fw_output_xfer_read_clk),
	.wb_read_port_adr(lpc_fw_output_xfer_read_addr),
	.wb_read_port_dat_r(lpc_fw_output_xfer_read_data),
	.wb_write_port_clk(lpc_clock),
	.wb_write_port_cyc(lpc_fw_output_xfer_write_wren),
	.wb_write_port_stb(lpc_fw_output_xfer_write_wren),
	.wb_write_port_we(lpc_fw_output_xfer_write_wren),
	.wb_write_port_adr(lpc_fw_output_xfer_write_addr),
	.wb_write_port_dat_w(lpc_fw_output_xfer_write_data)
);

// 512x8 dual-clock BRAM: CPU writes IPMI BT response data on its own write
// clock; the LPC side reads it in the lpc_clock domain.
pinyon_ram_wishbone #(
	.BLOCK_RAM_ADDR_BUS_WIDTH(9),
	.BLOCK_RAM_DATA_BUS_WIDTH(8)
) ipmi_bt_cycle_input_xfer_bram(
	.wb_read_port_clk(lpc_clock),
	.wb_read_port_adr(ipmi_bt_input_xfer_read_addr),
	.wb_read_port_dat_r(ipmi_bt_input_xfer_read_data),
	.wb_write_port_clk(ipmi_bt_input_xfer_write_clk),
	.wb_write_port_cyc(ipmi_bt_input_xfer_write_wren),
	.wb_write_port_stb(ipmi_bt_input_xfer_write_wren),
	.wb_write_port_we(ipmi_bt_input_xfer_write_wren),
	.wb_write_port_adr(ipmi_bt_input_xfer_write_addr),
	.wb_write_port_dat_w(ipmi_bt_input_xfer_write_data)
);

// 512x8 dual-clock BRAM: the LPC side writes host-supplied IPMI BT request
// data in the lpc_clock domain; the CPU reads it on its own read clock.
pinyon_ram_wishbone #(
	.BLOCK_RAM_ADDR_BUS_WIDTH(9),
	.BLOCK_RAM_DATA_BUS_WIDTH(8)
) ipmi_bt_cycle_output_xfer_bram(
	.wb_read_port_clk(ipmi_bt_output_xfer_read_clk),
	.wb_read_port_adr(ipmi_bt_output_xfer_read_addr),
	.wb_read_port_dat_r(ipmi_bt_output_xfer_read_data),
	.wb_write_port_clk(lpc_clock),
	.wb_write_port_cyc(ipmi_bt_output_xfer_write_wren),
	.wb_write_port_stb(ipmi_bt_output_xfer_write_wren),
	.wb_write_port_we(ipmi_bt_output_xfer_write_wren),
	.wb_write_port_adr(ipmi_bt_output_xfer_write_addr),
	.wb_write_port_dat_w(ipmi_bt_output_xfer_write_data)
);

endmodule