// © 2017 - 2022 Raptor Engineering, LLC
//
// Released under the terms of the LGPL v3+
// See the LICENSE file for full details
//
// This LPC slave peripheral currently implements I/O, TPM, and firmware memory read/write functionality
// An optional DMA engine is provided to accelerate LPC firmware cycle transfers
//
// The LPC firmware cycle DMA engine provides configurable access protection on 32-bit (word) boundaries -- short word and byte accesses that overlap an allowed range at 32-bit granularity will also be allowed.
// As a result, it is recommended to keep a single word safety buffer before and after an active LPC firmware DMA target region in memory.
//
// =============================================================================================
// Memory Map:
// =============================================================================================
// Device ID string (8 bytes)
// Version register (4 bytes): {24'b0, version}
// Control register 1 (4 bytes): {12'b0, lpc_io_cycle_irq_enable, lpc_tpm_cycle_irq_enable, lpc_firmware_cycle_irq_enable, ipmi_bt_bmc_irq_enable, ipmi_bt_port_address, ipmi_bt_alt_irq, enable_ipmi_bt, enable_vuart2, enable_vuart1, enable_io_cycles, enable_tpm_cycles, enable_firmware_cycles, enable_irq}
// Control register 2 (4 bytes): {16'b0, data_out, 6'b0, signal_error, continue_transfer}
// Range config 1 (4 bytes): {enable_range, range_type, 1'b0, start_address}
// Range end 1 (4 bytes): {4'b0, end_address}
// Range config 2 (4 bytes): {enable_range, range_type, 1'b0, start_address}
// Range end 2 (4 bytes): {4'b0, end_address}
// Range config 3 (4 bytes): {enable_range, range_type, 1'b0, start_address}
// Range end 3 (4 bytes): {4'b0, end_address}
// Range config 4 (4 bytes): {enable_range, range_type, 1'b0, start_address}
// Range end 4 (4 bytes): {4'b0, end_address}
// Range config 5 (4 bytes): {enable_range, range_type, 1'b0, start_address}
// Range end 5 (4 bytes): {4'b0, end_address}
// Range config 6 (4 bytes): {enable_range, range_type, 1'b0, start_address}
// Range end 6 (4 bytes): {4'b0, end_address}
// DMA config 1 (4 bytes): {24'b0, lpc_fw_cycle_dma_idsel_filter, 1'b0, lpc_fw_cycle_dma_idsel_filter_enable, lpc_fw_cycle_dma_write_enable, lpc_fw_cycle_dma_read_enable}
// DMA config 2 (4 bytes): {lpc_fw_dma_base_address_low}
// DMA config 3 (4 bytes): {lpc_fw_dma_base_address_high}
// DMA config 4 (4 bytes): {lpc_fw_dma_length}
// DMA config 5 (4 bytes): {lpc_fw_dma_valid_window_start_offset}
// DMA config 6 (4 bytes): {lpc_fw_dma_valid_window_end_offset}
// DMA config 7 (4 bytes): {lpc_fw_dma_offset_address_mask}
// Status register 1 (4 bytes): {8'b0, pending_fw_cycle_idsel, pending_fw_cycle_msize, 11'b0, bus_in_reset, cycle_type, cycle_direction, attn_req}
// Status register 2 (4 bytes): {4'b0, pending_address}
// Status register 3 (4 bytes): {24'b0, pending_data}
// Status register 4 (4 bytes): {8'b0, vuart2_h2b_fifo_data_available_count, vuart1_h2b_fifo_data_available_count, 4'b0, vuart2_irq_source, vuart1_irq_source, 1'b0, lpc_io_cycle_irq_asserted, lpc_tpm_cycle_irq_asserted, lpc_firmware_cycle_irq_asserted, ipmi_bt_bmc_irq_asserted, vuart2_irq_asserted, vuart1_irq_asserted, irq_asserted}
// IPMI BT interface control register (4 bytes): {24'b0, BT_CTRL}

// Required by this wrapper module
`define LPC_SLAVE_DEBUG

`ifndef DISABLE_FIRMWARE_MEMORY_CYCLES
`define ENABLE_FIRMWARE_MEMORY_CYCLES
`endif

// Stop LiteX silently ignoring net naming / missing register errors
`default_nettype none
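// Example (illustrative only, not part of the core): to intercept TPM cycles
// with IRQ delivery, a BMC driver would set enable_irq (bit 0),
// enable_tpm_cycles (bit 2), and lpc_tpm_cycle_irq_enable (bit 18) of
// control register 1 (offset 12), i.e. program the logical register value
// 32'h0004_0005.  Bit positions follow the control_reg1 assigns below; note
// that the MMIO handler at the bottom of this file byte-swaps bus lanes.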
module aquila_lpc_slave_wishbone(
	// Wishbone slave port signals
	input wire slave_wishbone_cyc,
	input wire slave_wishbone_stb,
	input wire slave_wishbone_we,
	input wire [29:0] slave_wishbone_adr,
	input wire [31:0] slave_wishbone_dat_w,
	output wire [31:0] slave_wishbone_dat_r,
	input wire [3:0] slave_wishbone_sel,
	output wire slave_wishbone_ack,
	output wire slave_wishbone_err,
	output wire slave_irq_o,

	// Wishbone master port signals (DMA)
	output wire master_wishbone_cyc,
	output wire master_wishbone_stb,
	output wire master_wishbone_we,
	output wire [(WISHBONE_DMA_ADDR_BUS_WIDTH-1):0] master_wishbone_adr,
	output wire [(WISHBONE_DMA_DATA_BUS_WIDTH-1):0] master_wishbone_dat_w,
	input wire [(WISHBONE_DMA_DATA_BUS_WIDTH-1):0] master_wishbone_dat_r,
	output wire [((WISHBONE_DMA_DATA_BUS_WIDTH/8)-1):0] master_wishbone_sel,
	input wire master_wishbone_ack,
	input wire master_wishbone_err,

	// LPC core signals
	output wire [3:0] lpc_data_out,		// Must have an LPC-clocked register packed into the I/O buffer
	input wire [3:0] lpc_data_in,		// Must have an LPC-clocked register packed into the I/O buffer
	output wire lpc_data_direction,		// 0 == tristate (input), 1 == driven (output) [IOB packed]
	output wire lpc_irq_out,
	input wire lpc_irq_in,
	output wire lpc_irq_direction,		// 0 == tristate (input), 1 == driven (output) [IOB packed]
	input wire lpc_frame_n,
	input wire lpc_reset_n,
	input wire lpc_clock,

	output wire [15:0] debug_port,
	output wire lpc_clock_mirror,

	input wire peripheral_reset,
	input wire peripheral_clock
);

// Wishbone master port parameters
parameter WISHBONE_DMA_ADDR_BUS_WIDTH = 32;
parameter WISHBONE_DMA_DATA_BUS_WIDTH = 64;

// VUART parameters
parameter VUART_IRQ_REASON_NONE = 0;
parameter VUART_IRQ_REASON_QUEUE_TRIGGER = 1;
parameter VUART_IRQ_REASON_QUEUE_TIMEOUT = 2;

// Control and status registers
wire [63:0] device_id;
wire [31:0] device_version;
reg [31:0] control_reg1 = 0;
reg [31:0] control_reg2 = 0;
reg [31:0] range_config1 = 0;
reg [31:0] range_end1 = 0;
reg [31:0] range_config2 = 0;
reg [31:0] range_end2 = 0;
reg [31:0] range_config3 = 0;
reg [31:0] range_end3 = 0;
reg [31:0] range_config4 = 0;
reg [31:0] range_end4 = 0;
reg [31:0] range_config5 = 0;
reg [31:0] range_end5 = 0;
reg [31:0] range_config6 = 0;
reg [31:0] range_end6 = 0;
reg [31:0] dma_config_reg1 = 0;
reg [31:0] dma_config_reg2 = 0;
reg [31:0] dma_config_reg3 = 0;
reg [31:0] dma_config_reg4 = 0;
reg [31:0] dma_config_reg5 = 0;
reg [31:0] dma_config_reg6 = 0;
reg [31:0] dma_config_reg7 = 0;
wire [31:0] status_reg1;
wire [31:0] status_reg2;
wire [31:0] status_reg3;
wire [31:0] status_reg4;
wire [31:0] ipmi_bt_status_reg;

// Device identifier
assign device_id = 64'h7c5250544c504353;	// ASCII "|RPTLPCS"
assign device_version = 32'h00010000;

// CSR connections
wire lpc_io_cycle_irq_enable;
wire lpc_tpm_cycle_irq_enable;
wire lpc_firmware_cycle_irq_enable;
wire ipmi_bt_bmc_irq_enable;
wire [7:0] ipmi_bt_port_address;
wire enable_vuart1;
wire enable_vuart2;
wire enable_ipmi_bt;
wire ipmi_bt_alt_irq;
wire [15:0] ipmi_bt_port_base_address;
wire enable_io_cycles;
wire enable_tpm_cycles;
wire enable_firmware_cycles;
wire enable_irq;
wire [7:0] data_out;
wire signal_error;
reg continue_transfer = 0;	// Write 1 to continue, always reads 0
wire range_1_enable;
wire range_1_allow_io;
wire range_1_allow_tpm;
wire [27:0] range_1_start_address;
wire [27:0] range_1_end_address;
wire range_2_enable;
wire range_2_allow_io;
wire range_2_allow_tpm;
wire [27:0] range_2_start_address;
wire [27:0] range_2_end_address;
wire range_3_enable;
wire range_3_allow_io;
wire range_3_allow_tpm;
wire [27:0] range_3_start_address;
wire [27:0] range_3_end_address;
wire range_4_enable;
wire range_4_allow_io;
wire range_4_allow_tpm;
wire [27:0] range_4_start_address;
wire [27:0] range_4_end_address;
wire range_5_enable;
wire range_5_allow_io;
wire range_5_allow_tpm;
wire [27:0] range_5_start_address;
wire [27:0] range_5_end_address;
wire range_6_enable;
wire range_6_allow_io;
wire range_6_allow_tpm;
wire [27:0] range_6_start_address;
wire [27:0] range_6_end_address;
wire lpc_fw_cycle_dma_read_enable;
wire lpc_fw_cycle_dma_write_enable;
wire lpc_fw_cycle_dma_idsel_filter_enable;
wire [3:0] lpc_fw_cycle_dma_idsel_filter;
wire [63:0] lpc_fw_dma_base_address;
wire [31:0] lpc_fw_dma_length;
wire [31:0] lpc_fw_dma_valid_window_start_offset;
wire [31:0] lpc_fw_dma_valid_window_end_offset;
wire [31:0] lpc_fw_dma_offset_address_mask;

reg [1:0] cycle_type = 0;
reg cycle_direction = 0;
reg attn_req = 0;
reg [27:0] pending_address = 0;
reg [7:0] pending_data = 0;
reg [3:0] pending_fw_cycle_idsel = 0;
reg [3:0] pending_fw_cycle_msize = 0;
reg [(WISHBONE_DMA_ADDR_BUS_WIDTH-1):0] lpc_fw_dma_current_wb_address = 0;
reg [(WISHBONE_DMA_DATA_BUS_WIDTH-1):0] lpc_fw_dma_data_cache_reg = 0;
reg [8:0] lpc_fw_dma_current_buffer_address = 0;

wire lpc_io_cycle_irq_asserted;
wire lpc_tpm_cycle_irq_asserted;
wire lpc_firmware_cycle_irq_asserted;
reg lpc_io_cycle_irq = 0;
reg lpc_tpm_cycle_irq = 0;
reg lpc_firmware_cycle_irq = 0;
reg [1:0] vuart1_irq_source = 0;
reg [1:0] vuart2_irq_source = 0;
wire vuart1_irq_asserted;
wire vuart2_irq_asserted;
wire [4:0] vuart1_h2b_fifo_data_available_count;
wire [4:0] vuart2_h2b_fifo_data_available_count;
wire ipmi_bt_bmc_irq_asserted;

// IPMI BT_CTRL-defined signals
wire ipmi_bt_h2b_oem0_req;
wire ipmi_bt_sms_attn_req;
wire ipmi_bt_b2h_attn_req;
wire ipmi_bt_h2b_attn_req;
wire ipmi_bt_h2b_ctl_h_busy;
wire ipmi_bt_b2h_ctl_b_busy;

assign lpc_io_cycle_irq_enable = control_reg1[19];
assign lpc_tpm_cycle_irq_enable = control_reg1[18];
assign lpc_firmware_cycle_irq_enable = control_reg1[17];
assign ipmi_bt_bmc_irq_enable = control_reg1[16];
assign ipmi_bt_port_address = control_reg1[15:8];
assign ipmi_bt_alt_irq = control_reg1[7];
assign enable_ipmi_bt = control_reg1[6];
assign enable_vuart2 = control_reg1[5];
assign enable_vuart1 = control_reg1[4];
assign enable_io_cycles = control_reg1[3];
assign enable_tpm_cycles = control_reg1[2];
assign enable_firmware_cycles = control_reg1[1];
assign enable_irq = control_reg1[0];

assign data_out = control_reg2[15:8];
assign signal_error = control_reg2[1];

assign range_1_enable = range_config1[31];
assign range_1_allow_io = range_config1[30];
assign range_1_allow_tpm = range_config1[29];
assign range_1_start_address = range_config1[27:0];
assign range_1_end_address = range_end1[27:0];
assign range_2_enable = range_config2[31];
assign range_2_allow_io = range_config2[30];
assign range_2_allow_tpm = range_config2[29];
assign range_2_start_address = range_config2[27:0];
assign range_2_end_address = range_end2[27:0];
assign range_3_enable = range_config3[31];
assign range_3_allow_io = range_config3[30];
assign range_3_allow_tpm = range_config3[29];
assign range_3_start_address = range_config3[27:0];
assign range_3_end_address = range_end3[27:0];
assign range_4_enable = range_config4[31];
assign range_4_allow_io = range_config4[30];
assign range_4_allow_tpm = range_config4[29];
assign range_4_start_address = range_config4[27:0];
assign range_4_end_address = range_end4[27:0];
assign range_5_enable = range_config5[31];
assign range_5_allow_io = range_config5[30];
assign range_5_allow_tpm = range_config5[29];
assign range_5_start_address = range_config5[27:0];
assign range_5_end_address = range_end5[27:0];
assign range_6_enable = range_config6[31];
assign range_6_allow_io = range_config6[30];
assign range_6_allow_tpm = range_config6[29];
assign range_6_start_address = range_config6[27:0];
assign range_6_end_address = range_end6[27:0];
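// Example (illustrative values): to allow TPM cycle interception over
// addresses 0x0000 - 0xFFFF via range 1, firmware would program
//   range_config1 = {1'b1 /* enable */, 1'b0 /* I/O */, 1'b1 /* TPM */, 1'b0, 28'h0000000} = 32'hA000_0000
//   range_end1    = 32'h0000_FFFF
// per the field extraction above.  Note that the slave core instantiation
// below consumes only bits [15:0] of each range bound.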
assign lpc_fw_cycle_dma_read_enable = dma_config_reg1[0];
assign lpc_fw_cycle_dma_write_enable = dma_config_reg1[1];
assign lpc_fw_cycle_dma_idsel_filter_enable = dma_config_reg1[2];
assign lpc_fw_cycle_dma_idsel_filter = dma_config_reg1[7:4];
assign lpc_fw_dma_base_address = {dma_config_reg3, dma_config_reg2[31:4], 4'b0000};
assign lpc_fw_dma_length = {dma_config_reg4[31:4], 4'b0000};
assign lpc_fw_dma_valid_window_start_offset = dma_config_reg5;
assign lpc_fw_dma_valid_window_end_offset = dma_config_reg6;
assign lpc_fw_dma_offset_address_mask = dma_config_reg7;
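// Example (illustrative values): the base address and length above are
// forced to 16-byte granularity (the low four bits are zeroed), so a
// 64 KiB DMA window at Wishbone address 0x1000_0000 would be programmed as
//   dma_config_reg2 = 32'h1000_0000  (base address, low word)
//   dma_config_reg3 = 32'h0000_0000  (base address, high word)
//   dma_config_reg4 = 32'h0001_0000  (length)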
assign status_reg1 = {8'b0, pending_fw_cycle_idsel, pending_fw_cycle_msize, 11'b0, ~lpc_reset_n, cycle_type, cycle_direction, attn_req};
assign status_reg2 = {4'b0, pending_address};
assign status_reg3 = {24'b0, pending_data};
assign status_reg4 = {8'b0,
	(!vuart2_h2b_fifo_rempty && vuart2_h2b_fifo_data_available_count[3:0] == 0)?4'b1111:vuart2_h2b_fifo_data_available_count[3:0],
	(!vuart1_h2b_fifo_rempty && vuart1_h2b_fifo_data_available_count[3:0] == 0)?4'b1111:vuart1_h2b_fifo_data_available_count[3:0],
	4'b0, vuart2_irq_source, vuart1_irq_source, 1'b0,
	lpc_io_cycle_irq_asserted, lpc_tpm_cycle_irq_asserted, lpc_firmware_cycle_irq_asserted,
	ipmi_bt_bmc_irq_asserted, vuart2_irq_asserted, vuart1_irq_asserted, slave_irq_o};
assign ipmi_bt_status_reg = {24'b0, ipmi_bt_b2h_ctl_b_busy, ipmi_bt_h2b_ctl_h_busy, ipmi_bt_h2b_oem0_req, ipmi_bt_sms_attn_req, ipmi_bt_b2h_attn_req, ipmi_bt_h2b_attn_req, 2'b00};

// Internal LPC interface signals
wire [27:0] lpc_slave_address;
reg [7:0] lpc_slave_tx_data = 0;
wire [7:0] lpc_slave_rx_data;
wire lpc_slave_tpm_cycle;
wire lpc_slave_firmware_cycle;
reg lpc_slave_continue = 0;
reg lpc_slave_data_ack = 0;
reg lpc_slave_signal_error = 0;
reg lpc_slave_exception_ack = 0;
wire lpc_slave_address_ready;
wire lpc_slave_data_ready;
wire lpc_slave_data_ready_cont;
wire lpc_slave_continue_cont;
wire [2:0] lpc_slave_exception;
wire lpc_slave_cycle_direction;
reg [16:0] irq_request = 0;
reg irq_tx_ready = 0;
wire irq_tx_queued;
reg [7:0] xfer_read_data_buffer = 0;
reg [7:0] xfer_write_data_buffer = 0;
reg lpc_fw_dma_cycle_active = 0;
reg lpc_fw_dma_cycle_inactive = 0;
reg [8:0] lpc_fw_input_xfer_mmio_write_addr = 0;
reg [7:0] lpc_fw_input_xfer_mmio_write_data = 0;
reg lpc_fw_input_xfer_mmio_write_wren = 0;
reg [8:0] lpc_fw_output_xfer_mmio_read_addr = 0;
reg [8:0] lpc_fw_input_xfer_dma_write_addr = 0;
reg [7:0] lpc_fw_input_xfer_dma_write_data = 0;
reg lpc_fw_input_xfer_dma_write_wren = 0;
reg [8:0] lpc_fw_output_xfer_dma_read_addr = 0;
wire [8:0] lpc_fw_input_xfer_write_addr;
wire [7:0] lpc_fw_input_xfer_write_data;
wire lpc_fw_input_xfer_write_wren;
wire [8:0] lpc_fw_output_xfer_read_addr;
assign lpc_fw_input_xfer_write_addr = (!lpc_fw_dma_cycle_active)?lpc_fw_input_xfer_mmio_write_addr:lpc_fw_input_xfer_dma_write_addr;
assign lpc_fw_input_xfer_write_data = (!lpc_fw_dma_cycle_active)?lpc_fw_input_xfer_mmio_write_data:lpc_fw_input_xfer_dma_write_data;
assign lpc_fw_input_xfer_write_wren = (!lpc_fw_dma_cycle_active)?lpc_fw_input_xfer_mmio_write_wren:lpc_fw_input_xfer_dma_write_wren;
assign lpc_fw_output_xfer_read_addr = (!lpc_fw_dma_cycle_active)?lpc_fw_output_xfer_mmio_read_addr:lpc_fw_output_xfer_dma_read_addr;
reg [8:0] lpc_fw_output_xfer_read_addr_prev = 0;
wire [7:0] lpc_fw_output_xfer_read_data;

reg [8:0] ipmi_bt_input_xfer_write_addr = 0;
reg [7:0] ipmi_bt_input_xfer_write_data = 0;
reg ipmi_bt_input_xfer_write_wren = 0;
reg [8:0] ipmi_bt_output_xfer_read_addr = 0;
reg [8:0] ipmi_bt_output_xfer_read_addr_prev = 0;
wire [7:0] ipmi_bt_output_xfer_read_data;

wire [3:0] lpc_slave_fw_idsel;
wire [3:0] lpc_slave_fw_msize;

wire [15:0] lpc_slave_debug_port;

wire [3:0] lpc_slave_lpc_data_out;
wire [3:0] lpc_slave_lpc_data_in;
wire lpc_slave_lpc_data_direction;
wire lpc_slave_lpc_irq_out;
wire lpc_slave_lpc_irq_in;
wire lpc_slave_lpc_irq_direction;
reg lpc_slave_lpc_irq_out_reg = 0;
reg lpc_slave_lpc_irq_direction_reg = 0;
(* syn_useioff *) reg lpc_slave_lpc_frame_n = 0;	// Must be packed into the I/O buffer flip flops
reg lpc_slave_lpc_reset_n = 0;

// Status register format: {7'b0, b2h_fifo_wfull, fifos_enabled, 1'b0, rcvr_trigger, mcr, lcr};
// Control register format: {irqs_enabled, FIFO IRQ enabled, 18'b0, h2b_fifo_irq_trigger_level, 7'b0, assert_b2h_break}
wire [31:0] vuart1_status_register;
reg [31:0] vuart1_control_register = 0;
wire [31:0] vuart2_status_register;
reg [31:0] vuart2_control_register = 0;
wire vuart1_assert_b2h_break_clear;
wire vuart2_assert_b2h_break_clear;
reg vuart1_lcr_break_request = 0;
reg vuart2_lcr_break_request = 0;
reg vuart1_lcr_break_ack = 0;
reg vuart2_lcr_break_ack = 0;
wire [3:0] vuart1_h2b_fifo_irq_trigger_level;
wire [3:0] vuart2_h2b_fifo_irq_trigger_level;
assign vuart1_h2b_fifo_irq_trigger_level = vuart1_control_register[11:8];
assign vuart2_h2b_fifo_irq_trigger_level = vuart2_control_register[11:8];

// Wishbone to LPC core synchronizer registers
reg [1:0] peripheral_reset_sync = 0;
reg [1:0] lpc_slave_continue_sync = 0;
reg [1:0] lpc_slave_data_ack_sync = 0;
reg [1:0] lpc_slave_signal_error_sync = 0;
reg [1:0] lpc_slave_exception_ack_sync = 0;
reg [1:0] irq_tx_ready_sync = 0;
reg [16:0] irq_request_sync_1 = 0;
reg [16:0] irq_request_sync_0 = 0;
reg [31:0] vuart1_control_register_sync_1 = 0;
reg [31:0] vuart1_control_register_sync_0 = 0;
reg [31:0] vuart2_control_register_sync_1 = 0;
reg [31:0] vuart2_control_register_sync_0 = 0;

// LPC core to Wishbone synchronizer registers
reg [2:0] lpc_slave_address_ready_sync = 0;
reg [2:0] lpc_slave_data_ready_sync = 0;
reg [2:0] lpc_slave_exception_sync_2 = 0;
reg [2:0] lpc_slave_exception_sync_1 = 0;
reg [2:0] lpc_slave_exception_sync_0 = 0;
reg [2:0] lpc_slave_data_ready_cont_sync = 0;
reg [2:0] lpc_slave_continue_cont_sync = 0;
reg [2:0] lpc_reset_n_sync = 0;
reg [31:0] vuart1_status_register_sync_2 = 0;
reg [31:0] vuart1_status_register_sync_1 = 0;
reg [31:0] vuart1_status_register_sync_0 = 0;
reg [31:0] vuart2_status_register_sync_2 = 0;
reg [31:0] vuart2_status_register_sync_1 = 0;
reg [31:0] vuart2_status_register_sync_0 = 0;
reg [2:0] vuart1_assert_b2h_break_clear_sync = 0;
reg [2:0] vuart2_assert_b2h_break_clear_sync = 0;
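// Note on synchronizer depth: the Wishbone-to-LPC synchronizers above are
// two flops deep and are consumed at index [1], while the LPC-to-Wishbone
// synchronizers carry a third stage and are consumed at index [2]; the
// peripheral-clock state machines below rely on these indices throughout.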
// VUART FIFOs
wire vuart1_h2b_fifo_reset;
wire vuart1_h2b_fifo_wwren;
wire [7:0] vuart1_h2b_fifo_wdata;
wire vuart1_h2b_fifo_wfull;
wire vuart1_h2b_fifo_walmost_full;
reg vuart1_h2b_fifo_rpop = 0;
wire [7:0] vuart1_h2b_fifo_rdata;
wire vuart1_h2b_fifo_rempty;
wire vuart1_h2b_fifo_ralmost_empty;
wire [4:0] vuart1_h2b_fifo_rptr;
wire [4:0] vuart1_h2b_fifo_wptr;
reg [1:0] vuart1_h2b_fifo_rempty_sync = 0;
reg [2:0] vuart1_h2b_fifo_reset_sync = 0;
wire vuart1_irqs_enabled;
reg vuart1_h2b_fifo_queue_past_trigger = 0;
reg vuart1_h2b_fifo_read_timeout = 0;
reg vuart1_h2b_fifo_irq = 0;
wire vuart1_h2b_fifo_irq_enabled;
reg [15:0] vuart1_h2b_fifo_read_timeout_counter = 0;

// This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals!
// The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately
// at or above the configured data queue threshold.
assign vuart1_h2b_fifo_data_available_count = (vuart1_h2b_fifo_rptr > vuart1_h2b_fifo_wptr)?(vuart1_h2b_fifo_wptr-vuart1_h2b_fifo_rptr):((vuart1_h2b_fifo_wptr+16)-vuart1_h2b_fifo_rptr);

async_fifo #(
	.DSIZE(8),
	.ASIZE(4),
	.FALLTHROUGH("TRUE")
) vuart1_h2b_fifo (
	.wclk(lpc_clock),
	.wrst_n(!peripheral_reset && lpc_slave_lpc_reset_n && !vuart1_h2b_fifo_reset),
	.winc(vuart1_h2b_fifo_wwren),
	.wdata(vuart1_h2b_fifo_wdata),
	.wfull(vuart1_h2b_fifo_wfull),
	.awfull(vuart1_h2b_fifo_walmost_full),
	.rclk(peripheral_clock),
	.rrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart1_h2b_fifo_reset_sync[2]),
	.rinc(vuart1_h2b_fifo_rpop),
	.rdata(vuart1_h2b_fifo_rdata),
	.rempty(vuart1_h2b_fifo_rempty),
	.arempty(vuart1_h2b_fifo_ralmost_empty),
	.rclk_rptr(vuart1_h2b_fifo_rptr),
	.rclk_wptr(vuart1_h2b_fifo_wptr)
);

wire vuart1_b2h_fifo_reset;
reg vuart1_b2h_fifo_wwren = 0;
reg [7:0] vuart1_b2h_fifo_wdata = 0;
wire vuart1_b2h_fifo_wfull;
wire vuart1_b2h_fifo_walmost_full;
wire vuart1_b2h_fifo_rpop;
wire [7:0] vuart1_b2h_fifo_rdata;
wire vuart1_b2h_fifo_rempty;
wire vuart1_b2h_fifo_ralmost_empty;
wire [4:0] vuart1_b2h_fifo_rptr;
wire [4:0] vuart1_b2h_fifo_wptr;
reg [2:0] vuart1_b2h_fifo_wfull_sync = 0;
reg [2:0] vuart1_b2h_fifo_reset_sync = 0;

// This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals!
// The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately
// at or above the configured data queue threshold.
wire [4:0] vuart1_b2h_fifo_data_available_count;
assign vuart1_b2h_fifo_data_available_count = (vuart1_b2h_fifo_rptr > vuart1_b2h_fifo_wptr)?(vuart1_b2h_fifo_wptr-vuart1_b2h_fifo_rptr):((vuart1_b2h_fifo_wptr+16)-vuart1_b2h_fifo_rptr);

async_fifo #(
	.DSIZE(8),
	.ASIZE(4),
	.FALLTHROUGH("TRUE")
) vuart1_b2h_fifo (
	.wclk(peripheral_clock),
	.wrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart1_b2h_fifo_reset_sync[2]),
	.winc(vuart1_b2h_fifo_wwren),
	.wdata(vuart1_b2h_fifo_wdata),
	.wfull(vuart1_b2h_fifo_wfull),
	.awfull(vuart1_b2h_fifo_walmost_full),
	.rclk(lpc_clock),
	.rrst_n(!peripheral_reset_sync[1] && lpc_slave_lpc_reset_n && !vuart1_b2h_fifo_reset),
	.rinc(vuart1_b2h_fifo_rpop),
	.rdata(vuart1_b2h_fifo_rdata),
	.rempty(vuart1_b2h_fifo_rempty),
	.arempty(vuart1_b2h_fifo_ralmost_empty),
	.rclk_rptr(vuart1_b2h_fifo_rptr),
	.rclk_wptr(vuart1_b2h_fifo_wptr)
);

wire vuart2_h2b_fifo_reset;
wire vuart2_h2b_fifo_wwren;
wire [7:0] vuart2_h2b_fifo_wdata;
wire vuart2_h2b_fifo_wfull;
wire vuart2_h2b_fifo_walmost_full;
reg vuart2_h2b_fifo_rpop = 0;
wire [7:0] vuart2_h2b_fifo_rdata;
wire vuart2_h2b_fifo_rempty;
wire vuart2_h2b_fifo_ralmost_empty;
wire [4:0] vuart2_h2b_fifo_rptr;
wire [4:0] vuart2_h2b_fifo_wptr;
reg [1:0] vuart2_h2b_fifo_rempty_sync = 0;
reg [2:0] vuart2_h2b_fifo_reset_sync = 0;
wire vuart2_irqs_enabled;
reg vuart2_h2b_fifo_queue_past_trigger = 0;
reg vuart2_h2b_fifo_read_timeout = 0;
reg vuart2_h2b_fifo_irq = 0;
wire vuart2_h2b_fifo_irq_enabled;
reg [15:0] vuart2_h2b_fifo_read_timeout_counter = 0;
// This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals!
// The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately
// at or above the configured data queue threshold.
assign vuart2_h2b_fifo_data_available_count = (vuart2_h2b_fifo_rptr > vuart2_h2b_fifo_wptr)?(vuart2_h2b_fifo_wptr-vuart2_h2b_fifo_rptr):((vuart2_h2b_fifo_wptr+16)-vuart2_h2b_fifo_rptr);

async_fifo #(
	.DSIZE(8),
	.ASIZE(4),
	.FALLTHROUGH("TRUE")
) vuart2_h2b_fifo (
	.wclk(lpc_clock),
	.wrst_n(!peripheral_reset && lpc_slave_lpc_reset_n && !vuart2_h2b_fifo_reset),
	.winc(vuart2_h2b_fifo_wwren),
	.wdata(vuart2_h2b_fifo_wdata),
	.wfull(vuart2_h2b_fifo_wfull),
	.awfull(vuart2_h2b_fifo_walmost_full),
	.rclk(peripheral_clock),
	.rrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart2_h2b_fifo_reset_sync[2]),
	.rinc(vuart2_h2b_fifo_rpop),
	.rdata(vuart2_h2b_fifo_rdata),
	.rempty(vuart2_h2b_fifo_rempty),
	.arempty(vuart2_h2b_fifo_ralmost_empty),
	.rclk_rptr(vuart2_h2b_fifo_rptr),
	.rclk_wptr(vuart2_h2b_fifo_wptr)
);

wire vuart2_b2h_fifo_reset;
reg vuart2_b2h_fifo_wwren = 0;
reg [7:0] vuart2_b2h_fifo_wdata = 0;
wire vuart2_b2h_fifo_wfull;
wire vuart2_b2h_fifo_walmost_full;
wire vuart2_b2h_fifo_rpop;
wire [7:0] vuart2_b2h_fifo_rdata;
wire vuart2_b2h_fifo_rempty;
wire vuart2_b2h_fifo_ralmost_empty;
wire [4:0] vuart2_b2h_fifo_rptr;
wire [4:0] vuart2_b2h_fifo_wptr;
reg [2:0] vuart2_b2h_fifo_wfull_sync = 0;
reg [2:0] vuart2_b2h_fifo_reset_sync = 0;

// This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals!
// The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately
// at or above the configured data queue threshold.
wire [4:0] vuart2_b2h_fifo_data_available_count;
assign vuart2_b2h_fifo_data_available_count = (vuart2_b2h_fifo_rptr > vuart2_b2h_fifo_wptr)?(vuart2_b2h_fifo_wptr-vuart2_b2h_fifo_rptr):((vuart2_b2h_fifo_wptr+16)-vuart2_b2h_fifo_rptr);

async_fifo #(
	.DSIZE(8),
	.ASIZE(4),
	.FALLTHROUGH("TRUE")
) vuart2_b2h_fifo (
	.wclk(peripheral_clock),
	.wrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart2_b2h_fifo_reset_sync[2]),
	.winc(vuart2_b2h_fifo_wwren),
	.wdata(vuart2_b2h_fifo_wdata),
	.wfull(vuart2_b2h_fifo_wfull),
	.awfull(vuart2_b2h_fifo_walmost_full),
	.rclk(lpc_clock),
	.rrst_n(!peripheral_reset_sync[1] && lpc_slave_lpc_reset_n && !vuart2_b2h_fifo_reset),
	.rinc(vuart2_b2h_fifo_rpop),
	.rdata(vuart2_b2h_fifo_rdata),
	.rempty(vuart2_b2h_fifo_rempty),
	.arempty(vuart2_b2h_fifo_ralmost_empty),
	.rclk_rptr(vuart2_b2h_fifo_rptr),
	.rclk_wptr(vuart2_b2h_fifo_wptr)
);
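// Usage note (assumed behavior of the async_fifo core's FALLTHROUGH
// parameter): with FALLTHROUGH("TRUE") the FIFOs above operate first-word
// fall-through, i.e. rdata already reflects the head entry whenever rempty
// is deasserted, and rinc acts as a read acknowledge.  The MMIO read logic
// below samples rdata combinationally and then pulses rpop, which matches
// that behavior.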
// IPMI BT signals
wire ipmi_bt_bmc_to_host_ctl_sms_ack;
wire ipmi_bt_bmc_to_host_ctl_attn_ack;
wire ipmi_bt_host_to_bmc_ctl_attn_req;
wire ipmi_bt_host_to_bmc_ctl_oem0_req;
wire ipmi_bt_irq_ack;
wire ipmi_bt_irq_bmc_reset;
wire ipmi_bt_host_to_bmc_ctl_h_busy;
wire ipmi_bt_irq_enable;
reg ipmi_bt_bmc_to_host_ctl_sms_req = 0;
reg ipmi_bt_bmc_to_host_ctl_attn_req = 0;
reg ipmi_bt_bmc_to_host_ctl_sms_ack_cont = 0;
reg ipmi_bt_bmc_to_host_ctl_attn_ack_cont = 0;
reg ipmi_bt_host_to_bmc_ctl_attn_req_cont = 0;
reg ipmi_bt_host_to_bmc_ctl_oem0_req_cont = 0;
reg ipmi_bt_irq_ack_cont = 0;
reg ipmi_bt_irq_bmc_reset_cont = 0;
reg ipmi_bt_bmc_to_host_ctl_b_busy = 0;
reg ipmi_bt_irq_req = 0;
reg ipmi_bt_bmc_irq = 0;
reg [1:0] ipmi_bt_bmc_to_host_ctl_sms_req_sync = 0;
reg [1:0] ipmi_bt_bmc_to_host_ctl_attn_req_sync = 0;
reg [1:0] ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync = 0;
reg [1:0] ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync = 0;
reg [1:0] ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync = 0;
reg [1:0] ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync = 0;
reg [1:0] ipmi_bt_irq_ack_cont_sync = 0;
reg [1:0] ipmi_bt_irq_bmc_reset_cont_sync = 0;
reg [1:0] ipmi_bt_bmc_to_host_ctl_b_busy_sync = 0;
reg [1:0] ipmi_bt_irq_req_sync = 0;
reg [2:0] ipmi_bt_bmc_to_host_ctl_sms_ack_sync = 0;
reg [2:0] ipmi_bt_bmc_to_host_ctl_attn_ack_sync = 0;
reg [2:0] ipmi_bt_host_to_bmc_ctl_attn_req_sync = 0;
reg [2:0] ipmi_bt_host_to_bmc_ctl_oem0_req_sync = 0;
reg [2:0] ipmi_bt_irq_ack_sync = 0;
reg [2:0] ipmi_bt_irq_bmc_reset_sync = 0;
reg [2:0] ipmi_bt_host_to_bmc_ctl_h_busy_sync = 0;
reg [2:0] ipmi_bt_irq_enable_sync = 0;
reg ipmi_bt_bmc_to_host_ctl_attn_req_prev = 0;
reg ipmi_bt_bmc_to_host_ctl_sms_req_prev = 0;
reg ipmi_bt_h2b_oem0_req_prev = 0;

assign ipmi_bt_h2b_oem0_req = ipmi_bt_host_to_bmc_ctl_oem0_req_sync[2];
assign ipmi_bt_sms_attn_req = ipmi_bt_bmc_to_host_ctl_sms_req;
assign ipmi_bt_b2h_attn_req = ipmi_bt_bmc_to_host_ctl_attn_req;
assign ipmi_bt_h2b_attn_req = ipmi_bt_host_to_bmc_ctl_attn_req_sync[2];
assign ipmi_bt_h2b_ctl_h_busy = ipmi_bt_host_to_bmc_ctl_h_busy_sync[2];
assign ipmi_bt_b2h_ctl_b_busy = ipmi_bt_bmc_to_host_ctl_b_busy;

// IRQ control
assign vuart1_irqs_enabled = vuart1_control_register[31];
assign vuart1_h2b_fifo_irq_enabled = vuart1_control_register[30];
assign vuart2_irqs_enabled = vuart2_control_register[31];
assign vuart2_h2b_fifo_irq_enabled = vuart2_control_register[30];

// IRQ connections
assign vuart1_irq_asserted = vuart1_h2b_fifo_irq;
assign vuart2_irq_asserted = vuart2_h2b_fifo_irq;
assign ipmi_bt_bmc_irq_asserted = ipmi_bt_bmc_irq;
assign lpc_io_cycle_irq_asserted = lpc_io_cycle_irq;
assign lpc_tpm_cycle_irq_asserted = lpc_tpm_cycle_irq;
assign lpc_firmware_cycle_irq_asserted = lpc_firmware_cycle_irq;
assign slave_irq_o = lpc_io_cycle_irq_asserted | lpc_tpm_cycle_irq_asserted | lpc_firmware_cycle_irq_asserted | vuart1_irq_asserted | vuart2_irq_asserted | ipmi_bt_bmc_irq_asserted;

// Instantiate slave
lpc_slave_interface lpc_slave_interface(
	.address(lpc_slave_address),
	.tx_data(lpc_slave_tx_data),
	.rx_data(lpc_slave_rx_data),
	.tpm_cycle(lpc_slave_tpm_cycle),
	.firmware_cycle(lpc_slave_firmware_cycle),
	.continue(lpc_slave_continue_sync[1]),
	.data_ack(lpc_slave_data_ack_sync[1]),
	.transfer_error(lpc_slave_signal_error_sync[1]),
	.exception_ack(lpc_slave_exception_ack_sync[1]),
	.address_ready(lpc_slave_address_ready),
	.data_ready(lpc_slave_data_ready),
	.data_ready_cont(lpc_slave_data_ready_cont),
	.continue_cont(lpc_slave_continue_cont),
	.exception(lpc_slave_exception),
	.data_direction(lpc_slave_cycle_direction),
	.irq_request(irq_request_sync_1),
	.irq_tx_ready(irq_tx_ready),
	.irq_tx_queued(irq_tx_queued),
	.lpc_fw_input_xfer_write_addr(lpc_fw_input_xfer_write_addr),
	.lpc_fw_input_xfer_write_data(lpc_fw_input_xfer_write_data),
	.lpc_fw_input_xfer_write_clk(peripheral_clock),
	.lpc_fw_input_xfer_write_wren(lpc_fw_input_xfer_write_wren),
	.lpc_fw_output_xfer_read_addr(lpc_fw_output_xfer_read_addr),
	.lpc_fw_output_xfer_read_data(lpc_fw_output_xfer_read_data),
	.lpc_fw_output_xfer_read_clk(peripheral_clock),
	.ipmi_bt_input_xfer_write_addr(ipmi_bt_input_xfer_write_addr),
	.ipmi_bt_input_xfer_write_data(ipmi_bt_input_xfer_write_data),
	.ipmi_bt_input_xfer_write_clk(peripheral_clock),
	.ipmi_bt_input_xfer_write_wren(ipmi_bt_input_xfer_write_wren),
	.ipmi_bt_output_xfer_read_addr(ipmi_bt_output_xfer_read_addr),
	.ipmi_bt_output_xfer_read_data(ipmi_bt_output_xfer_read_data),
	.ipmi_bt_output_xfer_read_clk(peripheral_clock),
	.range1_start(range_1_start_address[15:0]),
	.range1_end(range_1_end_address[15:0]),
	.range2_start(range_2_start_address[15:0]),
	.range2_end(range_2_end_address[15:0]),
	.range3_start(range_3_start_address[15:0]),
	.range3_end(range_3_end_address[15:0]),
	.range4_start(range_4_start_address[15:0]),
	.range4_end(range_4_end_address[15:0]),
	.range5_start(range_5_start_address[15:0]),
	.range5_end(range_5_end_address[15:0]),
	.range6_start(range_6_start_address[15:0]),
	.range6_end(range_6_end_address[15:0]),
	.enable_vuart1(enable_vuart1),
	.vuart1_status_register(vuart1_status_register),
	.vuart1_control_register(vuart1_control_register_sync_1),
	.vuart1_assert_b2h_break_clear(vuart1_assert_b2h_break_clear),
	.vuart1_tx_fifo_reset(vuart1_h2b_fifo_reset),
	.vuart1_tx_fifo_wren(vuart1_h2b_fifo_wwren),
	.vuart1_tx_fifo_data(vuart1_h2b_fifo_wdata),
	.vuart1_tx_fifo_full(vuart1_h2b_fifo_wfull),
	.vuart1_tx_fifo_almost_full(vuart1_h2b_fifo_walmost_full),
	.vuart1_tx_fifo_empty(vuart1_h2b_fifo_rempty_sync[1]),
	.vuart1_rx_fifo_reset(vuart1_b2h_fifo_reset),
	.vuart1_rx_fifo_rpop(vuart1_b2h_fifo_rpop),
	.vuart1_rx_fifo_data(vuart1_b2h_fifo_rdata),
	.vuart1_rx_fifo_empty(vuart1_b2h_fifo_rempty),
	.vuart1_rx_fifo_almost_empty(vuart1_b2h_fifo_ralmost_empty),
	.vuart1_rx_fifo_full(vuart1_b2h_fifo_wfull_sync[2]),
	.vuart1_rx_data_available_count(vuart1_b2h_fifo_data_available_count[3:0]),
	.enable_vuart2(enable_vuart2),
	.vuart2_status_register(vuart2_status_register),
	.vuart2_control_register(vuart2_control_register_sync_1),
	.vuart2_assert_b2h_break_clear(vuart2_assert_b2h_break_clear),
	.vuart2_tx_fifo_reset(vuart2_h2b_fifo_reset),
	.vuart2_tx_fifo_wren(vuart2_h2b_fifo_wwren),
	.vuart2_tx_fifo_data(vuart2_h2b_fifo_wdata),
	.vuart2_tx_fifo_full(vuart2_h2b_fifo_wfull),
	.vuart2_tx_fifo_almost_full(vuart2_h2b_fifo_walmost_full),
	.vuart2_tx_fifo_empty(vuart2_h2b_fifo_rempty_sync[1]),
	.vuart2_rx_fifo_reset(vuart2_b2h_fifo_reset),
	.vuart2_rx_fifo_rpop(vuart2_b2h_fifo_rpop),
	.vuart2_rx_fifo_data(vuart2_b2h_fifo_rdata),
	.vuart2_rx_fifo_empty(vuart2_b2h_fifo_rempty),
	.vuart2_rx_fifo_almost_empty(vuart2_b2h_fifo_ralmost_empty),
	.vuart2_rx_fifo_full(vuart2_b2h_fifo_wfull_sync[2]),
	.vuart2_rx_data_available_count(vuart2_b2h_fifo_data_available_count[3:0]),
	.enable_ipmi_bt(enable_ipmi_bt),
	.ipmi_bt_alt_irq(ipmi_bt_alt_irq),
	.ipmi_bt_port_base_address({8'h00, ipmi_bt_port_address}),
	.ipmi_bt_bmc_to_host_ctl_sms_ack(ipmi_bt_bmc_to_host_ctl_sms_ack),
	.ipmi_bt_bmc_to_host_ctl_attn_ack(ipmi_bt_bmc_to_host_ctl_attn_ack),
	.ipmi_bt_host_to_bmc_ctl_attn_req(ipmi_bt_host_to_bmc_ctl_attn_req),
	.ipmi_bt_host_to_bmc_ctl_oem0_req(ipmi_bt_host_to_bmc_ctl_oem0_req),
	.ipmi_bt_irq_ack(ipmi_bt_irq_ack),
	.ipmi_bt_irq_bmc_reset(ipmi_bt_irq_bmc_reset),
	.ipmi_bt_host_to_bmc_ctl_h_busy(ipmi_bt_host_to_bmc_ctl_h_busy),
	.ipmi_bt_irq_enable(ipmi_bt_irq_enable),
	.ipmi_bt_bmc_to_host_ctl_sms_req(ipmi_bt_bmc_to_host_ctl_sms_req_sync[1]),
	.ipmi_bt_bmc_to_host_ctl_attn_req(ipmi_bt_bmc_to_host_ctl_attn_req_sync[1]),
	.ipmi_bt_bmc_to_host_ctl_sms_ack_cont(ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[1]),
	.ipmi_bt_bmc_to_host_ctl_attn_ack_cont(ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[1]),
	.ipmi_bt_host_to_bmc_ctl_attn_req_cont(ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[1]),
	.ipmi_bt_host_to_bmc_ctl_oem0_req_cont(ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[1]),
	.ipmi_bt_irq_ack_cont(ipmi_bt_irq_ack_cont_sync[1]),
	.ipmi_bt_irq_bmc_reset_cont(ipmi_bt_irq_bmc_reset_cont_sync[1]),
	.ipmi_bt_bmc_to_host_ctl_b_busy(ipmi_bt_bmc_to_host_ctl_b_busy_sync[1]),
	.ipmi_bt_irq_req(ipmi_bt_irq_req_sync[1]),
	.fw_idsel(lpc_slave_fw_idsel),
	.fw_msize(lpc_slave_fw_msize),
`ifdef LPC_SLAVE_DEBUG
	.debug_port(lpc_slave_debug_port),
`endif
	.lpc_data_out(lpc_slave_lpc_data_out),
	.lpc_data_in(lpc_slave_lpc_data_in),
	.lpc_data_direction(lpc_slave_lpc_data_direction),
	.lpc_irq_out(lpc_slave_lpc_irq_out),
	.lpc_irq_in(lpc_slave_lpc_irq_in),
	.lpc_irq_direction(lpc_slave_lpc_irq_direction),
	.lpc_frame_n(lpc_slave_lpc_frame_n),
	.lpc_reset_n(lpc_slave_lpc_reset_n),
	.lpc_clock(lpc_clock)
);

// Create registered I/O signals on external LPC bus
always @(posedge lpc_clock) begin
	lpc_slave_lpc_frame_n <= lpc_frame_n;
	lpc_slave_lpc_reset_n <= lpc_reset_n;
end
assign lpc_data_out = lpc_slave_lpc_data_out;
assign lpc_slave_lpc_data_in = lpc_data_in;
assign lpc_data_direction = lpc_slave_lpc_data_direction;

always @(posedge lpc_clock) begin
	lpc_slave_lpc_irq_out_reg <= lpc_slave_lpc_irq_out;
	lpc_slave_lpc_irq_direction_reg <= lpc_slave_lpc_irq_direction;
end
assign lpc_irq_out = lpc_slave_lpc_irq_out_reg;
assign lpc_slave_lpc_irq_in = lpc_irq_in;
assign lpc_irq_direction = lpc_slave_lpc_irq_direction_reg;

reg [3:0] slave_wishbone_sel_reg = 0;
reg slave_wishbone_ack_reg = 0;
reg [31:0] slave_wishbone_dat_r_reg = 0;
assign slave_wishbone_ack = slave_wishbone_ack_reg;
assign slave_wishbone_dat_r = slave_wishbone_dat_r_reg;

reg master_wishbone_cyc_reg = 0;
reg master_wishbone_stb_reg = 0;
reg master_wishbone_we_reg = 0;
reg [(WISHBONE_DMA_ADDR_BUS_WIDTH-1):0] master_wishbone_adr_reg = 0;
reg [(WISHBONE_DMA_DATA_BUS_WIDTH-1):0] master_wishbone_dat_w_reg = 0;
reg [((WISHBONE_DMA_DATA_BUS_WIDTH/8)-1):0] master_wishbone_sel_reg = 0;
assign master_wishbone_cyc = master_wishbone_cyc_reg;
assign master_wishbone_stb = master_wishbone_stb_reg;
assign master_wishbone_we = master_wishbone_we_reg;
assign master_wishbone_adr = master_wishbone_adr_reg;
assign master_wishbone_dat_w = master_wishbone_dat_w_reg;
assign master_wishbone_sel = master_wishbone_sel_reg;
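// Note: the port comments above require the LPC data/IRQ path registers to
// be packed into the I/O buffers.  The (* syn_useioff *) attribute on
// lpc_slave_lpc_frame_n covers Synplify-style flows; on Vivado an
// equivalent (toolchain-specific, shown only as an illustration) hint
// would be:
//   (* IOB = "TRUE" *) reg lpc_slave_lpc_frame_n = 0;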
parameter AQUIEL_LPC_CYCLE_TYPE_IO = 0;
parameter AQUIEL_LPC_CYCLE_TYPE_TPM = 1;
parameter AQUIEL_LPC_CYCLE_TYPE_FIRMWARE = 2;

parameter LPC_SLAVE_TRANSFER_STATE_IDLE = 0;
parameter LPC_SLAVE_TRANSFER_STATE_IR01 = 1;
parameter LPC_SLAVE_TRANSFER_STATE_IR02 = 2;
parameter LPC_SLAVE_TRANSFER_STATE_IR03 = 3;
parameter LPC_SLAVE_TRANSFER_STATE_IW01 = 5;
parameter LPC_SLAVE_TRANSFER_STATE_IW02 = 6;
parameter LPC_SLAVE_TRANSFER_STATE_IW03 = 7;
parameter LPC_SLAVE_TRANSFER_STATE_IW04 = 8;
parameter LPC_SLAVE_TRANSFER_STATE_FR01 = 9;
parameter LPC_SLAVE_TRANSFER_STATE_FR02 = 10;
parameter LPC_SLAVE_TRANSFER_STATE_FR03 = 11;
parameter LPC_SLAVE_TRANSFER_STATE_FW01 = 12;
parameter LPC_SLAVE_TRANSFER_STATE_ER01 = 16;
parameter LPC_SLAVE_TRANSFER_STATE_DR01 = 17;
parameter LPC_SLAVE_TRANSFER_STATE_DR02 = 18;
parameter LPC_SLAVE_TRANSFER_STATE_DR03 = 19;
parameter LPC_SLAVE_TRANSFER_STATE_DR04 = 20;
parameter LPC_SLAVE_TRANSFER_STATE_DR05 = 21;
parameter LPC_SLAVE_TRANSFER_STATE_DR06 = 22;
parameter LPC_SLAVE_TRANSFER_STATE_DR07 = 23;
parameter LPC_SLAVE_TRANSFER_STATE_DR08 = 24;
parameter LPC_SLAVE_TRANSFER_STATE_DR09 = 25;
parameter LPC_SLAVE_TRANSFER_STATE_DR10 = 26;
parameter LPC_SLAVE_TRANSFER_STATE_DW01 = 27;
parameter LPC_SLAVE_TRANSFER_STATE_DW02 = 28;
parameter LPC_SLAVE_TRANSFER_STATE_DW03 = 29;
parameter LPC_SLAVE_TRANSFER_STATE_DW04 = 30;
parameter LPC_SLAVE_TRANSFER_STATE_DW05 = 31;
parameter LPC_SLAVE_TRANSFER_STATE_DW06 = 32;
parameter LPC_SLAVE_TRANSFER_STATE_DW07 = 33;
parameter LPC_SLAVE_TRANSFER_STATE_DW08 = 34;
parameter LPC_SLAVE_TRANSFER_STATE_DW09 = 35;
parameter LPC_SLAVE_TRANSFER_STATE_DW10 = 36;
parameter LPC_SLAVE_TRANSFER_STATE_DW11 = 37;
parameter LPC_SLAVE_TRANSFER_STATE_DF01 = 38;

parameter MMIO_TRANSFER_STATE_IDLE = 0;
parameter MMIO_TRANSFER_STATE_TR01 = 8;
parameter MMIO_TRANSFER_STATE_TR02 = 9;
parameter MMIO_TRANSFER_STATE_RD01 = 16;
parameter MMIO_TRANSFER_STATE_RD02 = 17;
parameter MMIO_TRANSFER_STATE_RD03 = 18;
parameter MMIO_TRANSFER_STATE_RD04 = 19;
parameter MMIO_TRANSFER_STATE_RD05 = 20;
parameter MMIO_TRANSFER_STATE_WR01 = 32;
parameter MMIO_TRANSFER_STATE_WR02 = 33;
parameter MMIO_TRANSFER_STATE_WR03 = 34;
parameter MMIO_TRANSFER_STATE_WR04 = 35;

reg [27:0] lpc_slave_address_reg = 0;
reg lpc_slave_firmware_cycle_reg = 0;
reg is_firmware_cycle_type = 0;
reg is_tpm_cycle_type = 0;
reg is_io_cycle_type = 0;
reg cycle_range_intercept_allowed = 0;
reg wishbone_mmio_access_is_32_bits = 0;
reg [31:0] mmio_lpc_buffer_address_reg = 0;
reg [7:0] mmio_transfer_state = 0;
reg [7:0] lpc_slave_transfer_state = 0;
reg mmio_guard_condition_required = 0;
reg [31:0] mmio_peripheral_tx_buffer = 0;
reg [31:0] mmio_peripheral_rx_buffer = 0;
reg [31:0] mmio_cfg_space_tx_buffer = 0;
reg [31:0] mmio_cfg_space_rx_buffer = 0;

assign debug_port[11:8] = lpc_slave_transfer_state[3:0];
assign debug_port[12] = master_wishbone_cyc;
assign debug_port[13] = master_wishbone_stb;
assign debug_port[14] = master_wishbone_ack;
assign debug_port[15] = master_wishbone_err;

assign lpc_clock_mirror = lpc_clock;

// Wishbone connector -- MMIO
always @(posedge peripheral_clock) begin
	if (peripheral_reset) begin
		// Reset Wishbone interface / control state machine
		slave_wishbone_ack_reg <= 0;
		wishbone_mmio_access_is_32_bits = 0;
		continue_transfer <= 0;
		vuart1_control_register <= 0;
		vuart2_control_register <= 0;
		vuart1_lcr_break_request <= 0;
		vuart2_lcr_break_request <= 0;
		vuart1_lcr_break_ack <= 0;
		vuart2_lcr_break_ack <= 0;
		vuart1_irq_source <= 0;
		vuart2_irq_source <= 0;
		vuart1_h2b_fifo_irq <= 0;
		vuart2_h2b_fifo_irq <= 0;
		vuart1_h2b_fifo_read_timeout <= 0;
		vuart2_h2b_fifo_read_timeout <= 0;
		vuart1_h2b_fifo_queue_past_trigger <= 0;
		vuart2_h2b_fifo_queue_past_trigger <= 0;
		vuart1_h2b_fifo_read_timeout_counter <= 0;
		vuart2_h2b_fifo_read_timeout_counter <= 0;
		ipmi_bt_bmc_to_host_ctl_sms_req <= 0;
		ipmi_bt_bmc_to_host_ctl_attn_req <= 0;
		ipmi_bt_bmc_to_host_ctl_sms_ack_cont <= 0;
		ipmi_bt_bmc_to_host_ctl_attn_ack_cont <= 0;
		ipmi_bt_host_to_bmc_ctl_attn_req_cont <= 0;
		ipmi_bt_host_to_bmc_ctl_oem0_req_cont <= 0;
		ipmi_bt_irq_ack_cont <= 0;
		ipmi_bt_irq_bmc_reset_cont <= 0;
		ipmi_bt_bmc_to_host_ctl_b_busy <= 1;	// BMC should always indicate busy until BMC software is online and clears the busy flag
		ipmi_bt_irq_req <= 0;
		ipmi_bt_bmc_irq <= 0;
		ipmi_bt_bmc_to_host_ctl_attn_req_prev <= 0;
		ipmi_bt_bmc_to_host_ctl_sms_req_prev <= 0;
		ipmi_bt_h2b_oem0_req_prev <= 0;
		mmio_transfer_state <= MMIO_TRANSFER_STATE_IDLE;
	end else begin
		case (mmio_transfer_state)
			MMIO_TRANSFER_STATE_IDLE: begin
				// Compute effective address
				mmio_lpc_buffer_address_reg[31:2] = slave_wishbone_adr;
				case (slave_wishbone_sel)
					4'b0001: mmio_lpc_buffer_address_reg[1:0] = 0;
					4'b0010: mmio_lpc_buffer_address_reg[1:0] = 1;
					4'b0100: mmio_lpc_buffer_address_reg[1:0] = 2;
					4'b1000: mmio_lpc_buffer_address_reg[1:0] = 3;
					4'b1111: mmio_lpc_buffer_address_reg[1:0] = 0;
					default: mmio_lpc_buffer_address_reg[1:0] = 0;
				endcase
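				// The decode above folds the Wishbone byte-lane select into the low
				// two bits of the effective byte address: e.g. an 8-bit access with
				// slave_wishbone_sel == 4'b0010 targets byte offset 1 within the
				// word, while a full 32-bit access (4'b1111) targets offset 0.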
				if (slave_wishbone_cyc && slave_wishbone_stb) begin
					mmio_guard_condition_required = 0;
					if (mmio_lpc_buffer_address_reg[31:20] == 12'h00e) begin
						// VUART register space access
						if (!continue_transfer) begin
							// Single clock pulse signals in deasserted state...process incoming request!
							if (!slave_wishbone_we) begin
								// Read requested
								case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})	// Bus is little endian!
									0: begin
										mmio_peripheral_tx_buffer = {(vuart1_h2b_fifo_rempty)?8'h00:vuart1_h2b_fifo_rdata, 7'b0,
											vuart1_h2b_fifo_rempty || !lpc_slave_lpc_reset_n || vuart1_h2b_fifo_reset,
											(vuart2_h2b_fifo_rempty)?8'h00:vuart2_h2b_fifo_rdata, 7'b0,
											vuart2_h2b_fifo_rempty || !lpc_slave_lpc_reset_n || vuart2_h2b_fifo_reset};
										if (slave_wishbone_sel[0]) begin
											// Wishbone bits 31:24
											if (!vuart1_h2b_fifo_rempty) begin vuart1_h2b_fifo_rpop <= 1; end
										end
										if (slave_wishbone_sel[2]) begin
											// Wishbone bits 15:8
											if (!vuart2_h2b_fifo_rempty) begin vuart2_h2b_fifo_rpop <= 1; end
										end
									end
									4: begin
										mmio_peripheral_tx_buffer[31:24] = vuart1_status_register_sync_2[7:0];
										mmio_peripheral_tx_buffer[23:16] = vuart1_status_register_sync_2[15:8];
										mmio_peripheral_tx_buffer[15:8] = vuart1_status_register_sync_2[23:16];
										mmio_peripheral_tx_buffer[7:0] = {6'b0, vuart1_lcr_break_request, vuart1_b2h_fifo_wfull};
										vuart1_lcr_break_ack <= 1;
									end
									8: begin
										mmio_peripheral_tx_buffer[31:24] = vuart1_control_register[7:0];
										mmio_peripheral_tx_buffer[23:16] = vuart1_control_register[15:8];
										mmio_peripheral_tx_buffer[15:8] = vuart1_control_register[23:16];
										mmio_peripheral_tx_buffer[7:0] = vuart1_control_register[31:24];
									end
									12: begin
										mmio_peripheral_tx_buffer[31:24] = vuart2_status_register_sync_2[7:0];
										mmio_peripheral_tx_buffer[23:16] = vuart2_status_register_sync_2[15:8];
										mmio_peripheral_tx_buffer[15:8] = vuart2_status_register_sync_2[23:16];
										mmio_peripheral_tx_buffer[7:0] = {6'b0, vuart2_lcr_break_request, vuart2_b2h_fifo_wfull};
										vuart2_lcr_break_ack <= 1;
									end
									16: begin
										mmio_peripheral_tx_buffer[31:24] = vuart2_control_register[7:0];
										mmio_peripheral_tx_buffer[23:16] = vuart2_control_register[15:8];
										mmio_peripheral_tx_buffer[15:8] = vuart2_control_register[23:16];
										mmio_peripheral_tx_buffer[7:0] = vuart2_control_register[31:24];
									end
									default: mmio_peripheral_tx_buffer = 32'hffffffff;
								endcase

								// Place data on Wishbone bus
								slave_wishbone_dat_r_reg <= mmio_peripheral_tx_buffer;

								// Signal transfer complete
								slave_wishbone_ack_reg <= 1;
								mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
							end else begin
								// Write requested
								case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
									8: mmio_cfg_space_rx_buffer = vuart1_control_register;
									16: mmio_cfg_space_rx_buffer = vuart2_control_register;
									default: mmio_cfg_space_rx_buffer = 0;
								endcase
								if (slave_wishbone_sel[0]) begin mmio_cfg_space_rx_buffer[7:0] = slave_wishbone_dat_w[31:24]; end
								if (slave_wishbone_sel[1]) begin mmio_cfg_space_rx_buffer[15:8] = slave_wishbone_dat_w[23:16]; end
								if (slave_wishbone_sel[2]) begin mmio_cfg_space_rx_buffer[23:16] = slave_wishbone_dat_w[15:8]; end
								if (slave_wishbone_sel[3]) begin mmio_cfg_space_rx_buffer[31:24] = slave_wishbone_dat_w[7:0]; end

								// Specialty bit handlers
								case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
									0: begin
										if (slave_wishbone_sel[0]) begin
											// Load VUART1 B2H FIFO
											if (!vuart1_b2h_fifo_wfull) begin
												vuart1_b2h_fifo_wdata <= mmio_cfg_space_rx_buffer[7:0];
												vuart1_b2h_fifo_wwren <= 1;
											end
										end
										if (slave_wishbone_sel[2]) begin
											// Load VUART2 B2H FIFO
											if (!vuart2_b2h_fifo_wfull) begin
												vuart2_b2h_fifo_wdata <= mmio_cfg_space_rx_buffer[23:16];
												vuart2_b2h_fifo_wwren <= 1;
											end
										end
									end
									8: begin
										if (mmio_cfg_space_rx_buffer[0]) begin
											// B2H BREAK request
											mmio_cfg_space_rx_buffer[0] = 0;
											if (!vuart1_assert_b2h_break_clear_sync[2]) begin vuart1_control_register[0] <= 1; end
										end
									end
									16: begin
										if (mmio_cfg_space_rx_buffer[0]) begin
											// B2H BREAK request
											mmio_cfg_space_rx_buffer[0] = 0;
											if (!vuart2_assert_b2h_break_clear_sync[2]) begin vuart2_control_register[0] <= 1; end
										end
									end
								endcase
								case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
									8: vuart1_control_register <= mmio_cfg_space_rx_buffer;
									16: vuart2_control_register <= mmio_cfg_space_rx_buffer;
								endcase

								// Signal transfer complete
								slave_wishbone_ack_reg <= 1;
								mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
							end
						end
					end else if (mmio_lpc_buffer_address_reg[31:20] == 12'h00f) begin
						// Configuration register space access
						if (!continue_transfer) begin
							// Single clock pulse signals in deasserted state...process incoming request!
							if (!slave_wishbone_we) begin
								// Read requested
								case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
									0: mmio_cfg_space_tx_buffer = device_id[63:32];
									4: mmio_cfg_space_tx_buffer = device_id[31:0];
									8: mmio_cfg_space_tx_buffer = device_version;
									12: mmio_cfg_space_tx_buffer = control_reg1;
									16: mmio_cfg_space_tx_buffer = control_reg2;
									20: mmio_cfg_space_tx_buffer = range_config1;
									24: mmio_cfg_space_tx_buffer = range_end1;
									28: mmio_cfg_space_tx_buffer = range_config2;
									32: mmio_cfg_space_tx_buffer = range_end2;
									36: mmio_cfg_space_tx_buffer = range_config3;
									40: mmio_cfg_space_tx_buffer = range_end3;
									44: mmio_cfg_space_tx_buffer = range_config4;
									48: mmio_cfg_space_tx_buffer = range_end4;
									52: mmio_cfg_space_tx_buffer = range_config5;
									56: mmio_cfg_space_tx_buffer = range_end5;
									60: mmio_cfg_space_tx_buffer = range_config6;
									64: mmio_cfg_space_tx_buffer = range_end6;
									68: mmio_cfg_space_tx_buffer = dma_config_reg1;
									72: mmio_cfg_space_tx_buffer = dma_config_reg2;
									76: mmio_cfg_space_tx_buffer = dma_config_reg3;
									80: mmio_cfg_space_tx_buffer = dma_config_reg4;
									84: mmio_cfg_space_tx_buffer = dma_config_reg5;
									88: mmio_cfg_space_tx_buffer = dma_config_reg6;
									92: mmio_cfg_space_tx_buffer = dma_config_reg7;
									96: mmio_cfg_space_tx_buffer = status_reg1;
									100: mmio_cfg_space_tx_buffer = status_reg2;
									104: mmio_cfg_space_tx_buffer = status_reg3;
									108: mmio_cfg_space_tx_buffer = status_reg4;
									112: mmio_cfg_space_tx_buffer = ipmi_bt_status_reg;
									default: mmio_cfg_space_tx_buffer = 0;
								endcase

								// Specialty bit handlers
								case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
									16: mmio_cfg_space_tx_buffer[0] = 0;	// continue_transfer always reads 0
								endcase

								// Endian swap
								// (e.g. device_version 32'h00010000 reads back as 32'h00000100 on the bus)
								slave_wishbone_dat_r_reg[31:24] <= mmio_cfg_space_tx_buffer[7:0];
								slave_wishbone_dat_r_reg[23:16] <= mmio_cfg_space_tx_buffer[15:8];
								slave_wishbone_dat_r_reg[15:8] <= mmio_cfg_space_tx_buffer[23:16];
								slave_wishbone_dat_r_reg[7:0] <= mmio_cfg_space_tx_buffer[31:24];

								// Signal transfer complete
								slave_wishbone_ack_reg <= 1;
								mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
							end else begin
								// Write requested
								case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
									// Device ID / version registers cannot be written, don't even try...
									12: mmio_cfg_space_rx_buffer = control_reg1;
									16: mmio_cfg_space_rx_buffer = control_reg2;
									20: mmio_cfg_space_rx_buffer = range_config1;
									24: mmio_cfg_space_rx_buffer = range_end1;
									28: mmio_cfg_space_rx_buffer = range_config2;
									32: mmio_cfg_space_rx_buffer = range_end2;
									36: mmio_cfg_space_rx_buffer = range_config3;
									40: mmio_cfg_space_rx_buffer = range_end3;
									44: mmio_cfg_space_rx_buffer = range_config4;
									48: mmio_cfg_space_rx_buffer = range_end4;
									52: mmio_cfg_space_rx_buffer = range_config5;
									56: mmio_cfg_space_rx_buffer = range_end5;
									60: mmio_cfg_space_rx_buffer = range_config6;
									64: mmio_cfg_space_rx_buffer = range_end6;
									68: mmio_cfg_space_rx_buffer = dma_config_reg1;
									72: mmio_cfg_space_rx_buffer = dma_config_reg2;
									76: mmio_cfg_space_rx_buffer = dma_config_reg3;
									80: mmio_cfg_space_rx_buffer = dma_config_reg4;
									84: mmio_cfg_space_rx_buffer = dma_config_reg5;
									88: mmio_cfg_space_rx_buffer = dma_config_reg6;
									92: mmio_cfg_space_rx_buffer = dma_config_reg7;
									// Status registers cannot be written, don't even try...
									default: mmio_cfg_space_rx_buffer = 0;
								endcase
								if (slave_wishbone_sel[0]) begin mmio_cfg_space_rx_buffer[7:0] = slave_wishbone_dat_w[31:24]; end
								if (slave_wishbone_sel[1]) begin mmio_cfg_space_rx_buffer[15:8] = slave_wishbone_dat_w[23:16]; end
								if (slave_wishbone_sel[2]) begin mmio_cfg_space_rx_buffer[23:16] = slave_wishbone_dat_w[15:8]; end
								if (slave_wishbone_sel[3]) begin mmio_cfg_space_rx_buffer[31:24] = slave_wishbone_dat_w[7:0]; end

								// Specialty bit handlers
								case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
									16: begin
										// continue_transfer
										if (mmio_cfg_space_rx_buffer[0]) begin
											mmio_cfg_space_rx_buffer[0] = 0;
											continue_transfer <= 1;
										end
									end
									112: begin
										// IPMI BT control register
										if (mmio_cfg_space_rx_buffer[0]) begin
											// CLR_WR_PTR
											ipmi_bt_input_xfer_write_addr <= 0;
										end
										if (mmio_cfg_space_rx_buffer[1]) begin
											// CLR_RD_PTR
											ipmi_bt_output_xfer_read_addr <= 0;
										end
										if (mmio_cfg_space_rx_buffer[2]) begin
											// H2B_ATN clear
											ipmi_bt_host_to_bmc_ctl_attn_req_cont <= 1;
											mmio_guard_condition_required = 1;
										end
										if (mmio_cfg_space_rx_buffer[3]) begin
											// B2H_ATN set
											ipmi_bt_bmc_to_host_ctl_attn_req <= 1;
										end
										if (mmio_cfg_space_rx_buffer[4]) begin
											// SMS_ATN set
											ipmi_bt_bmc_to_host_ctl_sms_req <= 1;
										end
										if (mmio_cfg_space_rx_buffer[5]) begin
											// OEM0 clear
											ipmi_bt_host_to_bmc_ctl_oem0_req_cont <= 1;
											mmio_guard_condition_required = 1;
										end
										if (mmio_cfg_space_rx_buffer[7]) begin
											// B_BUSY
											if (ipmi_bt_bmc_to_host_ctl_b_busy) begin
												ipmi_bt_bmc_to_host_ctl_b_busy <= 0;
											end else begin
												ipmi_bt_bmc_to_host_ctl_b_busy <= 1;
											end
										end
									end
								endcase
								case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
									12: control_reg1 <= mmio_cfg_space_rx_buffer;
									16: control_reg2 <= mmio_cfg_space_rx_buffer;
									20: range_config1 <= mmio_cfg_space_rx_buffer;
									24: range_end1 <= mmio_cfg_space_rx_buffer;
									28: range_config2 <= mmio_cfg_space_rx_buffer;
									32: range_end2 <= mmio_cfg_space_rx_buffer;
									36: range_config3 <= mmio_cfg_space_rx_buffer;
									40: range_end3 <= mmio_cfg_space_rx_buffer;
									44: range_config4 <= mmio_cfg_space_rx_buffer;
									48: range_end4 <= mmio_cfg_space_rx_buffer;
									52: range_config5 <= mmio_cfg_space_rx_buffer;
									56: range_end5 <= mmio_cfg_space_rx_buffer;
									60: range_config6 <= mmio_cfg_space_rx_buffer;
									64: range_end6 <= mmio_cfg_space_rx_buffer;
									68: dma_config_reg1 <= mmio_cfg_space_rx_buffer;
									72: dma_config_reg2 <= mmio_cfg_space_rx_buffer;
									76: dma_config_reg3 <= mmio_cfg_space_rx_buffer;
									80: dma_config_reg4 <= mmio_cfg_space_rx_buffer;
									84: dma_config_reg5 <= mmio_cfg_space_rx_buffer;
									88: dma_config_reg6 <= mmio_cfg_space_rx_buffer;
									92: dma_config_reg7 <= mmio_cfg_space_rx_buffer;
								endcase

								if (mmio_guard_condition_required) begin
									// Wait for the requested handshake(s) to complete before acknowledging
									mmio_transfer_state <= MMIO_TRANSFER_STATE_TR01;
								end else begin
									// Signal transfer complete
									slave_wishbone_ack_reg <= 1;
									mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
								end
							end
						end
					end else begin
						// Select 8-bit/32-bit transfer size via Wishbone access mode
						if (slave_wishbone_sel == 4'b1111) begin
							wishbone_mmio_access_is_32_bits = 1;
						end else begin
							wishbone_mmio_access_is_32_bits = 0;
						end
						slave_wishbone_sel_reg <= slave_wishbone_sel;

						if (!slave_wishbone_we) begin
							// Read requested
							// Set up read
							// (same setup either way; the access width only affects the states below)
							if (wishbone_mmio_access_is_32_bits) begin
								case (mmio_lpc_buffer_address_reg[31:20])
									12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0];
									12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0];
								endcase
							end else begin
								case (mmio_lpc_buffer_address_reg[31:20])
									12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0];
									12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0];
								endcase
							end
							mmio_transfer_state <= MMIO_TRANSFER_STATE_RD01;
						end else begin
							// Write requested
							// Take single cycle performance hit for simplicity here...
							mmio_transfer_state <= MMIO_TRANSFER_STATE_WR01;
						end
					end
				end
			end
			MMIO_TRANSFER_STATE_RD01: begin
				if (wishbone_mmio_access_is_32_bits) begin
					// Set up next read
					case (mmio_lpc_buffer_address_reg[31:20])
						12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 1;
						12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 1;
					endcase
					mmio_transfer_state <= MMIO_TRANSFER_STATE_RD02;
				end else begin
					// Wait for read data to become available
					mmio_transfer_state <= MMIO_TRANSFER_STATE_RD02;
				end
			end
			MMIO_TRANSFER_STATE_RD02: begin
				case (mmio_lpc_buffer_address_reg[31:20])
					12'h00c: xfer_read_data_buffer = lpc_fw_output_xfer_read_data;
					12'h00d: xfer_read_data_buffer = ipmi_bt_output_xfer_read_data;
				endcase
				if (wishbone_mmio_access_is_32_bits) begin
					// Read first byte
					slave_wishbone_dat_r_reg[31:24] <= xfer_read_data_buffer;
					// Set up next read
					case (mmio_lpc_buffer_address_reg[31:20])
						12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 2;
						12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 2;
					endcase
					mmio_transfer_state <= MMIO_TRANSFER_STATE_RD03;
				end else begin
					// Replicate the data byte to all active lanes
					if (slave_wishbone_sel_reg[0]) begin slave_wishbone_dat_r_reg[31:24] <= xfer_read_data_buffer; end
					if (slave_wishbone_sel_reg[1]) begin slave_wishbone_dat_r_reg[23:16] <= xfer_read_data_buffer; end
					if (slave_wishbone_sel_reg[2]) begin slave_wishbone_dat_r_reg[15:8] <= xfer_read_data_buffer; end
					if (slave_wishbone_sel_reg[3]) begin slave_wishbone_dat_r_reg[7:0] <= xfer_read_data_buffer; end

					// Signal transfer complete
					slave_wishbone_ack_reg <= 1;
					mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
				end
			end
			MMIO_TRANSFER_STATE_RD03: begin
				// Read second byte
				case (mmio_lpc_buffer_address_reg[31:20])
					12'h00c: begin
						slave_wishbone_dat_r_reg[23:16] <= lpc_fw_output_xfer_read_data;
						lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 3;
					end
					12'h00d: begin
						slave_wishbone_dat_r_reg[23:16] <= ipmi_bt_output_xfer_read_data;
						ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 3;
					end
				endcase
				mmio_transfer_state <= MMIO_TRANSFER_STATE_RD04;
			end
			MMIO_TRANSFER_STATE_RD04: begin
				// Read third byte
				case (mmio_lpc_buffer_address_reg[31:20])
					12'h00c: slave_wishbone_dat_r_reg[15:8] <= lpc_fw_output_xfer_read_data;
					12'h00d: slave_wishbone_dat_r_reg[15:8] <= ipmi_bt_output_xfer_read_data;
				endcase
				mmio_transfer_state <= MMIO_TRANSFER_STATE_RD05;
			end
			MMIO_TRANSFER_STATE_RD05: begin
				// Read fourth byte
				case (mmio_lpc_buffer_address_reg[31:20])
					12'h00c: slave_wishbone_dat_r_reg[7:0] <= lpc_fw_output_xfer_read_data;
					12'h00d: slave_wishbone_dat_r_reg[7:0] <= ipmi_bt_output_xfer_read_data;
				endcase

				// Signal transfer complete
				slave_wishbone_ack_reg <= 1;
				mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
			end
			MMIO_TRANSFER_STATE_WR01: begin
				if (lpc_fw_dma_cycle_inactive) begin
					// No conflict present on LPC buffer write signals
					if (wishbone_mmio_access_is_32_bits) begin
						// Write first byte
						case (mmio_lpc_buffer_address_reg[31:20])
							12'h00c: begin
								lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 3;
								lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[7:0];
								lpc_fw_input_xfer_mmio_write_wren <= 1;
							end
							12'h00d: begin
								ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 3;
								ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[7:0];
								ipmi_bt_input_xfer_write_wren <= 1;
							end
						endcase
						mmio_transfer_state <= MMIO_TRANSFER_STATE_WR02;
					end else begin
						// Read the data byte to write from the active lane
						if (slave_wishbone_sel_reg[0]) begin
							xfer_write_data_buffer = slave_wishbone_dat_w[31:24];
						end else if (slave_wishbone_sel_reg[1]) begin
							xfer_write_data_buffer = slave_wishbone_dat_w[23:16];
						end else if (slave_wishbone_sel_reg[2]) begin
							xfer_write_data_buffer = slave_wishbone_dat_w[15:8];
						end else if (slave_wishbone_sel_reg[3]) begin
							xfer_write_data_buffer = slave_wishbone_dat_w[7:0];
						end else begin
							xfer_write_data_buffer = 8'hff;
						end
						case (mmio_lpc_buffer_address_reg[31:20])
							12'h00c: begin
								lpc_fw_input_xfer_mmio_write_data <= xfer_write_data_buffer;
								lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0];
								lpc_fw_input_xfer_mmio_write_wren <= 1;
							end
							12'h00d: begin
								ipmi_bt_input_xfer_write_data <= xfer_write_data_buffer;
								ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0];
								ipmi_bt_input_xfer_write_wren <= 1;
							end
						endcase

						// Signal transfer complete
						slave_wishbone_ack_reg <= 1;
						mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
					end
				end
			end
			MMIO_TRANSFER_STATE_WR02: begin
				if (lpc_fw_dma_cycle_inactive) begin
					// No conflict present on LPC buffer write signals
					// Write second byte
					case (mmio_lpc_buffer_address_reg[31:20])
						12'h00c: begin
							lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 2;
							lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[15:8];
							lpc_fw_input_xfer_mmio_write_wren <= 1;
						end
						12'h00d: begin
							ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 2;
							ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[15:8];
							ipmi_bt_input_xfer_write_wren <= 1;
						end
					endcase
					mmio_transfer_state <= MMIO_TRANSFER_STATE_WR03;
				end
			end
			MMIO_TRANSFER_STATE_WR03: begin
				if (lpc_fw_dma_cycle_inactive) begin
					// No conflict present on LPC buffer write signals
					// Write third byte
					case (mmio_lpc_buffer_address_reg[31:20])
						12'h00c: begin
							lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 1;
							lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[23:16];
							lpc_fw_input_xfer_mmio_write_wren <= 1;
						end
						12'h00d: begin
							ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 1;
							ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[23:16];
							ipmi_bt_input_xfer_write_wren <= 1;
						end
					endcase
					mmio_transfer_state <= MMIO_TRANSFER_STATE_WR04;
				end
			end
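			// Note: the WR01-WR04 sequence stores the 32-bit Wishbone word into the
			// byte-wide transfer buffer MSB-first (buffer offset +0 receives
			// slave_wishbone_dat_w[31:24]), mirroring the read-path endian swap.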
			MMIO_TRANSFER_STATE_WR04: begin
				if (lpc_fw_dma_cycle_inactive) begin
					// No conflict present on LPC buffer write signals
					// Write fourth byte
					case (mmio_lpc_buffer_address_reg[31:20])
						12'h00c: begin
							lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0];
							lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[31:24];
							lpc_fw_input_xfer_mmio_write_wren <= 1;
						end
						12'h00d: begin
							ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0];
							ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[31:24];
							ipmi_bt_input_xfer_write_wren <= 1;
						end
					endcase

					// Signal transfer complete
					slave_wishbone_ack_reg <= 1;
					mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
				end
			end
			MMIO_TRANSFER_STATE_TR01: begin
				if (ipmi_bt_host_to_bmc_ctl_attn_req_cont) begin
					if (!ipmi_bt_host_to_bmc_ctl_attn_req_sync[2]) begin
						ipmi_bt_host_to_bmc_ctl_attn_req_cont <= 0;
					end
				end else if (ipmi_bt_host_to_bmc_ctl_oem0_req_cont) begin
					if (!ipmi_bt_host_to_bmc_ctl_oem0_req_sync[2]) begin
						ipmi_bt_host_to_bmc_ctl_oem0_req_cont <= 0;
					end
				end else begin
					// Signal transfer complete
					slave_wishbone_ack_reg <= 1;
					mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
				end
			end
			MMIO_TRANSFER_STATE_TR02: begin
				// Cycle complete
				slave_wishbone_ack_reg <= 0;
				vuart1_h2b_fifo_rpop <= 0;
				vuart2_h2b_fifo_rpop <= 0;
				vuart1_b2h_fifo_wwren <= 0;
				vuart2_b2h_fifo_wwren <= 0;
				lpc_fw_input_xfer_mmio_write_wren <= 0;
				ipmi_bt_input_xfer_write_wren <= 0;
				mmio_transfer_state <= MMIO_TRANSFER_STATE_IDLE;
			end
			default: begin
				// Should never reach this state
				mmio_transfer_state <= MMIO_TRANSFER_STATE_IDLE;
			end
		endcase

		// This assumes the MMIO handler is driven off the same clock as the LPC transfer handler
		// It will generate a single clock width pulse on the continue line
		if (continue_transfer) begin
			continue_transfer <= 0;
		end

		if ((mmio_transfer_state == MMIO_TRANSFER_STATE_IDLE) && !(slave_wishbone_cyc && slave_wishbone_stb)) begin
			// Bits are not being actively set / cleared by the BMC, therefore it is now safe
			// to execute the handshake logic (potential race conditions avoided)

			// VUART handshake logic
			if (vuart1_control_register[0] && vuart1_assert_b2h_break_clear_sync[2]) begin
				vuart1_control_register[0] <= 0;
			end
			if (vuart2_control_register[0] && vuart2_assert_b2h_break_clear_sync[2]) begin
				vuart2_control_register[0] <= 0;
			end
			if (vuart1_status_register_sync_2[6]) begin
				vuart1_lcr_break_request <= 1;
			end else begin
				if (vuart1_lcr_break_ack) begin
					vuart1_lcr_break_request <= 0;
					vuart1_lcr_break_ack <= 0;
				end
			end
			if (vuart2_status_register_sync_2[6]) begin
				vuart2_lcr_break_request <= 1;
			end else begin
				if (vuart2_lcr_break_ack) begin
					vuart2_lcr_break_request <= 0;
					vuart2_lcr_break_ack <= 0;
				end
			end

			// VUART1 IRQ handler logic
			if (vuart1_h2b_fifo_rpop) begin
				vuart1_h2b_fifo_read_timeout_counter <= 0;
				vuart1_h2b_fifo_read_timeout <= 0;
			end else begin
				if (vuart1_h2b_fifo_rempty) begin
					vuart1_h2b_fifo_read_timeout_counter <= 0;
					vuart1_h2b_fifo_read_timeout <= 0;
				end else begin
					if (vuart1_h2b_fifo_read_timeout_counter > 1000) begin
						vuart1_h2b_fifo_read_timeout <= 1;
					end else begin
						vuart1_h2b_fifo_read_timeout_counter <= vuart1_h2b_fifo_read_timeout_counter + 1;
					end
				end
			end
			if ((vuart1_h2b_fifo_data_available_count[3:0] >= vuart1_h2b_fifo_irq_trigger_level) || vuart1_h2b_fifo_wfull) begin
				vuart1_h2b_fifo_queue_past_trigger <= 1;
			end else begin
				vuart1_h2b_fifo_queue_past_trigger <= 0;
			end
			// VUART1 IRQ handler logic
			if (vuart1_h2b_fifo_rpop) begin
				vuart1_h2b_fifo_read_timeout_counter <= 0;
				vuart1_h2b_fifo_read_timeout <= 0;
			end else begin
				if (vuart1_h2b_fifo_rempty) begin
					vuart1_h2b_fifo_read_timeout_counter <= 0;
					vuart1_h2b_fifo_read_timeout <= 0;
				end else begin
					if (vuart1_h2b_fifo_read_timeout_counter > 1000) begin
						vuart1_h2b_fifo_read_timeout <= 1;
					end else begin
						vuart1_h2b_fifo_read_timeout_counter <= vuart1_h2b_fifo_read_timeout_counter + 1;
					end
				end
			end
			if ((vuart1_h2b_fifo_data_available_count[3:0] >= vuart1_h2b_fifo_irq_trigger_level) || vuart1_h2b_fifo_wfull) begin
				vuart1_h2b_fifo_queue_past_trigger <= 1;
			end else begin
				vuart1_h2b_fifo_queue_past_trigger <= 0;
			end
			if (vuart1_irqs_enabled) begin
				if (vuart1_h2b_fifo_irq_enabled && vuart1_h2b_fifo_queue_past_trigger) begin
					vuart1_h2b_fifo_irq <= 1;
					vuart1_irq_source <= VUART_IRQ_REASON_QUEUE_TRIGGER;
				end else if (vuart1_h2b_fifo_irq_enabled && vuart1_h2b_fifo_read_timeout) begin
					vuart1_h2b_fifo_irq <= 1;
					vuart1_irq_source <= VUART_IRQ_REASON_QUEUE_TIMEOUT;
				end else begin
					vuart1_irq_source <= VUART_IRQ_REASON_NONE;
					vuart1_h2b_fifo_irq <= 0;
				end
			end else begin
				vuart1_irq_source <= VUART_IRQ_REASON_NONE;
				vuart1_h2b_fifo_irq <= 0;
			end

			// VUART2 IRQ handler logic
			if (vuart2_h2b_fifo_rpop) begin
				vuart2_h2b_fifo_read_timeout_counter <= 0;
				vuart2_h2b_fifo_read_timeout <= 0;
			end else begin
				if (vuart2_h2b_fifo_rempty) begin
					vuart2_h2b_fifo_read_timeout_counter <= 0;
					vuart2_h2b_fifo_read_timeout <= 0;
				end else begin
					if (vuart2_h2b_fifo_read_timeout_counter > 1000) begin
						vuart2_h2b_fifo_read_timeout <= 1;
					end else begin
						vuart2_h2b_fifo_read_timeout_counter <= vuart2_h2b_fifo_read_timeout_counter + 1;
					end
				end
			end
			if ((vuart2_h2b_fifo_data_available_count[3:0] >= vuart2_h2b_fifo_irq_trigger_level) || vuart2_h2b_fifo_wfull) begin
				vuart2_h2b_fifo_queue_past_trigger <= 1;
			end else begin
				vuart2_h2b_fifo_queue_past_trigger <= 0;
			end
			if (vuart2_irqs_enabled) begin
				if (vuart2_h2b_fifo_irq_enabled && vuart2_h2b_fifo_queue_past_trigger) begin
					vuart2_h2b_fifo_irq <= 1;
					vuart2_irq_source <= VUART_IRQ_REASON_QUEUE_TRIGGER;
				end else if (vuart2_h2b_fifo_irq_enabled && vuart2_h2b_fifo_read_timeout) begin
					vuart2_h2b_fifo_irq <= 1;
					vuart2_irq_source <= VUART_IRQ_REASON_QUEUE_TIMEOUT;
				end else begin
					vuart2_irq_source <= VUART_IRQ_REASON_NONE;
					vuart2_h2b_fifo_irq <= 0;
				end
			end else begin
				vuart2_irq_source <= VUART_IRQ_REASON_NONE;
				vuart2_h2b_fifo_irq <= 0;
			end

			// IPMI handshake handler logic
			if (ipmi_bt_bmc_to_host_ctl_attn_ack_sync[2]) begin
				ipmi_bt_bmc_to_host_ctl_attn_req <= 0;
				ipmi_bt_bmc_to_host_ctl_attn_ack_cont <= 1;
			end else begin
				ipmi_bt_bmc_to_host_ctl_attn_ack_cont <= 0;
			end
			if (ipmi_bt_bmc_to_host_ctl_sms_ack_sync[2]) begin
				ipmi_bt_bmc_to_host_ctl_sms_req <= 0;
				ipmi_bt_bmc_to_host_ctl_sms_ack_cont <= 1;
			end else begin
				ipmi_bt_bmc_to_host_ctl_sms_ack_cont <= 0;
			end

			// IPMI BMC IRQ handler logic
			if (ipmi_bt_irq_ack_sync[2]) begin
				ipmi_bt_irq_req <= 0;
				ipmi_bt_irq_ack_cont <= 1;
			end else begin
				if (!ipmi_bt_irq_ack_cont) begin
					if (ipmi_bt_irq_enable_sync[2] && !ipmi_bt_irq_ack_cont
						&& ((!ipmi_bt_h2b_oem0_req_prev && ipmi_bt_h2b_oem0_req)
						|| (!ipmi_bt_bmc_to_host_ctl_attn_req_prev && ipmi_bt_bmc_to_host_ctl_attn_req)
						|| (!ipmi_bt_bmc_to_host_ctl_sms_req_prev && ipmi_bt_bmc_to_host_ctl_sms_req))) begin
						ipmi_bt_irq_req <= 1;
					end
				end else begin
					ipmi_bt_irq_ack_cont <= 0;
				end
			end
			if (!ipmi_bt_irq_ack_cont) begin
				// Wait for prior IRQ line handshake to complete before sampling the B2H_ATN line
				// This ensures that the IRQ is still fired if the continue signal is asserted while
				// B2H_ATN transitions from inactive to active.
				ipmi_bt_bmc_to_host_ctl_attn_req_prev <= ipmi_bt_bmc_to_host_ctl_attn_req;
				ipmi_bt_bmc_to_host_ctl_sms_req_prev <= ipmi_bt_bmc_to_host_ctl_sms_req;
				ipmi_bt_h2b_oem0_req_prev <= ipmi_bt_h2b_oem0_req;
			end
			if (ipmi_bt_bmc_irq_enable && ipmi_bt_host_to_bmc_ctl_attn_req_sync[2]) begin
				ipmi_bt_bmc_irq <= 1;
			end else begin
				ipmi_bt_bmc_irq <= 0;
			end
		end
	end
end

// Wishbone connector -- CSRs
//
// WARNING: The LPC slave will run at ~33.33MHz from an external clock source.
// This module assumes the Wishbone bus will be clocked at no less than 1.5x the
// external LPC clock frequency, i.e. no lower than 50MHz.  All synchronizer logic
// is built around this assumption; violating it *will* lead to data corruption
// and unpredictable / undefined behavior!
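// For example, with the nominal 33.33MHz LPC clock the minimum legal Wishbone clock is
// 33.33MHz * 1.5 = 50MHz.  A simulation-time guard along the lines of the illustrative,
// commented-out sketch below could catch a misconfigured platform early; the frequency
// values shown are assumptions for illustration only and are not part of this design.
//
// localparam real EXAMPLE_LPC_CLOCK_HZ      = 33333333.0;
// localparam real EXAMPLE_WISHBONE_CLOCK_HZ = 50000000.0;
// initial begin
//	if (EXAMPLE_WISHBONE_CLOCK_HZ < (EXAMPLE_LPC_CLOCK_HZ * 1.5)) begin
//		$display("ERROR: Wishbone clock must be at least 1.5x the LPC clock frequency");
//		$finish;
//	end
// end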
always @(posedge peripheral_clock) begin
	if (peripheral_reset || !lpc_reset_n_sync[2]) begin
		// Reset Wishbone interface / control state machine
		lpc_slave_address_reg <= 0;
		lpc_slave_firmware_cycle_reg <= 0;
		attn_req <= 0;
		pending_address <= 0;
		pending_data <= 0;
		pending_fw_cycle_idsel <= 0;
		pending_fw_cycle_msize <= 0;
		lpc_fw_dma_cycle_active <= 0;
		lpc_fw_dma_cycle_inactive <= 1;
		lpc_io_cycle_irq <= 0;
		lpc_tpm_cycle_irq <= 0;
		lpc_firmware_cycle_irq <= 0;
		lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
	end else begin
		case (lpc_slave_transfer_state)
			LPC_SLAVE_TRANSFER_STATE_IDLE: begin
				if (lpc_slave_address_ready_sync[2]) begin
					// Determine cycle type
					is_firmware_cycle_type = lpc_slave_firmware_cycle && !lpc_slave_tpm_cycle;
					is_tpm_cycle_type = !lpc_slave_firmware_cycle && lpc_slave_tpm_cycle;
					is_io_cycle_type = !lpc_slave_firmware_cycle && !lpc_slave_tpm_cycle;

					// Check if cycle is configured for intercept
					cycle_range_intercept_allowed = 0;
					if (range_1_enable && ((is_io_cycle_type && range_1_allow_io) || (is_tpm_cycle_type && range_1_allow_tpm))) begin
						if ((lpc_slave_address >= range_1_start_address) && (lpc_slave_address <= range_1_end_address)) begin
							cycle_range_intercept_allowed = 1;
						end
					end
					if (range_2_enable && ((is_io_cycle_type && range_2_allow_io) || (is_tpm_cycle_type && range_2_allow_tpm))) begin
						if ((lpc_slave_address >= range_2_start_address) && (lpc_slave_address <= range_2_end_address)) begin
							cycle_range_intercept_allowed = 1;
						end
					end
					if (range_3_enable && ((is_io_cycle_type && range_3_allow_io) || (is_tpm_cycle_type && range_3_allow_tpm))) begin
						if ((lpc_slave_address >= range_3_start_address) && (lpc_slave_address <= range_3_end_address)) begin
							cycle_range_intercept_allowed = 1;
						end
					end
					if (range_4_enable && ((is_io_cycle_type && range_4_allow_io) || (is_tpm_cycle_type && range_4_allow_tpm))) begin
						if ((lpc_slave_address >= range_4_start_address) && (lpc_slave_address <= range_4_end_address)) begin
							cycle_range_intercept_allowed = 1;
						end
					end
					if (range_5_enable && ((is_io_cycle_type && range_5_allow_io) || (is_tpm_cycle_type && range_5_allow_tpm))) begin
						if ((lpc_slave_address >= range_5_start_address) && (lpc_slave_address <= range_5_end_address)) begin
							cycle_range_intercept_allowed = 1;
						end
					end
					if (range_6_enable && ((is_io_cycle_type && range_6_allow_io) || (is_tpm_cycle_type && range_6_allow_tpm))) begin
						if ((lpc_slave_address >= range_6_start_address) && (lpc_slave_address <= range_6_end_address)) begin
							cycle_range_intercept_allowed = 1;
						end
					end
					if (is_firmware_cycle_type) begin
						// Firmware cycles are not range-configurable
						cycle_range_intercept_allowed = 1;
					end
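					// Range comparisons are inclusive at both ends and ranges may overlap; a cycle
					// is intercepted when any enabled range of the matching type covers its address.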
					if (enable_firmware_cycles && is_firmware_cycle_type && cycle_range_intercept_allowed) begin
						// Handle firmware cycle here...
						cycle_type <= AQUIEL_LPC_CYCLE_TYPE_FIRMWARE;
						pending_address <= lpc_slave_address;
						cycle_direction <= lpc_slave_cycle_direction;
						pending_fw_cycle_idsel <= lpc_slave_fw_idsel;
						pending_fw_cycle_msize <= lpc_slave_fw_msize;
						if (lpc_slave_cycle_direction) begin
							// Write
							if (lpc_fw_cycle_dma_write_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == lpc_slave_fw_idsel))) begin
								// DMA enabled
								lpc_fw_dma_current_buffer_address <= 0;
								lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + (lpc_slave_address & lpc_fw_dma_offset_address_mask);
								lpc_fw_dma_cycle_active <= 1;
								lpc_fw_dma_cycle_inactive <= 0;
								lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW01;
							end else begin
								// DMA disabled
								lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FW01;
							end
						end else begin
							// Read
							if (lpc_fw_cycle_dma_read_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == lpc_slave_fw_idsel))) begin
								// DMA enabled
								lpc_fw_dma_current_buffer_address <= 0;
								lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + (lpc_slave_address & lpc_fw_dma_offset_address_mask);
								lpc_fw_dma_cycle_active <= 1;
								lpc_fw_dma_cycle_inactive <= 0;
								lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR01;
							end else begin
								// DMA disabled
								attn_req <= 1;
								lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR01;
							end
						end
					end else if (enable_tpm_cycles && is_tpm_cycle_type && cycle_range_intercept_allowed) begin
						// Handle TPM cycle here...
						cycle_type <= AQUIEL_LPC_CYCLE_TYPE_TPM;
						pending_address <= lpc_slave_address;
						cycle_direction <= lpc_slave_cycle_direction;
						pending_fw_cycle_idsel <= 0;
						pending_fw_cycle_msize <= 0;
						if (lpc_slave_cycle_direction) begin
							// Write
							lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW01;
						end else begin
							// Read
							attn_req <= 1;
							lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR01;
						end
					end else if (enable_io_cycles && is_io_cycle_type && cycle_range_intercept_allowed) begin
						// Handle I/O cycle here...
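						// TPM and I/O cycles share the same interlocked IW/IR handshake path below;
						// only the latched cycle_type differs.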
						cycle_type <= AQUIEL_LPC_CYCLE_TYPE_IO;
						pending_address <= lpc_slave_address;
						cycle_direction <= lpc_slave_cycle_direction;
						pending_fw_cycle_idsel <= 0;
						pending_fw_cycle_msize <= 0;
						if (lpc_slave_cycle_direction) begin
							// Write
							lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW01;
						end else begin
							// Read
							attn_req <= 1;
							lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR01;
						end
					end else begin
						// Ignore all other cycle types, along with any known cycle types that the CPU has chosen not to intercept
						if (lpc_slave_data_ready_sync[2] && !lpc_slave_continue) begin
							lpc_slave_continue <= 1;
							lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR02;
						end
						if (lpc_slave_data_ready_sync[2] && !lpc_slave_data_ack) begin
							lpc_slave_data_ack <= 1;
							lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW03;
						end
						if (lpc_slave_exception_sync_2 && !lpc_slave_exception_ack) begin
							lpc_slave_exception_ack <= 1;
							lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_ER01;
						end
					end

					// Latch address and control registers for further use
					lpc_slave_address_reg <= lpc_slave_address;
					lpc_slave_firmware_cycle_reg <= lpc_slave_firmware_cycle;
				end else begin
					// Ensure LPC DMA transfer buffer control is released if no LPC cycle is active
					lpc_fw_dma_cycle_active <= 0;
					lpc_fw_dma_cycle_inactive <= 1;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_IW01: begin
				if (lpc_slave_data_ready_sync[2]) begin
					// Latch data register for CPU to read
					pending_data <= lpc_slave_rx_data;
					// Signal CPU attention required
					attn_req <= 1;
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW02;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_IW02: begin
				if (continue_transfer) begin
					// CPU handler complete!
					// Deassert attention request and start LPC ACK process
					lpc_slave_data_ack <= 1;
					attn_req <= 0;
					lpc_slave_signal_error <= signal_error;
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW03;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_IW03: begin
				if (lpc_slave_data_ready_cont_sync[2]) begin
					lpc_slave_data_ack <= 0;
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW04;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_IW04: begin
				if ((!lpc_slave_address_ready_sync[2]) && (!lpc_slave_data_ready_cont_sync[2])) begin
					// Interlocked handshake complete!
					// Return to idle
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_IR01: begin
				if (continue_transfer) begin
					// CPU handler complete!
					// Deassert attention request and start LPC response process
					if (signal_error) begin
						lpc_slave_tx_data <= 8'hff;
					end else begin
						lpc_slave_tx_data <= data_out;
					end
					lpc_slave_continue <= 1;
					attn_req <= 0;
					lpc_slave_signal_error <= signal_error;
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR02;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_IR02: begin
				if (lpc_slave_continue_cont_sync[2]) begin
					lpc_slave_continue <= 0;
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR03;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_IR03: begin
				if (!lpc_slave_address_ready_sync[2]) begin
					// Interlocked handshake complete!
					// Return to idle
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
				end
			end
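			// The IW*/IR* states above implement a four-phase (request/acknowledge) handshake with
			// the LPC clock domain: assert ack (or continue), wait for the synchronized echo from
			// the LPC side, deassert, then wait for address_ready to fall before returning to idle.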
			LPC_SLAVE_TRANSFER_STATE_DW01: begin
				if (lpc_slave_data_ready_sync[2]) begin
					// Set up first byte read
					lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 0;
					// Data ready, fire off DMA engine
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW02;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_DW02: begin
				if (((pending_address & lpc_fw_dma_offset_address_mask) >= lpc_fw_dma_valid_window_start_offset)
					&& ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_valid_window_end_offset)) begin
					if ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_length) begin
						// DMA request is valid, start transfer
						// Set up second byte read
						lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 1;
						// Continue processing
						lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW03;
					end else begin
						// Invalid DMA requested, fall back to CPU processing
						lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
						// Release transfer RAM control signals
						lpc_fw_dma_cycle_active <= 0;
					end
				end else begin
					// Invalid DMA requested, fall back to CPU processing
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
					// Release transfer RAM control signals
					lpc_fw_dma_cycle_active <= 0;
				end
				lpc_fw_input_xfer_dma_write_wren <= 0;
			end
			LPC_SLAVE_TRANSFER_STATE_DW03: begin
				// Read first byte
				master_wishbone_dat_w_reg[63:56] <= lpc_fw_output_xfer_read_data;
				// Set up third byte read
				lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 2;
				// Continue processing
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW04;
			end
			LPC_SLAVE_TRANSFER_STATE_DW04: begin
				// Read second byte
				master_wishbone_dat_w_reg[55:48] <= lpc_fw_output_xfer_read_data;
				// Set up fourth byte read
				lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 3;
				// Continue processing
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW05;
			end
			LPC_SLAVE_TRANSFER_STATE_DW05: begin
				// Read third byte
				master_wishbone_dat_w_reg[47:40] <= lpc_fw_output_xfer_read_data;
				// Set up fifth byte read
				lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 4;
				// Continue processing
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW06;
			end
			LPC_SLAVE_TRANSFER_STATE_DW06: begin
				// Read fourth byte
				master_wishbone_dat_w_reg[39:32] <= lpc_fw_output_xfer_read_data;
				// Set up sixth byte read
				lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 5;
				// Continue processing
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW07;
			end
			LPC_SLAVE_TRANSFER_STATE_DW07: begin
				// Read fifth byte
				master_wishbone_dat_w_reg[31:24] <= lpc_fw_output_xfer_read_data;
				// Set up seventh byte read
				lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 6;
				// Continue processing
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW08;
			end
			LPC_SLAVE_TRANSFER_STATE_DW08: begin
				// Read sixth byte
				master_wishbone_dat_w_reg[23:16] <= lpc_fw_output_xfer_read_data;
				// Set up eighth byte read
				lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 7;
				// Continue processing
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW09;
			end
			LPC_SLAVE_TRANSFER_STATE_DW09: begin
				// Read seventh byte
				master_wishbone_dat_w_reg[15:8] <= lpc_fw_output_xfer_read_data;
				// Continue processing
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW10;
			end
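			// States DW03-DW10 stream eight bytes from the transfer buffer into a single 64-bit
			// Wishbone write; the buffer read address is advanced one state ahead of each data
			// capture to hide the one-cycle read latency of the transfer RAM.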
			LPC_SLAVE_TRANSFER_STATE_DW10: begin
				if (master_wishbone_ack) begin
					// Internal fault, fall back to CPU processing
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
					// Release transfer RAM control signals
					lpc_fw_dma_cycle_active <= 0;
				end else begin
					if (lpc_fw_cycle_dma_write_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == pending_fw_cycle_idsel))) begin
						// Read eighth byte
						master_wishbone_dat_w_reg[7:0] <= lpc_fw_output_xfer_read_data;
						// Start Wishbone transfer
						master_wishbone_adr_reg <= lpc_fw_dma_current_wb_address;
						master_wishbone_we_reg <= 1;
						master_wishbone_sel_reg <= 8'b11111111;
						master_wishbone_cyc_reg <= 1;
						master_wishbone_stb_reg <= 1;
						// Wait for Wishbone response
						lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW11;
					end else begin
						// Internal fault, fall back to CPU processing
						lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
						// Release transfer RAM control signals
						lpc_fw_dma_cycle_active <= 0;
					end
				end
			end
			LPC_SLAVE_TRANSFER_STATE_DW11: begin
				if (master_wishbone_err) begin
					// Release bus
					master_wishbone_cyc_reg <= 0;
					master_wishbone_stb_reg <= 0;
					// DMA failed, fall back to CPU processing
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
					// Release transfer RAM control signals
					lpc_fw_dma_cycle_active <= 0;
				end else if (master_wishbone_ack) begin
					// Release bus
					master_wishbone_cyc_reg <= 0;
					master_wishbone_stb_reg <= 0;
					if (((pending_fw_cycle_msize == 4'b0100) && (lpc_fw_dma_current_buffer_address < (16 - 4)))
						|| ((pending_fw_cycle_msize == 4'b0111) && (lpc_fw_dma_current_buffer_address < (128 - 4)))) begin
						// Set up next transfer
						lpc_fw_dma_current_buffer_address <= lpc_fw_dma_current_buffer_address + 8;
						pending_address <= pending_address + 8;
						lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + ((pending_address + 8) & lpc_fw_dma_offset_address_mask);
						lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW02;
					end else begin
						// Release transfer RAM control signals
						lpc_fw_dma_cycle_active <= 0;
						// Start LPC response process
						lpc_slave_data_ack <= 1;
						lpc_slave_signal_error <= 0;
						lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW03;
					end
				end
			end
			LPC_SLAVE_TRANSFER_STATE_FW01: begin
				if (lpc_slave_data_ready_sync[2]) begin
					// Signal CPU attention required
					attn_req <= 1;
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW02;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_DR01: begin
				if (((pending_address & lpc_fw_dma_offset_address_mask) >= lpc_fw_dma_valid_window_start_offset)
					&& ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_valid_window_end_offset)) begin
					if ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_length) begin
						if (master_wishbone_ack) begin
							// Internal fault, fall back to CPU processing
							lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
							// Release transfer RAM control signals
							lpc_fw_dma_cycle_active <= 0;
						end else begin
							// DMA request is valid, start transfer
							master_wishbone_adr_reg <= lpc_fw_dma_current_wb_address;
							master_wishbone_we_reg <= 0;
							master_wishbone_sel_reg <= 8'b11111111;
							master_wishbone_cyc_reg <= 1;
							master_wishbone_stb_reg <= 1;
							// Wait for Wishbone response
							lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR02;
						end
					end else begin
						// Invalid DMA requested, fall back to CPU processing
						lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
						// Release transfer RAM control signals
						lpc_fw_dma_cycle_active <= 0;
					end
				end else begin
					// Invalid DMA requested, fall back to CPU processing
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
					// Release transfer RAM control signals
					lpc_fw_dma_cycle_active <= 0;
				end
				lpc_fw_input_xfer_dma_write_wren <= 0;
			end
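			// DR01 validates the requested offset against the configured valid window and DMA length
			// before issuing the 64-bit Wishbone read; out-of-window requests fall back to CPU
			// processing via DF01.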
			LPC_SLAVE_TRANSFER_STATE_DR02: begin
				if (master_wishbone_err) begin
					// Release bus
					master_wishbone_cyc_reg <= 0;
					master_wishbone_stb_reg <= 0;
					// DMA failed, fall back to CPU processing
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
					// Release transfer RAM control signals
					lpc_fw_dma_cycle_active <= 0;
				end else if (master_wishbone_ack) begin
					// Release bus
					master_wishbone_cyc_reg <= 0;
					master_wishbone_stb_reg <= 0;
					// Cache read data
					lpc_fw_dma_data_cache_reg <= master_wishbone_dat_r;
					// Write first byte
					lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 0;
					lpc_fw_input_xfer_dma_write_data <= master_wishbone_dat_r[63:56];
					lpc_fw_input_xfer_dma_write_wren <= 1;
					// Continue processing
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR03;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_DR03: begin
				// Write second byte
				lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 1;
				lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[55:48];
				lpc_fw_input_xfer_dma_write_wren <= 1;
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR04;
			end
			LPC_SLAVE_TRANSFER_STATE_DR04: begin
				// Write third byte
				lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 2;
				lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[47:40];
				lpc_fw_input_xfer_dma_write_wren <= 1;
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR05;
			end
			LPC_SLAVE_TRANSFER_STATE_DR05: begin
				// Write fourth byte
				lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 3;
				lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[39:32];
				lpc_fw_input_xfer_dma_write_wren <= 1;
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR06;
			end
			LPC_SLAVE_TRANSFER_STATE_DR06: begin
				// Write fifth byte
				lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 4;
				lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[31:24];
				lpc_fw_input_xfer_dma_write_wren <= 1;
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR07;
			end
			LPC_SLAVE_TRANSFER_STATE_DR07: begin
				// Write sixth byte
				lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 5;
				lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[23:16];
				lpc_fw_input_xfer_dma_write_wren <= 1;
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR08;
			end
			LPC_SLAVE_TRANSFER_STATE_DR08: begin
				// Write seventh byte
				lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 6;
				lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[15:8];
				lpc_fw_input_xfer_dma_write_wren <= 1;
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR09;
			end
			LPC_SLAVE_TRANSFER_STATE_DR09: begin
				// Write eighth byte
				lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 7;
				lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[7:0];
				lpc_fw_input_xfer_dma_write_wren <= 1;
				if (lpc_fw_cycle_dma_read_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == pending_fw_cycle_idsel))) begin
					if (((pending_fw_cycle_msize == 4'b0100) && (lpc_fw_dma_current_buffer_address < (16 - 4)))
						|| ((pending_fw_cycle_msize == 4'b0111) && (lpc_fw_dma_current_buffer_address < (128 - 4)))) begin
						// Set up next transfer
						lpc_fw_dma_current_buffer_address <= lpc_fw_dma_current_buffer_address + 8;
						pending_address <= pending_address + 8;
						lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + ((pending_address + 8) & lpc_fw_dma_offset_address_mask);
						lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR01;
					end else begin
						lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR10;
					end
				end else begin
					// DMA failed, fall back to CPU processing
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
					// Release transfer RAM control signals
					lpc_fw_dma_cycle_active <= 0;
				end
			end
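			// The loop bounds above yield the two supported firmware cycle block sizes: msize 4'b0100
			// transfers 16 bytes (two 8-byte Wishbone beats) and msize 4'b0111 transfers 128 bytes
			// (sixteen beats), matching the corresponding LPC firmware cycle sizes.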
			LPC_SLAVE_TRANSFER_STATE_DR10: begin
				// Release transfer RAM control signals
				lpc_fw_dma_cycle_active <= 0;
				// Start LPC response process
				lpc_slave_continue <= 1;
				lpc_slave_signal_error <= 0;
				lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR02;
			end
			LPC_SLAVE_TRANSFER_STATE_DF01: begin
				// If DMA was active, allow one cycle for RAM control signals to reload from override status
				lpc_fw_dma_cycle_inactive <= 1;
				// DMA transfer failed, fall back to CPU processing
				attn_req <= 1;
				if (cycle_direction) begin
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW02;
				end else begin
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR01;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_FR01: begin
				if (continue_transfer) begin
					// CPU handler complete!
					// Deassert attention request and start LPC response process
					lpc_slave_continue <= 1;
					attn_req <= 0;
					lpc_slave_signal_error <= signal_error;
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR02;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_FR02: begin
				// Allow one cycle for RAM control signals to reload from override status
				// This is safe to set here regardless of whether the previous cycle was actually handled by the DMA engine
				lpc_fw_dma_cycle_inactive <= 1;
				if (lpc_slave_continue_cont_sync[2]) begin
					lpc_slave_continue <= 0;
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR03;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_FR03: begin
				if (!lpc_slave_address_ready_sync[2]) begin
					// Interlocked handshake complete!
					// Return to idle
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
				end
			end
			LPC_SLAVE_TRANSFER_STATE_ER01: begin
				if (!lpc_slave_exception_sync_2) begin
					lpc_slave_exception_ack <= 0;
					// Interlocked handshake complete!
					// Return to idle
					lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
				end
			end
		endcase
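		// While a cycle is awaiting CPU service, attn_req routes the request to exactly one of the
		// three per-cycle-type IRQ lines below, gated by the corresponding IRQ enable bit.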
		if (attn_req) begin
			case (cycle_type)
				AQUIEL_LPC_CYCLE_TYPE_IO: begin
					if (lpc_io_cycle_irq_enable) begin
						lpc_io_cycle_irq <= 1;
					end else begin
						lpc_io_cycle_irq <= 0;
					end
					lpc_tpm_cycle_irq <= 0;
					lpc_firmware_cycle_irq <= 0;
				end
				AQUIEL_LPC_CYCLE_TYPE_TPM: begin
					lpc_io_cycle_irq <= 0;
					if (lpc_tpm_cycle_irq_enable) begin
						lpc_tpm_cycle_irq <= 1;
					end else begin
						lpc_tpm_cycle_irq <= 0;
					end
					lpc_firmware_cycle_irq <= 0;
				end
				AQUIEL_LPC_CYCLE_TYPE_FIRMWARE: begin
					lpc_io_cycle_irq <= 0;
					lpc_tpm_cycle_irq <= 0;
					if (lpc_firmware_cycle_irq_enable) begin
						lpc_firmware_cycle_irq <= 1;
					end else begin
						lpc_firmware_cycle_irq <= 0;
					end
				end
				default: begin
					lpc_io_cycle_irq <= 0;
					lpc_tpm_cycle_irq <= 0;
					lpc_firmware_cycle_irq <= 0;
				end
			endcase
		end else begin
			lpc_io_cycle_irq <= 0;
			lpc_tpm_cycle_irq <= 0;
			lpc_firmware_cycle_irq <= 0;
		end
	end

	// Synchronizer logic for LPC core to Wishbone traffic
	// Three flip-flops are used for maximum MTBF on the control lines
	// All data paths are synchronized from these signals using req/ack handshaking mechanisms
	lpc_slave_address_ready_sync[2] <= lpc_slave_address_ready_sync[1];
	lpc_slave_address_ready_sync[1] <= lpc_slave_address_ready_sync[0];
	lpc_slave_address_ready_sync[0] <= lpc_slave_address_ready;
	lpc_slave_data_ready_sync[2] <= lpc_slave_data_ready_sync[1];
	lpc_slave_data_ready_sync[1] <= lpc_slave_data_ready_sync[0];
	lpc_slave_data_ready_sync[0] <= lpc_slave_data_ready;
	lpc_slave_exception_sync_2 <= lpc_slave_exception_sync_1;
	lpc_slave_exception_sync_1 <= lpc_slave_exception_sync_0;
	lpc_slave_exception_sync_0 <= lpc_slave_exception;
	lpc_slave_data_ready_cont_sync[2] <= lpc_slave_data_ready_cont_sync[1];
	lpc_slave_data_ready_cont_sync[1] <= lpc_slave_data_ready_cont_sync[0];
	lpc_slave_data_ready_cont_sync[0] <= lpc_slave_data_ready_cont;
	lpc_slave_continue_cont_sync[2] <= lpc_slave_continue_cont_sync[1];
	lpc_slave_continue_cont_sync[1] <= lpc_slave_continue_cont;
	lpc_reset_n_sync[2] <= lpc_reset_n_sync[1];
	lpc_reset_n_sync[1] <= lpc_reset_n_sync[0];
	lpc_reset_n_sync[0] <= lpc_reset_n;
	vuart1_h2b_fifo_reset_sync[2] <= vuart1_h2b_fifo_reset_sync[1];
	vuart1_h2b_fifo_reset_sync[1] <= vuart1_h2b_fifo_reset_sync[0];
	vuart1_h2b_fifo_reset_sync[0] <= vuart1_h2b_fifo_reset;
	vuart2_h2b_fifo_reset_sync[2] <= vuart2_h2b_fifo_reset_sync[1];
	vuart2_h2b_fifo_reset_sync[1] <= vuart2_h2b_fifo_reset_sync[0];
	vuart2_h2b_fifo_reset_sync[0] <= vuart2_h2b_fifo_reset;
	vuart1_b2h_fifo_wfull_sync[2] <= vuart1_b2h_fifo_wfull_sync[1];
	vuart1_b2h_fifo_wfull_sync[1] <= vuart1_b2h_fifo_wfull_sync[0];
	vuart1_b2h_fifo_wfull_sync[0] <= vuart1_b2h_fifo_wfull;
	vuart1_b2h_fifo_reset_sync[2] <= vuart1_b2h_fifo_reset_sync[1];
	vuart1_b2h_fifo_reset_sync[1] <= vuart1_b2h_fifo_reset_sync[0];
	vuart1_b2h_fifo_reset_sync[0] <= vuart1_b2h_fifo_reset;
	vuart2_b2h_fifo_wfull_sync[2] <= vuart2_b2h_fifo_wfull_sync[1];
	vuart2_b2h_fifo_wfull_sync[1] <= vuart2_b2h_fifo_wfull_sync[0];
	vuart2_b2h_fifo_wfull_sync[0] <= vuart2_b2h_fifo_wfull;
	vuart2_b2h_fifo_reset_sync[2] <= vuart2_b2h_fifo_reset_sync[1];
	vuart2_b2h_fifo_reset_sync[1] <= vuart2_b2h_fifo_reset_sync[0];
	vuart2_b2h_fifo_reset_sync[0] <= vuart2_b2h_fifo_reset;
	vuart1_status_register_sync_2 <= vuart1_status_register_sync_1;
	vuart1_status_register_sync_1 <= vuart1_status_register_sync_0;
	vuart1_status_register_sync_0 <= vuart1_status_register;
	vuart2_status_register_sync_2 <= vuart2_status_register_sync_1;
	vuart2_status_register_sync_1 <= vuart2_status_register_sync_0;
	vuart2_status_register_sync_0 <= vuart2_status_register;
	vuart1_assert_b2h_break_clear_sync[2] <= vuart1_assert_b2h_break_clear_sync[1];
	vuart1_assert_b2h_break_clear_sync[1] <= vuart1_assert_b2h_break_clear_sync[0];
	vuart1_assert_b2h_break_clear_sync[0] <= vuart1_assert_b2h_break_clear;
	vuart2_assert_b2h_break_clear_sync[2] <= vuart2_assert_b2h_break_clear_sync[1];
	vuart2_assert_b2h_break_clear_sync[1] <= vuart2_assert_b2h_break_clear_sync[0];
	vuart2_assert_b2h_break_clear_sync[0] <= vuart2_assert_b2h_break_clear;
	ipmi_bt_bmc_to_host_ctl_sms_ack_sync[2] <= ipmi_bt_bmc_to_host_ctl_sms_ack_sync[1];
	ipmi_bt_bmc_to_host_ctl_sms_ack_sync[1] <= ipmi_bt_bmc_to_host_ctl_sms_ack_sync[0];
	ipmi_bt_bmc_to_host_ctl_sms_ack_sync[0] <= ipmi_bt_bmc_to_host_ctl_sms_ack;
	ipmi_bt_bmc_to_host_ctl_attn_ack_sync[2] <= ipmi_bt_bmc_to_host_ctl_attn_ack_sync[1];
	ipmi_bt_bmc_to_host_ctl_attn_ack_sync[1] <= ipmi_bt_bmc_to_host_ctl_attn_ack_sync[0];
	ipmi_bt_bmc_to_host_ctl_attn_ack_sync[0] <= ipmi_bt_bmc_to_host_ctl_attn_ack;
	ipmi_bt_host_to_bmc_ctl_attn_req_sync[2] <= ipmi_bt_host_to_bmc_ctl_attn_req_sync[1];
	ipmi_bt_host_to_bmc_ctl_attn_req_sync[1] <= ipmi_bt_host_to_bmc_ctl_attn_req_sync[0];
	ipmi_bt_host_to_bmc_ctl_attn_req_sync[0] <= ipmi_bt_host_to_bmc_ctl_attn_req;
	ipmi_bt_host_to_bmc_ctl_oem0_req_sync[2] <= ipmi_bt_host_to_bmc_ctl_oem0_req_sync[1];
	ipmi_bt_host_to_bmc_ctl_oem0_req_sync[1] <= ipmi_bt_host_to_bmc_ctl_oem0_req_sync[0];
	ipmi_bt_host_to_bmc_ctl_oem0_req_sync[0] <= ipmi_bt_host_to_bmc_ctl_oem0_req;
	ipmi_bt_irq_ack_sync[2] <= ipmi_bt_irq_ack_sync[1];
	ipmi_bt_irq_ack_sync[1] <= ipmi_bt_irq_ack_sync[0];
	ipmi_bt_irq_ack_sync[0] <= ipmi_bt_irq_ack;
	ipmi_bt_irq_bmc_reset_sync[2] <= ipmi_bt_irq_bmc_reset_sync[1];
	ipmi_bt_irq_bmc_reset_sync[1] <= ipmi_bt_irq_bmc_reset_sync[0];
	ipmi_bt_irq_bmc_reset_sync[0] <= ipmi_bt_irq_bmc_reset;
	ipmi_bt_host_to_bmc_ctl_h_busy_sync[2] <= ipmi_bt_host_to_bmc_ctl_h_busy_sync[1];
	ipmi_bt_host_to_bmc_ctl_h_busy_sync[1] <= ipmi_bt_host_to_bmc_ctl_h_busy_sync[0];
	ipmi_bt_host_to_bmc_ctl_h_busy_sync[0] <= ipmi_bt_host_to_bmc_ctl_h_busy;
	ipmi_bt_irq_enable_sync[2] <= ipmi_bt_irq_enable_sync[1];
	ipmi_bt_irq_enable_sync[1] <= ipmi_bt_irq_enable_sync[0];
	ipmi_bt_irq_enable_sync[0] <= ipmi_bt_irq_enable;
end

// Synchronizer logic for Wishbone to LPC core traffic
always @(posedge lpc_clock) begin
	// Two flip-flops are used on the return path
	lpc_slave_continue_sync[1] <= lpc_slave_continue_sync[0];
	lpc_slave_continue_sync[0] <= lpc_slave_continue;
	lpc_slave_data_ack_sync[1] <= lpc_slave_data_ack_sync[0];
	lpc_slave_data_ack_sync[0] <= lpc_slave_data_ack;
	lpc_slave_signal_error_sync[1] <= lpc_slave_signal_error_sync[0];
	lpc_slave_signal_error_sync[0] <= lpc_slave_signal_error;
	lpc_slave_exception_ack_sync[1] <= lpc_slave_exception_ack_sync[0];
	lpc_slave_exception_ack_sync[0] <= lpc_slave_exception_ack;
	irq_tx_ready_sync[1] <= irq_tx_ready_sync[0];
	irq_tx_ready_sync[0] <= irq_tx_ready;
	irq_request_sync_1 <= irq_request_sync_0;
	irq_request_sync_0 <= irq_request;
	peripheral_reset_sync[1] <= peripheral_reset_sync[0];
	peripheral_reset_sync[0] <= peripheral_reset;
	vuart1_h2b_fifo_rempty_sync[1] <= vuart1_h2b_fifo_rempty_sync[0];
	vuart1_h2b_fifo_rempty_sync[0] <= vuart1_h2b_fifo_rempty;
	vuart2_h2b_fifo_rempty_sync[1] <= vuart2_h2b_fifo_rempty_sync[0];
	vuart2_h2b_fifo_rempty_sync[0] <= vuart2_h2b_fifo_rempty;
	vuart1_control_register_sync_1 <= vuart1_control_register_sync_0;
	vuart1_control_register_sync_0 <= vuart1_control_register;
	vuart2_control_register_sync_1 <= vuart2_control_register_sync_0;
	vuart2_control_register_sync_0 <= vuart2_control_register;
	ipmi_bt_bmc_to_host_ctl_sms_req_sync[1] <= ipmi_bt_bmc_to_host_ctl_sms_req_sync[0];
	ipmi_bt_bmc_to_host_ctl_sms_req_sync[0] <= ipmi_bt_bmc_to_host_ctl_sms_req;
	ipmi_bt_bmc_to_host_ctl_attn_req_sync[1] <= ipmi_bt_bmc_to_host_ctl_attn_req_sync[0];
	ipmi_bt_bmc_to_host_ctl_attn_req_sync[0] <= ipmi_bt_bmc_to_host_ctl_attn_req;
	ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[1] <= ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[0];
	ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[0] <= ipmi_bt_bmc_to_host_ctl_sms_ack_cont;
	ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[1] <= ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[0];
	ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[0] <= ipmi_bt_bmc_to_host_ctl_attn_ack_cont;
	ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[1] <= ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[0];
	ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[0] <= ipmi_bt_host_to_bmc_ctl_attn_req_cont;
	ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[1] <= ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[0];
	ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[0] <= ipmi_bt_host_to_bmc_ctl_oem0_req_cont;
	ipmi_bt_irq_ack_cont_sync[1] <= ipmi_bt_irq_ack_cont_sync[0];
	ipmi_bt_irq_ack_cont_sync[0] <= ipmi_bt_irq_ack_cont;
	ipmi_bt_irq_bmc_reset_cont_sync[1] <= ipmi_bt_irq_bmc_reset_cont_sync[0];
	ipmi_bt_irq_bmc_reset_cont_sync[0] <= ipmi_bt_irq_bmc_reset_cont;
	ipmi_bt_bmc_to_host_ctl_b_busy_sync[1] <= ipmi_bt_bmc_to_host_ctl_b_busy_sync[0];
	ipmi_bt_bmc_to_host_ctl_b_busy_sync[0] <= ipmi_bt_bmc_to_host_ctl_b_busy;
	ipmi_bt_irq_req_sync[1] <= ipmi_bt_irq_req_sync[0];
	ipmi_bt_irq_req_sync[0] <= ipmi_bt_irq_req;
end

endmodule
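
// =============================================================================================
// Editorial note: the synchronizer chains above are written out longhand.  The pattern they
// implement is the standard multi-stage flip-flop bit synchronizer sketched below.  This
// example module is illustrative only -- it is not instantiated anywhere in this design, and
// its name and parameterization are hypothetical.
// =============================================================================================
module example_bit_synchronizer #(
	parameter STAGES = 3	// Must be >= 2; three stages for maximum MTBF on control lines, two on less critical return paths
) (
	input wire clk,		// Destination clock domain
	input wire data_in,	// Asynchronous input bit
	output wire data_out	// Input bit resynchronized to clk
);
	reg [(STAGES-1):0] sync_chain = 0;

	// Shift the asynchronous bit through the synchronizer chain on each destination clock edge
	always @(posedge clk) begin
		sync_chain <= {sync_chain[(STAGES-2):0], data_in};
	end

	assign data_out = sync_chain[(STAGES-1)];
endmodule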