Index: head/sys/dev/sfxge/common/efx.h =================================================================== --- head/sys/dev/sfxge/common/efx.h (revision 310751) +++ head/sys/dev/sfxge/common/efx.h (revision 310752) @@ -1,2497 +1,2497 @@ /*- * Copyright (c) 2006-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. * * $FreeBSD$ */ #ifndef _SYS_EFX_H #define _SYS_EFX_H #include "efsys.h" #include "efx_check.h" #include "efx_phy_ids.h" #ifdef __cplusplus extern "C" { #endif #define EFX_STATIC_ASSERT(_cond) \ ((void)sizeof(char[(_cond) ? 
1 : -1])) #define EFX_ARRAY_SIZE(_array) \ (sizeof(_array) / sizeof((_array)[0])) #define EFX_FIELD_OFFSET(_type, _field) \ ((size_t) &(((_type *)0)->_field)) /* Return codes */ typedef __success(return == 0) int efx_rc_t; /* Chip families */ typedef enum efx_family_e { EFX_FAMILY_INVALID, EFX_FAMILY_FALCON, /* Obsolete and not supported */ EFX_FAMILY_SIENA, EFX_FAMILY_HUNTINGTON, EFX_FAMILY_MEDFORD, EFX_FAMILY_NTYPES } efx_family_t; extern __checkReturn efx_rc_t efx_family( __in uint16_t venid, __in uint16_t devid, __out efx_family_t *efp); #define EFX_PCI_VENID_SFC 0x1924 #define EFX_PCI_DEVID_FALCON 0x0710 /* SFC4000 */ #define EFX_PCI_DEVID_BETHPAGE 0x0803 /* SFC9020 */ #define EFX_PCI_DEVID_SIENA 0x0813 /* SFL9021 */ #define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810 #define EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT 0x0901 #define EFX_PCI_DEVID_FARMINGDALE 0x0903 /* SFC9120 PF */ #define EFX_PCI_DEVID_GREENPORT 0x0923 /* SFC9140 PF */ #define EFX_PCI_DEVID_FARMINGDALE_VF 0x1903 /* SFC9120 VF */ #define EFX_PCI_DEVID_GREENPORT_VF 0x1923 /* SFC9140 VF */ #define EFX_PCI_DEVID_MEDFORD_PF_UNINIT 0x0913 #define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */ #define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */ #define EFX_MEM_BAR 2 /* Error codes */ enum { EFX_ERR_INVALID, EFX_ERR_SRAM_OOB, EFX_ERR_BUFID_DC_OOB, EFX_ERR_MEM_PERR, EFX_ERR_RBUF_OWN, EFX_ERR_TBUF_OWN, EFX_ERR_RDESQ_OWN, EFX_ERR_TDESQ_OWN, EFX_ERR_EVQ_OWN, EFX_ERR_EVFF_OFLO, EFX_ERR_ILL_ADDR, EFX_ERR_SRAM_PERR, EFX_ERR_NCODES }; /* Calculate the IEEE 802.3 CRC32 of a MAC addr */ extern __checkReturn uint32_t efx_crc32_calculate( __in uint32_t crc_init, __in_ecount(length) uint8_t const *input, __in int length); /* Type prototypes */ typedef struct efx_rxq_s efx_rxq_t; /* NIC */ typedef struct efx_nic_s efx_nic_t; #define EFX_NIC_FUNC_PRIMARY 0x00000001 #define EFX_NIC_FUNC_LINKCTRL 0x00000002 #define EFX_NIC_FUNC_TRUSTED 0x00000004 extern __checkReturn efx_rc_t efx_nic_create( __in efx_family_t family, __in efsys_identifier_t *esip, __in efsys_bar_t *esbp, __in efsys_lock_t *eslp, __deref_out efx_nic_t **enpp); extern __checkReturn efx_rc_t efx_nic_probe( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_nic_init( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_nic_reset( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t efx_nic_register_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern void efx_nic_fini( __in efx_nic_t *enp); extern void efx_nic_unprobe( __in efx_nic_t *enp); extern void efx_nic_destroy( __in efx_nic_t *enp); #define EFX_PCIE_LINK_SPEED_GEN1 1 #define EFX_PCIE_LINK_SPEED_GEN2 2 #define EFX_PCIE_LINK_SPEED_GEN3 3 typedef enum efx_pcie_link_performance_e { EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH, EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH, EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY, EFX_PCIE_LINK_PERFORMANCE_OPTIMAL } efx_pcie_link_performance_t; extern __checkReturn efx_rc_t efx_nic_calculate_pcie_link_bandwidth( __in uint32_t pcie_link_width, __in uint32_t pcie_link_gen, __out uint32_t *bandwidth_mbpsp); extern __checkReturn efx_rc_t efx_nic_check_pcie_link_speed( __in efx_nic_t *enp, __in uint32_t pcie_link_width, __in uint32_t pcie_link_gen, __out efx_pcie_link_performance_t *resultp); #if EFSYS_OPT_MCDI #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD /* Huntington and Medford require MCDIv2 commands */ #define WITH_MCDI_V2 1 #endif typedef struct efx_mcdi_req_s efx_mcdi_req_t; typedef enum efx_mcdi_exception_e { EFX_MCDI_EXCEPTION_MC_REBOOT, 
EFX_MCDI_EXCEPTION_MC_BADASSERT, } efx_mcdi_exception_t; #if EFSYS_OPT_MCDI_LOGGING typedef enum efx_log_msg_e { EFX_LOG_INVALID, EFX_LOG_MCDI_REQUEST, EFX_LOG_MCDI_RESPONSE, } efx_log_msg_t; #endif /* EFSYS_OPT_MCDI_LOGGING */ typedef struct efx_mcdi_transport_s { void *emt_context; efsys_mem_t *emt_dma_mem; void (*emt_execute)(void *, efx_mcdi_req_t *); void (*emt_ev_cpl)(void *); void (*emt_exception)(void *, efx_mcdi_exception_t); #if EFSYS_OPT_MCDI_LOGGING void (*emt_logger)(void *, efx_log_msg_t, void *, size_t, void *, size_t); #endif /* EFSYS_OPT_MCDI_LOGGING */ #if EFSYS_OPT_MCDI_PROXY_AUTH void (*emt_ev_proxy_response)(void *, uint32_t, efx_rc_t); #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ } efx_mcdi_transport_t; extern __checkReturn efx_rc_t efx_mcdi_init( __in efx_nic_t *enp, __in const efx_mcdi_transport_t *mtp); extern __checkReturn efx_rc_t efx_mcdi_reboot( __in efx_nic_t *enp); void efx_mcdi_new_epoch( __in efx_nic_t *enp); extern void efx_mcdi_request_start( __in efx_nic_t *enp, __in efx_mcdi_req_t *emrp, __in boolean_t ev_cpl); extern __checkReturn boolean_t efx_mcdi_request_poll( __in efx_nic_t *enp); extern __checkReturn boolean_t efx_mcdi_request_abort( __in efx_nic_t *enp); extern void efx_mcdi_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_MCDI */ /* INTR */ #define EFX_NINTR_SIENA 1024 typedef enum efx_intr_type_e { EFX_INTR_INVALID = 0, EFX_INTR_LINE, EFX_INTR_MESSAGE, EFX_INTR_NTYPES } efx_intr_type_t; #define EFX_INTR_SIZE (sizeof (efx_oword_t)) extern __checkReturn efx_rc_t efx_intr_init( __in efx_nic_t *enp, __in efx_intr_type_t type, __in efsys_mem_t *esmp); extern void efx_intr_enable( __in efx_nic_t *enp); extern void efx_intr_disable( __in efx_nic_t *enp); extern void efx_intr_disable_unlocked( __in efx_nic_t *enp); #define EFX_INTR_NEVQS 32 extern __checkReturn efx_rc_t efx_intr_trigger( __in efx_nic_t *enp, __in unsigned int level); extern void efx_intr_status_line( __in efx_nic_t *enp, __out boolean_t *fatalp, __out uint32_t *maskp); extern void efx_intr_status_message( __in efx_nic_t *enp, __in unsigned int message, __out boolean_t *fatalp); extern void efx_intr_fatal( __in efx_nic_t *enp); extern void efx_intr_fini( __in efx_nic_t *enp); /* MAC */ #if EFSYS_OPT_MAC_STATS /* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */ typedef enum efx_mac_stat_e { EFX_MAC_RX_OCTETS, EFX_MAC_RX_PKTS, EFX_MAC_RX_UNICST_PKTS, EFX_MAC_RX_MULTICST_PKTS, EFX_MAC_RX_BRDCST_PKTS, EFX_MAC_RX_PAUSE_PKTS, EFX_MAC_RX_LE_64_PKTS, EFX_MAC_RX_65_TO_127_PKTS, EFX_MAC_RX_128_TO_255_PKTS, EFX_MAC_RX_256_TO_511_PKTS, EFX_MAC_RX_512_TO_1023_PKTS, EFX_MAC_RX_1024_TO_15XX_PKTS, EFX_MAC_RX_GE_15XX_PKTS, EFX_MAC_RX_ERRORS, EFX_MAC_RX_FCS_ERRORS, EFX_MAC_RX_DROP_EVENTS, EFX_MAC_RX_FALSE_CARRIER_ERRORS, EFX_MAC_RX_SYMBOL_ERRORS, EFX_MAC_RX_ALIGN_ERRORS, EFX_MAC_RX_INTERNAL_ERRORS, EFX_MAC_RX_JABBER_PKTS, EFX_MAC_RX_LANE0_CHAR_ERR, EFX_MAC_RX_LANE1_CHAR_ERR, EFX_MAC_RX_LANE2_CHAR_ERR, EFX_MAC_RX_LANE3_CHAR_ERR, EFX_MAC_RX_LANE0_DISP_ERR, EFX_MAC_RX_LANE1_DISP_ERR, EFX_MAC_RX_LANE2_DISP_ERR, EFX_MAC_RX_LANE3_DISP_ERR, EFX_MAC_RX_MATCH_FAULT, EFX_MAC_RX_NODESC_DROP_CNT, EFX_MAC_TX_OCTETS, EFX_MAC_TX_PKTS, EFX_MAC_TX_UNICST_PKTS, EFX_MAC_TX_MULTICST_PKTS, EFX_MAC_TX_BRDCST_PKTS, EFX_MAC_TX_PAUSE_PKTS, EFX_MAC_TX_LE_64_PKTS, EFX_MAC_TX_65_TO_127_PKTS, EFX_MAC_TX_128_TO_255_PKTS, EFX_MAC_TX_256_TO_511_PKTS, EFX_MAC_TX_512_TO_1023_PKTS, EFX_MAC_TX_1024_TO_15XX_PKTS, EFX_MAC_TX_GE_15XX_PKTS, EFX_MAC_TX_ERRORS, EFX_MAC_TX_SGL_COL_PKTS, EFX_MAC_TX_MULT_COL_PKTS, EFX_MAC_TX_EX_COL_PKTS, 
EFX_MAC_TX_LATE_COL_PKTS, EFX_MAC_TX_DEF_PKTS, EFX_MAC_TX_EX_DEF_PKTS, EFX_MAC_PM_TRUNC_BB_OVERFLOW, EFX_MAC_PM_DISCARD_BB_OVERFLOW, EFX_MAC_PM_TRUNC_VFIFO_FULL, EFX_MAC_PM_DISCARD_VFIFO_FULL, EFX_MAC_PM_TRUNC_QBB, EFX_MAC_PM_DISCARD_QBB, EFX_MAC_PM_DISCARD_MAPPING, EFX_MAC_RXDP_Q_DISABLED_PKTS, EFX_MAC_RXDP_DI_DROPPED_PKTS, EFX_MAC_RXDP_STREAMING_PKTS, EFX_MAC_RXDP_HLB_FETCH, EFX_MAC_RXDP_HLB_WAIT, EFX_MAC_VADAPTER_RX_UNICAST_PACKETS, EFX_MAC_VADAPTER_RX_UNICAST_BYTES, EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS, EFX_MAC_VADAPTER_RX_MULTICAST_BYTES, EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS, EFX_MAC_VADAPTER_RX_BROADCAST_BYTES, EFX_MAC_VADAPTER_RX_BAD_PACKETS, EFX_MAC_VADAPTER_RX_BAD_BYTES, EFX_MAC_VADAPTER_RX_OVERFLOW, EFX_MAC_VADAPTER_TX_UNICAST_PACKETS, EFX_MAC_VADAPTER_TX_UNICAST_BYTES, EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS, EFX_MAC_VADAPTER_TX_MULTICAST_BYTES, EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS, EFX_MAC_VADAPTER_TX_BROADCAST_BYTES, EFX_MAC_VADAPTER_TX_BAD_PACKETS, EFX_MAC_VADAPTER_TX_BAD_BYTES, EFX_MAC_VADAPTER_TX_OVERFLOW, EFX_MAC_NSTATS } efx_mac_stat_t; /* END MKCONFIG GENERATED EfxHeaderMacBlock */ #endif /* EFSYS_OPT_MAC_STATS */ typedef enum efx_link_mode_e { EFX_LINK_UNKNOWN = 0, EFX_LINK_DOWN, EFX_LINK_10HDX, EFX_LINK_10FDX, EFX_LINK_100HDX, EFX_LINK_100FDX, EFX_LINK_1000HDX, EFX_LINK_1000FDX, EFX_LINK_10000FDX, EFX_LINK_40000FDX, EFX_LINK_NMODES } efx_link_mode_t; #define EFX_MAC_ADDR_LEN 6 #define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01) #define EFX_MAC_MULTICAST_LIST_MAX 256 #define EFX_MAC_SDU_MAX 9202 #define EFX_MAC_PDU_ADJUSTMENT \ (/* EtherII */ 14 \ + /* VLAN */ 4 \ + /* CRC */ 4 \ + /* bug16011 */ 16) \ #define EFX_MAC_PDU(_sdu) \ P2ROUNDUP((_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8) /* * Due to the P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give * the SDU rounded up slightly. 
*/ #define EFX_MAC_SDU_FROM_PDU(_pdu) ((_pdu) - EFX_MAC_PDU_ADJUSTMENT) #define EFX_MAC_PDU_MIN 60 #define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX) extern __checkReturn efx_rc_t efx_mac_pdu_get( __in efx_nic_t *enp, __out size_t *pdu); extern __checkReturn efx_rc_t efx_mac_pdu_set( __in efx_nic_t *enp, __in size_t pdu); extern __checkReturn efx_rc_t efx_mac_addr_set( __in efx_nic_t *enp, __in uint8_t *addr); extern __checkReturn efx_rc_t efx_mac_filter_set( __in efx_nic_t *enp, __in boolean_t all_unicst, __in boolean_t mulcst, __in boolean_t all_mulcst, __in boolean_t brdcst); extern __checkReturn efx_rc_t efx_mac_multicast_list_set( __in efx_nic_t *enp, __in_ecount(6*count) uint8_t const *addrs, __in int count); extern __checkReturn efx_rc_t efx_mac_filter_default_rxq_set( __in efx_nic_t *enp, __in efx_rxq_t *erp, __in boolean_t using_rss); extern void efx_mac_filter_default_rxq_clear( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_mac_drain( __in efx_nic_t *enp, __in boolean_t enabled); extern __checkReturn efx_rc_t efx_mac_up( __in efx_nic_t *enp, __out boolean_t *mac_upp); #define EFX_FCNTL_RESPOND 0x00000001 #define EFX_FCNTL_GENERATE 0x00000002 extern __checkReturn efx_rc_t efx_mac_fcntl_set( __in efx_nic_t *enp, __in unsigned int fcntl, __in boolean_t autoneg); extern void efx_mac_fcntl_get( __in efx_nic_t *enp, __out unsigned int *fcntl_wantedp, __out unsigned int *fcntl_linkp); #if EFSYS_OPT_MAC_STATS #if EFSYS_OPT_NAMES extern __checkReturn const char * efx_mac_stat_name( __in efx_nic_t *enp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ #define EFX_MAC_STATS_MASK_BITS_PER_PAGE (8 * sizeof (uint32_t)) #define EFX_MAC_STATS_MASK_NPAGES \ (P2ROUNDUP(EFX_MAC_NSTATS, EFX_MAC_STATS_MASK_BITS_PER_PAGE) / \ EFX_MAC_STATS_MASK_BITS_PER_PAGE) /* * Get mask of MAC statistics supported by the hardware. * * If mask_size is insufficient to return the mask, EINVAL error is * returned. EFX_MAC_STATS_MASK_NPAGES multiplied by size of the page * (which is sizeof (uint32_t)) is sufficient. */ extern __checkReturn efx_rc_t efx_mac_stats_get_mask( __in efx_nic_t *enp, __out_bcount(mask_size) uint32_t *maskp, __in size_t mask_size); #define EFX_MAC_STAT_SUPPORTED(_mask, _stat) \ ((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \ (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1)))) #define EFX_MAC_STATS_SIZE 0x400 /* * Upload mac statistics supported by the hardware into the given buffer. * * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes, * and page aligned. * * The hardware will only DMA statistics that it understands (of course). * Drivers should not make any assumptions about which statistics are * supported, especially when the statistics are generated by firmware. * * Thus, drivers should zero this buffer before use, so that not-understood * statistics read back as zero. 
*/ extern __checkReturn efx_rc_t efx_mac_stats_upload( __in efx_nic_t *enp, __in efsys_mem_t *esmp); extern __checkReturn efx_rc_t efx_mac_stats_periodic( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __in uint16_t period_ms, __in boolean_t events); extern __checkReturn efx_rc_t efx_mac_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat, __inout_opt uint32_t *generationp); #endif /* EFSYS_OPT_MAC_STATS */ /* MON */ typedef enum efx_mon_type_e { EFX_MON_INVALID = 0, EFX_MON_SFC90X0, EFX_MON_SFC91X0, EFX_MON_SFC92X0, EFX_MON_NTYPES } efx_mon_type_t; #if EFSYS_OPT_NAMES extern const char * efx_mon_name( __in efx_nic_t *enp); #endif /* EFSYS_OPT_NAMES */ extern __checkReturn efx_rc_t efx_mon_init( __in efx_nic_t *enp); #if EFSYS_OPT_MON_STATS #define EFX_MON_STATS_PAGE_SIZE 0x100 #define EFX_MON_MASK_ELEMENT_SIZE 32 /* START MKCONFIG GENERATED MonitorHeaderStatsBlock 5d4ee5185e419abe */ typedef enum efx_mon_stat_e { EFX_MON_STAT_2_5V, EFX_MON_STAT_VCCP1, EFX_MON_STAT_VCC, EFX_MON_STAT_5V, EFX_MON_STAT_12V, EFX_MON_STAT_VCCP2, EFX_MON_STAT_EXT_TEMP, EFX_MON_STAT_INT_TEMP, EFX_MON_STAT_AIN1, EFX_MON_STAT_AIN2, EFX_MON_STAT_INT_COOLING, EFX_MON_STAT_EXT_COOLING, EFX_MON_STAT_1V, EFX_MON_STAT_1_2V, EFX_MON_STAT_1_8V, EFX_MON_STAT_3_3V, EFX_MON_STAT_1_2VA, EFX_MON_STAT_VREF, EFX_MON_STAT_VAOE, EFX_MON_STAT_AOE_TEMP, EFX_MON_STAT_PSU_AOE_TEMP, EFX_MON_STAT_PSU_TEMP, EFX_MON_STAT_FAN0, EFX_MON_STAT_FAN1, EFX_MON_STAT_FAN2, EFX_MON_STAT_FAN3, EFX_MON_STAT_FAN4, EFX_MON_STAT_VAOE_IN, EFX_MON_STAT_IAOE, EFX_MON_STAT_IAOE_IN, EFX_MON_STAT_NIC_POWER, EFX_MON_STAT_0_9V, EFX_MON_STAT_I0_9V, EFX_MON_STAT_I1_2V, EFX_MON_STAT_0_9V_ADC, EFX_MON_STAT_INT_TEMP2, EFX_MON_STAT_VREG_TEMP, EFX_MON_STAT_VREG_0_9V_TEMP, EFX_MON_STAT_VREG_1_2V_TEMP, EFX_MON_STAT_INT_VPTAT, EFX_MON_STAT_INT_ADC_TEMP, EFX_MON_STAT_EXT_VPTAT, EFX_MON_STAT_EXT_ADC_TEMP, EFX_MON_STAT_AMBIENT_TEMP, EFX_MON_STAT_AIRFLOW, EFX_MON_STAT_VDD08D_VSS08D_CSR, EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC, EFX_MON_STAT_HOTPOINT_TEMP, EFX_MON_STAT_PHY_POWER_SWITCH_PORT0, EFX_MON_STAT_PHY_POWER_SWITCH_PORT1, EFX_MON_STAT_MUM_VCC, EFX_MON_STAT_0V9_A, EFX_MON_STAT_I0V9_A, EFX_MON_STAT_0V9_A_TEMP, EFX_MON_STAT_0V9_B, EFX_MON_STAT_I0V9_B, EFX_MON_STAT_0V9_B_TEMP, EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY, EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXT_ADC, EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY, EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXT_ADC, EFX_MON_STAT_CONTROLLER_MASTER_VPTAT, EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP, EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXT_ADC, EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC, EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT, EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP, EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXT_ADC, EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC, EFX_MON_STAT_SODIMM_VOUT, EFX_MON_STAT_SODIMM_0_TEMP, EFX_MON_STAT_SODIMM_1_TEMP, EFX_MON_STAT_PHY0_VCC, EFX_MON_STAT_PHY1_VCC, EFX_MON_STAT_CONTROLLER_TDIODE_TEMP, EFX_MON_STAT_BOARD_FRONT_TEMP, EFX_MON_STAT_BOARD_BACK_TEMP, EFX_MON_NSTATS } efx_mon_stat_t; /* END MKCONFIG GENERATED MonitorHeaderStatsBlock */ typedef enum efx_mon_stat_state_e { EFX_MON_STAT_STATE_OK = 0, EFX_MON_STAT_STATE_WARNING = 1, EFX_MON_STAT_STATE_FATAL = 2, EFX_MON_STAT_STATE_BROKEN = 3, EFX_MON_STAT_STATE_NO_READING = 4, } efx_mon_stat_state_t; typedef struct efx_mon_stat_value_s { uint16_t emsv_value; uint16_t emsv_state; } efx_mon_stat_value_t; #if EFSYS_OPT_NAMES extern const char * efx_mon_stat_name( __in efx_nic_t *enp, __in efx_mon_stat_t id); #endif /* 
EFSYS_OPT_NAMES */ extern __checkReturn efx_rc_t efx_mon_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values); #endif /* EFSYS_OPT_MON_STATS */ extern void efx_mon_fini( __in efx_nic_t *enp); /* PHY */ extern __checkReturn efx_rc_t efx_phy_verify( __in efx_nic_t *enp); #if EFSYS_OPT_PHY_LED_CONTROL typedef enum efx_phy_led_mode_e { EFX_PHY_LED_DEFAULT = 0, EFX_PHY_LED_OFF, EFX_PHY_LED_ON, EFX_PHY_LED_FLASH, EFX_PHY_LED_NMODES } efx_phy_led_mode_t; extern __checkReturn efx_rc_t efx_phy_led_set( __in efx_nic_t *enp, __in efx_phy_led_mode_t mode); #endif /* EFSYS_OPT_PHY_LED_CONTROL */ extern __checkReturn efx_rc_t efx_port_init( __in efx_nic_t *enp); #if EFSYS_OPT_LOOPBACK typedef enum efx_loopback_type_e { EFX_LOOPBACK_OFF = 0, EFX_LOOPBACK_DATA = 1, EFX_LOOPBACK_GMAC = 2, EFX_LOOPBACK_XGMII = 3, EFX_LOOPBACK_XGXS = 4, EFX_LOOPBACK_XAUI = 5, EFX_LOOPBACK_GMII = 6, EFX_LOOPBACK_SGMII = 7, EFX_LOOPBACK_XGBR = 8, EFX_LOOPBACK_XFI = 9, EFX_LOOPBACK_XAUI_FAR = 10, EFX_LOOPBACK_GMII_FAR = 11, EFX_LOOPBACK_SGMII_FAR = 12, EFX_LOOPBACK_XFI_FAR = 13, EFX_LOOPBACK_GPHY = 14, EFX_LOOPBACK_PHY_XS = 15, EFX_LOOPBACK_PCS = 16, EFX_LOOPBACK_PMA_PMD = 17, EFX_LOOPBACK_XPORT = 18, EFX_LOOPBACK_XGMII_WS = 19, EFX_LOOPBACK_XAUI_WS = 20, EFX_LOOPBACK_XAUI_WS_FAR = 21, EFX_LOOPBACK_XAUI_WS_NEAR = 22, EFX_LOOPBACK_GMII_WS = 23, EFX_LOOPBACK_XFI_WS = 24, EFX_LOOPBACK_XFI_WS_FAR = 25, EFX_LOOPBACK_PHYXS_WS = 26, EFX_LOOPBACK_PMA_INT = 27, EFX_LOOPBACK_SD_NEAR = 28, EFX_LOOPBACK_SD_FAR = 29, EFX_LOOPBACK_PMA_INT_WS = 30, EFX_LOOPBACK_SD_FEP2_WS = 31, EFX_LOOPBACK_SD_FEP1_5_WS = 32, EFX_LOOPBACK_SD_FEP_WS = 33, EFX_LOOPBACK_SD_FES_WS = 34, EFX_LOOPBACK_NTYPES } efx_loopback_type_t; typedef enum efx_loopback_kind_e { EFX_LOOPBACK_KIND_OFF = 0, EFX_LOOPBACK_KIND_ALL, EFX_LOOPBACK_KIND_MAC, EFX_LOOPBACK_KIND_PHY, EFX_LOOPBACK_NKINDS } efx_loopback_kind_t; extern void efx_loopback_mask( __in efx_loopback_kind_t loopback_kind, __out efx_qword_t *maskp); extern __checkReturn efx_rc_t efx_port_loopback_set( __in efx_nic_t *enp, __in efx_link_mode_t link_mode, __in efx_loopback_type_t type); #if EFSYS_OPT_NAMES extern __checkReturn const char * efx_loopback_type_name( __in efx_nic_t *enp, __in efx_loopback_type_t type); #endif /* EFSYS_OPT_NAMES */ #endif /* EFSYS_OPT_LOOPBACK */ extern __checkReturn efx_rc_t efx_port_poll( __in efx_nic_t *enp, __out_opt efx_link_mode_t *link_modep); extern void efx_port_fini( __in efx_nic_t *enp); typedef enum efx_phy_cap_type_e { EFX_PHY_CAP_INVALID = 0, EFX_PHY_CAP_10HDX, EFX_PHY_CAP_10FDX, EFX_PHY_CAP_100HDX, EFX_PHY_CAP_100FDX, EFX_PHY_CAP_1000HDX, EFX_PHY_CAP_1000FDX, EFX_PHY_CAP_10000FDX, EFX_PHY_CAP_PAUSE, EFX_PHY_CAP_ASYM, EFX_PHY_CAP_AN, EFX_PHY_CAP_40000FDX, EFX_PHY_CAP_NTYPES } efx_phy_cap_type_t; #define EFX_PHY_CAP_CURRENT 0x00000000 #define EFX_PHY_CAP_DEFAULT 0x00000001 #define EFX_PHY_CAP_PERM 0x00000002 extern void efx_phy_adv_cap_get( __in efx_nic_t *enp, __in uint32_t flag, __out uint32_t *maskp); extern __checkReturn efx_rc_t efx_phy_adv_cap_set( __in efx_nic_t *enp, __in uint32_t mask); extern void efx_phy_lp_cap_get( __in efx_nic_t *enp, __out uint32_t *maskp); extern __checkReturn efx_rc_t efx_phy_oui_get( __in efx_nic_t *enp, __out uint32_t *ouip); typedef enum efx_phy_media_type_e { EFX_PHY_MEDIA_INVALID = 0, EFX_PHY_MEDIA_XAUI, EFX_PHY_MEDIA_CX4, EFX_PHY_MEDIA_KX4, EFX_PHY_MEDIA_XFP, EFX_PHY_MEDIA_SFP_PLUS, EFX_PHY_MEDIA_BASE_T, EFX_PHY_MEDIA_QSFP_PLUS, EFX_PHY_MEDIA_NTYPES } efx_phy_media_type_t; 
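/*
 * Illustrative sketch, not part of the original header: advertised PHY
 * capabilities are exchanged as a bitmask in which each efx_phy_cap_type_t
 * value is a bit position.  This hypothetical helper drops 10G full duplex
 * from the currently advertised set; the helper name is an assumption, and
 * only the efx_phy_adv_cap_get()/efx_phy_adv_cap_set() calls come from the
 * declarations above.
 */
static	__checkReturn	efx_rc_t
example_phy_disable_10g(
	__in		efx_nic_t *enp)
{
	uint32_t mask;

	/* Read the capability mask currently being advertised */
	efx_phy_adv_cap_get(enp, EFX_PHY_CAP_CURRENT, &mask);

	/* Each efx_phy_cap_type_t value is a bit position in the mask */
	mask &= ~(1U << EFX_PHY_CAP_10000FDX);

	/* Apply the reduced mask; with EFX_PHY_CAP_AN set the link may renegotiate */
	return (efx_phy_adv_cap_set(enp, mask));
}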
/* Get the type of medium currently used. If the board has ports for * modules, a module is present, and we recognise the media type of * the module, then this will be the media type of the module. * Otherwise it will be the media type of the port. */ extern void efx_phy_media_type_get( __in efx_nic_t *enp, __out efx_phy_media_type_t *typep); extern efx_rc_t efx_phy_module_get_info( __in efx_nic_t *enp, __in uint8_t dev_addr, __in uint8_t offset, __in uint8_t len, __out_bcount(len) uint8_t *data); #if EFSYS_OPT_PHY_STATS /* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */ typedef enum efx_phy_stat_e { EFX_PHY_STAT_OUI, EFX_PHY_STAT_PMA_PMD_LINK_UP, EFX_PHY_STAT_PMA_PMD_RX_FAULT, EFX_PHY_STAT_PMA_PMD_TX_FAULT, EFX_PHY_STAT_PMA_PMD_REV_A, EFX_PHY_STAT_PMA_PMD_REV_B, EFX_PHY_STAT_PMA_PMD_REV_C, EFX_PHY_STAT_PMA_PMD_REV_D, EFX_PHY_STAT_PCS_LINK_UP, EFX_PHY_STAT_PCS_RX_FAULT, EFX_PHY_STAT_PCS_TX_FAULT, EFX_PHY_STAT_PCS_BER, EFX_PHY_STAT_PCS_BLOCK_ERRORS, EFX_PHY_STAT_PHY_XS_LINK_UP, EFX_PHY_STAT_PHY_XS_RX_FAULT, EFX_PHY_STAT_PHY_XS_TX_FAULT, EFX_PHY_STAT_PHY_XS_ALIGN, EFX_PHY_STAT_PHY_XS_SYNC_A, EFX_PHY_STAT_PHY_XS_SYNC_B, EFX_PHY_STAT_PHY_XS_SYNC_C, EFX_PHY_STAT_PHY_XS_SYNC_D, EFX_PHY_STAT_AN_LINK_UP, EFX_PHY_STAT_AN_MASTER, EFX_PHY_STAT_AN_LOCAL_RX_OK, EFX_PHY_STAT_AN_REMOTE_RX_OK, EFX_PHY_STAT_CL22EXT_LINK_UP, EFX_PHY_STAT_SNR_A, EFX_PHY_STAT_SNR_B, EFX_PHY_STAT_SNR_C, EFX_PHY_STAT_SNR_D, EFX_PHY_STAT_PMA_PMD_SIGNAL_A, EFX_PHY_STAT_PMA_PMD_SIGNAL_B, EFX_PHY_STAT_PMA_PMD_SIGNAL_C, EFX_PHY_STAT_PMA_PMD_SIGNAL_D, EFX_PHY_STAT_AN_COMPLETE, EFX_PHY_STAT_PMA_PMD_REV_MAJOR, EFX_PHY_STAT_PMA_PMD_REV_MINOR, EFX_PHY_STAT_PMA_PMD_REV_MICRO, EFX_PHY_STAT_PCS_FW_VERSION_0, EFX_PHY_STAT_PCS_FW_VERSION_1, EFX_PHY_STAT_PCS_FW_VERSION_2, EFX_PHY_STAT_PCS_FW_VERSION_3, EFX_PHY_STAT_PCS_FW_BUILD_YY, EFX_PHY_STAT_PCS_FW_BUILD_MM, EFX_PHY_STAT_PCS_FW_BUILD_DD, EFX_PHY_STAT_PCS_OP_MODE, EFX_PHY_NSTATS } efx_phy_stat_t; /* END MKCONFIG GENERATED PhyHeaderStatsBlock */ #if EFSYS_OPT_NAMES extern const char * efx_phy_stat_name( __in efx_nic_t *enp, __in efx_phy_stat_t stat); #endif /* EFSYS_OPT_NAMES */ #define EFX_PHY_STATS_SIZE 0x100 extern __checkReturn efx_rc_t efx_phy_stats_update( __in efx_nic_t *enp, __in efsys_mem_t *esmp, __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat); #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_BIST typedef enum efx_bist_type_e { EFX_BIST_TYPE_UNKNOWN, EFX_BIST_TYPE_PHY_NORMAL, EFX_BIST_TYPE_PHY_CABLE_SHORT, EFX_BIST_TYPE_PHY_CABLE_LONG, EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */ EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus*/ EFX_BIST_TYPE_REG, /* Test the register memories */ EFX_BIST_TYPE_NTYPES, } efx_bist_type_t; typedef enum efx_bist_result_e { EFX_BIST_RESULT_UNKNOWN, EFX_BIST_RESULT_RUNNING, EFX_BIST_RESULT_PASSED, EFX_BIST_RESULT_FAILED, } efx_bist_result_t; typedef enum efx_phy_cable_status_e { EFX_PHY_CABLE_STATUS_OK, EFX_PHY_CABLE_STATUS_INVALID, EFX_PHY_CABLE_STATUS_OPEN, EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT, EFX_PHY_CABLE_STATUS_INTERPAIRSHORT, EFX_PHY_CABLE_STATUS_BUSY, } efx_phy_cable_status_t; typedef enum efx_bist_value_e { EFX_BIST_PHY_CABLE_LENGTH_A, EFX_BIST_PHY_CABLE_LENGTH_B, EFX_BIST_PHY_CABLE_LENGTH_C, EFX_BIST_PHY_CABLE_LENGTH_D, EFX_BIST_PHY_CABLE_STATUS_A, EFX_BIST_PHY_CABLE_STATUS_B, EFX_BIST_PHY_CABLE_STATUS_C, EFX_BIST_PHY_CABLE_STATUS_D, EFX_BIST_FAULT_CODE, /* Memory BIST specific values. These match to the MC_CMD_BIST_POLL * response. 
*/ EFX_BIST_MEM_TEST, EFX_BIST_MEM_ADDR, EFX_BIST_MEM_BUS, EFX_BIST_MEM_EXPECT, EFX_BIST_MEM_ACTUAL, EFX_BIST_MEM_ECC, EFX_BIST_MEM_ECC_PARITY, EFX_BIST_MEM_ECC_FATAL, EFX_BIST_NVALUES, } efx_bist_value_t; extern __checkReturn efx_rc_t efx_bist_enable_offline( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_bist_start( __in efx_nic_t *enp, __in efx_bist_type_t type); extern __checkReturn efx_rc_t efx_bist_poll( __in efx_nic_t *enp, __in efx_bist_type_t type, __out efx_bist_result_t *resultp, __out_opt uint32_t *value_maskp, __out_ecount_opt(count) unsigned long *valuesp, __in size_t count); extern void efx_bist_stop( __in efx_nic_t *enp, __in efx_bist_type_t type); #endif /* EFSYS_OPT_BIST */ #define EFX_FEATURE_IPV6 0x00000001 #define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002 #define EFX_FEATURE_LINK_EVENTS 0x00000004 #define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008 #define EFX_FEATURE_WOL 0x00000010 #define EFX_FEATURE_MCDI 0x00000020 #define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040 #define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080 #define EFX_FEATURE_TURBO 0x00000100 #define EFX_FEATURE_MCDI_DMA 0x00000200 #define EFX_FEATURE_TX_SRC_FILTERS 0x00000400 #define EFX_FEATURE_PIO_BUFFERS 0x00000800 #define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000 #define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000 typedef struct efx_nic_cfg_s { uint32_t enc_board_type; uint32_t enc_phy_type; #if EFSYS_OPT_NAMES char enc_phy_name[21]; #endif char enc_phy_revision[21]; efx_mon_type_t enc_mon_type; #if EFSYS_OPT_MON_STATS uint32_t enc_mon_stat_dma_buf_size; uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32]; #endif unsigned int enc_features; uint8_t enc_mac_addr[6]; uint8_t enc_port; /* PHY port number */ uint32_t enc_func_flags; uint32_t enc_intr_vec_base; uint32_t enc_intr_limit; uint32_t enc_evq_limit; uint32_t enc_txq_limit; uint32_t enc_rxq_limit; uint32_t enc_buftbl_limit; uint32_t enc_piobuf_limit; uint32_t enc_piobuf_size; uint32_t enc_piobuf_min_alloc_size; uint32_t enc_evq_timer_quantum_ns; uint32_t enc_evq_timer_max_us; uint32_t enc_clk_mult; uint32_t enc_rx_prefix_size; uint32_t enc_rx_buf_align_start; uint32_t enc_rx_buf_align_end; #if EFSYS_OPT_LOOPBACK efx_qword_t enc_loopback_types[EFX_LINK_NMODES]; #endif /* EFSYS_OPT_LOOPBACK */ #if EFSYS_OPT_PHY_FLAGS uint32_t enc_phy_flags_mask; #endif /* EFSYS_OPT_PHY_FLAGS */ #if EFSYS_OPT_PHY_LED_CONTROL uint32_t enc_led_mask; #endif /* EFSYS_OPT_PHY_LED_CONTROL */ #if EFSYS_OPT_PHY_STATS uint64_t enc_phy_stat_mask; #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_MCDI uint8_t enc_mcdi_mdio_channel; #if EFSYS_OPT_PHY_STATS uint32_t enc_mcdi_phy_stat_mask; #endif /* EFSYS_OPT_PHY_STATS */ #if EFSYS_OPT_MON_STATS uint32_t *enc_mcdi_sensor_maskp; uint32_t enc_mcdi_sensor_mask_size; #endif /* EFSYS_OPT_MON_STATS */ #endif /* EFSYS_OPT_MCDI */ #if EFSYS_OPT_BIST uint32_t enc_bist_mask; #endif /* EFSYS_OPT_BIST */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD uint32_t enc_pf; uint32_t enc_vf; uint32_t enc_privilege_mask; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ boolean_t enc_bug26807_workaround; boolean_t enc_bug35388_workaround; boolean_t enc_bug41750_workaround; boolean_t enc_bug61265_workaround; boolean_t enc_rx_batching_enabled; /* Maximum number of descriptors completed in an rx event. */ uint32_t enc_rx_batch_max; /* Number of rx descriptors the hardware requires for a push. */ uint32_t enc_rx_push_align; /* * Maximum number of bytes into the packet the TCP header can start for * the hardware to apply TSO packet edits. 
*/ uint32_t enc_tx_tso_tcp_header_offset_limit; boolean_t enc_fw_assisted_tso_enabled; boolean_t enc_fw_assisted_tso_v2_enabled; /* Number of TSO contexts on the NIC (FATSOv2) */ uint32_t enc_fw_assisted_tso_v2_n_contexts; boolean_t enc_hw_tx_insert_vlan_enabled; /* Number of PFs on the NIC */ uint32_t enc_hw_pf_count; /* Datapath firmware vadapter/vport/vswitch support */ boolean_t enc_datapath_cap_evb; boolean_t enc_rx_disable_scatter_supported; boolean_t enc_allow_set_mac_with_installed_filters; boolean_t enc_enhanced_set_mac_supported; boolean_t enc_init_evq_v2_supported; boolean_t enc_pm_and_rxdp_counters; boolean_t enc_mac_stats_40g_tx_size_bins; /* External port identifier */ uint8_t enc_external_port; uint32_t enc_mcdi_max_payload_length; /* VPD may be per-PF or global */ boolean_t enc_vpd_is_global; /* Minimum unidirectional bandwidth in Mb/s to max out all ports */ uint32_t enc_required_pcie_bandwidth_mbps; uint32_t enc_max_pcie_link_gen; } efx_nic_cfg_t; #define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff) #define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff) #define EFX_PCI_FUNCTION(_encp) \ (EFX_PCI_FUNCTION_IS_PF(_encp) ? (_encp)->enc_pf : (_encp)->enc_vf) #define EFX_PCI_VF_PARENT(_encp) ((_encp)->enc_pf) extern const efx_nic_cfg_t * efx_nic_cfg_get( __in efx_nic_t *enp); /* Driver resource limits (minimum required/maximum usable). */ typedef struct efx_drv_limits_s { uint32_t edl_min_evq_count; uint32_t edl_max_evq_count; uint32_t edl_min_rxq_count; uint32_t edl_max_rxq_count; uint32_t edl_min_txq_count; uint32_t edl_max_txq_count; /* PIO blocks (sub-allocated from piobuf) */ uint32_t edl_min_pio_alloc_size; uint32_t edl_max_pio_alloc_count; } efx_drv_limits_t; extern __checkReturn efx_rc_t efx_nic_set_drv_limits( __inout efx_nic_t *enp, __in efx_drv_limits_t *edlp); typedef enum efx_nic_region_e { EFX_REGION_VI, /* Memory BAR UC mapping */ EFX_REGION_PIO_WRITE_VI, /* Memory BAR WC mapping */ } efx_nic_region_t; extern __checkReturn efx_rc_t efx_nic_get_bar_region( __in efx_nic_t *enp, __in efx_nic_region_t region, __out uint32_t *offsetp, __out size_t *sizep); extern __checkReturn efx_rc_t efx_nic_get_vi_pool( __in efx_nic_t *enp, __out uint32_t *evq_countp, __out uint32_t *rxq_countp, __out uint32_t *txq_countp); #if EFSYS_OPT_VPD typedef enum efx_vpd_tag_e { EFX_VPD_ID = 0x02, EFX_VPD_END = 0x0f, EFX_VPD_RO = 0x10, EFX_VPD_RW = 0x11, } efx_vpd_tag_t; typedef uint16_t efx_vpd_keyword_t; typedef struct efx_vpd_value_s { efx_vpd_tag_t evv_tag; efx_vpd_keyword_t evv_keyword; uint8_t evv_length; uint8_t evv_value[0x100]; } efx_vpd_value_t; #define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8)) extern __checkReturn efx_rc_t efx_vpd_init( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_vpd_size( __in efx_nic_t *enp, __out size_t *sizep); extern __checkReturn efx_rc_t efx_vpd_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t efx_vpd_verify( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t efx_vpd_reinit( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t efx_vpd_get( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size, __inout efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t efx_vpd_set( __in efx_nic_t *enp, __inout_bcount(size) caddr_t data, __in size_t size, __in efx_vpd_value_t *evvp); extern __checkReturn efx_rc_t efx_vpd_next( __in efx_nic_t *enp, 
__inout_bcount(size) caddr_t data, __in size_t size, __out efx_vpd_value_t *evvp, __inout unsigned int *contp); extern __checkReturn efx_rc_t efx_vpd_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); extern void efx_vpd_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_VPD */ /* NVRAM */ #if EFSYS_OPT_NVRAM typedef enum efx_nvram_type_e { EFX_NVRAM_INVALID = 0, EFX_NVRAM_BOOTROM, EFX_NVRAM_BOOTROM_CFG, EFX_NVRAM_MC_FIRMWARE, EFX_NVRAM_MC_GOLDEN, EFX_NVRAM_PHY, EFX_NVRAM_NULLPHY, EFX_NVRAM_FPGA, EFX_NVRAM_FCFW, EFX_NVRAM_CPLD, EFX_NVRAM_FPGA_BACKUP, EFX_NVRAM_DYNAMIC_CFG, EFX_NVRAM_LICENSE, EFX_NVRAM_UEFIROM, EFX_NVRAM_NTYPES, } efx_nvram_type_t; extern __checkReturn efx_rc_t efx_nvram_init( __in efx_nic_t *enp); #if EFSYS_OPT_DIAG extern __checkReturn efx_rc_t efx_nvram_test( __in efx_nic_t *enp); #endif /* EFSYS_OPT_DIAG */ extern __checkReturn efx_rc_t efx_nvram_size( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out size_t *sizep); extern __checkReturn efx_rc_t efx_nvram_rw_start( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out_opt size_t *pref_chunkp); extern void efx_nvram_rw_finish( __in efx_nic_t *enp, __in efx_nvram_type_t type); extern __checkReturn efx_rc_t efx_nvram_get_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __out uint32_t *subtypep, __out_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t efx_nvram_read_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __out_bcount(size) caddr_t data, __in size_t size); extern __checkReturn efx_rc_t efx_nvram_set_version( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_ecount(4) uint16_t version[4]); extern __checkReturn efx_rc_t efx_nvram_validate( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in_bcount(partn_size) caddr_t partn_data, __in size_t partn_size); extern __checkReturn efx_rc_t efx_nvram_erase( __in efx_nic_t *enp, __in efx_nvram_type_t type); extern __checkReturn efx_rc_t efx_nvram_write_chunk( __in efx_nic_t *enp, __in efx_nvram_type_t type, __in unsigned int offset, __in_bcount(size) caddr_t data, __in size_t size); extern void efx_nvram_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_NVRAM */ #if EFSYS_OPT_BOOTCFG extern efx_rc_t efx_bootcfg_read( __in efx_nic_t *enp, __out_bcount(size) caddr_t data, __in size_t size); extern efx_rc_t efx_bootcfg_write( __in efx_nic_t *enp, __in_bcount(size) caddr_t data, __in size_t size); #endif /* EFSYS_OPT_BOOTCFG */ #if EFSYS_OPT_WOL typedef enum efx_wol_type_e { EFX_WOL_TYPE_INVALID, EFX_WOL_TYPE_MAGIC, EFX_WOL_TYPE_BITMAP, EFX_WOL_TYPE_LINK, EFX_WOL_NTYPES, } efx_wol_type_t; typedef enum efx_lightsout_offload_type_e { EFX_LIGHTSOUT_OFFLOAD_TYPE_INVALID, EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP, EFX_LIGHTSOUT_OFFLOAD_TYPE_NS, } efx_lightsout_offload_type_t; #define EFX_WOL_BITMAP_MASK_SIZE (48) #define EFX_WOL_BITMAP_VALUE_SIZE (128) typedef union efx_wol_param_u { struct { uint8_t mac_addr[6]; } ewp_magic; struct { uint8_t mask[EFX_WOL_BITMAP_MASK_SIZE]; /* 1 bit per byte */ uint8_t value[EFX_WOL_BITMAP_VALUE_SIZE]; /* value to match */ uint8_t value_len; } ewp_bitmap; } efx_wol_param_t; typedef union efx_lightsout_offload_param_u { struct { uint8_t mac_addr[6]; uint32_t ip; } elop_arp; struct { uint8_t mac_addr[6]; uint32_t solicited_node[4]; uint32_t ip[4]; } elop_ns; } efx_lightsout_offload_param_t; extern __checkReturn efx_rc_t efx_wol_init( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_wol_filter_clear( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_wol_filter_add( 
__in efx_nic_t *enp, __in efx_wol_type_t type, __in efx_wol_param_t *paramp, __out uint32_t *filter_idp); extern __checkReturn efx_rc_t efx_wol_filter_remove( __in efx_nic_t *enp, __in uint32_t filter_id); extern __checkReturn efx_rc_t efx_lightsout_offload_add( __in efx_nic_t *enp, __in efx_lightsout_offload_type_t type, __in efx_lightsout_offload_param_t *paramp, __out uint32_t *filter_idp); extern __checkReturn efx_rc_t efx_lightsout_offload_remove( __in efx_nic_t *enp, __in efx_lightsout_offload_type_t type, __in uint32_t filter_id); extern void efx_wol_fini( __in efx_nic_t *enp); #endif /* EFSYS_OPT_WOL */ #if EFSYS_OPT_DIAG typedef enum efx_pattern_type_t { EFX_PATTERN_BYTE_INCREMENT = 0, EFX_PATTERN_ALL_THE_SAME, EFX_PATTERN_BIT_ALTERNATE, EFX_PATTERN_BYTE_ALTERNATE, EFX_PATTERN_BYTE_CHANGING, EFX_PATTERN_BIT_SWEEP, EFX_PATTERN_NTYPES } efx_pattern_type_t; typedef void (*efx_sram_pattern_fn_t)( __in size_t row, __in boolean_t negate, __out efx_qword_t *eqp); extern __checkReturn efx_rc_t efx_sram_test( __in efx_nic_t *enp, __in efx_pattern_type_t type); #endif /* EFSYS_OPT_DIAG */ extern __checkReturn efx_rc_t efx_sram_buf_tbl_set( __in efx_nic_t *enp, __in uint32_t id, __in efsys_mem_t *esmp, __in size_t n); extern void efx_sram_buf_tbl_clear( __in efx_nic_t *enp, __in uint32_t id, __in size_t n); #define EFX_BUF_TBL_SIZE 0x20000 #define EFX_BUF_SIZE 4096 /* EV */ typedef struct efx_evq_s efx_evq_t; #if EFSYS_OPT_QSTATS /* START MKCONFIG GENERATED EfxHeaderEventQueueBlock 6f3843f5fe7cc843 */ typedef enum efx_ev_qstat_e { EV_ALL, EV_RX, EV_RX_OK, EV_RX_FRM_TRUNC, EV_RX_TOBE_DISC, EV_RX_PAUSE_FRM_ERR, EV_RX_BUF_OWNER_ID_ERR, EV_RX_IPV4_HDR_CHKSUM_ERR, EV_RX_TCP_UDP_CHKSUM_ERR, EV_RX_ETH_CRC_ERR, EV_RX_IP_FRAG_ERR, EV_RX_MCAST_PKT, EV_RX_MCAST_HASH_MATCH, EV_RX_TCP_IPV4, EV_RX_TCP_IPV6, EV_RX_UDP_IPV4, EV_RX_UDP_IPV6, EV_RX_OTHER_IPV4, EV_RX_OTHER_IPV6, EV_RX_NON_IP, EV_RX_BATCH, EV_TX, EV_TX_WQ_FF_FULL, EV_TX_PKT_ERR, EV_TX_PKT_TOO_BIG, EV_TX_UNEXPECTED, EV_GLOBAL, EV_GLOBAL_MNT, EV_DRIVER, EV_DRIVER_SRM_UPD_DONE, EV_DRIVER_TX_DESCQ_FLS_DONE, EV_DRIVER_RX_DESCQ_FLS_DONE, EV_DRIVER_RX_DESCQ_FLS_FAILED, EV_DRIVER_RX_DSC_ERROR, EV_DRIVER_TX_DSC_ERROR, EV_DRV_GEN, EV_MCDI_RESPONSE, EV_NQSTATS } efx_ev_qstat_t; /* END MKCONFIG GENERATED EfxHeaderEventQueueBlock */ #endif /* EFSYS_OPT_QSTATS */ extern __checkReturn efx_rc_t efx_ev_init( __in efx_nic_t *enp); extern void efx_ev_fini( __in efx_nic_t *enp); #define EFX_EVQ_MAXNEVS 32768 #define EFX_EVQ_MINNEVS 512 #define EFX_EVQ_SIZE(_nevs) ((_nevs) * sizeof (efx_qword_t)) #define EFX_EVQ_NBUFS(_nevs) (EFX_EVQ_SIZE(_nevs) / EFX_BUF_SIZE) #define EFX_EVQ_FLAGS_TYPE_MASK (0x3) #define EFX_EVQ_FLAGS_TYPE_AUTO (0x0) #define EFX_EVQ_FLAGS_TYPE_THROUGHPUT (0x1) #define EFX_EVQ_FLAGS_TYPE_LOW_LATENCY (0x2) extern __checkReturn efx_rc_t efx_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint32_t us, __in uint32_t flags, __deref_out efx_evq_t **eepp); extern void efx_ev_qpost( __in efx_evq_t *eep, __in uint16_t data); typedef __checkReturn boolean_t (*efx_initialized_ev_t)( __in_opt void *arg); #define EFX_PKT_UNICAST 0x0004 #define EFX_PKT_START 0x0008 #define EFX_PKT_VLAN_TAGGED 0x0010 #define EFX_CKSUM_TCPUDP 0x0020 #define EFX_CKSUM_IPV4 0x0040 #define EFX_PKT_CONT 0x0080 #define EFX_CHECK_VLAN 0x0100 #define EFX_PKT_TCP 0x0200 #define EFX_PKT_UDP 0x0400 #define EFX_PKT_IPV4 0x0800 #define EFX_PKT_IPV6 0x1000 #define EFX_PKT_PREFIX_LEN 0x2000 #define EFX_ADDR_MISMATCH 0x4000 
#define EFX_DISCARD 0x8000 #define EFX_EV_RX_NLABELS 32 #define EFX_EV_TX_NLABELS 32 typedef __checkReturn boolean_t (*efx_rx_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t id, __in uint32_t size, __in uint16_t flags); typedef __checkReturn boolean_t (*efx_tx_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t id); #define EFX_EXCEPTION_RX_RECOVERY 0x00000001 #define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002 #define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003 #define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004 #define EFX_EXCEPTION_FWALERT_SRAM 0x00000005 #define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006 #define EFX_EXCEPTION_RX_ERROR 0x00000007 #define EFX_EXCEPTION_TX_ERROR 0x00000008 #define EFX_EXCEPTION_EV_ERROR 0x00000009 typedef __checkReturn boolean_t (*efx_exception_ev_t)( __in_opt void *arg, __in uint32_t label, __in uint32_t data); typedef __checkReturn boolean_t (*efx_rxq_flush_done_ev_t)( __in_opt void *arg, __in uint32_t rxq_index); typedef __checkReturn boolean_t (*efx_rxq_flush_failed_ev_t)( __in_opt void *arg, __in uint32_t rxq_index); typedef __checkReturn boolean_t (*efx_txq_flush_done_ev_t)( __in_opt void *arg, __in uint32_t txq_index); typedef __checkReturn boolean_t (*efx_software_ev_t)( __in_opt void *arg, __in uint16_t magic); typedef __checkReturn boolean_t (*efx_sram_ev_t)( __in_opt void *arg, __in uint32_t code); #define EFX_SRAM_CLEAR 0 #define EFX_SRAM_UPDATE 1 #define EFX_SRAM_ILLEGAL_CLEAR 2 typedef __checkReturn boolean_t (*efx_wake_up_ev_t)( __in_opt void *arg, __in uint32_t label); typedef __checkReturn boolean_t (*efx_timer_ev_t)( __in_opt void *arg, __in uint32_t label); typedef __checkReturn boolean_t (*efx_link_change_ev_t)( __in_opt void *arg, __in efx_link_mode_t link_mode); #if EFSYS_OPT_MON_STATS typedef __checkReturn boolean_t (*efx_monitor_ev_t)( __in_opt void *arg, __in efx_mon_stat_t id, __in efx_mon_stat_value_t value); #endif /* EFSYS_OPT_MON_STATS */ #if EFSYS_OPT_MAC_STATS typedef __checkReturn boolean_t (*efx_mac_stats_ev_t)( __in_opt void *arg, __in uint32_t generation ); #endif /* EFSYS_OPT_MAC_STATS */ typedef struct efx_ev_callbacks_s { efx_initialized_ev_t eec_initialized; efx_rx_ev_t eec_rx; efx_tx_ev_t eec_tx; efx_exception_ev_t eec_exception; efx_rxq_flush_done_ev_t eec_rxq_flush_done; efx_rxq_flush_failed_ev_t eec_rxq_flush_failed; efx_txq_flush_done_ev_t eec_txq_flush_done; efx_software_ev_t eec_software; efx_sram_ev_t eec_sram; efx_wake_up_ev_t eec_wake_up; efx_timer_ev_t eec_timer; efx_link_change_ev_t eec_link_change; #if EFSYS_OPT_MON_STATS efx_monitor_ev_t eec_monitor; #endif /* EFSYS_OPT_MON_STATS */ #if EFSYS_OPT_MAC_STATS efx_mac_stats_ev_t eec_mac_stats; #endif /* EFSYS_OPT_MAC_STATS */ } efx_ev_callbacks_t; extern __checkReturn boolean_t efx_ev_qpending( __in efx_evq_t *eep, __in unsigned int count); #if EFSYS_OPT_EV_PREFETCH extern void efx_ev_qprefetch( __in efx_evq_t *eep, __in unsigned int count); #endif /* EFSYS_OPT_EV_PREFETCH */ extern void efx_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg); extern __checkReturn efx_rc_t efx_ev_usecs_to_ticks( __in efx_nic_t *enp, __in unsigned int usecs, __out unsigned int *ticksp); extern __checkReturn efx_rc_t efx_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us); extern __checkReturn efx_rc_t efx_ev_qprime( __in efx_evq_t *eep, __in unsigned int count); #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES extern const char * efx_ev_qstat_name( __in efx_nic_t *enp, __in unsigned int id); #endif /* 
EFSYS_OPT_NAMES */ extern void efx_ev_qstats_update( __in efx_evq_t *eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ extern void efx_ev_qdestroy( __in efx_evq_t *eep); /* RX */ extern __checkReturn efx_rc_t efx_rx_init( __inout efx_nic_t *enp); extern void efx_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_RX_SCATTER __checkReturn efx_rc_t efx_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE typedef enum efx_rx_hash_alg_e { EFX_RX_HASHALG_LFSR = 0, EFX_RX_HASHALG_TOEPLITZ } efx_rx_hash_alg_t; typedef enum efx_rx_hash_type_e { EFX_RX_HASH_IPV4 = 0, EFX_RX_HASH_TCPIPV4, EFX_RX_HASH_IPV6, EFX_RX_HASH_TCPIPV6, } efx_rx_hash_type_t; typedef enum efx_rx_hash_support_e { EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */ EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */ } efx_rx_hash_support_t; #define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */ #define EFX_MAXRSS 64 /* RX indirection entry range */ #define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */ typedef enum efx_rx_scale_support_e { EFX_RX_SCALE_UNAVAILABLE = 0, /* Not supported */ EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */ EFX_RX_SCALE_SHARED /* Read-only key/indirection table */ } efx_rx_scale_support_t; extern __checkReturn efx_rc_t efx_rx_hash_support_get( __in efx_nic_t *enp, __out efx_rx_hash_support_t *supportp); extern __checkReturn efx_rc_t efx_rx_scale_support_get( __in efx_nic_t *enp, __out efx_rx_scale_support_t *supportp); extern __checkReturn efx_rc_t efx_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); extern __checkReturn efx_rc_t efx_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n); extern __checkReturn efx_rc_t efx_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n); extern __checkReturn uint32_t efx_psuedo_hdr_hash_get( - __in efx_nic_t *enp, + __in efx_rxq_t *erp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer); #endif /* EFSYS_OPT_RX_SCALE */ extern __checkReturn efx_rc_t efx_psuedo_hdr_pkt_length_get( - __in efx_nic_t *enp, + __in efx_rxq_t *erp, __in uint8_t *buffer, __out uint16_t *pkt_lengthp); #define EFX_RXQ_MAXNDESCS 4096 #define EFX_RXQ_MINNDESCS 512 #define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t)) #define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_SIZE) #define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16) #define EFX_RXQ_DC_NDESCS(_dcsize) (8 << _dcsize) typedef enum efx_rxq_type_e { EFX_RXQ_TYPE_DEFAULT, EFX_RXQ_TYPE_SCATTER, EFX_RXQ_NTYPES } efx_rxq_type_t; extern __checkReturn efx_rc_t efx_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp); typedef struct efx_buffer_s { efsys_dma_addr_t eb_addr; size_t eb_size; boolean_t eb_eop; } efx_buffer_t; typedef struct efx_desc_s { efx_qword_t ed_eq; } efx_desc_t; extern void efx_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added); extern void efx_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); extern __checkReturn efx_rc_t efx_rx_qflush( __in efx_rxq_t *erp); extern void efx_rx_qenable( __in efx_rxq_t *erp); extern void 
efx_rx_qdestroy( __in efx_rxq_t *erp); /* TX */ typedef struct efx_txq_s efx_txq_t; #if EFSYS_OPT_QSTATS /* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 12dff8778598b2db */ typedef enum efx_tx_qstat_e { TX_POST, TX_POST_PIO, TX_NQSTATS } efx_tx_qstat_t; /* END MKCONFIG GENERATED EfxHeaderTransmitQueueBlock */ #endif /* EFSYS_OPT_QSTATS */ extern __checkReturn efx_rc_t efx_tx_init( __in efx_nic_t *enp); extern void efx_tx_fini( __in efx_nic_t *enp); #define EFX_BUG35388_WORKAROUND(_encp) \ (((_encp) == NULL) ? 1 : ((_encp)->enc_bug35388_workaround != 0)) #define EFX_TXQ_MAXNDESCS(_encp) \ ((EFX_BUG35388_WORKAROUND(_encp)) ? 2048 : 4096) #define EFX_TXQ_MINNDESCS 512 #define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t)) #define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE) #define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16) #define EFX_TXQ_DC_NDESCS(_dcsize) (8 << _dcsize) #define EFX_TXQ_MAX_BUFS 8 /* Maximum independent of EFX_BUG35388_WORKAROUND. */ #define EFX_TXQ_CKSUM_IPV4 0x0001 #define EFX_TXQ_CKSUM_TCPUDP 0x0002 #define EFX_TXQ_FATSOV2 0x0004 extern __checkReturn efx_rc_t efx_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __deref_out efx_txq_t **etpp, __out unsigned int *addedp); extern __checkReturn efx_rc_t efx_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern __checkReturn efx_rc_t efx_tx_qpace( __in efx_txq_t *etp, __in unsigned int ns); extern void efx_tx_qpush( __in efx_txq_t *etp, __in unsigned int added, __in unsigned int pushed); extern __checkReturn efx_rc_t efx_tx_qflush( __in efx_txq_t *etp); extern void efx_tx_qenable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t efx_tx_qpio_enable( __in efx_txq_t *etp); extern void efx_tx_qpio_disable( __in efx_txq_t *etp); extern __checkReturn efx_rc_t efx_tx_qpio_write( __in efx_txq_t *etp, __in_ecount(buf_length) uint8_t *buffer, __in size_t buf_length, __in size_t pio_buf_offset); extern __checkReturn efx_rc_t efx_tx_qpio_post( __in efx_txq_t *etp, __in size_t pkt_length, __in unsigned int completed, __inout unsigned int *addedp); extern __checkReturn efx_rc_t efx_tx_qdesc_post( __in efx_txq_t *etp, __in_ecount(n) efx_desc_t *ed, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp); extern void efx_tx_qdesc_dma_create( __in efx_txq_t *etp, __in efsys_dma_addr_t addr, __in size_t size, __in boolean_t eop, __out efx_desc_t *edp); extern void efx_tx_qdesc_tso_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint8_t tcp_flags, __out efx_desc_t *edp); /* Number of FATSOv2 option descriptors */ #define EFX_TX_FATSOV2_OPT_NDESCS 2 /* Maximum number of DMA segments per TSO packet (not superframe) */ #define EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX 24 extern void efx_tx_qdesc_tso2_create( __in efx_txq_t *etp, __in uint16_t ipv4_id, __in uint32_t tcp_seq, __in uint16_t tcp_mss, __out_ecount(count) efx_desc_t *edp, __in int count); extern void efx_tx_qdesc_vlantci_create( __in efx_txq_t *etp, __in uint16_t tci, __out efx_desc_t *edp); #if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES extern const char * efx_tx_qstat_name( __in efx_nic_t *etp, __in unsigned int id); #endif /* EFSYS_OPT_NAMES */ extern void efx_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat); #endif /* EFSYS_OPT_QSTATS */ 
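/*
 * Illustrative sketch, not part of the original header: the TX fast path
 * stages descriptors with efx_tx_qpost() and then writes the doorbell once
 * with efx_tx_qpush(); batching several posts behind a single push is the
 * point of the split API.  The helper name is an assumption; the prototypes
 * match the declarations above, and the caller is assumed to keep fewer
 * than EFX_TXQ_LIMIT(ndescs) descriptors outstanding.
 */
static	__checkReturn	efx_rc_t
example_tx_post_and_push(
	__in			efx_txq_t *etp,
	__in_ecount(n)		efx_buffer_t *eb,
	__in			unsigned int n,
	__in			unsigned int completed,
	__inout			unsigned int *addedp)
{
	unsigned int pushed = *addedp;
	efx_rc_t rc;

	/* Stage the fragment descriptors; *addedp advances past them */
	if ((rc = efx_tx_qpost(etp, eb, n, completed, addedp)) != 0)
		return (rc);

	/* One doorbell write covers everything staged since "pushed" */
	efx_tx_qpush(etp, *addedp, pushed);

	return (0);
}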
extern void efx_tx_qdestroy( __in efx_txq_t *etp); /* FILTER */ #if EFSYS_OPT_FILTER #define EFX_ETHER_TYPE_IPV4 0x0800 #define EFX_ETHER_TYPE_IPV6 0x86DD #define EFX_IPPROTO_TCP 6 #define EFX_IPPROTO_UDP 17 typedef enum efx_filter_flag_e { EFX_FILTER_FLAG_RX_RSS = 0x01, /* use RSS to spread across * multiple queues */ EFX_FILTER_FLAG_RX_SCATTER = 0x02, /* enable RX scatter */ EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04, /* Override an automatic filter * (priority EFX_FILTER_PRI_AUTO). * May only be set by the filter * implementation for each type. * A removal request will * restore the automatic filter * in its place. */ EFX_FILTER_FLAG_RX = 0x08, /* Filter is for RX */ EFX_FILTER_FLAG_TX = 0x10, /* Filter is for TX */ } efx_filter_flag_t; typedef enum efx_filter_match_flags_e { EFX_FILTER_MATCH_REM_HOST = 0x0001, /* Match by remote IP host * address */ EFX_FILTER_MATCH_LOC_HOST = 0x0002, /* Match by local IP host * address */ EFX_FILTER_MATCH_REM_MAC = 0x0004, /* Match by remote MAC address */ EFX_FILTER_MATCH_REM_PORT = 0x0008, /* Match by remote TCP/UDP port */ EFX_FILTER_MATCH_LOC_MAC = 0x0010, /* Match by local MAC address */ EFX_FILTER_MATCH_LOC_PORT = 0x0020, /* Match by local TCP/UDP port */ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, /* Match by Ether-type */ EFX_FILTER_MATCH_INNER_VID = 0x0080, /* Match by inner VLAN ID */ EFX_FILTER_MATCH_OUTER_VID = 0x0100, /* Match by outer VLAN ID */ EFX_FILTER_MATCH_IP_PROTO = 0x0200, /* Match by IP transport * protocol */ EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400, /* Match by local MAC address * I/G bit. Used for RX default * unicast and multicast/ * broadcast filters. */ } efx_filter_match_flags_t; typedef enum efx_filter_priority_s { EFX_FILTER_PRI_HINT = 0, /* Performance hint */ EFX_FILTER_PRI_AUTO, /* Automatic filter based on device * address list or hardware * requirements. This may only be used * by the filter implementation for * each NIC type. */ EFX_FILTER_PRI_MANUAL, /* Manually configured filter */ EFX_FILTER_PRI_REQUIRED, /* Required for correct behaviour of the * client (e.g. SR-IOV, HyperV VMQ etc.) */ } efx_filter_priority_t; /* * FIXME: All these fields are assumed to be in little-endian byte order. * It may be better for some to be big-endian. See bug42804.
*/ typedef struct efx_filter_spec_s { uint32_t efs_match_flags:12; uint32_t efs_priority:2; uint32_t efs_flags:6; uint32_t efs_dmaq_id:12; uint32_t efs_rss_context; uint16_t efs_outer_vid; uint16_t efs_inner_vid; uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN]; uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN]; uint16_t efs_ether_type; uint8_t efs_ip_proto; uint16_t efs_loc_port; uint16_t efs_rem_port; efx_oword_t efs_rem_host; efx_oword_t efs_loc_host; } efx_filter_spec_t; /* Default values for use in filter specifications */ #define EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT 0xffffffff #define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff #define EFX_FILTER_SPEC_VID_UNSPEC 0xffff extern __checkReturn efx_rc_t efx_filter_init( __in efx_nic_t *enp); extern void efx_filter_fini( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_filter_insert( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); extern __checkReturn efx_rc_t efx_filter_remove( __in efx_nic_t *enp, __inout efx_filter_spec_t *spec); extern __checkReturn efx_rc_t efx_filter_restore( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_filter_supported_filters( __in efx_nic_t *enp, __out uint32_t *list, __out size_t *length); extern void efx_filter_spec_init_rx( __out efx_filter_spec_t *spec, __in efx_filter_priority_t priority, __in efx_filter_flag_t flags, __in efx_rxq_t *erp); extern void efx_filter_spec_init_tx( __out efx_filter_spec_t *spec, __in efx_txq_t *etp); extern __checkReturn efx_rc_t efx_filter_spec_set_ipv4_local( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t host, __in uint16_t port); extern __checkReturn efx_rc_t efx_filter_spec_set_ipv4_full( __inout efx_filter_spec_t *spec, __in uint8_t proto, __in uint32_t lhost, __in uint16_t lport, __in uint32_t rhost, __in uint16_t rport); extern __checkReturn efx_rc_t efx_filter_spec_set_eth_local( __inout efx_filter_spec_t *spec, __in uint16_t vid, __in const uint8_t *addr); extern __checkReturn efx_rc_t efx_filter_spec_set_uc_def( __inout efx_filter_spec_t *spec); extern __checkReturn efx_rc_t efx_filter_spec_set_mc_def( __inout efx_filter_spec_t *spec); #endif /* EFSYS_OPT_FILTER */ /* HASH */ extern __checkReturn uint32_t efx_hash_dwords( __in_ecount(count) uint32_t const *input, __in size_t count, __in uint32_t init); extern __checkReturn uint32_t efx_hash_bytes( __in_ecount(length) uint8_t const *input, __in size_t length, __in uint32_t init); #if EFSYS_OPT_LICENSING /* LICENSING */ typedef struct efx_key_stats_s { uint32_t eks_valid; uint32_t eks_invalid; uint32_t eks_blacklisted; uint32_t eks_unverifiable; uint32_t eks_wrong_node; uint32_t eks_licensed_apps_lo; uint32_t eks_licensed_apps_hi; uint32_t eks_licensed_features_lo; uint32_t eks_licensed_features_hi; } efx_key_stats_t; extern __checkReturn efx_rc_t efx_lic_init( __in efx_nic_t *enp); extern void efx_lic_fini( __in efx_nic_t *enp); extern __checkReturn boolean_t efx_lic_check_support( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_lic_update_licenses( __in efx_nic_t *enp); extern __checkReturn efx_rc_t efx_lic_get_key_stats( __in efx_nic_t *enp, __out efx_key_stats_t *ksp); extern __checkReturn efx_rc_t efx_lic_app_state( __in efx_nic_t *enp, __in uint64_t app_id, __out boolean_t *licensedp); extern __checkReturn efx_rc_t efx_lic_get_id( __in efx_nic_t *enp, __in size_t buffer_size, __out uint32_t *typep, __out size_t *lengthp, __out_opt uint8_t *bufferp); extern __checkReturn efx_rc_t efx_lic_find_start( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __out 
uint32_t *startp ); extern __checkReturn efx_rc_t efx_lic_find_end( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *endp ); extern __checkReturn __success(return != B_FALSE) boolean_t efx_lic_find_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __out uint32_t *startp, __out uint32_t *lengthp ); extern __checkReturn __success(return != B_FALSE) boolean_t efx_lic_validate_key( __in efx_nic_t *enp, __in_bcount(length) caddr_t keyp, __in uint32_t length ); extern __checkReturn efx_rc_t efx_lic_read_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __out_bcount_part(key_max_size, *lengthp) caddr_t keyp, __in size_t key_max_size, __out uint32_t *lengthp ); extern __checkReturn efx_rc_t efx_lic_write_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in_bcount(length) caddr_t keyp, __in uint32_t length, __out uint32_t *lengthp ); __checkReturn efx_rc_t efx_lic_delete_key( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size, __in uint32_t offset, __in uint32_t length, __in uint32_t end, __out uint32_t *deltap ); extern __checkReturn efx_rc_t efx_lic_create_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ); extern __checkReturn efx_rc_t efx_lic_finish_partition( __in efx_nic_t *enp, __in_bcount(buffer_size) caddr_t bufferp, __in size_t buffer_size ); #endif /* EFSYS_OPT_LICENSING */ #ifdef __cplusplus } #endif #endif /* _SYS_EFX_H */ Index: head/sys/dev/sfxge/common/efx_rx.c =================================================================== --- head/sys/dev/sfxge/common/efx_rx.c (revision 310751) +++ head/sys/dev/sfxge/common/efx_rx.c (revision 310752) @@ -1,1228 +1,1234 @@ /*- * Copyright (c) 2007-2016 Solarflare Communications Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. 
*/ #include __FBSDID("$FreeBSD$"); #include "efx.h" #include "efx_impl.h" #if EFSYS_OPT_SIENA static __checkReturn efx_rc_t siena_rx_init( __in efx_nic_t *enp); static void siena_rx_fini( __in efx_nic_t *enp); #if EFSYS_OPT_RX_SCATTER static __checkReturn efx_rc_t siena_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size); #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t siena_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert); static __checkReturn efx_rc_t siena_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n); static __checkReturn efx_rc_t siena_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n); static __checkReturn uint32_t siena_rx_prefix_hash( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer); #endif /* EFSYS_OPT_RX_SCALE */ static __checkReturn efx_rc_t siena_rx_prefix_pktlen( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *lengthp); static void siena_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added); static void siena_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp); static __checkReturn efx_rc_t siena_rx_qflush( __in efx_rxq_t *erp); static void siena_rx_qenable( __in efx_rxq_t *erp); static __checkReturn efx_rc_t siena_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __in efx_rxq_t *erp); static void siena_rx_qdestroy( __in efx_rxq_t *erp); #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_SIENA static const efx_rx_ops_t __efx_rx_siena_ops = { siena_rx_init, /* erxo_init */ siena_rx_fini, /* erxo_fini */ #if EFSYS_OPT_RX_SCATTER siena_rx_scatter_enable, /* erxo_scatter_enable */ #endif #if EFSYS_OPT_RX_SCALE siena_rx_scale_mode_set, /* erxo_scale_mode_set */ siena_rx_scale_key_set, /* erxo_scale_key_set */ siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */ siena_rx_prefix_hash, /* erxo_prefix_hash */ #endif siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */ siena_rx_qpost, /* erxo_qpost */ siena_rx_qpush, /* erxo_qpush */ siena_rx_qflush, /* erxo_qflush */ siena_rx_qenable, /* erxo_qenable */ siena_rx_qcreate, /* erxo_qcreate */ siena_rx_qdestroy, /* erxo_qdestroy */ }; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD static const efx_rx_ops_t __efx_rx_ef10_ops = { ef10_rx_init, /* erxo_init */ ef10_rx_fini, /* erxo_fini */ #if EFSYS_OPT_RX_SCATTER ef10_rx_scatter_enable, /* erxo_scatter_enable */ #endif #if EFSYS_OPT_RX_SCALE ef10_rx_scale_mode_set, /* erxo_scale_mode_set */ ef10_rx_scale_key_set, /* erxo_scale_key_set */ ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */ ef10_rx_prefix_hash, /* erxo_prefix_hash */ #endif ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */ ef10_rx_qpost, /* erxo_qpost */ ef10_rx_qpush, /* erxo_qpush */ ef10_rx_qflush, /* erxo_qflush */ ef10_rx_qenable, /* erxo_qenable */ ef10_rx_qcreate, /* erxo_qcreate */ ef10_rx_qdestroy, /* erxo_qdestroy */ }; #endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */ __checkReturn efx_rc_t efx_rx_init( __inout efx_nic_t *enp) { const efx_rx_ops_t *erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (!(enp->en_mod_flags & 
EFX_MOD_EV)) { rc = EINVAL; goto fail1; } if (enp->en_mod_flags & EFX_MOD_RX) { rc = EINVAL; goto fail2; } switch (enp->en_family) { #if EFSYS_OPT_SIENA case EFX_FAMILY_SIENA: erxop = &__efx_rx_siena_ops; break; #endif /* EFSYS_OPT_SIENA */ #if EFSYS_OPT_HUNTINGTON case EFX_FAMILY_HUNTINGTON: erxop = &__efx_rx_ef10_ops; break; #endif /* EFSYS_OPT_HUNTINGTON */ #if EFSYS_OPT_MEDFORD case EFX_FAMILY_MEDFORD: erxop = &__efx_rx_ef10_ops; break; #endif /* EFSYS_OPT_MEDFORD */ default: EFSYS_ASSERT(0); rc = ENOTSUP; goto fail3; } if ((rc = erxop->erxo_init(enp)) != 0) goto fail4; enp->en_erxop = erxop; enp->en_mod_flags |= EFX_MOD_RX; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); enp->en_erxop = NULL; enp->en_mod_flags &= ~EFX_MOD_RX; return (rc); } void efx_rx_fini( __in efx_nic_t *enp) { const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0); erxop->erxo_fini(enp); enp->en_erxop = NULL; enp->en_mod_flags &= ~EFX_MOD_RX; } #if EFSYS_OPT_RX_SCATTER __checkReturn efx_rc_t efx_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCATTER */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_rx_hash_support_get( __in efx_nic_t *enp, __out efx_rx_hash_support_t *supportp) { efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (supportp == NULL) { rc = EINVAL; goto fail1; } /* Report if resources are available to insert RX hash value */ *supportp = enp->en_hash_support; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_rx_scale_support_get( __in efx_nic_t *enp, __out efx_rx_scale_support_t *supportp) { efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (supportp == NULL) { rc = EINVAL; goto fail1; } /* Report if resources are available to support RSS */ *supportp = enp->en_rss_support; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } __checkReturn efx_rc_t efx_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if (erxop->erxo_scale_mode_set != NULL) { if ((rc = erxop->erxo_scale_mode_set(enp, alg, type, insert)) != 0) goto fail1; } return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if ((rc = erxop->erxo_scale_key_set(enp, key, n)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ #if 
EFSYS_OPT_RX_SCALE __checkReturn efx_rc_t efx_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); if ((rc = erxop->erxo_scale_tbl_set(enp, table, n)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCALE */ void efx_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qpost(erp, addrp, size, n, completed, added); } void efx_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qpush(erp, added, pushedp); } __checkReturn efx_rc_t efx_rx_qflush( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; efx_rc_t rc; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); if ((rc = erxop->erxo_qflush(erp)) != 0) goto fail1; return (0); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_rx_qenable( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qenable(erp); } __checkReturn efx_rc_t efx_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __deref_out efx_rxq_t **erpp) { const efx_rx_ops_t *erxop = enp->en_erxop; efx_rxq_t *erp; efx_rc_t rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX); /* Allocate an RXQ object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp); if (erp == NULL) { rc = ENOMEM; goto fail1; } erp->er_magic = EFX_RXQ_MAGIC; erp->er_enp = enp; erp->er_index = index; erp->er_mask = n - 1; erp->er_esmp = esmp; if ((rc = erxop->erxo_qcreate(enp, index, label, type, esmp, n, id, eep, erp)) != 0) goto fail2; enp->en_rx_qcount++; *erpp = erp; return (0); fail2: EFSYS_PROBE(fail2); EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } void efx_rx_qdestroy( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); erxop->erxo_qdestroy(erp); } __checkReturn efx_rc_t efx_psuedo_hdr_pkt_length_get( - __in efx_nic_t *enp, + __in efx_rxq_t *erp, __in uint8_t *buffer, __out uint16_t *lengthp) { + efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); + return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp)); } #if EFSYS_OPT_RX_SCALE __checkReturn uint32_t efx_psuedo_hdr_hash_get( - __in efx_nic_t *enp, + __in efx_rxq_t *erp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer) { + efx_nic_t *enp = erp->er_enp; const efx_rx_ops_t *erxop = enp->en_erxop; + + EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE); return (erxop->erxo_prefix_hash(enp, func, buffer)); } #endif /* EFSYS_OPT_RX_SCALE */ #if EFSYS_OPT_SIENA 
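/*
 * A minimal caller-side sketch (hypothetical caller and variable names,
 * not part of this change): the pseudo-header accessors above now take
 * an RXQ handle rather than a NIC handle, so a client holding only the
 * queue can fetch the prefix hash, e.g.
 *
 *	uint32_t hash;
 *
 *	hash = efx_psuedo_hdr_hash_get(erp, EFX_RX_HASHALG_TOEPLITZ,
 *	    mtod(m, uint8_t *));
 *
 * assuming erp is a created efx_rxq_t and m is an mbuf whose data still
 * begins with the hardware RX prefix. The RXQ carries er_enp, which lets
 * the implementation assert the queue magic and reach the per-NIC ops.
 */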
static __checkReturn efx_rc_t siena_rx_init( __in efx_nic_t *enp) { efx_oword_t oword; unsigned int index; EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32); EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); /* Zero the RSS table */ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) { EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL, index, &oword, B_TRUE); } #if EFSYS_OPT_RX_SCALE /* The RSS key and indirection table are writable. */ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE; /* Hardware can insert RX hash with/without RSS */ enp->en_hash_support = EFX_RX_HASH_AVAILABLE; #endif /* EFSYS_OPT_RX_SCALE */ return (0); } #if EFSYS_OPT_RX_SCATTER static __checkReturn efx_rc_t siena_rx_scatter_enable( __in efx_nic_t *enp, __in unsigned int buf_size) { unsigned int nbuf32; efx_oword_t oword; efx_rc_t rc; nbuf32 = buf_size / 32; if ((nbuf32 == 0) || (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) || ((buf_size % 32) != 0)) { rc = EINVAL; goto fail1; } if (enp->en_rx_qcount > 0) { rc = EBUSY; goto fail2; } /* Set scatter buffer size */ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32); EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword); /* Enable scatter for packets not matching a filter */ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1); EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword); return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* EFSYS_OPT_RX_SCATTER */ #define EFX_RX_LFSR_HASH(_enp, _insert) \ do { \ efx_oword_t oword; \ \ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \ (_insert) ? 1 : 0); \ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \ \ if ((_enp)->en_family == EFX_FAMILY_SIENA) { \ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \ &oword); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \ &oword); \ } \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \ do { \ efx_oword_t oword; \ \ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \ (_ip) ? 1 : 0); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \ (_tcp) ? 0 : 1); \ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \ (_insert) ? 1 : 0); \ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \ do { \ efx_oword_t oword; \ \ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \ EFX_SET_OWORD_FIELD(oword, \ FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 
0 : 1); \ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \ \ (_rc) = 0; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t siena_rx_scale_mode_set( __in efx_nic_t *enp, __in efx_rx_hash_alg_t alg, __in efx_rx_hash_type_t type, __in boolean_t insert) { efx_rc_t rc; switch (alg) { case EFX_RX_HASHALG_LFSR: EFX_RX_LFSR_HASH(enp, insert); break; case EFX_RX_HASHALG_TOEPLITZ: EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert, type & (1 << EFX_RX_HASH_IPV4), type & (1 << EFX_RX_HASH_TCPIPV4)); EFX_RX_TOEPLITZ_IPV6_HASH(enp, type & (1 << EFX_RX_HASH_IPV6), type & (1 << EFX_RX_HASH_TCPIPV6), rc); if (rc != 0) goto fail1; break; default: rc = EINVAL; goto fail2; } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); EFX_RX_LFSR_HASH(enp, B_FALSE); return (rc); } #endif #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t siena_rx_scale_key_set( __in efx_nic_t *enp, __in_ecount(n) uint8_t *key, __in size_t n) { efx_oword_t oword; unsigned int byte; unsigned int offset; efx_rc_t rc; byte = 0; /* Write Toeplitz IPv4 hash key */ EFX_ZERO_OWORD(oword); for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword); byte = 0; /* Verify Toeplitz IPv4 hash key */ EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword); for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail1; } } if ((enp->en_features & EFX_FEATURE_IPV6) == 0) goto done; byte = 0; /* Write Toeplitz IPv6 hash key 3 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); /* Write Toeplitz IPv6 hash key 2 */ EFX_ZERO_OWORD(oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword); /* Write Toeplitz IPv6 hash key 1 */ EFX_ZERO_OWORD(oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8; offset > 0 && byte < n; --offset) oword.eo_u8[offset - 1] = key[byte++]; EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword); byte = 0; /* Verify Toeplitz IPv6 hash key 3 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail2; } } /* Verify Toeplitz IPv6 hash key 2 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail3; } } /* Verify Toeplitz IPv6 hash key 1 */ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword); for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN + FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8; offset > 0 && byte < n; --offset) { if (oword.eo_u8[offset - 1] != key[byte++]) { rc = EFAULT; goto fail4; } } done: return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); 
} #endif #if EFSYS_OPT_RX_SCALE static __checkReturn efx_rc_t siena_rx_scale_tbl_set( __in efx_nic_t *enp, __in_ecount(n) unsigned int *table, __in size_t n) { efx_oword_t oword; int index; efx_rc_t rc; EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS); EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH)); if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) { rc = EINVAL; goto fail1; } for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) { uint32_t byte; /* Calculate the entry to place in the table */ byte = (n > 0) ? (uint32_t)table[index % n] : 0; EFSYS_PROBE2(table, int, index, uint32_t, byte); EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte); /* Write the table */ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL, index, &oword, B_TRUE); } for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) { uint32_t byte; /* Recompute the entry written to this row */ byte = (n > 0) ? (uint32_t)table[index % n] : 0; /* Read the table */ EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL, index, &oword, B_TRUE); /* Verify the entry */ if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) { rc = EFAULT; goto fail2; } } return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } #endif /* * Falcon/Siena pseudo-header * -------------------------- * * Receive packets are prefixed by an optional 16-byte pseudo-header. * The pseudo-header is a byte array of one of the forms: * * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL * * where: * TT.TT.TT.TT Toeplitz hash (32-bit big-endian) * LL.LL LFSR hash (16-bit big-endian) */ #if EFSYS_OPT_RX_SCALE static __checkReturn uint32_t siena_rx_prefix_hash( __in efx_nic_t *enp, __in efx_rx_hash_alg_t func, __in uint8_t *buffer) { _NOTE(ARGUNUSED(enp)) switch (func) { case EFX_RX_HASHALG_TOEPLITZ: return ((buffer[12] << 24) | (buffer[13] << 16) | (buffer[14] << 8) | buffer[15]); case EFX_RX_HASHALG_LFSR: return ((buffer[14] << 8) | buffer[15]); default: EFSYS_ASSERT(0); return (0); } } #endif /* EFSYS_OPT_RX_SCALE */ static __checkReturn efx_rc_t siena_rx_prefix_pktlen( __in efx_nic_t *enp, __in uint8_t *buffer, __out uint16_t *lengthp) { _NOTE(ARGUNUSED(enp, buffer, lengthp)) /* Not supported by Falcon/Siena hardware */ EFSYS_ASSERT(0); return (ENOTSUP); } static void siena_rx_qpost( __in efx_rxq_t *erp, __in_ecount(n) efsys_dma_addr_t *addrp, __in size_t size, __in unsigned int n, __in unsigned int completed, __in unsigned int added) { efx_qword_t qword; unsigned int i; unsigned int offset; unsigned int id; /* The client driver must not overfill the queue */ EFSYS_ASSERT3U(added - completed + n, <=, EFX_RXQ_LIMIT(erp->er_mask + 1)); id = added & (erp->er_mask); for (i = 0; i < n; i++) { EFSYS_PROBE4(rx_post, unsigned int, erp->er_index, unsigned int, id, efsys_dma_addr_t, addrp[i], size_t, size); EFX_POPULATE_QWORD_3(qword, FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size), FSF_AZ_RX_KER_BUF_ADDR_DW0, (uint32_t)(addrp[i] & 0xffffffff), FSF_AZ_RX_KER_BUF_ADDR_DW1, (uint32_t)(addrp[i] >> 32)); offset = id * sizeof (efx_qword_t); EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword); id = (id + 1) & (erp->er_mask); } }
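/*
 * A worked example of the pseudo-header layout described above
 * (illustrative bytes only): for a Toeplitz-hashed packet whose prefix
 * ends in the bytes 12.34.56.78, siena_rx_prefix_hash() reassembles the
 * 32-bit big-endian value as
 *
 *	(0x12 << 24) | (0x34 << 16) | (0x56 << 8) | 0x78 == 0x12345678
 *
 * whereas the LFSR form uses only the final two bytes, so ..9a.bc would
 * yield (0x9a << 8) | 0xbc == 0x9abc.
 */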
static void siena_rx_qpush( __in efx_rxq_t *erp, __in unsigned int added, __inout unsigned int *pushedp) { efx_nic_t *enp = erp->er_enp; unsigned int pushed = *pushedp; uint32_t wptr; efx_oword_t oword; efx_dword_t dword; /* All descriptors are pushed */ *pushedp = added; /* Push the populated descriptors out */ wptr = added & erp->er_mask; EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr); /* Only write the third DWORD */ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3)); /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1, wptr, pushed & erp->er_mask); EFSYS_PIO_WRITE_BARRIER(); EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0, erp->er_index, &dword, B_FALSE); } static __checkReturn efx_rc_t siena_rx_qflush( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_oword_t oword; uint32_t label; label = erp->er_index; /* Flush the queue */ EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, FRF_AZ_RX_FLUSH_DESCQ, label); EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword); return (0); } static void siena_rx_qenable( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_oword_t oword; EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC); EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); } static __checkReturn efx_rc_t siena_rx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efx_rxq_type_t type, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in efx_evq_t *eep, __in efx_rxq_t *erp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_oword_t oword; uint32_t size; boolean_t jumbo; efx_rc_t rc; _NOTE(ARGUNUSED(esmp)) EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH)); EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS); EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS)); EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS)); if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_rxq_limit) { rc = EINVAL; goto fail2; } for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS); size++) if ((1 << size) == (int)(n / EFX_RXQ_MINNDESCS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail3; } switch (type) { case EFX_RXQ_TYPE_DEFAULT: jumbo = B_FALSE; break; #if EFSYS_OPT_RX_SCATTER case EFX_RXQ_TYPE_SCATTER: if (enp->en_family < EFX_FAMILY_SIENA) { rc = EINVAL; goto fail4; } jumbo = B_TRUE; break; #endif /* EFSYS_OPT_RX_SCATTER */ default: rc = EINVAL; goto fail4; } /* Set up the new descriptor queue */ EFX_POPULATE_OWORD_7(oword, FRF_AZ_RX_DESCQ_BUF_BASE_ID, id, FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index, FRF_AZ_RX_DESCQ_OWNER_ID, 0, FRF_AZ_RX_DESCQ_LABEL, label, FRF_AZ_RX_DESCQ_SIZE, size, FRF_AZ_RX_DESCQ_TYPE, 0, FRF_AZ_RX_DESCQ_JUMBO, jumbo); EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, efx_rc_t, rc); return (rc); } static void siena_rx_qdestroy( __in efx_rxq_t *erp) { efx_nic_t *enp = erp->er_enp; efx_oword_t oword; EFSYS_ASSERT(enp->en_rx_qcount != 0); --enp->en_rx_qcount; /* Purge descriptor queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL, erp->er_index, &oword, B_TRUE); /* Free the RXQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp); } static void siena_rx_fini( __in efx_nic_t *enp) { _NOTE(ARGUNUSED(enp)) } #endif /* EFSYS_OPT_SIENA */
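/*
 * A worked example of the ring-size encoding in siena_rx_qcreate()
 * (assuming the usual in-tree values EFX_RXQ_MINNDESCS == 512 and
 * EFX_RXQ_MAXNDESCS == 4096): the search loop computes
 * log2(n / EFX_RXQ_MINNDESCS), so for n == 1024 it stops at size == 1,
 * because (1 << 1) == 1024 / 512, and that exponent is what is written
 * to FRF_AZ_RX_DESCQ_SIZE. n must be a power of two in [512, 4096] or
 * the function fails with EINVAL.
 */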
Index: head/sys/dev/sfxge/sfxge_rx.c =================================================================== --- head/sys/dev/sfxge/sfxge_rx.c (revision 310751) +++ head/sys/dev/sfxge/sfxge_rx.c (revision 310752) @@ -1,1420 +1,1421 @@ /*- * Copyright (c) 2010-2016 Solarflare Communications Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of the FreeBSD Project. */ #include __FBSDID("$FreeBSD$"); #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #endif #include "common/efx.h" #include "sfxge.h" #include "sfxge_rx.h" #define RX_REFILL_THRESHOLD(_entries) (EFX_RXQ_LIMIT(_entries) * 9 / 10) #ifdef SFXGE_LRO SYSCTL_NODE(_hw_sfxge, OID_AUTO, lro, CTLFLAG_RD, NULL, "Large receive offload (LRO) parameters"); #define SFXGE_LRO_PARAM(_param) SFXGE_PARAM(lro._param) /* Size of the LRO hash table. Must be a power of 2. A larger table * means we can accelerate a larger number of streams. */ static unsigned lro_table_size = 128; TUNABLE_INT(SFXGE_LRO_PARAM(table_size), &lro_table_size); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, table_size, CTLFLAG_RDTUN, &lro_table_size, 0, "Size of the LRO hash table (must be a power of 2)"); /* Maximum length of a hash chain. If chains get too long then the lookup * time increases and may exceed the benefit of LRO. */ static unsigned lro_chain_max = 20; TUNABLE_INT(SFXGE_LRO_PARAM(chain_max), &lro_chain_max); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, chain_max, CTLFLAG_RDTUN, &lro_chain_max, 0, "The maximum length of a hash chain"); /* Maximum time (in ticks) that a connection can be idle before its LRO * state is discarded. */ static unsigned lro_idle_ticks; /* initialised in sfxge_rx_init() */ TUNABLE_INT(SFXGE_LRO_PARAM(idle_ticks), &lro_idle_ticks); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, idle_ticks, CTLFLAG_RDTUN, &lro_idle_ticks, 0, "The maximum time (in ticks) that a connection can be idle " "before its LRO state is discarded"); /* Number of packets with payload that must arrive in-order before a * connection is eligible for LRO. The idea is we should avoid coalescing * segments when the sender is in slow-start because reducing the ACK rate * can damage performance. */ static int lro_slow_start_packets = 2000; TUNABLE_INT(SFXGE_LRO_PARAM(slow_start_packets), &lro_slow_start_packets); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, slow_start_packets, CTLFLAG_RDTUN, &lro_slow_start_packets, 0, "Number of packets with payload that must arrive in-order before " "a connection is eligible for LRO"); /* Number of packets with payload that must arrive in-order following loss * before a connection is eligible for LRO. The idea is we should avoid * coalescing segments when the sender is recovering from loss, because * reducing the ACK rate can damage performance. */ static int lro_loss_packets = 20; TUNABLE_INT(SFXGE_LRO_PARAM(loss_packets), &lro_loss_packets); SYSCTL_UINT(_hw_sfxge_lro, OID_AUTO, loss_packets, CTLFLAG_RDTUN, &lro_loss_packets, 0, "Number of packets with payload that must arrive in-order " "following loss before a connection is eligible for LRO"); /* Flags for sfxge_lro_conn::l2_id; must not collide with EVL_VLID_MASK */ #define SFXGE_LRO_L2_ID_VLAN 0x4000 #define SFXGE_LRO_L2_ID_IPV6 0x8000 #define SFXGE_LRO_CONN_IS_VLAN_ENCAP(c) ((c)->l2_id & SFXGE_LRO_L2_ID_VLAN) #define SFXGE_LRO_CONN_IS_TCPIPV4(c) (!((c)->l2_id & SFXGE_LRO_L2_ID_IPV6)) /* Compare IPv6 addresses, avoiding conditional branches */ static unsigned long ipv6_addr_cmp(const struct in6_addr *left, const struct in6_addr *right) { #if LONG_BIT == 64 const uint64_t *left64 = (const uint64_t *)left; const uint64_t *right64 = (const uint64_t *)right; return (left64[0] - right64[0]) | (left64[1] - right64[1]); #else return (left->s6_addr32[0] - right->s6_addr32[0]) | (left->s6_addr32[1] - right->s6_addr32[1]) | (left->s6_addr32[2] - right->s6_addr32[2]) | (left->s6_addr32[3] - right->s6_addr32[3]); #endif } #endif /* SFXGE_LRO */ void sfxge_rx_qflush_done(struct sfxge_rxq *rxq) { rxq->flush_state = SFXGE_FLUSH_DONE; } void sfxge_rx_qflush_failed(struct sfxge_rxq *rxq) { rxq->flush_state = SFXGE_FLUSH_FAILED; } #ifdef RSS static uint8_t toep_key[RSS_KEYSIZE]; #else static uint8_t toep_key[] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; #endif static void sfxge_rx_post_refill(void *arg) { struct sfxge_rxq *rxq = arg; struct sfxge_softc *sc; unsigned int index; struct sfxge_evq *evq; uint16_t magic; sc = rxq->sc; index = rxq->index; evq = sc->evq[index]; magic = sfxge_sw_ev_rxq_magic(SFXGE_SW_EV_RX_QREFILL, rxq); /* This is guaranteed due to the start/stop order of rx and ev */ KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq not started")); KASSERT(rxq->init_state == SFXGE_RXQ_STARTED, ("rxq not started")); efx_ev_qpost(evq->common, magic); }
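/*
 * The LRO parameters declared above are loader tunables, so they would
 * normally be set from /boot/loader.conf before the driver attaches. A
 * hypothetical fragment (values for illustration only; table_size must
 * remain a power of 2):
 *
 *	hw.sfxge.lro.table_size=256
 *	hw.sfxge.lro.chain_max=40
 *	hw.sfxge.lro.idle_ticks=200
 *
 * This assumes SFXGE_PARAM() expands to a "hw.sfxge." prefix, as the
 * SFXGE_LRO_PARAM() definition above suggests.
 */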
static void sfxge_rx_schedule_refill(struct sfxge_rxq *rxq, boolean_t retrying) { /* Initially retry after 100 ms, but back off in case of * repeated failures as we probably have to wait for the * administrator to raise the pool limit. */ if (retrying) rxq->refill_delay = min(rxq->refill_delay * 2, 10 * hz); else rxq->refill_delay = hz / 10; callout_reset_curcpu(&rxq->refill_callout, rxq->refill_delay, sfxge_rx_post_refill, rxq); } #define SFXGE_REFILL_BATCH 64 static void sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying) { struct sfxge_softc *sc; unsigned int index; struct sfxge_evq *evq; unsigned int batch; unsigned int rxfill; unsigned int mblksize; int ntodo; efsys_dma_addr_t addr[SFXGE_REFILL_BATCH]; sc = rxq->sc; index = rxq->index; evq = sc->evq[index]; prefetch_read_many(sc->enp); prefetch_read_many(rxq->common); SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) return; rxfill = rxq->added - rxq->completed; KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries), ("rxfill > EFX_RXQ_LIMIT(rxq->entries)")); ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target); KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries), ("ntodo > EFX_RXQ_LIMIT(rxq->entries)")); if (ntodo == 0) return; batch = 0; mblksize = sc->rx_buffer_size - sc->rx_buffer_align; while (ntodo-- > 0) { unsigned int id; struct sfxge_rx_sw_desc *rx_desc; bus_dma_segment_t seg; struct mbuf *m; id = (rxq->added + batch) & rxq->ptr_mask; rx_desc = &rxq->queue[id]; KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL")); rx_desc->flags = EFX_DISCARD; m = rx_desc->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_cluster_size); if (m == NULL) break; /* m_len specifies length of area to be mapped for DMA */ m->m_len = mblksize; m->m_data = (caddr_t)P2ROUNDUP((uintptr_t)m->m_data, CACHE_LINE_SIZE); m->m_data += sc->rx_buffer_align; sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg); addr[batch++] = seg.ds_addr; if (batch == SFXGE_REFILL_BATCH) { efx_rx_qpost(rxq->common, addr, mblksize, batch, rxq->completed, rxq->added); rxq->added += batch; batch = 0; } } if (ntodo != 0) sfxge_rx_schedule_refill(rxq, retrying); if (batch != 0) { efx_rx_qpost(rxq->common, addr, mblksize, batch, rxq->completed, rxq->added); rxq->added += batch; } /* Make the descriptors visible to the hardware */ bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map, BUS_DMASYNC_PREWRITE); efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed); /* The queue could still be empty if no descriptors were actually * pushed, in which case there will be no event to cause the next * refill, so we must schedule a refill ourselves. */ if (rxq->pushed == rxq->completed) { sfxge_rx_schedule_refill(rxq, retrying); } } void sfxge_rx_qrefill(struct sfxge_rxq *rxq) { if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) return; /* Make sure the queue is full */ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_TRUE); } static void __sfxge_rx_deliver(struct sfxge_softc *sc, struct mbuf *m) { struct ifnet *ifp = sc->ifnet; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.csum_data = 0xffff; ifp->if_input(ifp, m); } static void -sfxge_rx_deliver(struct sfxge_softc *sc, struct sfxge_rx_sw_desc *rx_desc) +sfxge_rx_deliver(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_desc) { + struct sfxge_softc *sc = rxq->sc; struct mbuf *m = rx_desc->mbuf; int flags = rx_desc->flags; int csum_flags; /* Convert checksum flags */ csum_flags = (flags & EFX_CKSUM_IPV4) ?
(CSUM_IP_CHECKED | CSUM_IP_VALID) : 0; if (flags & EFX_CKSUM_TCPUDP) csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) { m->m_pkthdr.flowid = - efx_psuedo_hdr_hash_get(sc->enp, + efx_psuedo_hdr_hash_get(rxq->common, EFX_RX_HASHALG_TOEPLITZ, mtod(m, uint8_t *)); /* The hash covers a 4-tuple for TCP only */ M_HASHTYPE_SET(m, (flags & EFX_PKT_IPV4) ? ((flags & EFX_PKT_TCP) ? M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_IPV4) : ((flags & EFX_PKT_TCP) ? M_HASHTYPE_RSS_TCP_IPV6 : M_HASHTYPE_RSS_IPV6)); } m->m_data += sc->rx_prefix_size; m->m_len = rx_desc->size - sc->rx_prefix_size; m->m_pkthdr.len = m->m_len; m->m_pkthdr.csum_flags = csum_flags; __sfxge_rx_deliver(sc, rx_desc->mbuf); rx_desc->flags = EFX_DISCARD; rx_desc->mbuf = NULL; } #ifdef SFXGE_LRO static void sfxge_lro_deliver(struct sfxge_lro_state *st, struct sfxge_lro_conn *c) { struct sfxge_softc *sc = st->sc; struct mbuf *m = c->mbuf; struct tcphdr *c_th; int csum_flags; KASSERT(m, ("no mbuf to deliver")); ++st->n_bursts; /* Finish off packet munging and recalculate IP header checksum. */ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = c->nh; iph->ip_len = htons(iph->ip_len); iph->ip_sum = 0; iph->ip_sum = in_cksum_hdr(iph); c_th = (struct tcphdr *)(iph + 1); csum_flags = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID); } else { struct ip6_hdr *iph = c->nh; iph->ip6_plen = htons(iph->ip6_plen); c_th = (struct tcphdr *)(iph + 1); csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } c_th->th_win = c->th_last->th_win; c_th->th_ack = c->th_last->th_ack; if (c_th->th_off == c->th_last->th_off) { /* Copy TCP options (take care to avoid going negative). */ int optlen = ((c_th->th_off - 5) & 0xf) << 2u; memcpy(c_th + 1, c->th_last + 1, optlen); } m->m_pkthdr.flowid = c->conn_hash; M_HASHTYPE_SET(m, SFXGE_LRO_CONN_IS_TCPIPV4(c) ? M_HASHTYPE_RSS_TCP_IPV4 : M_HASHTYPE_RSS_TCP_IPV6); m->m_pkthdr.csum_flags = csum_flags; __sfxge_rx_deliver(sc, m); c->mbuf = NULL; c->delivered = 1; } /* Drop the given connection, and add it to the free list. */ static void sfxge_lro_drop(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c) { unsigned bucket; KASSERT(!c->mbuf, ("found orphaned mbuf")); if (c->next_buf.mbuf != NULL) { - sfxge_rx_deliver(rxq->sc, &c->next_buf); + sfxge_rx_deliver(rxq, &c->next_buf); LIST_REMOVE(c, active_link); } bucket = c->conn_hash & rxq->lro.conns_mask; KASSERT(rxq->lro.conns_n[bucket] > 0, ("LRO: bucket fill level wrong")); --rxq->lro.conns_n[bucket]; TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link); TAILQ_INSERT_HEAD(&rxq->lro.free_conns, c, link); } /* Stop tracking connections that have gone idle in order to keep hash * chains short. */ static void sfxge_lro_purge_idle(struct sfxge_rxq *rxq, unsigned now) { struct sfxge_lro_conn *c; unsigned i; KASSERT(LIST_EMPTY(&rxq->lro.active_conns), ("found active connections")); rxq->lro.last_purge_ticks = now; for (i = 0; i <= rxq->lro.conns_mask; ++i) { if (TAILQ_EMPTY(&rxq->lro.conns[i])) continue; c = TAILQ_LAST(&rxq->lro.conns[i], sfxge_lro_tailq); if (now - c->last_pkt_ticks > lro_idle_ticks) { ++rxq->lro.n_drop_idle; sfxge_lro_drop(rxq, c); } } } static void sfxge_lro_merge(struct sfxge_lro_state *st, struct sfxge_lro_conn *c, struct mbuf *mbuf, struct tcphdr *th) { struct tcphdr *c_th; /* Tack the new mbuf onto the chain. 
*/ KASSERT(!mbuf->m_next, ("mbuf already chained")); c->mbuf_tail->m_next = mbuf; c->mbuf_tail = mbuf; /* Increase length appropriately */ c->mbuf->m_pkthdr.len += mbuf->m_len; /* Update the connection state flags */ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = c->nh; iph->ip_len += mbuf->m_len; c_th = (struct tcphdr *)(iph + 1); } else { struct ip6_hdr *iph = c->nh; iph->ip6_plen += mbuf->m_len; c_th = (struct tcphdr *)(iph + 1); } c_th->th_flags |= (th->th_flags & TH_PUSH); c->th_last = th; ++st->n_merges; /* Pass packet up now if another segment could overflow the IP * length. */ if (c->mbuf->m_pkthdr.len > 65536 - 9200) sfxge_lro_deliver(st, c); } static void sfxge_lro_start(struct sfxge_lro_state *st, struct sfxge_lro_conn *c, struct mbuf *mbuf, void *nh, struct tcphdr *th) { /* Start the chain */ c->mbuf = mbuf; c->mbuf_tail = c->mbuf; c->nh = nh; c->th_last = th; mbuf->m_pkthdr.len = mbuf->m_len; /* Mangle header fields for later processing */ if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = nh; iph->ip_len = ntohs(iph->ip_len); } else { struct ip6_hdr *iph = nh; iph->ip6_plen = ntohs(iph->ip6_plen); } } /* Try to merge or otherwise hold or deliver (as appropriate) the * packet buffered for this connection (c->next_buf). Return a flag * indicating whether the connection is still active for LRO purposes. */ static int sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c) { struct sfxge_rx_sw_desc *rx_buf = &c->next_buf; char *eh = c->next_eh; int data_length, hdr_length, dont_merge; unsigned th_seq, pkt_length; struct tcphdr *th; unsigned now; if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *iph = c->next_nh; th = (struct tcphdr *)(iph + 1); pkt_length = ntohs(iph->ip_len) + (char *) iph - eh; } else { struct ip6_hdr *iph = c->next_nh; th = (struct tcphdr *)(iph + 1); pkt_length = ntohs(iph->ip6_plen) + (char *) th - eh; } hdr_length = (char *) th + th->th_off * 4 - eh; data_length = (min(pkt_length, rx_buf->size - rxq->sc->rx_prefix_size) - hdr_length); th_seq = ntohl(th->th_seq); dont_merge = ((data_length <= 0) | (th->th_flags & (TH_URG | TH_SYN | TH_RST | TH_FIN))); /* Check for options other than aligned timestamp. */ if (th->th_off != 5) { const uint32_t *opt_ptr = (const uint32_t *) (th + 1); if (th->th_off == 8 && opt_ptr[0] == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { /* timestamp option -- okay */ } else { dont_merge = 1; } } if (__predict_false(th_seq != c->next_seq)) { /* Out-of-order, so start counting again. */ if (c->mbuf != NULL) sfxge_lro_deliver(&rxq->lro, c); c->n_in_order_pkts -= lro_loss_packets; c->next_seq = th_seq + data_length; ++rxq->lro.n_misorder; goto deliver_buf_out; } c->next_seq = th_seq + data_length; now = ticks; if (now - c->last_pkt_ticks > lro_idle_ticks) { ++rxq->lro.n_drop_idle; if (c->mbuf != NULL) sfxge_lro_deliver(&rxq->lro, c); sfxge_lro_drop(rxq, c); return (0); } c->last_pkt_ticks = ticks; if (c->n_in_order_pkts < lro_slow_start_packets) { /* May be in slow-start, so don't merge. 
*/ ++rxq->lro.n_slow_start; ++c->n_in_order_pkts; goto deliver_buf_out; } if (__predict_false(dont_merge)) { if (c->mbuf != NULL) sfxge_lro_deliver(&rxq->lro, c); if (th->th_flags & (TH_FIN | TH_RST)) { ++rxq->lro.n_drop_closed; sfxge_lro_drop(rxq, c); return (0); } goto deliver_buf_out; } rx_buf->mbuf->m_data += rxq->sc->rx_prefix_size; if (__predict_true(c->mbuf != NULL)) { /* Remove headers and any padding */ rx_buf->mbuf->m_data += hdr_length; rx_buf->mbuf->m_len = data_length; sfxge_lro_merge(&rxq->lro, c, rx_buf->mbuf, th); } else { /* Remove any padding */ rx_buf->mbuf->m_len = pkt_length; sfxge_lro_start(&rxq->lro, c, rx_buf->mbuf, c->next_nh, th); } rx_buf->mbuf = NULL; return (1); deliver_buf_out: - sfxge_rx_deliver(rxq->sc, rx_buf); + sfxge_rx_deliver(rxq, rx_buf); return (1); } static void sfxge_lro_new_conn(struct sfxge_lro_state *st, uint32_t conn_hash, uint16_t l2_id, void *nh, struct tcphdr *th) { unsigned bucket = conn_hash & st->conns_mask; struct sfxge_lro_conn *c; if (st->conns_n[bucket] >= lro_chain_max) { ++st->n_too_many; return; } if (!TAILQ_EMPTY(&st->free_conns)) { c = TAILQ_FIRST(&st->free_conns); TAILQ_REMOVE(&st->free_conns, c, link); } else { c = malloc(sizeof(*c), M_SFXGE, M_NOWAIT); if (c == NULL) return; c->mbuf = NULL; c->next_buf.mbuf = NULL; } /* Create the connection tracking data */ ++st->conns_n[bucket]; TAILQ_INSERT_HEAD(&st->conns[bucket], c, link); c->l2_id = l2_id; c->conn_hash = conn_hash; c->source = th->th_sport; c->dest = th->th_dport; c->n_in_order_pkts = 0; c->last_pkt_ticks = *(volatile int *)&ticks; c->delivered = 0; ++st->n_new_stream; /* NB. We don't initialise c->next_seq, and it doesn't matter what * value it has. Most likely the next packet received for this * connection will not match -- no harm done. */ } /* Process mbuf and decide whether to dispatch it to the stack now or * later. */ static void sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf) { struct sfxge_softc *sc = rxq->sc; struct mbuf *m = rx_buf->mbuf; struct ether_header *eh; struct sfxge_lro_conn *c; uint16_t l2_id; uint16_t l3_proto; void *nh; struct tcphdr *th; uint32_t conn_hash; unsigned bucket; /* Get the hardware hash */ - conn_hash = efx_psuedo_hdr_hash_get(sc->enp, + conn_hash = efx_psuedo_hdr_hash_get(rxq->common, EFX_RX_HASHALG_TOEPLITZ, mtod(m, uint8_t *)); eh = (struct ether_header *)(m->m_data + sc->rx_prefix_size); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { struct ether_vlan_header *veh = (struct ether_vlan_header *)eh; l2_id = EVL_VLANOFTAG(ntohs(veh->evl_tag)) | SFXGE_LRO_L2_ID_VLAN; l3_proto = veh->evl_proto; nh = veh + 1; } else { l2_id = 0; l3_proto = eh->ether_type; nh = eh + 1; } /* Check whether this is a suitable packet (unfragmented * TCP/IPv4 or TCP/IPv6). If so, find the TCP header and * length, and compute a hash if necessary. If not, return. 
*/ if (l3_proto == htons(ETHERTYPE_IP)) { struct ip *iph = nh; KASSERT(iph->ip_p == IPPROTO_TCP, ("IPv4 protocol is not TCP, but packet marker is set")); if ((iph->ip_hl - (sizeof(*iph) >> 2u)) | (iph->ip_off & htons(IP_MF | IP_OFFMASK))) goto deliver_now; th = (struct tcphdr *)(iph + 1); } else if (l3_proto == htons(ETHERTYPE_IPV6)) { struct ip6_hdr *iph = nh; KASSERT(iph->ip6_nxt == IPPROTO_TCP, ("IPv6 next header is not TCP, but packet marker is set")); l2_id |= SFXGE_LRO_L2_ID_IPV6; th = (struct tcphdr *)(iph + 1); } else { goto deliver_now; } bucket = conn_hash & rxq->lro.conns_mask; TAILQ_FOREACH(c, &rxq->lro.conns[bucket], link) { if ((c->l2_id - l2_id) | (c->conn_hash - conn_hash)) continue; if ((c->source - th->th_sport) | (c->dest - th->th_dport)) continue; if (c->mbuf != NULL) { if (SFXGE_LRO_CONN_IS_TCPIPV4(c)) { struct ip *c_iph, *iph = nh; c_iph = c->nh; if ((c_iph->ip_src.s_addr - iph->ip_src.s_addr) | (c_iph->ip_dst.s_addr - iph->ip_dst.s_addr)) continue; } else { struct ip6_hdr *c_iph, *iph = nh; c_iph = c->nh; if (ipv6_addr_cmp(&c_iph->ip6_src, &iph->ip6_src) | ipv6_addr_cmp(&c_iph->ip6_dst, &iph->ip6_dst)) continue; } } /* Re-insert at head of list to reduce lookup time. */ TAILQ_REMOVE(&rxq->lro.conns[bucket], c, link); TAILQ_INSERT_HEAD(&rxq->lro.conns[bucket], c, link); if (c->next_buf.mbuf != NULL) { if (!sfxge_lro_try_merge(rxq, c)) goto deliver_now; } else { LIST_INSERT_HEAD(&rxq->lro.active_conns, c, active_link); } c->next_buf = *rx_buf; c->next_eh = eh; c->next_nh = nh; rx_buf->mbuf = NULL; rx_buf->flags = EFX_DISCARD; return; } sfxge_lro_new_conn(&rxq->lro, conn_hash, l2_id, nh, th); deliver_now: - sfxge_rx_deliver(sc, rx_buf); + sfxge_rx_deliver(rxq, rx_buf); } static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq) { struct sfxge_lro_state *st = &rxq->lro; struct sfxge_lro_conn *c; unsigned t; while (!LIST_EMPTY(&st->active_conns)) { c = LIST_FIRST(&st->active_conns); if (!c->delivered && c->mbuf != NULL) sfxge_lro_deliver(st, c); if (sfxge_lro_try_merge(rxq, c)) { if (c->mbuf != NULL) sfxge_lro_deliver(st, c); LIST_REMOVE(c, active_link); } c->delivered = 0; } t = *(volatile int *)&ticks; if (__predict_false(t != st->last_purge_ticks)) sfxge_lro_purge_idle(rxq, t); } #else /* !SFXGE_LRO */ static void sfxge_lro(struct sfxge_rxq *rxq, struct sfxge_rx_sw_desc *rx_buf) { } static void sfxge_lro_end_of_burst(struct sfxge_rxq *rxq) { } #endif /* SFXGE_LRO */ void sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop) { struct sfxge_softc *sc = rxq->sc; int if_capenable = sc->ifnet->if_capenable; int lro_enabled = if_capenable & IFCAP_LRO; unsigned int index; struct sfxge_evq *evq; unsigned int completed; unsigned int level; struct mbuf *m; struct sfxge_rx_sw_desc *prev = NULL; index = rxq->index; evq = sc->evq[index]; SFXGE_EVQ_LOCK_ASSERT_OWNED(evq); completed = rxq->completed; while (completed != rxq->pending) { unsigned int id; struct sfxge_rx_sw_desc *rx_desc; id = completed++ & rxq->ptr_mask; rx_desc = &rxq->queue[id]; m = rx_desc->mbuf; if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED)) goto discard; if (rx_desc->flags & (EFX_ADDR_MISMATCH | EFX_DISCARD)) goto discard; /* Read the length from the pseudo header if required */ if (rx_desc->flags & EFX_PKT_PREFIX_LEN) { uint16_t tmp_size; int rc; - rc = efx_psuedo_hdr_pkt_length_get(sc->enp, + rc = efx_psuedo_hdr_pkt_length_get(rxq->common, mtod(m, uint8_t *), &tmp_size); KASSERT(rc == 0, ("cannot get packet length: %d", rc)); rx_desc->size = (int)tmp_size + sc->rx_prefix_size; } 
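/*
 * Sketch of the length fix-up above (hypothetical numbers): on a NIC
 * that sets EFX_PKT_PREFIX_LEN, a 1514-byte frame reported behind a
 * 14-byte RX prefix gives tmp_size == 1514, so rx_desc->size becomes
 * 1514 + 14 == 1528, i.e. prefix plus frame as laid out in the mbuf;
 * sfxge_rx_deliver() later strips sc->rx_prefix_size again before the
 * packet is handed to the stack.
 */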
prefetch_read_many(mtod(m, caddr_t)); switch (rx_desc->flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) { case EFX_PKT_IPV4: if (~if_capenable & IFCAP_RXCSUM) rx_desc->flags &= ~(EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP); break; case EFX_PKT_IPV6: if (~if_capenable & IFCAP_RXCSUM_IPV6) rx_desc->flags &= ~EFX_CKSUM_TCPUDP; break; case 0: /* Check for loopback packets */ { struct ether_header *etherhp; /*LINTED*/ etherhp = mtod(m, struct ether_header *); if (etherhp->ether_type == htons(SFXGE_ETHERTYPE_LOOPBACK)) { EFSYS_PROBE(loopback); rxq->loopback++; goto discard; } } break; default: KASSERT(B_FALSE, ("Rx descriptor with both IPv4 and IPv6 flags")); goto discard; } /* Pass packet up the stack or into LRO (pipelined) */ if (prev != NULL) { if (lro_enabled && ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) == (EFX_PKT_TCP | EFX_CKSUM_TCPUDP))) sfxge_lro(rxq, prev); else - sfxge_rx_deliver(sc, prev); + sfxge_rx_deliver(rxq, prev); } prev = rx_desc; continue; discard: /* Return the packet to the pool */ m_free(m); rx_desc->mbuf = NULL; } rxq->completed = completed; level = rxq->added - rxq->completed; /* Pass last packet up the stack or into LRO */ if (prev != NULL) { if (lro_enabled && ((prev->flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) == (EFX_PKT_TCP | EFX_CKSUM_TCPUDP))) sfxge_lro(rxq, prev); else - sfxge_rx_deliver(sc, prev); + sfxge_rx_deliver(rxq, prev); } /* * If there are any pending flows and this is the end of the * poll then they must be completed. */ if (eop) sfxge_lro_end_of_burst(rxq); /* Top up the queue if necessary */ if (level < rxq->refill_threshold) sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(rxq->entries), B_FALSE); } static void sfxge_rx_qstop(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; struct sfxge_evq *evq; unsigned int count; unsigned int retry = 3; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); rxq = sc->rxq[index]; evq = sc->evq[index]; SFXGE_EVQ_LOCK(evq); KASSERT(rxq->init_state == SFXGE_RXQ_STARTED, ("rxq not started")); rxq->init_state = SFXGE_RXQ_INITIALIZED; callout_stop(&rxq->refill_callout); while (rxq->flush_state != SFXGE_FLUSH_DONE && retry != 0) { rxq->flush_state = SFXGE_FLUSH_PENDING; SFXGE_EVQ_UNLOCK(evq); /* Flush the receive queue */ if (efx_rx_qflush(rxq->common) != 0) { SFXGE_EVQ_LOCK(evq); rxq->flush_state = SFXGE_FLUSH_FAILED; break; } count = 0; do { /* Spin for 100 ms */ DELAY(100000); if (rxq->flush_state != SFXGE_FLUSH_PENDING) break; } while (++count < 20); SFXGE_EVQ_LOCK(evq); if (rxq->flush_state == SFXGE_FLUSH_PENDING) { /* Flush timeout - neither done nor failed */ log(LOG_ERR, "%s: Cannot flush Rx queue %u\n", device_get_nameunit(sc->dev), index); rxq->flush_state = SFXGE_FLUSH_DONE; } retry--; } if (rxq->flush_state == SFXGE_FLUSH_FAILED) { log(LOG_ERR, "%s: Flushing Rx queue %u failed\n", device_get_nameunit(sc->dev), index); rxq->flush_state = SFXGE_FLUSH_DONE; } rxq->pending = rxq->added; sfxge_rx_qcomplete(rxq, B_TRUE); KASSERT(rxq->completed == rxq->pending, ("rxq->completed != rxq->pending")); rxq->added = 0; rxq->pushed = 0; rxq->pending = 0; rxq->completed = 0; rxq->loopback = 0; /* Destroy the common code receive queue. 
*/ efx_rx_qdestroy(rxq->common); efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id, EFX_RXQ_NBUFS(sc->rxq_entries)); SFXGE_EVQ_UNLOCK(evq); } static int sfxge_rx_qstart(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; efsys_mem_t *esmp; struct sfxge_evq *evq; int rc; SFXGE_ADAPTER_LOCK_ASSERT_OWNED(sc); rxq = sc->rxq[index]; esmp = &rxq->mem; evq = sc->evq[index]; KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED, ("rxq->init_state != SFXGE_RXQ_INITIALIZED")); KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq->init_state != SFXGE_EVQ_STARTED")); /* Program the buffer table. */ if ((rc = efx_sram_buf_tbl_set(sc->enp, rxq->buf_base_id, esmp, EFX_RXQ_NBUFS(sc->rxq_entries))) != 0) return (rc); /* Create the common code receive queue. */ if ((rc = efx_rx_qcreate(sc->enp, index, 0, EFX_RXQ_TYPE_DEFAULT, esmp, sc->rxq_entries, rxq->buf_base_id, evq->common, &rxq->common)) != 0) goto fail; SFXGE_EVQ_LOCK(evq); /* Enable the receive queue. */ efx_rx_qenable(rxq->common); rxq->init_state = SFXGE_RXQ_STARTED; rxq->flush_state = SFXGE_FLUSH_REQUIRED; /* Try to fill the queue from the pool. */ sfxge_rx_qfill(rxq, EFX_RXQ_LIMIT(sc->rxq_entries), B_FALSE); SFXGE_EVQ_UNLOCK(evq); return (0); fail: efx_sram_buf_tbl_clear(sc->enp, rxq->buf_base_id, EFX_RXQ_NBUFS(sc->rxq_entries)); return (rc); } void sfxge_rx_stop(struct sfxge_softc *sc) { int index; efx_mac_filter_default_rxq_clear(sc->enp); /* Stop the receive queue(s) */ index = sc->rxq_count; while (--index >= 0) sfxge_rx_qstop(sc, index); sc->rx_prefix_size = 0; sc->rx_buffer_size = 0; efx_rx_fini(sc->enp); } int sfxge_rx_start(struct sfxge_softc *sc) { struct sfxge_intr *intr; const efx_nic_cfg_t *encp; size_t hdrlen, align, reserved; int index; int rc; intr = &sc->intr; /* Initialize the common code receive module. */ if ((rc = efx_rx_init(sc->enp)) != 0) return (rc); encp = efx_nic_cfg_get(sc->enp); sc->rx_buffer_size = EFX_MAC_PDU(sc->ifnet->if_mtu); /* Calculate the receive packet buffer size. */ sc->rx_prefix_size = encp->enc_rx_prefix_size; /* Ensure IP headers are 32bit aligned */ hdrlen = sc->rx_prefix_size + sizeof (struct ether_header); sc->rx_buffer_align = P2ROUNDUP(hdrlen, 4) - hdrlen; sc->rx_buffer_size += sc->rx_buffer_align; /* Align end of packet buffer for RX DMA end padding */ align = MAX(1, encp->enc_rx_buf_align_end); EFSYS_ASSERT(ISP2(align)); sc->rx_buffer_size = P2ROUNDUP(sc->rx_buffer_size, align); /* * Standard mbuf zones only guarantee pointer-size alignment; * we need extra space to align to the cache line */ reserved = sc->rx_buffer_size + CACHE_LINE_SIZE; /* Select zone for packet buffers */ if (reserved <= MCLBYTES) sc->rx_cluster_size = MCLBYTES; else if (reserved <= MJUMPAGESIZE) sc->rx_cluster_size = MJUMPAGESIZE; else if (reserved <= MJUM9BYTES) sc->rx_cluster_size = MJUM9BYTES; else sc->rx_cluster_size = MJUM16BYTES; /* * Set up the scale table. Enable all hash types and hash insertion. 
*/ for (index = 0; index < nitems(sc->rx_indir_table); index++) #ifdef RSS sc->rx_indir_table[index] = rss_get_indirection_to_bucket(index) % sc->rxq_count; #else sc->rx_indir_table[index] = index % sc->rxq_count; #endif if ((rc = efx_rx_scale_tbl_set(sc->enp, sc->rx_indir_table, nitems(sc->rx_indir_table))) != 0) goto fail; (void)efx_rx_scale_mode_set(sc->enp, EFX_RX_HASHALG_TOEPLITZ, (1 << EFX_RX_HASH_IPV4) | (1 << EFX_RX_HASH_TCPIPV4) | (1 << EFX_RX_HASH_IPV6) | (1 << EFX_RX_HASH_TCPIPV6), B_TRUE); #ifdef RSS rss_getkey(toep_key); #endif if ((rc = efx_rx_scale_key_set(sc->enp, toep_key, sizeof(toep_key))) != 0) goto fail; /* Start the receive queue(s). */ for (index = 0; index < sc->rxq_count; index++) { if ((rc = sfxge_rx_qstart(sc, index)) != 0) goto fail2; } rc = efx_mac_filter_default_rxq_set(sc->enp, sc->rxq[0]->common, sc->intr.n_alloc > 1); if (rc != 0) goto fail3; return (0); fail3: fail2: while (--index >= 0) sfxge_rx_qstop(sc, index); fail: efx_rx_fini(sc->enp); return (rc); } #ifdef SFXGE_LRO static void sfxge_lro_init(struct sfxge_rxq *rxq) { struct sfxge_lro_state *st = &rxq->lro; unsigned i; st->conns_mask = lro_table_size - 1; KASSERT(!((st->conns_mask + 1) & st->conns_mask), ("lro_table_size must be a power of 2")); st->sc = rxq->sc; st->conns = malloc((st->conns_mask + 1) * sizeof(st->conns[0]), M_SFXGE, M_WAITOK); st->conns_n = malloc((st->conns_mask + 1) * sizeof(st->conns_n[0]), M_SFXGE, M_WAITOK); for (i = 0; i <= st->conns_mask; ++i) { TAILQ_INIT(&st->conns[i]); st->conns_n[i] = 0; } LIST_INIT(&st->active_conns); TAILQ_INIT(&st->free_conns); } static void sfxge_lro_fini(struct sfxge_rxq *rxq) { struct sfxge_lro_state *st = &rxq->lro; struct sfxge_lro_conn *c; unsigned i; /* Return cleanly if sfxge_lro_init() has not been called. */ if (st->conns == NULL) return; KASSERT(LIST_EMPTY(&st->active_conns), ("found active connections")); for (i = 0; i <= st->conns_mask; ++i) { while (!TAILQ_EMPTY(&st->conns[i])) { c = TAILQ_LAST(&st->conns[i], sfxge_lro_tailq); sfxge_lro_drop(rxq, c); } } while (!TAILQ_EMPTY(&st->free_conns)) { c = TAILQ_FIRST(&st->free_conns); TAILQ_REMOVE(&st->free_conns, c, link); KASSERT(!c->mbuf, ("found orphaned mbuf")); free(c, M_SFXGE); } free(st->conns_n, M_SFXGE); free(st->conns, M_SFXGE); st->conns = NULL; } #else static void sfxge_lro_init(struct sfxge_rxq *rxq) { } static void sfxge_lro_fini(struct sfxge_rxq *rxq) { } #endif /* SFXGE_LRO */ static void sfxge_rx_qfini(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; rxq = sc->rxq[index]; KASSERT(rxq->init_state == SFXGE_RXQ_INITIALIZED, ("rxq->init_state != SFXGE_RXQ_INITIALIZED")); /* Free the context array and the flow table. */ free(rxq->queue, M_SFXGE); sfxge_lro_fini(rxq); /* Release DMA memory. */ sfxge_dma_free(&rxq->mem); sc->rxq[index] = NULL; free(rxq, M_SFXGE); } static int sfxge_rx_qinit(struct sfxge_softc *sc, unsigned int index) { struct sfxge_rxq *rxq; struct sfxge_evq *evq; efsys_mem_t *esmp; int rc; KASSERT(index < sc->rxq_count, ("index >= %d", sc->rxq_count)); rxq = malloc(sizeof(struct sfxge_rxq), M_SFXGE, M_ZERO | M_WAITOK); rxq->sc = sc; rxq->index = index; rxq->entries = sc->rxq_entries; rxq->ptr_mask = rxq->entries - 1; rxq->refill_threshold = RX_REFILL_THRESHOLD(rxq->entries); sc->rxq[index] = rxq; esmp = &rxq->mem; evq = sc->evq[index]; /* Allocate and zero DMA space. */ if ((rc = sfxge_dma_alloc(sc, EFX_RXQ_SIZE(sc->rxq_entries), esmp)) != 0) return (rc); /* Allocate buffer table entries. 
*/ sfxge_sram_buf_tbl_alloc(sc, EFX_RXQ_NBUFS(sc->rxq_entries), &rxq->buf_base_id); /* Allocate the context array and the flow table. */ rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries, M_SFXGE, M_WAITOK | M_ZERO); sfxge_lro_init(rxq); callout_init(&rxq->refill_callout, 1); rxq->init_state = SFXGE_RXQ_INITIALIZED; return (0); } static const struct { const char *name; size_t offset; } sfxge_rx_stats[] = { #define SFXGE_RX_STAT(name, member) \ { #name, offsetof(struct sfxge_rxq, member) } #ifdef SFXGE_LRO SFXGE_RX_STAT(lro_merges, lro.n_merges), SFXGE_RX_STAT(lro_bursts, lro.n_bursts), SFXGE_RX_STAT(lro_slow_start, lro.n_slow_start), SFXGE_RX_STAT(lro_misorder, lro.n_misorder), SFXGE_RX_STAT(lro_too_many, lro.n_too_many), SFXGE_RX_STAT(lro_new_stream, lro.n_new_stream), SFXGE_RX_STAT(lro_drop_idle, lro.n_drop_idle), SFXGE_RX_STAT(lro_drop_closed, lro.n_drop_closed) #endif }; static int sfxge_rx_stat_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; unsigned int id = arg2; unsigned int sum, index; /* Sum across all RX queues */ sum = 0; for (index = 0; index < sc->rxq_count; index++) sum += *(unsigned int *)((caddr_t)sc->rxq[index] + sfxge_rx_stats[id].offset); return (SYSCTL_OUT(req, &sum, sizeof(sum))); } static void sfxge_rx_stat_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid_list *stat_list; unsigned int id; stat_list = SYSCTL_CHILDREN(sc->stats_node); for (id = 0; id < nitems(sfxge_rx_stats); id++) { SYSCTL_ADD_PROC( ctx, stat_list, OID_AUTO, sfxge_rx_stats[id].name, CTLTYPE_UINT|CTLFLAG_RD, sc, id, sfxge_rx_stat_handler, "IU", ""); } } void sfxge_rx_fini(struct sfxge_softc *sc) { int index; index = sc->rxq_count; while (--index >= 0) sfxge_rx_qfini(sc, index); sc->rxq_count = 0; } int sfxge_rx_init(struct sfxge_softc *sc) { struct sfxge_intr *intr; int index; int rc; #ifdef SFXGE_LRO if (!ISP2(lro_table_size)) { log(LOG_ERR, "%s=%u must be power of 2", SFXGE_LRO_PARAM(table_size), lro_table_size); rc = EINVAL; goto fail_lro_table_size; } if (lro_idle_ticks == 0) lro_idle_ticks = hz / 10 + 1; /* 100 ms */ #endif intr = &sc->intr; sc->rxq_count = intr->n_alloc; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("intr->state != SFXGE_INTR_INITIALIZED")); /* Initialize the receive queue(s) - one per interrupt. */ for (index = 0; index < sc->rxq_count; index++) { if ((rc = sfxge_rx_qinit(sc, index)) != 0) goto fail; } sfxge_rx_stat_init(sc); return (0); fail: /* Tear down the receive queue(s). */ while (--index >= 0) sfxge_rx_qfini(sc, index); sc->rxq_count = 0; #ifdef SFXGE_LRO fail_lro_table_size: #endif return (rc); }
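/*
 * A worked example of the indirection-table fill in sfxge_rx_start()
 * (hypothetical queue count): with sc->rxq_count == 4, the non-RSS
 * branch writes the repeating pattern 0,1,2,3,0,1,2,3,... into
 * sc->rx_indir_table, so efx_rx_scale_tbl_set() spreads flows evenly
 * across the four queues; the RSS build instead honours
 * rss_get_indirection_to_bucket() (modulo the queue count) so the NIC
 * table matches the kernel's bucket mapping.
 */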