sys/dev/ixgbe/if_ix.c
[... 103 lines not shown ...]  (context: static pci_vendor_info_t ixgbe_vendor_info_array[] =)
     PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
     PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
     PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
     PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
     /* required last entry */
     PVID_END
 };
-static void *ixgbe_register(device_t dev);
+static void *ixgbe_register(device_t);
-static int ixgbe_if_attach_pre(if_ctx_t ctx);
+static int ixgbe_if_attach_pre(if_ctx_t);
-static int ixgbe_if_attach_post(if_ctx_t ctx);
+static int ixgbe_if_attach_post(if_ctx_t);
-static int ixgbe_if_detach(if_ctx_t ctx);
+static int ixgbe_if_detach(if_ctx_t);
-static int ixgbe_if_shutdown(if_ctx_t ctx);
+static int ixgbe_if_shutdown(if_ctx_t);
-static int ixgbe_if_suspend(if_ctx_t ctx);
+static int ixgbe_if_suspend(if_ctx_t);
-static int ixgbe_if_resume(if_ctx_t ctx);
+static int ixgbe_if_resume(if_ctx_t);
-static void ixgbe_if_stop(if_ctx_t ctx);
+static void ixgbe_if_stop(if_ctx_t);
-void ixgbe_if_enable_intr(if_ctx_t ctx);
+void ixgbe_if_enable_intr(if_ctx_t);
-static void ixgbe_if_disable_intr(if_ctx_t ctx);
+static void ixgbe_if_disable_intr(if_ctx_t);
-static void ixgbe_link_intr_enable(if_ctx_t ctx);
+static void ixgbe_link_intr_enable(if_ctx_t);
-static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid);
+static int ixgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
-static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr);
+static void ixgbe_if_media_status(if_ctx_t, struct ifmediareq *);
-static int ixgbe_if_media_change(if_ctx_t ctx);
+static int ixgbe_if_media_change(if_ctx_t);
 static int ixgbe_if_msix_intr_assign(if_ctx_t, int);
-static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
+static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
-static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip);
+static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
-static void ixgbe_if_multi_set(if_ctx_t ctx);
+static void ixgbe_if_multi_set(if_ctx_t);
-static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags);
+static int ixgbe_if_promisc_set(if_ctx_t, int);
-static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
-    uint64_t *paddrs, int nrxqs, int nrxqsets);
-static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
-    uint64_t *paddrs, int nrxqs, int nrxqsets);
-static void ixgbe_if_queues_free(if_ctx_t ctx);
-static void ixgbe_if_timer(if_ctx_t ctx, uint16_t);
-static void ixgbe_if_update_admin_status(if_ctx_t ctx);
-static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag);
-static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
-static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
-static bool ixgbe_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
-int ixgbe_intr(void *arg);
+static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static void ixgbe_if_queues_free(if_ctx_t);
+static void ixgbe_if_timer(if_ctx_t, uint16_t);
+static void ixgbe_if_update_admin_status(if_ctx_t);
+static void ixgbe_if_vlan_register(if_ctx_t, u16);
+static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
+static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
+static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
+int ixgbe_intr(void *);
 /************************************************************************
  * Function prototypes
  ************************************************************************/
-#if __FreeBSD_version >= 1100036
 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter);
-#endif
-static void ixgbe_enable_queue(struct adapter *adapter, u32 vector);
+static void ixgbe_enable_queue(struct ixgbe_softc *, u32);
-static void ixgbe_disable_queue(struct adapter *adapter, u32 vector);
+static void ixgbe_disable_queue(struct ixgbe_softc *, u32);
-static void ixgbe_add_device_sysctls(if_ctx_t ctx);
+static void ixgbe_add_device_sysctls(if_ctx_t);
-static int ixgbe_allocate_pci_resources(if_ctx_t ctx);
+static int ixgbe_allocate_pci_resources(if_ctx_t);
-static int ixgbe_setup_low_power_mode(if_ctx_t ctx);
+static int ixgbe_setup_low_power_mode(if_ctx_t);
-static void ixgbe_config_dmac(struct adapter *adapter);
+static void ixgbe_config_dmac(struct ixgbe_softc *);
-static void ixgbe_configure_ivars(struct adapter *adapter);
+static void ixgbe_configure_ivars(struct ixgbe_softc *);
-static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector,
-    s8 type);
+static void ixgbe_set_ivar(struct ixgbe_softc *, u8, u8, s8);
 static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
-static bool ixgbe_sfp_probe(if_ctx_t ctx);
+static bool ixgbe_sfp_probe(if_ctx_t);
-static void ixgbe_free_pci_resources(if_ctx_t ctx);
+static void ixgbe_free_pci_resources(if_ctx_t);
-static int ixgbe_msix_link(void *arg);
+static int ixgbe_msix_link(void *);
-static int ixgbe_msix_que(void *arg);
+static int ixgbe_msix_que(void *);
-static void ixgbe_initialize_rss_mapping(struct adapter *adapter);
+static void ixgbe_initialize_rss_mapping(struct ixgbe_softc *);
-static void ixgbe_initialize_receive_units(if_ctx_t ctx);
+static void ixgbe_initialize_receive_units(if_ctx_t);
-static void ixgbe_initialize_transmit_units(if_ctx_t ctx);
+static void ixgbe_initialize_transmit_units(if_ctx_t);
-static int ixgbe_setup_interface(if_ctx_t ctx);
+static int ixgbe_setup_interface(if_ctx_t);
-static void ixgbe_init_device_features(struct adapter *adapter);
+static void ixgbe_init_device_features(struct ixgbe_softc *);
-static void ixgbe_check_fan_failure(struct adapter *, u32, bool);
+static void ixgbe_check_fan_failure(struct ixgbe_softc *, u32, bool);
 static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *);
-static void ixgbe_print_fw_version(if_ctx_t ctx);
+static void ixgbe_print_fw_version(if_ctx_t);
-static void ixgbe_add_media_types(if_ctx_t ctx);
+static void ixgbe_add_media_types(if_ctx_t);
-static void ixgbe_update_stats_counters(struct adapter *adapter);
+static void ixgbe_update_stats_counters(struct ixgbe_softc *);
-static void ixgbe_config_link(if_ctx_t ctx);
+static void ixgbe_config_link(if_ctx_t);
-static void ixgbe_get_slot_info(struct adapter *);
+static void ixgbe_get_slot_info(struct ixgbe_softc *);
-static void ixgbe_check_wol_support(struct adapter *adapter);
+static void ixgbe_check_wol_support(struct ixgbe_softc *);
-static void ixgbe_enable_rx_drop(struct adapter *);
+static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
-static void ixgbe_disable_rx_drop(struct adapter *);
+static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
-static void ixgbe_add_hw_stats(struct adapter *adapter);
+static void ixgbe_add_hw_stats(struct ixgbe_softc *);
-static int ixgbe_set_flowcntl(struct adapter *, int);
+static int ixgbe_set_flowcntl(struct ixgbe_softc *, int);
-static int ixgbe_set_advertise(struct adapter *, int);
+static int ixgbe_set_advertise(struct ixgbe_softc *, int);
-static int ixgbe_get_advertise(struct adapter *);
+static int ixgbe_get_advertise(struct ixgbe_softc *);
-static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx);
+static void ixgbe_setup_vlan_hw_support(if_ctx_t);
-static void ixgbe_config_gpie(struct adapter *adapter);
+static void ixgbe_config_gpie(struct ixgbe_softc *);
-static void ixgbe_config_delay_values(struct adapter *adapter);
+static void ixgbe_config_delay_values(struct ixgbe_softc *);
 /* Sysctl handlers */
 static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
 static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
 static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS);
 static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS);
 static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS);
 static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS);
[... 31 lines not shown ...]  (context: #ifdef PCI_IOV)
     DEVMETHOD(pci_iov_init, iflib_device_iov_init),
     DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
     DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
 #endif /* PCI_IOV */
     DEVMETHOD_END
 };
 static driver_t ix_driver = {
-    "ix", ix_methods, sizeof(struct adapter),
+    "ix", ix_methods, sizeof(struct ixgbe_softc),
 };
 devclass_t ix_devclass;
 DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0);
 IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array);
 MODULE_DEPEND(ix, pci, 1, 1, 1);
 MODULE_DEPEND(ix, ether, 1, 1, 1);
 MODULE_DEPEND(ix, iflib, 1, 1, 1);
[... 39 lines not shown ...]
 /*
  * TUNEABLE PARAMETERS:
  */
 static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
     "IXGBE driver parameters");
 static driver_t ixgbe_if_driver = {
-    "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter)
+    "ixgbe_if", ixgbe_if_methods, sizeof(struct ixgbe_softc)
 };
 static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY);
 SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
     &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second");
 /* Flow control setting, default to full */
 static int ixgbe_flow_control = ixgbe_fc_full;
[... 102 lines not shown ...]
 /************************************************************************
  * ixgbe_if_tx_queues_alloc
  ************************************************************************/
 static int
 ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
     int ntxqs, int ntxqsets)
 {
-    struct adapter *adapter = iflib_get_softc(ctx);
+    struct ixgbe_softc *sc = iflib_get_softc(ctx);
-    if_softc_ctx_t scctx = adapter->shared;
+    if_softc_ctx_t scctx = sc->shared;
     struct ix_tx_queue *que;
     int i, j, error;
-    MPASS(adapter->num_tx_queues > 0);
+    MPASS(sc->num_tx_queues > 0);
-    MPASS(adapter->num_tx_queues == ntxqsets);
+    MPASS(sc->num_tx_queues == ntxqsets);
     MPASS(ntxqs == 1);
     /* Allocate queue structure memory */
-    adapter->tx_queues =
+    sc->tx_queues =
         (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
         M_IXGBE, M_NOWAIT | M_ZERO);
-    if (!adapter->tx_queues) {
+    if (!sc->tx_queues) {
         device_printf(iflib_get_dev(ctx),
             "Unable to allocate TX ring memory\n");
         return (ENOMEM);
     }
-    for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) {
+    for (i = 0, que = sc->tx_queues; i < ntxqsets; i++, que++) {
         struct tx_ring *txr = &que->txr;
         /* In case SR-IOV is enabled, align the index properly */
-        txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
+        txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
             i);
-        txr->adapter = que->adapter = adapter;
+        txr->sc = que->sc = sc;
         /* Allocate report status array */
         txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
         if (txr->tx_rsq == NULL) {
             error = ENOMEM;
             goto fail;
         }
         for (j = 0; j < scctx->isc_ntxd[0]; j++)
             txr->tx_rsq[j] = QIDX_INVALID;
         /* get the virtual and physical address of the hardware queues */
         txr->tail = IXGBE_TDT(txr->me);
         txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
         txr->tx_paddr = paddrs[i];
         txr->bytes = 0;
         txr->total_packets = 0;
         /* Set the rate at which we sample packets */
-        if (adapter->feat_en & IXGBE_FEATURE_FDIR)
+        if (sc->feat_en & IXGBE_FEATURE_FDIR)
             txr->atr_sample = atr_sample_rate;
     }
     device_printf(iflib_get_dev(ctx), "allocated for %d queues\n",
-        adapter->num_tx_queues);
+        sc->num_tx_queues);
     return (0);
 fail:
     ixgbe_if_queues_free(ctx);
     return (error);
 } /* ixgbe_if_tx_queues_alloc */
 /************************************************************************
  * ixgbe_if_rx_queues_alloc
  ************************************************************************/
 static int
 ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
     int nrxqs, int nrxqsets)
 {
-    struct adapter *adapter = iflib_get_softc(ctx);
+    struct ixgbe_softc *sc = iflib_get_softc(ctx);
     struct ix_rx_queue *que;
     int i;
-    MPASS(adapter->num_rx_queues > 0);
+    MPASS(sc->num_rx_queues > 0);
-    MPASS(adapter->num_rx_queues == nrxqsets);
+    MPASS(sc->num_rx_queues == nrxqsets);
     MPASS(nrxqs == 1);
     /* Allocate queue structure memory */
-    adapter->rx_queues =
+    sc->rx_queues =
         (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
         M_IXGBE, M_NOWAIT | M_ZERO);
-    if (!adapter->rx_queues) {
+    if (!sc->rx_queues) {
         device_printf(iflib_get_dev(ctx),
             "Unable to allocate TX ring memory\n");
         return (ENOMEM);
     }
-    for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
+    for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
         struct rx_ring *rxr = &que->rxr;
         /* In case SR-IOV is enabled, align the index properly */
-        rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool,
+        rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
             i);
-        rxr->adapter = que->adapter = adapter;
+        rxr->sc = que->sc = sc;
         /* get the virtual and physical address of the hw queues */
         rxr->tail = IXGBE_RDT(rxr->me);
         rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i];
         rxr->rx_paddr = paddrs[i];
         rxr->bytes = 0;
         rxr->que = que;
     }
     device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n",
-        adapter->num_rx_queues);
+        sc->num_rx_queues);
     return (0);
 } /* ixgbe_if_rx_queues_alloc */
 /************************************************************************
  * ixgbe_if_queues_free
  ************************************************************************/
 static void
 ixgbe_if_queues_free(if_ctx_t ctx)
 {
-    struct adapter *adapter = iflib_get_softc(ctx);
+    struct ixgbe_softc *sc = iflib_get_softc(ctx);
-    struct ix_tx_queue *tx_que = adapter->tx_queues;
+    struct ix_tx_queue *tx_que = sc->tx_queues;
-    struct ix_rx_queue *rx_que = adapter->rx_queues;
+    struct ix_rx_queue *rx_que = sc->rx_queues;
     int i;
     if (tx_que != NULL) {
-        for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) {
+        for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
             struct tx_ring *txr = &tx_que->txr;
             if (txr->tx_rsq == NULL)
                 break;
             free(txr->tx_rsq, M_IXGBE);
             txr->tx_rsq = NULL;
         }
-        free(adapter->tx_queues, M_IXGBE);
+        free(sc->tx_queues, M_IXGBE);
-        adapter->tx_queues = NULL;
+        sc->tx_queues = NULL;
     }
     if (rx_que != NULL) {
-        free(adapter->rx_queues, M_IXGBE);
+        free(sc->rx_queues, M_IXGBE);
-        adapter->rx_queues = NULL;
+        sc->rx_queues = NULL;
     }
 } /* ixgbe_if_queues_free */
 /************************************************************************
  * ixgbe_initialize_rss_mapping
  ************************************************************************/
 static void
-ixgbe_initialize_rss_mapping(struct adapter *adapter)
+ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
 {
-    struct ixgbe_hw *hw = &adapter->hw;
+    struct ixgbe_hw *hw = &sc->hw;
     u32 reta = 0, mrqc, rss_key[10];
     int queue_id, table_size, index_mult;
     int i, j;
     u32 rss_hash_config;
-    if (adapter->feat_en & IXGBE_FEATURE_RSS) {
+    if (sc->feat_en & IXGBE_FEATURE_RSS) {
         /* Fetch the configured RSS key */
         rss_getkey((uint8_t *)&rss_key);
     } else {
         /* set up random bits */
         arc4rand(&rss_key, sizeof(rss_key), 0);
     }
     /* Set multiplier for RETA setup and table size based on MAC */
     index_mult = 0x1;
     table_size = 128;
-    switch (adapter->hw.mac.type) {
+    switch (sc->hw.mac.type) {
     case ixgbe_mac_82598EB:
         index_mult = 0x11;
         break;
     case ixgbe_mac_X550:
     case ixgbe_mac_X550EM_x:
     case ixgbe_mac_X550EM_a:
         table_size = 512;
         break;
     default:
         break;
     }
     /* Set up the redirection table */
     for (i = 0, j = 0; i < table_size; i++, j++) {
-        if (j == adapter->num_rx_queues)
+        if (j == sc->num_rx_queues)
             j = 0;
-        if (adapter->feat_en & IXGBE_FEATURE_RSS) {
+        if (sc->feat_en & IXGBE_FEATURE_RSS) {
             /*
              * Fetch the RSS bucket id for the given indirection
              * entry. Cap it at the number of configured buckets
              * (which is num_rx_queues.)
              */
             queue_id = rss_get_indirection_to_bucket(i);
-            queue_id = queue_id % adapter->num_rx_queues;
+            queue_id = queue_id % sc->num_rx_queues;
         } else
             queue_id = (j * index_mult);
         /*
          * The low 8 bits are for hash value (n+0);
          * The next 8 bits are for hash value (n+1), etc.
          */
         reta = reta >> 8;
         reta = reta | (((uint32_t)queue_id) << 24);
         if ((i & 3) == 3) {
             if (i < 128)
                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
             else
                 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
                     reta);
             reta = 0;
         }
     }
     /* Now fill our hash function seeds */
     for (i = 0; i < 10; i++)
         IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
     /* Perform hash on these packet types */
-    if (adapter->feat_en & IXGBE_FEATURE_RSS)
+    if (sc->feat_en & IXGBE_FEATURE_RSS)
         rss_hash_config = rss_gethashconfig();
     else {
         /*
          * Disable UDP - IP fragments aren't currently being handled
          * and so we end up with a mix of 2-tuple and 4-tuple
          * traffic.
          */
         rss_hash_config = RSS_HASHTYPE_RSS_IPV4
[... 18 lines not shown ...]  (context: ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc))
     if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
     if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
     if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
     if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
         mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
-    mrqc |= ixgbe_get_mrqc(adapter->iov_mode);
+    mrqc |= ixgbe_get_mrqc(sc->iov_mode);
     IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 } /* ixgbe_initialize_rss_mapping */
 /************************************************************************
  * ixgbe_initialize_receive_units - Setup receive registers and features.
  ************************************************************************/
 #define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
 static void
 ixgbe_initialize_receive_units(if_ctx_t ctx)
 {
-    struct adapter *adapter = iflib_get_softc(ctx);
+    struct ixgbe_softc *sc = iflib_get_softc(ctx);
-    if_softc_ctx_t scctx = adapter->shared;
+    if_softc_ctx_t scctx = sc->shared;
-    struct ixgbe_hw *hw = &adapter->hw;
+    struct ixgbe_hw *hw = &sc->hw;
     struct ifnet *ifp = iflib_get_ifp(ctx);
     struct ix_rx_queue *que;
     int i, j;
     u32 bufsz, fctrl, srrctl, rxcsum;
     u32 hlreg;
     /*
      * Make sure receives are disabled while
      * setting up the descriptor ring
      */
     ixgbe_disable_rx(hw);
     /* Enable broadcasts */
     fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
     fctrl |= IXGBE_FCTRL_BAM;
-    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
         fctrl |= IXGBE_FCTRL_DPF;
         fctrl |= IXGBE_FCTRL_PMCF;
     }
     IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
     /* Set for Jumbo Frames? */
     hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
     if (ifp->if_mtu > ETHERMTU)
         hlreg |= IXGBE_HLREG0_JUMBOEN;
     else
         hlreg &= ~IXGBE_HLREG0_JUMBOEN;
     IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
-    bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
+    bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
         IXGBE_SRRCTL_BSIZEPKT_SHIFT;
     /* Setup the Base and Length of the Rx Descriptor Ring */
-    for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) {
+    for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
         struct rx_ring *rxr = &que->rxr;
         u64 rdba = rxr->rx_paddr;
         j = rxr->me;
         /* Setup the Base and Length of the Rx Descriptor Ring */
         IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j),
             (rdba & 0x00000000ffffffffULL));
[... 9 lines not shown ...]  (context: for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {)
         srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
         /*
          * Set DROP_EN iff we have no flow control and >1 queue.
          * Note that srrctl was cleared shortly before during reset,
          * so we do not need to clear the bit, but do it just in case
          * this code is moved elsewhere.
          */
-        if (adapter->num_rx_queues > 1 &&
+        if (sc->num_rx_queues > 1 &&
-            adapter->hw.fc.requested_mode == ixgbe_fc_none) {
+            sc->hw.fc.requested_mode == ixgbe_fc_none) {
             srrctl |= IXGBE_SRRCTL_DROP_EN;
         } else {
             srrctl &= ~IXGBE_SRRCTL_DROP_EN;
         }
         IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl);
         /* Setup the HW Rx Head and Tail Descriptor Pointers */
         IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
         IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
         /* Set the driver rx tail address */
         rxr->tail = IXGBE_RDT(rxr->me);
     }
-    if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
+    if (sc->hw.mac.type != ixgbe_mac_82598EB) {
         u32 psrtype = IXGBE_PSRTYPE_TCPHDR
             | IXGBE_PSRTYPE_UDPHDR
             | IXGBE_PSRTYPE_IPV4HDR
             | IXGBE_PSRTYPE_IPV6HDR;
         IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
     }
     rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
-    ixgbe_initialize_rss_mapping(adapter);
+    ixgbe_initialize_rss_mapping(sc);
-    if (adapter->num_rx_queues > 1) {
+    if (sc->num_rx_queues > 1) {
         /* RSS and RX IPP Checksum are mutually exclusive */
         rxcsum |= IXGBE_RXCSUM_PCSD;
     }
     if (ifp->if_capenable & IFCAP_RXCSUM)
         rxcsum |= IXGBE_RXCSUM_PCSD;
     /* This is useful for calculating UDP/IP fragment checksums */
     if (!(rxcsum & IXGBE_RXCSUM_PCSD))
         rxcsum |= IXGBE_RXCSUM_IPPCSE;
     IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 } /* ixgbe_initialize_receive_units */
 /************************************************************************
  * ixgbe_initialize_transmit_units - Enable transmit units.
  ************************************************************************/
 static void
 ixgbe_initialize_transmit_units(if_ctx_t ctx)
 {
-    struct adapter *adapter = iflib_get_softc(ctx);
+    struct ixgbe_softc *sc = iflib_get_softc(ctx);
-    struct ixgbe_hw *hw = &adapter->hw;
+    struct ixgbe_hw *hw = &sc->hw;
-    if_softc_ctx_t scctx = adapter->shared;
+    if_softc_ctx_t scctx = sc->shared;
     struct ix_tx_queue *que;
     int i;
     /* Setup the Base and Length of the Tx Descriptor Ring */
-    for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues;
+    for (i = 0, que = sc->tx_queues; i < sc->num_tx_queues;
         i++, que++) {
         struct tx_ring *txr = &que->txr;
         u64 tdba = txr->tx_paddr;
         u32 txctrl = 0;
         int j = txr->me;
         IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
             (tdba & 0x00000000ffffffffULL));
[... 45 lines not shown ...]  (context: if (hw->mac.type != ixgbe_mac_82598EB) {)
         dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
         dmatxctl |= IXGBE_DMATXCTL_TE;
         IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
         /* Disable arbiter to set MTQC */
         rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
         rttdcs |= IXGBE_RTTDCS_ARBDIS;
         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
         IXGBE_WRITE_REG(hw, IXGBE_MTQC,
-            ixgbe_get_mtqc(adapter->iov_mode));
+            ixgbe_get_mtqc(sc->iov_mode));
         rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
         IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
     }
 } /* ixgbe_initialize_transmit_units */
 /************************************************************************
  * ixgbe_register
[... 11 lines not shown ...]
  * Identifies the type of hardware, initializes the hardware,
  * and initializes iflib structures.
  *
  * return 0 on success, positive on failure
  ************************************************************************/
 static int
 ixgbe_if_attach_pre(if_ctx_t ctx)
 {
-    struct adapter *adapter;
+    struct ixgbe_softc *sc;
     device_t dev;
     if_softc_ctx_t scctx;
     struct ixgbe_hw *hw;
     int error = 0;
     u32 ctrl_ext;
     INIT_DEBUGOUT("ixgbe_attach: begin");
     /* Allocate, clear, and link in our adapter structure */
     dev = iflib_get_dev(ctx);
-    adapter = iflib_get_softc(ctx);
+    sc = iflib_get_softc(ctx);
-    adapter->hw.back = adapter;
+    sc->hw.back = sc;
-    adapter->ctx = ctx;
+    sc->ctx = ctx;
-    adapter->dev = dev;
+    sc->dev = dev;
-    scctx = adapter->shared = iflib_get_softc_ctx(ctx);
+    scctx = sc->shared = iflib_get_softc_ctx(ctx);
-    adapter->media = iflib_get_media(ctx);
+    sc->media = iflib_get_media(ctx);
-    hw = &adapter->hw;
+    hw = &sc->hw;
     /* Determine hardware revision */
     hw->vendor_id = pci_get_vendor(dev);
     hw->device_id = pci_get_device(dev);
     hw->revision_id = pci_get_revid(dev);
     hw->subsystem_vendor_id = pci_get_subvendor(dev);
     hw->subsystem_device_id = pci_get_subdevice(dev);
[... 20 lines not shown ...]  (context: ixgbe_if_attach_pre(if_ctx_t ctx))
     if (hw->mbx.ops.init_params)
         hw->mbx.ops.init_params(hw);
     hw->allow_unsupported_sfp = allow_unsupported_sfp;
     if (hw->mac.type != ixgbe_mac_82598EB)
         hw->phy.smart_speed = ixgbe_smart_speed;
-    ixgbe_init_device_features(adapter);
+    ixgbe_init_device_features(sc);
     /* Enable WoL (if supported) */
-    ixgbe_check_wol_support(adapter);
+    ixgbe_check_wol_support(sc);
     /* Verify adapter fan is still functional (if applicable) */
-    if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) {
+    if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
         u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
-        ixgbe_check_fan_failure(adapter, esdp, false);
+        ixgbe_check_fan_failure(sc, esdp, false);
     }
     /* Ensure SW/FW semaphore is free */
     ixgbe_init_swfw_semaphore(hw);
     /* Set an initial default flow control value */
     hw->fc.requested_mode = ixgbe_flow_control;
     hw->phy.reset_if_overtemp = true;
     error = ixgbe_reset_hw(hw);
     hw->phy.reset_if_overtemp = false;
     if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
         /*
          * No optics in this port, set up
          * so the timer routine will probe
          * for later insertion.
          */
-        adapter->sfp_probe = true;
+        sc->sfp_probe = true;
         error = 0;
     } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
         device_printf(dev, "Unsupported SFP+ module detected!\n");
         error = EIO;
         goto err_pci;
     } else if (error) {
         device_printf(dev, "Hardware initialization failed\n");
         error = EIO;
         goto err_pci;
     }
     /* Make sure we have a good EEPROM before we read from it */
-    if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) {
+    if (ixgbe_validate_eeprom_checksum(&sc->hw, NULL) < 0) {
         device_printf(dev, "The EEPROM Checksum Is Not Valid\n");
         error = EIO;
         goto err_pci;
     }
     error = ixgbe_start_hw(hw);
     switch (error) {
     case IXGBE_ERR_EEPROM_VERSION:
         device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
         break;
     case IXGBE_ERR_SFP_NOT_SUPPORTED:
         device_printf(dev, "Unsupported SFP+ Module\n");
         error = EIO;
         goto err_pci;
     case IXGBE_ERR_SFP_NOT_PRESENT:
         device_printf(dev, "No SFP+ Module found\n");
         /* falls thru */
     default:
         break;
     }
     /* Most of the iflib initialization... */
     iflib_set_mac(ctx, hw->mac.addr);
-    switch (adapter->hw.mac.type) {
+    switch (sc->hw.mac.type) {
     case ixgbe_mac_X550:
     case ixgbe_mac_X550EM_x:
     case ixgbe_mac_X550EM_a:
         scctx->isc_rss_table_size = 512;
         scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
         break;
     default:
         scctx->isc_rss_table_size = 128;
         scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16;
     }
     /* Allow legacy interrupts */
     ixgbe_txrx.ift_legacy_intr = ixgbe_intr;
     scctx->isc_txqsizes[0] =
         roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) +
         sizeof(u32), DBA_ALIGN),
     scctx->isc_rxqsizes[0] =
         roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc),
         DBA_ALIGN);
     /* XXX */
     scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
         CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO;
-    if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+    if (sc->hw.mac.type == ixgbe_mac_82598EB) {
         scctx->isc_tx_nsegments = IXGBE_82598_SCATTER;
     } else {
         scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP;
         scctx->isc_tx_nsegments = IXGBE_82599_SCATTER;
     }
     scctx->isc_msix_bar = pci_msix_table_bar(dev);
     scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments;
     scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE;
     scctx->isc_tx_tso_segsize_max = PAGE_SIZE;
     scctx->isc_txrx = &ixgbe_txrx;
     scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS;
     return (0);
 err_pci:
-    ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+    ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
     ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
-    IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
+    IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
     ixgbe_free_pci_resources(ctx);
     return (error);
 } /* ixgbe_if_attach_pre */
 /*********************************************************************
  * ixgbe_if_attach_post - Device initialization routine, part 2
  *
  * Called during driver load, but after interrupts and
  * resources have been allocated and configured.
  * Sets up some data structures not relevant to iflib.
  *
  * return 0 on success, positive on failure
  *********************************************************************/
 static int
 ixgbe_if_attach_post(if_ctx_t ctx)
 {
     device_t dev;
-    struct adapter *adapter;
+    struct ixgbe_softc *sc;
     struct ixgbe_hw *hw;
     int error = 0;
     dev = iflib_get_dev(ctx);
-    adapter = iflib_get_softc(ctx);
+    sc = iflib_get_softc(ctx);
-    hw = &adapter->hw;
+    hw = &sc->hw;
-    if (adapter->intr_type == IFLIB_INTR_LEGACY &&
+    if (sc->intr_type == IFLIB_INTR_LEGACY &&
-        (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
+        (sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
         device_printf(dev, "Device does not support legacy interrupts");
         error = ENXIO;
         goto err;
     }
     /* Allocate multicast array memory. */
-    adapter->mta = malloc(sizeof(*adapter->mta) *
+    sc->mta = malloc(sizeof(*sc->mta) *
         MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
-    if (adapter->mta == NULL) {
+    if (sc->mta == NULL) {
         device_printf(dev, "Can not allocate multicast setup array\n");
         error = ENOMEM;
         goto err;
     }
     /* hw.ix defaults init */
-    ixgbe_set_advertise(adapter, ixgbe_advertise_speed);
+    ixgbe_set_advertise(sc, ixgbe_advertise_speed);
     /* Enable the optics for 82599 SFP+ fiber */
     ixgbe_enable_tx_laser(hw);
     /* Enable power to the phy. */
     ixgbe_set_phy_power(hw, true);
-    ixgbe_initialize_iov(adapter);
+    ixgbe_initialize_iov(sc);
     error = ixgbe_setup_interface(ctx);
     if (error) {
         device_printf(dev, "Interface setup failed: %d\n", error);
         goto err;
     }
     ixgbe_if_update_admin_status(ctx);
     /* Initialize statistics */
-    ixgbe_update_stats_counters(adapter);
+    ixgbe_update_stats_counters(sc);
-    ixgbe_add_hw_stats(adapter);
+    ixgbe_add_hw_stats(sc);
     /* Check PCIE slot type/speed/width */
-    ixgbe_get_slot_info(adapter);
+    ixgbe_get_slot_info(sc);
     /*
      * Do time init and sysctl init here, but
-     * only on the first port of a bypass adapter.
+     * only on the first port of a bypass sc.
      */
-    ixgbe_bypass_init(adapter);
+    ixgbe_bypass_init(sc);
     /* Display NVM and Option ROM versions */
     ixgbe_print_fw_version(ctx);
     /* Set an initial dmac value */
-    adapter->dmac = 0;
+    sc->dmac = 0;
     /* Set initial advertised speeds (if applicable) */
-    adapter->advertise = ixgbe_get_advertise(adapter);
+    sc->advertise = ixgbe_get_advertise(sc);
-    if (adapter->feat_cap & IXGBE_FEATURE_SRIOV)
+    if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
         ixgbe_define_iov_schemas(dev, &error);
     /* Add sysctls */
     ixgbe_add_device_sysctls(ctx);
     return (0);
 err:
     return (error);
 } /* ixgbe_if_attach_post */
 /************************************************************************
  * ixgbe_check_wol_support
  *
  * Checks whether the adapter's ports are capable of
  * Wake On LAN by reading the adapter's NVM.
  *
  * Sets each port's hw->wol_enabled value depending
  * on the value read here.
  ************************************************************************/
 static void
-ixgbe_check_wol_support(struct adapter *adapter)
+ixgbe_check_wol_support(struct ixgbe_softc *sc)
 {
-    struct ixgbe_hw *hw = &adapter->hw;
+    struct ixgbe_hw *hw = &sc->hw;
     u16 dev_caps = 0;
     /* Find out WoL support for port */
-    adapter->wol_support = hw->wol_enabled = 0;
+    sc->wol_support = hw->wol_enabled = 0;
     ixgbe_get_device_caps(hw, &dev_caps);
     if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
         ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) &&
         hw->bus.func == 0))
-        adapter->wol_support = hw->wol_enabled = 1;
+        sc->wol_support = hw->wol_enabled = 1;
     /* Save initial wake up filter configuration */
-    adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
+    sc->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC);
     return;
 } /* ixgbe_check_wol_support */
 /************************************************************************
  * ixgbe_setup_interface
  *
  * Setup networking device structure and register an interface.
  ************************************************************************/
 static int
 ixgbe_setup_interface(if_ctx_t ctx)
 {
     struct ifnet *ifp = iflib_get_ifp(ctx);
-    struct adapter *adapter = iflib_get_softc(ctx);
+    struct ixgbe_softc *sc = iflib_get_softc(ctx);
     INIT_DEBUGOUT("ixgbe_setup_interface: begin");
     if_setbaudrate(ifp, IF_Gbps(10));
-    adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+    sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
-    adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw);
+    sc->phy_layer = ixgbe_get_supported_physical_layer(&sc->hw);
     ixgbe_add_media_types(ctx);
     /* Autoselect media by default */
-    ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
+    ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
     return (0);
 } /* ixgbe_setup_interface */
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_if_get_counter | * ixgbe_if_get_counter | ||||
************************************************************************/ | ************************************************************************/ | ||||
static uint64_t | static uint64_t | ||||
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) | ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) | ||||
{ | { | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
if_t ifp = iflib_get_ifp(ctx); | if_t ifp = iflib_get_ifp(ctx); | ||||
switch (cnt) { | switch (cnt) { | ||||
case IFCOUNTER_IPACKETS: | case IFCOUNTER_IPACKETS: | ||||
return (adapter->ipackets); | return (sc->ipackets); | ||||
case IFCOUNTER_OPACKETS: | case IFCOUNTER_OPACKETS: | ||||
return (adapter->opackets); | return (sc->opackets); | ||||
case IFCOUNTER_IBYTES: | case IFCOUNTER_IBYTES: | ||||
return (adapter->ibytes); | return (sc->ibytes); | ||||
case IFCOUNTER_OBYTES: | case IFCOUNTER_OBYTES: | ||||
return (adapter->obytes); | return (sc->obytes); | ||||
case IFCOUNTER_IMCASTS: | case IFCOUNTER_IMCASTS: | ||||
return (adapter->imcasts); | return (sc->imcasts); | ||||
case IFCOUNTER_OMCASTS: | case IFCOUNTER_OMCASTS: | ||||
return (adapter->omcasts); | return (sc->omcasts); | ||||
case IFCOUNTER_COLLISIONS: | case IFCOUNTER_COLLISIONS: | ||||
return (0); | return (0); | ||||
case IFCOUNTER_IQDROPS: | case IFCOUNTER_IQDROPS: | ||||
return (adapter->iqdrops); | return (sc->iqdrops); | ||||
case IFCOUNTER_OQDROPS: | case IFCOUNTER_OQDROPS: | ||||
return (0); | return (0); | ||||
case IFCOUNTER_IERRORS: | case IFCOUNTER_IERRORS: | ||||
return (adapter->ierrors); | return (sc->ierrors); | ||||
default: | default: | ||||
return (if_get_counter_default(ifp, cnt)); | return (if_get_counter_default(ifp, cnt)); | ||||
} | } | ||||
} /* ixgbe_if_get_counter */ | } /* ixgbe_if_get_counter */ | ||||
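These counters back the standard per-interface statistics that netstat(1) and ifconfig(8) report. A hedged userland sketch of reading them back through getifaddrs(3); the interface name "ix0" is only an example:

/*
 * Hedged sketch (userland, not driver code): reading the same counters that
 * ixgbe_if_get_counter() feeds, via getifaddrs(3).  "ix0" is a hypothetical
 * interface name.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <net/if.h>
#include <ifaddrs.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct ifaddrs *ifap, *ifa;

	if (getifaddrs(&ifap) != 0)
		return (1);
	for (ifa = ifap; ifa != NULL; ifa = ifa->ifa_next) {
		if (ifa->ifa_addr == NULL ||
		    ifa->ifa_addr->sa_family != AF_LINK ||
		    strcmp(ifa->ifa_name, "ix0") != 0)
			continue;
		/* For AF_LINK entries, ifa_data points at struct if_data. */
		struct if_data *ifd = ifa->ifa_data;
		printf("ipackets %ju ierrors %ju\n",
		    (uintmax_t)ifd->ifi_ipackets,
		    (uintmax_t)ifd->ifi_ierrors);
	}
	freeifaddrs(ifap);
	return (0);
}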
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_if_i2c_req | * ixgbe_if_i2c_req | ||||
************************************************************************/ | ************************************************************************/ | ||||
static int | static int | ||||
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) | ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) | ||||
{ | { | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
struct ixgbe_hw *hw = &adapter->hw; | struct ixgbe_hw *hw = &sc->hw; | ||||
int i; | int i; | ||||
if (hw->phy.ops.read_i2c_byte == NULL) | if (hw->phy.ops.read_i2c_byte == NULL) | ||||
return (ENXIO); | return (ENXIO); | ||||
for (i = 0; i < req->len; i++) | for (i = 0; i < req->len; i++) | ||||
hw->phy.ops.read_i2c_byte(hw, req->offset + i, | hw->phy.ops.read_i2c_byte(hw, req->offset + i, | ||||
req->dev_addr, &req->data[i]); | req->dev_addr, &req->data[i]); | ||||
} | } | ||||
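The byte-by-byte read loop above is what ends up servicing SIOCGI2C, the ioctl ifconfig(8) uses to dump SFP/SFP+ module pages. A hedged userland sketch, assuming the stock struct ifi2creq from <net/if.h> and a hypothetical interface name "ix0":

/*
 * Hedged sketch: issuing an SIOCGI2C read from userland.  Assumes the stock
 * FreeBSD struct ifi2creq (8-byte data window per request); "ix0" is a
 * hypothetical interface name.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	struct ifi2creq req;
	int s;

	s = socket(AF_LOCAL, SOCK_DGRAM, 0);
	if (s < 0)
		return (1);
	memset(&ifr, 0, sizeof(ifr));
	memset(&req, 0, sizeof(req));
	strlcpy(ifr.ifr_name, "ix0", sizeof(ifr.ifr_name));
	req.dev_addr = 0xA0;	/* SFP ID EEPROM i2c address */
	req.offset = 0;
	req.len = 8;		/* at most sizeof(req.data) per request */
	ifr.ifr_data = (caddr_t)&req;
	if (ioctl(s, SIOCGI2C, &ifr) == 0)
		printf("identifier byte: 0x%02x\n", req.data[0]);
	close(s);
	return (0);
}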
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_add_media_types | * ixgbe_add_media_types | ||||
************************************************************************/ | ************************************************************************/ | ||||
static void | static void | ||||
ixgbe_add_media_types(if_ctx_t ctx) | ixgbe_add_media_types(if_ctx_t ctx) | ||||
{ | { | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
struct ixgbe_hw *hw = &adapter->hw; | struct ixgbe_hw *hw = &sc->hw; | ||||
device_t dev = iflib_get_dev(ctx); | device_t dev = iflib_get_dev(ctx); | ||||
u64 layer; | u64 layer; | ||||
layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); | layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw); | ||||
/* Media types with matching FreeBSD media defines */ | /* Media types with matching FreeBSD media defines */ | ||||
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL); | ||||
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); | ||||
if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) | if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); | ||||
if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) | if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL); | ||||
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || | if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || | ||||
layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) | layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, | ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0, | ||||
NULL); | NULL); | ||||
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL); | ||||
if (hw->phy.multispeed_fiber) | if (hw->phy.multispeed_fiber) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0, | ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, | ||||
NULL); | NULL); | ||||
} | } | ||||
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); | ||||
if (hw->phy.multispeed_fiber) | if (hw->phy.multispeed_fiber) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, | ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, | ||||
NULL); | NULL); | ||||
} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) | } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); | ||||
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); | ||||
#ifdef IFM_ETH_XTYPE | #ifdef IFM_ETH_XTYPE | ||||
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL); | ||||
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); | ||||
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL); | ||||
if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) | if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL); | ||||
#else | #else | ||||
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { | ||||
device_printf(dev, "Media supported: 10GbaseKR\n"); | device_printf(dev, "Media supported: 10GbaseKR\n"); | ||||
device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); | device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); | ||||
} | } | ||||
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { | if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { | ||||
device_printf(dev, "Media supported: 10GbaseKX4\n"); | device_printf(dev, "Media supported: 10GbaseKX4\n"); | ||||
device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); | device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); | ||||
} | } | ||||
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { | ||||
device_printf(dev, "Media supported: 1000baseKX\n"); | device_printf(dev, "Media supported: 1000baseKX\n"); | ||||
device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); | device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL); | ||||
} | } | ||||
if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { | if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { | ||||
device_printf(dev, "Media supported: 2500baseKX\n"); | device_printf(dev, "Media supported: 2500baseKX\n"); | ||||
device_printf(dev, "2500baseKX mapped to 2500baseSX\n"); | device_printf(dev, "2500baseKX mapped to 2500baseSX\n"); | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL); | ||||
} | } | ||||
#endif | #endif | ||||
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) | if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) | ||||
device_printf(dev, "Media supported: 1000baseBX\n"); | device_printf(dev, "Media supported: 1000baseBX\n"); | ||||
if (hw->device_id == IXGBE_DEV_ID_82598AT) { | if (hw->device_id == IXGBE_DEV_ID_82598AT) { | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, | ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, | ||||
0, NULL); | 0, NULL); | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); | ||||
} | } | ||||
ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); | ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); | ||||
} /* ixgbe_add_media_types */ | } /* ixgbe_add_media_types */ | ||||
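Every ifmedia_add() call above registers a packed media word; the type, subtype, and option fields can be pulled back apart with the macros from <net/if_media.h>. A small sketch, using IFM_10G_SR purely as an example:

/*
 * Hedged sketch: composing and decoding an ifmedia word like the ones
 * registered above, using only the macros from <net/if_media.h>.
 */
#include <sys/types.h>
#include <net/if_media.h>
#include <stdio.h>

int
main(void)
{
	int media = IFM_ETHER | IFM_10G_SR | IFM_FDX;

	printf("type 0x%x subtype 0x%x full-duplex %s\n",
	    IFM_TYPE(media), IFM_SUBTYPE(media),
	    (media & IFM_FDX) ? "yes" : "no");
	return (0);
}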
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_is_sfp | * ixgbe_is_sfp | ||||
************************************************************************/ | ************************************************************************/ | ||||
static inline bool | static inline bool | ||||
ixgbe_is_sfp(struct ixgbe_hw *hw) | ixgbe_is_sfp(struct ixgbe_hw *hw) | ||||
{ | { | ||||
} /* ixgbe_is_sfp */ | } /* ixgbe_is_sfp */ | ||||
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_config_link | * ixgbe_config_link | ||||
************************************************************************/ | ************************************************************************/ | ||||
static void | static void | ||||
ixgbe_config_link(if_ctx_t ctx) | ixgbe_config_link(if_ctx_t ctx) | ||||
{ | { | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
struct ixgbe_hw *hw = &adapter->hw; | struct ixgbe_hw *hw = &sc->hw; | ||||
u32 autoneg, err = 0; | u32 autoneg, err = 0; | ||||
bool sfp, negotiate; | bool sfp, negotiate; | ||||
sfp = ixgbe_is_sfp(hw); | sfp = ixgbe_is_sfp(hw); | ||||
if (sfp) { | if (sfp) { | ||||
adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; | sc->task_requests |= IXGBE_REQUEST_TASK_MOD; | ||||
iflib_admin_intr_deferred(ctx); | iflib_admin_intr_deferred(ctx); | ||||
} else { | } else { | ||||
if (hw->mac.ops.check_link) | if (hw->mac.ops.check_link) | ||||
err = ixgbe_check_link(hw, &adapter->link_speed, | err = ixgbe_check_link(hw, &sc->link_speed, | ||||
&adapter->link_up, false); | &sc->link_up, false); | ||||
if (err) | if (err) | ||||
return; | return; | ||||
autoneg = hw->phy.autoneg_advertised; | autoneg = hw->phy.autoneg_advertised; | ||||
if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) | ||||
err = hw->mac.ops.get_link_capabilities(hw, &autoneg, | err = hw->mac.ops.get_link_capabilities(hw, &autoneg, | ||||
&negotiate); | &negotiate); | ||||
if (err) | if (err) | ||||
return; | return; | ||||
if (hw->mac.ops.setup_link) | if (hw->mac.ops.setup_link) | ||||
err = hw->mac.ops.setup_link(hw, autoneg, | err = hw->mac.ops.setup_link(hw, autoneg, | ||||
adapter->link_up); | sc->link_up); | ||||
} | } | ||||
} /* ixgbe_config_link */ | } /* ixgbe_config_link */ | ||||
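Note the split above: copper and backplane links are configured synchronously, while SFP cages only set IXGBE_REQUEST_TASK_MOD and defer the work to the admin task via iflib_admin_intr_deferred(). A hedged sketch of that request-bitmask pattern; the names REQ_MOD, REQ_MSF, and admin_task() are illustrative, not driver API:

/*
 * Hedged sketch of the deferred-work pattern used above: callers OR a
 * request bit into a mask, and a single admin task later consumes it.
 * REQ_MOD, REQ_MSF and admin_task() are illustrative names only.
 */
#include <stdio.h>

#define REQ_MOD	0x01	/* SFP module (re)configuration requested */
#define REQ_MSF	0x02	/* multispeed fiber setup requested */

static unsigned int task_requests;

static void
admin_task(void)
{
	unsigned int req = task_requests;

	task_requests = 0;	/* the real driver clears bits as it handles them */
	if (req & REQ_MOD)
		printf("handle SFP module insertion\n");
	if (req & REQ_MSF)
		printf("handle multispeed fiber setup\n");
}

int
main(void)
{
	task_requests |= REQ_MOD;	/* what ixgbe_config_link() does for SFP */
	admin_task();			/* what iflib's deferred admin work runs later */
	return (0);
}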
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_update_stats_counters - Update board statistics counters. | * ixgbe_update_stats_counters - Update board statistics counters. | ||||
************************************************************************/ | ************************************************************************/ | ||||
static void | static void | ||||
ixgbe_update_stats_counters(struct adapter *adapter) | ixgbe_update_stats_counters(struct ixgbe_softc *sc) | ||||
{ | { | ||||
struct ixgbe_hw *hw = &adapter->hw; | struct ixgbe_hw *hw = &sc->hw; | ||||
struct ixgbe_hw_stats *stats = &adapter->stats.pf; | struct ixgbe_hw_stats *stats = &sc->stats.pf; | ||||
u32 missed_rx = 0, bprc, lxon, lxoff, total; | u32 missed_rx = 0, bprc, lxon, lxoff, total; | ||||
u32 lxoffrxc; | u32 lxoffrxc; | ||||
u64 total_missed_rx = 0; | u64 total_missed_rx = 0; | ||||
stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); | stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); | ||||
stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); | stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); | ||||
stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); | stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); | ||||
stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); | stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); | ||||
if (hw->mac.type != ixgbe_mac_82598EB) { | if (hw->mac.type != ixgbe_mac_82598EB) { ||||
stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); | stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); | ||||
} | } | ||||
/* | /* | ||||
* For watchdog management we need to know if we have been paused | * For watchdog management we need to know if we have been paused | ||||
* during the last interval, so capture that here. | * during the last interval, so capture that here. | ||||
*/ | */ | ||||
if (lxoffrxc) | if (lxoffrxc) | ||||
adapter->shared->isc_pause_frames = 1; | sc->shared->isc_pause_frames = 1; | ||||
/* | /* | ||||
* Workaround: mprc hardware is incorrectly counting | * Workaround: mprc hardware is incorrectly counting | ||||
* broadcasts, so for now we subtract those. | * broadcasts, so for now we subtract those. | ||||
*/ | */ | ||||
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); | bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); | ||||
stats->bprc += bprc; | stats->bprc += bprc; | ||||
stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); | stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); | ||||
if (hw->mac.type != ixgbe_mac_82598EB) { | if (hw->mac.type != ixgbe_mac_82598EB) { ||||
stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); | stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); | ||||
stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); | stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); | ||||
stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); | stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); | ||||
stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); | stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); | ||||
stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); | stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); | ||||
} | } | ||||
/* Fill out the OS statistics structure */ | /* Fill out the OS statistics structure */ | ||||
IXGBE_SET_IPACKETS(adapter, stats->gprc); | IXGBE_SET_IPACKETS(sc, stats->gprc); | ||||
IXGBE_SET_OPACKETS(adapter, stats->gptc); | IXGBE_SET_OPACKETS(sc, stats->gptc); | ||||
IXGBE_SET_IBYTES(adapter, stats->gorc); | IXGBE_SET_IBYTES(sc, stats->gorc); | ||||
IXGBE_SET_OBYTES(adapter, stats->gotc); | IXGBE_SET_OBYTES(sc, stats->gotc); | ||||
IXGBE_SET_IMCASTS(adapter, stats->mprc); | IXGBE_SET_IMCASTS(sc, stats->mprc); | ||||
IXGBE_SET_OMCASTS(adapter, stats->mptc); | IXGBE_SET_OMCASTS(sc, stats->mptc); | ||||
IXGBE_SET_COLLISIONS(adapter, 0); | IXGBE_SET_COLLISIONS(sc, 0); | ||||
IXGBE_SET_IQDROPS(adapter, total_missed_rx); | IXGBE_SET_IQDROPS(sc, total_missed_rx); | ||||
/* | /* | ||||
* Aggregate following types of errors as RX errors: | * Aggregate following types of errors as RX errors: | ||||
* - CRC error count, | * - CRC error count, | ||||
* - illegal byte error count, | * - illegal byte error count, | ||||
* - checksum error count, | * - checksum error count, | ||||
* - missed packets count, | * - missed packets count, | ||||
* - length error count, | * - length error count, | ||||
* - undersized packets count, | * - undersized packets count, | ||||
* - fragmented packets count, | * - fragmented packets count, | ||||
* - oversized packets count, | * - oversized packets count, | ||||
* - jabber count. | * - jabber count. | ||||
*/ | */ | ||||
IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->illerrc + stats->xec + | IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc + stats->xec + | ||||
stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc + | stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc + | ||||
stats->rjc); | stats->rjc); | ||||
} /* ixgbe_update_stats_counters */ | } /* ixgbe_update_stats_counters */ | ||||
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_add_hw_stats | * ixgbe_add_hw_stats | ||||
* | * | ||||
* Add sysctl variables, one per statistic, to the system. | * Add sysctl variables, one per statistic, to the system. | ||||
************************************************************************/ | ************************************************************************/ | ||||
static void | static void | ||||
ixgbe_add_hw_stats(struct adapter *adapter) | ixgbe_add_hw_stats(struct ixgbe_softc *sc) | ||||
{ | { | ||||
device_t dev = iflib_get_dev(adapter->ctx); | device_t dev = iflib_get_dev(sc->ctx); | ||||
struct ix_rx_queue *rx_que; | struct ix_rx_queue *rx_que; | ||||
struct ix_tx_queue *tx_que; | struct ix_tx_queue *tx_que; | ||||
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); | struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); | ||||
struct sysctl_oid *tree = device_get_sysctl_tree(dev); | struct sysctl_oid *tree = device_get_sysctl_tree(dev); | ||||
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); | struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); | ||||
struct ixgbe_hw_stats *stats = &adapter->stats.pf; | struct ixgbe_hw_stats *stats = &sc->stats.pf; | ||||
struct sysctl_oid *stat_node, *queue_node; | struct sysctl_oid *stat_node, *queue_node; | ||||
struct sysctl_oid_list *stat_list, *queue_list; | struct sysctl_oid_list *stat_list, *queue_list; | ||||
int i; | int i; | ||||
#define QUEUE_NAME_LEN 32 | #define QUEUE_NAME_LEN 32 | ||||
char namebuf[QUEUE_NAME_LEN]; | char namebuf[QUEUE_NAME_LEN]; | ||||
/* Driver Statistics */ | /* Driver Statistics */ | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", | SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", | ||||
CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); | CTLFLAG_RD, &sc->dropped_pkts, "Driver dropped packets"); | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", | SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", | ||||
CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); | CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", | SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", | ||||
CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); | CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled"); | ||||
for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { | for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) { | ||||
struct tx_ring *txr = &tx_que->txr; | struct tx_ring *txr = &tx_que->txr; | ||||
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); | snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); | ||||
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, | queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, | ||||
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); | ||||
queue_list = SYSCTL_CHILDREN(queue_node); | queue_list = SYSCTL_CHILDREN(queue_node); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", | ||||
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, | CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, | ||||
ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); | ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", | ||||
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, | CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, | ||||
ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); | ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); | ||||
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", | SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", | ||||
CTLFLAG_RD, &txr->tso_tx, "TSO"); | CTLFLAG_RD, &txr->tso_tx, "TSO"); | ||||
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", | SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", | ||||
CTLFLAG_RD, &txr->total_packets, | CTLFLAG_RD, &txr->total_packets, | ||||
"Queue Packets Transmitted"); | "Queue Packets Transmitted"); | ||||
} | } | ||||
for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { | for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) { | ||||
struct rx_ring *rxr = &rx_que->rxr; | struct rx_ring *rxr = &rx_que->rxr; | ||||
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); | snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); | ||||
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, | queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, | ||||
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); | ||||
queue_list = SYSCTL_CHILDREN(queue_node); | queue_list = SYSCTL_CHILDREN(queue_node); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", | ||||
CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, | CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, | ||||
&adapter->rx_queues[i], 0, | &sc->rx_queues[i], 0, | ||||
ixgbe_sysctl_interrupt_rate_handler, "IU", | ixgbe_sysctl_interrupt_rate_handler, "IU", | ||||
"Interrupt Rate"); | "Interrupt Rate"); | ||||
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", | SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", | ||||
CTLFLAG_RD, &(adapter->rx_queues[i].irqs), | CTLFLAG_RD, &(sc->rx_queues[i].irqs), | ||||
"irqs on this queue"); | "irqs on this queue"); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", | ||||
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, | CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, | ||||
ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); | ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", | ||||
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, | CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, | ||||
ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); | ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); | ||||
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", | SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", | ||||
CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); | CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); | ||||
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", | SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", | ||||
CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); | CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); | ||||
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", | SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", | ||||
CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); | CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); | ||||
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", | SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", | ||||
CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets"); | CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets"); | ||||
} | } | ||||
/* MAC stats get their own sub node */ | /* MAC stats get their own sub node */ | ||||
stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", | stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", | ||||
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics"); | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics"); | ||||
stat_list = SYSCTL_CHILDREN(stat_node); | stat_list = SYSCTL_CHILDREN(stat_node); | ||||
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs", | SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs", | ||||
CTLFLAG_RD, &adapter->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS); | CTLFLAG_RD, &sc->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS); | ||||
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", | SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", | ||||
CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); | CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); | ||||
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", | SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", | ||||
CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); | CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); | ||||
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", | SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", | ||||
CTLFLAG_RD, &stats->errbc, "Byte Errors"); | CTLFLAG_RD, &stats->errbc, "Byte Errors"); | ||||
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", | SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", | ||||
CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); | CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); | ||||
{ | { | ||||
struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); | struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); | ||||
int error; | int error; | ||||
unsigned int val; | unsigned int val; | ||||
if (!txr) | if (!txr) | ||||
return (0); | return (0); | ||||
val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); | val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me)); | ||||
error = sysctl_handle_int(oidp, &val, 0, req); | error = sysctl_handle_int(oidp, &val, 0, req); | ||||
if (error || !req->newptr) | if (error || !req->newptr) | ||||
return error; | return error; | ||||
return (0); | return (0); | ||||
} /* ixgbe_sysctl_tdh_handler */ | } /* ixgbe_sysctl_tdh_handler */ | ||||
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function | * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function | ||||
* | * | ||||
* Retrieves the TDT value from the hardware | * Retrieves the TDT value from the hardware | ||||
************************************************************************/ | ************************************************************************/ | ||||
static int | static int | ||||
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) | ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); | struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); | ||||
int error; | int error; | ||||
unsigned int val; | unsigned int val; | ||||
if (!txr) | if (!txr) | ||||
return (0); | return (0); | ||||
val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); | val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me)); | ||||
error = sysctl_handle_int(oidp, &val, 0, req); | error = sysctl_handle_int(oidp, &val, 0, req); | ||||
if (error || !req->newptr) | if (error || !req->newptr) | ||||
return error; | return error; | ||||
return (0); | return (0); | ||||
} /* ixgbe_sysctl_tdt_handler */ | } /* ixgbe_sysctl_tdt_handler */ | ||||
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function | * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function | ||||
* | * | ||||
* Retrieves the RDH value from the hardware | * Retrieves the RDH value from the hardware | ||||
************************************************************************/ | ************************************************************************/ | ||||
static int | static int | ||||
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) | ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); | struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); | ||||
int error; | int error; | ||||
unsigned int val; | unsigned int val; | ||||
if (!rxr) | if (!rxr) | ||||
return (0); | return (0); | ||||
val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); | val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me)); | ||||
error = sysctl_handle_int(oidp, &val, 0, req); | error = sysctl_handle_int(oidp, &val, 0, req); | ||||
if (error || !req->newptr) | if (error || !req->newptr) | ||||
return error; | return error; | ||||
return (0); | return (0); | ||||
} /* ixgbe_sysctl_rdh_handler */ | } /* ixgbe_sysctl_rdh_handler */ | ||||
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function | * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function | ||||
* | * | ||||
* Retrieves the RDT value from the hardware | * Retrieves the RDT value from the hardware | ||||
************************************************************************/ | ************************************************************************/ | ||||
static int | static int | ||||
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) | ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); | struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); | ||||
int error; | int error; | ||||
unsigned int val; | unsigned int val; | ||||
if (!rxr) | if (!rxr) | ||||
return (0); | return (0); | ||||
val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); | val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me)); | ||||
error = sysctl_handle_int(oidp, &val, 0, req); | error = sysctl_handle_int(oidp, &val, 0, req); | ||||
if (error || !req->newptr) | if (error || !req->newptr) | ||||
return error; | return error; | ||||
return (0); | return (0); | ||||
} /* ixgbe_sysctl_rdt_handler */ | } /* ixgbe_sysctl_rdt_handler */ | ||||
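The four handlers above back read-only per-queue sysctl OIDs created in ixgbe_add_hw_stats(). A hedged sketch of reading one from userland with sysctlbyname(3); the OID path assumes ix unit 0 and queue 0:

/*
 * Hedged sketch: reading one of the per-queue descriptor sysctls from
 * userland.  The OID path assumes ix unit 0, queue 0.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	unsigned int head;
	size_t len = sizeof(head);

	if (sysctlbyname("dev.ix.0.queue0.txd_head", &head, &len,
	    NULL, 0) != 0) {
		perror("sysctlbyname");
		return (1);
	}
	printf("TX descriptor head: %u\n", head);
	return (0);
}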
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_if_vlan_register | * ixgbe_if_vlan_register | ||||
* | * | ||||
* Run via the vlan config EVENT; it enables us to use the | * Run via the vlan config EVENT; it enables us to use the | ||||
* HW Filter table since we can get the vlan id. This | * HW Filter table since we can get the vlan id. This | ||||
* just creates the entry in the soft version of the | * just creates the entry in the soft version of the | ||||
* VFTA; init will repopulate the real table. | * VFTA; init will repopulate the real table. | ||||
************************************************************************/ | ************************************************************************/ | ||||
static void | static void | ||||
ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) | ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) | ||||
{ | { | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
u16 index, bit; | u16 index, bit; | ||||
index = (vtag >> 5) & 0x7F; | index = (vtag >> 5) & 0x7F; | ||||
bit = vtag & 0x1F; | bit = vtag & 0x1F; | ||||
adapter->shadow_vfta[index] |= (1 << bit); | sc->shadow_vfta[index] |= (1 << bit); | ||||
++adapter->num_vlans; | ++sc->num_vlans; | ||||
ixgbe_setup_vlan_hw_support(ctx); | ixgbe_setup_vlan_hw_support(ctx); | ||||
} /* ixgbe_if_vlan_register */ | } /* ixgbe_if_vlan_register */ | ||||
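The shadow VFTA is 128 32-bit words covering the 4096 possible VLAN IDs: vtag >> 5 selects the word and vtag & 0x1F the bit within it. A small worked sketch (VLAN 100 lands in word 3, bit 4):

/*
 * Hedged sketch of the shadow-VFTA indexing used above: 4096 VLAN IDs map
 * onto 128 32-bit words; e.g. VLAN 100 -> word 3, bit 4.
 */
#include <stdint.h>
#include <stdio.h>

#define VFTA_SIZE	128	/* 128 * 32 = 4096 VLAN IDs */

int
main(void)
{
	uint32_t shadow_vfta[VFTA_SIZE] = { 0 };
	uint16_t vtag = 100;	/* example VLAN ID */
	unsigned int index = (vtag >> 5) & 0x7F;
	unsigned int bit = vtag & 0x1F;

	shadow_vfta[index] |= (1U << bit);
	printf("VLAN %u -> word %u, bit %u (0x%08x)\n",
	    (unsigned int)vtag, index, bit, shadow_vfta[index]);
	return (0);
}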
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_if_vlan_unregister | * ixgbe_if_vlan_unregister | ||||
* | * | ||||
* Run via vlan unconfig EVENT, remove our entry in the soft vfta. | * Run via vlan unconfig EVENT, remove our entry in the soft vfta. | ||||
************************************************************************/ | ************************************************************************/ | ||||
static void | static void | ||||
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) | ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) | ||||
{ | { | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
u16 index, bit; | u16 index, bit; | ||||
index = (vtag >> 5) & 0x7F; | index = (vtag >> 5) & 0x7F; | ||||
bit = vtag & 0x1F; | bit = vtag & 0x1F; | ||||
adapter->shadow_vfta[index] &= ~(1 << bit); | sc->shadow_vfta[index] &= ~(1 << bit); | ||||
--adapter->num_vlans; | --sc->num_vlans; | ||||
/* Re-init to load the changes */ | /* Re-init to load the changes */ | ||||
ixgbe_setup_vlan_hw_support(ctx); | ixgbe_setup_vlan_hw_support(ctx); | ||||
} /* ixgbe_if_vlan_unregister */ | } /* ixgbe_if_vlan_unregister */ | ||||
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_setup_vlan_hw_support | * ixgbe_setup_vlan_hw_support | ||||
************************************************************************/ | ************************************************************************/ | ||||
static void | static void | ||||
ixgbe_setup_vlan_hw_support(if_ctx_t ctx) | ixgbe_setup_vlan_hw_support(if_ctx_t ctx) | ||||
{ | { | ||||
struct ifnet *ifp = iflib_get_ifp(ctx); | struct ifnet *ifp = iflib_get_ifp(ctx); | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
struct ixgbe_hw *hw = &adapter->hw; | struct ixgbe_hw *hw = &sc->hw; | ||||
struct rx_ring *rxr; | struct rx_ring *rxr; | ||||
int i; | int i; | ||||
u32 ctrl; | u32 ctrl; | ||||
/* | /* | ||||
* We get here through init_locked, meaning | * We get here through init_locked, meaning | ||||
* a soft reset; this has already cleared | * a soft reset; this has already cleared | ||||
* the VFTA and other state, so if there | * the VFTA and other state, so if there | ||||
* have been no vlans registered, do nothing. | * have been no vlans registered, do nothing. | ||||
*/ | */ | ||||
if (adapter->num_vlans == 0) | if (sc->num_vlans == 0) | ||||
return; | return; | ||||
/* Setup the queues for vlans */ | /* Setup the queues for vlans */ | ||||
if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { | if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { | ||||
for (i = 0; i < adapter->num_rx_queues; i++) { | for (i = 0; i < sc->num_rx_queues; i++) { | ||||
rxr = &adapter->rx_queues[i].rxr; | rxr = &sc->rx_queues[i].rxr; | ||||
/* On 82599 the VLAN enable is per-queue in RXDCTL */ | /* On 82599 the VLAN enable is per-queue in RXDCTL */ | ||||
if (hw->mac.type != ixgbe_mac_82598EB) { | if (hw->mac.type != ixgbe_mac_82598EB) { | ||||
ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); | ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); | ||||
ctrl |= IXGBE_RXDCTL_VME; | ctrl |= IXGBE_RXDCTL_VME; | ||||
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); | IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); | ||||
} | } | ||||
rxr->vtag_strip = true; | rxr->vtag_strip = true; | ||||
} | } | ||||
} | } | ||||
if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) | if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) | ||||
return; | return; | ||||
/* | /* | ||||
* A soft reset zeroes out the VFTA, so | * A soft reset zeroes out the VFTA, so | ||||
* we need to repopulate it now. | * we need to repopulate it now. | ||||
*/ | */ | ||||
for (i = 0; i < IXGBE_VFTA_SIZE; i++) | for (i = 0; i < IXGBE_VFTA_SIZE; i++) | ||||
if (adapter->shadow_vfta[i] != 0) | if (sc->shadow_vfta[i] != 0) | ||||
IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), | IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), | ||||
adapter->shadow_vfta[i]); | sc->shadow_vfta[i]); | ||||
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||||
/* Enable the Filter Table if enabled */ | /* Enable the Filter Table if enabled */ | ||||
if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { | if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { | ||||
ctrl &= ~IXGBE_VLNCTRL_CFIEN; | ctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||||
ctrl |= IXGBE_VLNCTRL_VFE; | ctrl |= IXGBE_VLNCTRL_VFE; | ||||
} | } | ||||
if (hw->mac.type == ixgbe_mac_82598EB) | if (hw->mac.type == ixgbe_mac_82598EB) | ||||
ctrl |= IXGBE_VLNCTRL_VME; | ctrl |= IXGBE_VLNCTRL_VME; | ||||
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); | ||||
} /* ixgbe_setup_vlan_hw_support */ | } /* ixgbe_setup_vlan_hw_support */ | ||||
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_get_slot_info | * ixgbe_get_slot_info | ||||
* | * | ||||
* Get the width and transaction speed of | * Get the width and transaction speed of | ||||
* the slot this adapter is plugged into. | * the slot this adapter is plugged into. | ||||
************************************************************************/ | ************************************************************************/ | ||||
static void | static void | ||||
ixgbe_get_slot_info(struct adapter *adapter) | ixgbe_get_slot_info(struct ixgbe_softc *sc) | ||||
{ | { | ||||
device_t dev = iflib_get_dev(adapter->ctx); | device_t dev = iflib_get_dev(sc->ctx); | ||||
struct ixgbe_hw *hw = &adapter->hw; | struct ixgbe_hw *hw = &sc->hw; | ||||
int bus_info_valid = true; | int bus_info_valid = true; | ||||
u32 offset; | u32 offset; | ||||
u16 link; | u16 link; | ||||
/* Some devices are behind an internal bridge */ | /* Some devices are behind an internal bridge */ | ||||
switch (hw->device_id) { | switch (hw->device_id) { | ||||
case IXGBE_DEV_ID_82599_SFP_SF_QP: | case IXGBE_DEV_ID_82599_SFP_SF_QP: | ||||
case IXGBE_DEV_ID_82599_QSFP_SF_QP: | case IXGBE_DEV_ID_82599_QSFP_SF_QP: | ||||
/************************************************************************ | /************************************************************************ | ||||
* ixgbe_if_msix_intr_assign | * ixgbe_if_msix_intr_assign | ||||
* | * | ||||
* Setup MSI-X Interrupt resources and handlers | * Setup MSI-X Interrupt resources and handlers | ||||
************************************************************************/ | ************************************************************************/ | ||||
static int | static int | ||||
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) | ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) | ||||
{ | { | ||||
struct adapter *adapter = iflib_get_softc(ctx); | struct ixgbe_softc *sc = iflib_get_softc(ctx); | ||||
struct ix_rx_queue *rx_que = adapter->rx_queues; | struct ix_rx_queue *rx_que = sc->rx_queues; | ||||
struct ix_tx_queue *tx_que; | struct ix_tx_queue *tx_que; | ||||
int error, rid, vector = 0; | int error, rid, vector = 0; | ||||
int cpu_id = 0; | int cpu_id = 0; | ||||
char buf[16]; | char buf[16]; | ||||
/* Admin Queue is vector 0 */ | /* Admin Queue is vector 0 */ | ||||
rid = vector + 1; | rid = vector + 1; | ||||
for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) { | for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) { | ||||
rid = vector + 1; | rid = vector + 1; | ||||
snprintf(buf, sizeof(buf), "rxq%d", i); | snprintf(buf, sizeof(buf), "rxq%d", i); | ||||
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, | error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, | ||||
IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); | IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); | ||||
if (error) { | if (error) { | ||||
device_printf(iflib_get_dev(ctx), | device_printf(iflib_get_dev(ctx), | ||||
"Failed to allocate que int %d err: %d", i, error); | "Failed to allocate que int %d err: %d", i, error); | ||||
adapter->num_rx_queues = i + 1; | sc->num_rx_queues = i + 1; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
rx_que->msix = vector; | rx_que->msix = vector; | ||||
if (adapter->feat_en & IXGBE_FEATURE_RSS) { | if (sc->feat_en & IXGBE_FEATURE_RSS) { | ||||
/* | /* | ||||
* The queue ID is used as the RSS layer bucket ID. | * The queue ID is used as the RSS layer bucket ID. | ||||
* We look up the queue ID -> RSS CPU ID and select | * We look up the queue ID -> RS |