Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/if_ixl.c
/****************************************************************************** | /****************************************************************************** | ||||
Copyright (c) 2013-2015, Intel Corporation | Copyright (c) 2013-2017, Intel Corporation | ||||
All rights reserved. | All rights reserved. | ||||
Redistribution and use in source and binary forms, with or without | Redistribution and use in source and binary forms, with or without | ||||
modification, are permitted provided that the following conditions are met: | modification, are permitted provided that the following conditions are met: | ||||
1. Redistributions of source code must retain the above copyright notice, | 1. Redistributions of source code must retain the above copyright notice, | ||||
this list of conditions and the following disclaimer. | this list of conditions and the following disclaimer. | ||||
Show All 30 Lines | |||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
#include "ixl_pf_iov.h" | #include "ixl_pf_iov.h" | ||||
#endif | #endif | ||||
/********************************************************************* | /********************************************************************* | ||||
* Driver version | * Driver version | ||||
*********************************************************************/ | *********************************************************************/ | ||||
char ixl_driver_version[] = "1.7.12-k"; | #define IXL_DRIVER_VERSION_MAJOR 1 | ||||
#define IXL_DRIVER_VERSION_MINOR 9 | |||||
#define IXL_DRIVER_VERSION_BUILD 8 | |||||
char ixl_driver_version[] = __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." | |||||
__XSTRING(IXL_DRIVER_VERSION_MINOR) "." | |||||
__XSTRING(IXL_DRIVER_VERSION_BUILD) "-iflib-k"; | |||||
/********************************************************************* | /********************************************************************* | ||||
* PCI Device ID Table | * PCI Device ID Table | ||||
* | * | ||||
* Used by probe to select devices to load on | * Used by probe to select devices to load on | ||||
* Last field stores an index into ixl_strings | |||||
* Last entry must be all 0s | |||||
* | * | ||||
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } | * ( Vendor ID, Device ID, Branding String ) | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static ixl_vendor_info_t ixl_vendor_info_array[] = | static pci_vendor_info_t ixl_vendor_info_array[] = | ||||
{ | { | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"), | ||||
/* required last entry */ | /* required last entry */ | ||||
{0, 0, 0, 0, 0} | PVID_END | ||||
}; | }; | ||||
/********************************************************************* | /********************************************************************* | ||||
* Table of branding strings | |||||
*********************************************************************/ | |||||
static char *ixl_strings[] = { | |||||
"Intel(R) Ethernet Connection XL710/X722 Driver" | |||||
}; | |||||
/********************************************************************* | |||||
* Function prototypes | * Function prototypes | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static int ixl_probe(device_t); | /*** IFLIB interface ***/ | ||||
static int ixl_attach(device_t); | static void *ixl_register(device_t dev); | ||||
static int ixl_detach(device_t); | static int ixl_if_attach_pre(if_ctx_t ctx); | ||||
static int ixl_shutdown(device_t); | static int ixl_if_attach_post(if_ctx_t ctx); | ||||
static int ixl_if_detach(if_ctx_t ctx); | |||||
static int ixl_if_shutdown(if_ctx_t ctx); | |||||
static int ixl_if_suspend(if_ctx_t ctx); | |||||
static int ixl_if_resume(if_ctx_t ctx); | |||||
static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix); | |||||
static void ixl_if_enable_intr(if_ctx_t ctx); | |||||
static void ixl_if_disable_intr(if_ctx_t ctx); | |||||
static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); | |||||
static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); | |||||
static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); | |||||
static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); | |||||
static void ixl_if_queues_free(if_ctx_t ctx); | |||||
static void ixl_if_update_admin_status(if_ctx_t ctx); | |||||
static void ixl_if_multi_set(if_ctx_t ctx); | |||||
static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu); | |||||
static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); | |||||
static int ixl_if_media_change(if_ctx_t ctx); | |||||
static int ixl_if_promisc_set(if_ctx_t ctx, int flags); | |||||
static void ixl_if_timer(if_ctx_t ctx, uint16_t qid); | |||||
static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag); | |||||
static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag); | |||||
static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt); | |||||
static void ixl_if_vflr_handle(if_ctx_t ctx); | |||||
// static void ixl_if_link_intr_enable(if_ctx_t ctx); | |||||
static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req); | |||||
/*** Other ***/ | |||||
static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int); | |||||
static int ixl_save_pf_tunables(struct ixl_pf *); | static int ixl_save_pf_tunables(struct ixl_pf *); | ||||
static int ixl_attach_get_link_status(struct ixl_pf *); | static int ixl_allocate_pci_resources(struct ixl_pf *); | ||||
/********************************************************************* | /********************************************************************* | ||||
* FreeBSD Device Interface Entry Points | * FreeBSD Device Interface Entry Points | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static device_method_t ixl_methods[] = { | static device_method_t ixl_methods[] = { | ||||
/* Device interface */ | /* Device interface */ | ||||
DEVMETHOD(device_probe, ixl_probe), | DEVMETHOD(device_register, ixl_register), | ||||
DEVMETHOD(device_attach, ixl_attach), | DEVMETHOD(device_probe, iflib_device_probe), | ||||
DEVMETHOD(device_detach, ixl_detach), | DEVMETHOD(device_attach, iflib_device_attach), | ||||
DEVMETHOD(device_shutdown, ixl_shutdown), | DEVMETHOD(device_detach, iflib_device_detach), | ||||
DEVMETHOD(device_shutdown, iflib_device_shutdown), | |||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
DEVMETHOD(pci_iov_init, ixl_iov_init), | DEVMETHOD(pci_iov_init, ixl_iov_init), | ||||
DEVMETHOD(pci_iov_uninit, ixl_iov_uninit), | DEVMETHOD(pci_iov_uninit, ixl_iov_uninit), | ||||
DEVMETHOD(pci_iov_add_vf, ixl_add_vf), | DEVMETHOD(pci_iov_add_vf, ixl_add_vf), | ||||
#endif | #endif | ||||
{0, 0} | DEVMETHOD_END | ||||
}; | }; | ||||
static driver_t ixl_driver = { | static driver_t ixl_driver = { | ||||
"ixl", ixl_methods, sizeof(struct ixl_pf), | "ixl", ixl_methods, sizeof(struct ixl_pf), | ||||
}; | }; | ||||
devclass_t ixl_devclass; | devclass_t ixl_devclass; | ||||
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); | DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); | ||||
MODULE_VERSION(ixl, 3); | |||||
MODULE_VERSION(ixl, 1); | |||||
MODULE_DEPEND(ixl, pci, 1, 1, 1); | MODULE_DEPEND(ixl, pci, 1, 1, 1); | ||||
MODULE_DEPEND(ixl, ether, 1, 1, 1); | MODULE_DEPEND(ixl, ether, 1, 1, 1); | ||||
#if defined(DEV_NETMAP) && __FreeBSD_version >= 1100000 | MODULE_DEPEND(ixl, iflib, 1, 1, 1); | ||||
MODULE_DEPEND(ixl, netmap, 1, 1, 1); | |||||
#endif /* DEV_NETMAP */ | |||||
static device_method_t ixl_if_methods[] = { | |||||
DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre), | |||||
DEVMETHOD(ifdi_attach_post, ixl_if_attach_post), | |||||
DEVMETHOD(ifdi_detach, ixl_if_detach), | |||||
DEVMETHOD(ifdi_shutdown, ixl_if_shutdown), | |||||
DEVMETHOD(ifdi_suspend, ixl_if_suspend), | |||||
DEVMETHOD(ifdi_resume, ixl_if_resume), | |||||
DEVMETHOD(ifdi_init, ixl_if_init), | |||||
DEVMETHOD(ifdi_stop, ixl_if_stop), | |||||
DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign), | |||||
DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr), | |||||
DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr), | |||||
//DEVMETHOD(ifdi_link_intr_enable, ixl_if_link_intr_enable), | |||||
DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable), | |||||
DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable), | |||||
DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc), | |||||
DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc), | |||||
DEVMETHOD(ifdi_queues_free, ixl_if_queues_free), | |||||
DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status), | |||||
DEVMETHOD(ifdi_multi_set, ixl_if_multi_set), | |||||
DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set), | |||||
DEVMETHOD(ifdi_media_status, ixl_if_media_status), | |||||
DEVMETHOD(ifdi_media_change, ixl_if_media_change), | |||||
DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set), | |||||
DEVMETHOD(ifdi_timer, ixl_if_timer), | |||||
DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register), | |||||
DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister), | |||||
DEVMETHOD(ifdi_get_counter, ixl_if_get_counter), | |||||
DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle), | |||||
DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req), | |||||
// ifdi_led_func | |||||
// ifdi_debug | |||||
DEVMETHOD_END | |||||
}; | |||||
static driver_t ixl_if_driver = { | |||||
"ixl_if", ixl_if_methods, sizeof(struct ixl_pf) | |||||
}; | |||||
/* | /* | ||||
** TUNEABLE PARAMETERS: | ** TUNEABLE PARAMETERS: | ||||
*/ | */ | ||||
static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, | static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, | ||||
"IXL driver parameters"); | "IXL driver parameters"); | ||||
/* | /* | ||||
* MSIX should be the default for best performance, | * MSIX should be the default for best performance, | ||||
* but this allows it to be forced off for testing. | * but this allows it to be forced off for testing. | ||||
*/ | */ | ||||
static int ixl_enable_msix = 1; | static int ixl_enable_msix = 1; | ||||
TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix); | TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0, | SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0, | ||||
"Enable MSI-X interrupts"); | "Enable MSI-X interrupts"); | ||||
/* | /* | ||||
** Number of descriptors per ring: | ** Number of descriptors per ring | ||||
** - TX and RX are the same size | ** - TX and RX sizes are independently configurable | ||||
*/ | */ | ||||
static int ixl_ring_size = IXL_DEFAULT_RING; | static int ixl_tx_ring_size = IXL_DEFAULT_RING; | ||||
TUNABLE_INT("hw.ixl.ring_size", &ixl_ring_size); | TUNABLE_INT("hw.ixl.tx_ring_size", &ixl_tx_ring_size); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixl, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN, | ||||
&ixl_ring_size, 0, "Descriptor Ring Size"); | &ixl_tx_ring_size, 0, "TX Descriptor Ring Size"); | ||||
static int ixl_rx_ring_size = IXL_DEFAULT_RING; | |||||
TUNABLE_INT("hw.ixl.rx_ring_size", &ixl_rx_ring_size); | |||||
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN, | |||||
&ixl_rx_ring_size, 0, "RX Descriptor Ring Size"); | |||||
/* | /* | ||||
** This can be set manually, if left as 0 the | ** This can be set manually, if left as 0 the | ||||
** number of queues will be calculated based | ** number of queues will be calculated based | ||||
** on cpus and msix vectors available. | ** on cpus and msix vectors available. | ||||
*/ | */ | ||||
static int ixl_max_queues = 0; | static int ixl_max_queues = 0; | ||||
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues); | TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN, | ||||
&ixl_max_queues, 0, "Number of Queues"); | &ixl_max_queues, 0, "Number of Queues"); | ||||
/* | |||||
* Leave this on unless you need to send flow control | |||||
* frames (or other control frames) from software | |||||
*/ | |||||
static int ixl_enable_tx_fc_filter = 1; | static int ixl_enable_tx_fc_filter = 1; | ||||
TUNABLE_INT("hw.ixl.enable_tx_fc_filter", | TUNABLE_INT("hw.ixl.enable_tx_fc_filter", | ||||
&ixl_enable_tx_fc_filter); | &ixl_enable_tx_fc_filter); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, | ||||
&ixl_enable_tx_fc_filter, 0, | &ixl_enable_tx_fc_filter, 0, | ||||
"Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources"); | "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources"); | ||||
/* | |||||
* Different method for processing TX descriptor | |||||
* completion. | |||||
*/ | |||||
static int ixl_enable_head_writeback = 1; | |||||
TUNABLE_INT("hw.ixl.enable_head_writeback", | |||||
&ixl_enable_head_writeback); | |||||
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, | |||||
&ixl_enable_head_writeback, 0, | |||||
"For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); | |||||
static int ixl_core_debug_mask = 0; | static int ixl_core_debug_mask = 0; | ||||
TUNABLE_INT("hw.ixl.core_debug_mask", | TUNABLE_INT("hw.ixl.core_debug_mask", | ||||
&ixl_core_debug_mask); | &ixl_core_debug_mask); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN, | ||||
&ixl_core_debug_mask, 0, | &ixl_core_debug_mask, 0, | ||||
"Display debug statements that are printed in non-shared code"); | "Display debug statements that are printed in non-shared code"); | ||||
static int ixl_shared_debug_mask = 0; | static int ixl_shared_debug_mask = 0; | ||||
TUNABLE_INT("hw.ixl.shared_debug_mask", | TUNABLE_INT("hw.ixl.shared_debug_mask", | ||||
&ixl_shared_debug_mask); | &ixl_shared_debug_mask); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN, | ||||
&ixl_shared_debug_mask, 0, | &ixl_shared_debug_mask, 0, | ||||
"Display debug statements that are printed in shared code"); | "Display debug statements that are printed in shared code"); | ||||
/* | /* | ||||
** Controls for Interrupt Throttling | ** Controls for Interrupt Throttling | ||||
** - true/false for dynamic adjustment | ** - true/false for dynamic adjustment | ||||
** - default values for static ITR | ** - default values for static ITR | ||||
*/ | */ | ||||
static int ixl_dynamic_rx_itr = 1; | static int ixl_dynamic_rx_itr = 0; | ||||
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr); | TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, | ||||
&ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); | &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); | ||||
static int ixl_dynamic_tx_itr = 1; | static int ixl_dynamic_tx_itr = 0; | ||||
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr); | TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, | ||||
&ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); | &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); | ||||
static int ixl_rx_itr = IXL_ITR_8K; | static int ixl_rx_itr = IXL_ITR_8K; | ||||
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr); | TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN, | ||||
&ixl_rx_itr, 0, "RX Interrupt Rate"); | &ixl_rx_itr, 0, "RX Interrupt Rate"); | ||||
static int ixl_tx_itr = IXL_ITR_4K; | static int ixl_tx_itr = IXL_ITR_4K; | ||||
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr); | TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN, | ||||
&ixl_tx_itr, 0, "TX Interrupt Rate"); | &ixl_tx_itr, 0, "TX Interrupt Rate"); | ||||
#ifdef IXL_IW | #ifdef IXL_IW | ||||
int ixl_enable_iwarp = 0; | int ixl_enable_iwarp = 0; | ||||
TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp); | TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp); | ||||
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN, | |||||
&ixl_enable_iwarp, 0, "iWARP enabled"); | |||||
#if __FreeBSD_version < 1100000 | |||||
int ixl_limit_iwarp_msix = 1; | |||||
#else | |||||
int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX; | |||||
#endif | #endif | ||||
TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix); | |||||
SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN, | |||||
&ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP"); | |||||
#endif | |||||
#ifdef DEV_NETMAP | extern struct if_txrx ixl_txrx_hwb; | ||||
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */ | extern struct if_txrx ixl_txrx_dwb; | ||||
#include <dev/netmap/if_ixl_netmap.h> | |||||
#endif /* DEV_NETMAP */ | |||||
/********************************************************************* | static struct if_shared_ctx ixl_sctx_init = { | ||||
* Device identification routine | .isc_magic = IFLIB_MAGIC, | ||||
* | .isc_q_align = PAGE_SIZE, | ||||
* ixl_probe determines if the driver should be loaded on | .isc_tx_maxsize = IXL_TSO_SIZE, | ||||
* the hardware based on PCI vendor/device id of the device. | .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, | ||||
* | |||||
* return BUS_PROBE_DEFAULT on success, positive on failure | |||||
*********************************************************************/ | |||||
static int | .isc_rx_maxsize = 16384, | ||||
ixl_probe(device_t dev) | .isc_rx_nsegments = IXL_MAX_RX_SEGS, | ||||
{ | .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, | ||||
ixl_vendor_info_t *ent; | .isc_nfl = 1, | ||||
.isc_ntxqs = 1, | |||||
.isc_nrxqs = 1, | |||||
u16 pci_vendor_id, pci_device_id; | .isc_admin_intrcnt = 1, | ||||
u16 pci_subvendor_id, pci_subdevice_id; | .isc_vendor_info = ixl_vendor_info_array, | ||||
char device_name[256]; | .isc_driver_version = ixl_driver_version, | ||||
.isc_driver = &ixl_if_driver, | |||||
#if 0 | .isc_nrxd_min = {IXL_MIN_RING}, | ||||
INIT_DEBUGOUT("ixl_probe: begin"); | .isc_ntxd_min = {IXL_MIN_RING}, | ||||
#endif | .isc_nrxd_max = {IXL_MAX_RING}, | ||||
pci_vendor_id = pci_get_vendor(dev); | .isc_ntxd_max = {IXL_MAX_RING}, | ||||
if (pci_vendor_id != I40E_INTEL_VENDOR_ID) | .isc_nrxd_default = {IXL_DEFAULT_RING}, | ||||
return (ENXIO); | .isc_ntxd_default = {IXL_DEFAULT_RING}, | ||||
}; | |||||
pci_device_id = pci_get_device(dev); | if_shared_ctx_t ixl_sctx = &ixl_sctx_init; | ||||
pci_subvendor_id = pci_get_subvendor(dev); | |||||
pci_subdevice_id = pci_get_subdevice(dev); | |||||
ent = ixl_vendor_info_array; | /*** Functions ***/ | ||||
while (ent->vendor_id != 0) { | static void * | ||||
if ((pci_vendor_id == ent->vendor_id) && | ixl_register(device_t dev) | ||||
(pci_device_id == ent->device_id) && | { | ||||
return (ixl_sctx); | |||||
((pci_subvendor_id == ent->subvendor_id) || | |||||
(ent->subvendor_id == 0)) && | |||||
((pci_subdevice_id == ent->subdevice_id) || | |||||
(ent->subdevice_id == 0))) { | |||||
sprintf(device_name, "%s, Version - %s", | |||||
ixl_strings[ent->index], | |||||
ixl_driver_version); | |||||
device_set_desc_copy(dev, device_name); | |||||
return (BUS_PROBE_DEFAULT); | |||||
} | } | ||||
ent++; | |||||
} | |||||
return (ENXIO); | |||||
} | |||||
static int | static int | ||||
ixl_attach_get_link_status(struct ixl_pf *pf) | ixl_allocate_pci_resources(struct ixl_pf *pf) | ||||
{ | { | ||||
int rid; | |||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = iflib_get_dev(pf->vsi.ctx); | ||||
int error = 0; | |||||
if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || | /* Map BAR0 */ | ||||
(hw->aq.fw_maj_ver < 4)) { | rid = PCIR_BAR(0); | ||||
i40e_msec_delay(75); | pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, | ||||
error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); | &rid, RF_ACTIVE); | ||||
if (error) { | |||||
device_printf(dev, "link restart failed, aq_err=%d\n", | |||||
pf->hw.aq.asq_last_status); | |||||
return error; | |||||
} | |||||
} | |||||
/* Determine link state */ | if (!(pf->pci_mem)) { | ||||
hw->phy.get_link_info = TRUE; | device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); | ||||
i40e_get_link_status(hw, &pf->link_up); | return (ENXIO); | ||||
return (0); | |||||
} | } | ||||
/* | /* Save off the PCI information */ | ||||
* Sanity check and save off tunable values. | hw->vendor_id = pci_get_vendor(dev); | ||||
*/ | hw->device_id = pci_get_device(dev); | ||||
static int | hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); | ||||
ixl_save_pf_tunables(struct ixl_pf *pf) | hw->subsystem_vendor_id = | ||||
{ | pci_read_config(dev, PCIR_SUBVEND_0, 2); | ||||
device_t dev = pf->dev; | hw->subsystem_device_id = | ||||
pci_read_config(dev, PCIR_SUBDEV_0, 2); | |||||
/* Save tunable information */ | hw->bus.device = pci_get_slot(dev); | ||||
pf->enable_msix = ixl_enable_msix; | hw->bus.func = pci_get_function(dev); | ||||
pf->max_queues = ixl_max_queues; | |||||
pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter; | |||||
pf->dynamic_rx_itr = ixl_dynamic_rx_itr; | |||||
pf->dynamic_tx_itr = ixl_dynamic_tx_itr; | |||||
pf->dbg_mask = ixl_core_debug_mask; | |||||
pf->hw.debug_mask = ixl_shared_debug_mask; | |||||
if (ixl_ring_size < IXL_MIN_RING | /* Save off register access information */ | ||||
|| ixl_ring_size > IXL_MAX_RING | pf->osdep.mem_bus_space_tag = | ||||
|| ixl_ring_size % IXL_RING_INCREMENT != 0) { | rman_get_bustag(pf->pci_mem); | ||||
device_printf(dev, "Invalid ring_size value of %d set!\n", | pf->osdep.mem_bus_space_handle = | ||||
ixl_ring_size); | rman_get_bushandle(pf->pci_mem); | ||||
device_printf(dev, "ring_size must be between %d and %d, " | pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); | ||||
"inclusive, and must be a multiple of %d\n", | pf->osdep.flush_reg = I40E_GLGEN_STAT; | ||||
IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT); | pf->osdep.dev = dev; | ||||
device_printf(dev, "Using default value of %d instead\n", | |||||
IXL_DEFAULT_RING); | |||||
pf->ringsz = IXL_DEFAULT_RING; | |||||
} else | |||||
pf->ringsz = ixl_ring_size; | |||||
if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) { | pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; | ||||
device_printf(dev, "Invalid tx_itr value of %d set!\n", | pf->hw.back = &pf->osdep; | ||||
ixl_tx_itr); | |||||
device_printf(dev, "tx_itr must be between %d and %d, " | |||||
"inclusive\n", | |||||
0, IXL_MAX_ITR); | |||||
device_printf(dev, "Using default value of %d instead\n", | |||||
IXL_ITR_4K); | |||||
pf->tx_itr = IXL_ITR_4K; | |||||
} else | |||||
pf->tx_itr = ixl_tx_itr; | |||||
if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) { | |||||
device_printf(dev, "Invalid rx_itr value of %d set!\n", | |||||
ixl_rx_itr); | |||||
device_printf(dev, "rx_itr must be between %d and %d, " | |||||
"inclusive\n", | |||||
0, IXL_MAX_ITR); | |||||
device_printf(dev, "Using default value of %d instead\n", | |||||
IXL_ITR_8K); | |||||
pf->rx_itr = IXL_ITR_8K; | |||||
} else | |||||
pf->rx_itr = ixl_rx_itr; | |||||
return (0); | return (0); | ||||
} | } | ||||
/********************************************************************* | |||||
* Device initialization routine | |||||
* | |||||
* The attach entry point is called when the driver is being loaded. | |||||
* This routine identifies the type of hardware, allocates all resources | |||||
* and initializes the hardware. | |||||
* | |||||
* return 0 on success, positive on failure | |||||
*********************************************************************/ | |||||
static int | static int | ||||
ixl_attach(device_t dev) | ixl_if_attach_pre(if_ctx_t ctx) | ||||
{ | { | ||||
device_t dev; | |||||
struct ixl_pf *pf; | struct ixl_pf *pf; | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct ixl_vsi *vsi; | struct ixl_vsi *vsi; | ||||
if_softc_ctx_t scctx; | |||||
struct i40e_filter_control_settings filter; | |||||
enum i40e_status_code status; | enum i40e_status_code status; | ||||
int error = 0; | int error = 0; | ||||
INIT_DEBUGOUT("ixl_attach: begin"); | INIT_DEBUGOUT("ixl_if_attach_pre: begin"); | ||||
/* Allocate, clear, and link in our primary soft structure */ | /* Allocate, clear, and link in our primary soft structure */ | ||||
pf = device_get_softc(dev); | dev = iflib_get_dev(ctx); | ||||
pf->dev = pf->osdep.dev = dev; | pf = iflib_get_softc(ctx); | ||||
vsi = &pf->vsi; | |||||
vsi->back = pf; | |||||
pf->dev = dev; | |||||
hw = &pf->hw; | hw = &pf->hw; | ||||
/* | /* | ||||
** Note this assumes we have a single embedded VSI, | ** Note this assumes we have a single embedded VSI, | ||||
** this could be enhanced later to allocate multiple | ** this could be enhanced later to allocate multiple | ||||
*/ | */ | ||||
vsi = &pf->vsi; | //vsi->dev = pf->dev; | ||||
vsi->dev = pf->dev; | vsi->hw = &pf->hw; | ||||
vsi->id = 0; | |||||
vsi->num_vlans = 0; | |||||
vsi->ctx = ctx; | |||||
vsi->media = iflib_get_media(ctx); | |||||
vsi->shared = scctx = iflib_get_softc_ctx(ctx); | |||||
/* Save tunable values */ | /* Save tunable values */ | ||||
error = ixl_save_pf_tunables(pf); | error = ixl_save_pf_tunables(pf); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
/* Core Lock Init*/ | |||||
IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev)); | |||||
/* Set up the timer callout */ | |||||
callout_init_mtx(&pf->timer, &pf->pf_mtx, 0); | |||||
/* Do PCI setup - map BAR0, etc */ | /* Do PCI setup - map BAR0, etc */ | ||||
if (ixl_allocate_pci_resources(pf)) { | if (ixl_allocate_pci_resources(pf)) { | ||||
device_printf(dev, "Allocation of PCI resources failed\n"); | device_printf(dev, "Allocation of PCI resources failed\n"); | ||||
error = ENXIO; | error = ENXIO; | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
/* Establish a clean starting point */ | /* Establish a clean starting point */ | ||||
Show All 10 Lines | ixl_if_attach_pre(if_ctx_t ctx) | ||||
status = i40e_init_shared_code(hw); | status = i40e_init_shared_code(hw); | ||||
if (status) { | if (status) { | ||||
device_printf(dev, "Unable to initialize shared code, error %s\n", | device_printf(dev, "Unable to initialize shared code, error %s\n", | ||||
i40e_stat_str(hw, status)); | i40e_stat_str(hw, status)); | ||||
error = EIO; | error = EIO; | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
/* | |||||
* Allocate interrupts and figure out number of queues to use | |||||
* for PF interface | |||||
*/ | |||||
pf->msix = ixl_init_msix(pf); | |||||
/* Set up the admin queue */ | /* Set up the admin queue */ | ||||
hw->aq.num_arq_entries = IXL_AQ_LEN; | hw->aq.num_arq_entries = IXL_AQ_LEN; | ||||
hw->aq.num_asq_entries = IXL_AQ_LEN; | hw->aq.num_asq_entries = IXL_AQ_LEN; | ||||
hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; | hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; | ||||
hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; | hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; | ||||
status = i40e_init_adminq(hw); | status = i40e_init_adminq(hw); | ||||
if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) { | if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) { | ||||
device_printf(dev, "Unable to initialize Admin Queue, error %s\n", | device_printf(dev, "Unable to initialize Admin Queue, error %s\n", | ||||
i40e_stat_str(hw, status)); | i40e_stat_str(hw, status)); | ||||
error = EIO; | error = EIO; | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
ixl_print_nvm_version(pf); | ixl_print_nvm_version(pf); | ||||
if (status == I40E_ERR_FIRMWARE_API_VERSION) { | if (status == I40E_ERR_FIRMWARE_API_VERSION) { | ||||
device_printf(dev, "The driver for the device stopped " | device_printf(dev, "The driver for the device stopped " | ||||
"because the NVM image is newer than expected.\n" | "because the NVM image is newer than expected.\n"); | ||||
"You must install the most recent version of " | device_printf(dev, "You must install the most recent version of " | ||||
"the network driver.\n"); | "the network driver.\n"); | ||||
error = EIO; | error = EIO; | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && | if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && | ||||
hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) | hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) { | ||||
device_printf(dev, "The driver for the device detected " | device_printf(dev, "The driver for the device detected " | ||||
"a newer version of the NVM image than expected.\n" | "a newer version of the NVM image than expected.\n"); | ||||
"Please install the most recent version of the network driver.\n"); | device_printf(dev, "Please install the most recent version " | ||||
else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || | "of the network driver.\n"); | ||||
hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) | } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) { | ||||
device_printf(dev, "The driver for the device detected " | device_printf(dev, "The driver for the device detected " | ||||
"an older version of the NVM image than expected.\n" | "an older version of the NVM image than expected.\n"); | ||||
"Please update the NVM image.\n"); | device_printf(dev, "Please update the NVM image.\n"); | ||||
} | |||||
/* Clear PXE mode */ | /* Clear PXE mode */ | ||||
i40e_clear_pxe_mode(hw); | i40e_clear_pxe_mode(hw); | ||||
/* Get capabilities from the device */ | /* Get capabilities from the device */ | ||||
error = ixl_get_hw_capabilities(pf); | error = ixl_get_hw_capabilities(pf); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "HW capabilities failure!\n"); | device_printf(dev, "get_hw_capabilities failed: %d\n", | ||||
error); | |||||
goto err_get_cap; | goto err_get_cap; | ||||
} | } | ||||
/* Set up host memory cache */ | /* Set up host memory cache */ | ||||
status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, | status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, | ||||
hw->func_caps.num_rx_qp, 0, 0); | hw->func_caps.num_rx_qp, 0, 0); | ||||
if (status) { | if (status) { | ||||
device_printf(dev, "init_lan_hmc failed: %s\n", | device_printf(dev, "init_lan_hmc failed: %s\n", | ||||
i40e_stat_str(hw, status)); | i40e_stat_str(hw, status)); | ||||
goto err_get_cap; | goto err_get_cap; | ||||
} | } | ||||
status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); | status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); | ||||
if (status) { | if (status) { | ||||
device_printf(dev, "configure_lan_hmc failed: %s\n", | device_printf(dev, "configure_lan_hmc failed: %s\n", | ||||
i40e_stat_str(hw, status)); | i40e_stat_str(hw, status)); | ||||
goto err_mac_hmc; | goto err_mac_hmc; | ||||
} | } | ||||
/* Init queue allocation manager */ | |||||
error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp); | |||||
if (error) { | |||||
device_printf(dev, "Failed to init queue manager for PF queues, error %d\n", | |||||
error); | |||||
goto err_mac_hmc; | |||||
} | |||||
/* reserve a contiguous allocation for the PF's VSI */ | |||||
error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag); | |||||
if (error) { | |||||
device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", | |||||
error); | |||||
goto err_mac_hmc; | |||||
} | |||||
device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", | |||||
pf->qtag.num_allocated, pf->qtag.num_active); | |||||
/* Disable LLDP from the firmware for certain NVM versions */ | /* Disable LLDP from the firmware for certain NVM versions */ | ||||
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || | if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || | ||||
(pf->hw.aq.fw_maj_ver < 4)) | (pf->hw.aq.fw_maj_ver < 4)) { | ||||
i40e_aq_stop_lldp(hw, TRUE, NULL); | i40e_aq_stop_lldp(hw, TRUE, NULL); | ||||
pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED; | |||||
} | |||||
/* Get MAC addresses from hardware */ | /* Get MAC addresses from hardware */ | ||||
i40e_get_mac_addr(hw, hw->mac.addr); | i40e_get_mac_addr(hw, hw->mac.addr); | ||||
error = i40e_validate_mac_addr(hw->mac.addr); | error = i40e_validate_mac_addr(hw->mac.addr); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "validate_mac_addr failed: %d\n", error); | device_printf(dev, "validate_mac_addr failed: %d\n", error); | ||||
goto err_mac_hmc; | goto err_mac_hmc; | ||||
} | } | ||||
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); | bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); | ||||
iflib_set_mac(ctx, hw->mac.addr); | |||||
i40e_get_port_mac_addr(hw, hw->mac.port_addr); | i40e_get_port_mac_addr(hw, hw->mac.port_addr); | ||||
/* Set up the device filtering */ | |||||
bzero(&filter, sizeof(filter)); | |||||
filter.enable_ethtype = TRUE; | |||||
filter.enable_macvlan = TRUE; | |||||
filter.enable_fdir = FALSE; | |||||
filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; | |||||
if (i40e_set_filter_control(hw, &filter)) | |||||
device_printf(dev, "i40e_set_filter_control() failed\n"); | |||||
/* Query device FW LLDP status */ | |||||
ixl_get_fw_lldp_status(pf); | |||||
/* Tell FW to apply DCB config on link up */ | |||||
if ((hw->mac.type != I40E_MAC_X722) | |||||
&& ((pf->hw.aq.api_maj_ver > 1) | |||||
|| (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver >= 7))) | |||||
i40e_aq_set_dcb_parameters(hw, true, NULL); | |||||
/* Initialize mac filter list for VSI */ | /* Initialize mac filter list for VSI */ | ||||
SLIST_INIT(&vsi->ftl); | SLIST_INIT(&vsi->ftl); | ||||
/* Set up SW VSI and allocate queue memory and rings */ | /* Fill out iflib parameters */ | ||||
if (ixl_setup_stations(pf)) { | if (hw->mac.type == I40E_MAC_X722) | ||||
device_printf(dev, "setup stations failed!\n"); | scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128; | ||||
error = ENOMEM; | else | ||||
goto err_mac_hmc; | scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; | ||||
if (vsi->enable_head_writeback) { | |||||
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] | |||||
* sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN); | |||||
scctx->isc_txrx = &ixl_txrx_hwb; | |||||
} else { | |||||
scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] | |||||
* sizeof(struct i40e_tx_desc), DBA_ALIGN); | |||||
scctx->isc_txrx = &ixl_txrx_dwb; | |||||
} | } | ||||
scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] | |||||
* sizeof(union i40e_32byte_rx_desc), DBA_ALIGN); | |||||
scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR); | |||||
scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS; | |||||
scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS; | |||||
scctx->isc_tx_tso_size_max = IXL_TSO_SIZE; | |||||
scctx->isc_tx_tso_segsize_max = PAGE_SIZE; | |||||
scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size; | |||||
scctx->isc_tx_csum_flags = CSUM_OFFLOAD; | |||||
scctx->isc_capenable = IXL_CAPS; | |||||
INIT_DEBUGOUT("ixl_if_attach_pre: end"); | |||||
return (0); | |||||
// TODO: Review what needs to be cleaned up when this fails | |||||
err_mac_hmc: | |||||
i40e_shutdown_lan_hmc(hw); | |||||
err_get_cap: | |||||
i40e_shutdown_adminq(hw); | |||||
err_out: | |||||
ixl_free_pci_resources(pf); | |||||
ixl_free_mac_filters(vsi); | |||||
return (error); | |||||
} | |||||
static int | |||||
ixl_if_attach_post(if_ctx_t ctx) | |||||
{ | |||||
device_t dev; | |||||
struct ixl_pf *pf; | |||||
struct i40e_hw *hw; | |||||
struct ixl_vsi *vsi; | |||||
int error = 0; | |||||
enum i40e_status_code status; | |||||
INIT_DEBUGOUT("ixl_if_attach_post: begin"); | |||||
dev = iflib_get_dev(ctx); | |||||
pf = iflib_get_softc(ctx); | |||||
vsi = &pf->vsi; | |||||
vsi->ifp = iflib_get_ifp(ctx); | |||||
hw = &pf->hw; | |||||
/* Setup OS network interface / ifnet */ | /* Setup OS network interface / ifnet */ | ||||
if (ixl_setup_interface(dev, vsi)) { | if (ixl_setup_interface(dev, pf)) { | ||||
device_printf(dev, "interface setup failed!\n"); | device_printf(dev, "interface setup failed!\n"); | ||||
error = EIO; | error = EIO; | ||||
goto err_late; | goto err_late; | ||||
} | } | ||||
/* Determine link state */ | /* Determine link state */ | ||||
if (ixl_attach_get_link_status(pf)) { | if (ixl_attach_get_link_status(pf)) { | ||||
error = EINVAL; | error = EINVAL; | ||||
goto err_late; | goto err_late; | ||||
} | } | ||||
error = ixl_switch_config(pf); | error = ixl_switch_config(pf); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "Initial ixl_switch_config() failed: %d\n", | device_printf(dev, "Initial ixl_switch_config() failed: %d\n", | ||||
error); | error); | ||||
goto err_late; | goto err_late; | ||||
} | } | ||||
/* Init queue allocation manager */ | |||||
error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp); | |||||
if (error) { | |||||
device_printf(dev, "Failed to init queue manager for PF queues, error %d\n", | |||||
error); | |||||
goto err_mac_hmc; | |||||
} | |||||
/* reserve a contiguous allocation for the PF's VSI */ | |||||
error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, | |||||
max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag); | |||||
if (error) { | |||||
device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", | |||||
error); | |||||
goto err_mac_hmc; | |||||
} | |||||
device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", | |||||
pf->qtag.num_allocated, pf->qtag.num_active); | |||||
/* Limit PHY interrupts to link, autoneg, and modules failure */ | /* Limit PHY interrupts to link, autoneg, and modules failure */ | ||||
status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, | status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, | ||||
NULL); | NULL); | ||||
if (status) { | if (status) { | ||||
device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s," | device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s," | ||||
" aq_err %s\n", i40e_stat_str(hw, status), | " aq_err %s\n", i40e_stat_str(hw, status), | ||||
i40e_aq_str(hw, hw->aq.asq_last_status)); | i40e_aq_str(hw, hw->aq.asq_last_status)); | ||||
goto err_late; | goto err_late; | ||||
} | } | ||||
/* Get the bus configuration and set the shared code's config */ | /* Get the bus configuration and set the shared code */ | ||||
ixl_get_bus_info(pf); | ixl_get_bus_info(pf); | ||||
/* | /* Keep admin queue interrupts active while driver is loaded */ | ||||
* In MSI-X mode, initialize the Admin Queue interrupt, | if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { | ||||
* so userland tools can communicate with the adapter regardless of | |||||
* the ifnet interface's status. | |||||
*/ | |||||
if (pf->msix > 1) { | |||||
error = ixl_setup_adminq_msix(pf); | |||||
if (error) { | |||||
device_printf(dev, "ixl_setup_adminq_msix() error: %d\n", | |||||
error); | |||||
goto err_late; | |||||
} | |||||
error = ixl_setup_adminq_tq(pf); | |||||
if (error) { | |||||
device_printf(dev, "ixl_setup_adminq_tq() error: %d\n", | |||||
error); | |||||
goto err_late; | |||||
} | |||||
ixl_configure_intr0_msix(pf); | ixl_configure_intr0_msix(pf); | ||||
ixl_enable_intr0(hw); | ixl_enable_intr0(hw); | ||||
error = ixl_setup_queue_msix(vsi); | |||||
if (error) | |||||
device_printf(dev, "ixl_setup_queue_msix() error: %d\n", | |||||
error); | |||||
error = ixl_setup_queue_tqs(vsi); | |||||
if (error) | |||||
device_printf(dev, "ixl_setup_queue_tqs() error: %d\n", | |||||
error); | |||||
} else { | |||||
error = ixl_setup_legacy(pf); | |||||
error = ixl_setup_adminq_tq(pf); | |||||
if (error) { | |||||
device_printf(dev, "ixl_setup_adminq_tq() error: %d\n", | |||||
error); | |||||
goto err_late; | |||||
} | } | ||||
error = ixl_setup_queue_tqs(vsi); | |||||
if (error) | |||||
device_printf(dev, "ixl_setup_queue_tqs() error: %d\n", | |||||
error); | |||||
} | |||||
if (error) { | |||||
device_printf(dev, "interrupt setup error: %d\n", error); | |||||
} | |||||
/* Set initial advertised speed sysctl value */ | /* Set initial advertised speed sysctl value */ | ||||
ixl_get_initial_advertised_speeds(pf); | ixl_set_initial_advertised_speeds(pf); | ||||
/* Initialize statistics & add sysctls */ | /* Initialize statistics & add sysctls */ | ||||
ixl_add_device_sysctls(pf); | ixl_add_device_sysctls(pf); | ||||
ixl_pf_reset_stats(pf); | ixl_pf_reset_stats(pf); | ||||
ixl_update_stats_counters(pf); | ixl_update_stats_counters(pf); | ||||
ixl_add_hw_stats(pf); | ixl_add_hw_stats(pf); | ||||
/* Register for VLAN events */ | hw->phy.get_link_info = true; | ||||
vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, | i40e_get_link_status(hw, &pf->link_up); | ||||
ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); | ixl_update_link_status(pf); | ||||
vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, | |||||
ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); | |||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
ixl_initialize_sriov(pf); | ixl_initialize_sriov(pf); | ||||
#endif | #endif | ||||
#ifdef DEV_NETMAP | |||||
ixl_netmap_attach(vsi); | |||||
#endif /* DEV_NETMAP */ | |||||
#ifdef IXL_IW | #ifdef IXL_IW | ||||
if (hw->func_caps.iwarp && ixl_enable_iwarp) { | if (hw->func_caps.iwarp && ixl_enable_iwarp) { | ||||
pf->iw_enabled = (pf->iw_msix > 0) ? true : false; | pf->iw_enabled = (pf->iw_msix > 0) ? true : false; | ||||
if (pf->iw_enabled) { | if (pf->iw_enabled) { | ||||
error = ixl_iw_pf_attach(pf); | error = ixl_iw_pf_attach(pf); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"interfacing to iwarp driver failed: %d\n", | "interfacing to iwarp driver failed: %d\n", | ||||
error); | error); | ||||
goto err_late; | goto err_late; | ||||
} | |||||
} else | } else | ||||
device_printf(dev, "iWARP ready\n"); | |||||
} else | |||||
device_printf(dev, | device_printf(dev, | ||||
"iwarp disabled on this device (no msix vectors)\n"); | "iwarp disabled on this device (no msix vectors)\n"); | ||||
} else { | } else { | ||||
pf->iw_enabled = false; | pf->iw_enabled = false; | ||||
device_printf(dev, "The device is not iWARP enabled\n"); | device_printf(dev, "The device is not iWARP enabled\n"); | ||||
} | } | ||||
#endif | #endif | ||||
INIT_DEBUGOUT("ixl_attach: end"); | INIT_DBG_DEV(dev, "end"); | ||||
return (0); | return (0); | ||||
// TODO: Review what needs to be cleaned up when this fails | |||||
err_late: | err_late: | ||||
if (vsi->ifp != NULL) { | |||||
ether_ifdetach(vsi->ifp); | |||||
if_free(vsi->ifp); | |||||
} | |||||
err_mac_hmc: | err_mac_hmc: | ||||
i40e_shutdown_lan_hmc(hw); | i40e_shutdown_lan_hmc(hw); | ||||
err_get_cap: | |||||
i40e_shutdown_adminq(hw); | i40e_shutdown_adminq(hw); | ||||
err_out: | |||||
ixl_free_pci_resources(pf); | ixl_free_pci_resources(pf); | ||||
ixl_free_vsi(vsi); | ixl_free_mac_filters(vsi); | ||||
IXL_PF_LOCK_DESTROY(pf); | INIT_DEBUGOUT("end: error %d", error); | ||||
return (error); | return (error); | ||||
} | } | ||||
/********************************************************************* | |||||
* Device removal routine | |||||
* | |||||
* The detach entry point is called when the driver is being removed. | |||||
* This routine stops the adapter and deallocates all the resources | |||||
* that were allocated for driver operation. | |||||
* | |||||
* return 0 on success, positive on failure | |||||
*********************************************************************/ | |||||
static int | static int | ||||
ixl_detach(device_t dev) | ixl_if_detach(if_ctx_t ctx) | ||||
{ | { | ||||
struct ixl_pf *pf = device_get_softc(dev); | struct ixl_pf *pf = iflib_get_softc(ctx); | ||||
struct i40e_hw *hw = &pf->hw; | |||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
struct i40e_hw *hw = &pf->hw; | |||||
device_t dev = pf->dev; | |||||
enum i40e_status_code status; | enum i40e_status_code status; | ||||
#if defined(PCI_IOV) || defined(IXL_IW) | #if defined(PCI_IOV) || defined(IXL_IW) | ||||
int error; | int error; | ||||
#endif | #endif | ||||
INIT_DEBUGOUT("ixl_detach: begin"); | INIT_DBG_DEV(dev, "begin"); | ||||
/* Make sure VLANS are not using driver */ | #ifdef IXL_IW | ||||
if (vsi->ifp->if_vlantrunk != NULL) { | if (ixl_enable_iwarp && pf->iw_enabled) { | ||||
device_printf(dev, "Vlan in use, detach first\n"); | error = ixl_iw_pf_detach(pf); | ||||
return (EBUSY); | if (error == EBUSY) { | ||||
device_printf(dev, "iwarp in use; stop it first.\n"); | |||||
return (error); | |||||
} | } | ||||
} | |||||
#endif | |||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
error = pci_iov_detach(dev); | error = pci_iov_detach(dev); | ||||
if (error != 0) { | if (error != 0) { | ||||
device_printf(dev, "SR-IOV in use; detach first.\n"); | device_printf(dev, "SR-IOV in use; detach first.\n"); | ||||
return (error); | return (error); | ||||
} | } | ||||
#endif | #endif | ||||
/* Remove all previously allocated media types */ | |||||
ifmedia_removeall(vsi->media); | |||||
ether_ifdetach(vsi->ifp); | |||||
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) | |||||
ixl_stop(pf); | |||||
/* Shutdown LAN HMC */ | /* Shutdown LAN HMC */ | ||||
if (hw->hmc.hmc_obj) { | |||||
status = i40e_shutdown_lan_hmc(hw); | status = i40e_shutdown_lan_hmc(hw); | ||||
if (status) | if (status) | ||||
device_printf(dev, | device_printf(dev, | ||||
"Shutdown LAN HMC failed with code %d\n", status); | "i40e_shutdown_lan_hmc() failed with status %s\n", | ||||
i40e_stat_str(hw, status)); | |||||
} | |||||
/* Teardown LAN queue resources */ | |||||
ixl_teardown_queue_msix(vsi); | |||||
ixl_free_queue_tqs(vsi); | |||||
/* Shutdown admin queue */ | /* Shutdown admin queue */ | ||||
ixl_disable_intr0(hw); | ixl_disable_intr0(hw); | ||||
ixl_teardown_adminq_msix(pf); | |||||
ixl_free_adminq_tq(pf); | |||||
status = i40e_shutdown_adminq(hw); | status = i40e_shutdown_adminq(hw); | ||||
if (status) | if (status) | ||||
device_printf(dev, | device_printf(dev, | ||||
"Shutdown Admin queue failed with code %d\n", status); | "i40e_shutdown_adminq() failed with status %s\n", | ||||
i40e_stat_str(hw, status)); | |||||
/* Unregister VLAN events */ | ixl_pf_qmgr_destroy(&pf->qmgr); | ||||
if (vsi->vlan_attach != NULL) | ixl_free_pci_resources(pf); | ||||
EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); | ixl_free_mac_filters(vsi); | ||||
if (vsi->vlan_detach != NULL) | INIT_DBG_DEV(dev, "end"); | ||||
EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); | return (0); | ||||
} | |||||
callout_drain(&pf->timer); | /* TODO: Do shutdown-specific stuff here */ | ||||
static int | |||||
ixl_if_shutdown(if_ctx_t ctx) | |||||
{ | |||||
int error = 0; | |||||
INIT_DEBUGOUT("ixl_if_shutdown: begin"); | |||||
/* TODO: Call ixl_if_stop()? */ | |||||
/* TODO: Then setup low power mode */ | |||||
return (error); | |||||
} | |||||
static int | |||||
ixl_if_suspend(if_ctx_t ctx) | |||||
{ | |||||
int error = 0; | |||||
INIT_DEBUGOUT("ixl_if_suspend: begin"); | |||||
/* TODO: Call ixl_if_stop()? */ | |||||
/* TODO: Then setup low power mode */ | |||||
return (error); | |||||
} | |||||
static int | |||||
ixl_if_resume(if_ctx_t ctx) | |||||
{ | |||||
struct ifnet *ifp = iflib_get_ifp(ctx); | |||||
INIT_DEBUGOUT("ixl_if_resume: begin"); | |||||
/* Read & clear wake-up registers */ | |||||
/* Required after D3->D0 transition */ | |||||
if (ifp->if_flags & IFF_UP) | |||||
ixl_if_init(ctx); | |||||
return (0); | |||||
} | |||||
/* Set Report Status queue fields to 0 */ | |||||
static void | |||||
ixl_init_tx_rsqs(struct ixl_vsi *vsi) | |||||
{ | |||||
if_softc_ctx_t scctx = vsi->shared; | |||||
struct ixl_tx_queue *tx_que; | |||||
int i, j; | |||||
for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) { | |||||
struct tx_ring *txr = &tx_que->txr; | |||||
txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0; | |||||
for (j = 0; j < scctx->isc_ntxd[0]; j++) | |||||
txr->tx_rsq[j] = QIDX_INVALID; | |||||
} | |||||
} | |||||
static void | |||||
ixl_init_tx_cidx(struct ixl_vsi *vsi) | |||||
{ | |||||
struct ixl_tx_queue *tx_que; | |||||
int i; | |||||
for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) { | |||||
struct tx_ring *txr = &tx_que->txr; | |||||
txr->tx_cidx_processed = 0; | |||||
} | |||||
} | |||||
void | |||||
ixl_if_init(if_ctx_t ctx) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct i40e_hw *hw = &pf->hw; | |||||
device_t dev = iflib_get_dev(ctx); | |||||
u8 tmpaddr[ETHER_ADDR_LEN]; | |||||
int ret; | |||||
/* | |||||
* If the aq is dead here, it probably means something outside of the driver | |||||
* did something to the adapter, like a PF reset. | |||||
* So rebuild the driver's state here if that occurs. | |||||
*/ | |||||
if (!i40e_check_asq_alive(&pf->hw)) { | |||||
device_printf(dev, "Admin Queue is down; resetting...\n"); | |||||
ixl_teardown_hw_structs(pf); | |||||
ixl_reset(pf); | |||||
} | |||||
/* Get the latest mac address... User might use a LAA */ | |||||
bcopy(IF_LLADDR(vsi->ifp), tmpaddr, | |||||
ETH_ALEN); | |||||
if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && | |||||
(i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { | |||||
ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); | |||||
bcopy(tmpaddr, hw->mac.addr, | |||||
ETH_ALEN); | |||||
ret = i40e_aq_mac_address_write(hw, | |||||
I40E_AQC_WRITE_TYPE_LAA_ONLY, | |||||
hw->mac.addr, NULL); | |||||
if (ret) { | |||||
device_printf(dev, "LLA address" | |||||
"change failed!!\n"); | |||||
return; | |||||
} | |||||
} | |||||
iflib_set_mac(ctx, hw->mac.addr); | |||||
ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); | |||||
/* Prepare the VSI: rings, hmc contexts, etc... */ | |||||
if (ixl_initialize_vsi(vsi)) { | |||||
device_printf(dev, "initialize vsi failed!!\n"); | |||||
return; | |||||
} | |||||
// TODO: Call iflib setup multicast filters here? | |||||
// It's called in ixgbe in D5213 | |||||
ixl_if_multi_set(ctx); | |||||
/* Set up RSS */ | |||||
ixl_config_rss(pf); | |||||
/* Add protocol filters to list */ | |||||
ixl_init_filters(vsi); | |||||
/* Setup vlan's if needed */ | |||||
ixl_setup_vlan_filters(vsi); | |||||
/* Set up MSI/X routing and the ITR settings */ | |||||
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { | |||||
ixl_configure_queue_intr_msix(pf); | |||||
ixl_configure_itr(pf); | |||||
} else | |||||
ixl_configure_legacy(pf); | |||||
if (vsi->enable_head_writeback) | |||||
ixl_init_tx_cidx(vsi); | |||||
else | |||||
ixl_init_tx_rsqs(vsi); | |||||
ixl_enable_rings(vsi); | |||||
i40e_aq_set_default_vsi(hw, vsi->seid, NULL); | |||||
ixl_reconfigure_filters(vsi); | |||||
#ifdef IXL_IW | #ifdef IXL_IW | ||||
if (ixl_enable_iwarp && pf->iw_enabled) { | if (ixl_enable_iwarp && pf->iw_enabled) { | ||||
error = ixl_iw_pf_detach(pf); | ret = ixl_iw_pf_init(pf); | ||||
if (error == EBUSY) { | if (ret) | ||||
device_printf(dev, "iwarp in use; stop it first.\n"); | device_printf(dev, | ||||
return (error); | "initialize iwarp failed, code %d\n", ret); | ||||
} | } | ||||
#endif | |||||
} | } | ||||
void | |||||
ixl_if_stop(if_ctx_t ctx) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
INIT_DEBUGOUT("ixl_if_stop: begin\n"); | |||||
// TODO: This may need to be reworked | |||||
#ifdef IXL_IW | |||||
/* Stop iWARP device */ | |||||
if (ixl_enable_iwarp && pf->iw_enabled) | |||||
ixl_iw_pf_stop(pf); | |||||
#endif | #endif | ||||
#ifdef DEV_NETMAP | ixl_disable_rings_intr(vsi); | ||||
netmap_detach(vsi->ifp); | ixl_disable_rings(vsi); | ||||
#endif /* DEV_NETMAP */ | } | ||||
ixl_pf_qmgr_destroy(&pf->qmgr); | |||||
ixl_free_pci_resources(pf); | static int | ||||
bus_generic_detach(dev); | ixl_if_msix_intr_assign(if_ctx_t ctx, int msix) | ||||
if_free(vsi->ifp); | { | ||||
ixl_free_vsi(vsi); | struct ixl_pf *pf = iflib_get_softc(ctx); | ||||
IXL_PF_LOCK_DESTROY(pf); | struct ixl_vsi *vsi = &pf->vsi; | ||||
struct ixl_rx_queue *rx_que = vsi->rx_queues; | |||||
struct ixl_tx_queue *tx_que = vsi->tx_queues; | |||||
int err, i, rid, vector = 0; | |||||
char buf[16]; | |||||
/* Admin Que must use vector 0*/ | |||||
rid = vector + 1; | |||||
err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, | |||||
ixl_msix_adminq, pf, 0, "aq"); | |||||
if (err) { | |||||
iflib_irq_free(ctx, &vsi->irq); | |||||
device_printf(iflib_get_dev(ctx), | |||||
"Failed to register Admin que handler"); | |||||
return (err); | |||||
} | |||||
pf->admvec = vector; | |||||
// TODO: Re-enable this at some point | |||||
// iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov"); | |||||
/* Now set up the stations */ | |||||
for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++, rx_que++) { | |||||
rid = vector + 1; | |||||
snprintf(buf, sizeof(buf), "rxq%d", i); | |||||
err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, | |||||
IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf); | |||||
/* XXX: Does the driver work as expected if there are fewer num_rx_queues than | |||||
* what's expected in the iflib context? */ | |||||
if (err) { | |||||
device_printf(iflib_get_dev(ctx), | |||||
"Failed to allocate q int %d err: %d", i, err); | |||||
vsi->num_rx_queues = i + 1; | |||||
goto fail; | |||||
} | |||||
rx_que->msix = vector; | |||||
} | |||||
bzero(buf, sizeof(buf)); | |||||
for (i = 0; i < vsi->num_tx_queues; i++, tx_que++) { | |||||
snprintf(buf, sizeof(buf), "txq%d", i); | |||||
iflib_softirq_alloc_generic(ctx, | |||||
&vsi->rx_queues[i % vsi->num_rx_queues].que_irq, | |||||
IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); | |||||
/* TODO: Maybe call a strategy function for this to figure out which | |||||
* interrupts to map Tx queues to. I don't know if there's an immediately | |||||
* better way than this other than a user-supplied map, though. */ | |||||
tx_que->msix = (i % vsi->num_rx_queues) + 1; | |||||
} | |||||
return (0); | return (0); | ||||
fail: | |||||
iflib_irq_free(ctx, &vsi->irq); | |||||
rx_que = vsi->rx_queues; | |||||
for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) | |||||
iflib_irq_free(ctx, &rx_que->que_irq); | |||||
return (err); | |||||
} | } | ||||
/********************************************************************* | /* | ||||
* Enable all interrupts | |||||
* TODO: Let it enable all interrupts? | |||||
* | * | ||||
* Shutdown entry point | * Called in: | ||||
* iflib_init_locked, after ixl_if_init() | |||||
*/ | |||||
static void | |||||
ixl_if_enable_intr(if_ctx_t ctx) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct ixl_rx_queue *que = vsi->rx_queues; | |||||
// TODO: Allow this to be enabled here? | |||||
ixl_enable_intr0(hw); | |||||
/* Enable queue interrupts */ | |||||
for (int i = 0; i < vsi->num_rx_queues; i++, que++) | |||||
/* TODO: Queue index parameter is probably wrong */ | |||||
ixl_enable_queue(hw, que->rxr.me); | |||||
} | |||||
/* | |||||
* Disable queue interrupts | |||||
* | * | ||||
**********************************************************************/ | * Other interrupt causes need to remain active. | ||||
*/ | |||||
static void | |||||
ixl_if_disable_intr(if_ctx_t ctx) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct ixl_rx_queue *rx_que = vsi->rx_queues; | |||||
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { | |||||
ixl_disable_intr0(hw); | |||||
for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) | |||||
ixl_disable_queue(hw, rx_que->msix - 1); | |||||
} else { | |||||
// Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF | |||||
// stops queues from triggering interrupts | |||||
wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); | |||||
} | |||||
} | |||||
static int | static int | ||||
ixl_shutdown(device_t dev) | ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) | ||||
{ | { | ||||
struct ixl_pf *pf = device_get_softc(dev); | struct ixl_pf *pf = iflib_get_softc(ctx); | ||||
ixl_stop(pf); | struct ixl_vsi *vsi = &pf->vsi; | ||||
struct i40e_hw *hw = vsi->hw; | |||||
struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid]; | |||||
ixl_enable_queue(hw, rx_que->msix - 1); | |||||
return (0); | |||||
} | |||||
static int | |||||
ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid]; | |||||
ixl_enable_queue(hw, tx_que->msix - 1); | |||||
return (0); | |||||
} | |||||
static int | |||||
ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
if_softc_ctx_t scctx = vsi->shared; | |||||
struct ixl_tx_queue *que; | |||||
// int i; | |||||
int i, j, error = 0; | |||||
MPASS(vsi->num_tx_queues > 0); | |||||
MPASS(ntxqs == 1); | |||||
MPASS(vsi->num_tx_queues == ntxqsets); | |||||
/* Allocate queue structure memory */ | |||||
if (!(vsi->tx_queues = | |||||
(struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) { | |||||
device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); | |||||
return (ENOMEM); | |||||
} | |||||
for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { | |||||
struct tx_ring *txr = &que->txr; | |||||
txr->me = i; | |||||
que->vsi = vsi; | |||||
if (!vsi->enable_head_writeback) { | |||||
/* Allocate report status array */ | |||||
if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) { | |||||
device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n"); | |||||
error = ENOMEM; | |||||
goto fail; | |||||
} | |||||
/* Init report status array */ | |||||
for (j = 0; j < scctx->isc_ntxd[0]; j++) | |||||
txr->tx_rsq[j] = QIDX_INVALID; | |||||
} | |||||
/* get the virtual and physical address of the hardware queues */ | |||||
txr->tail = I40E_QTX_TAIL(txr->me); | |||||
txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs]; | |||||
txr->tx_paddr = paddrs[i * ntxqs]; | |||||
txr->que = que; | |||||
} | |||||
return (0); | |||||
fail: | |||||
ixl_if_queues_free(ctx); | |||||
return (error); | |||||
} | |||||
static int | |||||
ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct ixl_rx_queue *que; | |||||
int i, error = 0; | |||||
MPASS(vsi->num_rx_queues > 0); | |||||
MPASS(nrxqs == 1); | |||||
MPASS(vsi->num_rx_queues == nrxqsets); | |||||
/* Allocate queue structure memory */ | |||||
if (!(vsi->rx_queues = | |||||
(struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) * | |||||
nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) { | |||||
device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); | |||||
error = ENOMEM; | |||||
goto fail; | |||||
} | |||||
for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) { | |||||
struct rx_ring *rxr = &que->rxr; | |||||
rxr->me = i; | |||||
que->vsi = vsi; | |||||
/* get the virtual and physical address of the hardware queues */ | |||||
rxr->tail = I40E_QRX_TAIL(rxr->me); | |||||
rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs]; | |||||
rxr->rx_paddr = paddrs[i * nrxqs]; | |||||
rxr->que = que; | |||||
} | |||||
return (0); | |||||
fail: | |||||
ixl_if_queues_free(ctx); | |||||
return (error); | |||||
} | |||||
static void | |||||
ixl_if_queues_free(if_ctx_t ctx) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
if (vsi->enable_head_writeback) { | |||||
struct ixl_tx_queue *que; | |||||
int i = 0; | |||||
for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) { | |||||
struct tx_ring *txr = &que->txr; | |||||
if (txr->tx_rsq != NULL) { | |||||
free(txr->tx_rsq, M_IXL); | |||||
txr->tx_rsq = NULL; | |||||
} | |||||
} | |||||
} | |||||
if (vsi->tx_queues != NULL) { | |||||
free(vsi->tx_queues, M_IXL); | |||||
vsi->tx_queues = NULL; | |||||
} | |||||
if (vsi->rx_queues != NULL) { | |||||
free(vsi->rx_queues, M_IXL); | |||||
vsi->rx_queues = NULL; | |||||
} | |||||
} | |||||
void | |||||
ixl_update_link_status(struct ixl_pf *pf) | |||||
{ | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
u64 baudrate; | |||||
if (pf->link_up) { | |||||
if (vsi->link_active == FALSE) { | |||||
vsi->link_active = TRUE; | |||||
baudrate = ixl_max_aq_speed_to_value(pf->link_speed); | |||||
iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate); | |||||
ixl_link_up_msg(pf); | |||||
#ifdef PCI_IOV | |||||
ixl_broadcast_link_state(pf); | |||||
#endif | |||||
} | |||||
} else { /* Link down */ | |||||
if (vsi->link_active == TRUE) { | |||||
vsi->link_active = FALSE; | |||||
iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0); | |||||
#ifdef PCI_IOV | |||||
ixl_broadcast_link_state(pf); | |||||
#endif | |||||
} | |||||
} | |||||
} | |||||
static int | |||||
ixl_process_adminq(struct ixl_pf *pf, u16 *pending) | |||||
{ | |||||
enum i40e_status_code status = I40E_SUCCESS; | |||||
struct i40e_arq_event_info event; | |||||
struct i40e_hw *hw = &pf->hw; | |||||
device_t dev = pf->dev; | |||||
u16 opcode; | |||||
u32 loop = 0, reg; | |||||
event.buf_len = IXL_AQ_BUF_SZ; | |||||
event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO); | |||||
if (!event.msg_buf) { | |||||
device_printf(dev, "%s: Unable to allocate memory for Admin" | |||||
" Queue event!\n", __func__); | |||||
return (ENOMEM); | |||||
} | |||||
/* clean and process any events */ | |||||
do { | |||||
status = i40e_clean_arq_element(hw, &event, pending); | |||||
if (status) | |||||
break; | |||||
opcode = LE16_TO_CPU(event.desc.opcode); | |||||
ixl_dbg(pf, IXL_DBG_AQ, | |||||
"Admin Queue event: %#06x\n", opcode); | |||||
switch (opcode) { | |||||
case i40e_aqc_opc_get_link_status: | |||||
ixl_link_event(pf, &event); | |||||
break; | |||||
case i40e_aqc_opc_send_msg_to_pf: | |||||
#ifdef PCI_IOV | |||||
ixl_handle_vf_msg(pf, &event); | |||||
#endif | |||||
break; | |||||
/* | |||||
* This should only occur on no-drop queues, which | |||||
* aren't currently configured. | |||||
*/ | |||||
case i40e_aqc_opc_event_lan_overflow: | |||||
device_printf(dev, "LAN overflow event\n"); | |||||
break; | |||||
default: | |||||
device_printf(dev, "AdminQ unknown event %x\n", opcode); | |||||
break; | |||||
} | |||||
} while (*pending && (loop++ < IXL_ADM_LIMIT)); | |||||
free(event.msg_buf, M_IXL); | |||||
/* Re-enable admin queue interrupt cause */ | |||||
reg = rd32(hw, I40E_PFINT_ICR0_ENA); | |||||
reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; | |||||
wr32(hw, I40E_PFINT_ICR0_ENA, reg); | |||||
return (status); | |||||
} | |||||
static void | |||||
ixl_if_update_admin_status(if_ctx_t ctx) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct i40e_hw *hw = &pf->hw; | |||||
u16 pending; | |||||
// TODO: Refactor reset handling | |||||
if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) { | |||||
ixl_handle_empr_reset(pf); | |||||
//iflib_init_locked(ctx); | |||||
} | |||||
if (pf->state & IXL_PF_STATE_CORE_RESET_REQ) { | |||||
device_printf(pf->dev, "Doing CORE reset...\n"); | |||||
//iflib_stop(ctx); | |||||
ixl_teardown_hw_structs(pf); | |||||
wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); | |||||
atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING); | |||||
// ixl_handle_empr_reset(pf); | |||||
// iflib_init_locked(ctx); | |||||
return; | |||||
} | |||||
if (pf->state & IXL_PF_STATE_GLOB_RESET_REQ) { | |||||
device_printf(pf->dev, "Doing GLOB reset...\n"); | |||||
//iflib_stop(ctx); | |||||
ixl_teardown_hw_structs(pf); | |||||
wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK); | |||||
atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING); | |||||
// ixl_handle_empr_reset(pf); | |||||
// iflib_init_locked(ctx); | |||||
return; | |||||
} | |||||
if (pf->state & IXL_PF_STATE_EMP_RESET_REQ) { | |||||
/* This register is read-only to drivers */ | |||||
if (!(rd32(hw, 0x000B818C) & 0x1)) { | |||||
device_printf(pf->dev, "SW not allowed to initiate EMPR\n"); | |||||
atomic_clear_int(&pf->state, IXL_PF_STATE_EMP_RESET_REQ); | |||||
} else { | |||||
device_printf(pf->dev, "Doing EMP reset...\n"); | |||||
//iflib_stop(ctx); | |||||
ixl_teardown_hw_structs(pf); | |||||
wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK); | |||||
atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING); | |||||
// ixl_handle_empr_reset(pf); | |||||
// iflib_init_locked(ctx); | |||||
return; | |||||
} | |||||
} | |||||
if (pf->state & IXL_PF_STATE_MDD_PENDING) | |||||
ixl_handle_mdd_event(pf); | |||||
if (pf->state & IXL_PF_STATE_PF_RESET_REQ) { | |||||
device_printf(pf->dev, "Doing PF reset...\n"); | |||||
//iflib_stop(ctx); | |||||
ixl_teardown_hw_structs(pf); | |||||
ixl_reset(pf); | |||||
device_printf(pf->dev, "PF reset done.\n"); | |||||
// TODO: Do init if previously up! | |||||
//iflib_init_locked(ctx); | |||||
} | |||||
#ifdef PCI_IOV | |||||
if (pf->state & IXL_PF_STATE_VF_RESET_REQ) | |||||
iflib_iov_intr_deferred(ctx); | |||||
#endif | |||||
ixl_process_adminq(pf, &pending); | |||||
ixl_update_link_status(pf); | |||||
/* | |||||
* If there are still messages to process, reschedule ourselves. | |||||
* Otherwise, re-enable our interrupt and go to sleep. | |||||
*/ | |||||
if (pending > 0) | |||||
iflib_admin_intr_deferred(ctx); | |||||
else | |||||
ixl_enable_intr0(hw); | |||||
} | |||||
static void | |||||
ixl_if_multi_set(if_ctx_t ctx) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
int mcnt = 0, flags; | |||||
IOCTL_DEBUGOUT("ixl_if_multi_set: begin"); | |||||
mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR); | |||||
/* delete existing MC filters */ | |||||
ixl_del_multi(vsi); | |||||
if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { | |||||
i40e_aq_set_vsi_multicast_promiscuous(hw, | |||||
vsi->seid, TRUE, NULL); | |||||
return; | |||||
} | |||||
/* (re-)install filters for all mcast addresses */ | |||||
mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi); | |||||
if (mcnt > 0) { | |||||
flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); | |||||
ixl_add_hw_filters(vsi, flags, mcnt); | |||||
} | |||||
IOCTL_DEBUGOUT("ixl_if_multi_set: end"); | |||||
} | |||||
static int | |||||
ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); | |||||
if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - | |||||
ETHER_VLAN_ENCAP_LEN) | |||||
return (EINVAL); | |||||
vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + | |||||
ETHER_VLAN_ENCAP_LEN; | |||||
return (0); | |||||
} | |||||
/*
 * Report current media status to ifmedia consumers (e.g. ifconfig).
 *
 * Refreshes link information from the firmware, then translates the PHY
 * type reported by the hardware into the closest ifmedia subtype.
 * Negotiated flow-control (pause) state is reported as well.
 */
static void
ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw = &pf->hw;

	INIT_DEBUGOUT("ixl_media_status: begin");

	/* Ask firmware for fresh link info before reporting */
	hw->phy.get_link_info = TRUE;
	i40e_get_link_status(hw, &pf->link_up);
	ixl_update_link_status(pf);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!pf->link_up) {
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;

	/* Hardware is always full-duplex */
	ifmr->ifm_active |= IFM_FDX;

	/* Map the hardware PHY type onto an ifmedia subtype */
	switch (hw->phy.link_info.phy_type) {
	/* 100 M */
	case I40E_PHY_TYPE_100BASE_TX:
		ifmr->ifm_active |= IFM_100_TX;
		break;
	/* 1 G */
	case I40E_PHY_TYPE_1000BASE_T:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	case I40E_PHY_TYPE_1000BASE_SX:
		ifmr->ifm_active |= IFM_1000_SX;
		break;
	case I40E_PHY_TYPE_1000BASE_LX:
		ifmr->ifm_active |= IFM_1000_LX;
		break;
	case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
		ifmr->ifm_active |= IFM_1000_T;
		break;
	/* 10 G */
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_SR:
		ifmr->ifm_active |= IFM_10G_SR;
		break;
	case I40E_PHY_TYPE_10GBASE_LR:
		ifmr->ifm_active |= IFM_10G_LR;
		break;
	case I40E_PHY_TYPE_10GBASE_T:
		ifmr->ifm_active |= IFM_10G_T;
		break;
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
		/* No direct ifmedia equivalent; reported as twinax */
		ifmr->ifm_active |= IFM_10G_TWINAX;
		break;
	case I40E_PHY_TYPE_10GBASE_AOC:
		ifmr->ifm_active |= IFM_10G_AOC;
		break;
	/* 25 G */
	case I40E_PHY_TYPE_25GBASE_KR:
		ifmr->ifm_active |= IFM_25G_KR;
		break;
	case I40E_PHY_TYPE_25GBASE_CR:
		ifmr->ifm_active |= IFM_25G_CR;
		break;
	case I40E_PHY_TYPE_25GBASE_SR:
		ifmr->ifm_active |= IFM_25G_SR;
		break;
	case I40E_PHY_TYPE_25GBASE_LR:
		ifmr->ifm_active |= IFM_25G_LR;
		break;
	case I40E_PHY_TYPE_25GBASE_AOC:
		ifmr->ifm_active |= IFM_25G_AOC;
		break;
	case I40E_PHY_TYPE_25GBASE_ACC:
		ifmr->ifm_active |= IFM_25G_ACC;
		break;
	/* 40 G */
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ifmr->ifm_active |= IFM_40G_CR4;
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
		ifmr->ifm_active |= IFM_40G_SR4;
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ifmr->ifm_active |= IFM_40G_LR4;
		break;
	case I40E_PHY_TYPE_XLAUI:
		ifmr->ifm_active |= IFM_OTHER;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
		ifmr->ifm_active |= IFM_1000_KX;
		break;
	case I40E_PHY_TYPE_SGMII:
		ifmr->ifm_active |= IFM_1000_SGMII;
		break;
	/* ERJ: What's the difference between these? */
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
		ifmr->ifm_active |= IFM_10G_CR1;
		break;
	case I40E_PHY_TYPE_10GBASE_KX4:
		ifmr->ifm_active |= IFM_10G_KX4;
		break;
	case I40E_PHY_TYPE_10GBASE_KR:
		ifmr->ifm_active |= IFM_10G_KR;
		break;
	case I40E_PHY_TYPE_SFI:
		ifmr->ifm_active |= IFM_10G_SFI;
		break;
	/* Our single 20G media type */
	case I40E_PHY_TYPE_20GBASE_KR2:
		ifmr->ifm_active |= IFM_20G_KR2;
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
		ifmr->ifm_active |= IFM_40G_KR4;
		break;
	case I40E_PHY_TYPE_XLPPI:
	case I40E_PHY_TYPE_40GBASE_AOC:
		ifmr->ifm_active |= IFM_40G_XLPPI;
		break;
	/* Unknown to driver */
	default:
		ifmr->ifm_active |= IFM_UNKNOWN;
		break;
	}
	/* Report flow control status as well */
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
}
static int | |||||
ixl_if_media_change(if_ctx_t ctx) | |||||
{ | |||||
struct ifmedia *ifm = iflib_get_media(ctx); | |||||
INIT_DEBUGOUT("ixl_media_change: begin"); | |||||
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) | |||||
return (EINVAL); | |||||
if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n"); | |||||
return (ENODEV); | |||||
} | |||||
static int | |||||
ixl_if_promisc_set(if_ctx_t ctx, int flags) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct ifnet *ifp = iflib_get_ifp(ctx); | |||||
struct i40e_hw *hw = vsi->hw; | |||||
int err; | |||||
bool uni = FALSE, multi = FALSE; | |||||
if (flags & IFF_PROMISC) | |||||
uni = multi = TRUE; | |||||
else if (flags & IFF_ALLMULTI || | |||||
if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR) | |||||
multi = TRUE; | |||||
err = i40e_aq_set_vsi_unicast_promiscuous(hw, | |||||
vsi->seid, uni, NULL, true); | |||||
if (err) | |||||
return (err); | |||||
err = i40e_aq_set_vsi_multicast_promiscuous(hw, | |||||
vsi->seid, multi, NULL); | |||||
return (err); | |||||
} | |||||
static void | |||||
ixl_if_timer(if_ctx_t ctx, uint16_t qid) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
//struct i40e_hw *hw = &pf->hw; | |||||
//struct ixl_tx_queue *que = &vsi->tx_queues[qid]; | |||||
#if 0 | |||||
u32 mask; | |||||
/* | |||||
** Check status of the queues | |||||
*/ | |||||
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | | |||||
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); | |||||
/* If queue param has outstanding work, trigger sw irq */ | |||||
// TODO: TX queues in iflib don't use HW interrupts; does this do anything? | |||||
if (que->busy) | |||||
wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask); | |||||
#endif | |||||
if (qid != 0) | |||||
return; | |||||
/* Fire off the adminq task */ | |||||
iflib_admin_intr_deferred(ctx); | |||||
/* Update stats */ | |||||
ixl_update_stats_counters(pf); | |||||
} | |||||
static void | |||||
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
if ((vtag == 0) || (vtag > 4095)) /* Invalid */ | |||||
return; | |||||
++vsi->num_vlans; | |||||
ixl_add_filter(vsi, hw->mac.addr, vtag); | |||||
} | |||||
static void | |||||
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
if ((vtag == 0) || (vtag > 4095)) /* Invalid */ | |||||
return; | |||||
--vsi->num_vlans; | |||||
ixl_del_filter(vsi, hw->mac.addr, vtag); | |||||
} | |||||
/*
 * Return an interface counter value from the VSI's software statistics;
 * counters this driver does not track fall through to iflib's defaults.
 */
static uint64_t
ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;
	if_t ifp = iflib_get_ifp(ctx);

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		return (vsi->ipackets);
	case IFCOUNTER_IERRORS:
		return (vsi->ierrors);
	case IFCOUNTER_OPACKETS:
		return (vsi->opackets);
	case IFCOUNTER_OERRORS:
		return (vsi->oerrors);
	case IFCOUNTER_COLLISIONS:
		/* Collisions are by standard impossible in 40G/10G Ethernet */
		return (0);
	case IFCOUNTER_IBYTES:
		return (vsi->ibytes);
	case IFCOUNTER_OBYTES:
		return (vsi->obytes);
	case IFCOUNTER_IMCASTS:
		return (vsi->imcasts);
	case IFCOUNTER_OMCASTS:
		return (vsi->omcasts);
	case IFCOUNTER_IQDROPS:
		return (vsi->iqdrops);
	case IFCOUNTER_OQDROPS:
		return (vsi->oqdrops);
	case IFCOUNTER_NOPROTO:
		return (vsi->noproto);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
/*
 * VF Function Level Reset (VFLR) handler — currently a stub that only
 * logs; the real work is intended to be done by ixl_handle_vflr().
 */
static void
ixl_if_vflr_handle(if_ctx_t ctx)
{
	IXL_DEV_ERR(iflib_get_dev(ctx), "");
	// TODO: call ixl_handle_vflr()
}
static int | |||||
ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) | |||||
{ | |||||
struct ixl_pf *pf = iflib_get_softc(ctx); | |||||
for (int i = 0; i < req->len; i++) | |||||
if (ixl_read_i2c_byte(pf, req->offset + i, | |||||
req->dev_addr, &req->data[i])) | |||||
return (EIO); | |||||
return (0); | |||||
} | |||||
static int | |||||
ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused) | |||||
{ | |||||
struct ixl_vsi *vsi = arg; | |||||
if (ifma->ifma_addr->sa_family != AF_LINK) | |||||
return (0); | |||||
ixl_add_mc_filter(vsi, | |||||
(u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); | |||||
return (1); | |||||
} | |||||
/* | |||||
* Sanity check and save off tunable values. | |||||
*/ | |||||
static int | |||||
ixl_save_pf_tunables(struct ixl_pf *pf) | |||||
{ | |||||
device_t dev = pf->dev; | |||||
/* Save tunable information */ | |||||
pf->enable_msix = ixl_enable_msix; | |||||
pf->max_queues = ixl_max_queues; | |||||
pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter; | |||||
pf->dynamic_rx_itr = ixl_dynamic_rx_itr; | |||||
pf->dynamic_tx_itr = ixl_dynamic_tx_itr; | |||||
pf->dbg_mask = ixl_core_debug_mask; | |||||
pf->hw.debug_mask = ixl_shared_debug_mask; | |||||
pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback); | |||||
if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) { | |||||
device_printf(dev, "Invalid tx_itr value of %d set!\n", | |||||
ixl_tx_itr); | |||||
device_printf(dev, "tx_itr must be between %d and %d, " | |||||
"inclusive\n", | |||||
0, IXL_MAX_ITR); | |||||
device_printf(dev, "Using default value of %d instead\n", | |||||
IXL_ITR_4K); | |||||
pf->tx_itr = IXL_ITR_4K; | |||||
} else | |||||
pf->tx_itr = ixl_tx_itr; | |||||
if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) { | |||||
device_printf(dev, "Invalid rx_itr value of %d set!\n", | |||||
ixl_rx_itr); | |||||
device_printf(dev, "rx_itr must be between %d and %d, " | |||||
"inclusive\n", | |||||
0, IXL_MAX_ITR); | |||||
device_printf(dev, "Using default value of %d instead\n", | |||||
IXL_ITR_8K); | |||||
pf->rx_itr = IXL_ITR_8K; | |||||
} else | |||||
pf->rx_itr = ixl_rx_itr; | |||||
return (0); | return (0); | ||||
} | } | ||||