Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/if_ixl.c
Show All 39 Lines | |||||
#include "ixl.h" | #include "ixl.h" | ||||
#include "ixl_pf.h" | #include "ixl_pf.h" | ||||
#ifdef RSS | #ifdef RSS | ||||
#include <net/rss_config.h> | #include <net/rss_config.h> | ||||
#endif | #endif | ||||
#include "ifdi_if.h" | |||||
/********************************************************************* | /********************************************************************* | ||||
* Driver version | * Driver version | ||||
*********************************************************************/ | *********************************************************************/ | ||||
char ixl_driver_version[] = "1.4.3"; | char ixl_driver_version[] = "1.4.3"; | ||||
/********************************************************************* | /********************************************************************* | ||||
* PCI Device ID Table | * PCI Device ID Table | ||||
* | * | ||||
* Used by probe to select devices to load on | * Used by probe to select devices to load on | ||||
* Last field stores an index into ixl_strings | * Last field stores an index into ixl_strings | ||||
* Last entry must be all 0s | * Last entry must be PVID_END | ||||
* | * | ||||
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static ixl_vendor_info_t ixl_vendor_info_array[] = | static pci_vendor_info_t ixl_vendor_info_array[] = | ||||
{ | { | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4,"Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
#ifdef X722_SUPPORT | #ifdef X722_SUPPORT | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0}, | PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection XL710 Driver"), | ||||
#endif | #endif | ||||
/* required last entry */ | /* required last entry */ | ||||
{0, 0, 0, 0, 0} | PVID_END | ||||
}; | }; | ||||
/********************************************************************* | |||||
* Table of branding strings | |||||
*********************************************************************/ | |||||
static char *ixl_strings[] = { | |||||
"Intel(R) Ethernet Connection XL710 Driver" | |||||
}; | |||||
/********************************************************************* | /********************************************************************* | ||||
* Function prototypes | * Function prototypes | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static int ixl_probe(device_t); | static void *ixl_register(device_t); | ||||
static int ixl_attach(device_t); | |||||
static int ixl_detach(device_t); | |||||
static int ixl_shutdown(device_t); | |||||
static int ixl_get_hw_capabilities(struct ixl_pf *); | static int ixl_get_hw_capabilities(struct ixl_pf *); | ||||
static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int); | |||||
static int ixl_ioctl(struct ifnet *, u_long, caddr_t); | |||||
static void ixl_init(void *); | |||||
static void ixl_init_locked(struct ixl_pf *); | |||||
static void ixl_stop(struct ixl_pf *); | |||||
static void ixl_media_status(struct ifnet *, struct ifmediareq *); | |||||
static int ixl_media_change(struct ifnet *); | |||||
static void ixl_update_link_status(struct ixl_pf *); | static void ixl_update_link_status(struct ixl_pf *); | ||||
static int ixl_allocate_pci_resources(struct ixl_pf *); | static int ixl_allocate_pci_resources(struct ixl_pf *); | ||||
static u16 ixl_get_bus_info(struct i40e_hw *, device_t); | static u16 ixl_get_bus_info(struct i40e_hw *, device_t); | ||||
static int ixl_setup_stations(struct ixl_pf *); | |||||
static int ixl_switch_config(struct ixl_pf *); | static int ixl_switch_config(struct ixl_pf *); | ||||
static int ixl_initialize_vsi(struct ixl_vsi *); | static int ixl_initialize_vsi(struct ixl_vsi *); | ||||
static int ixl_assign_vsi_msix(struct ixl_pf *); | |||||
static int ixl_assign_vsi_legacy(struct ixl_pf *); | |||||
static int ixl_init_msix(struct ixl_pf *); | |||||
static void ixl_configure_msix(struct ixl_pf *); | static void ixl_configure_msix(struct ixl_pf *); | ||||
static void ixl_configure_itr(struct ixl_pf *); | static void ixl_configure_itr(struct ixl_pf *); | ||||
static void ixl_configure_legacy(struct ixl_pf *); | static void ixl_configure_legacy(struct ixl_pf *); | ||||
static void ixl_free_pci_resources(struct ixl_pf *); | static void ixl_free_pci_resources(struct ixl_pf *); | ||||
static void ixl_local_timer(void *); | |||||
static int ixl_setup_interface(device_t, struct ixl_vsi *); | static int ixl_setup_interface(device_t, struct ixl_vsi *); | ||||
static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *); | static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *); | ||||
static void ixl_config_rss(struct ixl_vsi *); | static void ixl_config_rss(struct ixl_vsi *); | ||||
static void ixl_set_queue_rx_itr(struct ixl_queue *); | static void ixl_set_queue_rx_itr(struct ixl_queue *); | ||||
static void ixl_set_queue_tx_itr(struct ixl_queue *); | static void ixl_set_queue_tx_itr(struct ixl_queue *); | ||||
static int ixl_set_advertised_speeds(struct ixl_pf *, int); | static int ixl_set_advertised_speeds(struct ixl_pf *, int); | ||||
static int ixl_enable_rings(struct ixl_vsi *); | static int ixl_enable_rings(struct ixl_vsi *); | ||||
static int ixl_disable_rings(struct ixl_vsi *); | static int ixl_disable_rings(struct ixl_vsi *); | ||||
static void ixl_enable_intr(struct ixl_vsi *); | static void ixl_enable_intr(struct ixl_vsi *); | ||||
static void ixl_disable_intr(struct ixl_vsi *); | |||||
static void ixl_disable_rings_intr(struct ixl_vsi *); | |||||
static void ixl_enable_adminq(struct i40e_hw *); | static void ixl_enable_adminq(struct i40e_hw *); | ||||
static void ixl_disable_adminq(struct i40e_hw *); | static void ixl_disable_adminq(struct i40e_hw *); | ||||
static void ixl_enable_queue(struct i40e_hw *, int); | |||||
static void ixl_disable_queue(struct i40e_hw *, int); | |||||
static void ixl_enable_legacy(struct i40e_hw *); | static void ixl_enable_legacy(struct i40e_hw *); | ||||
static void ixl_disable_legacy(struct i40e_hw *); | static void ixl_disable_legacy(struct i40e_hw *); | ||||
static void ixl_set_promisc(struct ixl_vsi *); | |||||
static void ixl_add_multi(struct ixl_vsi *); | |||||
static void ixl_del_multi(struct ixl_vsi *); | static void ixl_del_multi(struct ixl_vsi *); | ||||
static void ixl_register_vlan(void *, struct ifnet *, u16); | |||||
static void ixl_unregister_vlan(void *, struct ifnet *, u16); | |||||
static void ixl_setup_vlan_filters(struct ixl_vsi *); | static void ixl_setup_vlan_filters(struct ixl_vsi *); | ||||
static void ixl_init_filters(struct ixl_vsi *); | static void ixl_init_filters(struct ixl_vsi *); | ||||
static void ixl_reconfigure_filters(struct ixl_vsi *vsi); | static void ixl_reconfigure_filters(struct ixl_vsi *vsi); | ||||
static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan); | static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan); | ||||
static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan); | static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan); | ||||
static void ixl_add_hw_filters(struct ixl_vsi *, int, int); | static void ixl_add_hw_filters(struct ixl_vsi *, int, int); | ||||
static void ixl_del_hw_filters(struct ixl_vsi *, int); | static void ixl_del_hw_filters(struct ixl_vsi *, int); | ||||
static struct ixl_mac_filter * | static struct ixl_mac_filter * | ||||
ixl_find_filter(struct ixl_vsi *, u8 *, s16); | ixl_find_filter(struct ixl_vsi *, u8 *, s16); | ||||
static void ixl_add_mc_filter(struct ixl_vsi *, u8 *); | static void ixl_add_mc_filter(struct ixl_vsi *, u8 *); | ||||
static void ixl_free_mac_filters(struct ixl_vsi *vsi); | static void ixl_free_mac_filters(struct ixl_vsi *vsi); | ||||
/* Sysctl debug interface */ | /* Sysctl debug interface */ | ||||
#ifdef IXL_DEBUG_SYSCTL | #ifdef IXL_DEBUG_SYSCTL | ||||
static int ixl_debug_info(SYSCTL_HANDLER_ARGS); | static int ixl_debug_info(SYSCTL_HANDLER_ARGS); | ||||
static void ixl_print_debug_info(struct ixl_pf *); | static void ixl_print_debug_info(struct ixl_pf *); | ||||
#endif | #endif | ||||
/* The MSI/X Interrupt handlers */ | /* The MSI/X Interrupt handlers */ | ||||
static void ixl_intr(void *); | int ixl_intr(void *); | ||||
static void ixl_msix_que(void *); | static int ixl_msix_que(void *); | ||||
static void ixl_msix_adminq(void *); | static int ixl_msix_adminq(void *); | ||||
static void ixl_handle_mdd_event(struct ixl_pf *); | static void ixl_handle_mdd_event(struct ixl_pf *); | ||||
/* Deferred interrupt tasklets */ | |||||
static void ixl_do_adminq(void *, int); | |||||
/* Sysctl handlers */ | /* Sysctl handlers */ | ||||
static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS); | static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_set_advertise(SYSCTL_HANDLER_ARGS); | static int ixl_set_advertise(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_current_speed(SYSCTL_HANDLER_ARGS); | static int ixl_current_speed(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); | ||||
/* Statistics */ | /* Statistics */ | ||||
static void ixl_add_hw_stats(struct ixl_pf *); | static void ixl_add_hw_stats(struct ixl_pf *); | ||||
Show All 18 Lines | |||||
static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); | ||||
#endif | #endif | ||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err); | static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err); | ||||
static int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*); | static int ixl_if_iov_init(if_ctx_t, uint16_t num_vfs, const nvlist_t*); | ||||
static void ixl_iov_uninit(device_t dev); | static void ixl_if_iov_uninit(if_ctx_t); | ||||
static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*); | static int ixl_if_vf_add(if_ctx_t, uint16_t vfnum, const nvlist_t*); | ||||
static void ixl_handle_vf_msg(struct ixl_pf *, | static void ixl_handle_vf_msg(struct ixl_pf *, | ||||
struct i40e_arq_event_info *); | struct i40e_arq_event_info *); | ||||
static void ixl_handle_vflr(void *arg, int pending); | static void ixl_if_handle_vflr(if_ctx_t ctx); | ||||
static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf); | static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf); | ||||
static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf); | static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf); | ||||
#endif | #endif | ||||
static int ixl_if_attach_pre(if_ctx_t); | |||||
static int ixl_if_attach_post(if_ctx_t); | |||||
static int ixl_if_msix_intr_assign(if_ctx_t, int); | |||||
static int ixl_if_detach(if_ctx_t); | |||||
static void ixl_if_init(if_ctx_t ctx); | |||||
static void ixl_if_stop(if_ctx_t ctx); | |||||
static void ixl_if_intr_enable(if_ctx_t ctx); | |||||
static void ixl_if_intr_disable(if_ctx_t ctx); | |||||
static void ixl_if_queue_intr_enable(if_ctx_t ctx, uint16_t qid); | |||||
static void ixl_if_queue_intr_disable(if_ctx_t ctx, uint16_t qid); | |||||
static void ixl_if_multi_set(if_ctx_t); | |||||
static void ixl_if_update_admin_status(if_ctx_t); | |||||
static int ixl_if_mtu_set(if_ctx_t, uint32_t); | |||||
static void ixl_if_media_status(if_ctx_t, struct ifmediareq *); | |||||
static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag); | |||||
static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag); | |||||
static void ixl_if_timer(if_ctx_t, uint16_t); | |||||
static int ixl_if_promisc_set(if_ctx_t ctx, int flags); | |||||
/********************************************************************* | /********************************************************************* | ||||
* FreeBSD Device Interface Entry Points | * FreeBSD Device Interface Entry Points | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static device_method_t ixl_methods[] = { | static device_method_t ixl_methods[] = { | ||||
/* Device interface */ | /* Device interface */ | ||||
DEVMETHOD(device_probe, ixl_probe), | DEVMETHOD(device_register, ixl_register), | ||||
DEVMETHOD(device_attach, ixl_attach), | DEVMETHOD(device_probe, iflib_device_probe), | ||||
DEVMETHOD(device_detach, ixl_detach), | DEVMETHOD(device_attach, iflib_device_attach), | ||||
DEVMETHOD(device_shutdown, ixl_shutdown), | DEVMETHOD(device_detach, iflib_device_detach), | ||||
DEVMETHOD(device_shutdown, iflib_device_suspend), | |||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
DEVMETHOD(pci_iov_init, ixl_iov_init), | DEVMETHOD(pci_iov_init, iflib_device_iov_init), | ||||
DEVMETHOD(pci_iov_uninit, ixl_iov_uninit), | DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit), | ||||
DEVMETHOD(pci_iov_add_vf, ixl_add_vf), | DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf), | ||||
#endif | #endif | ||||
{0, 0} | {0, 0} | ||||
}; | }; | ||||
static driver_t ixl_driver = { | static driver_t ixl_driver = { | ||||
"ixl", ixl_methods, sizeof(struct ixl_pf), | "ixl", ixl_methods, sizeof(struct ixl_pf), | ||||
}; | }; | ||||
devclass_t ixl_devclass; | devclass_t ixl_devclass; | ||||
DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); | DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); | ||||
MODULE_DEPEND(ixl, pci, 1, 1, 1); | MODULE_DEPEND(ixl, pci, 1, 1, 1); | ||||
MODULE_DEPEND(ixl, ether, 1, 1, 1); | MODULE_DEPEND(ixl, ether, 1, 1, 1); | ||||
#ifdef DEV_NETMAP | MODULE_DEPEND(ixl, iflib, 1, 1, 1); | ||||
MODULE_DEPEND(ixl, netmap, 1, 1, 1); | |||||
#endif /* DEV_NETMAP */ | |||||
/* | |||||
** Global reset mutex | |||||
*/ | |||||
static struct mtx ixl_reset_mtx; | |||||
static device_method_t ixl_if_methods[] = { | |||||
DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre), | |||||
DEVMETHOD(ifdi_attach_post, ixl_if_attach_post), | |||||
DEVMETHOD(ifdi_detach, ixl_if_detach), | |||||
DEVMETHOD(ifdi_init, ixl_if_init), | |||||
DEVMETHOD(ifdi_stop, ixl_if_stop), | |||||
DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign), | |||||
DEVMETHOD(ifdi_intr_disable, ixl_if_intr_disable), | |||||
DEVMETHOD(ifdi_intr_enable, ixl_if_intr_enable), | |||||
DEVMETHOD(ifdi_queue_intr_enable, ixl_if_queue_intr_enable), | |||||
DEVMETHOD(ifdi_multi_set, ixl_if_multi_set), | |||||
DEVMETHOD(ifdi_queues_alloc, ixl_if_queues_alloc), | |||||
DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status), | |||||
DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set), | |||||
DEVMETHOD(ifdi_media_status, ixl_if_media_status), | |||||
DEVMETHOD(ifdi_media_change, ixl_if_media_change), | |||||
DEVMETHOD(ifdi_timer, ixl_if_timer), | |||||
DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set), | |||||
DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register), | |||||
DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister), | |||||
DEVMETHOD(ifdi_queues_free, ixl_if_queues_free), | |||||
#ifdef PCI_IOV | |||||
DEVMETHOD(ifdi_vflr_handle, ixl_if_handle_vflr), | |||||
DEVMETHOD(ifdi_iov_init, ixl_if_iov_init), | |||||
DEVMETHOD(ifdi_iov_uninit, ixl_if_iov_uninit), | |||||
DEVMETHOD(ifdi_iov_vf_add, ixl_if_vf_add), | |||||
#endif | |||||
DEVMETHOD_END | |||||
}; | |||||
static driver_t ixl_if_driver = { | |||||
"ixl_if", ixl_if_methods, sizeof(struct ixl_pf), | |||||
}; | |||||
/* | /* | ||||
** TUNEABLE PARAMETERS: | ** TUNEABLE PARAMETERS: | ||||
*/ | */ | ||||
static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, | static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, | ||||
"IXL driver parameters"); | "IXL driver parameters"); | ||||
/* | /* | ||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | |||||
#ifdef IXL_FDIR | #ifdef IXL_FDIR | ||||
static int ixl_enable_fdir = 1; | static int ixl_enable_fdir = 1; | ||||
TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir); | TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir); | ||||
/* Rate at which we sample */ | /* Rate at which we sample */ | ||||
int ixl_atr_rate = 20; | int ixl_atr_rate = 20; | ||||
TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate); | TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate); | ||||
#endif | #endif | ||||
#ifdef DEV_NETMAP | |||||
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */ | |||||
#include <dev/netmap/if_ixl_netmap.h> | |||||
#endif /* DEV_NETMAP */ | |||||
static char *ixl_fc_string[6] = { | static char *ixl_fc_string[6] = { | ||||
"None", | "None", | ||||
"Rx", | "Rx", | ||||
"Tx", | "Tx", | ||||
"Full", | "Full", | ||||
"Priority", | "Priority", | ||||
"Default" | "Default" | ||||
}; | }; | ||||
static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); | extern struct if_txrx ixl_txrx; | ||||
static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = | static struct if_shared_ctx ixl_sctx_init = { | ||||
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | .isc_magic = IFLIB_MAGIC, | ||||
.isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ | |||||
.isc_tx_maxsize = IXL_TSO_SIZE, | |||||
/********************************************************************* | .isc_tx_maxsegsize = PAGE_SIZE*4, | ||||
* Device identification routine | |||||
* | |||||
* ixl_probe determines if the driver should be loaded on | |||||
* the hardware based on PCI vendor/device id of the device. | |||||
* | |||||
* return BUS_PROBE_DEFAULT on success, positive on failure | |||||
*********************************************************************/ | |||||
static int | .isc_rx_maxsize = PAGE_SIZE*4, | ||||
ixl_probe(device_t dev) | .isc_rx_nsegments = 1, | ||||
{ | .isc_rx_maxsegsize = PAGE_SIZE*4, | ||||
ixl_vendor_info_t *ent; | .isc_ntxd = DEFAULT_RING, | ||||
.isc_nrxd = DEFAULT_RING, | |||||
.isc_nfl = 1, | |||||
.isc_qsizes[0] = roundup2((DEFAULT_RING * sizeof(struct i40e_tx_desc)) + | |||||
sizeof(u32), DBA_ALIGN), | |||||
.isc_qsizes[1] = roundup2(DEFAULT_RING * | |||||
sizeof(union i40e_rx_desc), DBA_ALIGN), | |||||
.isc_nqs = 2, | |||||
.isc_admin_intrcnt = 1, | |||||
.isc_vendor_info = ixl_vendor_info_array, | |||||
.isc_driver_version = ixl_driver_version, | |||||
.isc_txrx = &ixl_txrx, | |||||
.isc_driver = &ixl_if_driver, | |||||
}; | |||||
u16 pci_vendor_id, pci_device_id; | if_shared_ctx_t ixl_sctx = &ixl_sctx_init; | ||||
u16 pci_subvendor_id, pci_subdevice_id; | MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); | ||||
char device_name[256]; | |||||
static bool lock_init = FALSE; | |||||
INIT_DEBUGOUT("ixl_probe: begin"); | static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = | ||||
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | |||||
pci_vendor_id = pci_get_vendor(dev); | static void * | ||||
if (pci_vendor_id != I40E_INTEL_VENDOR_ID) | ixl_register(device_t dev) | ||||
return (ENXIO); | { | ||||
ixl_sctx->isc_ntxd = ixl_ringsz; | |||||
ixl_sctx->isc_nrxd = ixl_ringsz; | |||||
ixl_sctx->isc_qsizes[0] = roundup2((ixl_ringsz * sizeof(struct i40e_tx_desc)) + | |||||
sizeof(u32), DBA_ALIGN); | |||||
ixl_sctx->isc_qsizes[1] = roundup2(ixl_ringsz * | |||||
sizeof(union i40e_rx_desc), DBA_ALIGN); | |||||
pci_device_id = pci_get_device(dev); | |||||
pci_subvendor_id = pci_get_subvendor(dev); | |||||
pci_subdevice_id = pci_get_subdevice(dev); | |||||
ent = ixl_vendor_info_array; | return (ixl_sctx); | ||||
while (ent->vendor_id != 0) { | |||||
if ((pci_vendor_id == ent->vendor_id) && | |||||
(pci_device_id == ent->device_id) && | |||||
((pci_subvendor_id == ent->subvendor_id) || | |||||
(ent->subvendor_id == 0)) && | |||||
((pci_subdevice_id == ent->subdevice_id) || | |||||
(ent->subdevice_id == 0))) { | |||||
sprintf(device_name, "%s, Version - %s", | |||||
ixl_strings[ent->index], | |||||
ixl_driver_version); | |||||
device_set_desc_copy(dev, device_name); | |||||
/* One shot mutex init */ | |||||
if (lock_init == FALSE) { | |||||
lock_init = TRUE; | |||||
mtx_init(&ixl_reset_mtx, | |||||
"ixl_reset", | |||||
"IXL RESET Lock", MTX_DEF); | |||||
} | } | ||||
return (BUS_PROBE_DEFAULT); | |||||
} | |||||
ent++; | |||||
} | |||||
return (ENXIO); | |||||
} | |||||
/********************************************************************* | /********************************************************************* | ||||
* Device initialization routine | * Device initialization routine | ||||
* | * | ||||
* The attach entry point is called when the driver is being loaded. | * The attach entry point is called when the driver is being loaded. | ||||
* This routine identifies the type of hardware, allocates all resources | * This routine identifies the type of hardware, allocates all resources | ||||
* and initializes the hardware. | * and initializes the hardware. | ||||
* | * | ||||
* return 0 on success, positive on failure | * return 0 on success, positive on failure | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static int | static int | ||||
ixl_attach(device_t dev) | ixl_if_attach_pre(if_ctx_t ctx) | ||||
{ | { | ||||
device_t dev; | |||||
struct ixl_pf *pf; | struct ixl_pf *pf; | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct ixl_vsi *vsi; | struct ixl_vsi *vsi; | ||||
u16 bus; | |||||
int error = 0; | int error = 0; | ||||
#ifdef PCI_IOV | |||||
nvlist_t *pf_schema, *vf_schema; | |||||
int iov_error; | |||||
#endif | |||||
INIT_DEBUGOUT("ixl_attach: begin"); | INIT_DEBUGOUT("ixl_attach: begin"); | ||||
/* Allocate, clear, and link in our primary soft structure */ | dev = iflib_get_dev(ctx); | ||||
pf = device_get_softc(dev); | pf = iflib_get_softc(ctx); | ||||
pf->dev = pf->osdep.dev = dev; | |||||
hw = &pf->hw; | hw = &pf->hw; | ||||
vsi = &pf->vsi; | |||||
vsi->back = pf; | |||||
vsi->hw = &pf->hw; | |||||
vsi->id = 0; | |||||
vsi->num_vlans = 0; | |||||
vsi->ctx = ctx; | |||||
vsi->media = iflib_get_media(ctx); | |||||
vsi->shared = iflib_get_softc_ctx(ctx); | |||||
pf->dev = iflib_get_dev(ctx); | |||||
/* | /* | ||||
* These are the same across all current ixl models | |||||
*/ | |||||
vsi->shared->isc_tx_nsegments = IXL_MAX_TX_SEGS; | |||||
vsi->shared->isc_msix_bar = PCIR_BAR(IXL_BAR); | |||||
/* | |||||
** Note this assumes we have a single embedded VSI, | ** Note this assumes we have a single embedded VSI, | ||||
** this could be enhanced later to allocate multiple | ** this could be enhanced later to allocate multiple | ||||
*/ | */ | ||||
vsi = &pf->vsi; | vsi = &pf->vsi; | ||||
vsi->dev = pf->dev; | |||||
/* Core Lock Init*/ | |||||
IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev)); | |||||
/* Set up the timer callout */ | |||||
callout_init_mtx(&pf->timer, &pf->pf_mtx, 0); | |||||
/* Set up sysctls */ | /* Set up sysctls */ | ||||
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), | SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), | ||||
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), | SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), | ||||
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, | OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_set_flowcntl, "I", "Flow Control"); | pf, 0, ixl_set_flowcntl, "I", "Flow Control"); | ||||
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), | SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), | ||||
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), | SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), | ||||
▲ Show 20 Lines • Show All 81 Lines • ▼ Show 20 Lines | hw->subsystem_vendor_id = | ||||
pci_read_config(dev, PCIR_SUBVEND_0, 2); | pci_read_config(dev, PCIR_SUBVEND_0, 2); | ||||
hw->subsystem_device_id = | hw->subsystem_device_id = | ||||
pci_read_config(dev, PCIR_SUBDEV_0, 2); | pci_read_config(dev, PCIR_SUBDEV_0, 2); | ||||
hw->bus.device = pci_get_slot(dev); | hw->bus.device = pci_get_slot(dev); | ||||
hw->bus.func = pci_get_function(dev); | hw->bus.func = pci_get_function(dev); | ||||
pf->vc_debug_lvl = 1; | pf->vc_debug_lvl = 1; | ||||
hw->back = &pf->osdep; | |||||
pf->osdep.dev = dev; | |||||
/* Do PCI setup - map BAR0, etc */ | /* Do PCI setup - map BAR0, etc */ | ||||
if (ixl_allocate_pci_resources(pf)) { | if (ixl_allocate_pci_resources(pf)) { | ||||
device_printf(dev, "Allocation of PCI resources failed\n"); | device_printf(dev, "Allocation of PCI resources failed\n"); | ||||
error = ENXIO; | error = ENXIO; | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 71 Lines • ▼ Show 20 Lines | #endif | ||||
i40e_get_mac_addr(hw, hw->mac.addr); | i40e_get_mac_addr(hw, hw->mac.addr); | ||||
error = i40e_validate_mac_addr(hw->mac.addr); | error = i40e_validate_mac_addr(hw->mac.addr); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "validate_mac_addr failed: %d\n", error); | device_printf(dev, "validate_mac_addr failed: %d\n", error); | ||||
goto err_mac_hmc; | goto err_mac_hmc; | ||||
} | } | ||||
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); | bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); | ||||
iflib_set_mac(ctx, hw->mac.addr); | |||||
i40e_get_port_mac_addr(hw, hw->mac.port_addr); | i40e_get_port_mac_addr(hw, hw->mac.port_addr); | ||||
/* Set up VSI and queues */ | |||||
if (ixl_setup_stations(pf) != 0) { | |||||
device_printf(dev, "setup stations failed!\n"); | |||||
error = ENOMEM; | |||||
goto err_mac_hmc; | |||||
} | |||||
/* Initialize mac filter list for VSI */ | /* Initialize mac filter list for VSI */ | ||||
SLIST_INIT(&vsi->ftl); | SLIST_INIT(&vsi->ftl); | ||||
device_printf(dev, "%s success!\n", __FUNCTION__); | |||||
return (0); | |||||
/* Set up interrupt routing here */ | err_mac_hmc: | ||||
if (pf->msix > 1) | i40e_shutdown_lan_hmc(hw); | ||||
error = ixl_assign_vsi_msix(pf); | err_get_cap: | ||||
else | i40e_shutdown_adminq(hw); | ||||
error = ixl_assign_vsi_legacy(pf); | err_out: | ||||
if (error) | ixl_free_pci_resources(pf); | ||||
goto err_late; | ixl_free_mac_filters(vsi); | ||||
return (error); | |||||
} | |||||
static int | |||||
ixl_if_attach_post(if_ctx_t ctx) | |||||
{ | |||||
device_t dev; | |||||
struct ixl_pf *pf; | |||||
struct i40e_hw *hw; | |||||
struct ixl_vsi *vsi; | |||||
int error = 0; | |||||
u16 bus; | |||||
#ifdef PCI_IOV | |||||
nvlist_t *pf_schema, *vf_schema; | |||||
int iov_error; | |||||
#endif | |||||
INIT_DEBUGOUT("ixl_attach: begin"); | |||||
dev = iflib_get_dev(ctx); | |||||
vsi = iflib_get_softc(ctx); | |||||
vsi->ifp = iflib_get_ifp(ctx); | |||||
pf = (struct ixl_pf *)vsi; | |||||
hw = &pf->hw; | |||||
if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || | if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || | ||||
(hw->aq.fw_maj_ver < 4)) { | (hw->aq.fw_maj_ver < 4)) { | ||||
i40e_msec_delay(75); | i40e_msec_delay(75); | ||||
error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); | error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); | ||||
if (error) | if (error) { | ||||
device_printf(dev, "link restart failed, aq_err=%d\n", | device_printf(dev, "link restart failed, aq_err=%d\n", | ||||
pf->hw.aq.asq_last_status); | pf->hw.aq.asq_last_status); | ||||
goto err_mac_hmc; | |||||
} | } | ||||
} | |||||
/* Determine link state */ | /* Determine link state */ | ||||
i40e_aq_get_link_info(hw, TRUE, NULL, NULL); | i40e_aq_get_link_info(hw, TRUE, NULL, NULL); | ||||
i40e_get_link_status(hw, &pf->link_up); | i40e_get_link_status(hw, &pf->link_up); | ||||
/* Setup OS specific network interface */ | |||||
if (ixl_setup_interface(dev, vsi) != 0) { | if (ixl_setup_interface(dev, vsi) != 0) { | ||||
device_printf(dev, "interface setup failed!\n"); | device_printf(dev, "interface setup failed!\n"); | ||||
error = EIO; | error = EIO; | ||||
goto err_late; | |||||
} | } | ||||
if (error) { | |||||
device_printf(dev, "Interface setup failed: %d\n", error); | |||||
goto err_mac_hmc; | |||||
} else | |||||
device_printf(dev, "%s success!\n", __FUNCTION__); | |||||
error = ixl_switch_config(pf); | error = ixl_switch_config(pf); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "Initial switch config failed: %d\n", error); | device_printf(dev, "Initial switch config failed: %d\n", error); | ||||
goto err_mac_hmc; | goto err_mac_hmc; | ||||
} | } | ||||
/* Limit phy interrupts to link and modules failure */ | /* Limit phy interrupts to link and modules failure */ | ||||
error = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | | error = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | | ||||
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); | ||||
if (error) | if (error) | ||||
device_printf(dev, "set phy mask failed: %d\n", error); | device_printf(dev, "set phy mask failed: %d\n", error); | ||||
/* Get the bus configuration and set the shared code */ | /* Get the bus configuration and set the shared code */ | ||||
bus = ixl_get_bus_info(hw, dev); | bus = ixl_get_bus_info(hw, dev); | ||||
i40e_set_pci_config_data(hw, bus); | i40e_set_pci_config_data(hw, bus); | ||||
/* Initialize statistics */ | /* Initialize statistics */ | ||||
ixl_pf_reset_stats(pf); | ixl_pf_reset_stats(pf); | ||||
ixl_update_stats_counters(pf); | ixl_update_stats_counters(pf); | ||||
ixl_add_hw_stats(pf); | ixl_add_hw_stats(pf); | ||||
/* Register for VLAN events */ | |||||
vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, | |||||
ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); | |||||
vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, | |||||
ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); | |||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
/* SR-IOV is only supported when MSI-X is in use. */ | /* SR-IOV is only supported when MSI-X is in use. */ | ||||
if (pf->msix > 1) { | if (pf->msix > 1) { | ||||
pf_schema = pci_iov_schema_alloc_node(); | pf_schema = pci_iov_schema_alloc_node(); | ||||
vf_schema = pci_iov_schema_alloc_node(); | vf_schema = pci_iov_schema_alloc_node(); | ||||
pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); | pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); | ||||
pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", | pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", | ||||
IOV_SCHEMA_HASDEFAULT, TRUE); | IOV_SCHEMA_HASDEFAULT, TRUE); | ||||
pci_iov_schema_add_bool(vf_schema, "allow-set-mac", | pci_iov_schema_add_bool(vf_schema, "allow-set-mac", | ||||
IOV_SCHEMA_HASDEFAULT, FALSE); | IOV_SCHEMA_HASDEFAULT, FALSE); | ||||
pci_iov_schema_add_bool(vf_schema, "allow-promisc", | pci_iov_schema_add_bool(vf_schema, "allow-promisc", | ||||
IOV_SCHEMA_HASDEFAULT, FALSE); | IOV_SCHEMA_HASDEFAULT, FALSE); | ||||
iov_error = pci_iov_attach(dev, pf_schema, vf_schema); | iov_error = pci_iov_attach(dev, pf_schema, vf_schema); | ||||
if (iov_error != 0) | if (iov_error != 0) | ||||
device_printf(dev, | device_printf(dev, | ||||
"Failed to initialize SR-IOV (error=%d)\n", | "Failed to initialize SR-IOV (error=%d)\n", | ||||
iov_error); | iov_error); | ||||
} | } | ||||
#endif | #endif | ||||
#ifdef DEV_NETMAP | |||||
ixl_netmap_attach(vsi); | |||||
#endif /* DEV_NETMAP */ | |||||
INIT_DEBUGOUT("ixl_attach: end"); | INIT_DEBUGOUT("ixl_attach: end"); | ||||
device_printf(dev, "%s success!\n", __FUNCTION__); | |||||
return (0); | return (0); | ||||
err_late: | |||||
if (vsi->ifp != NULL) | |||||
if_free(vsi->ifp); | |||||
err_mac_hmc: | err_mac_hmc: | ||||
i40e_shutdown_lan_hmc(hw); | i40e_shutdown_lan_hmc(hw); | ||||
err_get_cap: | |||||
i40e_shutdown_adminq(hw); | i40e_shutdown_adminq(hw); | ||||
err_out: | |||||
ixl_free_pci_resources(pf); | ixl_free_pci_resources(pf); | ||||
ixl_free_vsi(vsi); | ixl_free_mac_filters(vsi); | ||||
IXL_PF_LOCK_DESTROY(pf); | |||||
return (error); | return (error); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* Device removal routine | * Device removal routine | ||||
* | * | ||||
* The detach entry point is called when the driver is being removed. | * The detach entry point is called when the driver is being removed. | ||||
* This routine stops the adapter and deallocates all the resources | * This routine stops the adapter and deallocates all the resources | ||||
* that were allocated for driver operation. | * that were allocated for driver operation. | ||||
* | * | ||||
* return 0 on success, positive on failure | * return 0 on success, positive on failure | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static int | static int | ||||
ixl_detach(device_t dev) | ixl_if_detach(if_ctx_t ctx) | ||||
{ | { | ||||
struct ixl_pf *pf = device_get_softc(dev); | struct ixl_vsi *vsi = iflib_get_softc(ctx); | ||||
struct ixl_pf *pf = vsi->back; | |||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct ixl_queue *que = vsi->queues; | |||||
i40e_status status; | i40e_status status; | ||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
int error; | int error; | ||||
#endif | #endif | ||||
INIT_DEBUGOUT("ixl_detach: begin"); | INIT_DEBUGOUT("ixl_detach: begin"); | ||||
/* Make sure VLANS are not using driver */ | |||||
if (vsi->ifp->if_vlantrunk != NULL) { | |||||
device_printf(dev,"Vlan in use, detach first\n"); | |||||
return (EBUSY); | |||||
} | |||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
error = pci_iov_detach(dev); | error = pci_iov_detach(iflib_get_dev(ctx)); | ||||
if (error != 0) { | if (error != 0) { | ||||
device_printf(dev, "SR-IOV in use; detach first.\n"); | device_printf(iflib_get_dev(ctx), "SR-IOV in use; detach first.\n"); | ||||
return (error); | return (error); | ||||
} | } | ||||
#endif | #endif | ||||
ether_ifdetach(vsi->ifp); | |||||
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) { | |||||
IXL_PF_LOCK(pf); | |||||
ixl_stop(pf); | |||||
IXL_PF_UNLOCK(pf); | |||||
} | |||||
for (int i = 0; i < vsi->num_queues; i++, que++) { | |||||
if (que->tq) { | |||||
taskqueue_drain(que->tq, &que->task); | |||||
taskqueue_drain(que->tq, &que->tx_task); | |||||
taskqueue_free(que->tq); | |||||
} | |||||
} | |||||
/* Shutdown LAN HMC */ | /* Shutdown LAN HMC */ | ||||
status = i40e_shutdown_lan_hmc(hw); | status = i40e_shutdown_lan_hmc(hw); | ||||
if (status) | if (status) | ||||
device_printf(dev, | device_printf(iflib_get_dev(ctx), | ||||
"Shutdown LAN HMC failed with code %d\n", status); | "Shutdown LAN HMC failed with code %d\n", status); | ||||
/* Shutdown admin queue */ | /* Shutdown admin queue */ | ||||
status = i40e_shutdown_adminq(hw); | status = i40e_shutdown_adminq(hw); | ||||
if (status) | if (status) | ||||
device_printf(dev, | device_printf(iflib_get_dev(ctx), | ||||
"Shutdown Admin queue failed with code %d\n", status); | "Shutdown Admin queue failed with code %d\n", status); | ||||
/* Unregister VLAN events */ | |||||
if (vsi->vlan_attach != NULL) | |||||
EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); | |||||
if (vsi->vlan_detach != NULL) | |||||
EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); | |||||
callout_drain(&pf->timer); | |||||
#ifdef DEV_NETMAP | |||||
netmap_detach(vsi->ifp); | |||||
#endif /* DEV_NETMAP */ | |||||
ixl_free_pci_resources(pf); | ixl_free_pci_resources(pf); | ||||
bus_generic_detach(dev); | ixl_free_mac_filters(vsi); | ||||
if_free(vsi->ifp); | |||||
ixl_free_vsi(vsi); | |||||
IXL_PF_LOCK_DESTROY(pf); | |||||
return (0); | return (0); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Shutdown entry point | |||||
* | |||||
**********************************************************************/ | |||||
static int | |||||
ixl_shutdown(device_t dev) | |||||
{ | |||||
struct ixl_pf *pf = device_get_softc(dev); | |||||
IXL_PF_LOCK(pf); | |||||
ixl_stop(pf); | |||||
IXL_PF_UNLOCK(pf); | |||||
return (0); | |||||
} | |||||
/********************************************************************* | |||||
* | |||||
* Get the hardware capabilities | * Get the hardware capabilities | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static int | static int | ||||
ixl_get_hw_capabilities(struct ixl_pf *pf) | ixl_get_hw_capabilities(struct ixl_pf *pf) | ||||
{ | { | ||||
struct i40e_aqc_list_capabilities_element_resp *buf; | struct i40e_aqc_list_capabilities_element_resp *buf; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = iflib_get_dev(((struct ixl_vsi *)pf)->ctx); | ||||
int error, len; | int error, len; | ||||
u16 needed; | u16 needed; | ||||
bool again = TRUE; | bool again = TRUE; | ||||
len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); | len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); | ||||
retry: | retry: | ||||
if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) | if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) | ||||
malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) { | malloc(len, M_IXL, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate cap memory\n"); | device_printf(dev, "Unable to allocate cap memory\n"); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
/* This populates the hw struct */ | /* This populates the hw struct */ | ||||
error = i40e_aq_discover_capabilities(hw, buf, len, | error = i40e_aq_discover_capabilities(hw, buf, len, | ||||
&needed, i40e_aqc_opc_list_func_capabilities, NULL); | &needed, i40e_aqc_opc_list_func_capabilities, NULL); | ||||
free(buf, M_DEVBUF); | free(buf, M_IXL); | ||||
if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && | if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && | ||||
(again == TRUE)) { | (again == TRUE)) { | ||||
/* retry once with a larger buffer */ | /* retry once with a larger buffer */ | ||||
again = FALSE; | again = FALSE; | ||||
len = needed; | len = needed; | ||||
goto retry; | goto retry; | ||||
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { | } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { | ||||
device_printf(dev, "capability discovery failed: %d\n", | device_printf(dev, "capability discovery failed: %d\n", | ||||
Show All 14 Lines | device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, " | ||||
hw->func_caps.fd_filters_best_effort, | hw->func_caps.fd_filters_best_effort, | ||||
hw->func_caps.num_tx_qp, | hw->func_caps.num_tx_qp, | ||||
hw->func_caps.num_rx_qp, | hw->func_caps.num_rx_qp, | ||||
hw->func_caps.base_queue); | hw->func_caps.base_queue); | ||||
#endif | #endif | ||||
return (error); | return (error); | ||||
} | } | ||||
static void | |||||
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) | |||||
{ | |||||
device_t dev = vsi->dev; | |||||
/* Enable/disable TXCSUM/TSO4 */ | |||||
if (!(ifp->if_capenable & IFCAP_TXCSUM) | |||||
&& !(ifp->if_capenable & IFCAP_TSO4)) { | |||||
if (mask & IFCAP_TXCSUM) { | |||||
ifp->if_capenable |= IFCAP_TXCSUM; | |||||
/* enable TXCSUM, restore TSO if previously enabled */ | |||||
if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { | |||||
vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; | |||||
ifp->if_capenable |= IFCAP_TSO4; | |||||
} | |||||
} | |||||
else if (mask & IFCAP_TSO4) { | |||||
ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); | |||||
vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; | |||||
device_printf(dev, | |||||
"TSO4 requires txcsum, enabling both...\n"); | |||||
} | |||||
} else if((ifp->if_capenable & IFCAP_TXCSUM) | |||||
&& !(ifp->if_capenable & IFCAP_TSO4)) { | |||||
if (mask & IFCAP_TXCSUM) | |||||
ifp->if_capenable &= ~IFCAP_TXCSUM; | |||||
else if (mask & IFCAP_TSO4) | |||||
ifp->if_capenable |= IFCAP_TSO4; | |||||
} else if((ifp->if_capenable & IFCAP_TXCSUM) | |||||
&& (ifp->if_capenable & IFCAP_TSO4)) { | |||||
if (mask & IFCAP_TXCSUM) { | |||||
vsi->flags |= IXL_FLAGS_KEEP_TSO4; | |||||
ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); | |||||
device_printf(dev, | |||||
"TSO4 requires txcsum, disabling both...\n"); | |||||
} else if (mask & IFCAP_TSO4) | |||||
ifp->if_capenable &= ~IFCAP_TSO4; | |||||
} | |||||
/* Enable/disable TXCSUM_IPV6/TSO6 */ | |||||
if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6) | |||||
&& !(ifp->if_capenable & IFCAP_TSO6)) { | |||||
if (mask & IFCAP_TXCSUM_IPV6) { | |||||
ifp->if_capenable |= IFCAP_TXCSUM_IPV6; | |||||
if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { | |||||
vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; | |||||
ifp->if_capenable |= IFCAP_TSO6; | |||||
} | |||||
} else if (mask & IFCAP_TSO6) { | |||||
ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); | |||||
vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; | |||||
device_printf(dev, | |||||
"TSO6 requires txcsum6, enabling both...\n"); | |||||
} | |||||
} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6) | |||||
&& !(ifp->if_capenable & IFCAP_TSO6)) { | |||||
if (mask & IFCAP_TXCSUM_IPV6) | |||||
ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6; | |||||
else if (mask & IFCAP_TSO6) | |||||
ifp->if_capenable |= IFCAP_TSO6; | |||||
} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) | |||||
&& (ifp->if_capenable & IFCAP_TSO6)) { | |||||
if (mask & IFCAP_TXCSUM_IPV6) { | |||||
vsi->flags |= IXL_FLAGS_KEEP_TSO6; | |||||
ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); | |||||
device_printf(dev, | |||||
"TSO6 requires txcsum6, disabling both...\n"); | |||||
} else if (mask & IFCAP_TSO6) | |||||
ifp->if_capenable &= ~IFCAP_TSO6; | |||||
} | |||||
} | |||||
/********************************************************************* | /********************************************************************* | ||||
* Ioctl entry point | * Ioctl mtu entry point | ||||
* | * | ||||
* ixl_ioctl is called when the user wants to configure the | |||||
* interface. | |||||
* | * | ||||
* return 0 on success, positive on failure | * return 0 on success, EINVAL on failure | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static int | static int | ||||
ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) | ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu) | ||||
{ | { | ||||
struct ixl_vsi *vsi = ifp->if_softc; | struct ixl_vsi *vsi = iflib_get_softc(ctx); | ||||
struct ixl_pf *pf = vsi->back; | |||||
struct ifreq *ifr = (struct ifreq *) data; | |||||
#if defined(INET) || defined(INET6) | |||||
struct ifaddr *ifa = (struct ifaddr *)data; | |||||
bool avoid_reset = FALSE; | |||||
#endif | |||||
int error = 0; | |||||
switch (command) { | |||||
case SIOCSIFADDR: | |||||
#ifdef INET | |||||
if (ifa->ifa_addr->sa_family == AF_INET) | |||||
avoid_reset = TRUE; | |||||
#endif | |||||
#ifdef INET6 | |||||
if (ifa->ifa_addr->sa_family == AF_INET6) | |||||
avoid_reset = TRUE; | |||||
#endif | |||||
#if defined(INET) || defined(INET6) | |||||
/* | |||||
** Calling init results in link renegotiation, | |||||
** so we avoid doing it when possible. | |||||
*/ | |||||
if (avoid_reset) { | |||||
ifp->if_flags |= IFF_UP; | |||||
if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) | |||||
ixl_init(pf); | |||||
#ifdef INET | |||||
if (!(ifp->if_flags & IFF_NOARP)) | |||||
arp_ifinit(ifp, ifa); | |||||
#endif | |||||
} else | |||||
error = ether_ioctl(ifp, command, data); | |||||
break; | |||||
#endif | |||||
case SIOCSIFMTU: | |||||
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); | IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); | ||||
if (ifr->ifr_mtu > IXL_MAX_FRAME - | if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - | ||||
ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { | ETHER_VLAN_ENCAP_LEN) | ||||
error = EINVAL; | return (EINVAL); | ||||
} else { | |||||
IXL_PF_LOCK(pf); | |||||
ifp->if_mtu = ifr->ifr_mtu; | |||||
vsi->max_frame_size = | |||||
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN | |||||
+ ETHER_VLAN_ENCAP_LEN; | |||||
ixl_init_locked(pf); | |||||
IXL_PF_UNLOCK(pf); | |||||
} | |||||
break; | |||||
case SIOCSIFFLAGS: | |||||
IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); | |||||
IXL_PF_LOCK(pf); | |||||
if (ifp->if_flags & IFF_UP) { | |||||
if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { | |||||
if ((ifp->if_flags ^ pf->if_flags) & | |||||
(IFF_PROMISC | IFF_ALLMULTI)) { | |||||
ixl_set_promisc(vsi); | |||||
} | |||||
} else | |||||
ixl_init_locked(pf); | |||||
} else | |||||
if (ifp->if_drv_flags & IFF_DRV_RUNNING) | |||||
ixl_stop(pf); | |||||
pf->if_flags = ifp->if_flags; | |||||
IXL_PF_UNLOCK(pf); | |||||
break; | |||||
case SIOCADDMULTI: | |||||
IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI"); | |||||
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | |||||
IXL_PF_LOCK(pf); | |||||
ixl_disable_intr(vsi); | |||||
ixl_add_multi(vsi); | |||||
ixl_enable_intr(vsi); | |||||
IXL_PF_UNLOCK(pf); | |||||
} | |||||
break; | |||||
case SIOCDELMULTI: | |||||
IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI"); | |||||
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | |||||
IXL_PF_LOCK(pf); | |||||
ixl_disable_intr(vsi); | |||||
ixl_del_multi(vsi); | |||||
ixl_enable_intr(vsi); | |||||
IXL_PF_UNLOCK(pf); | |||||
} | |||||
break; | |||||
case SIOCSIFMEDIA: | |||||
case SIOCGIFMEDIA: | |||||
#ifdef IFM_ETH_XTYPE | |||||
case SIOCGIFXMEDIA: | |||||
#endif | |||||
IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); | |||||
error = ifmedia_ioctl(ifp, ifr, &vsi->media, command); | |||||
break; | |||||
case SIOCSIFCAP: | |||||
{ | |||||
int mask = ifr->ifr_reqcap ^ ifp->if_capenable; | |||||
IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); | |||||
ixl_cap_txcsum_tso(vsi, ifp, mask); | vsi->max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + | ||||
ETHER_VLAN_ENCAP_LEN; | |||||
if (mask & IFCAP_RXCSUM) | return (0); | ||||
ifp->if_capenable ^= IFCAP_RXCSUM; | |||||
if (mask & IFCAP_RXCSUM_IPV6) | |||||
ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; | |||||
if (mask & IFCAP_LRO) | |||||
ifp->if_capenable ^= IFCAP_LRO; | |||||
if (mask & IFCAP_VLAN_HWTAGGING) | |||||
ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; | |||||
if (mask & IFCAP_VLAN_HWFILTER) | |||||
ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; | |||||
if (mask & IFCAP_VLAN_HWTSO) | |||||
ifp->if_capenable ^= IFCAP_VLAN_HWTSO; | |||||
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | |||||
IXL_PF_LOCK(pf); | |||||
ixl_init_locked(pf); | |||||
IXL_PF_UNLOCK(pf); | |||||
} | } | ||||
VLAN_CAPABILITIES(ifp); | |||||
break; | |||||
} | |||||
default: | |||||
IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command); | |||||
error = ether_ioctl(ifp, command, data); | |||||
break; | |||||
} | |||||
return (error); | |||||
} | |||||
/********************************************************************* | /********************************************************************* | ||||
* Init entry point | * Init entry point | ||||
* | * | ||||
* This routine is used in two ways. It is used by the stack as | * This routine is used in two ways. It is used by the stack as | ||||
* init entry point in network interface structure. It is also used | * init entry point in network interface structure. It is also used | ||||
* by the driver as a hw/sw initialization routine to get to a | * by the driver as a hw/sw initialization routine to get to a | ||||
* consistent state. | * consistent state. | ||||
* | * | ||||
* return 0 on success, positive on failure | * return 0 on success, positive on failure | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static void | static void | ||||
ixl_init_locked(struct ixl_pf *pf) | ixl_if_init(if_ctx_t ctx) | ||||
{ | { | ||||
struct ixl_vsi *vsi = iflib_get_softc(ctx); | |||||
struct ixl_pf *pf = vsi->back; | |||||
device_t dev = iflib_get_dev(ctx); | |||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct ifnet *ifp = vsi->ifp; | |||||
device_t dev = pf->dev; | |||||
struct i40e_filter_control_settings filter; | struct i40e_filter_control_settings filter; | ||||
u8 tmpaddr[ETHER_ADDR_LEN]; | u8 tmpaddr[ETHER_ADDR_LEN]; | ||||
int ret; | int ret; | ||||
mtx_assert(&pf->pf_mtx, MA_OWNED); | |||||
INIT_DEBUGOUT("ixl_init: begin"); | INIT_DEBUGOUT("ixl_init: begin"); | ||||
ixl_stop(pf); | |||||
/* Get the latest mac address... User might use a LAA */ | /* Get the latest mac address... User might use a LAA */ | ||||
bcopy(IF_LLADDR(vsi->ifp), tmpaddr, | bcopy(IF_LLADDR(iflib_get_ifp(ctx)), tmpaddr, | ||||
I40E_ETH_LENGTH_OF_ADDRESS); | I40E_ETH_LENGTH_OF_ADDRESS); | ||||
if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && | if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && | ||||
(i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { | (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { | ||||
ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); | ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); | ||||
bcopy(tmpaddr, hw->mac.addr, | bcopy(tmpaddr, hw->mac.addr, | ||||
I40E_ETH_LENGTH_OF_ADDRESS); | I40E_ETH_LENGTH_OF_ADDRESS); | ||||
ret = i40e_aq_mac_address_write(hw, | ret = i40e_aq_mac_address_write(hw, | ||||
I40E_AQC_WRITE_TYPE_LAA_ONLY, | I40E_AQC_WRITE_TYPE_LAA_ONLY, | ||||
hw->mac.addr, NULL); | hw->mac.addr, NULL); | ||||
if (ret) { | if (ret) { | ||||
device_printf(dev, "LLA address" | device_printf(dev, "LLA address" | ||||
"change failed!!\n"); | "change failed!!\n"); | ||||
return; | return; | ||||
} else { | } else { | ||||
ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); | ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); | ||||
} | } | ||||
} | } | ||||
/* Set the various hardware offload abilities */ | |||||
ifp->if_hwassist = 0; | |||||
if (ifp->if_capenable & IFCAP_TSO) | |||||
ifp->if_hwassist |= CSUM_TSO; | |||||
if (ifp->if_capenable & IFCAP_TXCSUM) | |||||
ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); | |||||
if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) | |||||
ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); | |||||
/* Set up the device filtering */ | /* Set up the device filtering */ | ||||
bzero(&filter, sizeof(filter)); | bzero(&filter, sizeof(filter)); | ||||
filter.enable_ethtype = TRUE; | filter.enable_ethtype = TRUE; | ||||
filter.enable_macvlan = TRUE; | filter.enable_macvlan = TRUE; | ||||
#ifdef IXL_FDIR | #ifdef IXL_FDIR | ||||
filter.enable_fdir = TRUE; | filter.enable_fdir = TRUE; | ||||
#endif | #endif | ||||
if (i40e_set_filter_control(hw, &filter)) | if (i40e_set_filter_control(hw, &filter)) | ||||
Show All 11 Lines | #endif | ||||
} | } | ||||
/* Add protocol filters to list */ | /* Add protocol filters to list */ | ||||
ixl_init_filters(vsi); | ixl_init_filters(vsi); | ||||
/* Setup vlan's if needed */ | /* Setup vlan's if needed */ | ||||
ixl_setup_vlan_filters(vsi); | ixl_setup_vlan_filters(vsi); | ||||
/* Start the local timer */ | |||||
callout_reset(&pf->timer, hz, ixl_local_timer, pf); | |||||
/* Set up MSI/X routing and the ITR settings */ | /* Set up MSI/X routing and the ITR settings */ | ||||
if (ixl_enable_msix) { | if (ixl_enable_msix) { | ||||
ixl_configure_msix(pf); | ixl_configure_msix(pf); | ||||
ixl_configure_itr(pf); | ixl_configure_itr(pf); | ||||
} else | } else | ||||
ixl_configure_legacy(pf); | ixl_configure_legacy(pf); | ||||
ixl_enable_rings(vsi); | ixl_enable_rings(vsi); | ||||
i40e_aq_set_default_vsi(hw, vsi->seid, NULL); | i40e_aq_set_default_vsi(hw, vsi->seid, NULL); | ||||
ixl_reconfigure_filters(vsi); | ixl_reconfigure_filters(vsi); | ||||
/* Set MTU in hardware*/ | /* Set MTU in hardware*/ | ||||
int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size, | int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size, | ||||
TRUE, 0, NULL); | TRUE, 0, NULL); | ||||
if (aq_error) | if (aq_error) | ||||
device_printf(vsi->dev, | device_printf(dev, | ||||
"aq_set_mac_config in init error, code %d\n", | "aq_set_mac_config in init error, code %d\n", | ||||
aq_error); | aq_error); | ||||
/* And now turn on interrupts */ | |||||
ixl_enable_intr(vsi); | |||||
/* Now inform the stack we're ready */ | |||||
ifp->if_drv_flags |= IFF_DRV_RUNNING; | |||||
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; | |||||
return; | |||||
} | } | ||||
static void | |||||
ixl_init(void *arg) | |||||
{ | |||||
struct ixl_pf *pf = arg; | |||||
IXL_PF_LOCK(pf); | |||||
ixl_init_locked(pf); | |||||
IXL_PF_UNLOCK(pf); | |||||
return; | |||||
} | |||||
/* | |||||
** | |||||
** MSIX Interrupt Handlers and Tasklets | |||||
** | |||||
*/ | |||||
static void | |||||
ixl_handle_que(void *context, int pending) | |||||
{ | |||||
struct ixl_queue *que = context; | |||||
struct ixl_vsi *vsi = que->vsi; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct tx_ring *txr = &que->txr; | |||||
struct ifnet *ifp = vsi->ifp; | |||||
bool more; | |||||
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | |||||
more = ixl_rxeof(que, IXL_RX_LIMIT); | |||||
IXL_TX_LOCK(txr); | |||||
ixl_txeof(que); | |||||
if (!drbr_empty(ifp, txr->br)) | |||||
ixl_mq_start_locked(ifp, txr); | |||||
IXL_TX_UNLOCK(txr); | |||||
if (more) { | |||||
taskqueue_enqueue(que->tq, &que->task); | |||||
return; | |||||
} | |||||
} | |||||
/* Reenable this interrupt - hmmm */ | |||||
ixl_enable_queue(hw, que->me); | |||||
return; | |||||
} | |||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Legacy Interrupt Service routine | * Legacy Interrupt Service routine | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
void | int | ||||
ixl_intr(void *arg) | ixl_intr(void *arg) | ||||
{ | { | ||||
struct ixl_pf *pf = arg; | struct ixl_pf *pf = arg; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
struct ifnet *ifp = vsi->ifp; | |||||
struct tx_ring *txr = &que->txr; | |||||
u32 reg, icr0, mask; | u32 reg, icr0, mask; | ||||
bool more_tx, more_rx; | |||||
++que->irqs; | ++que->irqs; | ||||
/* Protect against spurious interrupts */ | |||||
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) | |||||
return; | |||||
icr0 = rd32(hw, I40E_PFINT_ICR0); | icr0 = rd32(hw, I40E_PFINT_ICR0); | ||||
reg = rd32(hw, I40E_PFINT_DYN_CTL0); | reg = rd32(hw, I40E_PFINT_DYN_CTL0); | ||||
reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; | reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; | ||||
wr32(hw, I40E_PFINT_DYN_CTL0, reg); | wr32(hw, I40E_PFINT_DYN_CTL0, reg); | ||||
mask = rd32(hw, I40E_PFINT_ICR0_ENA); | mask = rd32(hw, I40E_PFINT_ICR0_ENA); | ||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) | if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) | ||||
taskqueue_enqueue(pf->tq, &pf->vflr_task); | iflib_iov_intr_deferred(vsi->ctx); | ||||
#endif | #endif | ||||
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { | if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { | ||||
taskqueue_enqueue(pf->tq, &pf->adminq); | iflib_admin_intr_deferred(vsi->ctx); | ||||
return; | return (FILTER_HANDLED); | ||||
} | } | ||||
more_rx = ixl_rxeof(que, IXL_RX_LIMIT); | |||||
IXL_TX_LOCK(txr); | |||||
more_tx = ixl_txeof(que); | |||||
if (!drbr_empty(vsi->ifp, txr->br)) | |||||
more_tx = 1; | |||||
IXL_TX_UNLOCK(txr); | |||||
/* re-enable other interrupt causes */ | /* re-enable other interrupt causes */ | ||||
wr32(hw, I40E_PFINT_ICR0_ENA, mask); | wr32(hw, I40E_PFINT_ICR0_ENA, mask); | ||||
/* And now the queues */ | /* And now the queues */ | ||||
reg = rd32(hw, I40E_QINT_RQCTL(0)); | reg = rd32(hw, I40E_QINT_RQCTL(0)); | ||||
reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK; | reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK; | ||||
wr32(hw, I40E_QINT_RQCTL(0), reg); | wr32(hw, I40E_QINT_RQCTL(0), reg); | ||||
reg = rd32(hw, I40E_QINT_TQCTL(0)); | reg = rd32(hw, I40E_QINT_TQCTL(0)); | ||||
reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; | reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; | ||||
reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK; | reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK; | ||||
wr32(hw, I40E_QINT_TQCTL(0), reg); | wr32(hw, I40E_QINT_TQCTL(0), reg); | ||||
return (FILTER_SCHEDULE_THREAD); | |||||
ixl_enable_legacy(hw); | |||||
return; | |||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* MSIX VSI Interrupt Service routine | * MSIX VSI Interrupt Service routine | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
void | int | ||||
ixl_msix_que(void *arg) | ixl_msix_que(void *arg) | ||||
{ | { | ||||
struct ixl_queue *que = arg; | struct ixl_queue *que = arg; | ||||
struct ixl_vsi *vsi = que->vsi; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct tx_ring *txr = &que->txr; | |||||
bool more_tx, more_rx; | |||||
/* Protect against spurious interrupts */ | |||||
if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) | |||||
return; | |||||
++que->irqs; | |||||
more_rx = ixl_rxeof(que, IXL_RX_LIMIT); | |||||
IXL_TX_LOCK(txr); | |||||
more_tx = ixl_txeof(que); | |||||
/* | |||||
** Make certain that if the stack | |||||
** has anything queued the task gets | |||||
** scheduled to handle it. | |||||
*/ | |||||
if (!drbr_empty(vsi->ifp, txr->br)) | |||||
more_tx = 1; | |||||
IXL_TX_UNLOCK(txr); | |||||
ixl_set_queue_rx_itr(que); | ixl_set_queue_rx_itr(que); | ||||
ixl_set_queue_tx_itr(que); | ixl_set_queue_tx_itr(que); | ||||
if (more_tx || more_rx) | return (FILTER_SCHEDULE_THREAD); | ||||
taskqueue_enqueue(que->tq, &que->task); | |||||
else | |||||
ixl_enable_queue(hw, que->me); | |||||
return; | |||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* MSIX Admin Queue Interrupt Service routine | * MSIX Admin Queue Interrupt Service routine | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static void | int | ||||
ixl_msix_adminq(void *arg) | ixl_msix_adminq(void *arg) | ||||
{ | { | ||||
struct ixl_pf *pf = arg; | struct ixl_pf *pf = arg; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
u32 reg, mask; | u32 reg, mask; | ||||
++pf->admin_irq; | ++pf->admin_irq; | ||||
reg = rd32(hw, I40E_PFINT_ICR0); | reg = rd32(hw, I40E_PFINT_ICR0); | ||||
mask = rd32(hw, I40E_PFINT_ICR0_ENA); | mask = rd32(hw, I40E_PFINT_ICR0_ENA); | ||||
/* Check on the cause */ | /* Check on the cause */ | ||||
if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) | if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) | ||||
mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; | mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; | ||||
if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) { | if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) { | ||||
ixl_handle_mdd_event(pf); | ixl_handle_mdd_event(pf); | ||||
mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; | mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; | ||||
} | } | ||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
if (reg & I40E_PFINT_ICR0_VFLR_MASK) { | if (reg & I40E_PFINT_ICR0_VFLR_MASK) { | ||||
mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; | mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; | ||||
taskqueue_enqueue(pf->tq, &pf->vflr_task); | iflib_iov_intr_deferred(pf->vsi.ctx); | ||||
} | } | ||||
#endif | #endif | ||||
reg = rd32(hw, I40E_PFINT_DYN_CTL0); | reg = rd32(hw, I40E_PFINT_DYN_CTL0); | ||||
reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; | reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; | ||||
wr32(hw, I40E_PFINT_DYN_CTL0, reg); | wr32(hw, I40E_PFINT_DYN_CTL0, reg); | ||||
taskqueue_enqueue(pf->tq, &pf->adminq); | iflib_admin_intr_deferred(pf->vsi.ctx); | ||||
return; | return (FILTER_HANDLED); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Setup MSIX Interrupt resources and handlers for the VSI | |||||
* | |||||
**********************************************************************/ | |||||
int | |||||
ixl_if_msix_intr_assign(if_ctx_t ctx, int msix) | |||||
{ | |||||
struct ixl_vsi *vsi = iflib_get_softc(ctx); | |||||
struct ixl_pf *pf = vsi->back; | |||||
struct ixl_queue *que = vsi->queues; | |||||
int err, rid, vector = 0; | |||||
/* Admin Que is vector 0*/ | |||||
rid = vector + 1; | |||||
err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, | |||||
ixl_msix_adminq, pf, 0, "aq"); | |||||
if (err) { | |||||
iflib_irq_free(ctx, &vsi->irq); | |||||
device_printf(iflib_get_dev(ctx), "Failed to register Admin que handler"); | |||||
return (err); | |||||
} | |||||
pf->admvec = vector; | |||||
++vector; | |||||
iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov"); | |||||
/* Now set up the stations */ | |||||
for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { | |||||
char buf[16]; | |||||
rid = vector + 1; | |||||
snprintf(buf, sizeof(buf), "rxq%d", i); | |||||
err = iflib_irq_alloc_generic(ctx, &que->que_irq, rid, IFLIB_INTR_RX, | |||||
ixl_msix_que, que, que->me, buf); | |||||
if (err) { | |||||
device_printf(iflib_get_dev(ctx), "Failed to allocate q int %d err: %d", i, err); | |||||
vsi->num_queues = i + 1; | |||||
goto fail; | |||||
} | |||||
snprintf(buf, sizeof(buf), "txq%d", i); | |||||
iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, que, que->me, buf); | |||||
que->msix = vector; | |||||
} | |||||
return (0); | |||||
fail: | |||||
iflib_irq_free(ctx, &vsi->irq); | |||||
que = vsi->queues; | |||||
for (int i = 0; i < vsi->num_queues; i++, que++) | |||||
iflib_irq_free(ctx, &que->que_irq); | |||||
return (err); | |||||
} | |||||
/********************************************************************* | |||||
* | |||||
* Media Ioctl callback | * Media Ioctl callback | ||||
* | * | ||||
* This routine is called whenever the user queries the status of | * This routine is called whenever the user queries the status of | ||||
* the interface using ifconfig. | * the interface using ifconfig. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static void | static void | ||||
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) | ixl_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) | ||||
{ | { | ||||
struct ixl_vsi *vsi = ifp->if_softc; | struct ixl_vsi *vsi = iflib_get_softc(ctx); | ||||
struct ixl_pf *pf = vsi->back; | struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
INIT_DEBUGOUT("ixl_media_status: begin"); | INIT_DEBUGOUT("ixl_media_status: begin"); | ||||
IXL_PF_LOCK(pf); | |||||
hw->phy.get_link_info = TRUE; | hw->phy.get_link_info = TRUE; | ||||
i40e_get_link_status(hw, &pf->link_up); | i40e_get_link_status(hw, &pf->link_up); | ||||
ixl_update_link_status(pf); | ixl_update_link_status(pf); | ||||
ifmr->ifm_status = IFM_AVALID; | ifmr->ifm_status = IFM_AVALID; | ||||
ifmr->ifm_active = IFM_ETHER; | ifmr->ifm_active = IFM_ETHER; | ||||
if (!pf->link_up) { | if (!pf->link_up) { | ||||
IXL_PF_UNLOCK(pf); | |||||
return; | return; | ||||
} | } | ||||
ifmr->ifm_status |= IFM_ACTIVE; | ifmr->ifm_status |= IFM_ACTIVE; | ||||
/* Hardware is always full-duplex */ | /* Hardware is always full-duplex */ | ||||
ifmr->ifm_active |= IFM_FDX; | ifmr->ifm_active |= IFM_FDX; | ||||
switch (hw->phy.link_info.phy_type) { | switch (hw->phy.link_info.phy_type) { | ||||
▲ Show 20 Lines • Show All 83 Lines • ▼ Show 20 Lines | default: | ||||
break; | break; | ||||
} | } | ||||
/* Report flow control status as well */ | /* Report flow control status as well */ | ||||
if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) | if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) | ||||
ifmr->ifm_active |= IFM_ETH_TXPAUSE; | ifmr->ifm_active |= IFM_ETH_TXPAUSE; | ||||
if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) | if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) | ||||
ifmr->ifm_active |= IFM_ETH_RXPAUSE; | ifmr->ifm_active |= IFM_ETH_RXPAUSE; | ||||
IXL_PF_UNLOCK(pf); | |||||
return; | |||||
} | } | ||||
/********************************************************************* | |||||
* | |||||
* Media Ioctl callback | |||||
* | |||||
* This routine is called when the user changes speed/duplex using | |||||
* media/mediopt option with ifconfig. | |||||
* | |||||
**********************************************************************/ | |||||
static int | |||||
ixl_media_change(struct ifnet * ifp) | |||||
{ | |||||
struct ixl_vsi *vsi = ifp->if_softc; | |||||
struct ifmedia *ifm = &vsi->media; | |||||
INIT_DEBUGOUT("ixl_media_change: begin"); | |||||
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) | |||||
return (EINVAL); | |||||
if_printf(ifp, "Media change is currently not supported.\n"); | |||||
return (ENODEV); | |||||
} | |||||
#ifdef IXL_FDIR | #ifdef IXL_FDIR | ||||
/* | /* | ||||
** ATR: Application Targetted Receive - creates a filter | ** ATR: Application Targetted Receive - creates a filter | ||||
** based on TX flow info that will keep the receive | ** based on TX flow info that will keep the receive | ||||
** portion of the flow on the same queue. Based on the | ** portion of the flow on the same queue. Based on the | ||||
** implementation this is only available for TCP connections | ** implementation this is only available for TCP connections | ||||
*/ | */ | ||||
void | void | ||||
ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype) | ixl_atr(struct ixl_queue *que, int hflags, int etype) | ||||
{ | { | ||||
struct ixl_vsi *vsi = que->vsi; | struct ixl_vsi *vsi = que->vsi; | ||||
if_shared_ctx_t sctx = ixl_sctx; | |||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
struct i40e_filter_program_desc *FDIR; | struct i40e_filter_program_desc *FDIR; | ||||
u32 ptype, dtype; | u32 ptype, dtype; | ||||
int idx; | int idx; | ||||
/* check if ATR is enabled and sample rate */ | /* check if ATR is enabled and sample rate */ | ||||
if ((!ixl_enable_fdir) || (!txr->atr_rate)) | if ((!ixl_enable_fdir) || (!txr->atr_rate)) | ||||
return; | return; | ||||
/* | /* | ||||
** We sample all TCP SYN/FIN packets, | ** We sample all TCP SYN/FIN packets, | ||||
** or at the selected sample rate | ** or at the selected sample rate | ||||
*/ | */ | ||||
txr->atr_count++; | txr->atr_count++; | ||||
if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) && | if (((hflags & (TH_FIN | TH_SYN)) == 0) && | ||||
(txr->atr_count < txr->atr_rate)) | (txr->atr_count < txr->atr_rate)) | ||||
return; | return; | ||||
txr->atr_count = 0; | txr->atr_count = 0; | ||||
/* Get a descriptor to use */ | /* Get a descriptor to use */ | ||||
idx = txr->next_avail; | idx = txr->next_avail; | ||||
FDIR = (struct i40e_filter_program_desc *) &txr->base[idx]; | FDIR = (struct i40e_filter_program_desc *) &txr->base[idx]; | ||||
if (++idx == que->num_desc) | if (++idx == sctx->isc_ntxd) | ||||
idx = 0; | idx = 0; | ||||
txr->avail--; | txr->avail--; | ||||
txr->next_avail = idx; | txr->next_avail = idx; | ||||
ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & | ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & | ||||
I40E_TXD_FLTR_QW0_QINDEX_MASK; | I40E_TXD_FLTR_QW0_QINDEX_MASK; | ||||
ptype |= (etype == ETHERTYPE_IP) ? | ptype |= (etype == ETHERTYPE_IP) ? | ||||
(I40E_FILTER_PCTYPE_NONF_IPV4_TCP << | (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << | ||||
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : | I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : | ||||
(I40E_FILTER_PCTYPE_NONF_IPV6_TCP << | (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << | ||||
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); | I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); | ||||
ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; | ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; | ||||
dtype = I40E_TX_DESC_DTYPE_FILTER_PROG; | dtype = I40E_TX_DESC_DTYPE_FILTER_PROG; | ||||
/* | /* | ||||
** We use the TCP TH_FIN as a trigger to remove | ** We use the TCP TH_FIN as a trigger to remove | ||||
** the filter, otherwise its an update. | ** the filter, otherwise its an update. | ||||
*/ | */ | ||||
dtype |= (th->th_flags & TH_FIN) ? | dtype |= (hflags & TH_FIN) ? | ||||
(I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << | (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE << | ||||
I40E_TXD_FLTR_QW1_PCMD_SHIFT) : | I40E_TXD_FLTR_QW1_PCMD_SHIFT) : | ||||
(I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << | (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE << | ||||
I40E_TXD_FLTR_QW1_PCMD_SHIFT); | I40E_TXD_FLTR_QW1_PCMD_SHIFT); | ||||
dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX << | dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX << | ||||
I40E_TXD_FLTR_QW1_DEST_SHIFT; | I40E_TXD_FLTR_QW1_DEST_SHIFT; | ||||
dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << | dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID << | ||||
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; | I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT; | ||||
FDIR->qindex_flex_ptype_vsi = htole32(ptype); | FDIR->qindex_flex_ptype_vsi = htole32(ptype); | ||||
FDIR->dtype_cmd_cntindex = htole32(dtype); | FDIR->dtype_cmd_cntindex = htole32(dtype); | ||||
return; | |||||
} | } | ||||
#endif | #endif | ||||
static int | |||||
static void | ixl_if_promisc_set(if_ctx_t ctx, int flags) | ||||
ixl_set_promisc(struct ixl_vsi *vsi) | |||||
{ | { | ||||
struct ifnet *ifp = vsi->ifp; | struct ixl_vsi *vsi = iflib_get_softc(ctx); | ||||
struct ifnet *ifp = iflib_get_ifp(ctx); | |||||
struct i40e_hw *hw = vsi->hw; | struct i40e_hw *hw = vsi->hw; | ||||
int err, mcnt = 0; | int err; | ||||
bool uni = FALSE, multi = FALSE; | bool uni = FALSE, multi = FALSE; | ||||
if (ifp->if_flags & IFF_ALLMULTI) | if (flags & IFF_ALLMULTI || | ||||
if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR) | |||||
multi = TRUE; | multi = TRUE; | ||||
else { /* Need to count the multicast addresses */ | if (flags & IFF_PROMISC) | ||||
struct ifmultiaddr *ifma; | |||||
if_maddr_rlock(ifp); | |||||
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { | |||||
if (ifma->ifma_addr->sa_family != AF_LINK) | |||||
continue; | |||||
if (mcnt == MAX_MULTICAST_ADDR) | |||||
break; | |||||
mcnt++; | |||||
} | |||||
if_maddr_runlock(ifp); | |||||
} | |||||
if (mcnt >= MAX_MULTICAST_ADDR) | |||||
multi = TRUE; | |||||
if (ifp->if_flags & IFF_PROMISC) | |||||
uni = TRUE; | uni = TRUE; | ||||
err = i40e_aq_set_vsi_unicast_promiscuous(hw, | err = i40e_aq_set_vsi_unicast_promiscuous(hw, | ||||
vsi->seid, uni, NULL); | vsi->seid, uni, NULL); | ||||
if (err) | |||||
return (err); | |||||
err = i40e_aq_set_vsi_multicast_promiscuous(hw, | err = i40e_aq_set_vsi_multicast_promiscuous(hw, | ||||
vsi->seid, multi, NULL); | vsi->seid, multi, NULL); | ||||
return; | return (err); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* Filter Routines | * Filter Routines | ||||
* | * | ||||
* Routines for multicast and vlan filter management. | * Routines for multicast and vlan filter management. | ||||
* | * | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static void | static void | ||||
ixl_add_multi(struct ixl_vsi *vsi) | ixl_del_multi(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ifmultiaddr *ifma; | struct ixl_mac_filter *f; | ||||
struct ifnet *ifp = vsi->ifp; | int mcnt = 0; | ||||
struct i40e_hw *hw = vsi->hw; | |||||
int mcnt = 0, flags; | |||||
IOCTL_DEBUGOUT("ixl_add_multi: begin"); | IOCTL_DEBUGOUT("ixl_del_multi: begin"); | ||||
if_maddr_rlock(ifp); | SLIST_FOREACH(f, &vsi->ftl, next) { | ||||
/* | if ((f->flags & (IXL_FILTER_USED|IXL_FILTER_MC)) == (IXL_FILTER_USED|IXL_FILTER_MC)){ | ||||
** First just get a count, to decide if we | f->flags |= IXL_FILTER_DEL; | ||||
** we simply use multicast promiscuous. | |||||
*/ | |||||
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { | |||||
if (ifma->ifma_addr->sa_family != AF_LINK) | |||||
continue; | |||||
mcnt++; | mcnt++; | ||||
} | } | ||||
if_maddr_runlock(ifp); | } | ||||
if (mcnt > 0) | |||||
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { | |||||
/* delete existing MC filters */ | |||||
ixl_del_hw_filters(vsi, mcnt); | ixl_del_hw_filters(vsi, mcnt); | ||||
i40e_aq_set_vsi_multicast_promiscuous(hw, | |||||
vsi->seid, TRUE, NULL); | |||||
return; | |||||
} | } | ||||
mcnt = 0; | static int | ||||
if_maddr_rlock(ifp); | ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused) | ||||
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { | { | ||||
struct ixl_vsi *vsi = arg; | |||||
if (ifma->ifma_addr->sa_family != AF_LINK) | if (ifma->ifma_addr->sa_family != AF_LINK) | ||||
continue; | return (0); | ||||
ixl_add_mc_filter(vsi, | ixl_add_mc_filter(vsi, | ||||
(u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); | (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); | ||||
mcnt++; | return (1); | ||||
} | } | ||||
if_maddr_runlock(ifp); | |||||
if (mcnt > 0) { | |||||
flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); | |||||
ixl_add_hw_filters(vsi, flags, mcnt); | |||||
} | |||||
IOCTL_DEBUGOUT("ixl_add_multi: end"); | |||||
return; | |||||
} | |||||
static void | static void | ||||
ixl_del_multi(struct ixl_vsi *vsi) | ixl_if_multi_set(if_ctx_t ctx) | ||||
{ | { | ||||
struct ifnet *ifp = vsi->ifp; | struct ixl_vsi *vsi = iflib_get_softc(ctx); | ||||
struct ifmultiaddr *ifma; | struct i40e_hw *hw = vsi->hw; | ||||
struct ixl_mac_filter *f; | int mcnt = 0, flags; | ||||
int mcnt = 0; | |||||
bool match = FALSE; | |||||
IOCTL_DEBUGOUT("ixl_del_multi: begin"); | IOCTL_DEBUGOUT("ixl_if_multi_set: begin"); | ||||
/* Search for removed multicast addresses */ | mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR); | ||||
if_maddr_rlock(ifp); | /* delete existing MC filters */ | ||||
SLIST_FOREACH(f, &vsi->ftl, next) { | ixl_del_multi(vsi); | ||||
if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { | |||||
match = FALSE; | if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { | ||||
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { | i40e_aq_set_vsi_multicast_promiscuous(hw, | ||||
if (ifma->ifma_addr->sa_family != AF_LINK) | vsi->seid, TRUE, NULL); | ||||
continue; | return; | ||||
u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); | |||||
if (cmp_etheraddr(f->macaddr, mc_addr)) { | |||||
match = TRUE; | |||||
break; | |||||
} | } | ||||
/* (re-)install filters for all mcast addresses */ | |||||
mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi); | |||||
if (mcnt > 0) { | |||||
flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); | |||||
ixl_add_hw_filters(vsi, flags, mcnt); | |||||
} | } | ||||
if (match == FALSE) { | |||||
f->flags |= IXL_FILTER_DEL; | |||||
mcnt++; | |||||
} | |||||
} | |||||
} | |||||
if_maddr_runlock(ifp); | |||||
if (mcnt > 0) | IOCTL_DEBUGOUT("ixl_if_multi_set: end"); | ||||
ixl_del_hw_filters(vsi, mcnt); | |||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* Timer routine | * Timer routine | ||||
* | * | ||||
* This routine checks for link status,updates statistics, | * This routine checks for link status,updates statistics, | ||||
* and runs the watchdog check. | * and runs the watchdog check. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static void | static void | ||||
ixl_local_timer(void *arg) | ixl_if_timer(if_ctx_t ctx, uint16_t qid) | ||||
{ | { | ||||
struct ixl_pf *pf = arg; | struct ixl_vsi *vsi = iflib_get_softc(ctx); | ||||
struct ixl_pf *pf = vsi->back; | |||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_queue *que = &vsi->queues[qid]; | ||||
struct ixl_queue *que = vsi->queues; | |||||
device_t dev = pf->dev; | |||||
int hung = 0; | |||||
u32 mask; | u32 mask; | ||||
mtx_assert(&pf->pf_mtx, MA_OWNED); | |||||
/* Fire off the adminq task */ | |||||
taskqueue_enqueue(pf->tq, &pf->adminq); | |||||
/* Update stats */ | |||||
ixl_update_stats_counters(pf); | |||||
/* | /* | ||||
** Check status of the queues | ** Check status of the queues | ||||
*/ | */ | ||||
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | | mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | | ||||
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); | I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); | ||||
for (int i = 0; i < vsi->num_queues; i++,que++) { | |||||
/* Any queues with outstanding work get a sw irq */ | /* Any queues with outstanding work get a sw irq */ | ||||
if (que->busy) | if (que->busy) | ||||
wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask); | wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask); | ||||
/* | |||||
** Each time txeof runs without cleaning, but there | |||||
** are uncleaned descriptors it increments busy. If | |||||
** we get to 5 we declare it hung. | |||||
*/ | |||||
if (que->busy == IXL_QUEUE_HUNG) { | |||||
++hung; | |||||
/* Mark the queue as inactive */ | |||||
vsi->active_queues &= ~((u64)1 << que->me); | |||||
continue; | |||||
} else { | |||||
/* Check if we've come back from hung */ | |||||
if ((vsi->active_queues & ((u64)1 << que->me)) == 0) | |||||
vsi->active_queues |= ((u64)1 << que->me); | |||||
} | |||||
if (que->busy >= IXL_MAX_TX_BUSY) { | |||||
#ifdef IXL_DEBUG | |||||
device_printf(dev,"Warning queue %d " | |||||
"appears to be hung!\n", i); | |||||
#endif | |||||
que->busy = IXL_QUEUE_HUNG; | |||||
++hung; | |||||
} | |||||
} | |||||
/* Only reinit if all queues show hung */ | |||||
if (hung == vsi->num_queues) | |||||
goto hung; | |||||
callout_reset(&pf->timer, hz, ixl_local_timer, pf); | if (qid != 0) | ||||
return; | return; | ||||
hung: | /* Fire off the adminq task */ | ||||
device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n"); | iflib_admin_intr_deferred(ctx); | ||||
ixl_init_locked(pf); | |||||
/* Update stats */ | |||||
ixl_update_stats_counters(pf); | |||||
} | } | ||||
/* | /* | ||||
** Note: this routine updates the OS on the link state | ** Note: this routine updates the OS on the link state | ||||
** the real check of the hardware only happens with | ** the real check of the hardware only happens with | ||||
** a link interrupt. | ** a link interrupt. | ||||
*/ | */ | ||||
static void | static void | ||||
ixl_update_link_status(struct ixl_pf *pf) | ixl_update_link_status(struct ixl_pf *pf) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ifnet *ifp = vsi->ifp; | device_t dev = iflib_get_dev(vsi->ctx); | ||||
device_t dev = pf->dev; | |||||
if (pf->link_up){ | if (pf->link_up){ | ||||
if (vsi->link_active == FALSE) { | if (vsi->link_active == FALSE) { | ||||
pf->fc = hw->fc.current_mode; | pf->fc = hw->fc.current_mode; | ||||
if (bootverbose) { | if (bootverbose) { | ||||
device_printf(dev,"Link is up %d Gbps %s," | device_printf(dev,"Link is up %d Gbps %s," | ||||
" Flow Control: %s\n", | " Flow Control: %s\n", | ||||
((pf->link_speed == | ((pf->link_speed == | ||||
I40E_LINK_SPEED_40GB)? 40:10), | I40E_LINK_SPEED_40GB)? 40:10), | ||||
"Full Duplex", ixl_fc_string[pf->fc]); | "Full Duplex", ixl_fc_string[pf->fc]); | ||||
} | } | ||||
vsi->link_active = TRUE; | vsi->link_active = TRUE; | ||||
/* | /* | ||||
** Warn user if link speed on NPAR enabled | ** Warn user if link speed on NPAR enabled | ||||
** partition is not at least 10GB | ** partition is not at least 10GB | ||||
*/ | */ | ||||
if (hw->func_caps.npar_enable && | if (hw->func_caps.npar_enable && | ||||
(hw->phy.link_info.link_speed == | (hw->phy.link_info.link_speed == I40E_LINK_SPEED_1GB || | ||||
I40E_LINK_SPEED_1GB || | hw->phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) | ||||
hw->phy.link_info.link_speed == | device_printf(dev, "The partition detected link" | ||||
I40E_LINK_SPEED_100MB)) | "speed that is less than 10Gbps\n"); | ||||
device_printf(dev, "The partition detected" | iflib_link_state_change(vsi->ctx, LINK_STATE_UP); | ||||
"link speed that is less than 10Gbps\n"); | |||||
if_link_state_change(ifp, LINK_STATE_UP); | |||||
} | } | ||||
} else { /* Link down */ | } else { /* Link down */ | ||||
if (vsi->link_active == TRUE) { | if (vsi->link_active == TRUE) { | ||||
if (bootverbose) | if (bootverbose) | ||||
device_printf(dev,"Link is Down\n"); | device_printf(dev,"Link is Down\n"); | ||||
if_link_state_change(ifp, LINK_STATE_DOWN); | iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN); | ||||
vsi->link_active = FALSE; | vsi->link_active = FALSE; | ||||
} | } | ||||
} | } | ||||
return; | |||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* This routine disables all traffic on the adapter by issuing a | * This routine disables all traffic on the adapter by issuing a | ||||
* global reset on the MAC and deallocates TX/RX buffers. | * global reset on the MAC and deallocates TX/RX buffers. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static void | static void | ||||
ixl_stop(struct ixl_pf *pf) | ixl_if_stop(if_ctx_t ctx) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = iflib_get_softc(ctx); | ||||
struct ifnet *ifp = vsi->ifp; | |||||
mtx_assert(&pf->pf_mtx, MA_OWNED); | INIT_DEBUGOUT("ixl_if_stop: begin\n"); | ||||
INIT_DEBUGOUT("ixl_stop: begin\n"); | |||||
if (pf->num_vfs == 0) | |||||
ixl_disable_intr(vsi); | |||||
else | |||||
ixl_disable_rings_intr(vsi); | |||||
ixl_disable_rings(vsi); | ixl_disable_rings(vsi); | ||||
/* Tell the stack that the interface is no longer active */ | |||||
ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); | |||||
/* Stop the local timer */ | |||||
callout_stop(&pf->timer); | |||||
return; | |||||
} | } | ||||
/********************************************************************* | |||||
* | |||||
* Setup MSIX Interrupt resources and handlers for the VSI | |||||
* | |||||
**********************************************************************/ | |||||
static int | |||||
ixl_assign_vsi_legacy(struct ixl_pf *pf) | |||||
{ | |||||
device_t dev = pf->dev; | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct ixl_queue *que = vsi->queues; | |||||
int error, rid = 0; | |||||
if (pf->msix == 1) | |||||
rid = 1; | |||||
pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, | |||||
&rid, RF_SHAREABLE | RF_ACTIVE); | |||||
if (pf->res == NULL) { | |||||
device_printf(dev,"Unable to allocate" | |||||
" bus resource: vsi legacy/msi interrupt\n"); | |||||
return (ENXIO); | |||||
} | |||||
/* Set the handler function */ | |||||
error = bus_setup_intr(dev, pf->res, | |||||
INTR_TYPE_NET | INTR_MPSAFE, NULL, | |||||
ixl_intr, pf, &pf->tag); | |||||
if (error) { | |||||
pf->res = NULL; | |||||
device_printf(dev, "Failed to register legacy/msi handler"); | |||||
return (error); | |||||
} | |||||
bus_describe_intr(dev, pf->res, pf->tag, "irq0"); | |||||
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); | |||||
TASK_INIT(&que->task, 0, ixl_handle_que, que); | |||||
que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, | |||||
taskqueue_thread_enqueue, &que->tq); | |||||
taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", | |||||
device_get_nameunit(dev)); | |||||
TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); | |||||
#ifdef PCI_IOV | |||||
TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); | |||||
#endif | |||||
pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, | |||||
taskqueue_thread_enqueue, &pf->tq); | |||||
taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq", | |||||
device_get_nameunit(dev)); | |||||
return (0); | |||||
} | |||||
/********************************************************************* | |||||
* | |||||
* Setup MSIX Interrupt resources and handlers for the VSI | |||||
* | |||||
**********************************************************************/ | |||||
static int | |||||
ixl_assign_vsi_msix(struct ixl_pf *pf) | |||||
{ | |||||
device_t dev = pf->dev; | |||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct ixl_queue *que = vsi->queues; | |||||
struct tx_ring *txr; | |||||
int error, rid, vector = 0; | |||||
#ifdef RSS | |||||
cpuset_t cpu_mask; | |||||
#endif | |||||
/* Admin Que is vector 0*/ | |||||
rid = vector + 1; | |||||
pf->res = bus_alloc_resource_any(dev, | |||||
SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); | |||||
if (!pf->res) { | |||||
device_printf(dev,"Unable to allocate" | |||||
" bus resource: Adminq interrupt [%d]\n", rid); | |||||
return (ENXIO); | |||||
} | |||||
/* Set the adminq vector and handler */ | |||||
error = bus_setup_intr(dev, pf->res, | |||||
INTR_TYPE_NET | INTR_MPSAFE, NULL, | |||||
ixl_msix_adminq, pf, &pf->tag); | |||||
if (error) { | |||||
pf->res = NULL; | |||||
device_printf(dev, "Failed to register Admin que handler"); | |||||
return (error); | |||||
} | |||||
bus_describe_intr(dev, pf->res, pf->tag, "aq"); | |||||
pf->admvec = vector; | |||||
/* Tasklet for Admin Queue */ | |||||
TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); | |||||
#ifdef PCI_IOV | |||||
TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); | |||||
#endif | |||||
pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, | |||||
taskqueue_thread_enqueue, &pf->tq); | |||||
taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq", | |||||
device_get_nameunit(pf->dev)); | |||||
++vector; | |||||
/* Now set up the stations */ | |||||
for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { | |||||
int cpu_id = i; | |||||
rid = vector + 1; | |||||
txr = &que->txr; | |||||
que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, | |||||
RF_SHAREABLE | RF_ACTIVE); | |||||
if (que->res == NULL) { | |||||
device_printf(dev,"Unable to allocate" | |||||
" bus resource: que interrupt [%d]\n", vector); | |||||
return (ENXIO); | |||||
} | |||||
/* Set the handler function */ | |||||
error = bus_setup_intr(dev, que->res, | |||||
INTR_TYPE_NET | INTR_MPSAFE, NULL, | |||||
ixl_msix_que, que, &que->tag); | |||||
if (error) { | |||||
que->res = NULL; | |||||
device_printf(dev, "Failed to register que handler"); | |||||
return (error); | |||||
} | |||||
bus_describe_intr(dev, que->res, que->tag, "q%d", i); | |||||
/* Bind the vector to a CPU */ | |||||
#ifdef RSS | |||||
cpu_id = rss_getcpu(i % rss_getnumbuckets()); | |||||
#endif | |||||
bus_bind_intr(dev, que->res, cpu_id); | |||||
que->msix = vector; | |||||
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); | |||||
TASK_INIT(&que->task, 0, ixl_handle_que, que); | |||||
que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, | |||||
taskqueue_thread_enqueue, &que->tq); | |||||
#ifdef RSS | |||||
CPU_SETOF(cpu_id, &cpu_mask); | |||||
taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, | |||||
&cpu_mask, "%s (bucket %d)", | |||||
device_get_nameunit(dev), cpu_id); | |||||
#else | |||||
taskqueue_start_threads(&que->tq, 1, PI_NET, | |||||
"%s que", device_get_nameunit(dev)); | |||||
#endif | |||||
} | |||||
return (0); | |||||
} | |||||
/* | /* | ||||
* Allocate MSI/X vectors | |||||
*/ | |||||
static int | |||||
ixl_init_msix(struct ixl_pf *pf) | |||||
{ | |||||
device_t dev = pf->dev; | |||||
int rid, want, vectors, queues, available; | |||||
/* Override by tuneable */ | |||||
if (ixl_enable_msix == 0) | |||||
goto msi; | |||||
/* | |||||
** When used in a virtualized environment | |||||
** PCI BUSMASTER capability may not be set | |||||
** so explicity set it here and rewrite | |||||
** the ENABLE in the MSIX control register | |||||
** at this point to cause the host to | |||||
** successfully initialize us. | |||||
*/ | |||||
{ | |||||
u16 pci_cmd_word; | |||||
int msix_ctrl; | |||||
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); | |||||
pci_cmd_word |= PCIM_CMD_BUSMASTEREN; | |||||
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); | |||||
pci_find_cap(dev, PCIY_MSIX, &rid); | |||||
rid += PCIR_MSIX_CTRL; | |||||
msix_ctrl = pci_read_config(dev, rid, 2); | |||||
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; | |||||
pci_write_config(dev, rid, msix_ctrl, 2); | |||||
} | |||||
/* First try MSI/X */ | |||||
rid = PCIR_BAR(IXL_BAR); | |||||
pf->msix_mem = bus_alloc_resource_any(dev, | |||||
SYS_RES_MEMORY, &rid, RF_ACTIVE); | |||||
if (!pf->msix_mem) { | |||||
/* May not be enabled */ | |||||
device_printf(pf->dev, | |||||
"Unable to map MSIX table \n"); | |||||
goto msi; | |||||
} | |||||
available = pci_msix_count(dev); | |||||
if (available == 0) { /* system has msix disabled */ | |||||
bus_release_resource(dev, SYS_RES_MEMORY, | |||||
rid, pf->msix_mem); | |||||
pf->msix_mem = NULL; | |||||
goto msi; | |||||
} | |||||
/* Figure out a reasonable auto config value */ | |||||
queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus; | |||||
/* Override with hardcoded value if sane */ | |||||
if ((ixl_max_queues != 0) && (ixl_max_queues <= queues)) | |||||
queues = ixl_max_queues; | |||||
#ifdef RSS | |||||
/* If we're doing RSS, clamp at the number of RSS buckets */ | |||||
if (queues > rss_getnumbuckets()) | |||||
queues = rss_getnumbuckets(); | |||||
#endif | |||||
/* | |||||
** Want one vector (RX/TX pair) per queue | |||||
** plus an additional for the admin queue. | |||||
*/ | |||||
want = queues + 1; | |||||
if (want <= available) /* Have enough */ | |||||
vectors = want; | |||||
else { | |||||
device_printf(pf->dev, | |||||
"MSIX Configuration Problem, " | |||||
"%d vectors available but %d wanted!\n", | |||||
available, want); | |||||
return (0); /* Will go to Legacy setup */ | |||||
} | |||||
if (pci_alloc_msix(dev, &vectors) == 0) { | |||||
device_printf(pf->dev, | |||||
"Using MSIX interrupts with %d vectors\n", vectors); | |||||
pf->msix = vectors; | |||||
pf->vsi.num_queues = queues; | |||||
#ifdef RSS | |||||
/* | |||||
* If we're doing RSS, the number of queues needs to | |||||
* match the number of RSS buckets that are configured. | |||||
* | |||||
* + If there's more queues than RSS buckets, we'll end | |||||
* up with queues that get no traffic. | |||||
* | |||||
* + If there's more RSS buckets than queues, we'll end | |||||
* up having multiple RSS buckets map to the same queue, | |||||
* so there'll be some contention. | |||||
*/ | |||||
if (queues != rss_getnumbuckets()) { | |||||
device_printf(dev, | |||||
"%s: queues (%d) != RSS buckets (%d)" | |||||
"; performance will be impacted.\n", | |||||
__func__, queues, rss_getnumbuckets()); | |||||
} | |||||
#endif | |||||
return (vectors); | |||||
} | |||||
msi: | |||||
vectors = pci_msi_count(dev); | |||||
pf->vsi.num_queues = 1; | |||||
pf->msix = 1; | |||||
ixl_max_queues = 1; | |||||
ixl_enable_msix = 0; | |||||
if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) | |||||
device_printf(pf->dev,"Using an MSI interrupt\n"); | |||||
else { | |||||
pf->msix = 0; | |||||
device_printf(pf->dev,"Using a Legacy interrupt\n"); | |||||
} | |||||
return (vectors); | |||||
} | |||||
/* | |||||
* Plumb MSI/X vectors | * Plumb MSI/X vectors | ||||
*/ | */ | ||||
static void | static void | ||||
ixl_configure_msix(struct ixl_pf *pf) | ixl_configure_msix(struct ixl_pf *pf) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
u32 reg; | u32 reg; | ||||
▲ Show 20 Lines • Show All 49 Lines • ▼ Show 20 Lines | |||||
* Configure for MSI single vector operation | * Configure for MSI single vector operation | ||||
*/ | */ | ||||
static void | static void | ||||
ixl_configure_legacy(struct ixl_pf *pf) | ixl_configure_legacy(struct ixl_pf *pf) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
u32 reg; | u32 reg; | ||||
wr32(hw, I40E_PFINT_ITR0(0), 0); | wr32(hw, I40E_PFINT_ITR0(0), 0); | ||||
wr32(hw, I40E_PFINT_ITR0(1), 0); | wr32(hw, I40E_PFINT_ITR0(1), 0); | ||||
/* Setup "other" causes */ | /* Setup "other" causes */ | ||||
reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | ||||
| I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | ||||
| I40E_PFINT_ICR0_ENA_GRST_MASK | | I40E_PFINT_ICR0_ENA_GRST_MASK | ||||
| I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | ||||
| I40E_PFINT_ICR0_ENA_GPIO_MASK | | I40E_PFINT_ICR0_ENA_GPIO_MASK | ||||
| I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | ||||
| I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | ||||
▲ Show 20 Lines • Show All 67 Lines • ▼ Show 20 Lines | ixl_configure_itr(struct ixl_pf *pf) | ||||
} | } | ||||
} | } | ||||
/*
 * Map BAR 0 (the device register space) and wire up the osdep
 * bus-space cookies that the shared i40e code uses for register
 * access.  Interrupt/MSI-X setup is handled by iflib, not here.
 *
 * Returns 0 on success or ENXIO if the memory BAR cannot be mapped.
 */
static int
ixl_allocate_pci_resources(struct ixl_pf *pf)
{
	int             rid;
	device_t        dev = iflib_get_dev(pf->vsi.ctx);

	/* Map BAR0 */
	rid = PCIR_BAR(0);
	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);

	if (!(pf->pci_mem)) {
		device_printf(dev,"Unable to allocate bus resource: memory\n");
		return (ENXIO);
	}

	/* Save off the tag/handle/size so the shared code's rd32/wr32
	 * wrappers can reach the registers through pf->osdep. */
	pf->osdep.mem_bus_space_tag =
		rman_get_bustag(pf->pci_mem);
	pf->osdep.mem_bus_space_handle =
		rman_get_bushandle(pf->pci_mem);
	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
	pf->osdep.flush_reg = I40E_GLGEN_STAT;
	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;

	pf->hw.back = &pf->osdep;

	return (0);
}
static void | static void | ||||
ixl_free_pci_resources(struct ixl_pf * pf) | ixl_free_pci_resources(struct ixl_pf * pf) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
device_t dev = pf->dev; | device_t dev = iflib_get_dev(vsi->ctx); | ||||
int rid, memrid; | int rid, memrid; | ||||
memrid = PCIR_BAR(IXL_BAR); | memrid = PCIR_BAR(IXL_BAR); | ||||
/* We may get here before stations are setup */ | /* We may get here before stations are setup */ | ||||
if ((!ixl_enable_msix) || (que == NULL)) | if ((!ixl_enable_msix) || (que == NULL)) | ||||
goto early; | goto early; | ||||
/* | /* | ||||
** Release all msix VSI resources: | ** Release all msix VSI resources: | ||||
*/ | */ | ||||
for (int i = 0; i < vsi->num_queues; i++, que++) { | iflib_irq_free(vsi->ctx, &vsi->irq); | ||||
rid = que->msix + 1; | |||||
if (que->tag != NULL) { | |||||
bus_teardown_intr(dev, que->res, que->tag); | |||||
que->tag = NULL; | |||||
} | |||||
if (que->res != NULL) | |||||
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); | |||||
} | |||||
for (int i = 0; i < vsi->num_queues; i++, que++) | |||||
iflib_irq_free(vsi->ctx, &que->que_irq); | |||||
early: | early: | ||||
/* Clean the AdminQ interrupt last */ | /* Clean the AdminQ interrupt last */ | ||||
if (pf->admvec) /* we are doing MSIX */ | if (pf->admvec) /* we are doing MSIX */ | ||||
rid = pf->admvec + 1; | rid = pf->admvec + 1; | ||||
else | else | ||||
(pf->msix != 0) ? (rid = 1):(rid = 0); | (pf->msix != 0) ? (rid = 1):(rid = 0); | ||||
if (pf->tag != NULL) { | if (pf->tag != NULL) { | ||||
bus_teardown_intr(dev, pf->res, pf->tag); | bus_teardown_intr(dev, pf->res, pf->tag); | ||||
pf->tag = NULL; | pf->tag = NULL; | ||||
} | } | ||||
if (pf->res != NULL) | if (pf->res != NULL) | ||||
bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res); | bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res); | ||||
if (pf->msix) | |||||
pci_release_msi(dev); | |||||
if (pf->msix_mem != NULL) | |||||
bus_release_resource(dev, SYS_RES_MEMORY, | |||||
memrid, pf->msix_mem); | |||||
if (pf->pci_mem != NULL) | if (pf->pci_mem != NULL) | ||||
bus_release_resource(dev, SYS_RES_MEMORY, | bus_release_resource(dev, SYS_RES_MEMORY, | ||||
PCIR_BAR(0), pf->pci_mem); | PCIR_BAR(0), pf->pci_mem); | ||||
return; | |||||
} | } | ||||
/*
 * Translate the PHY-type capability bitmask reported by the firmware
 * (i40e_aq_get_phy_capabilities) into ifmedia entries on the VSI's
 * media list.  Each supported PHY type maps to the closest IFM_*
 * media word; several distinct PHY types intentionally share one
 * media word (e.g. XAUI/XFI/SFP+CU all report as 10G_TWINAX).
 */
static void
ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
{
	/* Display supported media types */
	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);

#ifndef IFM_ETH_XTYPE
	/* Older ifmedia: no extended media words, so backplane and
	 * attached-copper PHY types alias onto the closest legacy word. */
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
	    phy_type & (1 << I40E_PHY_TYPE_SFI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
#else
	/* Newer ifmedia: exact extended media words are available. */
	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);

	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
		ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
#endif
}
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Setup networking device structure and register an interface. | * Setup networking device structure and register an interface. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static int | static int | ||||
ixl_setup_interface(device_t dev, struct ixl_vsi *vsi) | ixl_setup_interface(device_t dev, struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ifnet *ifp; | if_ctx_t ctx = vsi->ctx; | ||||
struct i40e_hw *hw = vsi->hw; | struct ixl_pf *pf = vsi->back; | ||||
struct ixl_queue *que = vsi->queues; | struct i40e_hw *hw = &pf->hw; | ||||
struct ifnet *ifp = iflib_get_ifp(ctx); | |||||
struct i40e_aq_get_phy_abilities_resp abilities; | struct i40e_aq_get_phy_abilities_resp abilities; | ||||
enum i40e_status_code aq_error = 0; | enum i40e_status_code aq_error = 0; | ||||
uint64_t cap; | |||||
INIT_DEBUGOUT("ixl_setup_interface: begin"); | INIT_DEBUGOUT("ixl_setup_interface: begin"); | ||||
/* initialize fast path functions */ | |||||
ifp = vsi->ifp = if_alloc(IFT_ETHER); | cap = IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_LRO | IFCAP_JUMBO_MTU; /* IFCAP_TSO | */ | ||||
if (ifp == NULL) { | cap |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; | ||||
device_printf(dev, "can not allocate ifnet structure\n"); | if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); | ||||
return (-1); | if_setcapabilitiesbit(ifp, cap, 0); | ||||
} | if_setcapenable(ifp, if_getcapabilities(ifp)); | ||||
if_initname(ifp, device_get_name(dev), device_get_unit(dev)); | if_setbaudrate(ifp, 4000000000); | ||||
ifp->if_mtu = ETHERMTU; | |||||
ifp->if_baudrate = IF_Gbps(40); | |||||
ifp->if_init = ixl_init; | |||||
ifp->if_softc = vsi; | |||||
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | |||||
ifp->if_ioctl = ixl_ioctl; | |||||
#if __FreeBSD_version >= 1100036 | |||||
if_setgetcounterfn(ifp, ixl_get_counter); | |||||
#endif | |||||
ifp->if_transmit = ixl_mq_start; | |||||
ifp->if_qflush = ixl_qflush; | |||||
ifp->if_snd.ifq_maxlen = que->num_desc - 2; | |||||
vsi->max_frame_size = | vsi->max_frame_size = | ||||
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN | ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN | ||||
+ ETHER_VLAN_ENCAP_LEN; | + ETHER_VLAN_ENCAP_LEN; | ||||
/* | /* | ||||
* Tell the upper layer(s) we support long frames. | |||||
*/ | |||||
ifp->if_hdrlen = sizeof(struct ether_vlan_header); | |||||
ifp->if_capabilities |= IFCAP_HWCSUM; | |||||
ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; | |||||
ifp->if_capabilities |= IFCAP_TSO; | |||||
ifp->if_capabilities |= IFCAP_JUMBO_MTU; | |||||
ifp->if_capabilities |= IFCAP_LRO; | |||||
/* VLAN capabilties */ | |||||
ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | |||||
| IFCAP_VLAN_HWTSO | |||||
| IFCAP_VLAN_MTU | |||||
| IFCAP_VLAN_HWCSUM; | |||||
ifp->if_capenable = ifp->if_capabilities; | |||||
/* | |||||
** Don't turn this on by default, if vlans are | ** Don't turn this on by default, if vlans are | ||||
** created on another pseudo device (eg. lagg) | ** created on another pseudo device (eg. lagg) | ||||
** then vlan events are not passed thru, breaking | ** then vlan events are not passed thru, breaking | ||||
** operation, but with HW FILTER off it works. If | ** operation, but with HW FILTER off it works. If | ||||
** using vlans directly on the ixl driver you can | ** using vlans directly on the ixl driver you can | ||||
** enable this and get full hardware tag filtering. | ** enable this and get full hardware tag filtering. | ||||
*/ | */ | ||||
ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; | ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; | ||||
/* | |||||
* Specify the media types supported by this adapter and register | |||||
* callbacks to update media and link information | |||||
*/ | |||||
ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change, | |||||
ixl_media_status); | |||||
aq_error = i40e_aq_get_phy_capabilities(hw, | aq_error = i40e_aq_get_phy_capabilities(hw, | ||||
FALSE, TRUE, &abilities, NULL); | FALSE, TRUE, &abilities, NULL); | ||||
/* May need delay to detect fiber correctly */ | /* May need delay to detect fiber correctly */ | ||||
if (aq_error == I40E_ERR_UNKNOWN_PHY) { | if (aq_error == I40E_ERR_UNKNOWN_PHY) { | ||||
i40e_msec_delay(200); | i40e_msec_delay(200); | ||||
aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, | aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, | ||||
TRUE, &abilities, NULL); | TRUE, &abilities, NULL); | ||||
} | } | ||||
if (aq_error) { | if (aq_error) { | ||||
if (aq_error == I40E_ERR_UNKNOWN_PHY) | if (aq_error == I40E_ERR_UNKNOWN_PHY) | ||||
device_printf(dev, "Unknown PHY type detected!\n"); | device_printf(dev, "Unknown PHY type detected!\n"); | ||||
else | else | ||||
device_printf(dev, | device_printf(dev, | ||||
"Error getting supported media types, err %d," | "Error getting supported media types, err %d," | ||||
" AQ error %d\n", aq_error, hw->aq.asq_last_status); | " AQ error %d\n", aq_error, hw->aq.asq_last_status); | ||||
return (0); | return (0); | ||||
} | } | ||||
ixl_add_ifmedia(vsi, abilities.phy_type); | ixl_add_ifmedia(vsi, abilities.phy_type); | ||||
/* Use autoselect media by default */ | /* Use autoselect media by default */ | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); | ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); | ||||
ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO); | ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO); | ||||
ether_ifattach(ifp, hw->mac.addr); | |||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
** Run when the Admin Queue gets a | ** Run when the Admin Queue gets a | ||||
** link transition interrupt. | ** link transition interrupt. | ||||
*/ | */ | ||||
static void | static void | ||||
Show All 9 Lines | ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e) | ||||
pf->link_up = check; | pf->link_up = check; | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
printf("Link is %s\n", check ? "up":"down"); | printf("Link is %s\n", check ? "up":"down"); | ||||
#endif | #endif | ||||
/* Report if Unqualified modules are found */ | /* Report if Unqualified modules are found */ | ||||
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && | if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && | ||||
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && | (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && | ||||
(!(status->link_info & I40E_AQ_LINK_UP))) | (!(status->link_info & I40E_AQ_LINK_UP))) | ||||
device_printf(pf->dev, "Link failed because " | device_printf(iflib_get_dev(pf->vsi.ctx), "Link failed because " | ||||
"an unqualified module was detected\n"); | "an unqualified module was detected\n"); | ||||
return; | return; | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Get Firmware Switch configuration | * Get Firmware Switch configuration | ||||
* - this will need to be more robust when more complex | * - this will need to be more robust when more complex | ||||
* switch configurations are enabled. | * switch configurations are enabled. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static int | static int | ||||
ixl_switch_config(struct ixl_pf *pf) | ixl_switch_config(struct ixl_pf *pf) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
device_t dev = vsi->dev; | device_t dev = iflib_get_dev(vsi->ctx); | ||||
struct i40e_aqc_get_switch_config_resp *sw_config; | struct i40e_aqc_get_switch_config_resp *sw_config; | ||||
u8 aq_buf[I40E_AQ_LARGE_BUF]; | u8 aq_buf[I40E_AQ_LARGE_BUF]; | ||||
int ret; | int ret; | ||||
u16 next = 0; | u16 next = 0; | ||||
memset(&aq_buf, 0, sizeof(aq_buf)); | memset(&aq_buf, 0, sizeof(aq_buf)); | ||||
sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; | sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; | ||||
ret = i40e_aq_get_switch_config(hw, sw_config, | ret = i40e_aq_get_switch_config(hw, sw_config, | ||||
Show All 28 Lines | |||||
* Initialize the VSI: this handles contexts, which means things | * Initialize the VSI: this handles contexts, which means things | ||||
* like the number of descriptors, buffer size, | * like the number of descriptors, buffer size, | ||||
* plus we init the rings thru this function. | * plus we init the rings thru this function. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static int | static int | ||||
ixl_initialize_vsi(struct ixl_vsi *vsi) | ixl_initialize_vsi(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ixl_pf *pf = vsi->back; | if_shared_ctx_t sctx = ixl_sctx; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
device_t dev = vsi->dev; | device_t dev = iflib_get_dev(vsi->ctx); | ||||
struct i40e_hw *hw = vsi->hw; | struct i40e_hw *hw = vsi->hw; | ||||
struct i40e_vsi_context ctxt; | struct i40e_vsi_context ctxt; | ||||
int err = 0; | int err = 0; | ||||
struct ifnet *ifp = iflib_get_ifp(vsi->ctx); | |||||
memset(&ctxt, 0, sizeof(ctxt)); | memset(&ctxt, 0, sizeof(ctxt)); | ||||
ctxt.seid = vsi->seid; | ctxt.seid = vsi->seid; | ||||
#ifdef notyet | |||||
if (pf->veb_seid != 0) | if (pf->veb_seid != 0) | ||||
ctxt.uplink_seid = pf->veb_seid; | ctxt.uplink_seid = pf->veb_seid; | ||||
#endif | |||||
ctxt.pf_num = hw->pf_id; | ctxt.pf_num = hw->pf_id; | ||||
err = i40e_aq_get_vsi_params(hw, &ctxt, NULL); | err = i40e_aq_get_vsi_params(hw, &ctxt, NULL); | ||||
if (err) { | if (err) { | ||||
device_printf(dev,"get vsi params failed %x!!\n", err); | device_printf(dev,"get vsi params failed %x!!\n", err); | ||||
return (err); | return (err); | ||||
} | } | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, " | printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, " | ||||
Show All 12 Lines | #endif | ||||
ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; | ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; | ||||
ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG; | ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG; | ||||
ctxt.info.queue_mapping[0] = 0; | ctxt.info.queue_mapping[0] = 0; | ||||
ctxt.info.tc_mapping[0] = 0x0800; | ctxt.info.tc_mapping[0] = 0x0800; | ||||
/* Set VLAN receive stripping mode */ | /* Set VLAN receive stripping mode */ | ||||
ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID; | ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID; | ||||
ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL; | ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL; | ||||
if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING) | if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) | ||||
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; | ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; | ||||
else | else | ||||
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING; | ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING; | ||||
/* Keep copy of VSI info in VSI for statistic counters */ | /* Keep copy of VSI info in VSI for statistic counters */ | ||||
memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); | memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info)); | ||||
/* Reset VSI statistics */ | /* Reset VSI statistics */ | ||||
Show All 15 Lines | for (int i = 0; i < vsi->num_queues; i++, que++) { | ||||
struct rx_ring *rxr = &que->rxr; | struct rx_ring *rxr = &que->rxr; | ||||
struct i40e_hmc_obj_txq tctx; | struct i40e_hmc_obj_txq tctx; | ||||
struct i40e_hmc_obj_rxq rctx; | struct i40e_hmc_obj_rxq rctx; | ||||
u32 txctl; | u32 txctl; | ||||
u16 size; | u16 size; | ||||
/* Setup the HMC TX Context */ | /* Setup the HMC TX Context */ | ||||
size = que->num_desc * sizeof(struct i40e_tx_desc); | size = sctx->isc_ntxd * sizeof(struct i40e_tx_desc); | ||||
memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq)); | memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq)); | ||||
tctx.new_context = 1; | tctx.new_context = 1; | ||||
tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS); | |||||
tctx.qlen = que->num_desc; | tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS); | ||||
tctx.qlen = sctx->isc_ntxd; | |||||
tctx.fc_ena = 0; | tctx.fc_ena = 0; | ||||
tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */ | tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */ | ||||
/* Enable HEAD writeback */ | /* Enable HEAD writeback */ | ||||
tctx.head_wb_ena = 1; | tctx.head_wb_ena = 1; | ||||
tctx.head_wb_addr = txr->dma.pa + | tctx.head_wb_addr = txr->tx_paddr + | ||||
(que->num_desc * sizeof(struct i40e_tx_desc)); | (sctx->isc_ntxd * sizeof(struct i40e_tx_desc)); | ||||
tctx.rdylist_act = 0; | tctx.rdylist_act = 0; | ||||
err = i40e_clear_lan_tx_queue_context(hw, i); | err = i40e_clear_lan_tx_queue_context(hw, i); | ||||
if (err) { | if (err) { | ||||
device_printf(dev, "Unable to clear TX context\n"); | device_printf(dev, "Unable to clear TX context\n"); | ||||
break; | break; | ||||
} | } | ||||
err = i40e_set_lan_tx_queue_context(hw, i, &tctx); | err = i40e_set_lan_tx_queue_context(hw, i, &tctx); | ||||
if (err) { | if (err) { | ||||
Show All 23 Lines | for (int i = 0; i < vsi->num_queues; i++, que++) { | ||||
rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT; | rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT; | ||||
/* ignore header split for now */ | /* ignore header split for now */ | ||||
rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; | rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; | ||||
rctx.rxmax = (vsi->max_frame_size < max_rxmax) ? | rctx.rxmax = (vsi->max_frame_size < max_rxmax) ? | ||||
vsi->max_frame_size : max_rxmax; | vsi->max_frame_size : max_rxmax; | ||||
rctx.dtype = 0; | rctx.dtype = 0; | ||||
rctx.dsize = 1; /* do 32byte descriptors */ | rctx.dsize = 1; /* do 32byte descriptors */ | ||||
rctx.hsplit_0 = 0; /* no HDR split initially */ | rctx.hsplit_0 = 0; /* no HDR split initially */ | ||||
rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS); | rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS); | ||||
rctx.qlen = que->num_desc; | rctx.qlen = sctx->isc_nrxd; | ||||
rctx.tphrdesc_ena = 1; | rctx.tphrdesc_ena = 1; | ||||
rctx.tphwdesc_ena = 1; | rctx.tphwdesc_ena = 1; | ||||
rctx.tphdata_ena = 0; | rctx.tphdata_ena = 0; | ||||
rctx.tphhead_ena = 0; | rctx.tphhead_ena = 0; | ||||
rctx.lrxqthresh = 2; | rctx.lrxqthresh = 2; | ||||
rctx.crcstrip = 1; | rctx.crcstrip = 1; | ||||
rctx.l2tsel = 1; | rctx.l2tsel = 1; | ||||
rctx.showiv = 1; | rctx.showiv = 1; | ||||
rctx.fc_ena = 0; | rctx.fc_ena = 0; | ||||
rctx.prefena = 1; | rctx.prefena = 1; | ||||
err = i40e_clear_lan_rx_queue_context(hw, i); | err = i40e_clear_lan_rx_queue_context(hw, i); | ||||
if (err) { | if (err) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Unable to clear RX context %d\n", i); | "Unable to clear RX context %d\n", i); | ||||
break; | break; | ||||
} | } | ||||
err = i40e_set_lan_rx_queue_context(hw, i, &rctx); | err = i40e_set_lan_rx_queue_context(hw, i, &rctx); | ||||
if (err) { | if (err) { | ||||
device_printf(dev, "Unable to set RX context %d\n", i); | device_printf(dev, "Unable to set RX context %d\n", i); | ||||
break; | break; | ||||
} | } | ||||
err = ixl_init_rx_ring(que); | |||||
if (err) { | |||||
device_printf(dev, "Fail in init_rx_ring %d\n", i); | |||||
break; | |||||
} | } | ||||
wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0); | |||||
#ifdef DEV_NETMAP | |||||
/* preserve queue */ | |||||
if (vsi->ifp->if_capenable & IFCAP_NETMAP) { | |||||
struct netmap_adapter *na = NA(vsi->ifp); | |||||
struct netmap_kring *kring = &na->rx_rings[i]; | |||||
int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); | |||||
wr32(vsi->hw, I40E_QRX_TAIL(que->me), t); | |||||
} else | |||||
#endif /* DEV_NETMAP */ | |||||
wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1); | |||||
} | |||||
return (err); | return (err); | ||||
} | } | ||||
/*********************************************************************
 *
 *  Free all VSI structs.
 *
 *  Tears down every station queue (TX then RX rings: soft state,
 *  descriptor DMA memory and ring mutexes), then releases the queue
 *  array and the VSI's MAC filter list.  Queues whose mutexes were
 *  never initialized are skipped, so this is safe to call on a
 *  partially-constructed VSI.
 *
 **********************************************************************/
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
	struct ixl_queue	*que = vsi->queues;

	/* Free station queues */
	for (int i = 0; i < vsi->num_queues; i++, que++) {
		struct tx_ring *txr = &que->txr;
		struct rx_ring *rxr = &que->rxr;

		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
			continue;
		IXL_TX_LOCK(txr);
		ixl_free_que_tx(que);
		if (txr->base)
			i40e_free_dma_mem(&pf->hw, &txr->dma);
		IXL_TX_UNLOCK(txr);
		IXL_TX_LOCK_DESTROY(txr);

		/* NOTE(review): if the RX mutex is uninitialized we
		 * `continue`, skipping the remaining queues' checks only
		 * for this iteration — matches original control flow. */
		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
			continue;
		IXL_RX_LOCK(rxr);
		ixl_free_que_rx(que);
		if (rxr->base)
			i40e_free_dma_mem(&pf->hw, &rxr->dma);
		IXL_RX_UNLOCK(rxr);
		IXL_RX_LOCK_DESTROY(rxr);
	}
	free(vsi->queues, M_DEVBUF);

	/* Free VSI filter list */
	ixl_free_mac_filters(vsi);
}
static void | static void | ||||
ixl_free_mac_filters(struct ixl_vsi *vsi) | ixl_free_mac_filters(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ixl_mac_filter *f; | struct ixl_mac_filter *f; | ||||
while (!SLIST_EMPTY(&vsi->ftl)) { | while (!SLIST_EMPTY(&vsi->ftl)) { | ||||
f = SLIST_FIRST(&vsi->ftl); | f = SLIST_FIRST(&vsi->ftl); | ||||
SLIST_REMOVE_HEAD(&vsi->ftl, next); | SLIST_REMOVE_HEAD(&vsi->ftl, next); | ||||
free(f, M_DEVBUF); | free(f, M_IXL); | ||||
} | } | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Allocate memory for the VSI (virtual station interface) and their | * Allocate memory for the VSI (virtual station interface) and their | ||||
* associated queues, rings and the descriptors associated with each, | * associated queues, rings and the descriptors associated with each, | ||||
* called only once at attach. | * called only once at attach. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static int | |||||
ixl_setup_stations(struct ixl_pf *pf) | |||||
{ | |||||
device_t dev = pf->dev; | |||||
struct ixl_vsi *vsi; | |||||
struct ixl_queue *que; | |||||
struct tx_ring *txr; | |||||
struct rx_ring *rxr; | |||||
int rsize, tsize; | |||||
int error = I40E_SUCCESS; | |||||
vsi = &pf->vsi; | |||||
vsi->back = (void *)pf; | |||||
vsi->hw = &pf->hw; | |||||
vsi->id = 0; | |||||
vsi->num_vlans = 0; | |||||
vsi->back = pf; | |||||
/* Get memory for the station queues */ | |||||
if (!(vsi->queues = | |||||
(struct ixl_queue *) malloc(sizeof(struct ixl_queue) * | |||||
vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { | |||||
device_printf(dev, "Unable to allocate queue memory\n"); | |||||
error = ENOMEM; | |||||
goto early; | |||||
} | |||||
for (int i = 0; i < vsi->num_queues; i++) { | |||||
que = &vsi->queues[i]; | |||||
que->num_desc = ixl_ringsz; | |||||
que->me = i; | |||||
que->vsi = vsi; | |||||
/* mark the queue as active */ | |||||
vsi->active_queues |= (u64)1 << que->me; | |||||
txr = &que->txr; | |||||
txr->que = que; | |||||
txr->tail = I40E_QTX_TAIL(que->me); | |||||
/* Initialize the TX lock */ | |||||
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", | |||||
device_get_nameunit(dev), que->me); | |||||
mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); | |||||
/* Create the TX descriptor ring */ | |||||
tsize = roundup2((que->num_desc * | |||||
sizeof(struct i40e_tx_desc)) + | |||||
sizeof(u32), DBA_ALIGN); | |||||
if (i40e_allocate_dma_mem(&pf->hw, | |||||
&txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { | |||||
device_printf(dev, | |||||
"Unable to allocate TX Descriptor memory\n"); | |||||
error = ENOMEM; | |||||
goto fail; | |||||
} | |||||
txr->base = (struct i40e_tx_desc *)txr->dma.va; | |||||
bzero((void *)txr->base, tsize); | |||||
/* Now allocate transmit soft structs for the ring */ | |||||
if (ixl_allocate_tx_data(que)) { | |||||
device_printf(dev, | |||||
"Critical Failure setting up TX structures\n"); | |||||
error = ENOMEM; | |||||
goto fail; | |||||
} | |||||
/* Allocate a buf ring */ | |||||
txr->br = buf_ring_alloc(4096, M_DEVBUF, | |||||
M_WAITOK, &txr->mtx); | |||||
if (txr->br == NULL) { | |||||
device_printf(dev, | |||||
"Critical Failure setting up TX buf ring\n"); | |||||
error = ENOMEM; | |||||
goto fail; | |||||
} | |||||
/* | /* | ||||
* Next the RX queues... | |||||
*/ | |||||
rsize = roundup2(que->num_desc * | |||||
sizeof(union i40e_rx_desc), DBA_ALIGN); | |||||
rxr = &que->rxr; | |||||
rxr->que = que; | |||||
rxr->tail = I40E_QRX_TAIL(que->me); | |||||
/* Initialize the RX side lock */ | |||||
snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", | |||||
device_get_nameunit(dev), que->me); | |||||
mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); | |||||
if (i40e_allocate_dma_mem(&pf->hw, | |||||
&rxr->dma, i40e_mem_reserved, rsize, 4096)) { | |||||
device_printf(dev, | |||||
"Unable to allocate RX Descriptor memory\n"); | |||||
error = ENOMEM; | |||||
goto fail; | |||||
} | |||||
rxr->base = (union i40e_rx_desc *)rxr->dma.va; | |||||
bzero((void *)rxr->base, rsize); | |||||
/* Allocate receive soft structs for the ring*/ | |||||
if (ixl_allocate_rx_data(que)) { | |||||
device_printf(dev, | |||||
"Critical Failure setting up receive structs\n"); | |||||
error = ENOMEM; | |||||
goto fail; | |||||
} | |||||
} | |||||
return (0); | |||||
fail: | |||||
for (int i = 0; i < vsi->num_queues; i++) { | |||||
que = &vsi->queues[i]; | |||||
rxr = &que->rxr; | |||||
txr = &que->txr; | |||||
if (rxr->base) | |||||
i40e_free_dma_mem(&pf->hw, &rxr->dma); | |||||
if (txr->base) | |||||
i40e_free_dma_mem(&pf->hw, &txr->dma); | |||||
} | |||||
early: | |||||
return (error); | |||||
} | |||||
/* | |||||
** Provide a update to the queue RX | ** Provide a update to the queue RX | ||||
** interrupt moderation value. | ** interrupt moderation value. | ||||
*/ | */ | ||||
static void | static void | ||||
ixl_set_queue_rx_itr(struct ixl_queue *que) | ixl_set_queue_rx_itr(struct ixl_queue *que) | ||||
{ | { | ||||
struct ixl_vsi *vsi = que->vsi; | struct ixl_vsi *vsi = que->vsi; | ||||
struct i40e_hw *hw = vsi->hw; | struct i40e_hw *hw = vsi->hw; | ||||
▲ Show 20 Lines • Show All 53 Lines • ▼ Show 20 Lines | if (ixl_dynamic_rx_itr) { | ||||
if (rxr->itr != vsi->rx_itr_setting) { | if (rxr->itr != vsi->rx_itr_setting) { | ||||
rxr->itr = vsi->rx_itr_setting; | rxr->itr = vsi->rx_itr_setting; | ||||
wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, | wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, | ||||
que->me), rxr->itr); | que->me), rxr->itr); | ||||
} | } | ||||
} | } | ||||
rxr->bytes = 0; | rxr->bytes = 0; | ||||
rxr->packets = 0; | rxr->packets = 0; | ||||
return; | |||||
} | } | ||||
/* | /* | ||||
** Provide a update to the queue TX | ** Provide a update to the queue TX | ||||
** interrupt moderation value. | ** interrupt moderation value. | ||||
*/ | */ | ||||
static void | static void | ||||
▲ Show 20 Lines • Show All 57 Lines • ▼ Show 20 Lines | if (ixl_dynamic_tx_itr) { | ||||
if (txr->itr != vsi->tx_itr_setting) { | if (txr->itr != vsi->tx_itr_setting) { | ||||
txr->itr = vsi->tx_itr_setting; | txr->itr = vsi->tx_itr_setting; | ||||
wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, | wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, | ||||
que->me), txr->itr); | que->me), txr->itr); | ||||
} | } | ||||
} | } | ||||
txr->bytes = 0; | txr->bytes = 0; | ||||
txr->packets = 0; | txr->packets = 0; | ||||
return; | |||||
} | } | ||||
#define QUEUE_NAME_LEN 32 | #define QUEUE_NAME_LEN 32 | ||||
static void | static void | ||||
ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, | ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, | ||||
struct sysctl_ctx_list *ctx, const char *sysctl_name) | struct sysctl_ctx_list *ctx, const char *sysctl_name) | ||||
{ | { | ||||
struct sysctl_oid *tree; | struct sysctl_oid *tree; | ||||
struct sysctl_oid_list *child; | struct sysctl_oid_list *child; | ||||
struct sysctl_oid_list *vsi_list; | struct sysctl_oid_list *vsi_list; | ||||
tree = device_get_sysctl_tree(pf->dev); | tree = device_get_sysctl_tree(iflib_get_dev(vsi->ctx)); | ||||
child = SYSCTL_CHILDREN(tree); | child = SYSCTL_CHILDREN(tree); | ||||
vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name, | vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name, | ||||
CTLFLAG_RD, NULL, "VSI Number"); | CTLFLAG_RD, NULL, "VSI Number"); | ||||
vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); | vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); | ||||
ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats); | ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats); | ||||
} | } | ||||
static void | static void | ||||
ixl_add_hw_stats(struct ixl_pf *pf) | ixl_add_hw_stats(struct ixl_pf *pf) | ||||
{ | { | ||||
device_t dev = pf->dev; | |||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
device_t dev = iflib_get_dev(vsi->ctx); | |||||
struct ixl_queue *queues = vsi->queues; | struct ixl_queue *queues = vsi->queues; | ||||
struct i40e_hw_port_stats *pf_stats = &pf->stats; | struct i40e_hw_port_stats *pf_stats = &pf->stats; | ||||
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); | struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); | ||||
struct sysctl_oid *tree = device_get_sysctl_tree(dev); | struct sysctl_oid *tree = device_get_sysctl_tree(dev); | ||||
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); | struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); | ||||
struct sysctl_oid_list *vsi_list; | struct sysctl_oid_list *vsi_list; | ||||
▲ Show 20 Lines • Show All 149 Lines • ▼ Show 20 Lines | ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, | ||||
while (entry->stat != 0) | while (entry->stat != 0) | ||||
{ | { | ||||
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, | SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, | ||||
CTLFLAG_RD, entry->stat, | CTLFLAG_RD, entry->stat, | ||||
entry->description); | entry->description); | ||||
entry++; | entry++; | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
** ixl_config_rss - setup RSS | ** ixl_config_rss - setup RSS | ||||
** - note this is done for the single vsi | ** - note this is done for the single vsi | ||||
*/ | */ | ||||
static void ixl_config_rss(struct ixl_vsi *vsi) | static void ixl_config_rss(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | ||||
struct i40e_hw *hw = vsi->hw; | struct i40e_hw *hw = vsi->hw; | ||||
▲ Show 20 Lines • Show All 76 Lines • ▼ Show 20 Lines | lut = (lut << 8) | (que_id & | ||||
((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); | ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); | ||||
/* On i = 3, we have 4 entries in lut; write to the register */ | /* On i = 3, we have 4 entries in lut; write to the register */ | ||||
if ((i & 3) == 3) | if ((i & 3) == 3) | ||||
wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); | wr32(hw, I40E_PFQF_HLUT(i >> 2), lut); | ||||
} | } | ||||
ixl_flush(hw); | ixl_flush(hw); | ||||
} | } | ||||
/* | /* | ||||
** This routine is run via an vlan config EVENT, | |||||
** it enables us to use the HW Filter table since | |||||
** we can get the vlan id. This just creates the | |||||
** entry in the soft version of the VFTA, init will | |||||
** repopulate the real table. | |||||
*/ | |||||
static void | |||||
ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) | |||||
{ | |||||
struct ixl_vsi *vsi = ifp->if_softc; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | |||||
if (ifp->if_softc != arg) /* Not our event */ | |||||
return; | |||||
if ((vtag == 0) || (vtag > 4095)) /* Invalid */ | |||||
return; | |||||
IXL_PF_LOCK(pf); | |||||
++vsi->num_vlans; | |||||
ixl_add_filter(vsi, hw->mac.addr, vtag); | |||||
IXL_PF_UNLOCK(pf); | |||||
} | |||||
/* | |||||
** This routine is run via an vlan | |||||
** unconfig EVENT, remove our entry | |||||
** in the soft vfta. | |||||
*/ | |||||
static void | |||||
ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) | |||||
{ | |||||
struct ixl_vsi *vsi = ifp->if_softc; | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | |||||
if (ifp->if_softc != arg) | |||||
return; | |||||
if ((vtag == 0) || (vtag > 4095)) /* Invalid */ | |||||
return; | |||||
IXL_PF_LOCK(pf); | |||||
--vsi->num_vlans; | |||||
ixl_del_filter(vsi, hw->mac.addr, vtag); | |||||
IXL_PF_UNLOCK(pf); | |||||
} | |||||
/* | |||||
** This routine updates vlan filters, called by init | ** This routine updates vlan filters, called by init | ||||
** it scans the filter table and then updates the hw | ** it scans the filter table and then updates the hw | ||||
** after a soft reset. | ** after a soft reset. | ||||
*/ | */ | ||||
static void | static void | ||||
ixl_setup_vlan_filters(struct ixl_vsi *vsi) | ixl_setup_vlan_filters(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ixl_mac_filter *f; | struct ixl_mac_filter *f; | ||||
Show All 16 Lines | ixl_setup_vlan_filters(struct ixl_vsi *vsi) | ||||
} | } | ||||
if (cnt == 0) { | if (cnt == 0) { | ||||
printf("setup vlan: no filters found!\n"); | printf("setup vlan: no filters found!\n"); | ||||
return; | return; | ||||
} | } | ||||
flags = IXL_FILTER_VLAN; | flags = IXL_FILTER_VLAN; | ||||
flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); | flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); | ||||
ixl_add_hw_filters(vsi, flags, cnt); | ixl_add_hw_filters(vsi, flags, cnt); | ||||
return; | |||||
} | } | ||||
/* | /* | ||||
** Initialize filter list and add filters that the hardware | ** Initialize filter list and add filters that the hardware | ||||
** needs to know about. | ** needs to know about. | ||||
*/ | */ | ||||
static void | static void | ||||
ixl_init_filters(struct ixl_vsi *vsi) | ixl_init_filters(struct ixl_vsi *vsi) | ||||
Show All 19 Lines | ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) | ||||
if (f == NULL) { | if (f == NULL) { | ||||
printf("WARNING: no filter available!!\n"); | printf("WARNING: no filter available!!\n"); | ||||
return; | return; | ||||
} | } | ||||
bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); | bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); | ||||
f->vlan = IXL_VLAN_ANY; | f->vlan = IXL_VLAN_ANY; | ||||
f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED | f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED | ||||
| IXL_FILTER_MC); | | IXL_FILTER_MC); | ||||
return; | |||||
} | } | ||||
/*
 * Re-program every filter still marked IXL_FILTER_USED into the
 * hardware.  NOTE(review): presumably called after a reset has wiped
 * the HW filter table — confirm against callers.
 */
static void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
}
/* | /* | ||||
** This routine is run via an vlan config EVENT, | |||||
** it enables us to use the HW Filter table since | |||||
** we can get the vlan id. This just creates the | |||||
** entry in the soft version of the VFTA, init will | |||||
** repopulate the real table. | |||||
*/ | |||||
void | |||||
ixl_if_vlan_register(if_ctx_t ctx, u16 vtag) | |||||
{ | |||||
struct ixl_vsi *vsi = iflib_get_softc(ctx); | |||||
struct i40e_hw *hw = vsi->hw; | |||||
if ((vtag == 0) || (vtag > 4095)) /* Invalid */ | |||||
return; | |||||
++vsi->num_vlans; | |||||
ixl_add_filter(vsi, hw->mac.addr, vtag); | |||||
} | |||||
/* | |||||
** This routine is run via an vlan | |||||
** unconfig EVENT, remove our entry | |||||
** in the soft vfta. | |||||
*/ | |||||
void | |||||
ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag) | |||||
{ | |||||
struct ixl_vsi *vsi = iflib_get_softc(ctx); | |||||
struct i40e_hw *hw = vsi->hw; | |||||
if ((vtag == 0) || (vtag > 4095)) /* Invalid */ | |||||
return; | |||||
--vsi->num_vlans; | |||||
ixl_del_filter(vsi, hw->mac.addr, vtag); | |||||
} | |||||
/* | |||||
** This routine adds macvlan filters | ** This routine adds macvlan filters | ||||
*/ | */ | ||||
static void | void | ||||
ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) | ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) | ||||
{ | { | ||||
struct ixl_mac_filter *f, *tmp; | struct ixl_mac_filter *f, *tmp; | ||||
struct ixl_pf *pf; | |||||
device_t dev; | |||||
DEBUGOUT("ixl_add_filter: begin"); | DEBUGOUT("ixl_add_filter: begin"); | ||||
pf = vsi->back; | |||||
dev = pf->dev; | |||||
/* Does one already exist */ | /* Does one already exist */ | ||||
f = ixl_find_filter(vsi, macaddr, vlan); | f = ixl_find_filter(vsi, macaddr, vlan); | ||||
if (f != NULL) | if (f != NULL) | ||||
return; | return; | ||||
/* | /* | ||||
** Is this the first vlan being registered, if so we | ** Is this the first vlan being registered, if so we | ||||
** need to remove the ANY filter that indicates we are | ** need to remove the ANY filter that indicates we are | ||||
** not in a vlan, and replace that with a 0 filter. | ** not in a vlan, and replace that with a 0 filter. | ||||
*/ | */ | ||||
if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { | if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { | ||||
tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); | tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); | ||||
if (tmp != NULL) { | if (tmp != NULL) { | ||||
ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); | ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); | ||||
ixl_add_filter(vsi, macaddr, 0); | ixl_add_filter(vsi, macaddr, 0); | ||||
} | } | ||||
} | } | ||||
f = ixl_get_filter(vsi); | f = ixl_get_filter(vsi); | ||||
if (f == NULL) { | if (f == NULL) { | ||||
device_printf(dev, "WARNING: no filter available!!\n"); | device_printf(iflib_get_dev(vsi->ctx), "WARNING: no filter available!!\n"); | ||||
return; | return; | ||||
} | } | ||||
bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); | bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); | ||||
f->vlan = vlan; | f->vlan = vlan; | ||||
f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); | f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); | ||||
if (f->vlan != IXL_VLAN_ANY) | if (f->vlan != IXL_VLAN_ANY) | ||||
f->flags |= IXL_FILTER_VLAN; | f->flags |= IXL_FILTER_VLAN; | ||||
else | else | ||||
vsi->num_macs++; | vsi->num_macs++; | ||||
ixl_add_hw_filters(vsi, f->flags, 1); | ixl_add_hw_filters(vsi, f->flags, 1); | ||||
return; | return; | ||||
} | } | ||||
static void | void | ||||
ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) | ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan) | ||||
{ | { | ||||
struct ixl_mac_filter *f; | struct ixl_mac_filter *f; | ||||
f = ixl_find_filter(vsi, macaddr, vlan); | f = ixl_find_filter(vsi, macaddr, vlan); | ||||
if (f == NULL) | if (f == NULL) | ||||
return; | return; | ||||
Show All 38 Lines | |||||
** table and creates an Admin Queue call to create | ** table and creates an Admin Queue call to create | ||||
** the filters in the hardware. | ** the filters in the hardware. | ||||
*/ | */ | ||||
static void | static void | ||||
ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) | ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) | ||||
{ | { | ||||
struct i40e_aqc_add_macvlan_element_data *a, *b; | struct i40e_aqc_add_macvlan_element_data *a, *b; | ||||
struct ixl_mac_filter *f; | struct ixl_mac_filter *f; | ||||
struct ixl_pf *pf; | struct i40e_hw *hw = vsi->hw; | ||||
struct i40e_hw *hw; | device_t dev = iflib_get_dev(vsi->ctx); | ||||
device_t dev; | |||||
int err, j = 0; | int err, j = 0; | ||||
pf = vsi->back; | |||||
dev = pf->dev; | |||||
hw = &pf->hw; | |||||
IXL_PF_LOCK_ASSERT(pf); | |||||
a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, | a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, | ||||
M_DEVBUF, M_NOWAIT | M_ZERO); | M_IXL, M_NOWAIT | M_ZERO); | ||||
if (a == NULL) { | if (a == NULL) { | ||||
device_printf(dev, "add_hw_filters failed to get memory\n"); | device_printf(dev, "add_hw_filters failed to get memory\n"); | ||||
return; | return; | ||||
} | } | ||||
/* | /* | ||||
** Scan the filter list, each time we find one | ** Scan the filter list, each time we find one | ||||
** we add it to the admin queue array and turn off | ** we add it to the admin queue array and turn off | ||||
Show All 20 Lines | ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) | ||||
if (j > 0) { | if (j > 0) { | ||||
err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); | err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); | ||||
if (err) | if (err) | ||||
device_printf(dev, "aq_add_macvlan err %d, " | device_printf(dev, "aq_add_macvlan err %d, " | ||||
"aq_error %d\n", err, hw->aq.asq_last_status); | "aq_error %d\n", err, hw->aq.asq_last_status); | ||||
else | else | ||||
vsi->hw_filters_add += j; | vsi->hw_filters_add += j; | ||||
} | } | ||||
free(a, M_DEVBUF); | free(a, M_IXL); | ||||
return; | return; | ||||
} | } | ||||
/* | /* | ||||
** This routine takes removals in the vsi filter | ** This routine takes removals in the vsi filter | ||||
** table and creates an Admin Queue call to delete | ** table and creates an Admin Queue call to delete | ||||
** the filters in the hardware. | ** the filters in the hardware. | ||||
*/ | */ | ||||
static void | static void | ||||
ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) | ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) | ||||
{ | { | ||||
struct i40e_aqc_remove_macvlan_element_data *d, *e; | struct i40e_aqc_remove_macvlan_element_data *d, *e; | ||||
struct ixl_pf *pf; | struct i40e_hw *hw = vsi->hw; | ||||
struct i40e_hw *hw; | device_t dev = iflib_get_dev(vsi->ctx); | ||||
device_t dev; | |||||
struct ixl_mac_filter *f, *f_temp; | struct ixl_mac_filter *f, *f_temp; | ||||
int err, j = 0; | int err, j = 0; | ||||
DEBUGOUT("ixl_del_hw_filters: begin\n"); | DEBUGOUT("ixl_del_hw_filters: begin\n"); | ||||
pf = vsi->back; | |||||
hw = &pf->hw; | |||||
dev = pf->dev; | |||||
d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, | d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, | ||||
M_DEVBUF, M_NOWAIT | M_ZERO); | M_IXL, M_NOWAIT | M_ZERO); | ||||
if (d == NULL) { | if (d == NULL) { | ||||
printf("del hw filter failed to get memory\n"); | printf("del hw filter failed to get memory\n"); | ||||
return; | return; | ||||
} | } | ||||
SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { | SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { | ||||
if (f->flags & IXL_FILTER_DEL) { | if (f->flags & IXL_FILTER_DEL) { | ||||
e = &d[j]; // a pox on fvl long names :) | e = &d[j]; // a pox on fvl long names :) | ||||
bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); | bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); | ||||
e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan); | e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan); | ||||
e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; | e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; | ||||
/* delete entry from vsi list */ | /* delete entry from vsi list */ | ||||
SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); | SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); | ||||
free(f, M_DEVBUF); | free(f, M_IXL); | ||||
j++; | j++; | ||||
} | } | ||||
if (j == cnt) | if (j == cnt) | ||||
break; | break; | ||||
} | } | ||||
if (j > 0) { | if (j > 0) { | ||||
err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); | err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); | ||||
/* NOTE: returns ENOENT every time but seems to work fine, | /* NOTE: returns ENOENT every time but seems to work fine, | ||||
so we'll ignore that specific error. */ | so we'll ignore that specific error. */ | ||||
// TODO: Does this still occur on current firmwares? | // TODO: Does this still occur on current firmwares? | ||||
if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) { | if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) { | ||||
int sc = 0; | int sc = 0; | ||||
for (int i = 0; i < j; i++) | for (int i = 0; i < j; i++) | ||||
sc += (!d[i].error_code); | sc += (!d[i].error_code); | ||||
vsi->hw_filters_del += sc; | vsi->hw_filters_del += sc; | ||||
device_printf(dev, | device_printf(dev, | ||||
"Failed to remove %d/%d filters, aq error %d\n", | "Failed to remove %d/%d filters, aq error %d\n", | ||||
j - sc, j, hw->aq.asq_last_status); | j - sc, j, hw->aq.asq_last_status); | ||||
} else | } else | ||||
vsi->hw_filters_del += j; | vsi->hw_filters_del += j; | ||||
} | } | ||||
free(d, M_DEVBUF); | free(d, M_IXL); | ||||
DEBUGOUT("ixl_del_hw_filters: end\n"); | DEBUGOUT("ixl_del_hw_filters: end\n"); | ||||
return; | return; | ||||
} | } | ||||
static int | static int | ||||
ixl_enable_rings(struct ixl_vsi *vsi) | ixl_enable_rings(struct ixl_vsi *vsi) | ||||
{ | { | ||||
Show All 14 Lines | for (int i = 0; i < vsi->num_queues; i++) { | ||||
/* Verify the enable took */ | /* Verify the enable took */ | ||||
for (int j = 0; j < 10; j++) { | for (int j = 0; j < 10; j++) { | ||||
reg = rd32(hw, I40E_QTX_ENA(index)); | reg = rd32(hw, I40E_QTX_ENA(index)); | ||||
if (reg & I40E_QTX_ENA_QENA_STAT_MASK) | if (reg & I40E_QTX_ENA_QENA_STAT_MASK) | ||||
break; | break; | ||||
i40e_msec_delay(10); | i40e_msec_delay(10); | ||||
} | } | ||||
if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { | if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { | ||||
device_printf(pf->dev, "TX queue %d disabled!\n", | device_printf(iflib_get_dev(vsi->ctx), "TX queue %d disabled!\n", | ||||
index); | index); | ||||
error = ETIMEDOUT; | error = ETIMEDOUT; | ||||
} | } | ||||
reg = rd32(hw, I40E_QRX_ENA(index)); | reg = rd32(hw, I40E_QRX_ENA(index)); | ||||
reg |= I40E_QRX_ENA_QENA_REQ_MASK | | reg |= I40E_QRX_ENA_QENA_REQ_MASK | | ||||
I40E_QRX_ENA_QENA_STAT_MASK; | I40E_QRX_ENA_QENA_STAT_MASK; | ||||
wr32(hw, I40E_QRX_ENA(index), reg); | wr32(hw, I40E_QRX_ENA(index), reg); | ||||
/* Verify the enable took */ | /* Verify the enable took */ | ||||
for (int j = 0; j < 10; j++) { | for (int j = 0; j < 10; j++) { | ||||
reg = rd32(hw, I40E_QRX_ENA(index)); | reg = rd32(hw, I40E_QRX_ENA(index)); | ||||
if (reg & I40E_QRX_ENA_QENA_STAT_MASK) | if (reg & I40E_QRX_ENA_QENA_STAT_MASK) | ||||
break; | break; | ||||
i40e_msec_delay(10); | i40e_msec_delay(10); | ||||
} | } | ||||
if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { | if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { | ||||
device_printf(pf->dev, "RX queue %d disabled!\n", | device_printf(iflib_get_dev(vsi->ctx), "RX queue %d disabled!\n", | ||||
index); | index); | ||||
error = ETIMEDOUT; | error = ETIMEDOUT; | ||||
} | } | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
static void | |||||
ixl_if_intr_enable(if_ctx_t ctx) | |||||
{ | |||||
struct ixl_vsi *vsi = iflib_get_softc(ctx); | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct ixl_queue *que = vsi->queues; | |||||
if (ixl_enable_msix) { | |||||
ixl_enable_adminq(hw); | |||||
for (int i = 0; i < vsi->num_queues; i++, que++) | |||||
ixl_if_queue_intr_enable(vsi->ctx, que->me); | |||||
} else | |||||
ixl_enable_legacy(hw); | |||||
} | |||||
static void | |||||
ixl_if_intr_disable(if_ctx_t ctx) | |||||
{ | |||||
struct ixl_vsi *vsi = iflib_get_softc(ctx); | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct ixl_queue *que = vsi->queues; | |||||
if (ixl_enable_msix) { | |||||
ixl_disable_adminq(hw); | |||||
for (int i = 0; i < vsi->num_queues; i++, que++) | |||||
ixl_if_queue_intr_disable(ctx, que->me); | |||||
} else | |||||
ixl_disable_legacy(hw); | |||||
} | |||||
static void | |||||
ixl_if_queue_intr_enable(if_ctx_t ctx, uint16_t qid) | |||||
{ | |||||
struct ixl_vsi *vsi = iflib_get_softc(ctx); | |||||
u32 reg; | |||||
reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | | |||||
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | | |||||
(IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); | |||||
wr32(vsi->hw, I40E_PFINT_DYN_CTLN(qid), reg); | |||||
} | |||||
static void | |||||
ixl_if_queue_intr_disable(if_ctx_t ctx, uint16_t qid) | |||||
{ | |||||
struct ixl_vsi *vsi = iflib_get_softc(ctx); | |||||
struct i40e_hw *hw = vsi->hw; | |||||
u32 reg; | |||||
reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; | |||||
wr32(hw, I40E_PFINT_DYN_CTLN(qid), reg); | |||||
} | |||||
static int | static int | ||||
ixl_disable_rings(struct ixl_vsi *vsi) | ixl_disable_rings(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ixl_pf *pf = vsi->back; | struct ixl_pf *pf = vsi->back; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
int index, error; | int index, error; | ||||
u32 reg; | u32 reg; | ||||
Show All 10 Lines | for (int i = 0; i < vsi->num_queues; i++) { | ||||
/* Verify the disable took */ | /* Verify the disable took */ | ||||
for (int j = 0; j < 10; j++) { | for (int j = 0; j < 10; j++) { | ||||
reg = rd32(hw, I40E_QTX_ENA(index)); | reg = rd32(hw, I40E_QTX_ENA(index)); | ||||
if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) | if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) | ||||
break; | break; | ||||
i40e_msec_delay(10); | i40e_msec_delay(10); | ||||
} | } | ||||
if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { | if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { | ||||
device_printf(pf->dev, "TX queue %d still enabled!\n", | device_printf(iflib_get_dev(vsi->ctx), "TX queue %d still enabled!\n", | ||||
index); | index); | ||||
error = ETIMEDOUT; | error = ETIMEDOUT; | ||||
} | } | ||||
reg = rd32(hw, I40E_QRX_ENA(index)); | reg = rd32(hw, I40E_QRX_ENA(index)); | ||||
reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; | reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; | ||||
wr32(hw, I40E_QRX_ENA(index), reg); | wr32(hw, I40E_QRX_ENA(index), reg); | ||||
/* Verify the disable took */ | /* Verify the disable took */ | ||||
for (int j = 0; j < 10; j++) { | for (int j = 0; j < 10; j++) { | ||||
reg = rd32(hw, I40E_QRX_ENA(index)); | reg = rd32(hw, I40E_QRX_ENA(index)); | ||||
if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) | if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) | ||||
break; | break; | ||||
i40e_msec_delay(10); | i40e_msec_delay(10); | ||||
} | } | ||||
if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { | if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { | ||||
device_printf(pf->dev, "RX queue %d still enabled!\n", | device_printf(iflib_get_dev(vsi->ctx), "RX queue %d still enabled!\n", | ||||
index); | index); | ||||
error = ETIMEDOUT; | error = ETIMEDOUT; | ||||
} | } | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
/** | /** | ||||
* ixl_handle_mdd_event | * ixl_handle_mdd_event | ||||
* | * | ||||
* Called from interrupt handler to identify possibly malicious vfs | * Called from interrupt handler to identify possibly malicious vfs | ||||
* (But also detects events from the PF, as well) | * (But also detects events from the PF, as well) | ||||
**/ | **/ | ||||
static void ixl_handle_mdd_event(struct ixl_pf *pf) | static void ixl_handle_mdd_event(struct ixl_pf *pf) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = iflib_get_dev(pf->vsi.ctx); | ||||
bool mdd_detected = false; | bool mdd_detected = false; | ||||
bool pf_mdd_detected = false; | bool pf_mdd_detected = false; | ||||
u32 reg; | u32 reg; | ||||
/* find what triggered the MDD event */ | /* find what triggered the MDD event */ | ||||
reg = rd32(hw, I40E_GL_MDET_TX); | reg = rd32(hw, I40E_GL_MDET_TX); | ||||
if (reg & I40E_GL_MDET_TX_VALID_MASK) { | if (reg & I40E_GL_MDET_TX_VALID_MASK) { | ||||
u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> | u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> | ||||
▲ Show 20 Lines • Show All 55 Lines • ▼ Show 20 Lines | |||||
ixl_enable_intr(struct ixl_vsi *vsi) | ixl_enable_intr(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct i40e_hw *hw = vsi->hw; | struct i40e_hw *hw = vsi->hw; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
if (ixl_enable_msix) { | if (ixl_enable_msix) { | ||||
ixl_enable_adminq(hw); | ixl_enable_adminq(hw); | ||||
for (int i = 0; i < vsi->num_queues; i++, que++) | for (int i = 0; i < vsi->num_queues; i++, que++) | ||||
ixl_enable_queue(hw, que->me); | ixl_if_queue_intr_enable(vsi->ctx, que->me); | ||||
} else | } else | ||||
ixl_enable_legacy(hw); | ixl_enable_legacy(hw); | ||||
} | } | ||||
static void | static void | ||||
ixl_disable_rings_intr(struct ixl_vsi *vsi) | |||||
{ | |||||
struct i40e_hw *hw = vsi->hw; | |||||
struct ixl_queue *que = vsi->queues; | |||||
for (int i = 0; i < vsi->num_queues; i++, que++) | |||||
ixl_disable_queue(hw, que->me); | |||||
} | |||||
static void | |||||
ixl_disable_intr(struct ixl_vsi *vsi) | |||||
{ | |||||
struct i40e_hw *hw = vsi->hw; | |||||
if (ixl_enable_msix) | |||||
ixl_disable_adminq(hw); | |||||
else | |||||
ixl_disable_legacy(hw); | |||||
} | |||||
static void | |||||
ixl_enable_adminq(struct i40e_hw *hw) | ixl_enable_adminq(struct i40e_hw *hw) | ||||
{ | { | ||||
u32 reg; | u32 reg; | ||||
reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | | reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | | ||||
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | | ||||
(IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); | (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); | ||||
wr32(hw, I40E_PFINT_DYN_CTL0, reg); | wr32(hw, I40E_PFINT_DYN_CTL0, reg); | ||||
ixl_flush(hw); | ixl_flush(hw); | ||||
return; | return; | ||||
} | } | ||||
static void | static void | ||||
ixl_disable_adminq(struct i40e_hw *hw) | ixl_disable_adminq(struct i40e_hw *hw) | ||||
{ | { | ||||
u32 reg; | u32 reg; | ||||
reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; | reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; | ||||
wr32(hw, I40E_PFINT_DYN_CTL0, reg); | wr32(hw, I40E_PFINT_DYN_CTL0, reg); | ||||
return; | return; | ||||
} | } | ||||
static void | static void | ||||
ixl_enable_queue(struct i40e_hw *hw, int id) | |||||
{ | |||||
u32 reg; | |||||
reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | | |||||
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | | |||||
(IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); | |||||
wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); | |||||
} | |||||
static void | |||||
ixl_disable_queue(struct i40e_hw *hw, int id) | |||||
{ | |||||
u32 reg; | |||||
reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; | |||||
wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); | |||||
return; | |||||
} | |||||
/*
 * Enable the single shared (MSI / legacy INTx) interrupt vector:
 * set INTENA, clear any pending PBA bit, and select the no-ITR index.
 */
static void
ixl_enable_legacy(struct i40e_hw *hw)
{
	u32 reg;

	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
}
▲ Show 20 Lines • Show All 185 Lines • ▼ Show 20 Lines | ixl_update_stats_counters(struct ixl_pf *pf) | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
** Tasklet handler for MSIX Adminq interrupts | ** Tasklet handler for MSIX Adminq interrupts | ||||
** - do outside interrupt since it might sleep | ** - do outside interrupt since it might sleep | ||||
*/ | */ | ||||
static void | static void | ||||
ixl_do_adminq(void *context, int pending) | ixl_if_update_admin_status(if_ctx_t ctx) | ||||
{ | { | ||||
struct ixl_pf *pf = context; | struct ixl_vsi *vsi = iflib_get_softc(ctx); | ||||
struct ixl_pf *pf = vsi->back; | |||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct i40e_arq_event_info event; | struct i40e_arq_event_info event; | ||||
i40e_status ret; | i40e_status ret; | ||||
u32 reg, loop = 0; | u32 reg, loop = 0; | ||||
u16 opcode, result; | u16 opcode, result; | ||||
event.buf_len = IXL_AQ_BUF_SZ; | event.buf_len = IXL_AQ_BUF_SZ; | ||||
event.msg_buf = malloc(event.buf_len, | event.msg_buf = malloc(event.buf_len, | ||||
M_DEVBUF, M_NOWAIT | M_ZERO); | M_IXL, M_NOWAIT | M_ZERO); | ||||
if (!event.msg_buf) { | if (!event.msg_buf) { | ||||
printf("Unable to allocate adminq memory\n"); | printf("Unable to allocate adminq memory\n"); | ||||
return; | return; | ||||
} | } | ||||
IXL_PF_LOCK(pf); | |||||
/* clean and process any events */ | /* clean and process any events */ | ||||
do { | do { | ||||
ret = i40e_clean_arq_element(hw, &event, &result); | ret = i40e_clean_arq_element(hw, &event, &result); | ||||
if (ret) | if (ret) | ||||
break; | break; | ||||
opcode = LE16_TO_CPU(event.desc.opcode); | opcode = LE16_TO_CPU(event.desc.opcode); | ||||
switch (opcode) { | switch (opcode) { | ||||
case i40e_aqc_opc_get_link_status: | case i40e_aqc_opc_get_link_status: | ||||
Show All 14 Lines | #endif | ||||
break; | break; | ||||
} | } | ||||
} while (result && (loop++ < IXL_ADM_LIMIT)); | } while (result && (loop++ < IXL_ADM_LIMIT)); | ||||
reg = rd32(hw, I40E_PFINT_ICR0_ENA); | reg = rd32(hw, I40E_PFINT_ICR0_ENA); | ||||
reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; | reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; | ||||
wr32(hw, I40E_PFINT_ICR0_ENA, reg); | wr32(hw, I40E_PFINT_ICR0_ENA, reg); | ||||
free(event.msg_buf, M_DEVBUF); | free(event.msg_buf, M_IXL); | ||||
/* | /* | ||||
* If there are still messages to process, reschedule ourselves. | * If there are still messages to process, reschedule ourselves. | ||||
* Otherwise, re-enable our interrupt and go to sleep. | * Otherwise, re-enable our interrupt and go to sleep. | ||||
*/ | */ | ||||
if (result > 0) | if (result > 0) | ||||
taskqueue_enqueue(pf->tq, &pf->adminq); | iflib_admin_intr_deferred(ctx); | ||||
else | else | ||||
ixl_enable_intr(vsi); | ixl_enable_intr(vsi); | ||||
IXL_PF_UNLOCK(pf); | |||||
} | } | ||||
#ifdef IXL_DEBUG_SYSCTL | #ifdef IXL_DEBUG_SYSCTL | ||||
static int | static int | ||||
ixl_debug_info(SYSCTL_HANDLER_ARGS) | ixl_debug_info(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf; | struct ixl_pf *pf; | ||||
int error, input = 0; | int error, input = 0; | ||||
Show All 19 Lines | ixl_print_debug_info(struct ixl_pf *pf) | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
struct rx_ring *rxr = &que->rxr; | struct rx_ring *rxr = &que->rxr; | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
u32 reg; | u32 reg; | ||||
printf("Queue irqs = %jx\n", (uintmax_t)que->irqs); | printf("Queue irqs = %jx\n", (uintmax_t)que->irqs); | ||||
printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq); | printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq); | ||||
printf("RX next check = %x\n", rxr->next_check); | |||||
printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done); | printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done); | ||||
printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets); | printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets); | ||||
printf("TX desc avail = %x\n", txr->avail); | printf("TX desc avail = %x\n", txr->avail); | ||||
reg = rd32(hw, I40E_GLV_GORCL(0xc)); | reg = rd32(hw, I40E_GLV_GORCL(0xc)); | ||||
printf("RX Bytes = %x\n", reg); | printf("RX Bytes = %x\n", reg); | ||||
reg = rd32(hw, I40E_GLPRT_GORCL(hw->port)); | reg = rd32(hw, I40E_GLPRT_GORCL(hw->port)); | ||||
printf("Port RX Bytes = %x\n", reg); | printf("Port RX Bytes = %x\n", reg); | ||||
Show All 26 Lines | |||||
* Update VSI-specific ethernet statistics counters. | * Update VSI-specific ethernet statistics counters. | ||||
**/ | **/ | ||||
void ixl_update_eth_stats(struct ixl_vsi *vsi) | void ixl_update_eth_stats(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct i40e_eth_stats *es; | struct i40e_eth_stats *es; | ||||
struct i40e_eth_stats *oes; | struct i40e_eth_stats *oes; | ||||
struct i40e_hw_port_stats *nsd; | struct i40e_hw_port_stats *nsd; | ||||
u16 stat_idx = vsi->info.stat_counter_idx; | u16 stat_idx = vsi->info.stat_counter_idx; | ||||
es = &vsi->eth_stats; | es = &vsi->eth_stats; | ||||
oes = &vsi->eth_stats_offsets; | oes = &vsi->eth_stats_offsets; | ||||
nsd = &pf->stats; | nsd = &pf->stats; | ||||
/* Gather up the stats that the hw collects */ | /* Gather up the stats that the hw collects */ | ||||
Show All 39 Lines | ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), | ||||
&oes->tx_broadcast, &es->tx_broadcast); | &oes->tx_broadcast, &es->tx_broadcast); | ||||
vsi->stat_offsets_loaded = true; | vsi->stat_offsets_loaded = true; | ||||
} | } | ||||
static void | static void | ||||
ixl_update_vsi_stats(struct ixl_vsi *vsi) | ixl_update_vsi_stats(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ixl_pf *pf; | struct ixl_pf *pf; | ||||
struct ifnet *ifp; | |||||
struct i40e_eth_stats *es; | struct i40e_eth_stats *es; | ||||
u64 tx_discards; | u64 tx_discards; | ||||
struct i40e_hw_port_stats *nsd; | struct i40e_hw_port_stats *nsd; | ||||
pf = vsi->back; | pf = vsi->back; | ||||
ifp = vsi->ifp; | |||||
es = &vsi->eth_stats; | es = &vsi->eth_stats; | ||||
nsd = &pf->stats; | nsd = &pf->stats; | ||||
ixl_update_eth_stats(vsi); | ixl_update_eth_stats(vsi); | ||||
tx_discards = es->tx_discards + nsd->tx_dropped_link_down; | tx_discards = es->tx_discards + nsd->tx_dropped_link_down; | ||||
for (int i = 0; i < vsi->num_queues; i++) | |||||
tx_discards += vsi->queues[i].txr.br->br_drops; | |||||
/* Update ifnet stats */ | /* Update ifnet stats */ | ||||
IXL_SET_IPACKETS(vsi, es->rx_unicast + | IXL_SET_IPACKETS(vsi, es->rx_unicast + | ||||
es->rx_multicast + | es->rx_multicast + | ||||
es->rx_broadcast); | es->rx_broadcast); | ||||
IXL_SET_OPACKETS(vsi, es->tx_unicast + | IXL_SET_OPACKETS(vsi, es->tx_unicast + | ||||
es->tx_multicast + | es->tx_multicast + | ||||
es->tx_broadcast); | es->tx_broadcast); | ||||
▲ Show 20 Lines • Show All 98 Lines • ▼ Show 20 Lines | ixl_set_flowcntl(SYSCTL_HANDLER_ARGS) | ||||
* TODO: ensure flow control is disabled if | * TODO: ensure flow control is disabled if | ||||
* priority flow control is enabled | * priority flow control is enabled | ||||
* | * | ||||
* TODO: ensure tx CRC by hardware should be enabled | * TODO: ensure tx CRC by hardware should be enabled | ||||
* if tx flow control is enabled. | * if tx flow control is enabled. | ||||
*/ | */ | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = iflib_get_dev(pf->vsi.ctx); | ||||
int error = 0; | int error = 0; | ||||
enum i40e_status_code aq_error = 0; | enum i40e_status_code aq_error = 0; | ||||
u8 fc_aq_err = 0; | u8 fc_aq_err = 0; | ||||
/* Get request */ | /* Get request */ | ||||
error = sysctl_handle_int(oidp, &pf->fc, 0, req); | error = sysctl_handle_int(oidp, &pf->fc, 0, req); | ||||
if ((error) || (req->newptr == NULL)) | if ((error) || (req->newptr == NULL)) | ||||
return (error); | return (error); | ||||
▲ Show 20 Lines • Show All 71 Lines • ▼ Show 20 Lines | error = sysctl_handle_string(oidp, speeds[index], | ||||
strlen(speeds[index]), req); | strlen(speeds[index]), req); | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds) | ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | if_ctx_t ctx = ((struct ixl_vsi *)pf)->ctx; | ||||
device_t dev = iflib_get_dev(ctx); | |||||
struct ifnet *ifp = iflib_get_ifp(ctx); | |||||
struct i40e_aq_get_phy_abilities_resp abilities; | struct i40e_aq_get_phy_abilities_resp abilities; | ||||
struct i40e_aq_set_phy_config config; | struct i40e_aq_set_phy_config config; | ||||
enum i40e_status_code aq_error = 0; | enum i40e_status_code aq_error = 0; | ||||
/* Get current capability information */ | /* Get current capability information */ | ||||
aq_error = i40e_aq_get_phy_capabilities(hw, | aq_error = i40e_aq_get_phy_capabilities(hw, | ||||
FALSE, FALSE, &abilities, NULL); | FALSE, FALSE, &abilities, NULL); | ||||
if (aq_error) { | if (aq_error) { | ||||
Show All 31 Lines | device_printf(dev, | ||||
hw->aq.asq_last_status); | hw->aq.asq_last_status); | ||||
return (EAGAIN); | return (EAGAIN); | ||||
} | } | ||||
/* | /* | ||||
** This seems a bit heavy handed, but we | ** This seems a bit heavy handed, but we | ||||
** need to get a reinit on some devices | ** need to get a reinit on some devices | ||||
*/ | */ | ||||
IXL_PF_LOCK(pf); | ifp->if_init(ifp->if_softc); | ||||
ixl_stop(pf); | |||||
ixl_init_locked(pf); | |||||
IXL_PF_UNLOCK(pf); | |||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
** Control link advertise speed: | ** Control link advertise speed: | ||||
** Flags: | ** Flags: | ||||
** 0x1 - advertise 100 Mb | ** 0x1 - advertise 100 Mb | ||||
** 0x2 - advertise 1G | ** 0x2 - advertise 1G | ||||
** 0x4 - advertise 10G | ** 0x4 - advertise 10G | ||||
** 0x8 - advertise 20G | ** 0x8 - advertise 20G | ||||
** | ** | ||||
** Does not work on 40G devices. | ** Does not work on 40G devices. | ||||
*/ | */ | ||||
static int | static int | ||||
ixl_set_advertise(SYSCTL_HANDLER_ARGS) | ixl_set_advertise(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = iflib_get_dev(pf->vsi.ctx); | ||||
int requested_ls = 0; | int requested_ls = 0; | ||||
int error = 0; | int error = 0; | ||||
/* | /* | ||||
** FW doesn't support changing advertised speed | ** FW doesn't support changing advertised speed | ||||
** for 40G devices; speed is always 40G. | ** for 40G devices; speed is always 40G. | ||||
*/ | */ | ||||
if (i40e_is_40G_device(hw->device_id)) | if (i40e_is_40G_device(hw->device_id)) | ||||
▲ Show 20 Lines • Show All 220 Lines • ▼ Show 20 Lines | ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) | ||||
} | } | ||||
if (ftl_len < 1) { | if (ftl_len < 1) { | ||||
sysctl_handle_string(oidp, "(none)", 6, req); | sysctl_handle_string(oidp, "(none)", 6, req); | ||||
return (0); | return (0); | ||||
} | } | ||||
buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2; | buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2; | ||||
buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT); | buf = buf_i = malloc(buf_len, M_IXL, M_NOWAIT); | ||||
sprintf(buf_i++, "\n"); | sprintf(buf_i++, "\n"); | ||||
SLIST_FOREACH(f, &vsi->ftl, next) { | SLIST_FOREACH(f, &vsi->ftl, next) { | ||||
sprintf(buf_i, | sprintf(buf_i, | ||||
MAC_FORMAT ", vlan %4d, flags %#06x", | MAC_FORMAT ", vlan %4d, flags %#06x", | ||||
MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); | MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); | ||||
buf_i += entry_len; | buf_i += entry_len; | ||||
/* don't print '\n' for last entry */ | /* don't print '\n' for last entry */ | ||||
if (++ftl_counter != ftl_len) { | if (++ftl_counter != ftl_len) { | ||||
sprintf(buf_i, "\n"); | sprintf(buf_i, "\n"); | ||||
buf_i++; | buf_i++; | ||||
} | } | ||||
} | } | ||||
error = sysctl_handle_string(oidp, buf, strlen(buf), req); | error = sysctl_handle_string(oidp, buf, strlen(buf), req); | ||||
if (error) | if (error) | ||||
printf("sysctl error: %d\n", error); | printf("sysctl error: %d\n", error); | ||||
free(buf, M_DEVBUF); | free(buf, M_IXL); | ||||
return error; | return error; | ||||
} | } | ||||
#define IXL_SW_RES_SIZE 0x14 | #define IXL_SW_RES_SIZE 0x14 | ||||
static int | static int | ||||
ixl_res_alloc_cmp(const void *a, const void *b) | ixl_res_alloc_cmp(const void *a, const void *b) | ||||
{ | { | ||||
const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; | const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; | ||||
one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; | one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; | ||||
two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; | two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; | ||||
return ((int)one->resource_type - (int)two->resource_type); | return ((int)one->resource_type - (int)two->resource_type); | ||||
} | } | ||||
static int | static int | ||||
ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) | ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = iflib_get_dev(pf->vsi.ctx); | ||||
struct sbuf *buf; | struct sbuf *buf; | ||||
int error = 0; | int error = 0; | ||||
u8 num_entries; | u8 num_entries; | ||||
struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; | struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; | ||||
buf = sbuf_new_for_sysctl(NULL, NULL, 0, req); | buf = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND); | ||||
if (!buf) { | if (!buf) { | ||||
device_printf(dev, "Could not allocate sbuf for output.\n"); | device_printf(dev, "Could not allocate sbuf for output.\n"); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
bzero(resp, sizeof(resp)); | bzero(resp, sizeof(resp)); | ||||
error = i40e_aq_get_switch_resource_alloc(hw, &num_entries, | error = i40e_aq_get_switch_resource_alloc(hw, &num_entries, | ||||
resp, | resp, | ||||
Show All 24 Lines | sbuf_printf(buf, | ||||
resp[i].guaranteed, | resp[i].guaranteed, | ||||
resp[i].total, | resp[i].total, | ||||
resp[i].used, | resp[i].used, | ||||
resp[i].total_unalloced); | resp[i].total_unalloced); | ||||
if (i < num_entries - 1) | if (i < num_entries - 1) | ||||
sbuf_cat(buf, "\n"); | sbuf_cat(buf, "\n"); | ||||
} | } | ||||
sbuf_trim(buf); | |||||
error = sbuf_finish(buf); | error = sbuf_finish(buf); | ||||
sbuf_delete(buf); | sbuf_delete(buf); | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | /* | ||||
** Caller must init and delete sbuf; this function will clear and | ** Caller must init and delete sbuf; this function will clear and | ||||
Show All 33 Lines | ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink) | ||||
return sbuf_data(s); | return sbuf_data(s); | ||||
} | } | ||||
static int | static int | ||||
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) | ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = iflib_get_dev(pf->vsi.ctx); | ||||
struct sbuf *buf; | struct sbuf *buf; | ||||
struct sbuf *nmbuf; | struct sbuf *nmbuf; | ||||
int error = 0; | int error = 0; | ||||
u8 aq_buf[I40E_AQ_LARGE_BUF]; | u8 aq_buf[I40E_AQ_LARGE_BUF]; | ||||
u16 next = 0; | u16 next = 0; | ||||
struct i40e_aqc_get_switch_config_resp *sw_config; | struct i40e_aqc_get_switch_config_resp *sw_config; | ||||
sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; | sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; | ||||
buf = sbuf_new_for_sysctl(NULL, NULL, 0, req); | buf = sbuf_new(NULL, NULL, 1024, SBUF_AUTOEXTEND); | ||||
if (!buf) { | if (!buf) { | ||||
device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); | device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
error = i40e_aq_get_switch_config(hw, sw_config, | error = i40e_aq_get_switch_config(hw, sw_config, | ||||
sizeof(aq_buf), &next, NULL); | sizeof(aq_buf), &next, NULL); | ||||
if (error) { | if (error) { | ||||
▲ Show 20 Lines • Show All 102 Lines • ▼ Show 20 Lines | ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf) | ||||
vf->vsi.num_queues = IXLV_MAX_QUEUES; | vf->vsi.num_queues = IXLV_MAX_QUEUES; | ||||
code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL); | code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL); | ||||
if (code != I40E_SUCCESS) | if (code != I40E_SUCCESS) | ||||
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | ||||
code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL); | code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL); | ||||
if (code != I40E_SUCCESS) { | if (code != I40E_SUCCESS) { | ||||
device_printf(pf->dev, "Failed to disable BW limit: %d\n", | device_printf(iflib_get_dev(pf->vsi.ctx), "Failed to disable BW limit: %d\n", | ||||
ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | ||||
return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); | ||||
} | } | ||||
memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info)); | memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info)); | ||||
return (0); | return (0); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 181 Lines • ▼ Show 20 Lines | ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf) | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
uint32_t vfrstat, vfrtrig; | uint32_t vfrstat, vfrtrig; | ||||
int i, error; | int i, error; | ||||
hw = &pf->hw; | hw = &pf->hw; | ||||
error = ixl_flush_pcie(pf, vf); | error = ixl_flush_pcie(pf, vf); | ||||
if (error != 0) | if (error != 0) | ||||
device_printf(pf->dev, | device_printf(iflib_get_dev(pf->vsi.ctx), | ||||
"Timed out waiting for PCIe activity to stop on VF-%d\n", | "Timed out waiting for PCIe activity to stop on VF-%d\n", | ||||
vf->vf_num); | vf->vf_num); | ||||
for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { | for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { | ||||
DELAY(10); | DELAY(10); | ||||
vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num)); | vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num)); | ||||
if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK) | if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK) | ||||
break; | break; | ||||
} | } | ||||
if (i == IXL_VF_RESET_TIMEOUT) | if (i == IXL_VF_RESET_TIMEOUT) | ||||
device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num); | device_printf(iflib_get_dev(pf->vsi.ctx), "VF %d failed to reset\n", vf->vf_num); | ||||
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED); | wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED); | ||||
vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); | vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); | ||||
vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; | vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; | ||||
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); | wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); | ||||
if (vf->vsi.seid != 0) | if (vf->vsi.seid != 0) | ||||
▲ Show 20 Lines • Show All 856 Lines • ▼ Show 20 Lines | ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event) | ||||
void *msg; | void *msg; | ||||
uint16_t vf_num, msg_size; | uint16_t vf_num, msg_size; | ||||
uint32_t opcode; | uint32_t opcode; | ||||
vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id; | vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id; | ||||
opcode = le32toh(event->desc.cookie_high); | opcode = le32toh(event->desc.cookie_high); | ||||
if (vf_num >= pf->num_vfs) { | if (vf_num >= pf->num_vfs) { | ||||
device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num); | device_printf(iflib_get_dev(pf->vsi.ctx), "Got msg from illegal VF: %d\n", vf_num); | ||||
return; | return; | ||||
} | } | ||||
vf = &pf->vfs[vf_num]; | vf = &pf->vfs[vf_num]; | ||||
msg = event->msg_buf; | msg = event->msg_buf; | ||||
msg_size = event->msg_len; | msg_size = event->msg_len; | ||||
I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode), | I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode), | ||||
▲ Show 20 Lines • Show All 47 Lines • ▼ Show 20 Lines | ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event) | ||||
default: | default: | ||||
i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED); | i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED); | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
/* Handle any VFs that have reset themselves via a Function Level Reset(FLR). */
static void
ixl_if_handle_vflr(if_ctx_t ctx)
{
	struct ixl_pf *pf;
	struct i40e_hw *hw;
	uint16_t global_vf_num;
	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
	int i;

	pf = iflib_get_softc(ctx);
	hw = &pf->hw;

	/*
	 * Walk the GLGEN_VFLRSTAT bit array: each set bit corresponds to
	 * a VF that performed an FLR.  Acknowledge the event by writing
	 * the bit back, then reinitialize that VF's hardware state.
	 */
	for (i = 0; i < pf->num_vfs; i++) {
		global_vf_num = hw->func_caps.vf_base_id + i;

		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
		if (vflrstat & vflrstat_mask) {
			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
			    vflrstat_mask);
			ixl_reinit_vf(pf, &pf->vfs[i]);
		}
	}

	/* Re-enable the VFLR cause in ICR0 now that events are handled. */
	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
	ixl_flush(hw);
}
static int | static int | ||||
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err) | ixl_adminq_err_to_errno(enum i40e_admin_queue_err err) | ||||
{ | { | ||||
switch (err) { | switch (err) { | ||||
case I40E_AQ_RC_EPERM: | case I40E_AQ_RC_EPERM: | ||||
▲ Show 20 Lines • Show All 41 Lines • ▼ Show 20 Lines | ixl_adminq_err_to_errno(enum i40e_admin_queue_err err) | ||||
case I40E_AQ_RC_EFBIG: | case I40E_AQ_RC_EFBIG: | ||||
return (EFBIG); | return (EFBIG); | ||||
default: | default: | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
} | } | ||||
static int | static int | ||||
ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) | ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params) | ||||
{ | { | ||||
device_t dev; | |||||
struct ixl_pf *pf; | struct ixl_pf *pf; | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct ixl_vsi *pf_vsi; | struct ixl_vsi *pf_vsi; | ||||
enum i40e_status_code ret; | enum i40e_status_code ret; | ||||
int i, error; | int i, error; | ||||
pf = device_get_softc(dev); | dev = iflib_get_dev(ctx); | ||||
pf = iflib_get_softc(ctx); | |||||
hw = &pf->hw; | hw = &pf->hw; | ||||
pf_vsi = &pf->vsi; | pf_vsi = &pf->vsi; | ||||
IXL_PF_LOCK(pf); | |||||
pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT | | pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT | | ||||
M_ZERO); | M_ZERO); | ||||
if (pf->vfs == NULL) { | if (pf->vfs == NULL) { | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
for (i = 0; i < num_vfs; i++) | for (i = 0; i < num_vfs; i++) | ||||
sysctl_ctx_init(&pf->vfs[i].ctx); | sysctl_ctx_init(&pf->vfs[i].ctx); | ||||
ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid, | ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid, | ||||
1, FALSE, FALSE, &pf->veb_seid, NULL); | 1, FALSE, FALSE, &pf->veb_seid, NULL); | ||||
if (ret != I40E_SUCCESS) { | if (ret != I40E_SUCCESS) { | ||||
error = ixl_adminq_err_to_errno(hw->aq.asq_last_status); | error = ixl_adminq_err_to_errno(hw->aq.asq_last_status); | ||||
device_printf(dev, "add_veb failed; code=%d error=%d", ret, | device_printf(dev, "add_veb failed; code=%d error=%d", ret, | ||||
error); | error); | ||||
goto fail; | goto fail; | ||||
} | } | ||||
ixl_configure_msix(pf); | ixl_configure_msix(pf); | ||||
ixl_enable_adminq(hw); | ixl_enable_adminq(hw); | ||||
pf->num_vfs = num_vfs; | pf->num_vfs = num_vfs; | ||||
IXL_PF_UNLOCK(pf); | |||||
return (0); | return (0); | ||||
fail: | fail: | ||||
free(pf->vfs, M_IXL); | free(pf->vfs, M_IXL); | ||||
pf->vfs = NULL; | pf->vfs = NULL; | ||||
IXL_PF_UNLOCK(pf); | |||||
return (error); | return (error); | ||||
} | } | ||||
static void | static void | ||||
ixl_iov_uninit(device_t dev) | ixl_if_iov_uninit(if_ctx_t ctx) | ||||
{ | { | ||||
struct ixl_pf *pf; | struct ixl_pf *pf; | ||||
struct i40e_hw *hw; | struct i40e_hw *hw; | ||||
struct ixl_vsi *vsi; | struct ixl_vsi *vsi; | ||||
struct ifnet *ifp; | struct ifnet *ifp; | ||||
struct ixl_vf *vfs; | struct ixl_vf *vfs; | ||||
device_t dev; | |||||
int i, num_vfs; | int i, num_vfs; | ||||
pf = device_get_softc(dev); | dev = iflib_get_dev(ctx); | ||||
pf = iflib_get_softc(ctx); | |||||
hw = &pf->hw; | hw = &pf->hw; | ||||
vsi = &pf->vsi; | vsi = &pf->vsi; | ||||
ifp = vsi->ifp; | ifp = vsi->ifp; | ||||
IXL_PF_LOCK(pf); | |||||
for (i = 0; i < pf->num_vfs; i++) { | for (i = 0; i < pf->num_vfs; i++) { | ||||
if (pf->vfs[i].vsi.seid != 0) | if (pf->vfs[i].vsi.seid != 0) | ||||
i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL); | i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL); | ||||
} | } | ||||
if (pf->veb_seid != 0) { | if (pf->veb_seid != 0) { | ||||
i40e_aq_delete_element(hw, pf->veb_seid, NULL); | i40e_aq_delete_element(hw, pf->veb_seid, NULL); | ||||
pf->veb_seid = 0; | pf->veb_seid = 0; | ||||
} | } | ||||
#if __FreeBSD_version > 1100022 | |||||
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) | if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) | ||||
#else | ixl_if_intr_disable(ctx); | ||||
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) | |||||
#endif | |||||
ixl_disable_intr(vsi); | |||||
vfs = pf->vfs; | vfs = pf->vfs; | ||||
num_vfs = pf->num_vfs; | num_vfs = pf->num_vfs; | ||||
pf->vfs = NULL; | pf->vfs = NULL; | ||||
pf->num_vfs = 0; | pf->num_vfs = 0; | ||||
IXL_PF_UNLOCK(pf); | |||||
/* Do this after the unlock as sysctl_ctx_free might sleep. */ | /* Do this after the unlock as sysctl_ctx_free might sleep. */ | ||||
for (i = 0; i < num_vfs; i++) | for (i = 0; i < num_vfs; i++) | ||||
sysctl_ctx_free(&vfs[i].ctx); | sysctl_ctx_free(&vfs[i].ctx); | ||||
free(vfs, M_IXL); | free(vfs, M_IXL); | ||||
} | } | ||||
static int | static int | ||||
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) | ixl_if_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params) | ||||
{ | { | ||||
char sysctl_name[QUEUE_NAME_LEN]; | char sysctl_name[QUEUE_NAME_LEN]; | ||||
struct ixl_pf *pf; | struct ixl_pf *pf; | ||||
struct ixl_vf *vf; | struct ixl_vf *vf; | ||||
device_t dev; | |||||
const void *mac; | const void *mac; | ||||
size_t size; | size_t size; | ||||
int error; | int error; | ||||
pf = device_get_softc(dev); | dev = iflib_get_dev(ctx); | ||||
pf = iflib_get_softc(ctx); | |||||
vf = &pf->vfs[vfnum]; | vf = &pf->vfs[vfnum]; | ||||
IXL_PF_LOCK(pf); | |||||
vf->vf_num = vfnum; | vf->vf_num = vfnum; | ||||
vf->vsi.back = pf; | vf->vsi.back = pf; | ||||
vf->vf_flags = VF_FLAG_ENABLED; | vf->vf_flags = VF_FLAG_ENABLED; | ||||
SLIST_INIT(&vf->vsi.ftl); | SLIST_INIT(&vf->vsi.ftl); | ||||
error = ixl_vf_setup_vsi(pf, vf); | error = ixl_vf_setup_vsi(pf, vf); | ||||
if (error != 0) | if (error != 0) | ||||
Show All 17 Lines | ixl_if_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params) | ||||
if (nvlist_get_bool(params, "allow-promisc")) | if (nvlist_get_bool(params, "allow-promisc")) | ||||
vf->vf_flags |= VF_FLAG_PROMISC_CAP; | vf->vf_flags |= VF_FLAG_PROMISC_CAP; | ||||
vf->vf_flags |= VF_FLAG_VLAN_CAP; | vf->vf_flags |= VF_FLAG_VLAN_CAP; | ||||
ixl_reset_vf(pf, vf); | ixl_reset_vf(pf, vf); | ||||
out: | out: | ||||
IXL_PF_UNLOCK(pf); | |||||
if (error == 0) { | if (error == 0) { | ||||
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum); | snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum); | ||||
ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name); | ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name); | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
#endif /* PCI_IOV */ | #endif /* PCI_IOV */ |