Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/if_ixlv.c
/****************************************************************************** | /****************************************************************************** | ||||
Copyright (c) 2013-2015, Intel Corporation | Copyright (c) 2013-2017, Intel Corporation | ||||
All rights reserved. | All rights reserved. | ||||
Redistribution and use in source and binary forms, with or without | Redistribution and use in source and binary forms, with or without | ||||
modification, are permitted provided that the following conditions are met: | modification, are permitted provided that the following conditions are met: | ||||
1. Redistributions of source code must retain the above copyright notice, | 1. Redistributions of source code must retain the above copyright notice, | ||||
this list of conditions and the following disclaimer. | this list of conditions and the following disclaimer. | ||||
Show All 21 Lines | |||||
/*$FreeBSD$*/ | /*$FreeBSD$*/ | ||||
#include "ixl.h" | #include "ixl.h" | ||||
#include "ixlv.h" | #include "ixlv.h" | ||||
/********************************************************************* | /********************************************************************* | ||||
* Driver version | * Driver version | ||||
*********************************************************************/ | *********************************************************************/ | ||||
char ixlv_driver_version[] = "1.4.12-k"; | #define IXLV_DRIVER_VERSION_MAJOR 1 | ||||
#define IXLV_DRIVER_VERSION_MINOR 5 | |||||
#define IXLV_DRIVER_VERSION_BUILD 4 | |||||
char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "." | |||||
__XSTRING(IXLV_DRIVER_VERSION_MINOR) "." | |||||
__XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k"; | |||||
/********************************************************************* | /********************************************************************* | ||||
* PCI Device ID Table | * PCI Device ID Table | ||||
* | * | ||||
* Used by probe to select devices to load on | * Used by probe to select devices to load on | ||||
* Last field stores an index into ixlv_strings | * Last field stores an index into ixlv_strings | ||||
* Last entry must be all 0s | * Last entry must be all 0s | ||||
* | * | ||||
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static ixl_vendor_info_t ixlv_vendor_info_array[] = | static ixl_vendor_info_t ixlv_vendor_info_array[] = | ||||
{ | { | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0}, | {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0}, | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0}, | {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0}, | ||||
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0}, | {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0}, | ||||
/* required last entry */ | /* required last entry */ | ||||
{0, 0, 0, 0, 0} | {0, 0, 0, 0, 0} | ||||
}; | }; | ||||
/********************************************************************* | /********************************************************************* | ||||
* Table of branding strings | * Table of branding strings | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static char *ixlv_strings[] = { | static char *ixlv_strings[] = { | ||||
"Intel(R) Ethernet Connection XL710/X722 VF Driver" | "Intel(R) Ethernet Connection 700 Series VF Driver" | ||||
}; | }; | ||||
/********************************************************************* | /********************************************************************* | ||||
* Function prototypes | * Function prototypes | ||||
*********************************************************************/ | *********************************************************************/ | ||||
static int ixlv_probe(device_t); | static int ixlv_probe(device_t); | ||||
static int ixlv_attach(device_t); | static int ixlv_attach(device_t); | ||||
static int ixlv_detach(device_t); | static int ixlv_detach(device_t); | ||||
static int ixlv_shutdown(device_t); | static int ixlv_shutdown(device_t); | ||||
static void ixlv_init_locked(struct ixlv_sc *); | static void ixlv_init_locked(struct ixlv_sc *); | ||||
static int ixlv_allocate_pci_resources(struct ixlv_sc *); | static int ixlv_allocate_pci_resources(struct ixlv_sc *); | ||||
static void ixlv_free_pci_resources(struct ixlv_sc *); | static void ixlv_free_pci_resources(struct ixlv_sc *); | ||||
static int ixlv_assign_msix(struct ixlv_sc *); | static int ixlv_assign_msix(struct ixlv_sc *); | ||||
static int ixlv_init_msix(struct ixlv_sc *); | static int ixlv_init_msix(struct ixlv_sc *); | ||||
static int ixlv_init_taskqueue(struct ixlv_sc *); | static int ixlv_init_taskqueue(struct ixlv_sc *); | ||||
static int ixlv_setup_queues(struct ixlv_sc *); | static int ixlv_setup_queues(struct ixlv_sc *); | ||||
static void ixlv_config_rss(struct ixlv_sc *); | static void ixlv_config_rss(struct ixlv_sc *); | ||||
static void ixlv_stop(struct ixlv_sc *); | static void ixlv_stop(struct ixlv_sc *); | ||||
static void ixlv_add_multi(struct ixl_vsi *); | static void ixlv_add_multi(struct ixl_vsi *); | ||||
static void ixlv_del_multi(struct ixl_vsi *); | static void ixlv_del_multi(struct ixl_vsi *); | ||||
static void ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que); | |||||
static void ixlv_free_queues(struct ixl_vsi *); | static void ixlv_free_queues(struct ixl_vsi *); | ||||
static int ixlv_setup_interface(device_t, struct ixlv_sc *); | static int ixlv_setup_interface(device_t, struct ixlv_sc *); | ||||
static int ixlv_teardown_adminq_msix(struct ixlv_sc *); | static int ixlv_teardown_adminq_msix(struct ixlv_sc *); | ||||
static int ixlv_media_change(struct ifnet *); | static int ixlv_media_change(struct ifnet *); | ||||
static void ixlv_media_status(struct ifnet *, struct ifmediareq *); | static void ixlv_media_status(struct ifnet *, struct ifmediareq *); | ||||
static void ixlv_local_timer(void *); | static void ixlv_local_timer(void *); | ||||
Show All 27 Lines | |||||
static void ixlv_init_hw(struct ixlv_sc *); | static void ixlv_init_hw(struct ixlv_sc *); | ||||
static int ixlv_setup_vc(struct ixlv_sc *); | static int ixlv_setup_vc(struct ixlv_sc *); | ||||
static int ixlv_vf_config(struct ixlv_sc *); | static int ixlv_vf_config(struct ixlv_sc *); | ||||
static void ixlv_cap_txcsum_tso(struct ixl_vsi *, | static void ixlv_cap_txcsum_tso(struct ixl_vsi *, | ||||
struct ifnet *, int); | struct ifnet *, int); | ||||
static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed); | |||||
static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS); | |||||
static void ixlv_add_sysctls(struct ixlv_sc *); | static void ixlv_add_sysctls(struct ixlv_sc *); | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); | static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); | ||||
static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); | static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); | ||||
#endif | #endif | ||||
/********************************************************************* | /********************************************************************* | ||||
* FreeBSD Device Interface Entry Points | * FreeBSD Device Interface Entry Points | ||||
Show All 22 Lines | |||||
** TUNEABLE PARAMETERS: | ** TUNEABLE PARAMETERS: | ||||
*/ | */ | ||||
static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0, | static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0, | ||||
"IXLV driver parameters"); | "IXLV driver parameters"); | ||||
/* | /* | ||||
** Number of descriptors per ring: | ** Number of descriptors per ring: | ||||
** - TX and RX are the same size | ** - TX and RX sizes are independently configurable | ||||
*/ | */ | ||||
static int ixlv_ringsz = IXL_DEFAULT_RING; | static int ixlv_tx_ring_size = IXL_DEFAULT_RING; | ||||
TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz); | TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size); | ||||
SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN, | ||||
&ixlv_ringsz, 0, "Descriptor Ring Size"); | &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size"); | ||||
static int ixlv_rx_ring_size = IXL_DEFAULT_RING; | |||||
TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size); | |||||
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN, | |||||
&ixlv_rx_ring_size, 0, "TX Descriptor Ring Size"); | |||||
/* Set to zero to auto calculate */ | /* Set to zero to auto calculate */ | ||||
int ixlv_max_queues = 0; | int ixlv_max_queues = 0; | ||||
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues); | TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues); | ||||
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN, | ||||
&ixlv_max_queues, 0, "Number of Queues"); | &ixlv_max_queues, 0, "Number of Queues"); | ||||
/* | /* | ||||
** Number of entries in Tx queue buf_ring. | ** Number of entries in Tx queue buf_ring. | ||||
** Increasing this will reduce the number of | ** Increasing this will reduce the number of | ||||
** errors when transmitting fragmented UDP | ** errors when transmitting fragmented UDP | ||||
** packets. | ** packets. | ||||
*/ | */ | ||||
static int ixlv_txbrsz = DEFAULT_TXBRSZ; | static int ixlv_txbrsz = DEFAULT_TXBRSZ; | ||||
TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz); | TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz); | ||||
SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN, | ||||
&ixlv_txbrsz, 0, "TX Buf Ring Size"); | &ixlv_txbrsz, 0, "TX Buf Ring Size"); | ||||
/* | /* | ||||
* Different method for processing TX descriptor | |||||
* completion. | |||||
*/ | |||||
static int ixlv_enable_head_writeback = 0; | |||||
TUNABLE_INT("hw.ixlv.enable_head_writeback", | |||||
&ixlv_enable_head_writeback); | |||||
SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, | |||||
&ixlv_enable_head_writeback, 0, | |||||
"For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); | |||||
/* | |||||
** Controls for Interrupt Throttling | ** Controls for Interrupt Throttling | ||||
** - true/false for dynamic adjustment | ** - true/false for dynamic adjustment | ||||
** - default values for static ITR | ** - default values for static ITR | ||||
*/ | */ | ||||
int ixlv_dynamic_rx_itr = 0; | int ixlv_dynamic_rx_itr = 0; | ||||
TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr); | TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr); | ||||
SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, | SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, | ||||
&ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); | &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); | ||||
▲ Show 20 Lines • Show All 92 Lines • ▼ Show 20 Lines | ixlv_attach(device_t dev) | ||||
vsi->dev = dev; | vsi->dev = dev; | ||||
/* Initialize hw struct */ | /* Initialize hw struct */ | ||||
ixlv_init_hw(sc); | ixlv_init_hw(sc); | ||||
/* Allocate filter lists */ | /* Allocate filter lists */ | ||||
ixlv_init_filters(sc); | ixlv_init_filters(sc); | ||||
/* Save this tunable */ | |||||
vsi->enable_head_writeback = ixlv_enable_head_writeback; | |||||
/* Core Lock Init */ | /* Core Lock Init */ | ||||
mtx_init(&sc->mtx, device_get_nameunit(dev), | mtx_init(&sc->mtx, device_get_nameunit(dev), | ||||
"IXL SC Lock", MTX_DEF); | "IXL SC Lock", MTX_DEF); | ||||
/* Set up the timer callout */ | /* Set up the timer callout */ | ||||
callout_init_mtx(&sc->timer, &sc->mtx, 0); | callout_init_mtx(&sc->timer, &sc->mtx, 0); | ||||
/* Do PCI setup - map BAR0, etc */ | /* Do PCI setup - map BAR0, etc */ | ||||
▲ Show 20 Lines • Show All 86 Lines • ▼ Show 20 Lines | #endif | ||||
/* We fail without MSIX support */ | /* We fail without MSIX support */ | ||||
if (sc->msix == 0) { | if (sc->msix == 0) { | ||||
error = ENXIO; | error = ENXIO; | ||||
goto err_res_buf; | goto err_res_buf; | ||||
} | } | ||||
vsi->id = sc->vsi_res->vsi_id; | vsi->id = sc->vsi_res->vsi_id; | ||||
vsi->back = (void *)sc; | vsi->back = (void *)sc; | ||||
sc->link_up = TRUE; | |||||
ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size); | |||||
/* This allocates the memory and early settings */ | /* This allocates the memory and early settings */ | ||||
if (ixlv_setup_queues(sc) != 0) { | if (ixlv_setup_queues(sc) != 0) { | ||||
device_printf(dev, "%s: setup queues failed!\n", | device_printf(dev, "%s: setup queues failed!\n", | ||||
__func__); | __func__); | ||||
error = EIO; | error = EIO; | ||||
goto out; | goto out; | ||||
} | } | ||||
/* Setup the stack interface */ | /* Do queue interrupt setup */ | ||||
if (ixlv_setup_interface(dev, sc) != 0) { | if (ixlv_assign_msix(sc) != 0) { | ||||
device_printf(dev, "%s: setup interface failed!\n", | device_printf(dev, "%s: allocating queue interrupts failed!\n", | ||||
__func__); | __func__); | ||||
error = EIO; | error = ENXIO; | ||||
goto out; | goto out; | ||||
} | } | ||||
INIT_DBG_DEV(dev, "Queue memory and interface setup"); | INIT_DBG_DEV(dev, "Queue memory and interrupts setup"); | ||||
/* Do queue interrupt setup */ | /* Setup the stack interface */ | ||||
if (ixlv_assign_msix(sc) != 0) { | if (ixlv_setup_interface(dev, sc) != 0) { | ||||
device_printf(dev, "%s: allocating queue interrupts failed!\n", | device_printf(dev, "%s: setup interface failed!\n", | ||||
__func__); | __func__); | ||||
error = ENXIO; | error = EIO; | ||||
goto out; | goto out; | ||||
} | } | ||||
INIT_DBG_DEV(dev, "Interface setup complete"); | |||||
/* Start AdminQ taskqueue */ | /* Start AdminQ taskqueue */ | ||||
ixlv_init_taskqueue(sc); | ixlv_init_taskqueue(sc); | ||||
/* We expect a link state message, so schedule the AdminQ task now */ | |||||
taskqueue_enqueue(sc->tq, &sc->aq_irq); | |||||
/* Initialize stats */ | /* Initialize stats */ | ||||
bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); | bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); | ||||
ixlv_add_sysctls(sc); | ixlv_add_sysctls(sc); | ||||
/* Register for VLAN events */ | /* Register for VLAN events */ | ||||
vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, | vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, | ||||
ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); | ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); | ||||
vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, | vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, | ||||
ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); | ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); | ||||
/* We want AQ enabled early */ | /* We want AQ enabled early */ | ||||
ixlv_enable_adminq_irq(hw); | ixlv_enable_adminq_irq(hw); | ||||
/* Set things up to run init */ | /* Set things up to run init */ | ||||
sc->init_state = IXLV_INIT_READY; | sc->init_state = IXLV_INIT_READY; | ||||
ixl_vc_init_mgr(sc, &sc->vc_mgr); | ixl_vc_init_mgr(sc, &sc->vc_mgr); | ||||
INIT_DBG_DEV(dev, "end"); | INIT_DBG_DEV(dev, "end"); | ||||
return (error); | return (error); | ||||
out: | out: | ||||
ixlv_free_queues(vsi); | ixlv_free_queues(vsi); | ||||
ixlv_teardown_adminq_msix(sc); | |||||
err_res_buf: | err_res_buf: | ||||
free(sc->vf_res, M_DEVBUF); | free(sc->vf_res, M_DEVBUF); | ||||
err_aq: | err_aq: | ||||
i40e_shutdown_adminq(hw); | i40e_shutdown_adminq(hw); | ||||
err_pci_res: | err_pci_res: | ||||
ixlv_free_pci_resources(sc); | ixlv_free_pci_resources(sc); | ||||
err_early: | err_early: | ||||
mtx_destroy(&sc->mtx); | mtx_destroy(&sc->mtx); | ||||
Show All 23 Lines | ixlv_detach(device_t dev) | ||||
INIT_DBG_DEV(dev, "begin"); | INIT_DBG_DEV(dev, "begin"); | ||||
/* Make sure VLANS are not using driver */ | /* Make sure VLANS are not using driver */ | ||||
if (vsi->ifp->if_vlantrunk != NULL) { | if (vsi->ifp->if_vlantrunk != NULL) { | ||||
if_printf(vsi->ifp, "Vlan in use, detach first\n"); | if_printf(vsi->ifp, "Vlan in use, detach first\n"); | ||||
return (EBUSY); | return (EBUSY); | ||||
} | } | ||||
/* Remove all the media and link information */ | |||||
ifmedia_removeall(&sc->media); | |||||
/* Stop driver */ | /* Stop driver */ | ||||
ether_ifdetach(vsi->ifp); | ether_ifdetach(vsi->ifp); | ||||
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) { | if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) { | ||||
mtx_lock(&sc->mtx); | mtx_lock(&sc->mtx); | ||||
ixlv_stop(sc); | ixlv_stop(sc); | ||||
mtx_unlock(&sc->mtx); | mtx_unlock(&sc->mtx); | ||||
} | } | ||||
Show All 14 Lines | ixlv_detach(device_t dev) | ||||
if (status != I40E_SUCCESS) { | if (status != I40E_SUCCESS) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"i40e_shutdown_adminq() failed with status %s\n", | "i40e_shutdown_adminq() failed with status %s\n", | ||||
i40e_stat_str(hw, status)); | i40e_stat_str(hw, status)); | ||||
} | } | ||||
if_free(vsi->ifp); | if_free(vsi->ifp); | ||||
free(sc->vf_res, M_DEVBUF); | free(sc->vf_res, M_DEVBUF); | ||||
ixlv_free_pci_resources(sc); | |||||
ixlv_free_queues(vsi); | ixlv_free_queues(vsi); | ||||
ixlv_free_pci_resources(sc); | |||||
ixlv_free_filters(sc); | ixlv_free_filters(sc); | ||||
bus_generic_detach(dev); | bus_generic_detach(dev); | ||||
mtx_destroy(&sc->mtx); | mtx_destroy(&sc->mtx); | ||||
INIT_DBG_DEV(dev, "end"); | INIT_DBG_DEV(dev, "end"); | ||||
return (0); | return (0); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 595 Lines • ▼ Show 20 Lines | while (!i40e_asq_done(hw)) { | ||||
} | } | ||||
i40e_msec_pause(10); | i40e_msec_pause(10); | ||||
} | } | ||||
INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d", | INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d", | ||||
retried + 1); | retried + 1); | ||||
if (!sc->vf_res) { | if (!sc->vf_res) { | ||||
bufsz = sizeof(struct i40e_virtchnl_vf_resource) + | bufsz = sizeof(struct virtchnl_vf_resource) + | ||||
(I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource)); | (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); | ||||
sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT); | sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT); | ||||
if (!sc->vf_res) { | if (!sc->vf_res) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"%s: Unable to allocate memory for VF configuration" | "%s: Unable to allocate memory for VF configuration" | ||||
" message from PF on attempt %d\n", __func__, retried + 1); | " message from PF on attempt %d\n", __func__, retried + 1); | ||||
ret_error = 1; | ret_error = 1; | ||||
goto fail; | goto fail; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 162 Lines • ▼ Show 20 Lines | ixlv_allocate_pci_resources(struct ixlv_sc *sc) | ||||
sc->osdep.mem_bus_space_tag = | sc->osdep.mem_bus_space_tag = | ||||
rman_get_bustag(sc->pci_mem); | rman_get_bustag(sc->pci_mem); | ||||
sc->osdep.mem_bus_space_handle = | sc->osdep.mem_bus_space_handle = | ||||
rman_get_bushandle(sc->pci_mem); | rman_get_bushandle(sc->pci_mem); | ||||
sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem); | sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem); | ||||
sc->osdep.flush_reg = I40E_VFGEN_RSTAT; | sc->osdep.flush_reg = I40E_VFGEN_RSTAT; | ||||
sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle; | sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle; | ||||
sc->hw.back = &sc->osdep; | sc->hw.back = &sc->osdep; | ||||
/* | ixl_set_busmaster(dev); | ||||
** Explicitly set the guest PCI BUSMASTER capability | ixl_set_msix_enable(dev); | ||||
** and we must rewrite the ENABLE in the MSIX control | |||||
** register again at this point to cause the host to | |||||
** successfully initialize us. | |||||
** | |||||
** This must be set before accessing any registers. | |||||
*/ | |||||
{ | |||||
u16 pci_cmd_word; | |||||
int msix_ctrl; | |||||
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); | |||||
pci_cmd_word |= PCIM_CMD_BUSMASTEREN; | |||||
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); | |||||
pci_find_cap(dev, PCIY_MSIX, &rid); | |||||
rid += PCIR_MSIX_CTRL; | |||||
msix_ctrl = pci_read_config(dev, rid, 2); | |||||
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; | |||||
pci_write_config(dev, rid, msix_ctrl, 2); | |||||
} | |||||
/* Disable adminq interrupts (just in case) */ | /* Disable adminq interrupts (just in case) */ | ||||
ixlv_disable_adminq_irq(&sc->hw); | ixlv_disable_adminq_irq(&sc->hw); | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | |||||
* Free MSI-X related resources for a single queue | |||||
*/ | |||||
static void | static void | ||||
ixlv_free_pci_resources(struct ixlv_sc *sc) | ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &sc->vsi; | |||||
struct ixl_queue *que = vsi->queues; | |||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
/* We may get here before stations are setup */ | |||||
if (que == NULL) | |||||
goto early; | |||||
/* | /* | ||||
** Release all msix queue resources: | ** Release all msix queue resources: | ||||
*/ | */ | ||||
for (int i = 0; i < vsi->num_queues; i++, que++) { | |||||
int rid = que->msix + 1; | |||||
if (que->tag != NULL) { | if (que->tag != NULL) { | ||||
bus_teardown_intr(dev, que->res, que->tag); | bus_teardown_intr(dev, que->res, que->tag); | ||||
que->tag = NULL; | que->tag = NULL; | ||||
} | } | ||||
if (que->res != NULL) { | if (que->res != NULL) { | ||||
int rid = que->msix + 1; | |||||
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); | bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); | ||||
que->res = NULL; | que->res = NULL; | ||||
} | } | ||||
if (que->tq != NULL) { | |||||
taskqueue_free(que->tq); | |||||
que->tq = NULL; | |||||
} | } | ||||
} | |||||
early: | static void | ||||
ixlv_free_pci_resources(struct ixlv_sc *sc) | |||||
{ | |||||
device_t dev = sc->dev; | |||||
pci_release_msi(dev); | pci_release_msi(dev); | ||||
if (sc->msix_mem != NULL) | if (sc->msix_mem != NULL) | ||||
bus_release_resource(dev, SYS_RES_MEMORY, | bus_release_resource(dev, SYS_RES_MEMORY, | ||||
PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem); | PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem); | ||||
if (sc->pci_mem != NULL) | if (sc->pci_mem != NULL) | ||||
bus_release_resource(dev, SYS_RES_MEMORY, | bus_release_resource(dev, SYS_RES_MEMORY, | ||||
▲ Show 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | if (que->res == NULL) { | ||||
" bus resource: que interrupt [%d]\n", vector); | " bus resource: que interrupt [%d]\n", vector); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
/* Set the handler function */ | /* Set the handler function */ | ||||
error = bus_setup_intr(dev, que->res, | error = bus_setup_intr(dev, que->res, | ||||
INTR_TYPE_NET | INTR_MPSAFE, NULL, | INTR_TYPE_NET | INTR_MPSAFE, NULL, | ||||
ixlv_msix_que, que, &que->tag); | ixlv_msix_que, que, &que->tag); | ||||
if (error) { | if (error) { | ||||
que->res = NULL; | que->tag = NULL; | ||||
device_printf(dev, "Failed to register que handler"); | device_printf(dev, "Failed to register que handler"); | ||||
return (error); | return (error); | ||||
} | } | ||||
bus_describe_intr(dev, que->res, que->tag, "que %d", i); | bus_describe_intr(dev, que->res, que->tag, "que %d", i); | ||||
/* Bind the vector to a CPU */ | /* Bind the vector to a CPU */ | ||||
#ifdef RSS | #ifdef RSS | ||||
cpu_id = rss_getcpu(i % rss_getnumbuckets()); | cpu_id = rss_getcpu(i % rss_getnumbuckets()); | ||||
#endif | #endif | ||||
▲ Show 20 Lines • Show All 64 Lines • ▼ Show 20 Lines | |||||
{ | { | ||||
u32 reg; | u32 reg; | ||||
/* Wait up to ~10 seconds */ | /* Wait up to ~10 seconds */ | ||||
for (int i = 0; i < 100; i++) { | for (int i = 0; i < 100; i++) { | ||||
reg = rd32(hw, I40E_VFGEN_RSTAT) & | reg = rd32(hw, I40E_VFGEN_RSTAT) & | ||||
I40E_VFGEN_RSTAT_VFR_STATE_MASK; | I40E_VFGEN_RSTAT_VFR_STATE_MASK; | ||||
if ((reg == I40E_VFR_VFACTIVE) || | if ((reg == VIRTCHNL_VFR_VFACTIVE) || | ||||
(reg == I40E_VFR_COMPLETED)) | (reg == VIRTCHNL_VFR_COMPLETED)) | ||||
return (0); | return (0); | ||||
i40e_msec_pause(100); | i40e_msec_pause(100); | ||||
} | } | ||||
return (EBUSY); | return (EBUSY); | ||||
} | } | ||||
Show All 16 Lines | if (ifp == NULL) { | ||||
device_printf(dev, "%s: could not allocate ifnet" | device_printf(dev, "%s: could not allocate ifnet" | ||||
" structure!\n", __func__); | " structure!\n", __func__); | ||||
return (-1); | return (-1); | ||||
} | } | ||||
if_initname(ifp, device_get_name(dev), device_get_unit(dev)); | if_initname(ifp, device_get_name(dev), device_get_unit(dev)); | ||||
ifp->if_mtu = ETHERMTU; | ifp->if_mtu = ETHERMTU; | ||||
#if __FreeBSD_version >= 1100000 | |||||
ifp->if_baudrate = IF_Gbps(40); | ifp->if_baudrate = IF_Gbps(40); | ||||
#else | |||||
if_initbaudrate(ifp, IF_Gbps(40)); | |||||
#endif | |||||
ifp->if_init = ixlv_init; | ifp->if_init = ixlv_init; | ||||
ifp->if_softc = vsi; | ifp->if_softc = vsi; | ||||
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | ||||
ifp->if_ioctl = ixlv_ioctl; | ifp->if_ioctl = ixlv_ioctl; | ||||
#if __FreeBSD_version >= 1100000 | #if __FreeBSD_version >= 1100000 | ||||
if_setgetcounterfn(ifp, ixl_get_counter); | if_setgetcounterfn(ifp, ixl_get_counter); | ||||
#endif | #endif | ||||
ifp->if_transmit = ixl_mq_start; | ifp->if_transmit = ixl_mq_start; | ||||
ifp->if_qflush = ixl_qflush; | ifp->if_qflush = ixl_qflush; | ||||
ifp->if_snd.ifq_maxlen = que->num_desc - 2; | ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2; | ||||
ether_ifattach(ifp, sc->hw.mac.addr); | ether_ifattach(ifp, sc->hw.mac.addr); | ||||
vsi->max_frame_size = | vsi->max_frame_size = | ||||
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN | ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN | ||||
+ ETHER_VLAN_ENCAP_LEN; | + ETHER_VLAN_ENCAP_LEN; | ||||
ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN); | |||||
ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS; | |||||
ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE; | |||||
/* | /* | ||||
* Tell the upper layer(s) we support long frames. | * Tell the upper layer(s) we support long frames. | ||||
*/ | */ | ||||
ifp->if_hdrlen = sizeof(struct ether_vlan_header); | ifp->if_hdrlen = sizeof(struct ether_vlan_header); | ||||
ifp->if_capabilities |= IFCAP_HWCSUM; | ifp->if_capabilities |= IFCAP_HWCSUM; | ||||
ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; | ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; | ||||
ifp->if_capabilities |= IFCAP_TSO; | ifp->if_capabilities |= IFCAP_TSO; | ||||
Show All 18 Lines | #endif | ||||
/* | /* | ||||
* Specify the media types supported by this adapter and register | * Specify the media types supported by this adapter and register | ||||
* callbacks to update media and link information | * callbacks to update media and link information | ||||
*/ | */ | ||||
ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change, | ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change, | ||||
ixlv_media_status); | ixlv_media_status); | ||||
// JFV Add media types later? | /* Media types based on reported link speed over AdminQ */ | ||||
ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); | |||||
ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); | |||||
ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); | |||||
ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL); | |||||
ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); | |||||
ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); | ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); | ||||
ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); | ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); | ||||
INIT_DBG_DEV(dev, "end"); | INIT_DBG_DEV(dev, "end"); | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
** Allocate and setup the interface queues | ** Allocate and setup a single queue | ||||
*/ | */ | ||||
static int | static int | ||||
ixlv_setup_queues(struct ixlv_sc *sc) | ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que) | ||||
{ | { | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
struct ixl_vsi *vsi; | |||||
struct ixl_queue *que; | |||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
struct rx_ring *rxr; | struct rx_ring *rxr; | ||||
int rsize, tsize; | int rsize, tsize; | ||||
int error = I40E_SUCCESS; | int error = I40E_SUCCESS; | ||||
vsi = &sc->vsi; | |||||
vsi->back = (void *)sc; | |||||
vsi->hw = &sc->hw; | |||||
vsi->num_vlans = 0; | |||||
/* Get memory for the station queues */ | |||||
if (!(vsi->queues = | |||||
(struct ixl_queue *) malloc(sizeof(struct ixl_queue) * | |||||
vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { | |||||
device_printf(dev, "Unable to allocate queue memory\n"); | |||||
error = ENOMEM; | |||||
goto early; | |||||
} | |||||
for (int i = 0; i < vsi->num_queues; i++) { | |||||
que = &vsi->queues[i]; | |||||
que->num_desc = ixlv_ringsz; | |||||
que->me = i; | |||||
que->vsi = vsi; | |||||
txr = &que->txr; | txr = &que->txr; | ||||
txr->que = que; | txr->que = que; | ||||
txr->tail = I40E_QTX_TAIL1(que->me); | txr->tail = I40E_QTX_TAIL1(que->me); | ||||
/* Initialize the TX lock */ | /* Initialize the TX lock */ | ||||
snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", | snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", | ||||
device_get_nameunit(dev), que->me); | device_get_nameunit(dev), que->me); | ||||
mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); | mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); | ||||
/* | /* | ||||
** Create the TX descriptor ring, the extra int is | * Create the TX descriptor ring | ||||
** added as the location for HEAD WB. | * | ||||
* In Head Writeback mode, the descriptor ring is one bigger | |||||
* than the number of descriptors for space for the HW to | |||||
* write back index of last completed descriptor. | |||||
*/ | */ | ||||
tsize = roundup2((que->num_desc * | if (sc->vsi.enable_head_writeback) { | ||||
tsize = roundup2((que->num_tx_desc * | |||||
sizeof(struct i40e_tx_desc)) + | sizeof(struct i40e_tx_desc)) + | ||||
sizeof(u32), DBA_ALIGN); | sizeof(u32), DBA_ALIGN); | ||||
} else { | |||||
tsize = roundup2((que->num_tx_desc * | |||||
sizeof(struct i40e_tx_desc)), DBA_ALIGN); | |||||
} | |||||
if (i40e_allocate_dma_mem(&sc->hw, | if (i40e_allocate_dma_mem(&sc->hw, | ||||
&txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { | &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Unable to allocate TX Descriptor memory\n"); | "Unable to allocate TX Descriptor memory\n"); | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto err_destroy_tx_mtx; | ||||
} | } | ||||
txr->base = (struct i40e_tx_desc *)txr->dma.va; | txr->base = (struct i40e_tx_desc *)txr->dma.va; | ||||
bzero((void *)txr->base, tsize); | bzero((void *)txr->base, tsize); | ||||
/* Now allocate transmit soft structs for the ring */ | /* Now allocate transmit soft structs for the ring */ | ||||
if (ixl_allocate_tx_data(que)) { | if (ixl_allocate_tx_data(que)) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Critical Failure setting up TX structures\n"); | "Critical Failure setting up TX structures\n"); | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto err_free_tx_dma; | ||||
} | } | ||||
/* Allocate a buf ring */ | /* Allocate a buf ring */ | ||||
txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF, | txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF, | ||||
M_WAITOK, &txr->mtx); | M_WAITOK, &txr->mtx); | ||||
if (txr->br == NULL) { | if (txr->br == NULL) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Critical Failure setting up TX buf ring\n"); | "Critical Failure setting up TX buf ring\n"); | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto err_free_tx_data; | ||||
} | } | ||||
/* | /* | ||||
* Next the RX queues... | * Next the RX queues... | ||||
*/ | */ | ||||
rsize = roundup2(que->num_desc * | rsize = roundup2(que->num_rx_desc * | ||||
sizeof(union i40e_rx_desc), DBA_ALIGN); | sizeof(union i40e_rx_desc), DBA_ALIGN); | ||||
rxr = &que->rxr; | rxr = &que->rxr; | ||||
rxr->que = que; | rxr->que = que; | ||||
rxr->tail = I40E_QRX_TAIL1(que->me); | rxr->tail = I40E_QRX_TAIL1(que->me); | ||||
/* Initialize the RX side lock */ | /* Initialize the RX side lock */ | ||||
snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", | snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", | ||||
device_get_nameunit(dev), que->me); | device_get_nameunit(dev), que->me); | ||||
mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); | mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); | ||||
if (i40e_allocate_dma_mem(&sc->hw, | if (i40e_allocate_dma_mem(&sc->hw, | ||||
&rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA? | &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA? | ||||
device_printf(dev, | device_printf(dev, | ||||
"Unable to allocate RX Descriptor memory\n"); | "Unable to allocate RX Descriptor memory\n"); | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto err_destroy_rx_mtx; | ||||
} | } | ||||
rxr->base = (union i40e_rx_desc *)rxr->dma.va; | rxr->base = (union i40e_rx_desc *)rxr->dma.va; | ||||
bzero((void *)rxr->base, rsize); | bzero((void *)rxr->base, rsize); | ||||
/* Allocate receive soft structs for the ring */ | /* Allocate receive soft structs for the ring */ | ||||
if (ixl_allocate_rx_data(que)) { | if (ixl_allocate_rx_data(que)) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Critical Failure setting up receive structs\n"); | "Critical Failure setting up receive structs\n"); | ||||
error = ENOMEM; | error = ENOMEM; | ||||
goto fail; | goto err_free_rx_dma; | ||||
} | } | ||||
} | |||||
return (0); | return (0); | ||||
fail: | err_free_rx_dma: | ||||
for (int i = 0; i < vsi->num_queues; i++) { | |||||
que = &vsi->queues[i]; | |||||
rxr = &que->rxr; | |||||
txr = &que->txr; | |||||
if (rxr->base) | |||||
i40e_free_dma_mem(&sc->hw, &rxr->dma); | i40e_free_dma_mem(&sc->hw, &rxr->dma); | ||||
if (txr->base) | err_destroy_rx_mtx: | ||||
mtx_destroy(&rxr->mtx); | |||||
/* err_free_tx_buf_ring */ | |||||
buf_ring_free(txr->br, M_DEVBUF); | |||||
err_free_tx_data: | |||||
ixl_free_que_tx(que); | |||||
err_free_tx_dma: | |||||
i40e_free_dma_mem(&sc->hw, &txr->dma); | i40e_free_dma_mem(&sc->hw, &txr->dma); | ||||
err_destroy_tx_mtx: | |||||
mtx_destroy(&txr->mtx); | |||||
return (error); | |||||
} | } | ||||
/* | |||||
** Allocate and setup the interface queues | |||||
*/ | |||||
static int | |||||
ixlv_setup_queues(struct ixlv_sc *sc) | |||||
{ | |||||
device_t dev = sc->dev; | |||||
struct ixl_vsi *vsi; | |||||
struct ixl_queue *que; | |||||
int i; | |||||
int error = I40E_SUCCESS; | |||||
vsi = &sc->vsi; | |||||
vsi->back = (void *)sc; | |||||
vsi->hw = &sc->hw; | |||||
vsi->num_vlans = 0; | |||||
/* Get memory for the station queues */ | |||||
if (!(vsi->queues = | |||||
(struct ixl_queue *) malloc(sizeof(struct ixl_queue) * | |||||
vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { | |||||
device_printf(dev, "Unable to allocate queue memory\n"); | |||||
return ENOMEM; | |||||
} | |||||
for (i = 0; i < vsi->num_queues; i++) { | |||||
que = &vsi->queues[i]; | |||||
que->num_tx_desc = vsi->num_tx_desc; | |||||
que->num_rx_desc = vsi->num_rx_desc; | |||||
que->me = i; | |||||
que->vsi = vsi; | |||||
if (ixlv_setup_queue(sc, que)) { | |||||
error = ENOMEM; | |||||
goto err_free_queues; | |||||
} | |||||
} | |||||
return (0); | |||||
err_free_queues: | |||||
while (i--) | |||||
ixlv_free_queue(sc, &vsi->queues[i]); | |||||
free(vsi->queues, M_DEVBUF); | free(vsi->queues, M_DEVBUF); | ||||
early: | |||||
return (error); | return (error); | ||||
} | } | ||||
/* | /* | ||||
** This routine is run via an vlan config EVENT, | ** This routine is run via an vlan config EVENT, | ||||
** it enables us to use the HW Filter table since | ** it enables us to use the HW Filter table since | ||||
** we can get the vlan id. This just creates the | ** we can get the vlan id. This just creates the | ||||
** entry in the soft version of the VFTA, init will | ** entry in the soft version of the VFTA, init will | ||||
▲ Show 20 Lines • Show All 501 Lines • ▼ Show 20 Lines | if (!sc->link_up) { | ||||
mtx_unlock(&sc->mtx); | mtx_unlock(&sc->mtx); | ||||
INIT_DBG_IF(ifp, "end: link not up"); | INIT_DBG_IF(ifp, "end: link not up"); | ||||
return; | return; | ||||
} | } | ||||
ifmr->ifm_status |= IFM_ACTIVE; | ifmr->ifm_status |= IFM_ACTIVE; | ||||
/* Hardware is always full-duplex */ | /* Hardware is always full-duplex */ | ||||
ifmr->ifm_active |= IFM_FDX; | ifmr->ifm_active |= IFM_FDX; | ||||
/* Based on the link speed reported by the PF over the AdminQ, choose a | |||||
* PHY type to report. This isn't 100% correct since we don't really | |||||
* know the underlying PHY type of the PF, but at least we can report | |||||
* a valid link speed... | |||||
*/ | |||||
switch (sc->link_speed) { | |||||
case VIRTCHNL_LINK_SPEED_100MB: | |||||
ifmr->ifm_active |= IFM_100_TX; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_1GB: | |||||
ifmr->ifm_active |= IFM_1000_T; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_10GB: | |||||
ifmr->ifm_active |= IFM_10G_SR; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_20GB: | |||||
case VIRTCHNL_LINK_SPEED_25GB: | |||||
ifmr->ifm_active |= IFM_25G_SR; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_40GB: | |||||
ifmr->ifm_active |= IFM_40G_SR4; | |||||
break; | |||||
default: | |||||
ifmr->ifm_active |= IFM_UNKNOWN; | |||||
break; | |||||
} | |||||
mtx_unlock(&sc->mtx); | mtx_unlock(&sc->mtx); | ||||
INIT_DBG_IF(ifp, "end"); | INIT_DBG_IF(ifp, "end"); | ||||
return; | return; | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Media Ioctl callback | * Media Ioctl callback | ||||
* | * | ||||
* This routine is called when the user changes speed/duplex using | * This routine is called when the user changes speed/duplex using | ||||
* media/mediopt option with ifconfig. | * media/mediopt option with ifconfig. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
static int | static int | ||||
ixlv_media_change(struct ifnet * ifp) | ixlv_media_change(struct ifnet * ifp) | ||||
{ | { | ||||
struct ixl_vsi *vsi = ifp->if_softc; | struct ixl_vsi *vsi = ifp->if_softc; | ||||
struct ifmedia *ifm = &vsi->media; | struct ifmedia *ifm = &vsi->media; | ||||
INIT_DBG_IF(ifp, "begin"); | INIT_DBG_IF(ifp, "begin"); | ||||
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) | ||||
return (EINVAL); | return (EINVAL); | ||||
if_printf(ifp, "Changing speed is not supported\n"); | |||||
INIT_DBG_IF(ifp, "end"); | INIT_DBG_IF(ifp, "end"); | ||||
return (0); | return (ENODEV); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* Multicast Initialization | * Multicast Initialization | ||||
* | * | ||||
* This routine is called by init to reset a fresh state. | * This routine is called by init to reset a fresh state. | ||||
* | * | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { | ||||
mcnt++; | mcnt++; | ||||
} | } | ||||
if_maddr_runlock(ifp); | if_maddr_runlock(ifp); | ||||
/* TODO: Remove -- cannot set promiscuous mode in a VF */ | /* TODO: Remove -- cannot set promiscuous mode in a VF */ | ||||
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { | if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { | ||||
/* delete all multicast filters */ | /* delete all multicast filters */ | ||||
ixlv_init_multi(vsi); | ixlv_init_multi(vsi); | ||||
sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC; | sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC; | ||||
ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, | ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, | ||||
IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete, | IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete, | ||||
sc); | sc); | ||||
IOCTL_DEBUGOUT("%s: end: too many filters", __func__); | IOCTL_DEBUGOUT("%s: end: too many filters", __func__); | ||||
return; | return; | ||||
} | } | ||||
mcnt = 0; | mcnt = 0; | ||||
▲ Show 20 Lines • Show All 96 Lines • ▼ Show 20 Lines | ixlv_local_timer(void *arg) | ||||
/* If Reset is in progress just bail */ | /* If Reset is in progress just bail */ | ||||
if (sc->init_state == IXLV_RESET_PENDING) | if (sc->init_state == IXLV_RESET_PENDING) | ||||
return; | return; | ||||
/* Check for when PF triggers a VF reset */ | /* Check for when PF triggers a VF reset */ | ||||
val = rd32(hw, I40E_VFGEN_RSTAT) & | val = rd32(hw, I40E_VFGEN_RSTAT) & | ||||
I40E_VFGEN_RSTAT_VFR_STATE_MASK; | I40E_VFGEN_RSTAT_VFR_STATE_MASK; | ||||
if (val != I40E_VFR_VFACTIVE | if (val != VIRTCHNL_VFR_VFACTIVE | ||||
&& val != I40E_VFR_COMPLETED) { | && val != VIRTCHNL_VFR_COMPLETED) { | ||||
DDPRINTF(dev, "reset in progress! (%d)", val); | DDPRINTF(dev, "reset in progress! (%d)", val); | ||||
return; | return; | ||||
} | } | ||||
ixlv_request_stats(sc); | ixlv_request_stats(sc); | ||||
/* clean and process any events */ | /* clean and process any events */ | ||||
taskqueue_enqueue(sc->tq, &sc->aq_irq); | taskqueue_enqueue(sc->tq, &sc->aq_irq); | ||||
Show All 22 Lines | if (timer > 0) { | ||||
* the watchdog doesn't need to countdown. | * the watchdog doesn't need to countdown. | ||||
*/ | */ | ||||
atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer); | atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer); | ||||
/* Any queues with outstanding work get a sw irq */ | /* Any queues with outstanding work get a sw irq */ | ||||
wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask); | wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
/* Reset when a queue shows hung */ | /* Increment stat when a queue shows hung */ | ||||
if (hung) | if (hung) | ||||
goto hung; | goto hung; | ||||
callout_reset(&sc->timer, hz, ixlv_local_timer, sc); | callout_reset(&sc->timer, hz, ixlv_local_timer, sc); | ||||
return; | return; | ||||
hung: | hung: | ||||
device_printf(dev, "WARNING: Resetting!\n"); | |||||
sc->init_state = IXLV_RESET_REQUIRED; | |||||
sc->watchdog_events++; | sc->watchdog_events++; | ||||
ixlv_stop(sc); | |||||
ixlv_init_locked(sc); | |||||
} | } | ||||
/* | /* | ||||
** Note: this routine updates the OS on the link state | ** Note: this routine updates the OS on the link state | ||||
** the real check of the hardware only happens with | ** the real check of the hardware only happens with | ||||
** a link interrupt. | ** a link interrupt. | ||||
*/ | */ | ||||
void | void | ||||
ixlv_update_link_status(struct ixlv_sc *sc) | ixlv_update_link_status(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
struct ifnet *ifp = vsi->ifp; | struct ifnet *ifp = vsi->ifp; | ||||
if (sc->link_up){ | if (sc->link_up){ | ||||
if (vsi->link_active == FALSE) { | if (vsi->link_active == FALSE) { | ||||
if (bootverbose) | if (bootverbose) | ||||
if_printf(ifp,"Link is Up, %d Gbps\n", | if_printf(ifp,"Link is Up, %s\n", | ||||
(sc->link_speed == I40E_LINK_SPEED_40GB) ? 40:10); | ixlv_vc_speed_to_string(sc->link_speed)); | ||||
vsi->link_active = TRUE; | vsi->link_active = TRUE; | ||||
if_link_state_change(ifp, LINK_STATE_UP); | if_link_state_change(ifp, LINK_STATE_UP); | ||||
} | } | ||||
} else { /* Link down */ | } else { /* Link down */ | ||||
if (vsi->link_active == TRUE) { | if (vsi->link_active == TRUE) { | ||||
if (bootverbose) | if (bootverbose) | ||||
if_printf(ifp,"Link is Down\n"); | if_printf(ifp,"Link is Down\n"); | ||||
if_link_state_change(ifp, LINK_STATE_DOWN); | if_link_state_change(ifp, LINK_STATE_DOWN); | ||||
Show All 31 Lines | while ((ifp->if_drv_flags & IFF_DRV_RUNNING) && | ||||
ixlv_do_adminq_locked(sc); | ixlv_do_adminq_locked(sc); | ||||
/* Stop the local timer */ | /* Stop the local timer */ | ||||
callout_stop(&sc->timer); | callout_stop(&sc->timer); | ||||
INIT_DBG_IF(ifp, "end"); | INIT_DBG_IF(ifp, "end"); | ||||
} | } | ||||
/*
 * Free a single queue struct: TX buf_ring, soft TX/RX state, descriptor
 * DMA memory, and the per-ring mutexes.  Safe to call on a queue that was
 * never (or only partially) set up.
 */
static void
ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que)
{
	struct tx_ring *txr = &que->txr;
	struct rx_ring *rxr = &que->rxr;

	/*
	 * An uninitialized TX mutex means setup never got this far for
	 * this queue, so there is nothing at all to tear down.
	 */
	if (!mtx_initialized(&txr->mtx)) /* uninitialized */
		return;
	IXL_TX_LOCK(txr);
	if (txr->br)
		buf_ring_free(txr->br, M_DEVBUF);
	ixl_free_que_tx(que);
	if (txr->base)
		i40e_free_dma_mem(&sc->hw, &txr->dma);
	IXL_TX_UNLOCK(txr);
	IXL_TX_LOCK_DESTROY(txr);

	/*
	 * The RX side is set up after TX, so it may still be
	 * uninitialized even when the TX side was torn down above.
	 */
	if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
		return;
	IXL_RX_LOCK(rxr);
	ixl_free_que_rx(que);
	if (rxr->base)
		i40e_free_dma_mem(&sc->hw, &rxr->dma);
	IXL_RX_UNLOCK(rxr);
	IXL_RX_LOCK_DESTROY(rxr);
}
/********************************************************************* | |||||
* | |||||
* Free all station queue structs. | |||||
* | |||||
**********************************************************************/ | |||||
static void | |||||
ixlv_free_queues(struct ixl_vsi *vsi) | |||||
{ | |||||
struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back; | |||||
struct ixl_queue *que = vsi->queues; | |||||
for (int i = 0; i < vsi->num_queues; i++, que++) { | |||||
/* First, free the MSI-X resources */ | |||||
ixlv_free_msix_resources(sc, que); | |||||
/* Then free other queue data */ | |||||
ixlv_free_queue(sc, que); | |||||
} | } | ||||
free(vsi->queues, M_DEVBUF); | free(vsi->queues, M_DEVBUF); | ||||
} | } | ||||
static void | static void | ||||
ixlv_config_rss_reg(struct ixlv_sc *sc) | ixlv_config_rss_reg(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_hw *hw = &sc->hw; | struct i40e_hw *hw = &sc->hw; | ||||
struct ixl_vsi *vsi = &sc->vsi; | struct ixl_vsi *vsi = &sc->vsi; | ||||
▲ Show 20 Lines • Show All 92 Lines • ▼ Show 20 Lines | |||||
/* | /* | ||||
** ixlv_config_rss - setup RSS | ** ixlv_config_rss - setup RSS | ||||
** | ** | ||||
** RSS keys and table are cleared on VF reset. | ** RSS keys and table are cleared on VF reset. | ||||
*/ | */ | ||||
static void | static void | ||||
ixlv_config_rss(struct ixlv_sc *sc) | ixlv_config_rss(struct ixlv_sc *sc) | ||||
{ | { | ||||
if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) { | if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) { | ||||
DDPRINTF(sc->dev, "Setting up RSS using VF registers..."); | DDPRINTF(sc->dev, "Setting up RSS using VF registers..."); | ||||
ixlv_config_rss_reg(sc); | ixlv_config_rss_reg(sc); | ||||
} else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { | } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { | ||||
DDPRINTF(sc->dev, "Setting up RSS using messages to PF..."); | DDPRINTF(sc->dev, "Setting up RSS using messages to PF..."); | ||||
ixlv_config_rss_pf(sc); | ixlv_config_rss_pf(sc); | ||||
} else | } else | ||||
device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n"); | device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n"); | ||||
} | } | ||||
/* | /* | ||||
** This routine refreshes vlan filters, called by init | ** This routine refreshes vlan filters, called by init | ||||
▲ Show 20 Lines • Show All 87 Lines • ▼ Show 20 Lines | ixlv_do_adminq(void *context, int pending) | ||||
return; | return; | ||||
} | } | ||||
static void | static void | ||||
ixlv_do_adminq_locked(struct ixlv_sc *sc) | ixlv_do_adminq_locked(struct ixlv_sc *sc) | ||||
{ | { | ||||
struct i40e_hw *hw = &sc->hw; | struct i40e_hw *hw = &sc->hw; | ||||
struct i40e_arq_event_info event; | struct i40e_arq_event_info event; | ||||
struct i40e_virtchnl_msg *v_msg; | struct virtchnl_msg *v_msg; | ||||
device_t dev = sc->dev; | device_t dev = sc->dev; | ||||
u16 result = 0; | u16 result = 0; | ||||
u32 reg, oldreg; | u32 reg, oldreg; | ||||
i40e_status ret; | i40e_status ret; | ||||
bool aq_error = false; | bool aq_error = false; | ||||
IXLV_CORE_LOCK_ASSERT(sc); | IXLV_CORE_LOCK_ASSERT(sc); | ||||
event.buf_len = IXL_AQ_BUF_SZ; | event.buf_len = IXL_AQ_BUF_SZ; | ||||
event.msg_buf = sc->aq_buffer; | event.msg_buf = sc->aq_buffer; | ||||
v_msg = (struct i40e_virtchnl_msg *)&event.desc; | v_msg = (struct virtchnl_msg *)&event.desc; | ||||
do { | do { | ||||
ret = i40e_clean_arq_element(hw, &event, &result); | ret = i40e_clean_arq_element(hw, &event, &result); | ||||
if (ret) | if (ret) | ||||
break; | break; | ||||
ixlv_vc_completion(sc, v_msg->v_opcode, | ixlv_vc_completion(sc, v_msg->v_opcode, | ||||
v_msg->v_retval, event.msg_buf, event.msg_len); | v_msg->v_retval, event.msg_buf, event.msg_len); | ||||
if (result != 0) | if (result != 0) | ||||
▲ Show 20 Lines • Show All 66 Lines • ▼ Show 20 Lines | |||||
#define QUEUE_NAME_LEN 32 | #define QUEUE_NAME_LEN 32 | ||||
char queue_namebuf[QUEUE_NAME_LEN]; | char queue_namebuf[QUEUE_NAME_LEN]; | ||||
struct ixl_queue *queues = vsi->queues; | struct ixl_queue *queues = vsi->queues; | ||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
struct rx_ring *rxr; | struct rx_ring *rxr; | ||||
/* Driver statistics sysctls */ | /* Driver statistics sysctls */ | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events", | ||||
CTLFLAG_RD, &sc->watchdog_events, | CTLFLAG_RD, &sc->watchdog_events, | ||||
"Watchdog timeouts"); | "Watchdog timeouts"); | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq", | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq", | ||||
CTLFLAG_RD, &sc->admin_irq, | CTLFLAG_RD, &sc->admin_irq, | ||||
"Admin Queue IRQ Handled"); | "Admin Queue IRQ Handled"); | ||||
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size", | |||||
CTLFLAG_RD, &vsi->num_tx_desc, 0, | |||||
"TX ring size"); | |||||
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size", | |||||
CTLFLAG_RD, &vsi->num_rx_desc, 0, | |||||
"RX ring size"); | |||||
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed", | |||||
CTLTYPE_STRING | CTLFLAG_RD, | |||||
sc, 0, ixlv_sysctl_current_speed, | |||||
"A", "Current Port Speed"); | |||||
/* VSI statistics sysctls */ | /* VSI statistics sysctls */ | ||||
vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi", | vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi", | ||||
CTLFLAG_RD, NULL, "VSI-specific statistics"); | CTLFLAG_RD, NULL, "VSI-specific statistics"); | ||||
vsi_list = SYSCTL_CHILDREN(vsi_node); | vsi_list = SYSCTL_CHILDREN(vsi_node); | ||||
struct ixl_sysctl_info ctls[] = | struct ixl_sysctl_info ctls[] = | ||||
{ | { | ||||
{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, | {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, | ||||
▲ Show 20 Lines • Show All 108 Lines • ▼ Show 20 Lines | ixlv_free_filters(struct ixlv_sc *sc) | ||||
struct ixlv_mac_filter *f; | struct ixlv_mac_filter *f; | ||||
struct ixlv_vlan_filter *v; | struct ixlv_vlan_filter *v; | ||||
while (!SLIST_EMPTY(sc->mac_filters)) { | while (!SLIST_EMPTY(sc->mac_filters)) { | ||||
f = SLIST_FIRST(sc->mac_filters); | f = SLIST_FIRST(sc->mac_filters); | ||||
SLIST_REMOVE_HEAD(sc->mac_filters, next); | SLIST_REMOVE_HEAD(sc->mac_filters, next); | ||||
free(f, M_DEVBUF); | free(f, M_DEVBUF); | ||||
} | } | ||||
free(sc->mac_filters, M_DEVBUF); | |||||
while (!SLIST_EMPTY(sc->vlan_filters)) { | while (!SLIST_EMPTY(sc->vlan_filters)) { | ||||
v = SLIST_FIRST(sc->vlan_filters); | v = SLIST_FIRST(sc->vlan_filters); | ||||
SLIST_REMOVE_HEAD(sc->vlan_filters, next); | SLIST_REMOVE_HEAD(sc->vlan_filters, next); | ||||
free(v, M_DEVBUF); | free(v, M_DEVBUF); | ||||
} | } | ||||
free(sc->vlan_filters, M_DEVBUF); | |||||
return; | return; | ||||
} | |||||
static char * | |||||
ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed) | |||||
{ | |||||
int index; | |||||
char *speeds[] = { | |||||
"Unknown", | |||||
"100 Mbps", | |||||
"1 Gbps", | |||||
"10 Gbps", | |||||
"40 Gbps", | |||||
"20 Gbps", | |||||
"25 Gbps", | |||||
}; | |||||
switch (link_speed) { | |||||
case VIRTCHNL_LINK_SPEED_100MB: | |||||
index = 1; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_1GB: | |||||
index = 2; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_10GB: | |||||
index = 3; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_40GB: | |||||
index = 4; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_20GB: | |||||
index = 5; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_25GB: | |||||
index = 6; | |||||
break; | |||||
case VIRTCHNL_LINK_SPEED_UNKNOWN: | |||||
default: | |||||
index = 0; | |||||
break; | |||||
} | |||||
return speeds[index]; | |||||
} | |||||
static int | |||||
ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct ixlv_sc *sc = (struct ixlv_sc *)arg1; | |||||
int error = 0; | |||||
error = sysctl_handle_string(oidp, | |||||
ixlv_vc_speed_to_string(sc->link_speed), | |||||
8, req); | |||||
return (error); | |||||
} | } | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
/** | /** | ||||
* ixlv_sysctl_qtx_tail_handler | * ixlv_sysctl_qtx_tail_handler | ||||
* Retrieves I40E_QTX_TAIL1 value from hardware | * Retrieves I40E_QTX_TAIL1 value from hardware | ||||
* for a sysctl. | * for a sysctl. | ||||
*/ | */ | ||||
Show All 40 Lines |