Changeset View
Changeset View
Standalone View
Standalone View
sys/dev/ixl/ixl_pf_main.c
/****************************************************************************** | /****************************************************************************** | ||||
Copyright (c) 2013-2015, Intel Corporation | Copyright (c) 2013-2017, Intel Corporation | ||||
All rights reserved. | All rights reserved. | ||||
Redistribution and use in source and binary forms, with or without | Redistribution and use in source and binary forms, with or without | ||||
modification, are permitted provided that the following conditions are met: | modification, are permitted provided that the following conditions are met: | ||||
1. Redistributions of source code must retain the above copyright notice, | 1. Redistributions of source code must retain the above copyright notice, | ||||
this list of conditions and the following disclaimer. | this list of conditions and the following disclaimer. | ||||
Show All 33 Lines | |||||
#endif | #endif | ||||
#ifdef DEV_NETMAP | #ifdef DEV_NETMAP | ||||
#include <net/netmap.h> | #include <net/netmap.h> | ||||
#include <sys/selinfo.h> | #include <sys/selinfo.h> | ||||
#include <dev/netmap/netmap_kern.h> | #include <dev/netmap/netmap_kern.h> | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int); | static int ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int); | ||||
static u64 ixl_max_aq_speed_to_value(u8); | static u64 ixl_max_aq_speed_to_value(u8); | ||||
static u8 ixl_convert_sysctl_aq_link_speed(u8, bool); | static u8 ixl_convert_sysctl_aq_link_speed(u8, bool); | ||||
static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool); | |||||
/* Sysctls */ | /* Sysctls */ | ||||
static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_set_advertise(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_current_speed(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS); | |||||
static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); | ||||
/* Debug Sysctls */ | /* Debug Sysctls */ | ||||
static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS); | |||||
static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS); | |||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); | ||||
static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); | static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); | ||||
#endif | #endif | ||||
#ifdef IXL_IW | #ifdef IXL_IW | ||||
extern int ixl_enable_iwarp; | extern int ixl_enable_iwarp; | ||||
extern int ixl_limit_iwarp_msix; | |||||
#endif | #endif | ||||
const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = | const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = | ||||
{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | ||||
const char * const ixl_fc_string[6] = { | const char * const ixl_fc_string[6] = { | ||||
"None", | "None", | ||||
"Rx", | "Rx", | ||||
"Tx", | "Tx", | ||||
"Full", | "Full", | ||||
"Priority", | "Priority", | ||||
"Default" | "Default" | ||||
}; | }; | ||||
static char *ixl_fec_string[3] = { | |||||
"CL108 RS-FEC", | |||||
"CL74 FC-FEC/BASE-R", | |||||
"None" | |||||
}; | |||||
MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); | MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); | ||||
void | void | ||||
ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...) | ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...) | ||||
{ | { | ||||
va_list args; | va_list args; | ||||
if (!(mask & pf->dbg_mask)) | if (!(mask & pf->dbg_mask)) | ||||
▲ Show 20 Lines • Show All 125 Lines • ▼ Show 20 Lines | ixl_init_locked(struct ixl_pf *pf) | ||||
if (!i40e_check_asq_alive(&pf->hw)) { | if (!i40e_check_asq_alive(&pf->hw)) { | ||||
device_printf(dev, "Admin Queue is down; resetting...\n"); | device_printf(dev, "Admin Queue is down; resetting...\n"); | ||||
ixl_teardown_hw_structs(pf); | ixl_teardown_hw_structs(pf); | ||||
ixl_reset(pf); | ixl_reset(pf); | ||||
} | } | ||||
/* Get the latest mac address... User might use a LAA */ | /* Get the latest mac address... User might use a LAA */ | ||||
bcopy(IF_LLADDR(vsi->ifp), tmpaddr, | bcopy(IF_LLADDR(vsi->ifp), tmpaddr, | ||||
I40E_ETH_LENGTH_OF_ADDRESS); | ETH_ALEN); | ||||
if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && | if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && | ||||
(i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { | (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { | ||||
device_printf(dev, "ixl_init_locked: reconfigure MAC addr\n"); | |||||
ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); | ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); | ||||
bcopy(tmpaddr, hw->mac.addr, | bcopy(tmpaddr, hw->mac.addr, | ||||
I40E_ETH_LENGTH_OF_ADDRESS); | ETH_ALEN); | ||||
ret = i40e_aq_mac_address_write(hw, | ret = i40e_aq_mac_address_write(hw, | ||||
I40E_AQC_WRITE_TYPE_LAA_ONLY, | I40E_AQC_WRITE_TYPE_LAA_ONLY, | ||||
hw->mac.addr, NULL); | hw->mac.addr, NULL); | ||||
if (ret) { | if (ret) { | ||||
device_printf(dev, "LLA address" | device_printf(dev, "LLA address" | ||||
"change failed!!\n"); | "change failed!!\n"); | ||||
return; | return; | ||||
} | } | ||||
▲ Show 20 Lines • Show All 64 Lines • ▼ Show 20 Lines | |||||
#ifdef IXL_IW | #ifdef IXL_IW | ||||
if (ixl_enable_iwarp && pf->iw_enabled) { | if (ixl_enable_iwarp && pf->iw_enabled) { | ||||
ret = ixl_iw_pf_init(pf); | ret = ixl_iw_pf_init(pf); | ||||
if (ret) | if (ret) | ||||
device_printf(dev, | device_printf(dev, | ||||
"initialize iwarp failed, code %d\n", ret); | "initialize iwarp failed, code %d\n", ret); | ||||
} | } | ||||
#endif | #endif | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Get the hardware capabilities | * Get the hardware capabilities | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines | device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, " | ||||
hw->func_caps.num_msix_vectors, | hw->func_caps.num_msix_vectors, | ||||
hw->func_caps.num_msix_vectors_vf, | hw->func_caps.num_msix_vectors_vf, | ||||
hw->func_caps.fd_filters_guaranteed, | hw->func_caps.fd_filters_guaranteed, | ||||
hw->func_caps.fd_filters_best_effort, | hw->func_caps.fd_filters_best_effort, | ||||
hw->func_caps.num_tx_qp, | hw->func_caps.num_tx_qp, | ||||
hw->func_caps.num_rx_qp, | hw->func_caps.num_rx_qp, | ||||
hw->func_caps.base_queue); | hw->func_caps.base_queue); | ||||
#endif | #endif | ||||
struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back; | |||||
osdep->i2c_intfc_num = ixl_find_i2c_interface(pf); | |||||
if (osdep->i2c_intfc_num != -1) | |||||
pf->has_i2c = true; | |||||
/* Print a subset of the capability information. */ | /* Print a subset of the capability information. */ | ||||
device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n", | device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n", | ||||
hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, | hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, | ||||
hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, | hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, | ||||
(hw->func_caps.mdio_port_mode == 2) ? "I2C" : | (hw->func_caps.mdio_port_mode == 2) ? "I2C" : | ||||
(hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" : | |||||
(hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" : | (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" : | ||||
"MDIO shared"); | "MDIO shared"); | ||||
struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back; | |||||
osdep->i2c_intfc_num = ixl_find_i2c_interface(pf); | |||||
if (osdep->i2c_intfc_num != -1) | |||||
pf->has_i2c = true; | |||||
return (error); | return (error); | ||||
} | } | ||||
void | void | ||||
ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) | ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) | ||||
{ | { | ||||
device_t dev = vsi->dev; | device_t dev = vsi->dev; | ||||
▲ Show 20 Lines • Show All 61 Lines • ▼ Show 20 Lines | if (mask & IFCAP_TXCSUM_IPV6) { | ||||
"TSO6 requires txcsum6, disabling both...\n"); | "TSO6 requires txcsum6, disabling both...\n"); | ||||
} else if (mask & IFCAP_TSO6) | } else if (mask & IFCAP_TSO6) | ||||
ifp->if_capenable &= ~IFCAP_TSO6; | ifp->if_capenable &= ~IFCAP_TSO6; | ||||
} | } | ||||
} | } | ||||
/* For the set_advertise sysctl */ | /* For the set_advertise sysctl */ | ||||
void | void | ||||
ixl_get_initial_advertised_speeds(struct ixl_pf *pf) | ixl_set_initial_advertised_speeds(struct ixl_pf *pf) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | |||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
enum i40e_status_code status; | int err; | ||||
struct i40e_aq_get_phy_abilities_resp abilities; | |||||
/* Set initial sysctl values */ | /* Make sure to initialize the device to the complete list of | ||||
status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities, | * supported speeds on driver load, to ensure unloading and | ||||
NULL); | * reloading the driver will restore this value. | ||||
if (status) { | */ | ||||
err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); | |||||
if (err) { | |||||
/* Non-fatal error */ | /* Non-fatal error */ | ||||
device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n", | device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", | ||||
__func__, status); | __func__, err); | ||||
return; | return; | ||||
} | } | ||||
pf->advertised_speed = | pf->advertised_speed = | ||||
ixl_convert_sysctl_aq_link_speed(abilities.link_speed, false); | ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); | ||||
} | } | ||||
int | int | ||||
ixl_teardown_hw_structs(struct ixl_pf *pf) | ixl_teardown_hw_structs(struct ixl_pf *pf) | ||||
{ | { | ||||
enum i40e_status_code status = 0; | enum i40e_status_code status = 0; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
/* Shutdown LAN HMC */ | /* Shutdown LAN HMC */ | ||||
if (hw->hmc.hmc_obj) { | if (hw->hmc.hmc_obj) { | ||||
status = i40e_shutdown_lan_hmc(hw); | status = i40e_shutdown_lan_hmc(hw); | ||||
if (status) { | if (status) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"init: LAN HMC shutdown failure; status %d\n", status); | "init: LAN HMC shutdown failure; status %d\n", status); | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
} | } | ||||
// XXX: This gets called when we know the adminq is inactive; | |||||
// so we already know it's setup when we get here. | |||||
/* Shutdown admin queue */ | /* Shutdown admin queue */ | ||||
ixl_disable_intr0(hw); | |||||
status = i40e_shutdown_adminq(hw); | status = i40e_shutdown_adminq(hw); | ||||
if (status) | if (status) | ||||
device_printf(dev, | device_printf(dev, | ||||
"init: Admin Queue shutdown failure; status %d\n", status); | "init: Admin Queue shutdown failure; status %d\n", status); | ||||
err_out: | err_out: | ||||
return (status); | return (status); | ||||
} | } | ||||
int | int | ||||
ixl_reset(struct ixl_pf *pf) | ixl_reset(struct ixl_pf *pf) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
u8 set_fc_err_mask; | u8 set_fc_err_mask; | ||||
int error = 0; | int error = 0; | ||||
// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary | // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary | ||||
i40e_clear_hw(hw); | i40e_clear_hw(hw); | ||||
error = i40e_pf_reset(hw); | error = i40e_pf_reset(hw); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "init: PF reset failure"); | device_printf(dev, "init: PF reset failure\n"); | ||||
error = EIO; | error = EIO; | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
error = i40e_init_adminq(hw); | error = i40e_init_adminq(hw); | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "init: Admin queue init failure;" | device_printf(dev, "init: Admin queue init failure;" | ||||
" status code %d", error); | " status code %d\n", error); | ||||
error = EIO; | error = EIO; | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
i40e_clear_pxe_mode(hw); | i40e_clear_pxe_mode(hw); | ||||
error = ixl_get_hw_capabilities(pf); | error = ixl_get_hw_capabilities(pf); | ||||
if (error) { | if (error) { | ||||
▲ Show 20 Lines • Show All 53 Lines • ▼ Show 20 Lines | if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || | ||||
if (error) { | if (error) { | ||||
device_printf(dev, "init: link restart failed, aq_err %d\n", | device_printf(dev, "init: link restart failed, aq_err %d\n", | ||||
hw->aq.asq_last_status); | hw->aq.asq_last_status); | ||||
goto err_out; | goto err_out; | ||||
} | } | ||||
} | } | ||||
/* Re-enable admin queue interrupt */ | |||||
if (pf->msix > 1) { | |||||
ixl_configure_intr0_msix(pf); | |||||
ixl_enable_intr0(hw); | |||||
} | |||||
err_out: | err_out: | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | /* | ||||
** MSIX Interrupt Handlers and Tasklets | ** MSIX Interrupt Handlers and Tasklets | ||||
*/ | */ | ||||
void | void | ||||
ixl_handle_que(void *context, int pending) | ixl_handle_que(void *context, int pending) | ||||
{ | { | ||||
struct ixl_queue *que = context; | struct ixl_queue *que = context; | ||||
struct ixl_vsi *vsi = que->vsi; | struct ixl_vsi *vsi = que->vsi; | ||||
struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | |||||
struct i40e_hw *hw = vsi->hw; | struct i40e_hw *hw = vsi->hw; | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
struct ifnet *ifp = vsi->ifp; | struct ifnet *ifp = vsi->ifp; | ||||
bool more; | bool more; | ||||
if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | ||||
more = ixl_rxeof(que, IXL_RX_LIMIT); | more = ixl_rxeof(que, IXL_RX_LIMIT); | ||||
IXL_TX_LOCK(txr); | IXL_TX_LOCK(txr); | ||||
ixl_txeof(que); | ixl_txeof(que); | ||||
if (!drbr_empty(ifp, txr->br)) | if (!drbr_empty(ifp, txr->br)) | ||||
ixl_mq_start_locked(ifp, txr); | ixl_mq_start_locked(ifp, txr); | ||||
IXL_TX_UNLOCK(txr); | IXL_TX_UNLOCK(txr); | ||||
if (more) { | if (more) { | ||||
taskqueue_enqueue(que->tq, &que->task); | taskqueue_enqueue(que->tq, &que->task); | ||||
return; | return; | ||||
} | } | ||||
} | } | ||||
/* Reenable this interrupt - hmmm */ | /* Re-enable queue interrupt */ | ||||
if (pf->msix > 1) | |||||
ixl_enable_queue(hw, que->me); | ixl_enable_queue(hw, que->me); | ||||
return; | else | ||||
ixl_enable_intr0(hw); | |||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Legacy Interrupt Service routine | * Legacy Interrupt Service routine | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
void | void | ||||
ixl_intr(void *arg) | ixl_intr(void *arg) | ||||
{ | { | ||||
struct ixl_pf *pf = arg; | struct ixl_pf *pf = arg; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
struct ifnet *ifp = vsi->ifp; | struct ifnet *ifp = vsi->ifp; | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
u32 icr0; | u32 icr0; | ||||
bool more_tx, more_rx; | bool more; | ||||
pf->admin_irq++; | pf->admin_irq++; | ||||
/* Protect against spurious interrupts */ | /* Clear PBA at start of ISR if using legacy interrupts */ | ||||
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) | if (pf->msix == 0) | ||||
return; | wr32(hw, I40E_PFINT_DYN_CTL0, | ||||
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | | |||||
(IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); | |||||
icr0 = rd32(hw, I40E_PFINT_ICR0); | icr0 = rd32(hw, I40E_PFINT_ICR0); | ||||
#ifdef PCI_IOV | #ifdef PCI_IOV | ||||
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) | if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) | ||||
taskqueue_enqueue(pf->tq, &pf->vflr_task); | taskqueue_enqueue(pf->tq, &pf->vflr_task); | ||||
#endif | #endif | ||||
if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { | if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) | ||||
taskqueue_enqueue(pf->tq, &pf->adminq); | taskqueue_enqueue(pf->tq, &pf->adminq); | ||||
} | |||||
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { | if (ifp->if_drv_flags & IFF_DRV_RUNNING) { | ||||
++que->irqs; | ++que->irqs; | ||||
more_rx = ixl_rxeof(que, IXL_RX_LIMIT); | more = ixl_rxeof(que, IXL_RX_LIMIT); | ||||
IXL_TX_LOCK(txr); | IXL_TX_LOCK(txr); | ||||
more_tx = ixl_txeof(que); | ixl_txeof(que); | ||||
if (!drbr_empty(vsi->ifp, txr->br)) | if (!drbr_empty(vsi->ifp, txr->br)) | ||||
more_tx = 1; | ixl_mq_start_locked(ifp, txr); | ||||
IXL_TX_UNLOCK(txr); | IXL_TX_UNLOCK(txr); | ||||
if (more) | |||||
taskqueue_enqueue(que->tq, &que->task); | |||||
} | } | ||||
ixl_enable_intr0(hw); | ixl_enable_intr0(hw); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* MSIX VSI Interrupt Service routine | * MSIX VSI Interrupt Service routine | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
void | void | ||||
ixl_msix_que(void *arg) | ixl_msix_que(void *arg) | ||||
{ | { | ||||
struct ixl_queue *que = arg; | struct ixl_queue *que = arg; | ||||
struct ixl_vsi *vsi = que->vsi; | struct ixl_vsi *vsi = que->vsi; | ||||
struct i40e_hw *hw = vsi->hw; | struct i40e_hw *hw = vsi->hw; | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
bool more_tx, more_rx; | bool more_tx, more_rx; | ||||
/* Protect against spurious interrupts */ | /* Protect against spurious interrupts */ | ||||
if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) | if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) | ||||
return; | return; | ||||
▲ Show 20 Lines • Show All 116 Lines • ▼ Show 20 Lines | |||||
void | void | ||||
ixl_set_promisc(struct ixl_vsi *vsi) | ixl_set_promisc(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ifnet *ifp = vsi->ifp; | struct ifnet *ifp = vsi->ifp; | ||||
struct i40e_hw *hw = vsi->hw; | struct i40e_hw *hw = vsi->hw; | ||||
int err, mcnt = 0; | int err, mcnt = 0; | ||||
bool uni = FALSE, multi = FALSE; | bool uni = FALSE, multi = FALSE; | ||||
if (ifp->if_flags & IFF_ALLMULTI) | if (ifp->if_flags & IFF_PROMISC) | ||||
uni = multi = TRUE; | |||||
else if (ifp->if_flags & IFF_ALLMULTI) | |||||
multi = TRUE; | multi = TRUE; | ||||
else { /* Need to count the multicast addresses */ | else { /* Need to count the multicast addresses */ | ||||
struct ifmultiaddr *ifma; | struct ifmultiaddr *ifma; | ||||
if_maddr_rlock(ifp); | if_maddr_rlock(ifp); | ||||
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { | TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { | ||||
if (ifma->ifma_addr->sa_family != AF_LINK) | if (ifma->ifma_addr->sa_family != AF_LINK) | ||||
continue; | continue; | ||||
if (mcnt == MAX_MULTICAST_ADDR) | if (mcnt == MAX_MULTICAST_ADDR) { | ||||
multi = TRUE; | |||||
break; | break; | ||||
} | |||||
mcnt++; | mcnt++; | ||||
} | } | ||||
if_maddr_runlock(ifp); | if_maddr_runlock(ifp); | ||||
} | } | ||||
if (mcnt >= MAX_MULTICAST_ADDR) | |||||
multi = TRUE; | |||||
if (ifp->if_flags & IFF_PROMISC) | |||||
uni = TRUE; | |||||
err = i40e_aq_set_vsi_unicast_promiscuous(hw, | err = i40e_aq_set_vsi_unicast_promiscuous(hw, | ||||
vsi->seid, uni, NULL, TRUE); | vsi->seid, uni, NULL, TRUE); | ||||
err = i40e_aq_set_vsi_multicast_promiscuous(hw, | err = i40e_aq_set_vsi_multicast_promiscuous(hw, | ||||
vsi->seid, multi, NULL); | vsi->seid, multi, NULL); | ||||
return; | return; | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
▲ Show 20 Lines • Show All 83 Lines • ▼ Show 20 Lines | SLIST_FOREACH(f, &vsi->ftl, next) { | ||||
} | } | ||||
} | } | ||||
if_maddr_runlock(ifp); | if_maddr_runlock(ifp); | ||||
if (mcnt > 0) | if (mcnt > 0) | ||||
ixl_del_hw_filters(vsi, mcnt); | ixl_del_hw_filters(vsi, mcnt); | ||||
} | } | ||||
static void | |||||
ixl_queue_sw_irq(struct ixl_pf *pf, int qidx) | |||||
{ | |||||
struct i40e_hw *hw = &pf->hw; | |||||
u32 mask; | |||||
/********************************************************************* | mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | | ||||
* Timer routine | I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | | ||||
* | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); | ||||
* This routine checks for link status,updates statistics, | |||||
* and runs the watchdog check. | |||||
* | |||||
* Only runs when the driver is configured UP and RUNNING. | |||||
* | |||||
**********************************************************************/ | |||||
void | if (pf->msix > 1) | ||||
ixl_local_timer(void *arg) | wr32(hw, I40E_PFINT_DYN_CTLN(qidx), mask); | ||||
else | |||||
wr32(hw, I40E_PFINT_DYN_CTL0, mask); | |||||
} | |||||
static int | |||||
ixl_queue_hang_check(struct ixl_pf *pf) | |||||
{ | { | ||||
struct ixl_pf *pf = arg; | |||||
struct i40e_hw *hw = &pf->hw; | |||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
int hung = 0; | |||||
u32 mask; | |||||
s32 timer, new_timer; | s32 timer, new_timer; | ||||
int hung = 0; | |||||
IXL_PF_LOCK_ASSERT(pf); | |||||
/* Fire off the adminq task */ | |||||
taskqueue_enqueue(pf->tq, &pf->adminq); | |||||
/* Update stats */ | |||||
ixl_update_stats_counters(pf); | |||||
/* Check status of the queues */ | |||||
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | | |||||
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | | |||||
I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); | |||||
for (int i = 0; i < vsi->num_queues; i++, que++) { | for (int i = 0; i < vsi->num_queues; i++, que++) { | ||||
txr = &que->txr; | txr = &que->txr; | ||||
/* | |||||
* If watchdog_timer is equal to defualt value set by ixl_txeof | |||||
* just substract hz and move on - the queue is most probably | |||||
* running. Otherwise check the value. | |||||
*/ | |||||
if (atomic_cmpset_rel_32(&txr->watchdog_timer, | |||||
IXL_WATCHDOG, (IXL_WATCHDOG) - hz) == 0) { | |||||
timer = atomic_load_acq_32(&txr->watchdog_timer); | timer = atomic_load_acq_32(&txr->watchdog_timer); | ||||
if (timer > 0) { | /* | ||||
* Again - if the timer was reset to default value | |||||
* then queue is running. Otherwise check if watchdog | |||||
* expired and act accrdingly. | |||||
*/ | |||||
if (timer > 0 && timer != IXL_WATCHDOG) { | |||||
new_timer = timer - hz; | new_timer = timer - hz; | ||||
if (new_timer <= 0) { | if (new_timer <= 0) { | ||||
atomic_store_rel_32(&txr->watchdog_timer, -1); | atomic_store_rel_32(&txr->watchdog_timer, -1); | ||||
device_printf(dev, "WARNING: queue %d " | device_printf(dev, "WARNING: queue %d " | ||||
"appears to be hung!\n", que->me); | "appears to be hung!\n", que->me); | ||||
++hung; | ++hung; | ||||
/* Try to unblock the queue with SW IRQ */ | |||||
ixl_queue_sw_irq(pf, i); | |||||
} else { | } else { | ||||
/* | /* | ||||
* If this fails, that means something in the TX path has updated | * If this fails, that means something in the TX path | ||||
* the watchdog, so it means the TX path is still working and | * has updated the watchdog, so it means the TX path | ||||
* the watchdog doesn't need to countdown. | * is still working and the watchdog doesn't need | ||||
* to countdown. | |||||
*/ | */ | ||||
atomic_cmpset_rel_32(&txr->watchdog_timer, timer, new_timer); | atomic_cmpset_rel_32(&txr->watchdog_timer, | ||||
/* Any queues with outstanding work get a sw irq */ | timer, new_timer); | ||||
wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask); | |||||
} | } | ||||
} | } | ||||
} | } | ||||
/* Reset when a queue shows hung */ | } | ||||
if (hung) | |||||
goto hung; | |||||
callout_reset(&pf->timer, hz, ixl_local_timer, pf); | return (hung); | ||||
return; | } | ||||
hung: | /********************************************************************* | ||||
device_printf(dev, "WARNING: Resetting!\n"); | * Timer routine | ||||
* | |||||
* This routine checks for link status, updates statistics, | |||||
* and runs the watchdog check. | |||||
* | |||||
* Only runs when the driver is configured UP and RUNNING. | |||||
* | |||||
**********************************************************************/ | |||||
void | |||||
ixl_local_timer(void *arg) | |||||
{ | |||||
struct ixl_pf *pf = arg; | |||||
IXL_PF_LOCK_ASSERT(pf); | |||||
/* Fire off the adminq task */ | |||||
taskqueue_enqueue(pf->tq, &pf->adminq); | |||||
/* Update stats */ | |||||
ixl_update_stats_counters(pf); | |||||
/* Increment stat when a queue shows hung */ | |||||
if (ixl_queue_hang_check(pf)) | |||||
pf->watchdog_events++; | pf->watchdog_events++; | ||||
ixl_init_locked(pf); | |||||
callout_reset(&pf->timer, hz, ixl_local_timer, pf); | |||||
} | } | ||||
void | void | ||||
ixl_link_up_msg(struct ixl_pf *pf) | ixl_link_up_msg(struct ixl_pf *pf) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ifnet *ifp = pf->vsi.ifp; | struct ifnet *ifp = pf->vsi.ifp; | ||||
char *req_fec_string, *neg_fec_string; | |||||
u8 fec_abilities; | |||||
log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, FEC: %s, Autoneg: %s, Flow Control: %s\n", | fec_abilities = hw->phy.link_info.req_fec_info; | ||||
/* If both RS and KR are requested, only show RS */ | |||||
if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) | |||||
req_fec_string = ixl_fec_string[0]; | |||||
else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) | |||||
req_fec_string = ixl_fec_string[1]; | |||||
else | |||||
req_fec_string = ixl_fec_string[2]; | |||||
if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) | |||||
neg_fec_string = ixl_fec_string[0]; | |||||
else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) | |||||
neg_fec_string = ixl_fec_string[1]; | |||||
else | |||||
neg_fec_string = ixl_fec_string[2]; | |||||
log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", | |||||
ifp->if_xname, | ifp->if_xname, | ||||
ixl_aq_speed_to_str(hw->phy.link_info.link_speed), | ixl_aq_speed_to_str(hw->phy.link_info.link_speed), | ||||
(hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) ? | req_fec_string, neg_fec_string, | ||||
"Clause 74 BASE-R FEC" : (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) ? | |||||
"Clause 108 RS-FEC" : "None", | |||||
(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", | (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", | ||||
(hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && | (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && | ||||
hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? | hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? | ||||
ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? | ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? | ||||
ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? | ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? | ||||
ixl_fc_string[1] : ixl_fc_string[0]); | ixl_fc_string[1] : ixl_fc_string[0]); | ||||
} | } | ||||
/* | /* | ||||
** Note: this routine updates the OS on the link state | ** Note: this routine updates the OS on the link state | ||||
** the real check of the hardware only happens with | ** the real check of the hardware only happens with | ||||
** a link interrupt. | ** a link interrupt. | ||||
*/ | */ | ||||
void | void | ||||
ixl_update_link_status(struct ixl_pf *pf) | ixl_update_link_status(struct ixl_pf *pf) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
struct ifnet *ifp = vsi->ifp; | struct ifnet *ifp = vsi->ifp; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
if (pf->link_up) { | if (pf->link_up) { | ||||
if (vsi->link_active == FALSE) { | if (vsi->link_active == FALSE) { | ||||
vsi->link_active = TRUE; | vsi->link_active = TRUE; | ||||
#if __FreeBSD_version >= 1100000 | |||||
ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed); | ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed); | ||||
#else | |||||
if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed)); | |||||
#endif | |||||
if_link_state_change(ifp, LINK_STATE_UP); | if_link_state_change(ifp, LINK_STATE_UP); | ||||
ixl_link_up_msg(pf); | ixl_link_up_msg(pf); | ||||
#ifdef PCI_IOV | |||||
ixl_broadcast_link_state(pf); | |||||
#endif | |||||
} | } | ||||
} else { /* Link down */ | } else { /* Link down */ | ||||
if (vsi->link_active == TRUE) { | if (vsi->link_active == TRUE) { | ||||
if (bootverbose) | if (bootverbose) | ||||
device_printf(dev, "Link is Down\n"); | device_printf(dev, "Link is Down\n"); | ||||
if_link_state_change(ifp, LINK_STATE_DOWN); | if_link_state_change(ifp, LINK_STATE_DOWN); | ||||
vsi->link_active = FALSE; | vsi->link_active = FALSE; | ||||
#ifdef PCI_IOV | |||||
ixl_broadcast_link_state(pf); | |||||
#endif | |||||
} | } | ||||
} | } | ||||
return; | |||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* This routine disables all traffic on the adapter by issuing a | * This routine disables all traffic on the adapter by issuing a | ||||
* global reset on the MAC and deallocates TX/RX buffers. | * global reset on the MAC and deallocates TX/RX buffers. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
▲ Show 20 Lines • Show All 246 Lines • ▼ Show 20 Lines | #endif | ||||
} | } | ||||
que->msix = vector; | que->msix = vector; | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
* When used in a virtualized environment PCI BUSMASTER capability may not be set | |||||
* so explicity set it here and rewrite the ENABLE in the MSIX control register | |||||
* at this point to cause the host to successfully initialize us. | |||||
*/ | |||||
void | |||||
ixl_set_busmaster(device_t dev) | |||||
{ | |||||
u16 pci_cmd_word; | |||||
pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); | |||||
pci_cmd_word |= PCIM_CMD_BUSMASTEREN; | |||||
pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); | |||||
} | |||||
/* | |||||
* rewrite the ENABLE in the MSIX control register | |||||
* to cause the host to successfully initialize us. | |||||
*/ | |||||
void | |||||
ixl_set_msix_enable(device_t dev) | |||||
{ | |||||
int msix_ctrl, rid; | |||||
pci_find_cap(dev, PCIY_MSIX, &rid); | |||||
rid += PCIR_MSIX_CTRL; | |||||
msix_ctrl = pci_read_config(dev, rid, 2); | |||||
msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; | |||||
pci_write_config(dev, rid, msix_ctrl, 2); | |||||
} | |||||
/* | |||||
* Allocate MSI/X vectors from the OS. | * Allocate MSI/X vectors from the OS. | ||||
* Returns 0 for legacy, 1 for MSI, >1 for MSIX. | * Returns 0 for legacy, 1 for MSI, >1 for MSIX. | ||||
*/ | */ | ||||
int | int | ||||
ixl_init_msix(struct ixl_pf *pf) | ixl_init_msix(struct ixl_pf *pf) | ||||
{ | { | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
#ifdef IXL_IW | |||||
#if __FreeBSD_version >= 1100000 | |||||
cpuset_t cpu_set; | |||||
#endif | |||||
#endif | |||||
int auto_max_queues; | int auto_max_queues; | ||||
int rid, want, vectors, queues, available; | int rid, want, vectors, queues, available; | ||||
#ifdef IXL_IW | #ifdef IXL_IW | ||||
int iw_want, iw_vectors; | int iw_want=0, iw_vectors; | ||||
pf->iw_msix = 0; | pf->iw_msix = 0; | ||||
#endif | #endif | ||||
/* Override by tuneable */ | /* Override by tuneable */ | ||||
if (!pf->enable_msix) | if (!pf->enable_msix) | ||||
goto no_msix; | goto no_msix; | ||||
/* Ensure proper operation in virtualized environment */ | |||||
ixl_set_busmaster(dev); | |||||
/* First try MSI/X */ | /* First try MSI/X */ | ||||
rid = PCIR_BAR(IXL_MSIX_BAR); | rid = PCIR_BAR(IXL_MSIX_BAR); | ||||
pf->msix_mem = bus_alloc_resource_any(dev, | pf->msix_mem = bus_alloc_resource_any(dev, | ||||
SYS_RES_MEMORY, &rid, RF_ACTIVE); | SYS_RES_MEMORY, &rid, RF_ACTIVE); | ||||
if (!pf->msix_mem) { | if (!pf->msix_mem) { | ||||
/* May not be enabled */ | /* May not be enabled */ | ||||
device_printf(pf->dev, | device_printf(pf->dev, | ||||
"Unable to map MSIX table\n"); | "Unable to map MSIX table\n"); | ||||
goto no_msix; | goto no_msix; | ||||
} | } | ||||
available = pci_msix_count(dev); | available = pci_msix_count(dev); | ||||
if (available < 2) { | if (available < 2) { | ||||
/* system has msix disabled (0), or only one vector (1) */ | /* system has msix disabled (0), or only one vector (1) */ | ||||
device_printf(pf->dev, "Less than two MSI-X vectors available\n"); | |||||
bus_release_resource(dev, SYS_RES_MEMORY, | bus_release_resource(dev, SYS_RES_MEMORY, | ||||
rid, pf->msix_mem); | rid, pf->msix_mem); | ||||
pf->msix_mem = NULL; | pf->msix_mem = NULL; | ||||
goto no_msix; | goto no_msix; | ||||
} | } | ||||
/* Clamp max number of queues based on: | /* Clamp max number of queues based on: | ||||
* - # of MSI-X vectors available | * - # of MSI-X vectors available | ||||
Show All 38 Lines | else { | ||||
"MSIX Configuration Problem, " | "MSIX Configuration Problem, " | ||||
"%d vectors available but %d wanted!\n", | "%d vectors available but %d wanted!\n", | ||||
available, want); | available, want); | ||||
pf->msix_mem = NULL; | pf->msix_mem = NULL; | ||||
goto no_msix; /* Will go to Legacy setup */ | goto no_msix; /* Will go to Legacy setup */ | ||||
} | } | ||||
#ifdef IXL_IW | #ifdef IXL_IW | ||||
if (ixl_enable_iwarp) { | if (ixl_enable_iwarp && hw->func_caps.iwarp) { | ||||
/* iWARP wants additional vector for CQP */ | #if __FreeBSD_version >= 1100000 | ||||
iw_want = mp_ncpus + 1; | if(bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0) | ||||
{ | |||||
iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX); | |||||
} | |||||
#endif | |||||
if(!iw_want) | |||||
iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX); | |||||
if(ixl_limit_iwarp_msix > 0) | |||||
iw_want = min(iw_want, ixl_limit_iwarp_msix); | |||||
else | |||||
iw_want = min(iw_want, 1); | |||||
available -= vectors; | available -= vectors; | ||||
if (available > 0) { | if (available > 0) { | ||||
iw_vectors = (available >= iw_want) ? | iw_vectors = (available >= iw_want) ? | ||||
iw_want : available; | iw_want : available; | ||||
vectors += iw_vectors; | vectors += iw_vectors; | ||||
} else | } else | ||||
iw_vectors = 0; | iw_vectors = 0; | ||||
} | } | ||||
#endif | #endif | ||||
ixl_set_msix_enable(dev); | ixl_set_msix_enable(dev); | ||||
if (pci_alloc_msix(dev, &vectors) == 0) { | if (pci_alloc_msix(dev, &vectors) == 0) { | ||||
device_printf(pf->dev, | device_printf(pf->dev, | ||||
"Using MSIX interrupts with %d vectors\n", vectors); | "Using MSIX interrupts with %d vectors\n", vectors); | ||||
pf->msix = vectors; | pf->msix = vectors; | ||||
#ifdef IXL_IW | #ifdef IXL_IW | ||||
if (ixl_enable_iwarp) | if (ixl_enable_iwarp && hw->func_caps.iwarp) | ||||
{ | |||||
pf->iw_msix = iw_vectors; | pf->iw_msix = iw_vectors; | ||||
device_printf(pf->dev, | |||||
"Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n", | |||||
iw_vectors); | |||||
} | |||||
#endif | #endif | ||||
pf->vsi.num_queues = queues; | pf->vsi.num_queues = queues; | ||||
#ifdef RSS | #ifdef RSS | ||||
/* | /* | ||||
* If we're doing RSS, the number of queues needs to | * If we're doing RSS, the number of queues needs to | ||||
* match the number of RSS buckets that are configured. | * match the number of RSS buckets that are configured. | ||||
* | * | ||||
Show All 40 Lines | ixl_configure_intr0_msix(struct ixl_pf *pf) | ||||
rd32(hw, I40E_PFINT_ICR0); /* read to clear */ | rd32(hw, I40E_PFINT_ICR0); /* read to clear */ | ||||
reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | | reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | | ||||
I40E_PFINT_ICR0_ENA_GRST_MASK | | I40E_PFINT_ICR0_ENA_GRST_MASK | | ||||
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | | ||||
I40E_PFINT_ICR0_ENA_ADMINQ_MASK | | I40E_PFINT_ICR0_ENA_ADMINQ_MASK | | ||||
I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | | ||||
I40E_PFINT_ICR0_ENA_VFLR_MASK | | I40E_PFINT_ICR0_ENA_VFLR_MASK | | ||||
I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | | |||||
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; | ||||
wr32(hw, I40E_PFINT_ICR0_ENA, reg); | wr32(hw, I40E_PFINT_ICR0_ENA, reg); | ||||
/* | /* | ||||
* 0x7FF is the end of the queue list. | * 0x7FF is the end of the queue list. | ||||
* This means we won't use MSI-X vector 0 for a queue interrupt | * This means we won't use MSI-X vector 0 for a queue interrupt | ||||
* in MSIX mode. | * in MSIX mode. | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 64 Lines • ▼ Show 20 Lines | wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), | ||||
vsi->rx_itr_setting); | vsi->rx_itr_setting); | ||||
rxr->itr = vsi->rx_itr_setting; | rxr->itr = vsi->rx_itr_setting; | ||||
/* Setup "other" causes */ | /* Setup "other" causes */ | ||||
reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | ||||
| I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | ||||
| I40E_PFINT_ICR0_ENA_GRST_MASK | | I40E_PFINT_ICR0_ENA_GRST_MASK | ||||
| I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | ||||
| I40E_PFINT_ICR0_ENA_GPIO_MASK | |||||
| I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | |||||
| I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | ||||
| I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | ||||
| I40E_PFINT_ICR0_ENA_VFLR_MASK | | I40E_PFINT_ICR0_ENA_VFLR_MASK | ||||
| I40E_PFINT_ICR0_ENA_ADMINQ_MASK | | I40E_PFINT_ICR0_ENA_ADMINQ_MASK | ||||
; | ; | ||||
wr32(hw, I40E_PFINT_ICR0_ENA, reg); | wr32(hw, I40E_PFINT_ICR0_ENA, reg); | ||||
/* No ITR for non-queue interrupts */ | /* No ITR for non-queue interrupts */ | ||||
wr32(hw, I40E_PFINT_STAT_CTL0, | wr32(hw, I40E_PFINT_STAT_CTL0, | ||||
IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT); | IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT); | ||||
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ | /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ | ||||
wr32(hw, I40E_PFINT_LNKLST0, 0); | wr32(hw, I40E_PFINT_LNKLST0, 0); | ||||
/* Associate the queue pair to the vector and enable the q int */ | /* Associate the queue pair to the vector and enable the q int */ | ||||
reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | ||||
| (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | ||||
| (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); | | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); | ||||
wr32(hw, I40E_QINT_RQCTL(0), reg); | wr32(hw, I40E_QINT_RQCTL(0), reg); | ||||
reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | ||||
| (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | ||||
| (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); | | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); | ||||
wr32(hw, I40E_QINT_TQCTL(0), reg); | wr32(hw, I40E_QINT_TQCTL(0), reg); | ||||
} | } | ||||
int | int | ||||
ixl_allocate_pci_resources(struct ixl_pf *pf) | ixl_allocate_pci_resources(struct ixl_pf *pf) | ||||
{ | { | ||||
int rid; | int rid; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
/* Map BAR0 */ | /* Map BAR0 */ | ||||
rid = PCIR_BAR(0); | rid = PCIR_BAR(0); | ||||
pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, | pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, | ||||
&rid, RF_ACTIVE); | &rid, RF_ACTIVE); | ||||
if (!(pf->pci_mem)) { | if (!(pf->pci_mem)) { | ||||
device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); | device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); | ||||
return (ENXIO); | return (ENXIO); | ||||
} | } | ||||
/* Ensure proper PCI device operation */ | |||||
ixl_set_busmaster(dev); | |||||
/* Save off the PCI information */ | /* Save off the PCI information */ | ||||
hw->vendor_id = pci_get_vendor(dev); | hw->vendor_id = pci_get_vendor(dev); | ||||
hw->device_id = pci_get_device(dev); | hw->device_id = pci_get_device(dev); | ||||
hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); | hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); | ||||
hw->subsystem_vendor_id = | hw->subsystem_vendor_id = | ||||
pci_read_config(dev, PCIR_SUBVEND_0, 2); | pci_read_config(dev, PCIR_SUBVEND_0, 2); | ||||
hw->subsystem_device_id = | hw->subsystem_device_id = | ||||
▲ Show 20 Lines • Show All 157 Lines • ▼ Show 20 Lines | ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types) | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX)) | if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) | if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) | ||||
|| phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1)) | || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC)) | if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_SFI)) | if (phy_types & (I40E_CAP_PHY_TYPE_SFI)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4)) | if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR)) | if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2)) | if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) | if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI)) | if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR)) | if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR)) | if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR)) | if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR)) | if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR)) | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_UNKNOWN, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL); | ||||
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC)) | |||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL); | |||||
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC)) | |||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL); | |||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Setup networking device structure and register an interface. | * Setup networking device structure and register an interface. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
int | int | ||||
Show All 23 Lines | |||||
#if __FreeBSD_version >= 1100036 | #if __FreeBSD_version >= 1100036 | ||||
if_setgetcounterfn(ifp, ixl_get_counter); | if_setgetcounterfn(ifp, ixl_get_counter); | ||||
#endif | #endif | ||||
ifp->if_transmit = ixl_mq_start; | ifp->if_transmit = ixl_mq_start; | ||||
ifp->if_qflush = ixl_qflush; | ifp->if_qflush = ixl_qflush; | ||||
ifp->if_snd.ifq_maxlen = que->num_desc - 2; | ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2; | ||||
vsi->max_frame_size = | vsi->max_frame_size = | ||||
ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN | ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN | ||||
+ ETHER_VLAN_ENCAP_LEN; | + ETHER_VLAN_ENCAP_LEN; | ||||
/* Set TSO limits */ | /* Set TSO limits */ | ||||
ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN); | ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN); | ||||
ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS; | ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS; | ||||
ifp->if_hw_tsomaxsegsize = PAGE_SIZE; | ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE; | ||||
/* | /* | ||||
* Tell the upper layer(s) we support long frames. | * Tell the upper layer(s) we support long frames. | ||||
*/ | */ | ||||
ifp->if_hdrlen = sizeof(struct ether_vlan_header); | ifp->if_hdrlen = sizeof(struct ether_vlan_header); | ||||
ifp->if_capabilities |= IFCAP_HWCSUM; | ifp->if_capabilities |= IFCAP_HWCSUM; | ||||
ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; | ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; | ||||
Show All 35 Lines | #endif | ||||
} | } | ||||
if (aq_error) { | if (aq_error) { | ||||
if (aq_error == I40E_ERR_UNKNOWN_PHY) | if (aq_error == I40E_ERR_UNKNOWN_PHY) | ||||
device_printf(dev, "Unknown PHY type detected!\n"); | device_printf(dev, "Unknown PHY type detected!\n"); | ||||
else | else | ||||
device_printf(dev, | device_printf(dev, | ||||
"Error getting supported media types, err %d," | "Error getting supported media types, err %d," | ||||
" AQ error %d\n", aq_error, hw->aq.asq_last_status); | " AQ error %d\n", aq_error, hw->aq.asq_last_status); | ||||
return (0); | } else { | ||||
} | |||||
pf->supported_speeds = abilities.link_speed; | pf->supported_speeds = abilities.link_speed; | ||||
#if __FreeBSD_version >= 1100000 | |||||
ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds); | ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds); | ||||
#else | |||||
if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds)); | |||||
#endif | |||||
ixl_add_ifmedia(vsi, hw->phy.phy_types); | ixl_add_ifmedia(vsi, hw->phy.phy_types); | ||||
} | |||||
/* Use autoselect media by default */ | /* Use autoselect media by default */ | ||||
ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); | ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); | ||||
ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO); | ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO); | ||||
ether_ifattach(ifp, hw->mac.addr); | ether_ifattach(ifp, hw->mac.addr); | ||||
return (0); | return (0); | ||||
Show All 11 Lines | struct i40e_aqc_get_link_status *status = | ||||
(struct i40e_aqc_get_link_status *)&e->desc.params.raw; | (struct i40e_aqc_get_link_status *)&e->desc.params.raw; | ||||
/* Request link status from adapter */ | /* Request link status from adapter */ | ||||
hw->phy.get_link_info = TRUE; | hw->phy.get_link_info = TRUE; | ||||
i40e_get_link_status(hw, &pf->link_up); | i40e_get_link_status(hw, &pf->link_up); | ||||
/* Print out message if an unqualified module is found */ | /* Print out message if an unqualified module is found */ | ||||
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && | if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && | ||||
(pf->advertised_speed) && | |||||
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && | (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && | ||||
(!(status->link_info & I40E_AQ_LINK_UP))) | (!(status->link_info & I40E_AQ_LINK_UP))) | ||||
device_printf(dev, "Link failed because " | device_printf(dev, "Link failed because " | ||||
"an unqualified module was detected!\n"); | "an unqualified module was detected!\n"); | ||||
/* Update OS link info */ | /* Update OS link info */ | ||||
ixl_update_link_status(pf); | ixl_update_link_status(pf); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 140 Lines • ▼ Show 20 Lines | for (int i = 0; i < vsi->num_queues; i++, que++) { | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
struct rx_ring *rxr = &que->rxr; | struct rx_ring *rxr = &que->rxr; | ||||
struct i40e_hmc_obj_txq tctx; | struct i40e_hmc_obj_txq tctx; | ||||
struct i40e_hmc_obj_rxq rctx; | struct i40e_hmc_obj_rxq rctx; | ||||
u32 txctl; | u32 txctl; | ||||
u16 size; | u16 size; | ||||
/* Setup the HMC TX Context */ | /* Setup the HMC TX Context */ | ||||
size = que->num_desc * sizeof(struct i40e_tx_desc); | size = que->num_tx_desc * sizeof(struct i40e_tx_desc); | ||||
memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq)); | bzero(&tctx, sizeof(tctx)); | ||||
tctx.new_context = 1; | tctx.new_context = 1; | ||||
tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS); | tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS); | ||||
tctx.qlen = que->num_desc; | tctx.qlen = que->num_tx_desc; | ||||
tctx.fc_ena = 0; | tctx.fc_ena = 0; /* Disable FCoE */ | ||||
tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */ | /* | ||||
/* Enable HEAD writeback */ | * This value needs to pulled from the VSI that this queue | ||||
* is assigned to. Index into array is traffic class. | |||||
*/ | |||||
tctx.rdylist = vsi->info.qs_handle[0]; | |||||
/* | |||||
* Set these to enable Head Writeback | |||||
* - Address is last entry in TX ring (reserved for HWB index) | |||||
* Leave these as 0 for Descriptor Writeback | |||||
*/ | |||||
if (vsi->enable_head_writeback) { | |||||
tctx.head_wb_ena = 1; | tctx.head_wb_ena = 1; | ||||
tctx.head_wb_addr = txr->dma.pa + | tctx.head_wb_addr = txr->dma.pa + | ||||
(que->num_desc * sizeof(struct i40e_tx_desc)); | (que->num_tx_desc * sizeof(struct i40e_tx_desc)); | ||||
} | |||||
tctx.rdylist_act = 0; | tctx.rdylist_act = 0; | ||||
err = i40e_clear_lan_tx_queue_context(hw, i); | err = i40e_clear_lan_tx_queue_context(hw, i); | ||||
if (err) { | if (err) { | ||||
device_printf(dev, "Unable to clear TX context\n"); | device_printf(dev, "Unable to clear TX context\n"); | ||||
break; | break; | ||||
} | } | ||||
err = i40e_set_lan_tx_queue_context(hw, i, &tctx); | err = i40e_set_lan_tx_queue_context(hw, i, &tctx); | ||||
if (err) { | if (err) { | ||||
Show All 21 Lines | for (int i = 0; i < vsi->num_queues; i++, que++) { | ||||
/* Set up an RX context for the HMC */ | /* Set up an RX context for the HMC */ | ||||
memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq)); | memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq)); | ||||
rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT; | rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT; | ||||
/* ignore header split for now */ | /* ignore header split for now */ | ||||
rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; | rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; | ||||
rctx.rxmax = (vsi->max_frame_size < max_rxmax) ? | rctx.rxmax = (vsi->max_frame_size < max_rxmax) ? | ||||
vsi->max_frame_size : max_rxmax; | vsi->max_frame_size : max_rxmax; | ||||
rctx.dtype = 0; | rctx.dtype = 0; | ||||
rctx.dsize = 1; /* do 32byte descriptors */ | rctx.dsize = 1; /* do 32byte descriptors */ | ||||
rctx.hsplit_0 = 0; /* no HDR split initially */ | rctx.hsplit_0 = 0; /* no header split */ | ||||
rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS); | rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS); | ||||
rctx.qlen = que->num_desc; | rctx.qlen = que->num_rx_desc; | ||||
rctx.tphrdesc_ena = 1; | rctx.tphrdesc_ena = 1; | ||||
rctx.tphwdesc_ena = 1; | rctx.tphwdesc_ena = 1; | ||||
rctx.tphdata_ena = 0; | rctx.tphdata_ena = 0; /* Header Split related */ | ||||
rctx.tphhead_ena = 0; | rctx.tphhead_ena = 0; /* Header Split related */ | ||||
rctx.lrxqthresh = 2; | rctx.lrxqthresh = 2; /* Interrupt at <128 desc avail */ | ||||
rctx.crcstrip = 1; | rctx.crcstrip = 1; | ||||
rctx.l2tsel = 1; | rctx.l2tsel = 1; | ||||
rctx.showiv = 1; | rctx.showiv = 1; /* Strip inner VLAN header */ | ||||
rctx.fc_ena = 0; | rctx.fc_ena = 0; /* Disable FCoE */ | ||||
rctx.prefena = 1; | rctx.prefena = 1; /* Prefetch descriptors */ | ||||
err = i40e_clear_lan_rx_queue_context(hw, i); | err = i40e_clear_lan_rx_queue_context(hw, i); | ||||
if (err) { | if (err) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"Unable to clear RX context %d\n", i); | "Unable to clear RX context %d\n", i); | ||||
break; | break; | ||||
} | } | ||||
err = i40e_set_lan_rx_queue_context(hw, i, &rctx); | err = i40e_set_lan_rx_queue_context(hw, i, &rctx); | ||||
Show All 10 Lines | #ifdef DEV_NETMAP | ||||
/* preserve queue */ | /* preserve queue */ | ||||
if (vsi->ifp->if_capenable & IFCAP_NETMAP) { | if (vsi->ifp->if_capenable & IFCAP_NETMAP) { | ||||
struct netmap_adapter *na = NA(vsi->ifp); | struct netmap_adapter *na = NA(vsi->ifp); | ||||
struct netmap_kring *kring = na->rx_rings[i]; | struct netmap_kring *kring = na->rx_rings[i]; | ||||
int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); | int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); | ||||
wr32(vsi->hw, I40E_QRX_TAIL(que->me), t); | wr32(vsi->hw, I40E_QRX_TAIL(que->me), t); | ||||
} else | } else | ||||
#endif /* DEV_NETMAP */ | #endif /* DEV_NETMAP */ | ||||
wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1); | wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1); | ||||
} | } | ||||
return (err); | return (err); | ||||
} | } | ||||
/********************************************************************* | |||||
* | |||||
* Free all VSI structs. | |||||
* | |||||
**********************************************************************/ | |||||
void | void | ||||
ixl_free_vsi(struct ixl_vsi *vsi) | ixl_vsi_free_queues(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | struct ixl_pf *pf = (struct ixl_pf *)vsi->back; | ||||
struct ixl_queue *que = vsi->queues; | struct ixl_queue *que = vsi->queues; | ||||
/* Free station queues */ | if (NULL == vsi->queues) | ||||
if (!vsi->queues) | return; | ||||
goto free_filters; | |||||
for (int i = 0; i < vsi->num_queues; i++, que++) { | for (int i = 0; i < vsi->num_queues; i++, que++) { | ||||
struct tx_ring *txr = &que->txr; | struct tx_ring *txr = &que->txr; | ||||
struct rx_ring *rxr = &que->rxr; | struct rx_ring *rxr = &que->rxr; | ||||
if (!mtx_initialized(&txr->mtx)) /* uninitialized */ | if (!mtx_initialized(&txr->mtx)) /* uninitialized */ | ||||
continue; | continue; | ||||
IXL_TX_LOCK(txr); | IXL_TX_LOCK(txr); | ||||
if (txr->br) | |||||
buf_ring_free(txr->br, M_DEVBUF); | |||||
ixl_free_que_tx(que); | ixl_free_que_tx(que); | ||||
if (txr->base) | if (txr->base) | ||||
i40e_free_dma_mem(&pf->hw, &txr->dma); | i40e_free_dma_mem(&pf->hw, &txr->dma); | ||||
IXL_TX_UNLOCK(txr); | IXL_TX_UNLOCK(txr); | ||||
IXL_TX_LOCK_DESTROY(txr); | IXL_TX_LOCK_DESTROY(txr); | ||||
if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ | if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ | ||||
continue; | continue; | ||||
IXL_RX_LOCK(rxr); | IXL_RX_LOCK(rxr); | ||||
ixl_free_que_rx(que); | ixl_free_que_rx(que); | ||||
if (rxr->base) | if (rxr->base) | ||||
i40e_free_dma_mem(&pf->hw, &rxr->dma); | i40e_free_dma_mem(&pf->hw, &rxr->dma); | ||||
IXL_RX_UNLOCK(rxr); | IXL_RX_UNLOCK(rxr); | ||||
IXL_RX_LOCK_DESTROY(rxr); | IXL_RX_LOCK_DESTROY(rxr); | ||||
} | } | ||||
} | |||||
/********************************************************************* | |||||
* | |||||
* Free all VSI structs. | |||||
* | |||||
**********************************************************************/ | |||||
void | |||||
ixl_free_vsi(struct ixl_vsi *vsi) | |||||
{ | |||||
/* Free station queues */ | |||||
ixl_vsi_free_queues(vsi); | |||||
if (vsi->queues) | |||||
free(vsi->queues, M_DEVBUF); | free(vsi->queues, M_DEVBUF); | ||||
free_filters: | |||||
/* Free VSI filter list */ | /* Free VSI filter list */ | ||||
ixl_free_mac_filters(vsi); | ixl_free_mac_filters(vsi); | ||||
} | } | ||||
void | void | ||||
ixl_free_mac_filters(struct ixl_vsi *vsi) | ixl_free_mac_filters(struct ixl_vsi *vsi) | ||||
{ | { | ||||
struct ixl_mac_filter *f; | struct ixl_mac_filter *f; | ||||
while (!SLIST_EMPTY(&vsi->ftl)) { | while (!SLIST_EMPTY(&vsi->ftl)) { | ||||
f = SLIST_FIRST(&vsi->ftl); | f = SLIST_FIRST(&vsi->ftl); | ||||
SLIST_REMOVE_HEAD(&vsi->ftl, next); | SLIST_REMOVE_HEAD(&vsi->ftl, next); | ||||
free(f, M_DEVBUF); | free(f, M_DEVBUF); | ||||
} | } | ||||
} | } | ||||
/*
 * Fill out fields in queue struct and setup tx/rx memory and structs.
 *
 * Allocates, in order: TX lock, TX descriptor DMA ring, TX soft state,
 * TX buf ring, RX lock, RX descriptor DMA ring, RX soft state.  On
 * failure the error labels unwind exactly the resources acquired so
 * far, in reverse order.
 *
 * Returns 0 on success or ENOMEM if any allocation fails.
 */
static int
ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index)
{
	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	struct tx_ring *txr = &que->txr;
	struct rx_ring *rxr = &que->rxr;
	int error = 0;
	int rsize, tsize;

	/* Ring sizes are configured per-VSI */
	que->num_tx_desc = vsi->num_tx_desc;
	que->num_rx_desc = vsi->num_rx_desc;
	que->me = index;
	que->vsi = vsi;

	txr->que = que;
	txr->tail = I40E_QTX_TAIL(que->me);

	/* Initialize the TX lock */
	snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
	    device_get_nameunit(dev), que->me);
	mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
	/*
	 * Create the TX descriptor ring
	 *
	 * In Head Writeback mode, the descriptor ring is one bigger
	 * than the number of descriptors for space for the HW to
	 * write back index of last completed descriptor.
	 */
	if (vsi->enable_head_writeback) {
		tsize = roundup2((que->num_tx_desc *
		    sizeof(struct i40e_tx_desc)) +
		    sizeof(u32), DBA_ALIGN);
	} else {
		tsize = roundup2((que->num_tx_desc *
		    sizeof(struct i40e_tx_desc)), DBA_ALIGN);
	}
	if (i40e_allocate_dma_mem(hw,
	    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
		device_printf(dev,
		    "Unable to allocate TX Descriptor memory\n");
		error = ENOMEM;
		goto err_destroy_tx_mtx;
	}
	txr->base = (struct i40e_tx_desc *)txr->dma.va;
	bzero((void *)txr->base, tsize);
	/* Now allocate transmit soft structs for the ring */
	if (ixl_allocate_tx_data(que)) {
		device_printf(dev,
		    "Critical Failure setting up TX structures\n");
		error = ENOMEM;
		goto err_free_tx_dma;
	}
	/* Allocate a buf ring */
	txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
	    M_NOWAIT, &txr->mtx);
	if (txr->br == NULL) {
		device_printf(dev,
		    "Critical Failure setting up TX buf ring\n");
		error = ENOMEM;
		goto err_free_tx_data;
	}

	rsize = roundup2(que->num_rx_desc *
	    sizeof(union i40e_rx_desc), DBA_ALIGN);
	rxr->que = que;
	rxr->tail = I40E_QRX_TAIL(que->me);

	/* Initialize the RX side lock */
	snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
	    device_get_nameunit(dev), que->me);
	mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);

	if (i40e_allocate_dma_mem(hw,
	    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
		device_printf(dev,
		    "Unable to allocate RX Descriptor memory\n");
		error = ENOMEM;
		goto err_destroy_rx_mtx;
	}
	rxr->base = (union i40e_rx_desc *)rxr->dma.va;
	bzero((void *)rxr->base, rsize);

	/* Allocate receive soft structs for the ring */
	if (ixl_allocate_rx_data(que)) {
		device_printf(dev,
		    "Critical Failure setting up receive structs\n");
		error = ENOMEM;
		goto err_free_rx_dma;
	}

	return (0);

	/*
	 * Error unwind: each label releases the resource acquired just
	 * before the failing step, then falls through to the earlier ones.
	 */
err_free_rx_dma:
	i40e_free_dma_mem(&pf->hw, &rxr->dma);
err_destroy_rx_mtx:
	mtx_destroy(&rxr->mtx);
	/* err_free_tx_buf_ring */
	buf_ring_free(txr->br, M_DEVBUF);
err_free_tx_data:
	ixl_free_que_tx(que);
err_free_tx_dma:
	i40e_free_dma_mem(&pf->hw, &txr->dma);
err_destroy_tx_mtx:
	mtx_destroy(&txr->mtx);

	return (error);
}
int | |||||
ixl_vsi_setup_queues(struct ixl_vsi *vsi) | |||||
{ | |||||
struct ixl_queue *que; | |||||
int error = 0; | |||||
for (int i = 0; i < vsi->num_queues; i++) { | |||||
que = &vsi->queues[i]; | |||||
error = ixl_vsi_setup_queue(vsi, que, i); | |||||
if (error) | |||||
break; | |||||
} | |||||
return (error); | |||||
} | |||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Allocate memory for the VSI (virtual station interface) and their | * Allocate memory for the VSI (virtual station interface) and their | ||||
* associated queues, rings and the descriptors associated with each, | * associated queues, rings and the descriptors associated with each, | ||||
* called only once at attach. | * called only once at attach. | ||||
* | * | ||||
**********************************************************************/ | **********************************************************************/ | ||||
int | int | ||||
ixl_setup_stations(struct ixl_pf *pf) | ixl_setup_stations(struct ixl_pf *pf) | ||||
{ | { | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
struct ixl_vsi *vsi; | struct ixl_vsi *vsi; | ||||
struct ixl_queue *que; | |||||
int error = 0; | int error = 0; | ||||
vsi = &pf->vsi; | vsi = &pf->vsi; | ||||
vsi->back = (void *)pf; | vsi->back = (void *)pf; | ||||
vsi->hw = &pf->hw; | vsi->hw = &pf->hw; | ||||
vsi->id = 0; | vsi->id = 0; | ||||
vsi->num_vlans = 0; | vsi->num_vlans = 0; | ||||
vsi->back = pf; | vsi->back = pf; | ||||
/* Get memory for the station queues */ | /* Get memory for the station queues */ | ||||
if (!(vsi->queues = | if (!(vsi->queues = | ||||
(struct ixl_queue *) malloc(sizeof(struct ixl_queue) * | (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * | ||||
vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { | vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { | ||||
device_printf(dev, "Unable to allocate queue memory\n"); | device_printf(dev, "Unable to allocate queue memory\n"); | ||||
error = ENOMEM; | error = ENOMEM; | ||||
return (error); | goto ixl_setup_stations_err; | ||||
} | } | ||||
/* Then setup each queue */ | /* Then setup each queue */ | ||||
for (int i = 0; i < vsi->num_queues; i++) { | error = ixl_vsi_setup_queues(vsi); | ||||
que = &vsi->queues[i]; | ixl_setup_stations_err: | ||||
error = ixl_setup_queue(que, pf, i); | |||||
if (error) | |||||
return (error); | return (error); | ||||
} | } | ||||
return (0); | |||||
} | |||||
/* | /* | ||||
** Provide a update to the queue RX | ** Provide a update to the queue RX | ||||
** interrupt moderation value. | ** interrupt moderation value. | ||||
*/ | */ | ||||
void | void | ||||
ixl_set_queue_rx_itr(struct ixl_queue *que) | ixl_set_queue_rx_itr(struct ixl_queue *que) | ||||
{ | { | ||||
struct ixl_vsi *vsi = que->vsi; | struct ixl_vsi *vsi = que->vsi; | ||||
▲ Show 20 Lines • Show All 284 Lines • ▼ Show 20 Lines | ixl_add_hw_stats(struct ixl_pf *pf) | ||||
struct sysctl_oid *queue_node; | struct sysctl_oid *queue_node; | ||||
struct sysctl_oid_list *queue_list; | struct sysctl_oid_list *queue_list; | ||||
struct tx_ring *txr; | struct tx_ring *txr; | ||||
struct rx_ring *rxr; | struct rx_ring *rxr; | ||||
char queue_namebuf[QUEUE_NAME_LEN]; | char queue_namebuf[QUEUE_NAME_LEN]; | ||||
/* Driver statistics */ | /* Driver statistics */ | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events", | ||||
CTLFLAG_RD, &pf->watchdog_events, | CTLFLAG_RD, &pf->watchdog_events, | ||||
"Watchdog timeouts"); | "Watchdog timeouts"); | ||||
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq", | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq", | ||||
CTLFLAG_RD, &pf->admin_irq, | CTLFLAG_RD, &pf->admin_irq, | ||||
"Admin Queue IRQ Handled"); | "Admin Queue IRQ Handled"); | ||||
ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf"); | ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf"); | ||||
vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node); | vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node); | ||||
/* Queue statistics */ | /* Queue statistics */ | ||||
for (int q = 0; q < vsi->num_queues; q++) { | for (int q = 0; q < vsi->num_queues; q++) { | ||||
Show All 40 Lines | SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err", | ||||
"Queue Rx Descriptor Errors"); | "Queue Rx Descriptor Errors"); | ||||
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr", | SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr", | ||||
CTLFLAG_RD, &(rxr->itr), 0, | CTLFLAG_RD, &(rxr->itr), 0, | ||||
"Queue Rx ITR Interval"); | "Queue Rx ITR Interval"); | ||||
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr", | SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr", | ||||
CTLFLAG_RD, &(txr->itr), 0, | CTLFLAG_RD, &(txr->itr), 0, | ||||
"Queue Tx ITR Interval"); | "Queue Tx ITR Interval"); | ||||
#ifdef IXL_DEBUG | #ifdef IXL_DEBUG | ||||
SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog", | |||||
CTLFLAG_RD, &(txr->watchdog_timer), 0, | |||||
"Ticks before watchdog timer causes interface reinit"); | |||||
SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail", | |||||
CTLFLAG_RD, &(txr->next_avail), 0, | |||||
"Next TX descriptor to be used"); | |||||
SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean", | |||||
CTLFLAG_RD, &(txr->next_to_clean), 0, | |||||
"Next TX descriptor to be cleaned"); | |||||
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done", | SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done", | ||||
CTLFLAG_RD, &(rxr->not_done), | CTLFLAG_RD, &(rxr->not_done), | ||||
"Queue Rx Descriptors not Done"); | "Queue Rx Descriptors not Done"); | ||||
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh", | SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh", | ||||
CTLFLAG_RD, &(rxr->next_refresh), 0, | CTLFLAG_RD, &(rxr->next_refresh), 0, | ||||
"Queue Rx Descriptors not Done"); | "Queue Rx Descriptors not Done"); | ||||
SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check", | SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check", | ||||
CTLFLAG_RD, &(rxr->next_check), 0, | CTLFLAG_RD, &(rxr->next_check), 0, | ||||
"Queue Rx Descriptors not Done"); | "Queue Rx Descriptors not Done"); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail", | |||||
CTLTYPE_UINT | CTLFLAG_RD, &queues[q], | |||||
sizeof(struct ixl_queue), | |||||
ixl_sysctl_qtx_tail_handler, "IU", | |||||
"Queue Transmit Descriptor Tail"); | |||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail", | SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail", | ||||
CTLTYPE_UINT | CTLFLAG_RD, &queues[q], | CTLTYPE_UINT | CTLFLAG_RD, &queues[q], | ||||
sizeof(struct ixl_queue), | sizeof(struct ixl_queue), | ||||
ixl_sysctl_qrx_tail_handler, "IU", | ixl_sysctl_qrx_tail_handler, "IU", | ||||
"Queue Receive Descriptor Tail"); | "Queue Receive Descriptor Tail"); | ||||
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail", | |||||
CTLTYPE_UINT | CTLFLAG_RD, &queues[q], | |||||
sizeof(struct ixl_queue), | |||||
ixl_sysctl_qtx_tail_handler, "IU", | |||||
"Queue Transmit Descriptor Tail"); | |||||
#endif | #endif | ||||
} | } | ||||
/* MAC stats */ | /* MAC stats */ | ||||
ixl_add_sysctls_mac_stats(ctx, child, pf_stats); | ixl_add_sysctls_mac_stats(ctx, child, pf_stats); | ||||
} | } | ||||
void | void | ||||
▲ Show 20 Lines • Show All 91 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
void | void | ||||
ixl_set_rss_key(struct ixl_pf *pf) | ixl_set_rss_key(struct ixl_pf *pf) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
enum i40e_status_code status; | |||||
#ifdef RSS | |||||
u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; | u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; | ||||
#else | enum i40e_status_code status; | ||||
u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687, | |||||
0x183cfd8c, 0xce880440, 0x580cbc3c, | |||||
0x35897377, 0x328b25e1, 0x4fa98922, | |||||
0xb7d90c14, 0xd5bad70d, 0xcd15a2c1, | |||||
0x0, 0x0, 0x0}; | |||||
#endif | |||||
#ifdef RSS | #ifdef RSS | ||||
/* Fetch the configured RSS key */ | /* Fetch the configured RSS key */ | ||||
rss_getkey((uint8_t *) &rss_seed); | rss_getkey((uint8_t *) &rss_seed); | ||||
#else | |||||
ixl_get_default_rss_key(rss_seed); | |||||
#endif | #endif | ||||
/* Fill out hash function seed */ | /* Fill out hash function seed */ | ||||
if (hw->mac.type == I40E_MAC_X722) { | if (hw->mac.type == I40E_MAC_X722) { | ||||
struct i40e_aqc_get_set_rss_key_data key_data; | struct i40e_aqc_get_set_rss_key_data key_data; | ||||
bcopy(rss_seed, key_data.standard_rss_key, 40); | bcopy(rss_seed, &key_data, 52); | ||||
status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data); | status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data); | ||||
if (status) | if (status) | ||||
device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n", | device_printf(dev, | ||||
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | "i40e_aq_set_rss_key status %s, error %s\n", | ||||
i40e_stat_str(hw, status), | |||||
i40e_aq_str(hw, hw->aq.asq_last_status)); | |||||
} else { | } else { | ||||
for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) | for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) | ||||
i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]); | i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]); | ||||
} | } | ||||
} | } | ||||
/* | /* | ||||
* Configure enabled PCTYPES for RSS. | * Configure enabled PCTYPES for RSS. | ||||
▲ Show 20 Lines • Show All 42 Lines • ▼ Show 20 Lines | ixl_set_rss_hlut(struct ixl_pf *pf) | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
struct ixl_vsi *vsi = &pf->vsi; | struct ixl_vsi *vsi = &pf->vsi; | ||||
int i, que_id; | int i, que_id; | ||||
int lut_entry_width; | int lut_entry_width; | ||||
u32 lut = 0; | u32 lut = 0; | ||||
enum i40e_status_code status; | enum i40e_status_code status; | ||||
if (hw->mac.type == I40E_MAC_X722) | |||||
lut_entry_width = 7; | |||||
else | |||||
lut_entry_width = pf->hw.func_caps.rss_table_entry_width; | lut_entry_width = pf->hw.func_caps.rss_table_entry_width; | ||||
/* Populate the LUT with max no. of queues in round robin fashion */ | /* Populate the LUT with max no. of queues in round robin fashion */ | ||||
u8 hlut_buf[512]; | u8 hlut_buf[512]; | ||||
for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) { | for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) { | ||||
#ifdef RSS | #ifdef RSS | ||||
/* | /* | ||||
* Fetch the RSS bucket id for the given indirection entry. | * Fetch the RSS bucket id for the given indirection entry. | ||||
* Cap it at the number of configured buckets (which is | * Cap it at the number of configured buckets (which is | ||||
▲ Show 20 Lines • Show All 294 Lines • ▼ Show 20 Lines | ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) | ||||
} | } | ||||
/* | /* | ||||
** Scan the filter list, each time we find one | ** Scan the filter list, each time we find one | ||||
** we add it to the admin queue array and turn off | ** we add it to the admin queue array and turn off | ||||
** the add bit. | ** the add bit. | ||||
*/ | */ | ||||
SLIST_FOREACH(f, &vsi->ftl, next) { | SLIST_FOREACH(f, &vsi->ftl, next) { | ||||
if (f->flags == flags) { | if ((f->flags & flags) == flags) { | ||||
b = &a[j]; // a pox on fvl long names :) | b = &a[j]; // a pox on fvl long names :) | ||||
bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); | bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); | ||||
if (f->vlan == IXL_VLAN_ANY) { | if (f->vlan == IXL_VLAN_ANY) { | ||||
b->vlan_tag = 0; | b->vlan_tag = 0; | ||||
b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; | b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; | ||||
} else { | } else { | ||||
b->vlan_tag = f->vlan; | b->vlan_tag = f->vlan; | ||||
b->flags = 0; | b->flags = 0; | ||||
▲ Show 20 Lines • Show All 44 Lines • ▼ Show 20 Lines | if (d == NULL) { | ||||
printf("del hw filter failed to get memory\n"); | printf("del hw filter failed to get memory\n"); | ||||
return; | return; | ||||
} | } | ||||
SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { | SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { | ||||
if (f->flags & IXL_FILTER_DEL) { | if (f->flags & IXL_FILTER_DEL) { | ||||
e = &d[j]; // a pox on fvl long names :) | e = &d[j]; // a pox on fvl long names :) | ||||
bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); | bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); | ||||
e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan); | |||||
e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; | e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; | ||||
if (f->vlan == IXL_VLAN_ANY) { | |||||
e->vlan_tag = 0; | |||||
e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; | |||||
} else { | |||||
e->vlan_tag = f->vlan; | |||||
} | |||||
/* delete entry from vsi list */ | /* delete entry from vsi list */ | ||||
SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); | SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); | ||||
free(f, M_DEVBUF); | free(f, M_DEVBUF); | ||||
j++; | j++; | ||||
} | } | ||||
if (j == cnt) | if (j == cnt) | ||||
break; | break; | ||||
} | } | ||||
Show All 36 Lines | ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) | ||||
reg |= I40E_QTX_ENA_QENA_REQ_MASK | | reg |= I40E_QTX_ENA_QENA_REQ_MASK | | ||||
I40E_QTX_ENA_QENA_STAT_MASK; | I40E_QTX_ENA_QENA_STAT_MASK; | ||||
wr32(hw, I40E_QTX_ENA(pf_qidx), reg); | wr32(hw, I40E_QTX_ENA(pf_qidx), reg); | ||||
/* Verify the enable took */ | /* Verify the enable took */ | ||||
for (int j = 0; j < 10; j++) { | for (int j = 0; j < 10; j++) { | ||||
reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); | reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); | ||||
if (reg & I40E_QTX_ENA_QENA_STAT_MASK) | if (reg & I40E_QTX_ENA_QENA_STAT_MASK) | ||||
break; | break; | ||||
i40e_msec_delay(10); | i40e_usec_delay(10); | ||||
} | } | ||||
if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { | if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { | ||||
device_printf(pf->dev, "TX queue %d still disabled!\n", | device_printf(pf->dev, "TX queue %d still disabled!\n", | ||||
pf_qidx); | pf_qidx); | ||||
error = ETIMEDOUT; | error = ETIMEDOUT; | ||||
} | } | ||||
return (error); | return (error); | ||||
Show All 17 Lines | ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) | ||||
reg |= I40E_QRX_ENA_QENA_REQ_MASK | | reg |= I40E_QRX_ENA_QENA_REQ_MASK | | ||||
I40E_QRX_ENA_QENA_STAT_MASK; | I40E_QRX_ENA_QENA_STAT_MASK; | ||||
wr32(hw, I40E_QRX_ENA(pf_qidx), reg); | wr32(hw, I40E_QRX_ENA(pf_qidx), reg); | ||||
/* Verify the enable took */ | /* Verify the enable took */ | ||||
for (int j = 0; j < 10; j++) { | for (int j = 0; j < 10; j++) { | ||||
reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); | reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); | ||||
if (reg & I40E_QRX_ENA_QENA_STAT_MASK) | if (reg & I40E_QRX_ENA_QENA_STAT_MASK) | ||||
break; | break; | ||||
i40e_msec_delay(10); | i40e_usec_delay(10); | ||||
} | } | ||||
if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { | if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { | ||||
device_printf(pf->dev, "RX queue %d still disabled!\n", | device_printf(pf->dev, "RX queue %d still disabled!\n", | ||||
pf_qidx); | pf_qidx); | ||||
error = ETIMEDOUT; | error = ETIMEDOUT; | ||||
} | } | ||||
return (error); | return (error); | ||||
Show All 23 Lines | for (int i = 0; i < vsi->num_queues; i++) { | ||||
error = ixl_enable_ring(pf, &pf->qtag, i); | error = ixl_enable_ring(pf, &pf->qtag, i); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | |||||
* Returns error on first ring that is detected hung. | |||||
*/ | |||||
int | int | ||||
ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) | ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
int error = 0; | int error = 0; | ||||
u32 reg; | u32 reg; | ||||
u16 pf_qidx; | u16 pf_qidx; | ||||
Show All 16 Lines | if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { | ||||
device_printf(pf->dev, "TX queue %d still enabled!\n", | device_printf(pf->dev, "TX queue %d still enabled!\n", | ||||
pf_qidx); | pf_qidx); | ||||
error = ETIMEDOUT; | error = ETIMEDOUT; | ||||
} | } | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | |||||
* Returns error on first ring that is detected hung. | |||||
*/ | |||||
int | int | ||||
ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) | ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
int error = 0; | int error = 0; | ||||
u32 reg; | u32 reg; | ||||
u16 pf_qidx; | u16 pf_qidx; | ||||
▲ Show 20 Lines • Show All 94 Lines • ▼ Show 20 Lines | if (reg & I40E_GL_MDET_RX_VALID_MASK) { | ||||
mdd_detected = true; | mdd_detected = true; | ||||
} | } | ||||
if (mdd_detected) { | if (mdd_detected) { | ||||
reg = rd32(hw, I40E_PF_MDET_TX); | reg = rd32(hw, I40E_PF_MDET_TX); | ||||
if (reg & I40E_PF_MDET_TX_VALID_MASK) { | if (reg & I40E_PF_MDET_TX_VALID_MASK) { | ||||
wr32(hw, I40E_PF_MDET_TX, 0xFFFF); | wr32(hw, I40E_PF_MDET_TX, 0xFFFF); | ||||
device_printf(dev, | device_printf(dev, | ||||
"MDD TX event is for this function!"); | "MDD TX event is for this function!\n"); | ||||
pf_mdd_detected = true; | pf_mdd_detected = true; | ||||
} | } | ||||
reg = rd32(hw, I40E_PF_MDET_RX); | reg = rd32(hw, I40E_PF_MDET_RX); | ||||
if (reg & I40E_PF_MDET_RX_VALID_MASK) { | if (reg & I40E_PF_MDET_RX_VALID_MASK) { | ||||
wr32(hw, I40E_PF_MDET_RX, 0xFFFF); | wr32(hw, I40E_PF_MDET_RX, 0xFFFF); | ||||
device_printf(dev, | device_printf(dev, | ||||
"MDD RX event is for this function!"); | "MDD RX event is for this function!\n"); | ||||
pf_mdd_detected = true; | pf_mdd_detected = true; | ||||
} | } | ||||
} | } | ||||
/* re-enable mdd interrupt cause */ | /* re-enable mdd interrupt cause */ | ||||
reg = rd32(hw, I40E_PFINT_ICR0_ENA); | reg = rd32(hw, I40E_PFINT_ICR0_ENA); | ||||
reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; | reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; | ||||
wr32(hw, I40E_PFINT_ICR0_ENA, reg); | wr32(hw, I40E_PFINT_ICR0_ENA, reg); | ||||
▲ Show 20 Lines • Show All 238 Lines • ▼ Show 20 Lines | ixl_update_stats_counters(struct ixl_pf *pf) | ||||
for (int i = 0; i < pf->num_vfs; i++) { | for (int i = 0; i < pf->num_vfs; i++) { | ||||
vf = &pf->vfs[i]; | vf = &pf->vfs[i]; | ||||
if (vf->vf_flags & VF_FLAG_ENABLED) | if (vf->vf_flags & VF_FLAG_ENABLED) | ||||
ixl_update_eth_stats(&pf->vfs[i].vsi); | ixl_update_eth_stats(&pf->vfs[i].vsi); | ||||
} | } | ||||
} | } | ||||
/*
 * Tear down driver/hardware state ahead of a device reset.
 *
 * Stops the interface (if it was up), releases interrupt vectors,
 * shuts down the LAN HMC and admin queue, drains the periodic timer,
 * and frees all queue rings, locks, and MAC filters, finally returning
 * the PF's queue allocation to the queue manager.
 *
 * 'is_up' is the caller's snapshot of IFF_DRV_RUNNING, taken before
 * teardown begins.
 *
 * NOTE(review): a failure from i40e_shutdown_lan_hmc() is logged but
 * its code is overwritten by the later i40e_shutdown_adminq() result;
 * only the last error is returned.
 */
int
ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	int error = 0;

	/* Teardown */
	if (is_up)
		ixl_stop(pf);

	/* Release per-queue interrupt vectors before HMC shutdown */
	ixl_teardown_queue_msix(vsi);

	error = i40e_shutdown_lan_hmc(hw);
	if (error)
		device_printf(dev,
		    "Shutdown LAN HMC failed with code %d\n", error);

	ixl_disable_intr0(hw);
	ixl_teardown_adminq_msix(pf);

	error = i40e_shutdown_adminq(hw);
	if (error)
		device_printf(dev,
		    "Shutdown Admin queue failed with code %d\n", error);

	/* Ensure the stats/watchdog callout has fully stopped */
	callout_drain(&pf->timer);

	/* Free ring buffers, locks and filters */
	ixl_vsi_free_queues(vsi);

	/* Free VSI filter list */
	ixl_free_mac_filters(vsi);

	/* Return this PF's queues to the queue manager */
	ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);

	return (error);
}
/*
 * Rebuild all driver and hardware state after a device reset, mirroring
 * (in order) the attach-time bring-up: PF reset, admin queue, capability
 * discovery, LAN HMC, PF queue reservation, switch configuration, queue
 * setup, interrupt setup (MSI-X or legacy), then link state and LLDP.
 *
 * 'is_up' restores the interface to running if it was up before the
 * reset.  Returns 0 on success; on failure the device is left in an
 * unusable state and a driver reload is required.
 */
int
ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
{
	struct i40e_hw *hw = &pf->hw;
	struct ixl_vsi *vsi = &pf->vsi;
	device_t dev = pf->dev;
	int error = 0;

	device_printf(dev, "Rebuilding driver state...\n");

	error = i40e_pf_reset(hw);
	if (error) {
		device_printf(dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, error));
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	/* Setup */
	error = i40e_init_adminq(hw);
	/* A firmware API mismatch is tolerated here, as at attach */
	if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
		    error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	i40e_clear_pxe_mode(hw);

	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (error) {
		device_printf(dev, "init_lan_hmc failed: %d\n", error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (error) {
		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		/* TODO: error handling */
	}

	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
		    error);
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	/* Re-create rings/locks freed by ixl_prepare_for_reset() */
	if (ixl_vsi_setup_queues(vsi)) {
		device_printf(dev, "setup queues failed!\n");
		error = ENOMEM;
		goto ixl_rebuild_hw_structs_after_reset_err;
	}

	/* Interrupt setup: MSI-X when more than one vector, else legacy */
	if (pf->msix > 1) {
		error = ixl_setup_adminq_msix(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
			    error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}

		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);

		error = ixl_setup_queue_msix(vsi);
		if (error) {
			device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
			    error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}
	} else {
		error = ixl_setup_legacy(pf);
		if (error) {
			device_printf(dev, "ixl_setup_legacy() error: %d\n",
			    error);
			goto ixl_rebuild_hw_structs_after_reset_err;
		}
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		/* TODO: error handling */
	}

	i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
	ixl_get_fw_lldp_status(pf);

	/* Bring the interface back up if it was running before the reset */
	if (is_up)
		ixl_init(pf);

	device_printf(dev, "Rebuilding driver state done.\n");
	return (0);

ixl_rebuild_hw_structs_after_reset_err:
	device_printf(dev, "Reload the driver to recover\n");
	return (error);
}
void | void | ||||
ixl_handle_empr_reset(struct ixl_pf *pf) | ixl_handle_empr_reset(struct ixl_pf *pf) | ||||
{ | { | ||||
struct ixl_vsi *vsi = &pf->vsi; | |||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING); | ||||
int count = 0; | int count = 0; | ||||
u32 reg; | u32 reg; | ||||
ixl_prepare_for_reset(pf, is_up); | |||||
/* Typically finishes within 3-4 seconds */ | /* Typically finishes within 3-4 seconds */ | ||||
while (count++ < 100) { | while (count++ < 100) { | ||||
reg = rd32(hw, I40E_GLGEN_RSTAT) | reg = rd32(hw, I40E_GLGEN_RSTAT) | ||||
& I40E_GLGEN_RSTAT_DEVSTATE_MASK; | & I40E_GLGEN_RSTAT_DEVSTATE_MASK; | ||||
if (reg) | if (reg) | ||||
i40e_msec_delay(100); | i40e_msec_delay(100); | ||||
else | else | ||||
break; | break; | ||||
} | } | ||||
ixl_dbg(pf, IXL_DBG_INFO, | ixl_dbg(pf, IXL_DBG_INFO, | ||||
"EMPR reset wait count: %d\n", count); | "EMPR reset wait count: %d\n", count); | ||||
device_printf(dev, "Rebuilding driver state...\n"); | ixl_rebuild_hw_structs_after_reset(pf, is_up); | ||||
ixl_rebuild_hw_structs_after_reset(pf); | |||||
device_printf(dev, "Rebuilding driver state done.\n"); | |||||
atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); | atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); | ||||
} | } | ||||
/* | /* | ||||
** Tasklet handler for MSIX Adminq interrupts | ** Tasklet handler for MSIX Adminq interrupts | ||||
** - do outside interrupt since it might sleep | ** - do outside interrupt since it might sleep | ||||
*/ | */ | ||||
▲ Show 20 Lines • Show All 255 Lines • ▼ Show 20 Lines | ixl_add_device_sysctls(struct ixl_pf *pf) | ||||
struct sysctl_oid_list *debug_list; | struct sysctl_oid_list *debug_list; | ||||
struct sysctl_oid *fec_node; | struct sysctl_oid *fec_node; | ||||
struct sysctl_oid_list *fec_list; | struct sysctl_oid_list *fec_list; | ||||
/* Set up sysctls */ | /* Set up sysctls */ | ||||
SYSCTL_ADD_PROC(ctx, ctx_list, | SYSCTL_ADD_PROC(ctx, ctx_list, | ||||
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, | OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); | pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); | ||||
SYSCTL_ADD_PROC(ctx, ctx_list, | SYSCTL_ADD_PROC(ctx, ctx_list, | ||||
OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, | OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); | pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); | ||||
SYSCTL_ADD_PROC(ctx, ctx_list, | SYSCTL_ADD_PROC(ctx, ctx_list, | ||||
OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD, | |||||
pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); | |||||
SYSCTL_ADD_PROC(ctx, ctx_list, | |||||
OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, | OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, | ||||
pf, 0, ixl_current_speed, "A", "Current Port Speed"); | pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed"); | ||||
SYSCTL_ADD_PROC(ctx, ctx_list, | SYSCTL_ADD_PROC(ctx, ctx_list, | ||||
OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, | OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, | ||||
pf, 0, ixl_sysctl_show_fw, "A", "Firmware version"); | pf, 0, ixl_sysctl_show_fw, "A", "Firmware version"); | ||||
SYSCTL_ADD_PROC(ctx, ctx_list, | SYSCTL_ADD_PROC(ctx, ctx_list, | ||||
OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD, | OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD, | ||||
pf, 0, ixl_sysctl_unallocated_queues, "I", | pf, 0, ixl_sysctl_unallocated_queues, "I", | ||||
Show All 12 Lines | ixl_add_device_sysctls(struct ixl_pf *pf) | ||||
SYSCTL_ADD_INT(ctx, ctx_list, | SYSCTL_ADD_INT(ctx, ctx_list, | ||||
OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, | OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, | ||||
&pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); | &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); | ||||
SYSCTL_ADD_INT(ctx, ctx_list, | SYSCTL_ADD_INT(ctx, ctx_list, | ||||
OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, | OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, | ||||
&pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); | &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); | ||||
SYSCTL_ADD_INT(ctx, ctx_list, | |||||
OID_AUTO, "tx_ring_size", CTLFLAG_RD, | |||||
&pf->vsi.num_tx_desc, 0, "TX ring size"); | |||||
SYSCTL_ADD_INT(ctx, ctx_list, | |||||
OID_AUTO, "rx_ring_size", CTLFLAG_RD, | |||||
&pf->vsi.num_rx_desc, 0, "RX ring size"); | |||||
/* Add FEC sysctls for 25G adapters */ | /* Add FEC sysctls for 25G adapters */ | ||||
/* | if (i40e_is_25G_device(hw->device_id)) { | ||||
* XXX: These settings can be changed, but that isn't supported, | |||||
* so these are read-only for now. | |||||
*/ | |||||
if (hw->device_id == I40E_DEV_ID_25G_B | |||||
|| hw->device_id == I40E_DEV_ID_25G_SFP28) { | |||||
fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, | fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, | ||||
OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls"); | OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls"); | ||||
fec_list = SYSCTL_CHILDREN(fec_node); | fec_list = SYSCTL_CHILDREN(fec_node); | ||||
SYSCTL_ADD_PROC(ctx, fec_list, | SYSCTL_ADD_PROC(ctx, fec_list, | ||||
OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RD, | OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); | pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); | ||||
SYSCTL_ADD_PROC(ctx, fec_list, | SYSCTL_ADD_PROC(ctx, fec_list, | ||||
OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RD, | OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); | pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); | ||||
SYSCTL_ADD_PROC(ctx, fec_list, | SYSCTL_ADD_PROC(ctx, fec_list, | ||||
OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RD, | OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link"); | pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link"); | ||||
SYSCTL_ADD_PROC(ctx, fec_list, | SYSCTL_ADD_PROC(ctx, fec_list, | ||||
OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RD, | OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link"); | pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link"); | ||||
SYSCTL_ADD_PROC(ctx, fec_list, | SYSCTL_ADD_PROC(ctx, fec_list, | ||||
OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RD, | OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes"); | pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes"); | ||||
} | } | ||||
SYSCTL_ADD_PROC(ctx, ctx_list, | |||||
OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW, | |||||
pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); | |||||
/* Add sysctls meant to print debug information, but don't list them | /* Add sysctls meant to print debug information, but don't list them | ||||
* in "sysctl -a" output. */ | * in "sysctl -a" output. */ | ||||
debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, | debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, | ||||
OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls"); | OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls"); | ||||
debug_list = SYSCTL_CHILDREN(debug_node); | debug_list = SYSCTL_CHILDREN(debug_node); | ||||
SYSCTL_ADD_UINT(ctx, debug_list, | SYSCTL_ADD_UINT(ctx, debug_list, | ||||
OID_AUTO, "shared_debug_mask", CTLFLAG_RW, | OID_AUTO, "shared_debug_mask", CTLFLAG_RW, | ||||
Show All 34 Lines | ixl_add_device_sysctls(struct ixl_pf *pf) | ||||
SYSCTL_ADD_PROC(ctx, debug_list, | SYSCTL_ADD_PROC(ctx, debug_list, | ||||
OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD, | OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD, | ||||
pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); | pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); | ||||
SYSCTL_ADD_PROC(ctx, debug_list, | SYSCTL_ADD_PROC(ctx, debug_list, | ||||
OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR, | OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR, | ||||
pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); | pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); | ||||
SYSCTL_ADD_PROC(ctx, debug_list, | |||||
OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD, | |||||
pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); | |||||
if (pf->has_i2c) { | if (pf->has_i2c) { | ||||
SYSCTL_ADD_PROC(ctx, debug_list, | SYSCTL_ADD_PROC(ctx, debug_list, | ||||
OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, | OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus"); | pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus"); | ||||
SYSCTL_ADD_PROC(ctx, debug_list, | SYSCTL_ADD_PROC(ctx, debug_list, | ||||
OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, | OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, | ||||
pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus"); | pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus"); | ||||
Show All 26 Lines | |||||
/* | /* | ||||
** Set flow control using sysctl: | ** Set flow control using sysctl: | ||||
** 0 - off | ** 0 - off | ||||
** 1 - rx pause | ** 1 - rx pause | ||||
** 2 - tx pause | ** 2 - tx pause | ||||
** 3 - full | ** 3 - full | ||||
*/ | */ | ||||
int | int | ||||
ixl_set_flowcntl(SYSCTL_HANDLER_ARGS) | ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
int requested_fc, error = 0; | int requested_fc, error = 0; | ||||
enum i40e_status_code aq_error = 0; | enum i40e_status_code aq_error = 0; | ||||
u8 fc_aq_err = 0; | u8 fc_aq_err = 0; | ||||
▲ Show 20 Lines • Show All 66 Lines • ▼ Show 20 Lines | default: | ||||
index = 0; | index = 0; | ||||
break; | break; | ||||
} | } | ||||
return speeds[index]; | return speeds[index]; | ||||
} | } | ||||
int | int | ||||
ixl_current_speed(SYSCTL_HANDLER_ARGS) | ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
int error = 0; | int error = 0; | ||||
ixl_update_link_status(pf); | ixl_update_link_status(pf); | ||||
error = sysctl_handle_string(oidp, | error = sysctl_handle_string(oidp, | ||||
ixl_aq_speed_to_str(hw->phy.link_info.link_speed), | ixl_aq_speed_to_str(hw->phy.link_info.link_speed), | ||||
8, req); | 8, req); | ||||
return (error); | return (error); | ||||
} | } | ||||
/* | |||||
* Converts 8-bit speeds value to and from sysctl flags and | |||||
* Admin Queue flags. | |||||
*/ | |||||
static u8 | static u8 | ||||
ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) | ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) | ||||
{ | { | ||||
static u16 speedmap[6] = { | static u16 speedmap[6] = { | ||||
(I40E_LINK_SPEED_100MB | (0x1 << 8)), | (I40E_LINK_SPEED_100MB | (0x1 << 8)), | ||||
(I40E_LINK_SPEED_1GB | (0x2 << 8)), | (I40E_LINK_SPEED_1GB | (0x2 << 8)), | ||||
(I40E_LINK_SPEED_10GB | (0x4 << 8)), | (I40E_LINK_SPEED_10GB | (0x4 << 8)), | ||||
(I40E_LINK_SPEED_20GB | (0x8 << 8)), | (I40E_LINK_SPEED_20GB | (0x8 << 8)), | ||||
(I40E_LINK_SPEED_25GB | (0x10 << 8)), | (I40E_LINK_SPEED_25GB | (0x10 << 8)), | ||||
(I40E_LINK_SPEED_40GB | (0x20 << 8)) | (I40E_LINK_SPEED_40GB | (0x20 << 8)) | ||||
}; | }; | ||||
u8 retval = 0; | u8 retval = 0; | ||||
for (int i = 0; i < 6; i++) { | for (int i = 0; i < 6; i++) { | ||||
if (to_aq) | if (to_aq) | ||||
retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; | retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; | ||||
else | else | ||||
retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0; | retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0; | ||||
} | } | ||||
return (retval); | return (retval); | ||||
} | } | ||||
int | int | ||||
ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds) | ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) | ||||
{ | { | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
struct i40e_aq_get_phy_abilities_resp abilities; | struct i40e_aq_get_phy_abilities_resp abilities; | ||||
struct i40e_aq_set_phy_config config; | struct i40e_aq_set_phy_config config; | ||||
enum i40e_status_code aq_error = 0; | enum i40e_status_code aq_error = 0; | ||||
/* Get current capability information */ | /* Get current capability information */ | ||||
aq_error = i40e_aq_get_phy_capabilities(hw, | aq_error = i40e_aq_get_phy_capabilities(hw, | ||||
FALSE, FALSE, &abilities, NULL); | FALSE, FALSE, &abilities, NULL); | ||||
if (aq_error) { | if (aq_error) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"%s: Error getting phy capabilities %d," | "%s: Error getting phy capabilities %d," | ||||
" aq error: %d\n", __func__, aq_error, | " aq error: %d\n", __func__, aq_error, | ||||
hw->aq.asq_last_status); | hw->aq.asq_last_status); | ||||
return (EIO); | return (EIO); | ||||
} | } | ||||
/* Prepare new config */ | /* Prepare new config */ | ||||
bzero(&config, sizeof(config)); | bzero(&config, sizeof(config)); | ||||
if (from_aq) | |||||
config.link_speed = speeds; | |||||
else | |||||
config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); | config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); | ||||
config.phy_type = abilities.phy_type; | config.phy_type = abilities.phy_type; | ||||
config.phy_type_ext = abilities.phy_type_ext; | config.phy_type_ext = abilities.phy_type_ext; | ||||
config.abilities = abilities.abilities | config.abilities = abilities.abilities | ||||
| I40E_AQ_PHY_ENABLE_ATOMIC_LINK; | | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; | ||||
config.eee_capability = abilities.eee_capability; | config.eee_capability = abilities.eee_capability; | ||||
config.eeer = abilities.eeer_val; | config.eeer = abilities.eeer_val; | ||||
config.low_power_ctrl = abilities.d3_lpan; | config.low_power_ctrl = abilities.d3_lpan; | ||||
config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e); | |||||
/* Do aq command & restart link */ | /* Do aq command & restart link */ | ||||
aq_error = i40e_aq_set_phy_config(hw, &config, NULL); | aq_error = i40e_aq_set_phy_config(hw, &config, NULL); | ||||
if (aq_error) { | if (aq_error) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"%s: Error setting new phy config %d," | "%s: Error setting new phy config %d," | ||||
" aq error: %d\n", __func__, aq_error, | " aq error: %d\n", __func__, aq_error, | ||||
hw->aq.asq_last_status); | hw->aq.asq_last_status); | ||||
return (EIO); | return (EIO); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
/* | /* | ||||
** Supported link speedsL | |||||
** Flags: | |||||
** 0x1 - 100 Mb | |||||
** 0x2 - 1G | |||||
** 0x4 - 10G | |||||
** 0x8 - 20G | |||||
** 0x10 - 25G | |||||
** 0x20 - 40G | |||||
*/ | |||||
static int | |||||
ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | |||||
int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); | |||||
return sysctl_handle_int(oidp, NULL, supported, req); | |||||
} | |||||
/* | |||||
** Control link advertise speed: | ** Control link advertise speed: | ||||
** Flags: | ** Flags: | ||||
** 0x1 - advertise 100 Mb | ** 0x1 - advertise 100 Mb | ||||
** 0x2 - advertise 1G | ** 0x2 - advertise 1G | ||||
** 0x4 - advertise 10G | ** 0x4 - advertise 10G | ||||
** 0x8 - advertise 20G | ** 0x8 - advertise 20G | ||||
** 0x10 - advertise 25G | ** 0x10 - advertise 25G | ||||
** 0x20 - advertise 40G | ** 0x20 - advertise 40G | ||||
** | ** | ||||
** Set to 0 to disable link | ** Set to 0 to disable link | ||||
*/ | */ | ||||
int | int | ||||
ixl_set_advertise(SYSCTL_HANDLER_ARGS) | ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
struct i40e_hw *hw = &pf->hw; | |||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
u8 converted_speeds; | u8 converted_speeds; | ||||
int requested_ls = 0; | int requested_ls = 0; | ||||
int error = 0; | int error = 0; | ||||
/* Read in new mode */ | /* Read in new mode */ | ||||
requested_ls = pf->advertised_speed; | requested_ls = pf->advertised_speed; | ||||
error = sysctl_handle_int(oidp, &requested_ls, 0, req); | error = sysctl_handle_int(oidp, &requested_ls, 0, req); | ||||
if ((error) || (req->newptr == NULL)) | if ((error) || (req->newptr == NULL)) | ||||
return (error); | return (error); | ||||
/* Check if changing speeds is supported */ | |||||
switch (hw->device_id) { | /* Error out if bits outside of possible flag range are set */ | ||||
case I40E_DEV_ID_25G_B: | if ((requested_ls & ~((u8)0x3F)) != 0) { | ||||
case I40E_DEV_ID_25G_SFP28: | device_printf(dev, "Input advertised speed out of range; " | ||||
device_printf(dev, "Changing advertised speeds not supported" | "valid flags are: 0x%02x\n", | ||||
" on this device.\n"); | ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
if (requested_ls < 0 || requested_ls > 0xff) { | |||||
} | |||||
/* Check for valid value */ | /* Check if adapter supports input value */ | ||||
converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); | converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); | ||||
if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { | if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { | ||||
device_printf(dev, "Invalid advertised speed; " | device_printf(dev, "Invalid advertised speed; " | ||||
"valid flags are: 0x%02x\n", | "valid flags are: 0x%02x\n", | ||||
ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); | ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
error = ixl_set_advertised_speeds(pf, requested_ls); | error = ixl_set_advertised_speeds(pf, requested_ls, false); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
pf->advertised_speed = requested_ls; | pf->advertised_speed = requested_ls; | ||||
ixl_update_link_status(pf); | ixl_update_link_status(pf); | ||||
return (0); | return (0); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 83 Lines • ▼ Show 20 Lines | ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct sbuf *sbuf; | struct sbuf *sbuf; | ||||
sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); | sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); | ||||
ixl_nvm_version_str(hw, sbuf); | ixl_nvm_version_str(hw, sbuf); | ||||
sbuf_finish(sbuf); | sbuf_finish(sbuf); | ||||
sbuf_delete(sbuf); | sbuf_delete(sbuf); | ||||
return 0; | return (0); | ||||
} | } | ||||
void | void | ||||
ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) | ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) | ||||
{ | { | ||||
if ((nvma->command == I40E_NVM_READ) && | if ((nvma->command == I40E_NVM_READ) && | ||||
((nvma->config & 0xFF) == 0xF) && | ((nvma->config & 0xFF) == 0xF) && | ||||
(((nvma->config & 0xF00) >> 8) == 0xF) && | (((nvma->config & 0xF00) >> 8) == 0xF) && | ||||
Show All 35 Lines | ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) | ||||
DEBUGFUNC("ixl_handle_nvmupd_cmd"); | DEBUGFUNC("ixl_handle_nvmupd_cmd"); | ||||
/* Sanity checks */ | /* Sanity checks */ | ||||
if (ifd->ifd_len < sizeof(struct i40e_nvm_access) || | if (ifd->ifd_len < sizeof(struct i40e_nvm_access) || | ||||
ifd->ifd_data == NULL) { | ifd->ifd_data == NULL) { | ||||
device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", | device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", | ||||
__func__); | __func__); | ||||
device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n", | device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", | ||||
__func__, ifd->ifd_len, sizeof(struct i40e_nvm_access)); | __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access)); | ||||
device_printf(dev, "%s: data pointer: %p\n", __func__, | device_printf(dev, "%s: data pointer: %p\n", __func__, | ||||
ifd->ifd_data); | ifd->ifd_data); | ||||
return (EINVAL); | return (EINVAL); | ||||
} | } | ||||
nvma = (struct i40e_nvm_access *)ifd->ifd_data; | nvma = (struct i40e_nvm_access *)ifd->ifd_data; | ||||
Show All 12 Lines | ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) | ||||
if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) { | if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) { | ||||
IXL_PF_LOCK(pf); | IXL_PF_LOCK(pf); | ||||
status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); | status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); | ||||
IXL_PF_UNLOCK(pf); | IXL_PF_UNLOCK(pf); | ||||
} else { | } else { | ||||
perrno = -EBUSY; | perrno = -EBUSY; | ||||
} | } | ||||
if (status) | /* Let the nvmupdate report errors, show them only when debug is enabled */ | ||||
if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) | |||||
device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", | device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", | ||||
i40e_stat_str(hw, status), perrno); | i40e_stat_str(hw, status), perrno); | ||||
/* | /* | ||||
* -EPERM is actually ERESTART, which the kernel interprets as it needing | * -EPERM is actually ERESTART, which the kernel interprets as it needing | ||||
* to run this ioctl again. So use -EACCES for -EPERM instead. | * to run this ioctl again. So use -EACCES for -EPERM instead. | ||||
*/ | */ | ||||
if (perrno == -EPERM) | if (perrno == -EPERM) | ||||
return (-EACCES); | return (-EACCES); | ||||
else | else | ||||
return (perrno); | return (perrno); | ||||
} | } | ||||
/********************************************************************* | /********************************************************************* | ||||
* | * | ||||
* Media Ioctl callback | * Media Ioctl callback | ||||
* | * | ||||
* This routine is called whenever the user queries the status of | * This routine is called whenever the user queries the status of | ||||
* the interface using ifconfig. | * the interface using ifconfig. | ||||
* | * | ||||
* When adding new media types here, make sure to add them to | |||||
* ixl_add_ifmedia(), too. | |||||
* | |||||
**********************************************************************/ | **********************************************************************/ | ||||
void | void | ||||
ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) | ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) | ||||
{ | { | ||||
struct ixl_vsi *vsi = ifp->if_softc; | struct ixl_vsi *vsi = ifp->if_softc; | ||||
struct ixl_pf *pf = vsi->back; | struct ixl_pf *pf = vsi->back; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
INIT_DEBUGOUT("ixl_media_status: begin"); | INIT_DEBUGOUT("ixl_media_status: begin"); | ||||
/* Don't touch PF during reset */ | |||||
if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING) | |||||
return; | |||||
IXL_PF_LOCK(pf); | IXL_PF_LOCK(pf); | ||||
hw->phy.get_link_info = TRUE; | |||||
i40e_get_link_status(hw, &pf->link_up); | i40e_get_link_status(hw, &pf->link_up); | ||||
ixl_update_link_status(pf); | ixl_update_link_status(pf); | ||||
ifmr->ifm_status = IFM_AVALID; | ifmr->ifm_status = IFM_AVALID; | ||||
ifmr->ifm_active = IFM_ETHER; | ifmr->ifm_active = IFM_ETHER; | ||||
if (!pf->link_up) { | if (!pf->link_up) { | ||||
IXL_PF_UNLOCK(pf); | IXL_PF_UNLOCK(pf); | ||||
Show All 16 Lines | case I40E_PHY_TYPE_1000BASE_T: | ||||
break; | break; | ||||
case I40E_PHY_TYPE_1000BASE_SX: | case I40E_PHY_TYPE_1000BASE_SX: | ||||
ifmr->ifm_active |= IFM_1000_SX; | ifmr->ifm_active |= IFM_1000_SX; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_1000BASE_LX: | case I40E_PHY_TYPE_1000BASE_LX: | ||||
ifmr->ifm_active |= IFM_1000_LX; | ifmr->ifm_active |= IFM_1000_LX; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_1000BASE_T_OPTICAL: | case I40E_PHY_TYPE_1000BASE_T_OPTICAL: | ||||
ifmr->ifm_active |= IFM_OTHER; | ifmr->ifm_active |= IFM_1000_T; | ||||
break; | break; | ||||
/* 10 G */ | /* 10 G */ | ||||
case I40E_PHY_TYPE_10GBASE_SFPP_CU: | case I40E_PHY_TYPE_10GBASE_SFPP_CU: | ||||
ifmr->ifm_active |= IFM_10G_TWINAX; | ifmr->ifm_active |= IFM_10G_TWINAX; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_10GBASE_SR: | case I40E_PHY_TYPE_10GBASE_SR: | ||||
ifmr->ifm_active |= IFM_10G_SR; | ifmr->ifm_active |= IFM_10G_SR; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_10GBASE_LR: | case I40E_PHY_TYPE_10GBASE_LR: | ||||
ifmr->ifm_active |= IFM_10G_LR; | ifmr->ifm_active |= IFM_10G_LR; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_10GBASE_T: | case I40E_PHY_TYPE_10GBASE_T: | ||||
ifmr->ifm_active |= IFM_10G_T; | ifmr->ifm_active |= IFM_10G_T; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_XAUI: | case I40E_PHY_TYPE_XAUI: | ||||
case I40E_PHY_TYPE_XFI: | case I40E_PHY_TYPE_XFI: | ||||
ifmr->ifm_active |= IFM_10G_TWINAX; | |||||
break; | |||||
case I40E_PHY_TYPE_10GBASE_AOC: | case I40E_PHY_TYPE_10GBASE_AOC: | ||||
ifmr->ifm_active |= IFM_OTHER; | ifmr->ifm_active |= IFM_10G_AOC; | ||||
break; | break; | ||||
/* 25 G */ | /* 25 G */ | ||||
case I40E_PHY_TYPE_25GBASE_KR: | case I40E_PHY_TYPE_25GBASE_KR: | ||||
ifmr->ifm_active |= IFM_25G_KR; | ifmr->ifm_active |= IFM_25G_KR; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_25GBASE_CR: | case I40E_PHY_TYPE_25GBASE_CR: | ||||
ifmr->ifm_active |= IFM_25G_CR; | ifmr->ifm_active |= IFM_25G_CR; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_25GBASE_SR: | case I40E_PHY_TYPE_25GBASE_SR: | ||||
ifmr->ifm_active |= IFM_25G_SR; | ifmr->ifm_active |= IFM_25G_SR; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_25GBASE_LR: | case I40E_PHY_TYPE_25GBASE_LR: | ||||
ifmr->ifm_active |= IFM_UNKNOWN; | ifmr->ifm_active |= IFM_25G_LR; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_25GBASE_AOC: | |||||
ifmr->ifm_active |= IFM_25G_AOC; | |||||
break; | |||||
case I40E_PHY_TYPE_25GBASE_ACC: | |||||
ifmr->ifm_active |= IFM_25G_ACC; | |||||
break; | |||||
/* 40 G */ | /* 40 G */ | ||||
case I40E_PHY_TYPE_40GBASE_CR4: | case I40E_PHY_TYPE_40GBASE_CR4: | ||||
case I40E_PHY_TYPE_40GBASE_CR4_CU: | case I40E_PHY_TYPE_40GBASE_CR4_CU: | ||||
ifmr->ifm_active |= IFM_40G_CR4; | ifmr->ifm_active |= IFM_40G_CR4; | ||||
break; | break; | ||||
case I40E_PHY_TYPE_40GBASE_SR4: | case I40E_PHY_TYPE_40GBASE_SR4: | ||||
ifmr->ifm_active |= IFM_40G_SR4; | ifmr->ifm_active |= IFM_40G_SR4; | ||||
break; | break; | ||||
▲ Show 20 Lines • Show All 304 Lines • ▼ Show 20 Lines | static char * phy_types_str[32] = { | ||||
"40GBASE-KR4", | "40GBASE-KR4", | ||||
"XAUI", | "XAUI", | ||||
"XFI", | "XFI", | ||||
"SFI", | "SFI", | ||||
"XLAUI", | "XLAUI", | ||||
"XLPPI", | "XLPPI", | ||||
"40GBASE-CR4", | "40GBASE-CR4", | ||||
"10GBASE-CR1", | "10GBASE-CR1", | ||||
"Reserved (12)", | "SFP+ Active DA", | ||||
"Reserved (13)", | "QSFP+ Active DA", | ||||
"Reserved (14)", | "Reserved (14)", | ||||
"Reserved (15)", | "Reserved (15)", | ||||
"Reserved (16)", | "Reserved (16)", | ||||
"100BASE-TX", | "100BASE-TX", | ||||
"1000BASE-T", | "1000BASE-T", | ||||
"10GBASE-T", | "10GBASE-T", | ||||
"10GBASE-SR", | "10GBASE-SR", | ||||
"10GBASE-LR", | "10GBASE-LR", | ||||
"10GBASE-SFP+Cu", | "10GBASE-SFP+Cu", | ||||
"10GBASE-CR1", | "10GBASE-CR1", | ||||
"40GBASE-CR4", | "40GBASE-CR4", | ||||
"40GBASE-SR4", | "40GBASE-SR4", | ||||
"40GBASE-LR4", | "40GBASE-LR4", | ||||
"1000BASE-SX", | "1000BASE-SX", | ||||
"1000BASE-LX", | "1000BASE-LX", | ||||
"1000BASE-T Optical", | "1000BASE-T Optical", | ||||
"20GBASE-KR2", | "20GBASE-KR2", | ||||
"Reserved (31)" | "Reserved (31)" | ||||
}; | }; | ||||
static char * ext_phy_types_str[4] = { | static char * ext_phy_types_str[8] = { | ||||
"25GBASE-KR", | "25GBASE-KR", | ||||
"25GBASE-CR", | "25GBASE-CR", | ||||
"25GBASE-SR", | "25GBASE-SR", | ||||
"25GBASE-LR" | "25GBASE-LR", | ||||
"25GBASE-AOC", | |||||
"25GBASE-ACC", | |||||
"Reserved (6)", | |||||
"Reserved (7)" | |||||
}; | }; | ||||
if (ext && bit_pos > 3) return "Invalid_Ext"; | if (ext && bit_pos > 7) return "Invalid_Ext"; | ||||
if (bit_pos > 31) return "Invalid"; | if (bit_pos > 31) return "Invalid"; | ||||
return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; | return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; | ||||
} | } | ||||
int | int | ||||
ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) | ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) | ||||
struct i40e_aqc_get_link_status link_status; | struct i40e_aqc_get_link_status link_status; | ||||
error = ixl_aq_get_link_status(pf, &link_status); | error = ixl_aq_get_link_status(pf, &link_status); | ||||
if (error) { | if (error) { | ||||
sbuf_delete(buf); | sbuf_delete(buf); | ||||
return (error); | return (error); | ||||
} | } | ||||
/* TODO: Add 25G types */ | |||||
sbuf_printf(buf, "\n" | sbuf_printf(buf, "\n" | ||||
"PHY Type : 0x%02x<%s>\n" | "PHY Type : 0x%02x<%s>\n" | ||||
"Speed : 0x%02x\n" | "Speed : 0x%02x\n" | ||||
"Link info: 0x%02x\n" | "Link info: 0x%02x\n" | ||||
"AN info : 0x%02x\n" | "AN info : 0x%02x\n" | ||||
"Ext info : 0x%02x\n" | "Ext info : 0x%02x\n" | ||||
"Loopback : 0x%02x\n" | "Loopback : 0x%02x\n" | ||||
"Max Frame: %d\n" | "Max Frame: %d\n" | ||||
▲ Show 20 Lines • Show All 82 Lines • ▼ Show 20 Lines | sbuf_printf(buf, | ||||
"FEC Cfg : %02x\n" | "FEC Cfg : %02x\n" | ||||
"Ext CC : %02x", | "Ext CC : %02x", | ||||
abilities.link_speed, | abilities.link_speed, | ||||
abilities.abilities, abilities.eee_capability, | abilities.abilities, abilities.eee_capability, | ||||
abilities.eeer_val, abilities.d3_lpan, | abilities.eeer_val, abilities.d3_lpan, | ||||
abilities.phy_id[0], abilities.phy_id[1], | abilities.phy_id[0], abilities.phy_id[1], | ||||
abilities.phy_id[2], abilities.phy_id[3], | abilities.phy_id[2], abilities.phy_id[3], | ||||
abilities.module_type[0], abilities.module_type[1], | abilities.module_type[0], abilities.module_type[1], | ||||
abilities.module_type[2], abilities.phy_type_ext >> 5, | abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, | ||||
abilities.phy_type_ext & 0x1F, | abilities.fec_cfg_curr_mod_ext_info & 0x1F, | ||||
abilities.ext_comp_code); | abilities.ext_comp_code); | ||||
error = sbuf_finish(buf); | error = sbuf_finish(buf); | ||||
if (error) | if (error) | ||||
device_printf(dev, "Error finishing sbuf: %d\n", error); | device_printf(dev, "Error finishing sbuf: %d\n", error); | ||||
sbuf_delete(buf); | sbuf_delete(buf); | ||||
return (error); | return (error); | ||||
▲ Show 20 Lines • Show All 297 Lines • ▼ Show 20 Lines | ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) | ||||
struct i40e_aqc_get_set_rss_key_data key_data; | struct i40e_aqc_get_set_rss_key_data key_data; | ||||
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); | buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); | ||||
if (!buf) { | if (!buf) { | ||||
device_printf(dev, "Could not allocate sbuf for output.\n"); | device_printf(dev, "Could not allocate sbuf for output.\n"); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key)); | |||||
sbuf_cat(buf, "\n"); | sbuf_cat(buf, "\n"); | ||||
if (hw->mac.type == I40E_MAC_X722) { | if (hw->mac.type == I40E_MAC_X722) { | ||||
bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key)); | |||||
status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); | status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); | ||||
if (status) | if (status) | ||||
device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", | device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", | ||||
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | ||||
sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, ""); | |||||
} else { | } else { | ||||
for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { | for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { | ||||
reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); | reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); | ||||
sbuf_printf(buf, "%4D", (u_char *)®, ""); | bcopy(®, ((caddr_t)&key_data) + (i << 2), 4); | ||||
} | } | ||||
} | } | ||||
ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); | |||||
error = sbuf_finish(buf); | error = sbuf_finish(buf); | ||||
if (error) | if (error) | ||||
device_printf(dev, "Error finishing sbuf: %d\n", error); | device_printf(dev, "Error finishing sbuf: %d\n", error); | ||||
sbuf_delete(buf); | sbuf_delete(buf); | ||||
return (error); | return (error); | ||||
} | } | ||||
static void | |||||
ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) | |||||
{ | |||||
int i, j, k, width; | |||||
char c; | |||||
if (length < 1 || buf == NULL) return; | |||||
int byte_stride = 16; | |||||
int lines = length / byte_stride; | |||||
int rem = length % byte_stride; | |||||
if (rem > 0) | |||||
lines++; | |||||
for (i = 0; i < lines; i++) { | |||||
width = (rem > 0 && i == lines - 1) | |||||
? rem : byte_stride; | |||||
sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); | |||||
for (j = 0; j < width; j++) | |||||
sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); | |||||
if (width < byte_stride) { | |||||
for (k = 0; k < (byte_stride - width); k++) | |||||
sbuf_printf(sb, " "); | |||||
} | |||||
if (!text) { | |||||
sbuf_printf(sb, "\n"); | |||||
continue; | |||||
} | |||||
for (j = 0; j < width; j++) { | |||||
c = (char)buf[i * byte_stride + j]; | |||||
if (c < 32 || c > 126) | |||||
sbuf_printf(sb, "."); | |||||
else | |||||
sbuf_printf(sb, "%c", c); | |||||
if (j == width - 1) | |||||
sbuf_printf(sb, "\n"); | |||||
} | |||||
} | |||||
} | |||||
static int | static int | ||||
ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) | ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
struct sbuf *buf; | struct sbuf *buf; | ||||
int error = 0; | int error = 0; | ||||
enum i40e_status_code status; | enum i40e_status_code status; | ||||
u8 hlut[512]; | u8 hlut[512]; | ||||
u32 reg; | u32 reg; | ||||
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); | buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); | ||||
if (!buf) { | if (!buf) { | ||||
device_printf(dev, "Could not allocate sbuf for output.\n"); | device_printf(dev, "Could not allocate sbuf for output.\n"); | ||||
return (ENOMEM); | return (ENOMEM); | ||||
} | } | ||||
bzero(hlut, sizeof(hlut)); | |||||
sbuf_cat(buf, "\n"); | sbuf_cat(buf, "\n"); | ||||
if (hw->mac.type == I40E_MAC_X722) { | if (hw->mac.type == I40E_MAC_X722) { | ||||
bzero(hlut, sizeof(hlut)); | |||||
status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); | status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); | ||||
if (status) | if (status) | ||||
device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", | device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", | ||||
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | ||||
sbuf_printf(buf, "%512D", (u_char *)hlut, ""); | |||||
} else { | } else { | ||||
for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { | for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { | ||||
reg = rd32(hw, I40E_PFQF_HLUT(i)); | reg = rd32(hw, I40E_PFQF_HLUT(i)); | ||||
sbuf_printf(buf, "%4D", (u_char *)®, ""); | bcopy(®, &hlut[i << 2], 4); | ||||
} | } | ||||
} | } | ||||
ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); | |||||
error = sbuf_finish(buf); | error = sbuf_finish(buf); | ||||
if (error) | if (error) | ||||
device_printf(dev, "Error finishing sbuf: %d\n", error); | device_printf(dev, "Error finishing sbuf: %d\n", error); | ||||
sbuf_delete(buf); | sbuf_delete(buf); | ||||
return (error); | return (error); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 143 Lines • ▼ Show 20 Lines | ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, | ||||
if (status) { | if (status) { | ||||
device_printf(dev, | device_printf(dev, | ||||
"%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", | "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", | ||||
__func__, i40e_stat_str(hw, status), | __func__, i40e_stat_str(hw, status), | ||||
i40e_aq_str(hw, hw->aq.asq_last_status)); | i40e_aq_str(hw, hw->aq.asq_last_status)); | ||||
return (EIO); | return (EIO); | ||||
} | } | ||||
*is_set = !!(abilities->phy_type_ext & bit_pos); | *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); | ||||
return (0); | return (0); | ||||
} | } | ||||
static int | static int | ||||
ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, | ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, | ||||
u8 bit_pos, int set) | u8 bit_pos, int set) | ||||
{ | { | ||||
device_t dev = pf->dev; | device_t dev = pf->dev; | ||||
struct i40e_hw *hw = &pf->hw; | struct i40e_hw *hw = &pf->hw; | ||||
struct i40e_aq_set_phy_config config; | struct i40e_aq_set_phy_config config; | ||||
enum i40e_status_code status; | enum i40e_status_code status; | ||||
/* Set new PHY config */ | /* Set new PHY config */ | ||||
memset(&config, 0, sizeof(config)); | memset(&config, 0, sizeof(config)); | ||||
config.fec_config = abilities->phy_type_ext & ~(bit_pos); | config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); | ||||
if (set) | if (set) | ||||
config.fec_config |= bit_pos; | config.fec_config |= bit_pos; | ||||
if (config.fec_config != abilities->phy_type_ext) { | if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { | ||||
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; | config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; | ||||
config.phy_type = abilities->phy_type; | config.phy_type = abilities->phy_type; | ||||
config.phy_type_ext = abilities->phy_type_ext; | config.phy_type_ext = abilities->phy_type_ext; | ||||
config.link_speed = abilities->link_speed; | config.link_speed = abilities->link_speed; | ||||
config.eee_capability = abilities->eee_capability; | config.eee_capability = abilities->eee_capability; | ||||
config.eeer = abilities->eeer_val; | config.eeer = abilities->eeer_val; | ||||
config.low_power_ctrl = abilities->d3_lpan; | config.low_power_ctrl = abilities->d3_lpan; | ||||
status = i40e_aq_set_phy_config(hw, &config, NULL); | status = i40e_aq_set_phy_config(hw, &config, NULL); | ||||
Show All 12 Lines | |||||
static int | static int | ||||
ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) | ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
int mode, error = 0; | int mode, error = 0; | ||||
struct i40e_aq_get_phy_abilities_resp abilities; | struct i40e_aq_get_phy_abilities_resp abilities; | ||||
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, &mode); | error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
/* Read in new mode */ | /* Read in new mode */ | ||||
error = sysctl_handle_int(oidp, &mode, 0, req); | error = sysctl_handle_int(oidp, &mode, 0, req); | ||||
if ((error) || (req->newptr == NULL)) | if ((error) || (req->newptr == NULL)) | ||||
return (error); | return (error); | ||||
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); | return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); | ||||
} | } | ||||
static int | static int | ||||
ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) | ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
int mode, error = 0; | int mode, error = 0; | ||||
struct i40e_aq_get_phy_abilities_resp abilities; | struct i40e_aq_get_phy_abilities_resp abilities; | ||||
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, &mode); | error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
/* Read in new mode */ | /* Read in new mode */ | ||||
error = sysctl_handle_int(oidp, &mode, 0, req); | error = sysctl_handle_int(oidp, &mode, 0, req); | ||||
if ((error) || (req->newptr == NULL)) | if ((error) || (req->newptr == NULL)) | ||||
return (error); | return (error); | ||||
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); | return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); | ||||
} | } | ||||
static int | static int | ||||
ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) | ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
int mode, error = 0; | int mode, error = 0; | ||||
struct i40e_aq_get_phy_abilities_resp abilities; | struct i40e_aq_get_phy_abilities_resp abilities; | ||||
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, &mode); | error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
/* Read in new mode */ | /* Read in new mode */ | ||||
error = sysctl_handle_int(oidp, &mode, 0, req); | error = sysctl_handle_int(oidp, &mode, 0, req); | ||||
if ((error) || (req->newptr == NULL)) | if ((error) || (req->newptr == NULL)) | ||||
return (error); | return (error); | ||||
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); | return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); | ||||
} | } | ||||
static int | static int | ||||
ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) | ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
int mode, error = 0; | int mode, error = 0; | ||||
struct i40e_aq_get_phy_abilities_resp abilities; | struct i40e_aq_get_phy_abilities_resp abilities; | ||||
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, &mode); | error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
/* Read in new mode */ | /* Read in new mode */ | ||||
error = sysctl_handle_int(oidp, &mode, 0, req); | error = sysctl_handle_int(oidp, &mode, 0, req); | ||||
if ((error) || (req->newptr == NULL)) | if ((error) || (req->newptr == NULL)) | ||||
return (error); | return (error); | ||||
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); | return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); | ||||
} | } | ||||
static int | static int | ||||
ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) | ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) | ||||
{ | { | ||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | struct ixl_pf *pf = (struct ixl_pf *)arg1; | ||||
int mode, error = 0; | int mode, error = 0; | ||||
struct i40e_aq_get_phy_abilities_resp abilities; | struct i40e_aq_get_phy_abilities_resp abilities; | ||||
error = ixl_get_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, &mode); | error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); | ||||
if (error) | if (error) | ||||
return (error); | return (error); | ||||
/* Read in new mode */ | /* Read in new mode */ | ||||
error = sysctl_handle_int(oidp, &mode, 0, req); | error = sysctl_handle_int(oidp, &mode, 0, req); | ||||
if ((error) || (req->newptr == NULL)) | if ((error) || (req->newptr == NULL)) | ||||
return (error); | return (error); | ||||
return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); | return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); | ||||
} | } | ||||
static int | |||||
ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | |||||
struct i40e_hw *hw = &pf->hw; | |||||
device_t dev = pf->dev; | |||||
struct sbuf *buf; | |||||
int error = 0; | |||||
enum i40e_status_code status; | |||||
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); | |||||
if (!buf) { | |||||
device_printf(dev, "Could not allocate sbuf for output.\n"); | |||||
return (ENOMEM); | |||||
} | |||||
u8 *final_buff; | |||||
/* This amount is only necessary if reading the entire cluster into memory */ | |||||
#define IXL_FINAL_BUFF_SIZE (1280 * 1024) | |||||
final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK); | |||||
if (final_buff == NULL) { | |||||
device_printf(dev, "Could not allocate memory for output.\n"); | |||||
goto out; | |||||
} | |||||
int final_buff_len = 0; | |||||
u8 cluster_id = 1; | |||||
bool more = true; | |||||
u8 dump_buf[4096]; | |||||
u16 curr_buff_size = 4096; | |||||
u8 curr_next_table = 0; | |||||
u32 curr_next_index = 0; | |||||
u16 ret_buff_size; | |||||
u8 ret_next_table; | |||||
u32 ret_next_index; | |||||
sbuf_cat(buf, "\n"); | |||||
while (more) { | |||||
status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size, | |||||
dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL); | |||||
if (status) { | |||||
device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n", | |||||
i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); | |||||
goto free_out; | |||||
} | |||||
/* copy info out of temp buffer */ | |||||
bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size); | |||||
final_buff_len += ret_buff_size; | |||||
if (ret_next_table != curr_next_table) { | |||||
/* We're done with the current table; we can dump out read data. */ | |||||
sbuf_printf(buf, "%d:", curr_next_table); | |||||
int bytes_printed = 0; | |||||
while (bytes_printed <= final_buff_len) { | |||||
sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), ""); | |||||
bytes_printed += 16; | |||||
} | |||||
sbuf_cat(buf, "\n"); | |||||
/* The entire cluster has been read; we're finished */ | |||||
if (ret_next_table == 0xFF) | |||||
break; | |||||
/* Otherwise clear the output buffer and continue reading */ | |||||
bzero(final_buff, IXL_FINAL_BUFF_SIZE); | |||||
final_buff_len = 0; | |||||
} | |||||
if (ret_next_index == 0xFFFFFFFF) | |||||
ret_next_index = 0; | |||||
bzero(dump_buf, sizeof(dump_buf)); | |||||
curr_next_table = ret_next_table; | |||||
curr_next_index = ret_next_index; | |||||
} | |||||
free_out: | |||||
free(final_buff, M_DEVBUF); | |||||
out: | |||||
error = sbuf_finish(buf); | |||||
if (error) | |||||
device_printf(dev, "Error finishing sbuf: %d\n", error); | |||||
sbuf_delete(buf); | |||||
return (error); | |||||
} | |||||
static int | |||||
ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) | |||||
{ | |||||
struct ixl_pf *pf = (struct ixl_pf *)arg1; | |||||
struct i40e_hw *hw = &pf->hw; | |||||
device_t dev = pf->dev; | |||||
int error = 0; | |||||
int state, new_state; | |||||
enum i40e_status_code status; | |||||
state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); | |||||
/* Read in new mode */ | |||||
error = sysctl_handle_int(oidp, &new_state, 0, req); | |||||
if ((error) || (req->newptr == NULL)) | |||||
return (error); | |||||
/* Already in requested state */ | |||||
if (new_state == state) | |||||
return (error); | |||||
if (new_state == 0) { | |||||
if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) { | |||||
device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n"); | |||||
return (EINVAL); | |||||
} | |||||
if (pf->hw.aq.api_maj_ver < 1 || | |||||
(pf->hw.aq.api_maj_ver == 1 && | |||||
pf->hw.aq.api_min_ver < 7)) { | |||||
device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); | |||||
return (EINVAL); | |||||
} | |||||
i40e_aq_stop_lldp(&pf->hw, true, NULL); | |||||
i40e_aq_set_dcb_parameters(&pf->hw, true, NULL); | |||||
atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); | |||||
} else { | |||||
status = i40e_aq_start_lldp(&pf->hw, NULL); | |||||
if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST) | |||||
device_printf(dev, "FW LLDP agent is already running\n"); | |||||
atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); | |||||
} | |||||
return (0); | |||||
} | |||||
/* | |||||
* Get FW LLDP Agent status | |||||
*/ | |||||
int | |||||
ixl_get_fw_lldp_status(struct ixl_pf *pf) | |||||
{ | |||||
enum i40e_status_code ret = I40E_SUCCESS; | |||||
struct i40e_lldp_variables lldp_cfg; | |||||
struct i40e_hw *hw = &pf->hw; | |||||
u8 adminstatus = 0; | |||||
ret = i40e_read_lldp_cfg(hw, &lldp_cfg); | |||||
if (ret) | |||||
return ret; | |||||
/* Get the LLDP AdminStatus for the current port */ | |||||
adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); | |||||
adminstatus &= 0xf; | |||||
/* Check if LLDP agent is disabled */ | |||||
if (!adminstatus) { | |||||
device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n"); | |||||
atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); | |||||
} else | |||||
atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); | |||||
return (0); | |||||
} | |||||
int | |||||
ixl_attach_get_link_status(struct ixl_pf *pf) | |||||
{ | |||||
struct i40e_hw *hw = &pf->hw; | |||||
device_t dev = pf->dev; | |||||
int error = 0; | |||||
if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || | |||||
(hw->aq.fw_maj_ver < 4)) { | |||||
i40e_msec_delay(75); | |||||
error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); | |||||
if (error) { | |||||
device_printf(dev, "link restart failed, aq_err=%d\n", | |||||
pf->hw.aq.asq_last_status); | |||||
return error; | |||||
} | |||||
} | |||||
/* Determine link state */ | |||||
hw->phy.get_link_info = TRUE; | |||||
i40e_get_link_status(hw, &pf->link_up); | |||||
return (0); | |||||
} |