Page MenuHomeFreeBSD

D20290.id57840.diff
No OneTemporary

D20290.id57840.diff

Index: sys/dev/ixl/i40e_adminq.h
===================================================================
--- sys/dev/ixl/i40e_adminq.h
+++ sys/dev/ixl/i40e_adminq.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/i40e_adminq.c
===================================================================
--- sys/dev/ixl/i40e_adminq.c
+++ sys/dev/ixl/i40e_adminq.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -125,6 +125,7 @@
**/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
@@ -404,7 +405,7 @@
/* initialize base registers */
ret_code = i40e_config_asq_regs(hw);
if (ret_code != I40E_SUCCESS)
- goto init_adminq_free_rings;
+ goto init_config_regs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
@@ -412,6 +413,10 @@
init_adminq_free_rings:
i40e_free_adminq_asq(hw);
+ return ret_code;
+
+init_config_regs:
+ i40e_free_asq_bufs(hw);
init_adminq_exit:
return ret_code;
@@ -575,21 +580,22 @@
**/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
+ struct i40e_adminq_info *aq = &hw->aq;
+ enum i40e_status_code ret_code;
u16 cfg_ptr, oem_hi, oem_lo;
u16 eetrack_lo, eetrack_hi;
- enum i40e_status_code ret_code;
int retry = 0;
/* verify input for valid configuration */
- if ((hw->aq.num_arq_entries == 0) ||
- (hw->aq.num_asq_entries == 0) ||
- (hw->aq.arq_buf_size == 0) ||
- (hw->aq.asq_buf_size == 0)) {
+ if (aq->num_arq_entries == 0 ||
+ aq->num_asq_entries == 0 ||
+ aq->arq_buf_size == 0 ||
+ aq->asq_buf_size == 0) {
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
- i40e_init_spinlock(&hw->aq.asq_spinlock);
- i40e_init_spinlock(&hw->aq.arq_spinlock);
+ i40e_init_spinlock(&aq->asq_spinlock);
+ i40e_init_spinlock(&aq->arq_spinlock);
/* Set up register offsets */
i40e_adminq_init_regs(hw);
@@ -616,11 +622,11 @@
*/
do {
ret_code = i40e_aq_get_firmware_version(hw,
- &hw->aq.fw_maj_ver,
- &hw->aq.fw_min_ver,
- &hw->aq.fw_build,
- &hw->aq.api_maj_ver,
- &hw->aq.api_min_ver,
+ &aq->fw_maj_ver,
+ &aq->fw_min_ver,
+ &aq->fw_build,
+ &aq->api_maj_ver,
+ &aq->api_min_ver,
NULL);
if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
break;
@@ -643,26 +649,43 @@
i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
-
- /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
- if ((hw->aq.api_maj_ver > 1) ||
- ((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver >= 7)))
- hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
-
- if (hw->mac.type == I40E_MAC_XL710 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ /*
+ * Some features were introduced in different FW API version
+ * for different MAC type.
+ */
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* The ability to RX (not drop) 802.1ad frames */
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ }
+ break;
+ case I40E_MAC_X722:
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* fall through */
+ default:
+ break;
}
/* Newer versions of firmware require lock when reading the NVM */
- if ((hw->aq.api_maj_ver > 1) ||
- ((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver >= 5)))
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 5))
hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
- if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 8))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+
+ if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
}
@@ -682,8 +705,8 @@
init_adminq_free_asq:
i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
- i40e_destroy_spinlock(&hw->aq.asq_spinlock);
- i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+ i40e_destroy_spinlock(&aq->asq_spinlock);
+ i40e_destroy_spinlock(&aq->arq_spinlock);
init_adminq_exit:
return ret_code;
@@ -728,7 +751,7 @@
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
@@ -808,7 +831,7 @@
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
- status = I40E_ERR_QUEUE_EMPTY;
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
@@ -896,7 +919,7 @@
}
/* bump the tail */
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
(hw->aq.asq.next_to_use)++;
@@ -942,12 +965,14 @@
cmd_completed = TRUE;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = I40E_SUCCESS;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
"AQTX: desc and buffer writeback:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
@@ -1063,7 +1088,7 @@
hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_len, I40E_DMA_TO_NONDMA);
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);
Index: sys/dev/ixl/i40e_adminq_cmd.h
===================================================================
--- sys/dev/ixl/i40e_adminq_cmd.h
+++ sys/dev/ixl/i40e_adminq_cmd.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -43,8 +43,8 @@
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X722 0x0008
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -52,6 +52,8 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
struct i40e_aq_desc {
__le16 flags;
@@ -289,6 +291,7 @@
i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+ i40e_aqc_opc_lldp_restore = 0x0A0A,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
@@ -1782,6 +1785,8 @@
I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_25GBASE_AOC = 0x23,
I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_2_5GBASE_T = 0x30,
+ I40E_PHY_TYPE_5GBASE_T = 0x31,
I40E_PHY_TYPE_MAX,
I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
@@ -1823,19 +1828,25 @@
BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
- BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC))
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \
+ BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_5GBASE_T))
+#define I40E_LINK_SPEED_2_5GB_SHIFT 0x0
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
#define I40E_LINK_SPEED_25GB_SHIFT 0x6
+#define I40E_LINK_SPEED_5GB_SHIFT 0x7
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_2_5GB = (1 << I40E_LINK_SPEED_2_5GB_SHIFT),
+ I40E_LINK_SPEED_5GB = (1 << I40E_LINK_SPEED_5GB_SHIFT),
I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
@@ -1865,12 +1876,15 @@
#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
+#define I40E_AQ_EEE_AUTO 0x0001
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
#define I40E_AQ_EEE_10GBASE_T 0x0008
#define I40E_AQ_EEE_1000BASE_KX 0x0010
#define I40E_AQ_EEE_10GBASE_KX4 0x0020
#define I40E_AQ_EEE_10GBASE_KR 0x0040
+#define I40E_AQ_EEE_2_5GBASE_T 0x0100
+#define I40E_AQ_EEE_5GBASE_T 0x0200
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
@@ -1881,6 +1895,8 @@
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
+#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40
+#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -2122,15 +2138,29 @@
I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
};
+#pragma pack(1)
/* Run PHY Activity (0x0626) */
struct i40e_aqc_run_phy_activity {
- __le16 activity_id;
- u8 flags;
- u8 reserved1;
- __le32 control;
- __le32 data;
- u8 reserved2[4];
+ u8 cmd_flags;
+ __le16 activity_id;
+#define I40E_AQ_RUN_PHY_ACTIVITY_ACTIVITY_ID_USER_DEFINED 0x10
+ u8 reserved;
+ union {
+ struct {
+ __le32 dnl_opcode;
+#define I40E_AQ_RUN_PHY_ACTIVITY_DNL_OPCODE_GET_EEE_STATISTICS 0x801b
+ __le32 data;
+ u8 reserved2[4];
+ } cmd;
+ struct {
+ __le32 cmd_status;
+#define I40E_AQ_RUN_PHY_ACTIVITY_CMD_STATUS_SUCCESS 0x4
+ __le32 data0;
+ __le32 data1;
+ } resp;
+ } params;
};
+#pragma pack()
I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
@@ -2142,7 +2172,9 @@
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
u8 dev_addres;
- u8 reserved1[2];
+ u8 cmd_flags;
+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 1
+ u8 reserved1;
__le32 reg_address;
__le32 reg_value;
u8 reserved2[4];
@@ -2157,6 +2189,8 @@
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
@@ -2404,18 +2438,19 @@
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
-
struct i40e_aqc_lldp_start {
u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2
u8 reserved[15];
};
@@ -2535,6 +2570,16 @@
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+/* Restore LLDP Agent factory settings (direct 0x0A0A) */
+struct i40e_aqc_lldp_restore {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0
+#define I40E_AQ_LLDP_AGENT_RESTORE 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore);
+
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
Index: sys/dev/ixl/i40e_alloc.h
===================================================================
--- sys/dev/ixl/i40e_alloc.h
+++ sys/dev/ixl/i40e_alloc.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/i40e_common.c
===================================================================
--- sys/dev/ixl/i40e_common.c
+++ sys/dev/ixl/i40e_common.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -37,7 +37,6 @@
#include "i40e_prototype.h"
#include "virtchnl.h"
-
/**
* i40e_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -62,10 +61,15 @@
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
+ case I40E_DEV_ID_10G_B:
+ case I40E_DEV_ID_10G_SFP:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
+ case I40E_DEV_ID_X710_N3000:
+ case I40E_DEV_ID_XXV710_N3000:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_KX_X722:
@@ -319,32 +323,37 @@
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u32 effective_mask = hw->debug_mask & mask;
u8 *buf = (u8 *)buffer;
u16 len;
- u16 i = 0;
+ u16 i;
- if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ if (!effective_mask || !desc)
return;
len = LE16_TO_CPU(aq_desc->datalen);
- i40e_debug(hw, mask,
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(aq_desc->opcode),
LE16_TO_CPU(aq_desc->flags),
LE16_TO_CPU(aq_desc->datalen),
LE16_TO_CPU(aq_desc->retval));
- i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\tcookie (h,l) 0x%08X 0x%08X\n",
LE32_TO_CPU(aq_desc->cookie_high),
LE32_TO_CPU(aq_desc->cookie_low));
- i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\tparam (0,1) 0x%08X 0x%08X\n",
LE32_TO_CPU(aq_desc->params.internal.param0),
LE32_TO_CPU(aq_desc->params.internal.param1));
- i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\taddr (h,l) 0x%08X 0x%08X\n",
LE32_TO_CPU(aq_desc->params.external.addr_high),
LE32_TO_CPU(aq_desc->params.external.addr_low));
- if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ if (buffer && (buf_len != 0) && (len != 0) &&
+ (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
@@ -1014,6 +1023,18 @@
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+ /* NVMUpdate features structure initialization */
+ hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR;
+ hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR;
+ hw->nvmupd_features.size = sizeof(hw->nvmupd_features);
+ i40e_memset(hw->nvmupd_features.features, 0x0,
+ I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN *
+ sizeof(*hw->nvmupd_features.features),
+ I40E_NONDMA_MEM);
+
+ /* No features supported at the moment */
+ hw->nvmupd_features.features[0] = 0;
+
status = i40e_init_nvm(hw);
return status;
}
@@ -1235,6 +1256,8 @@
break;
case I40E_PHY_TYPE_100BASE_TX:
case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_2_5GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T:
case I40E_PHY_TYPE_10GBASE_T:
media = I40E_MEDIA_TYPE_BASET;
break;
@@ -1271,6 +1294,29 @@
return media;
}
+/**
+ * i40e_poll_globr - Poll for Global Reset completion
+ * @hw: pointer to the hardware structure
+ * @retry_limit: how many times to retry before failure
+ **/
+static enum i40e_status_code i40e_poll_globr(struct i40e_hw *hw,
+ u32 retry_limit)
+{
+ u32 cnt, reg = 0;
+
+ for (cnt = 0; cnt < retry_limit; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ return I40E_SUCCESS;
+ i40e_msec_delay(100);
+ }
+
+ DEBUGOUT("Global reset failed.\n");
+ DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg);
+
+ return I40E_ERR_RESET_FAILED;
+}
+
#define I40E_PF_RESET_WAIT_COUNT 200
/**
* i40e_pf_reset - Reset the PF
@@ -1294,7 +1340,7 @@
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- grst_del = grst_del * 20;
+ grst_del = min(grst_del * 20, 160U);
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
@@ -1340,14 +1386,14 @@
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
reg2 = rd32(hw, I40E_GLGEN_RSTAT);
- if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
- DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n");
- DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
- return I40E_ERR_NOT_READY;
- }
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
+ break;
i40e_msec_delay(1);
}
- if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS)
+ return I40E_ERR_RESET_FAILED;
+ } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
DEBUGOUT("PF reset polling failed to complete.\n");
return I40E_ERR_RESET_FAILED;
}
@@ -1511,7 +1557,6 @@
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
- u32 current_mode = 0;
u32 mode = 0;
int i;
@@ -1523,27 +1568,10 @@
if (!gpio_val)
continue;
-
- /* ignore gpio LED src mode entries related to the activity
- * LEDs
- */
- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
- switch (current_mode) {
- case I40E_COMBINED_ACTIVITY:
- case I40E_FILTER_ACTIVITY:
- case I40E_MAC_ACTIVITY:
- case I40E_LINK_ACTIVITY:
- continue;
- default:
- break;
- }
-
mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
}
-
return mode;
}
@@ -1558,7 +1586,6 @@
**/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
- u32 current_mode = 0;
int i;
if (mode & 0xfffffff0)
@@ -1572,22 +1599,6 @@
if (!gpio_val)
continue;
-
- /* ignore gpio LED src mode entries related to the activity
- * LEDs
- */
- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
- switch (current_mode) {
- case I40E_COMBINED_ACTIVITY:
- case I40E_FILTER_ACTIVITY:
- case I40E_MAC_ACTIVITY:
- case I40E_LINK_ACTIVITY:
- continue;
- default:
- break;
- }
-
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -1828,6 +1839,10 @@
if (crc_en)
cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF
+ cmd->fc_refresh_threshold =
+ CPU_TO_LE16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD);
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -1968,8 +1983,7 @@
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
- if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= 7) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
__le32 tmp;
i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
@@ -4101,6 +4115,43 @@
return status;
}
+/**
+ * i40e_aq_rearrange_nvm
+ * @hw: pointer to the hw struct
+ * @rearrange_nvm: defines direction of rearrangement
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Rearrange NVM structure, available only for transition FW
+ **/
+enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_nvm_update *cmd;
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+
+ DEBUGFUNC("i40e_aq_rearrange_nvm");
+
+ cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+ rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT |
+ I40E_AQ_NVM_REARRANGE_TO_STRUCT);
+
+ if (!rearrange_nvm) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_rearrange_nvm_exit;
+ }
+
+ cmd->command_flags |= rearrange_nvm;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_rearrange_nvm_exit:
+ return status;
+}
+
/**
* i40e_aq_nvm_progress
* @hw: pointer to the hw struct
@@ -4208,7 +4259,7 @@
cmd->type = mib_type;
cmd->length = CPU_TO_LE16(buff_size);
- cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff));
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buff));
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff));
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
@@ -4244,151 +4295,39 @@
}
/**
- * i40e_aq_add_lldp_tlv
+ * i40e_aq_restore_lldp
* @hw: pointer to the hw struct
- * @bridge_type: type of bridge
- * @buff: buffer with TLV to add
- * @buff_size: length of the buffer
- * @tlv_len: length of the TLV to be added
- * @mib_len: length of the LLDP MIB returned in response
+ * @setting: pointer to factory setting variable or NULL
+ * @restore: True if factory settings should be restored
* @cmd_details: pointer to command details structure or NULL
*
- * Add the specified TLV to LLDP Local MIB for the given bridge type,
- * it is responsibility of the caller to make sure that the TLV is not
- * already present in the LLDPDU.
- * In return firmware will write the complete LLDP MIB with the newly
- * added TLV in the response buffer.
+ * Restore LLDP Agent factory settings if @restore set to True. In other case
+ * only returns factory setting in AQ response.
**/
-enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
- void *buff, u16 buff_size, u16 tlv_len,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_add_tlv *cmd =
- (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
- enum i40e_status_code status;
-
- if (buff_size == 0 || !buff || tlv_len == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
-
- /* Indirect Command */
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(buff_size);
-
- cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
- cmd->len = CPU_TO_LE16(tlv_len);
-
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- if (mib_len != NULL)
- *mib_len = LE16_TO_CPU(desc.datalen);
- }
-
- return status;
-}
-
-/**
- * i40e_aq_update_lldp_tlv
- * @hw: pointer to the hw struct
- * @bridge_type: type of bridge
- * @buff: buffer with TLV to update
- * @buff_size: size of the buffer holding original and updated TLVs
- * @old_len: Length of the Original TLV
- * @new_len: Length of the Updated TLV
- * @offset: offset of the updated TLV in the buff
- * @mib_len: length of the returned LLDP MIB
- * @cmd_details: pointer to command details structure or NULL
- *
- * Update the specified TLV to the LLDP Local MIB for the given bridge type.
- * Firmware will place the complete LLDP MIB in response buffer with the
- * updated TLV.
- **/
-enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 old_len, u16 new_len, u16 offset,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_update_tlv *cmd =
- (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
+ struct i40e_aqc_lldp_restore *cmd =
+ (struct i40e_aqc_lldp_restore *)&desc.params.raw;
enum i40e_status_code status;
- if (buff_size == 0 || !buff || offset == 0 ||
- old_len == 0 || new_len == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
-
- /* Indirect Command */
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(buff_size);
-
- cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
- cmd->old_len = CPU_TO_LE16(old_len);
- cmd->new_offset = CPU_TO_LE16(offset);
- cmd->new_len = CPU_TO_LE16(new_len);
-
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- if (mib_len != NULL)
- *mib_len = LE16_TO_CPU(desc.datalen);
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Restore LLDP not supported by current FW version.\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
- return status;
-}
-
-/**
- * i40e_aq_delete_lldp_tlv
- * @hw: pointer to the hw struct
- * @bridge_type: type of bridge
- * @buff: pointer to a user supplied buffer that has the TLV
- * @buff_size: length of the buffer
- * @tlv_len: length of the TLV to be deleted
- * @mib_len: length of the returned LLDP MIB
- * @cmd_details: pointer to command details structure or NULL
- *
- * Delete the specified TLV from LLDP Local MIB for the given bridge type.
- * The firmware places the entire LLDP MIB in the response buffer.
- **/
-enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 tlv_len, u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_add_tlv *cmd =
- (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
- enum i40e_status_code status;
-
- if (buff_size == 0 || !buff)
- return I40E_ERR_PARAM;
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
+ if (restore)
+ cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
- /* Indirect Command */
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(buff_size);
- cmd->len = CPU_TO_LE16(tlv_len);
- cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- if (mib_len != NULL)
- *mib_len = LE16_TO_CPU(desc.datalen);
- }
+ if (setting)
+ *setting = cmd->command & 1;
return status;
}
@@ -4397,11 +4336,13 @@
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @persist: True if stop of LLDP should be persistent across power cycles
* @cmd_details: pointer to command details structure or NULL
*
* Stop or Shutdown the embedded LLDP Agent
**/
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -4414,6 +4355,14 @@
if (shutdown_agent)
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Stop LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -4422,11 +4371,13 @@
/**
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
+ * @persist: True if start of LLDP should be persistent across power cycles
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
**/
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -4437,6 +4388,15 @@
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Start LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -4458,9 +4418,7 @@
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
enum i40e_status_code status;
- if ((hw->mac.type != I40E_MAC_XL710) ||
- ((hw->aq.api_maj_ver < 1) ||
- ((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 6))))
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
return I40E_ERR_DEVICE_NOT_SUPPORTED;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -6088,6 +6046,70 @@
return status;
}
+
+/**
+ * i40e_enable_eee
+ * @hw: pointer to the hardware structure
+ * @enable: state of Energy Efficient Ethernet mode to be set
+ *
+ * Enables or disables Energy Efficient Ethernet (EEE) mode
+ * accordingly to @enable parameter.
+ **/
+enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code status;
+ __le16 eee_capability;
+
+ /* Get initial PHY capabilities */
+ status = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities,
+ NULL);
+ if (status)
+ goto err;
+
+ /* Check whether NIC configuration is compatible with Energy Efficient
+ * Ethernet (EEE) mode.
+ */
+ if (abilities.eee_capability == 0) {
+ status = I40E_ERR_CONFIG;
+ goto err;
+ }
+
+ /* Cache initial EEE capability */
+ eee_capability = abilities.eee_capability;
+
+ /* Get current configuration */
+ status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,
+ NULL);
+ if (status)
+ goto err;
+
+ /* Cache current configuration */
+ config.phy_type = abilities.phy_type;
+ config.link_speed = abilities.link_speed;
+ config.abilities = abilities.abilities |
+ I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ /* Set desired EEE state */
+ if (enable) {
+ config.eee_capability = eee_capability;
+ config.eeer |= I40E_PRTPM_EEER_TX_LPI_EN_MASK;
+ } else {
+ config.eee_capability = 0;
+ config.eeer &= ~I40E_PRTPM_EEER_TX_LPI_EN_MASK;
+ }
+
+ /* Save modified config */
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+err:
+ return status;
+}
+
/**
* i40e_read_bw_from_alt_ram
* @hw: pointer to the hardware structure
@@ -6408,6 +6430,7 @@
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
@@ -6444,6 +6467,7 @@
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
@@ -6563,7 +6587,7 @@
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, TRUE,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -6591,7 +6615,7 @@
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, TRUE,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -6625,7 +6649,7 @@
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, TRUE,
I40E_PHY_LED_PROV_REG_1,
&reg_val_aq, NULL);
if (status == I40E_SUCCESS)
@@ -6699,6 +6723,119 @@
return status;
}
+/**
+ * i40e_get_phy_lpi_status - read LPI status from PHY or MAC register
+ * @hw: pointer to the hw struct
+ * @stat: pointer to structure with status of rx and tx lpi
+ *
+ * Read LPI state directly from external PHY register or from MAC
+ * register, depending on device ID and current link speed.
+ */
+enum i40e_status_code i40e_get_phy_lpi_status(struct i40e_hw *hw,
+ struct i40e_hw_port_stats *stat)
+{
+ enum i40e_status_code ret = I40E_SUCCESS;
+ u32 val;
+
+ stat->rx_lpi_status = 0;
+ stat->tx_lpi_status = 0;
+
+ if (hw->device_id == I40E_DEV_ID_10G_BASE_T_BC &&
+ (hw->phy.link_info.link_speed == I40E_LINK_SPEED_2_5GB ||
+ hw->phy.link_info.link_speed == I40E_LINK_SPEED_5GB)) {
+ ret = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
+ I40E_BCM_PHY_PCS_STATUS1_PAGE,
+ TRUE,
+ I40E_BCM_PHY_PCS_STATUS1_REG,
+ &val, NULL);
+
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ stat->rx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_RX_LPI);
+ stat->tx_lpi_status = !!(val & I40E_BCM_PHY_PCS_STATUS1_TX_LPI);
+
+ return ret;
+ }
+
+ val = rd32(hw, I40E_PRTPM_EEE_STAT);
+ stat->rx_lpi_status = (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ stat->tx_lpi_status = (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+
+ return ret;
+}
+
+/**
+ * i40e_get_lpi_counters - read LPI counters from EEE statistics
+ * @hw: pointer to the hw struct
+ * @tx_counter: pointer to memory for TX LPI counter
+ * @rx_counter: pointer to memory for RX LPI counter
+ *
+ * Read Low Power Idle (LPI) mode counters from Energy Efficient
+ * Ethernet (EEE) statistics.
+ **/
+enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw,
+ u32 *tx_counter, u32 *rx_counter)
+{
+ enum i40e_status_code retval;
+ u32 cmd_status;
+
+ retval = i40e_aq_run_phy_activity(hw,
+ I40E_AQ_RUN_PHY_ACTIVITY_ACTIVITY_ID_USER_DEFINED,
+ I40E_AQ_RUN_PHY_ACTIVITY_DNL_OPCODE_GET_EEE_STATISTICS,
+ &cmd_status, tx_counter, rx_counter, NULL);
+
+ if (retval == I40E_SUCCESS && cmd_status != I40E_AQ_RUN_PHY_ACTIVITY_CMD_STATUS_SUCCESS)
+ retval = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return retval;
+}
+
+/**
+ * i40e_lpi_stat_update - update LPI counters with values relative to offset
+ * @hw: pointer to the hw struct
+ * @offset_loaded: flag indicating need of writing current value to offset
+ * @tx_offset: pointer to offset of TX LPI counter
+ * @tx_stat: pointer to value of TX LPI counter
+ * @rx_offset: pointer to offset of RX LPI counter
+ * @rx_stat: pointer to value of RX LPI counter
+ *
+ * Update Low Power Idle (LPI) mode counters while having regard to passed
+ * offsets.
+ **/
+enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw,
+ bool offset_loaded, u64 *tx_offset,
+ u64 *tx_stat, u64 *rx_offset,
+ u64 *rx_stat)
+{
+ enum i40e_status_code retval;
+ u32 tx_counter, rx_counter;
+
+ retval = i40e_get_lpi_counters(hw, &tx_counter, &rx_counter);
+ if (retval)
+ goto err;
+
+ if (!offset_loaded) {
+ *tx_offset = tx_counter;
+ *rx_offset = rx_counter;
+ }
+
+ if (tx_counter >= *tx_offset)
+ *tx_stat = (u32)(tx_counter - *tx_offset);
+ else
+ *tx_stat = (u32)((tx_counter + BIT_ULL(32)) - *tx_offset);
+
+ if (rx_counter >= *rx_offset)
+ *rx_stat = (u32)(rx_counter - *rx_offset);
+ else
+ *rx_stat = (u32)((rx_counter + BIT_ULL(32)) - *rx_offset);
+err:
+ return retval;
+}
+
/**
* i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
* @hw: pointer to the hw struct
@@ -6830,6 +6967,7 @@
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: enable auto page change
* @reg_addr: PHY register address
* @reg_val: new register value
* @cmd_details: pointer to command details structure or NULL
@@ -6837,7 +6975,7 @@
* Write the external PHY register.
**/
enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
+ u8 phy_select, u8 dev_addr, bool page_change,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -6854,6 +6992,9 @@
cmd->reg_address = CPU_TO_LE32(reg_addr);
cmd->reg_value = CPU_TO_LE32(reg_val);
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -6864,6 +7005,7 @@
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: enable auto page change
* @reg_addr: PHY register address
* @reg_val: read register value
* @cmd_details: pointer to command details structure or NULL
@@ -6871,7 +7013,7 @@
* Read the external PHY register.
**/
enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
+ u8 phy_select, u8 dev_addr, bool page_change,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
@@ -6887,6 +7029,9 @@
cmd->dev_addres = dev_addr;
cmd->reg_address = CPU_TO_LE32(reg_addr);
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
*reg_val = LE32_TO_CPU(cmd->reg_value);
@@ -6894,6 +7039,50 @@
return status;
}
+/**
+ * i40e_aq_run_phy_activity
+ * @hw: pointer to the hw struct
+ * @activity_id: ID of DNL activity to run
+ * @dnl_opcode: opcode passed to DNL script
+ * @cmd_status: pointer to memory to write return value of DNL script
+ * @data0: pointer to memory for first 4 bytes of data returned by DNL script
+ * @data1: pointer to memory for last 4 bytes of data returned by DNL script
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Run DNL admin command.
+ **/
+enum i40e_status_code
+i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 dnl_opcode,
+ u32 *cmd_status, u32 *data0, u32 *data1,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_run_phy_activity *cmd;
+ enum i40e_status_code retval;
+ struct i40e_aq_desc desc;
+
+ cmd = (struct i40e_aqc_run_phy_activity *)&desc.params.raw;
+
+ if (!cmd_status || !data0 || !data1) {
+ retval = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_run_phy_activity);
+
+ cmd->activity_id = CPU_TO_LE16(activity_id);
+ cmd->params.cmd.dnl_opcode = CPU_TO_LE32(dnl_opcode);
+
+ retval = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (retval)
+ goto err;
+
+ *cmd_status = LE32_TO_CPU(cmd->params.resp.cmd_status);
+ *data0 = LE32_TO_CPU(cmd->params.resp.data0);
+ *data1 = LE32_TO_CPU(cmd->params.resp.data1);
+err:
+ return retval;
+}
/**
* i40e_aq_send_msg_to_pf
Index: sys/dev/ixl/i40e_dcb.h
===================================================================
--- sys/dev/ixl/i40e_dcb.h
+++ sys/dev/ixl/i40e_dcb.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -208,6 +208,12 @@
u32 deftsaassignment;
};
+
+enum i40e_get_fw_lldp_status_resp {
+ I40E_GET_FW_LLDP_STATUS_DISABLED = 0,
+ I40E_GET_FW_LLDP_STATUS_ENABLED = 1
+};
+
enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw,
u16 *status);
enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
@@ -216,7 +222,11 @@
u8 bridgetype,
struct i40e_dcbx_config *dcbcfg);
enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
-enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw,
+ bool enable_mib_change);
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status);
enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
struct i40e_dcbx_config *dcbcfg);
Index: sys/dev/ixl/i40e_dcb.c
===================================================================
--- sys/dev/ixl/i40e_dcb.c
+++ sys/dev/ixl/i40e_dcb.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -893,22 +893,23 @@
/**
* i40e_init_dcb
* @hw: pointer to the hw struct
+ * @enable_mib_change: enable mib change event
*
* Update DCB configuration from the Firmware
**/
-enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
enum i40e_status_code ret = I40E_SUCCESS;
struct i40e_lldp_variables lldp_cfg;
u8 adminstatus = 0;
if (!hw->func_caps.dcb)
- return ret;
+ return I40E_NOT_SUPPORTED;
/* Read LLDP NVM area */
ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
if (ret)
- return ret;
+ return I40E_ERR_NOT_READY;
/* Get the LLDP AdminStatus for the current port */
adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -917,7 +918,7 @@
/* LLDP agent disabled */
if (!adminstatus) {
hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
- return ret;
+ return I40E_ERR_NOT_READY;
}
/* Get DCBX status */
@@ -926,30 +927,67 @@
return ret;
/* Check the DCBX Status */
- switch (hw->dcbx_status) {
- case I40E_DCBX_STATUS_DONE:
- case I40E_DCBX_STATUS_IN_PROGRESS:
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
+ hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
/* Get current DCBX configuration */
ret = i40e_get_dcb_config(hw);
if (ret)
return ret;
- break;
- case I40E_DCBX_STATUS_DISABLED:
- return ret;
- case I40E_DCBX_STATUS_NOT_STARTED:
- case I40E_DCBX_STATUS_MULTIPLE_PEERS:
- default:
- break;
+ } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ return I40E_ERR_NOT_READY;
}
/* Configure the LLDP MIB change event */
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL);
+ if (enable_mib_change)
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL);
+
+ return ret;
+}
+
+/**
+ * i40e_get_fw_lldp_status
+ * @hw: pointer to the hw struct
+ * @lldp_status: pointer to the status enum
+ *
+ * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
+ * Status of agent is reported via @lldp_status parameter.
+ **/
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status)
+{
+ enum i40e_status_code ret;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ if (!lldp_status)
+ return I40E_ERR_PARAM;
+
+ /* Allocate buffer for the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
if (ret)
return ret;
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib,
+ I40E_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (ret == I40E_SUCCESS) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+ /* MIB is not available yet but the agent is running */
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ ret = I40E_SUCCESS;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
+ ret = I40E_SUCCESS;
+ }
+
+ i40e_free_virt_mem(hw, &mem);
return ret;
}
+
/**
* i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
* @tlv: Fill the ETS config data in IEEE format
Index: sys/dev/ixl/i40e_devids.h
===================================================================
--- sys/dev/ixl/i40e_devids.h
+++ sys/dev/ixl/i40e_devids.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -39,6 +39,8 @@
#define I40E_INTEL_VENDOR_ID 0x8086
/* Device IDs */
+#define I40E_DEV_ID_X710_N3000 0x0CF8
+#define I40E_DEV_ID_XXV710_N3000 0x0D58
#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_B 0x1580
@@ -52,6 +54,9 @@
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_25G_B 0x158A
#define I40E_DEV_ID_25G_SFP28 0x158B
+#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
+#define I40E_DEV_ID_10G_B 0x104F
+#define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#define I40E_DEV_ID_ADAPTIVE_VF 0x1889
Index: sys/dev/ixl/i40e_hmc.h
===================================================================
--- sys/dev/ixl/i40e_hmc.h
+++ sys/dev/ixl/i40e_hmc.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/i40e_hmc.c
===================================================================
--- sys/dev/ixl/i40e_hmc.c
+++ sys/dev/ixl/i40e_hmc.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/i40e_lan_hmc.h
===================================================================
--- sys/dev/ixl/i40e_lan_hmc.h
+++ sys/dev/ixl/i40e_lan_hmc.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/i40e_lan_hmc.c
===================================================================
--- sys/dev/ixl/i40e_lan_hmc.c
+++ sys/dev/ixl/i40e_lan_hmc.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -144,7 +144,7 @@
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
txq_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -167,7 +167,7 @@
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
rxq_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -190,7 +190,7 @@
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_cntx_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -213,7 +213,7 @@
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_filt_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -234,7 +234,7 @@
(sizeof(struct i40e_hmc_sd_entry) *
hw->hmc.sd_table.sd_cnt));
if (ret_code)
- goto init_lan_hmc_out;
+ goto free_hmc_out;
hw->hmc.sd_table.sd_entry =
(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
}
@@ -242,6 +242,11 @@
full_obj->size = l2fpm_size;
init_lan_hmc_out:
+ return ret_code;
+free_hmc_out:
+ if (hw->hmc.hmc_obj_virt_mem.va)
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+
return ret_code;
}
Index: sys/dev/ixl/i40e_nvm.c
===================================================================
--- sys/dev/ixl/i40e_nvm.c
+++ sys/dev/ixl/i40e_nvm.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -826,6 +826,7 @@
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
"I40E_NVMUPD_GET_AQ_EVENT",
+ "I40E_NVMUPD_GET_FEATURES",
};
/**
@@ -888,6 +889,31 @@
return I40E_SUCCESS;
}
+ /*
+ * A supported features request returns immediately
+ * rather than going into state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_FEATURES) {
+ if (cmd->data_size < hw->nvmupd_features.size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
+ /*
+ * If buffer is bigger than i40e_nvmupd_features structure,
+ * make sure the trailing bytes are set to 0x0.
+ */
+ if (cmd->data_size > hw->nvmupd_features.size)
+ i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
+ cmd->data_size - hw->nvmupd_features.size,
+ I40E_NONDMA_MEM);
+
+ i40e_memcpy(bytes, &hw->nvmupd_features,
+ hw->nvmupd_features.size, I40E_NONDMA_MEM);
+
+ return I40E_SUCCESS;
+ }
+
/* Clear status even it is not read and log */
if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
i40e_debug(hw, I40E_DEBUG_NVM,
@@ -1354,10 +1380,20 @@
upd_cmd = I40E_NVMUPD_READ_SA;
break;
case I40E_NVM_EXEC:
- if (module == 0xf)
- upd_cmd = I40E_NVMUPD_STATUS;
- else if (module == 0)
+ switch (module) {
+ case I40E_NVM_EXEC_GET_AQ_RESULT:
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
+ case I40E_NVM_EXEC_FEATURES:
+ upd_cmd = I40E_NVMUPD_FEATURES;
+ break;
+ case I40E_NVM_EXEC_STATUS:
+ upd_cmd = I40E_NVMUPD_STATUS;
+ break;
+ default:
+ *perrno = -EFAULT;
+ return I40E_NVMUPD_INVALID;
+ }
break;
case I40E_NVM_AQE:
upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
Index: sys/dev/ixl/i40e_osdep.h
===================================================================
--- sys/dev/ixl/i40e_osdep.h
+++ sys/dev/ixl/i40e_osdep.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/i40e_osdep.c
===================================================================
--- sys/dev/ixl/i40e_osdep.c
+++ sys/dev/ixl/i40e_osdep.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -33,6 +33,7 @@
/*$FreeBSD$*/
#include <sys/limits.h>
+#include <sys/time.h>
#include "ixl.h"
@@ -161,27 +162,15 @@
mtx_destroy(&lock->mutex);
}
+#ifndef MSEC_2_TICKS
+#define MSEC_2_TICKS(m) max(1, (uint32_t)((hz == 1000) ? \
+ (m) : ((uint64_t)(m) * (uint64_t)hz)/(uint64_t)1000))
+#endif
+
void
i40e_msec_pause(int msecs)
{
- int ticks_to_pause = (msecs * hz) / 1000;
- int start_ticks = ticks;
-
- if (cold || SCHEDULER_STOPPED()) {
- i40e_msec_delay(msecs);
- return;
- }
-
- while (1) {
- kern_yield(PRI_USER);
- int yielded_ticks = ticks - start_ticks;
- if (yielded_ticks > ticks_to_pause)
- break;
- else if (yielded_ticks < 0
- && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) {
- break;
- }
- }
+ pause("i40e_msec_pause", MSEC_2_TICKS(msecs));
}
/*
Index: sys/dev/ixl/i40e_prototype.h
===================================================================
--- sys/dev/ixl/i40e_prototype.h
+++ sys/dev/ixl/i40e_prototype.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -101,6 +101,14 @@
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
+enum i40e_status_code i40e_get_phy_lpi_status(struct i40e_hw *hw,
+ struct i40e_hw_port_stats *stats);
+enum i40e_status_code i40e_get_lpi_counters(struct i40e_hw *hw, u32 *tx_counter,
+ u32 *rx_counter);
+enum i40e_status_code i40e_lpi_stat_update(struct i40e_hw *hw,
+ bool offset_loaded, u64 *tx_offset,
+ u64 *tx_stat, u64 *rx_offset,
+ u64 *rx_stat);
/* admin send queue commands */
enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
@@ -264,6 +272,9 @@
u32 offset, u16 length, void *data,
bool last_command, u8 preservation_flags,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
@@ -276,26 +287,18 @@
enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
- void *buff, u16 buff_size, u16 tlv_len,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 old_len, u16 new_len, u16 offset,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 tlv_len, u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
bool dcb_enable,
struct i40e_asq_cmd_details
*cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
@@ -470,6 +473,7 @@
struct i40e_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+enum i40e_status_code i40e_enable_eee(struct i40e_hw *hw, bool enable);
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
@@ -497,6 +501,10 @@
return VIRTCHNL_LINK_SPEED_100MB;
case I40E_LINK_SPEED_1GB:
return VIRTCHNL_LINK_SPEED_1GB;
+ case I40E_LINK_SPEED_2_5GB:
+ return VIRTCHNL_LINK_SPEED_2_5GB;
+ case I40E_LINK_SPEED_5GB:
+ return VIRTCHNL_LINK_SPEED_5GB;
case I40E_LINK_SPEED_10GB:
return VIRTCHNL_LINK_SPEED_10GB;
case I40E_LINK_SPEED_40GB:
@@ -549,13 +557,17 @@
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
+ u8 phy_select, u8 dev_addr, bool page_change,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
+ u8 phy_select, u8 dev_addr, bool page_change,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_run_phy_activity(struct i40e_hw *hw, u16 activity_id, u32 opcode,
+ u32 *cmd_status, u32 *data0, u32 *data1,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
Index: sys/dev/ixl/i40e_register.h
===================================================================
--- sys/dev/ixl/i40e_register.h
+++ sys/dev/ixl/i40e_register.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/i40e_status.h
===================================================================
--- sys/dev/ixl/i40e_status.h
+++ sys/dev/ixl/i40e_status.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/i40e_type.h
===================================================================
--- sys/dev/ixl/i40e_type.h
+++ sys/dev/ixl/i40e_type.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -56,7 +56,7 @@
#define I40E_MAX_PF_VSI 64
#define I40E_MAX_PF_QP 128
#define I40E_MAX_VSI_QP 16
-#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_VF_VSI 4
#define I40E_MAX_CHAINED_RX_BUFFERS 5
#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
@@ -345,6 +345,12 @@
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
I40E_PHY_TYPE_OFFSET)
+/* Offset for 2.5G/5G PHY Types value to bit number conversion */
+#define I40E_PHY_TYPE_OFFSET2 (-10)
+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
@@ -487,6 +493,7 @@
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
I40E_NVMUPD_GET_AQ_EVENT,
+ I40E_NVMUPD_FEATURES,
};
enum i40e_nvmupd_state {
@@ -522,6 +529,10 @@
#define I40E_NVM_AQE 0xe
#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_EXEC_GET_AQ_RESULT 0x0
+#define I40E_NVM_EXEC_FEATURES 0xe
+#define I40E_NVM_EXEC_STATUS 0xf
+
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
@@ -536,6 +547,20 @@
u8 data[1];
};
+/* NVMUpdate features API */
+#define I40E_NVMUPD_FEATURES_API_VER_MAJOR 0
+#define I40E_NVMUPD_FEATURES_API_VER_MINOR 14
+#define I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12
+
+#define I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0)
+
+struct i40e_nvmupd_features {
+ u8 major;
+ u8 minor;
+ u16 size;
+ u8 features[I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN];
+};
+
/* (Q)SFP module access definitions */
#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
@@ -727,6 +752,8 @@
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
+#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
u64 flags;
/* Used in set switch config AQ command */
@@ -734,6 +761,9 @@
u16 first_tag;
u16 second_tag;
+ /* NVMUpdate features */
+ struct i40e_nvmupd_features nvmupd_features;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
@@ -1698,4 +1728,10 @@
#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
+
+#define I40E_BCM_PHY_PCS_STATUS1_PAGE 0x3
+#define I40E_BCM_PHY_PCS_STATUS1_REG 0x0001
+#define I40E_BCM_PHY_PCS_STATUS1_RX_LPI BIT(8)
+#define I40E_BCM_PHY_PCS_STATUS1_TX_LPI BIT(9)
+
#endif /* _I40E_TYPE_H_ */
Index: sys/dev/ixl/if_ixl.c
===================================================================
--- sys/dev/ixl/if_ixl.c
+++ sys/dev/ixl/if_ixl.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -48,7 +48,7 @@
* Driver version
*********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR 1
-#define IXL_DRIVER_VERSION_MINOR 9
+#define IXL_DRIVER_VERSION_MINOR 11
#define IXL_DRIVER_VERSION_BUILD 9
char ixl_driver_version[] = __XSTRING(IXL_DRIVER_VERSION_MAJOR) "."
@@ -83,6 +83,9 @@
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_BC, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_SFP, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_B, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -190,6 +193,13 @@
&ixl_enable_tx_fc_filter, 0,
"Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
+static int ixl_i2c_access_method = 0;
+TUNABLE_INT("hw.ixl.i2c_access_method",
+ &ixl_i2c_access_method);
+SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN,
+ &ixl_i2c_access_method, 0,
+ IXL_SYSCTL_HELP_I2C_METHOD);
+
/*
* Different method for processing TX descriptor
* completion.
@@ -215,6 +225,7 @@
&ixl_shared_debug_mask, 0,
"Display debug statements that are printed in shared code");
+
/*
** Controls for Interrupt Throttling
** - true/false for dynamic adjustment
@@ -335,9 +346,15 @@
#else
pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
#endif
-
ixl_vsi_setup_rings_size(&pf->vsi, ixl_tx_ring_size, ixl_rx_ring_size);
+ if (ixl_i2c_access_method > IXL_I2C_ACCESS_METHOD_TYPE_LENGTH - 1
+ || ixl_i2c_access_method < IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE)
+ pf->i2c_access_method = IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE;
+ else
+ pf->i2c_access_method =
+ (enum ixl_i2c_access_method_t)ixl_i2c_access_method;
+
if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) {
device_printf(dev, "Invalid tx_itr value of %d set!\n",
ixl_tx_itr);
@@ -365,6 +382,65 @@
return (0);
}
+static int
+ixl_attach_recovery_mode(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+
+ atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
+
+ i40e_get_mac_addr(hw, hw->mac.addr);
+
+ pf->msix = ixl_init_msix(pf);
+ ixl_setup_stations(pf);
+ ixl_setup_interface(pf->dev, vsi);
+
+ if (pf->msix > 1) {
+ error = ixl_setup_adminq_msix(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
+ error);
+ goto recovery_err_late;
+ }
+ error = ixl_setup_adminq_tq(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
+ error);
+ goto recovery_err_late;
+ }
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
+ } else {
+ error = ixl_setup_legacy(pf);
+
+ error = ixl_setup_adminq_tq(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
+ error);
+ goto recovery_err_late;
+ }
+ }
+
+ /* Get the bus configuration and set the shared code's config */
+ ixl_get_bus_info(pf);
+
+ /* Initialize statistics & add sysctls */
+ ixl_add_device_sysctls(pf);
+
+ /* Start the local timer */
+ IXL_PF_LOCK(pf);
+ callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+ IXL_PF_UNLOCK(pf);
+
+recovery_err_late:
+ return (error);
+}
+
/*********************************************************************
* Device initialization routine
*
@@ -381,6 +457,7 @@
struct ixl_pf *pf;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
+ enum i40e_get_fw_lldp_status_resp lldp_status;
enum i40e_status_code status;
int error = 0;
@@ -412,19 +489,22 @@
/* Do PCI setup - map BAR0, etc */
if (ixl_allocate_pci_resources(pf)) {
- device_printf(dev, "Allocation of PCI resources failed\n");
error = ENXIO;
goto err_out;
}
/* Establish a clean starting point */
i40e_clear_hw(hw);
- status = i40e_pf_reset(hw);
- if (status) {
- device_printf(dev, "PF reset failure %s\n",
- i40e_stat_str(hw, status));
- error = EIO;
- goto err_out;
+
+ /* Don't try to reset device if it's in recovery mode */
+ if (!ixl_fw_recovery_mode(pf)) {
+ status = i40e_pf_reset(hw);
+ if (status) {
+ device_printf(dev, "PF reset failure %s\n",
+ i40e_stat_str(hw, status));
+ error = EIO;
+ goto err_out;
+ }
}
/* Initialize the shared code */
@@ -472,6 +552,9 @@
device_printf(dev, "Please update the NVM image.\n");
}
+ if (ixl_fw_recovery_mode(pf))
+ return ixl_attach_recovery_mode(pf);
+
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
@@ -521,13 +604,20 @@
device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
pf->qtag.num_allocated, pf->qtag.num_active);
- /* Disable LLDP from the firmware for certain NVM versions */
- if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
- (pf->hw.aq.fw_maj_ver < 4)) {
- i40e_aq_stop_lldp(hw, TRUE, NULL);
+ /* Disable LLDP from the firmware on XL710 for certain NVM versions */
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 3)) ||
+ (hw->aq.fw_maj_ver < 4))) {
+ i40e_aq_stop_lldp(hw, true, false, NULL);
pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
}
+ /* Try enabling Energy Efficient Ethernet (EEE) mode */
+ if(i40e_enable_eee(hw, true) == I40E_SUCCESS)
+ atomic_set_int(&pf->state, IXL_PF_STATE_EEE_ENABLED);
+ else
+ atomic_clear_int(&pf->state, IXL_PF_STATE_EEE_ENABLED);
+
/* Get MAC addresses from hardware */
i40e_get_mac_addr(hw, hw->mac.addr);
error = i40e_validate_mac_addr(hw->mac.addr);
@@ -539,15 +629,18 @@
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
/* Query device FW LLDP status */
- ixl_get_fw_lldp_status(pf);
- /* Tell FW to apply DCB config on link up */
- if ((hw->mac.type != I40E_MAC_X722)
- && ((pf->hw.aq.api_maj_ver > 1)
- || (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver >= 7)))
- i40e_aq_set_dcb_parameters(hw, true, NULL);
+ if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
+ if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
+ atomic_set_int(&pf->state,
+ IXL_PF_STATE_FW_LLDP_DISABLED);
+ } else {
+ atomic_clear_int(&pf->state,
+ IXL_PF_STATE_FW_LLDP_DISABLED);
+ }
+ }
- /* Initialize mac filter list for VSI */
- SLIST_INIT(&vsi->ftl);
+ /* Tell FW to apply DCB config on link up */
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
/* Set up SW VSI and allocate queue memory and rings */
if (ixl_setup_stations(pf)) {
@@ -648,6 +741,9 @@
ixl_update_stats_counters(pf);
ixl_add_hw_stats(pf);
+ /* Add protocol filters to list */
+ ixl_init_filters(vsi);
+
/* Register for VLAN events */
vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
@@ -682,6 +778,10 @@
device_printf(dev, "The device is not iWARP enabled\n");
}
#endif
+ /* Start the local timer */
+ IXL_PF_LOCK(pf);
+ callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+ IXL_PF_UNLOCK(pf);
INIT_DEBUGOUT("ixl_attach: end");
return (0);
@@ -747,31 +847,37 @@
ixl_stop(pf);
/* Shutdown LAN HMC */
- status = i40e_shutdown_lan_hmc(hw);
- if (status)
- device_printf(dev,
- "Shutdown LAN HMC failed with code %d\n", status);
+ if (hw->hmc.hmc_obj) {
+ status = i40e_shutdown_lan_hmc(hw);
+ if (status)
+ device_printf(dev,
+ "Shutdown LAN HMC failed with code %s\n", i40e_stat_str(hw, status));
+ }
/* Teardown LAN queue resources */
ixl_teardown_queue_msix(vsi);
ixl_free_queue_tqs(vsi);
+
+ /* Timer enqueues admin task. Stop it before freeing the admin taskqueue */
+ callout_drain(&pf->timer);
+
/* Shutdown admin queue */
ixl_disable_intr0(hw);
ixl_teardown_adminq_msix(pf);
- ixl_free_adminq_tq(pf);
+
status = i40e_shutdown_adminq(hw);
if (status)
device_printf(dev,
"Shutdown Admin queue failed with code %d\n", status);
+ ixl_free_adminq_tq(pf);
+
/* Unregister VLAN events */
if (vsi->vlan_attach != NULL)
EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
if (vsi->vlan_detach != NULL)
EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
- callout_drain(&pf->timer);
-
#ifdef IXL_IW
if (ixl_enable_iwarp && pf->iw_enabled) {
error = ixl_iw_pf_detach(pf);
Index: sys/dev/ixl/if_ixlv.c
===================================================================
--- sys/dev/ixl/if_ixlv.c
+++ sys/dev/ixl/if_ixlv.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -40,7 +40,7 @@
*********************************************************************/
#define IXLV_DRIVER_VERSION_MAJOR 1
#define IXLV_DRIVER_VERSION_MINOR 5
-#define IXLV_DRIVER_VERSION_BUILD 4
+#define IXLV_DRIVER_VERSION_BUILD 8
char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "."
__XSTRING(IXLV_DRIVER_VERSION_MINOR) "."
@@ -1802,6 +1802,7 @@
goto err_free_queues;
}
}
+ sysctl_ctx_init(&vsi->sysctl_ctx);
return (0);
@@ -2269,6 +2270,11 @@
if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
return;
+ /* There are drivers which disable auto-masking of interrupts,
+ * which is a global setting for all ports. We have to make sure
+ * to mask it to not lose IRQs */
+ ixlv_disable_queue_irq(hw, que->me);
+
++que->irqs;
more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
@@ -2680,6 +2686,7 @@
ixlv_free_queue(sc, que);
}
+ sysctl_ctx_free(&vsi->sysctl_ctx);
free(vsi->queues, M_DEVBUF);
}
@@ -2972,22 +2979,11 @@
{
device_t dev = sc->dev;
struct ixl_vsi *vsi = &sc->vsi;
- struct i40e_eth_stats *es = &vsi->eth_stats;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct sysctl_oid *vsi_node, *queue_node;
- struct sysctl_oid_list *vsi_list, *queue_list;
-
-#define QUEUE_NAME_LEN 32
- char queue_namebuf[QUEUE_NAME_LEN];
-
- struct ixl_queue *queues = vsi->queues;
- struct tx_ring *txr;
- struct rx_ring *rxr;
-
/* Driver statistics sysctls */
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
CTLFLAG_RD, &sc->watchdog_events,
@@ -3008,105 +3004,12 @@
sc, 0, ixlv_sysctl_current_speed,
"A", "Current Port Speed");
+ ixl_add_sysctls_eth_stats(ctx, child, &vsi->eth_stats);
+
/* VSI statistics sysctls */
- vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
+ vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
CTLFLAG_RD, NULL, "VSI-specific statistics");
- vsi_list = SYSCTL_CHILDREN(vsi_node);
-
- struct ixl_sysctl_info ctls[] =
- {
- {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
- {&es->rx_unicast, "ucast_pkts_rcvd",
- "Unicast Packets Received"},
- {&es->rx_multicast, "mcast_pkts_rcvd",
- "Multicast Packets Received"},
- {&es->rx_broadcast, "bcast_pkts_rcvd",
- "Broadcast Packets Received"},
- {&es->rx_discards, "rx_discards", "Discarded RX packets"},
- {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
- {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
- {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
- {&es->tx_multicast, "mcast_pkts_txd",
- "Multicast Packets Transmitted"},
- {&es->tx_broadcast, "bcast_pkts_txd",
- "Broadcast Packets Transmitted"},
- {&es->tx_errors, "tx_errors", "TX packet errors"},
- // end
- {0,0,0}
- };
- struct ixl_sysctl_info *entry = ctls;
- while (entry->stat != NULL)
- {
- SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
- CTLFLAG_RD, entry->stat,
- entry->description);
- entry++;
- }
-
- /* Queue sysctls */
- for (int q = 0; q < vsi->num_queues; q++) {
- snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
- queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
- CTLFLAG_RD, NULL, "Queue Name");
- queue_list = SYSCTL_CHILDREN(queue_node);
-
- txr = &(queues[q].txr);
- rxr = &(queues[q].rxr);
-
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
- CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
- "m_defrag() failed");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
- CTLFLAG_RD, &(queues[q].dropped_pkts),
- "Driver dropped packets");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
- CTLFLAG_RD, &(queues[q].irqs),
- "irqs on this queue");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
- CTLFLAG_RD, &(queues[q].tso),
- "TSO");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
- CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
- "Driver tx dma failure in xmit");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
- CTLFLAG_RD, &(txr->no_desc),
- "Queue No Descriptor Available");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
- CTLFLAG_RD, &(txr->total_packets),
- "Queue Packets Transmitted");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
- CTLFLAG_RD, &(txr->tx_bytes),
- "Queue Bytes Transmitted");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
- CTLFLAG_RD, &(rxr->rx_packets),
- "Queue Packets Received");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
- CTLFLAG_RD, &(rxr->rx_bytes),
- "Queue Bytes Received");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
- CTLFLAG_RD, &(rxr->itr), 0,
- "Queue Rx ITR Interval");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
- CTLFLAG_RD, &(txr->itr), 0,
- "Queue Tx ITR Interval");
-
-#ifdef IXL_DEBUG
- /* Examine queue state */
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixlv_sysctl_qtx_tail_handler, "IU",
- "Queue Transmit Descriptor Tail");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixlv_sysctl_qrx_tail_handler, "IU",
- "Queue Receive Descriptor Tail");
- SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer",
- CTLFLAG_RD, &(txr.watchdog_timer), 0,
- "Ticks before watchdog event is triggered");
-#endif
- }
+ ixl_vsi_add_queues_stats(vsi);
}
static void
@@ -3242,4 +3145,3 @@
return (0);
}
#endif
-
Index: sys/dev/ixl/ixl.h
===================================================================
--- sys/dev/ixl/ixl.h
+++ sys/dev/ixl/ixl.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -104,6 +104,7 @@
#include "i40e_type.h"
#include "i40e_prototype.h"
+#include "i40e_dcb.h"
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
@@ -294,6 +295,9 @@
#define IXL_FLAGS_USES_MSIX (1 << 2)
#define IXL_FLAGS_IS_VF (1 << 3)
+#define IXL_VSI_IS_PF(v) ((v->flags & IXL_FLAGS_IS_VF) == 0)
+#define IXL_VSI_IS_VF(v) ((v->flags & IXL_FLAGS_IS_VF) != 0)
+
#define IXL_VF_RESET_TIMEOUT 100
#define IXL_VSI_DATA_PORT 0x01
@@ -386,6 +390,9 @@
#define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#endif
+/* For stats sysctl naming */
+#define IXL_QUEUE_NAME_LEN 32
+
/*
*****************************************************************************
* vendor_info_array
@@ -456,7 +463,7 @@
/* Used for Dynamic ITR calculation */
u32 packets;
- u32 bytes;
+ u32 bytes;
/* Soft Stats */
u64 tx_bytes;
@@ -478,7 +485,7 @@
bool hdr_split;
bool discard;
u32 next_refresh;
- u32 next_check;
+ u32 next_check;
u32 itr;
u32 latency;
char mtx_name[16];
@@ -490,14 +497,14 @@
/* Used for Dynamic ITR calculation */
u32 packets;
- u32 bytes;
+ u32 bytes;
/* Soft stats */
u64 split;
u64 rx_packets;
- u64 rx_bytes;
- u64 desc_errs;
- u64 not_done;
+ u64 rx_bytes;
+ u64 desc_errs;
+ u64 not_done;
};
/*
@@ -535,7 +542,7 @@
*/
SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
struct ixl_vsi {
- void *back;
+ void *back;
struct ifnet *ifp;
struct device *dev;
struct i40e_hw *hw;
@@ -561,18 +568,19 @@
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
u16 num_macs;
+ u64 num_hw_filters;
/* Contains readylist & stat counter id */
struct i40e_aqc_vsi_properties_data info;
- eventhandler_tag vlan_attach;
- eventhandler_tag vlan_detach;
+ eventhandler_tag vlan_attach;
+ eventhandler_tag vlan_detach;
u16 num_vlans;
/* Per-VSI stats from hardware */
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
- bool stat_offsets_loaded;
+ bool stat_offsets_loaded;
/* VSI stat counters */
u64 ipackets;
u64 ierrors;
@@ -586,13 +594,10 @@
u64 oqdrops;
u64 noproto;
- /* Driver statistics */
- u64 hw_filters_del;
- u64 hw_filters_add;
-
/* Misc. */
- u64 flags;
+ u64 flags;
struct sysctl_oid *vsi_node;
+ struct sysctl_ctx_list sysctl_ctx;
};
/*
@@ -600,9 +605,9 @@
*/
static inline u16
ixl_rx_unrefreshed(struct ixl_queue *que)
-{
+{
struct rx_ring *rxr = &que->rxr;
-
+
if (rxr->next_check > rxr->next_refresh)
return (rxr->next_check - rxr->next_refresh - 1);
else
@@ -632,16 +637,16 @@
*/
static inline bool
cmp_etheraddr(const u8 *ea1, const u8 *ea2)
-{
+{
bool cmp = FALSE;
if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) &&
(ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) &&
- (ea1[4] == ea2[4]) && (ea1[5] == ea2[5]))
+ (ea1[4] == ea2[4]) && (ea1[5] == ea2[5]))
cmp = TRUE;
return (cmp);
-}
+}
/*
* Return next largest power of 2, unsigned
@@ -692,6 +697,9 @@
int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixl_deferred_mq_start(void *, int);
+void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *,
+ struct i40e_eth_stats *);
+void ixl_vsi_add_queues_stats(struct ixl_vsi *);
void ixl_vsi_setup_rings_size(struct ixl_vsi *, int, int);
int ixl_queue_hang_check(struct ixl_vsi *);
void ixl_free_vsi(struct ixl_vsi *);
Index: sys/dev/ixl/ixl_iw.h
===================================================================
--- sys/dev/ixl/ixl_iw.h
+++ sys/dev/ixl/ixl_iw.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/ixl_iw.c
===================================================================
--- sys/dev/ixl/ixl_iw.c
+++ sys/dev/ixl/ixl_iw.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/ixl_iw_int.h
===================================================================
--- sys/dev/ixl/ixl_iw_int.h
+++ sys/dev/ixl/ixl_iw_int.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/ixl_pf.h
===================================================================
--- sys/dev/ixl/ixl_pf.h
+++ sys/dev/ixl/ixl_pf.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -47,6 +47,16 @@
#define IXL_PF_STATE_EMPR_RESETTING (1 << 0)
#define IXL_PF_STATE_FW_LLDP_DISABLED (1 << 1)
+#define IXL_PF_STATE_RECOVERY_MODE (1 << 2)
+#define IXL_PF_STATE_EEE_ENABLED (1 << 3)
+
+enum ixl_i2c_access_method_t {
+ IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE = 0,
+ IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS = 1,
+ IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD = 2,
+ IXL_I2C_ACCESS_METHOD_AQ = 3,
+ IXL_I2C_ACCESS_METHOD_TYPE_LENGTH = 4
+};
struct ixl_vf {
struct ixl_vsi vsi;
@@ -57,7 +67,6 @@
uint32_t version;
struct ixl_pf_qtag qtag;
- struct sysctl_ctx_list ctx;
};
/* Physical controller structure */
@@ -85,7 +94,7 @@
bool iw_enabled;
#endif
int if_flags;
- int state;
+ volatile int state;
bool init_in_progress;
u8 supported_speeds;
@@ -103,7 +112,6 @@
struct mtx pf_mtx;
- u32 qbase;
u32 admvec;
struct task adminq;
struct taskqueue *tq;
@@ -124,6 +132,13 @@
struct i40e_hw_port_stats stats_offsets;
bool stat_offsets_loaded;
+ /* I2C access methods */
+ enum ixl_i2c_access_method_t i2c_access_method;
+ s32 (*read_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+ s32 (*write_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 data);
+
/* SR-IOV */
struct ixl_vf *vfs;
int num_vfs;
@@ -132,6 +147,7 @@
int vc_debug_lvl;
};
+
/*
* Defines used for NVM update ioctls.
* This value is used in the Solaris tool, too.
@@ -153,7 +169,9 @@
"\t 0x4 - advertise 10G\n" \
"\t 0x8 - advertise 20G\n" \
"\t0x10 - advertise 25G\n" \
-"\t0x20 - advertise 40G\n\n" \
+"\t0x20 - advertise 40G\n" \
+"\t0x40 - advertise 2.5G\n" \
+"\t0x80 - advertise 5G\n\n" \
"Set to 0 to disable link.\n" \
"Use \"sysctl -x\" to view flags properly."
@@ -165,7 +183,9 @@
"\t 0x4 - 10G\n" \
"\t 0x8 - 20G\n" \
"\t0x10 - 25G\n" \
-"\t0x20 - 40G\n\n" \
+"\t0x20 - 40G\n" \
+"\t0x40 - 2.5G\n" \
+"\t0x80 - 5G\n\n" \
"Use \"sysctl -x\" to view flags properly."
#define IXL_SYSCTL_HELP_FC \
@@ -184,6 +204,14 @@
"\t0 - disable\n" \
"\t1 - enable\n"
+#define IXL_SYSCTL_HELP_I2C_METHOD \
+"\nI2C access method that driver will use:\n" \
+"\t0 - best available method\n" \
+"\t1 - bit bang via I2CPARAMS register\n" \
+"\t2 - register read/write via I2CCMD register\n" \
+"\t3 - Use Admin Queue command (best)\n" \
+"Using the Admin Queue is only supported on 710 devices with FW version 1.7 or higher"
+
extern const char * const ixl_fc_string[6];
MALLOC_DECLARE(M_IXL);
@@ -210,9 +238,6 @@
#define ixl_dbg(p, m, s, ...) ixl_debug_core(p, m, s, ##__VA_ARGS__)
void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
-/* For stats sysctl naming */
-#define QUEUE_NAME_LEN 32
-
/* For netmap(4) compatibility */
#define ixl_disable_intr(vsi) ixl_disable_rings_intr(vsi)
@@ -222,7 +247,6 @@
int ixl_setup_interface(device_t, struct ixl_vsi *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
-char * ixl_aq_speed_to_str(enum i40e_aq_link_speed);
void ixl_handle_que(void *context, int pending);
@@ -260,7 +284,7 @@
u64 *, u64 *);
void ixl_stop(struct ixl_pf *);
-void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name);
+void ixl_vsi_add_sysctls(struct ixl_vsi *, const char *, bool);
int ixl_get_hw_capabilities(struct ixl_pf *);
void ixl_link_up_msg(struct ixl_pf *);
void ixl_update_link_status(struct ixl_pf *);
@@ -300,6 +324,7 @@
struct i40e_aqc_get_link_status *);
int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
+int ixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *, struct ifreq *ifr);
void ixl_handle_empr_reset(struct ixl_pf *);
int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up);
int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up);
@@ -334,10 +359,10 @@
void ixl_set_promisc(struct ixl_vsi *);
void ixl_add_multi(struct ixl_vsi *);
void ixl_del_multi(struct ixl_vsi *);
-void ixl_setup_vlan_filters(struct ixl_vsi *);
void ixl_init_filters(struct ixl_vsi *);
void ixl_add_hw_filters(struct ixl_vsi *, int, int);
void ixl_del_hw_filters(struct ixl_vsi *, int);
+void ixl_del_default_hw_filters(struct ixl_vsi *);
struct ixl_mac_filter *
ixl_find_filter(struct ixl_vsi *, const u8 *, s16);
void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
@@ -348,16 +373,24 @@
int ixl_vsi_setup_queues(struct ixl_vsi *vsi);
void ixl_vsi_free_queues(struct ixl_vsi *vsi);
+bool ixl_fw_recovery_mode(struct ixl_pf *);
/*
* I2C Function prototypes
*/
int ixl_find_i2c_interface(struct ixl_pf *);
-s32 ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
+s32 ixl_read_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+s32 ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 data);
+s32 ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data);
-s32 ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
+s32 ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
-int ixl_get_fw_lldp_status(struct ixl_pf *pf);
int ixl_attach_get_link_status(struct ixl_pf *);
#endif /* _IXL_PF_H_ */
Index: sys/dev/ixl/ixl_pf_i2c.c
===================================================================
--- sys/dev/ixl/ixl_pf_i2c.c
+++ sys/dev/ixl/ixl_pf_i2c.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -46,9 +46,9 @@
#define IXL_I2C_CLOCK_STRETCHING_TIMEOUT 500
#define IXL_I2C_REG(_hw) \
- I40E_GLGEN_I2CPARAMS(((struct i40e_osdep *)(_hw)->back)->i2c_intfc_num)
-
+ I40E_GLGEN_I2CPARAMS(_hw->func_caps.mdio_port_num)
+/* I2C bit-banging functions */
static s32 ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data);
static bool ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl);
static void ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl);
@@ -62,6 +62,8 @@
static void ixl_i2c_start(struct ixl_pf *pf);
static void ixl_i2c_stop(struct ixl_pf *pf);
+static s32 ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum);
+
/**
* ixl_i2c_bus_clear - Clears the I2C bus
* @hw: pointer to hardware structure
@@ -449,10 +451,10 @@
}
/**
- * ixl_read_i2c_byte - Reads 8 bit word over I2C
+ * ixl_read_i2c_byte_bb - Reads 8 bit word over I2C
**/
s32
-ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
+ixl_read_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data)
{
struct i40e_hw *hw = &pf->hw;
@@ -523,9 +525,9 @@
i40e_msec_delay(100);
retry++;
if (retry < max_retry)
- ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying.\n");
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying\n");
else
- ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error.\n");
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error\n");
} while (retry < max_retry);
done:
@@ -538,10 +540,10 @@
}
/**
- * ixl_write_i2c_byte - Writes 8 bit word over I2C
+ * ixl_write_i2c_byte_bb - Writes 8 bit word over I2C
**/
s32
-ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset,
+ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data)
{
struct i40e_hw *hw = &pf->hw;
@@ -589,9 +591,9 @@
i40e_msec_delay(100);
retry++;
if (retry < max_retry)
- ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying.\n");
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying\n");
else
- ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error.\n");
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error\n");
} while (retry < max_retry);
write_byte_out:
@@ -603,3 +605,139 @@
return status;
}
+/**
+ * ixl_read_i2c_byte_reg - Reads 8 bit word over I2C using a hardware register
+ **/
+s32
+ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg = 0;
+ s32 status;
+ *data = 0;
+
+ reg |= (byte_offset << I40E_GLGEN_I2CCMD_REGADD_SHIFT);
+ reg |= (((dev_addr >> 1) & 0x7) << I40E_GLGEN_I2CCMD_PHYADD_SHIFT);
+ reg |= I40E_GLGEN_I2CCMD_OP_MASK;
+ wr32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num), reg);
+
+ status = ixl_wait_for_i2c_completion(hw, hw->func_caps.mdio_port_num);
+
+ /* Get data from I2C register */
+ reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num));
+
+ /* Retrieve data read from EEPROM */
+ *data = (u8)(reg & 0xff);
+
+ if (status)
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error\n");
+ return status;
+}
+
+/**
+ * ixl_write_i2c_byte_reg - Writes 8 bit word over I2C using a hardware register
+ **/
+s32
+ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ s32 status = I40E_SUCCESS;
+ u32 reg = 0;
+ u8 upperbyte = 0;
+ u16 datai2c = 0;
+
+ status = ixl_read_i2c_byte_reg(pf, byte_offset + 1, dev_addr, &upperbyte);
+ datai2c = ((u16)upperbyte << 8) | (u16)data;
+ reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num));
+
+ /* Form write command */
+ reg &= ~I40E_GLGEN_I2CCMD_PHYADD_MASK;
+ reg |= (((dev_addr >> 1) & 0x7) << I40E_GLGEN_I2CCMD_PHYADD_SHIFT);
+ reg &= ~I40E_GLGEN_I2CCMD_REGADD_MASK;
+ reg |= (byte_offset << I40E_GLGEN_I2CCMD_REGADD_SHIFT);
+ reg &= ~I40E_GLGEN_I2CCMD_DATA_MASK;
+ reg |= (datai2c << I40E_GLGEN_I2CCMD_DATA_SHIFT);
+ reg &= ~I40E_GLGEN_I2CCMD_OP_MASK;
+
+ /* Write command to registers controlling I2C - data and address. */
+ wr32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num), reg);
+
+ status = ixl_wait_for_i2c_completion(hw, hw->func_caps.mdio_port_num);
+
+ if (status)
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error\n");
+ return status;
+}
+
+/**
+ * ixl_wait_for_i2c_completion
+ **/
+static s32
+ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum)
+{
+	s32 status = 0;
+	u32 timeout = 100;
+	u32 reg;
+	do {
+		reg = rd32(hw, I40E_GLGEN_I2CCMD(portnum));
+		if ((reg & I40E_GLGEN_I2CCMD_R_MASK) != 0)
+			break;
+		i40e_usec_delay(10);
+	} while (--timeout > 0); /* pre-decrement: timeout == 0 exactly on expiry */
+
+	if (timeout == 0)
+		return I40E_ERR_TIMEOUT;
+	else
+		return status;
+}
+
+/**
+ * ixl_read_i2c_byte_aq - Reads 8 bit word over I2C using a hardware register
+ **/
+s32
+ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ s32 status = I40E_SUCCESS;
+ u32 reg;
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ dev_addr, false,
+ byte_offset,
+ &reg, NULL);
+
+ if (status)
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ else
+ *data = (u8)reg;
+
+ return status;
+}
+
+/**
+ * ixl_write_i2c_byte_aq - Writes 8 bit word over I2C using a hardware register
+ **/
+s32
+ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ struct i40e_hw *hw = &pf->hw;
+ s32 status = I40E_SUCCESS;
+
+ status = i40e_aq_set_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ dev_addr, false,
+ byte_offset,
+ data, NULL);
+
+ if (status)
+ ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+
+ return status;
+}
Index: sys/dev/ixl/ixl_pf_iov.h
===================================================================
--- sys/dev/ixl/ixl_pf_iov.h
+++ sys/dev/ixl/ixl_pf_iov.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/ixl_pf_iov.c
===================================================================
--- sys/dev/ixl/ixl_pf_iov.c
+++ sys/dev/ixl/ixl_pf_iov.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -197,15 +197,16 @@
int error;
hw = &pf->hw;
+ vf->vsi.flags |= IXL_FLAGS_IS_VF;
error = ixl_vf_alloc_vsi(pf, vf);
if (error != 0)
return (error);
- vf->vsi.hw_filters_add = 0;
- vf->vsi.hw_filters_del = 0;
- ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
- ixl_reconfigure_filters(&vf->vsi);
+ vf->vsi.dev = pf->dev;
+ vf->vsi.num_hw_filters = 0;
+
+ ixl_init_filters(&vf->vsi);
return (0);
}
@@ -1722,7 +1723,7 @@
struct i40e_hw *hw;
struct ixl_vsi *pf_vsi;
enum i40e_status_code ret;
- int i, error;
+ int error;
pf = device_get_softc(dev);
hw = &pf->hw;
@@ -1737,9 +1738,6 @@
goto fail;
}
- for (i = 0; i < num_vfs; i++)
- sysctl_ctx_init(&pf->vfs[i].ctx);
-
ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1, FALSE, &pf->veb_seid, FALSE, NULL);
if (ret != I40E_SUCCESS) {
@@ -1749,6 +1747,13 @@
goto fail;
}
+ /*
+ * Adding a VEB brings back the default MAC filter(s). Remove them,
+ * and let the driver add the proper filters back.
+ */
+ ixl_del_default_hw_filters(pf_vsi);
+ ixl_reconfigure_filters(pf_vsi);
+
pf->num_vfs = num_vfs;
IXL_PF_UNLOCK(pf);
return (0);
@@ -1800,7 +1805,7 @@
/* Do this after the unlock as sysctl_ctx_free might sleep. */
for (i = 0; i < num_vfs; i++)
- sysctl_ctx_free(&vfs[i].ctx);
+ sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx);
free(vfs, M_IXL);
}
@@ -1838,7 +1843,7 @@
int
ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
- char sysctl_name[QUEUE_NAME_LEN];
+ char sysctl_name[IXL_QUEUE_NAME_LEN];
struct ixl_pf *pf;
struct ixl_vf *vf;
const void *mac;
@@ -1851,10 +1856,8 @@
IXL_PF_LOCK(pf);
vf->vf_num = vfnum;
-
vf->vsi.back = pf;
vf->vf_flags = VF_FLAG_ENABLED;
- SLIST_INIT(&vf->vsi.ftl);
/* Reserve queue allocation from PF */
vf_num_queues = nvlist_get_number(params, "num-queues");
@@ -1892,7 +1895,7 @@
IXL_PF_UNLOCK(pf);
if (error == 0) {
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
- ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
+ ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);
}
return (error);
Index: sys/dev/ixl/ixl_pf_main.c
===================================================================
--- sys/dev/ixl/ixl_pf_main.c
+++ sys/dev/ixl/ixl_pf_main.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -45,15 +45,16 @@
#endif
#ifdef DEV_NETMAP
-#include <net/netmap.h>
-#include <sys/selinfo.h>
-#include <dev/netmap/netmap_kern.h>
+#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */
static int ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int);
static u64 ixl_max_aq_speed_to_value(u8);
static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
+static enum i40e_status_code ixl_set_lla(struct ixl_vsi *);
+static const char * ixl_link_speed_string(u8 link_speed);
+
/* Sysctls */
static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
@@ -64,6 +65,7 @@
static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
/* Debug Sysctls */
static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
@@ -84,10 +86,8 @@
static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
-#ifdef IXL_DEBUG
-static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
-#endif
+static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
+
#ifdef IXL_IW
extern int ixl_enable_iwarp;
@@ -165,6 +165,12 @@
sbuf_delete(sbuf);
}
+bool
+ixl_fw_recovery_mode(struct ixl_pf *pf)
+{
+ return (rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK);
+}
+
static void
ixl_configure_tx_itr(struct ixl_pf *pf)
{
@@ -213,7 +219,6 @@
ixl_configure_rx_itr(pf);
}
-
/*********************************************************************
* Init entry point
*
@@ -232,12 +237,15 @@
struct ifnet *ifp = vsi->ifp;
device_t dev = pf->dev;
struct i40e_filter_control_settings filter;
- u8 tmpaddr[ETHER_ADDR_LEN];
- int ret;
INIT_DEBUGOUT("ixl_init_locked: begin");
IXL_PF_LOCK_ASSERT(pf);
+ if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
+ device_printf(dev, "Running in recovery mode, only firmware update available\n");
+ return;
+ }
+
ixl_stop_locked(pf);
/*
@@ -252,26 +260,11 @@
}
/* Get the latest mac address... User might use a LAA */
- bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
- ETH_ALEN);
- if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
- (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
- device_printf(dev, "ixl_init_locked: reconfigure MAC addr\n");
- ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
- bcopy(tmpaddr, hw->mac.addr,
- ETH_ALEN);
- ret = i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_ONLY,
- hw->mac.addr, NULL);
- if (ret) {
- device_printf(dev, "LLA address"
- "change failed!!\n");
- return;
- }
+ if (ixl_set_lla(vsi)) {
+ device_printf(dev, "LLA address change failed!\n");
+ return;
}
- ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
-
/* Set the various hardware offload abilities */
ifp->if_hwassist = 0;
if (ifp->if_capenable & IFCAP_TSO)
@@ -299,12 +292,6 @@
/* Set up RSS */
ixl_config_rss(pf);
- /* Add protocol filters to list */
- ixl_init_filters(vsi);
-
- /* Setup vlan's if needed */
- ixl_setup_vlan_filters(vsi);
-
/* Set up MSI/X routing and the ITR settings */
if (pf->msix > 1) {
ixl_configure_queue_intr_msix(pf);
@@ -318,6 +305,10 @@
ixl_reconfigure_filters(vsi);
+ /* Check if PROMISC or ALLMULTI flags have been set
+ * by user before bringing interface up */
+ ixl_set_promisc(vsi);
+
/* And now turn on interrupts */
ixl_enable_intr(vsi);
@@ -326,15 +317,12 @@
i40e_get_link_status(hw, &pf->link_up);
ixl_update_link_status(pf);
- /* Start the local timer */
- callout_reset(&pf->timer, hz, ixl_local_timer, pf);
-
/* Now inform the stack we're ready */
ifp->if_drv_flags |= IFF_DRV_RUNNING;
#ifdef IXL_IW
if (ixl_enable_iwarp && pf->iw_enabled) {
- ret = ixl_iw_pf_init(pf);
+ int ret = ixl_iw_pf_init(pf);
if (ret)
device_printf(dev,
"initialize iwarp failed, code %d\n", ret);
@@ -355,10 +343,15 @@
struct i40e_aqc_list_capabilities_element_resp *buf;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
- int error, len;
+ int error, len, i2c_intfc_num;
u16 needed;
bool again = TRUE;
+ if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
+ hw->func_caps.iwarp = 0;
+ return 0;
+ }
+
len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
@@ -383,9 +376,6 @@
return (ENODEV);
}
- /* Capture this PF's starting queue pair */
- pf->qbase = hw->func_caps.base_queue;
-
#ifdef IXL_DEBUG
device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
"msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
@@ -398,11 +388,46 @@
hw->func_caps.num_rx_qp,
hw->func_caps.base_queue);
#endif
- struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back;
- osdep->i2c_intfc_num = ixl_find_i2c_interface(pf);
- if (osdep->i2c_intfc_num != -1)
+ /*
+ * Some devices have both MDIO and I2C; since this isn't reported
+ * by the FW, check registers to see if an I2C interface exists.
+ */
+ i2c_intfc_num = ixl_find_i2c_interface(pf);
+ if (i2c_intfc_num != -1)
pf->has_i2c = true;
+ /* Determine functions to use for driver I2C accesses */
+ switch (pf->i2c_access_method) {
+ case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
+ if (hw->mac.type == I40E_MAC_XL710 &&
+ hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 7) {
+ pf->read_i2c_byte = ixl_read_i2c_byte_aq;
+ pf->write_i2c_byte = ixl_write_i2c_byte_aq;
+ } else {
+ pf->read_i2c_byte = ixl_read_i2c_byte_reg;
+ pf->write_i2c_byte = ixl_write_i2c_byte_reg;
+ }
+ break;
+ }
+ case IXL_I2C_ACCESS_METHOD_AQ:
+ pf->read_i2c_byte = ixl_read_i2c_byte_aq;
+ pf->write_i2c_byte = ixl_write_i2c_byte_aq;
+ break;
+ case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
+ pf->read_i2c_byte = ixl_read_i2c_byte_reg;
+ pf->write_i2c_byte = ixl_write_i2c_byte_reg;
+ break;
+ case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
+ pf->read_i2c_byte = ixl_read_i2c_byte_bb;
+ pf->write_i2c_byte = ixl_write_i2c_byte_bb;
+ break;
+ default:
+ /* Should not happen */
+ device_printf(dev, "Error setting I2C access functions\n");
+ break;
+ }
+
/* Print a subset of the capability information. */
device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
@@ -690,6 +715,8 @@
u32 icr0;
bool more;
+ ixl_disable_intr0(hw);
+
pf->admin_irq++;
/* Clear PBA at start of ISR if using legacy interrupts */
@@ -746,6 +773,11 @@
if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
return;
+ /* There are drivers which disable auto-masking of interrupts,
+ * which is a global setting for all ports. We have to make sure
+ * to mask it to not lose IRQs */
+ ixl_disable_queue(hw, que->me);
+
++que->irqs;
more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
@@ -904,7 +936,7 @@
void
ixl_add_multi(struct ixl_vsi *vsi)
{
- struct ifmultiaddr *ifma;
+ struct ifmultiaddr *ifma;
struct ifnet *ifp = vsi->ifp;
struct i40e_hw *hw = vsi->hw;
int mcnt = 0, flags;
@@ -993,26 +1025,37 @@
* This routine checks for link status, updates statistics,
* and runs the watchdog check.
*
- * Only runs when the driver is configured UP and RUNNING.
- *
**********************************************************************/
void
ixl_local_timer(void *arg)
{
struct ixl_pf *pf = arg;
+ struct ifnet *ifp = pf->vsi.ifp;
+
+ if (ixl_fw_recovery_mode(pf)) {
+ if (!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ixl_stop_locked(pf);
+ atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE | IXL_PF_STATE_EMPR_RESETTING);
+ device_printf(pf->dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+ }
+ }
IXL_PF_LOCK_ASSERT(pf);
/* Fire off the adminq task */
taskqueue_enqueue(pf->tq, &pf->adminq);
- /* Update stats */
- ixl_update_stats_counters(pf);
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ /* Update stats */
+ ixl_update_stats_counters(pf);
+ }
- /* Increment stat when a queue shows hung */
- if (ixl_queue_hang_check(&pf->vsi))
+ if (ixl_queue_hang_check(&pf->vsi)) {
+ /* Increment stat when a queue shows hung */
pf->watchdog_events++;
+ }
callout_reset(&pf->timer, hz, ixl_local_timer, pf);
}
@@ -1043,7 +1086,7 @@
log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
ifp->if_xname,
- ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
+ ixl_link_speed_string(hw->phy.link_info.link_speed),
req_fec_string, neg_fec_string,
(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
(hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
@@ -1109,20 +1152,17 @@
IXL_PF_LOCK_ASSERT(pf);
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
+
#ifdef IXL_IW
/* Stop iWARP device */
if (ixl_enable_iwarp && pf->iw_enabled)
ixl_iw_pf_stop(pf);
#endif
- /* Stop the local timer */
- callout_stop(&pf->timer);
-
ixl_disable_rings_intr(vsi);
ixl_disable_rings(vsi);
-
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}
void
@@ -1807,6 +1847,12 @@
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+ if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_2500_T, 0, NULL);
+
+ if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_5000_T, 0, NULL);
+
if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
@@ -1887,7 +1933,7 @@
ifp = vsi->ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
device_printf(dev, "can not allocate ifnet structure\n");
- return (-1);
+ return (ENOMEM);
}
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
@@ -1950,30 +1996,32 @@
ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
ixl_media_status);
- aq_error = i40e_aq_get_phy_capabilities(hw,
- FALSE, TRUE, &abilities, NULL);
- /* May need delay to detect fiber correctly */
- if (aq_error == I40E_ERR_UNKNOWN_PHY) {
- i40e_msec_delay(200);
- aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
- TRUE, &abilities, NULL);
- }
- if (aq_error) {
- if (aq_error == I40E_ERR_UNKNOWN_PHY)
- device_printf(dev, "Unknown PHY type detected!\n");
- else
- device_printf(dev,
- "Error getting supported media types, err %d,"
- " AQ error %d\n", aq_error, hw->aq.asq_last_status);
- } else {
- pf->supported_speeds = abilities.link_speed;
+ if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) == 0) {
+ aq_error = i40e_aq_get_phy_capabilities(hw,
+ FALSE, TRUE, &abilities, NULL);
+ /* May need delay to detect fiber correctly */
+ if (aq_error == I40E_ERR_UNKNOWN_PHY) {
+ i40e_msec_delay(200);
+ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
+ TRUE, &abilities, NULL);
+ }
+ if (aq_error) {
+ if (aq_error == I40E_ERR_UNKNOWN_PHY)
+ device_printf(dev, "Unknown PHY type detected!\n");
+ else
+ device_printf(dev,
+ "Error getting supported media types, err %d,"
+ " AQ error %d\n", aq_error, hw->aq.asq_last_status);
+ } else {
+ pf->supported_speeds = abilities.link_speed;
#if __FreeBSD_version >= 1100000
- ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
+ ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds);
#else
- if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
+ if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
#endif
- ixl_add_ifmedia(vsi, hw->phy.phy_types);
+ ixl_add_ifmedia(vsi, hw->phy.phy_types);
+ }
}
/* Use autoselect media by default */
@@ -2138,8 +2186,6 @@
/* Reset VSI statistics */
ixl_vsi_reset_stats(vsi);
- vsi->hw_filters_add = 0;
- vsi->hw_filters_del = 0;
ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
@@ -2262,9 +2308,6 @@
return (err);
}
-
-
-
void
ixl_vsi_free_queues(struct ixl_vsi *vsi)
{
@@ -2298,6 +2341,8 @@
IXL_RX_UNLOCK(rxr);
IXL_RX_LOCK_DESTROY(rxr);
}
+
+ sysctl_ctx_free(&vsi->sysctl_ctx);
}
@@ -2309,7 +2354,6 @@
void
ixl_free_vsi(struct ixl_vsi *vsi)
{
-
/* Free station queues */
ixl_vsi_free_queues(vsi);
if (vsi->queues)
@@ -2329,6 +2373,8 @@
SLIST_REMOVE_HEAD(&vsi->ftl, next);
free(f, M_DEVBUF);
}
+
+ vsi->num_hw_filters = 0;
}
/*
@@ -2455,6 +2501,9 @@
if (error)
break;
}
+ if (error == 0)
+ sysctl_ctx_init(&vsi->sysctl_ctx);
+
return (error);
}
@@ -2647,67 +2696,23 @@
}
void
-ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
- struct sysctl_ctx_list *ctx, const char *sysctl_name)
+ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
{
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
struct sysctl_oid_list *vsi_list;
- tree = device_get_sysctl_tree(pf->dev);
+ tree = device_get_sysctl_tree(vsi->dev);
child = SYSCTL_CHILDREN(tree);
- vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
- CTLFLAG_RD, NULL, "VSI Number");
- vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
-
- ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
-}
-
-#ifdef IXL_DEBUG
-/**
- * ixl_sysctl_qtx_tail_handler
- * Retrieves I40E_QTX_TAIL value from hardware
- * for a sysctl.
- */
-static int
-ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_queue *que;
- int error;
- u32 val;
+ vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
+ CTLFLAG_RD, NULL, "VSI Number");
- que = ((struct ixl_queue *)oidp->oid_arg1);
- if (!que) return 0;
-
- val = rd32(que->vsi->hw, que->txr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
-}
-
-/**
- * ixl_sysctl_qrx_tail_handler
- * Retrieves I40E_QRX_TAIL value from hardware
- * for a sysctl.
- */
-static int
-ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_queue *que;
- int error;
- u32 val;
-
- que = ((struct ixl_queue *)oidp->oid_arg1);
- if (!que) return 0;
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+ ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
- val = rd32(que->vsi->hw, que->rxr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
+ if (queues_sysctls)
+ ixl_vsi_add_queues_stats(vsi);
}
-#endif
/*
* Used to set the Tx ITR value for all of the PF LAN VSI's queues.
@@ -2781,21 +2786,11 @@
ixl_add_hw_stats(struct ixl_pf *pf)
{
device_t dev = pf->dev;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *queues = vsi->queues;
struct i40e_hw_port_stats *pf_stats = &pf->stats;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct sysctl_oid_list *vsi_list;
-
- struct sysctl_oid *queue_node;
- struct sysctl_oid_list *queue_list;
-
- struct tx_ring *txr;
- struct rx_ring *rxr;
- char queue_namebuf[QUEUE_NAME_LEN];
/* Driver statistics */
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events",
@@ -2805,129 +2800,11 @@
CTLFLAG_RD, &pf->admin_irq,
"Admin Queue IRQ Handled");
- ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
- vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
-
- /* Queue statistics */
- for (int q = 0; q < vsi->num_queues; q++) {
- snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
- queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
- OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
- queue_list = SYSCTL_CHILDREN(queue_node);
-
- txr = &(queues[q].txr);
- rxr = &(queues[q].rxr);
-
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
- CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
- "m_defrag() failed");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
- CTLFLAG_RD, &(queues[q].irqs),
- "irqs on this queue");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
- CTLFLAG_RD, &(queues[q].tso),
- "TSO");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
- CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
- "Driver tx dma failure in xmit");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
- CTLFLAG_RD, &(queues[q].mss_too_small),
- "TSO sends with an MSS less than 64");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
- CTLFLAG_RD, &(txr->no_desc),
- "Queue No Descriptor Available");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
- CTLFLAG_RD, &(txr->total_packets),
- "Queue Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
- CTLFLAG_RD, &(txr->tx_bytes),
- "Queue Bytes Transmitted");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
- CTLFLAG_RD, &(rxr->rx_packets),
- "Queue Packets Received");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
- CTLFLAG_RD, &(rxr->rx_bytes),
- "Queue Bytes Received");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
- CTLFLAG_RD, &(rxr->desc_errs),
- "Queue Rx Descriptor Errors");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
- CTLFLAG_RD, &(rxr->itr), 0,
- "Queue Rx ITR Interval");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
- CTLFLAG_RD, &(txr->itr), 0,
- "Queue Tx ITR Interval");
-#ifdef IXL_DEBUG
- SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog",
- CTLFLAG_RD, &(txr->watchdog_timer), 0,
- "Ticks before watchdog timer causes interface reinit");
- SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail",
- CTLFLAG_RD, &(txr->next_avail), 0,
- "Next TX descriptor to be used");
- SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean",
- CTLFLAG_RD, &(txr->next_to_clean), 0,
- "Next TX descriptor to be cleaned");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
- CTLFLAG_RD, &(rxr->not_done),
- "Queue Rx Descriptors not Done");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
- CTLFLAG_RD, &(rxr->next_refresh), 0,
- "Queue Rx Descriptors not Done");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
- CTLFLAG_RD, &(rxr->next_check), 0,
- "Queue Rx Descriptors not Done");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixl_sysctl_qrx_tail_handler, "IU",
- "Queue Receive Descriptor Tail");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixl_sysctl_qtx_tail_handler, "IU",
- "Queue Transmit Descriptor Tail");
-#endif
- }
-
+ ixl_vsi_add_sysctls(&pf->vsi, "pf", true);
/* MAC stats */
ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
}
-void
-ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
- struct sysctl_oid_list *child,
- struct i40e_eth_stats *eth_stats)
-{
- struct ixl_sysctl_info ctls[] =
- {
- {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
- {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
- "Unicast Packets Received"},
- {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
- "Multicast Packets Received"},
- {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
- "Broadcast Packets Received"},
- {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
- {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
- {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
- {&eth_stats->tx_multicast, "mcast_pkts_txd",
- "Multicast Packets Transmitted"},
- {&eth_stats->tx_broadcast, "bcast_pkts_txd",
- "Broadcast Packets Transmitted"},
- // end
- {0,0,0}
- };
-
- struct ixl_sysctl_info *entry = ctls;
- while (entry->stat != 0)
- {
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
- CTLFLAG_RD, entry->stat,
- entry->description);
- entry++;
- }
-}
-
void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
@@ -3164,55 +3041,84 @@
}
/*
-** This routine updates vlan filters, called by init
-** it scans the filter table and then updates the hw
-** after a soft reset.
-*/
+ * In some firmware versions there is default MAC/VLAN filter
+ * configured which interferes with filters managed by driver.
+ * Make sure it's removed.
+ */
void
-ixl_setup_vlan_filters(struct ixl_vsi *vsi)
+ixl_del_default_hw_filters(struct ixl_vsi *vsi)
{
- struct ixl_mac_filter *f;
- int cnt = 0, flags;
+ struct i40e_aqc_remove_macvlan_element_data e;
- if (vsi->num_vlans == 0)
- return;
- /*
- ** Scan the filter list for vlan entries,
- ** mark them for addition and then call
- ** for the AQ update.
- */
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if (f->flags & IXL_FILTER_VLAN) {
- f->flags |=
- (IXL_FILTER_ADD |
- IXL_FILTER_USED);
- cnt++;
- }
- }
- if (cnt == 0) {
- printf("setup vlan: no filters found!\n");
- return;
- }
- flags = IXL_FILTER_VLAN;
- flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
- ixl_add_hw_filters(vsi, flags, cnt);
- return;
+ bzero(&e, sizeof(e));
+ bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
+ e.vlan_tag = 0;
+ e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
+
+ bzero(&e, sizeof(e));
+ bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
+ e.vlan_tag = 0;
+ e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
+}
+
+static enum i40e_status_code
+ixl_set_lla(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ u8 tmpaddr[ETHER_ADDR_LEN];
+ enum i40e_status_code status;
+
+ status = I40E_SUCCESS;
+
+ bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETHER_ADDR_LEN);
+ if (memcmp(hw->mac.addr, tmpaddr, ETHER_ADDR_LEN) == 0)
+ goto set_lla_exit;
+
+ status = i40e_validate_mac_addr(tmpaddr);
+ if (status != I40E_SUCCESS)
+ goto set_lla_exit;
+
+ ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+ bcopy(tmpaddr, hw->mac.addr, ETHER_ADDR_LEN);
+ status = i40e_aq_mac_address_write(hw,
+ I40E_AQC_WRITE_TYPE_LAA_ONLY,
+ hw->mac.addr, NULL);
+ if (status != I40E_SUCCESS)
+ goto set_lla_exit;
+
+ ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+set_lla_exit:
+ return (status);
}
/*
** Initialize filter list and add filters that the hardware
** needs to know about.
**
-** Requires VSI's filter list & seid to be set before calling.
+** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ /* Initialize mac filter list for VSI */
+ SLIST_INIT(&vsi->ftl);
+ vsi->num_hw_filters = 0;
+
/* Add broadcast address */
ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+ if (IXL_VSI_IS_VF(vsi))
+ return;
+
+ ixl_del_default_hw_filters(vsi);
+
+ ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
+
/*
* Prevent Tx flow control frames from being sent out by
* non-firmware transmitters.
@@ -3251,7 +3157,7 @@
void
ixl_reconfigure_filters(struct ixl_vsi *vsi)
{
- ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
+ ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_hw_filters);
}
/*
@@ -3404,7 +3310,7 @@
device_printf(dev, "aq_add_macvlan err %d, "
"aq_error %d\n", err, hw->aq.asq_last_status);
else
- vsi->hw_filters_add += j;
+ vsi->num_hw_filters += j;
}
free(a, M_DEVBUF);
return;
@@ -3463,12 +3369,12 @@
int sc = 0;
for (int i = 0; i < j; i++)
sc += (!d[i].error_code);
- vsi->hw_filters_del += sc;
+ vsi->num_hw_filters -= sc;
device_printf(dev,
"Failed to remove %d/%d filters, aq error %d\n",
j - sc, j, hw->aq.asq_last_status);
} else
- vsi->hw_filters_del += j;
+ vsi->num_hw_filters -= j;
}
free(d, M_DEVBUF);
@@ -3971,6 +3877,16 @@
ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_jabber, &nsd->rx_jabber);
+ /* EEE */
+ i40e_get_phy_lpi_status(hw, nsd);
+
+ ixl_stat_update32(hw, I40E_PRTPM_TLPIC,
+ pf->stat_offsets_loaded,
+ &osd->tx_lpi_count, &nsd->tx_lpi_count);
+ ixl_stat_update32(hw, I40E_PRTPM_RLPIC,
+ pf->stat_offsets_loaded,
+ &osd->rx_lpi_count, &nsd->rx_lpi_count);
+
pf->stat_offsets_loaded = true;
/* End hw stats */
@@ -3998,10 +3914,14 @@
ixl_teardown_queue_msix(vsi);
- error = i40e_shutdown_lan_hmc(hw);
- if (error)
- device_printf(dev,
- "Shutdown LAN HMC failed with code %d\n", error);
+ if (hw->hmc.hmc_obj) {
+ error = i40e_shutdown_lan_hmc(hw);
+ if (error)
+ device_printf(dev,
+ "Shutdown LAN HMC failed with code %d\n", error);
+ }
+
+ callout_drain(&pf->timer);
ixl_disable_intr0(hw);
ixl_teardown_adminq_msix(pf);
@@ -4011,14 +3931,9 @@
device_printf(dev,
"Shutdown Admin queue failed with code %d\n", error);
- callout_drain(&pf->timer);
-
/* Free ring buffers, locks and filters */
ixl_vsi_free_queues(vsi);
- /* Free VSI filter list */
- ixl_free_mac_filters(vsi);
-
ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
return (error);
@@ -4030,17 +3945,20 @@
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
device_t dev = pf->dev;
+ enum i40e_get_fw_lldp_status_resp lldp_status;
int error = 0;
device_printf(dev, "Rebuilding driver state...\n");
- error = i40e_pf_reset(hw);
- if (error) {
- device_printf(dev, "PF reset failure %s\n",
- i40e_stat_str(hw, error));
- goto ixl_rebuild_hw_structs_after_reset_err;
+ if (!(atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE)) {
+ if (ixl_fw_recovery_mode(pf)) {
+ atomic_set_int(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
+ pf->link_up = FALSE;
+ ixl_update_link_status(pf);
+ }
}
+
/* Setup */
error = i40e_init_adminq(hw);
if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
@@ -4056,37 +3974,53 @@
device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
goto ixl_rebuild_hw_structs_after_reset_err;
}
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
- error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp, 0, 0);
- if (error) {
- device_printf(dev, "init_lan_hmc failed: %d\n", error);
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
+ /* Do not init LAN HMC and bring interface up in recovery mode */
+ if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) == 0) {
+ error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (error) {
+ device_printf(dev, "init_lan_hmc failed: %d\n", error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
- error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
- if (error) {
- device_printf(dev, "configure_lan_hmc failed: %d\n", error);
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
+ error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (error) {
+ device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
- /* reserve a contiguous allocation for the PF's VSI */
- error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
- if (error) {
- device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
- error);
- /* TODO: error handling */
- }
+ if (!pf->qmgr.qinfo) {
+ /* Init queue allocation manager */
+ error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_rx_qp);
+ if (error) {
+ device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+ }
+ /* reserve a contiguous allocation for the PF's VSI */
+ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
+ if (error) {
+ device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
+ error);
+ /* TODO: error handling */
+ } else
+ device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
+ pf->qtag.num_allocated, pf->qtag.num_active);
- device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
- pf->qtag.num_allocated, pf->qtag.num_active);
+ error = ixl_switch_config(pf);
+ if (error) {
+ device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+ } /* not in recovery mode */
- error = ixl_switch_config(pf);
- if (error) {
- device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
- error);
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
+ /* Remove default filters reinstalled by FW on reset */
+ ixl_del_default_hw_filters(vsi);
if (ixl_vsi_setup_queues(vsi)) {
device_printf(dev, "setup queues failed!\n");
@@ -4094,6 +4028,8 @@
goto ixl_rebuild_hw_structs_after_reset_err;
}
+ ixl_vsi_add_sysctls(vsi, "pf", true);
+
if (pf->msix > 1) {
error = ixl_setup_adminq_msix(pf);
if (error) {
@@ -4111,6 +4047,12 @@
error);
goto ixl_rebuild_hw_structs_after_reset_err;
}
+ error = ixl_setup_queue_tqs(vsi);
+ if (error) {
+ device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
} else {
error = ixl_setup_legacy(pf);
if (error) {
@@ -4120,6 +4062,10 @@
}
}
+ /* Do not bring interface up in recovery mode */
+ if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0)
+ return (error);
+
/* Determine link state */
if (ixl_attach_get_link_status(pf)) {
error = EINVAL;
@@ -4127,12 +4073,25 @@
}
i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
- ixl_get_fw_lldp_status(pf);
+
+ /* Query device FW LLDP status */
+ if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
+ if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
+ atomic_set_int(&pf->state,
+ IXL_PF_STATE_FW_LLDP_DISABLED);
+ } else {
+ atomic_clear_int(&pf->state,
+ IXL_PF_STATE_FW_LLDP_DISABLED);
+ }
+ }
if (is_up)
ixl_init(pf);
device_printf(dev, "Rebuilding driver state done.\n");
+ IXL_PF_LOCK(pf);
+ callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+ IXL_PF_UNLOCK(pf);
return (0);
ixl_rebuild_hw_structs_after_reset_err:
@@ -4146,22 +4105,20 @@
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = &pf->hw;
bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
- int count = 0;
- u32 reg;
+ int error = 0;
ixl_prepare_for_reset(pf, is_up);
-
- /* Typically finishes within 3-4 seconds */
- while (count++ < 100) {
- reg = rd32(hw, I40E_GLGEN_RSTAT)
- & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
- if (reg)
- i40e_msec_delay(100);
- else
- break;
+ /*
+ * i40e_pf_reset checks the type of reset and acts
+ * accordingly. If EMP or Core reset was performed
+ * doing PF reset is not necessary and it sometimes
+ * fails.
+ */
+ error = i40e_pf_reset(hw);
+ if (error) {
+ device_printf(pf->dev, "PF reset failure %s\n",
+ i40e_stat_str(hw, error));
}
- ixl_dbg(pf, IXL_DBG_INFO,
- "EMPR reset wait count: %d\n", count);
ixl_rebuild_hw_structs_after_reset(pf, is_up);
@@ -4181,7 +4138,7 @@
i40e_status ret;
device_t dev = pf->dev;
u32 loop = 0;
- u16 opcode, result;
+ u16 opcode, arq_pending;
if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
/* Flag cleared at end of this function */
@@ -4202,7 +4159,7 @@
IXL_PF_LOCK(pf);
/* clean and process any events */
do {
- ret = i40e_clean_arq_element(hw, &event, &result);
+ ret = i40e_clean_arq_element(hw, &event, &arq_pending);
if (ret)
break;
opcode = LE16_TO_CPU(event.desc.opcode);
@@ -4222,15 +4179,12 @@
break;
}
- } while (result && (loop++ < IXL_ADM_LIMIT));
+ } while (arq_pending && (loop++ < IXL_ADM_LIMIT));
free(event.msg_buf, M_DEVBUF);
- /*
- * If there are still messages to process, reschedule ourselves.
- * Otherwise, re-enable our interrupt.
- */
- if (result > 0)
+ /* If there are still messages to process, reschedule. */
+ if (arq_pending > 0)
taskqueue_enqueue(pf->tq, &pf->adminq);
else
ixl_enable_intr0(hw);
@@ -4248,12 +4202,10 @@
struct i40e_hw *hw = &pf->hw;
struct i40e_eth_stats *es;
struct i40e_eth_stats *oes;
- struct i40e_hw_port_stats *nsd;
u16 stat_idx = vsi->info.stat_counter_idx;
es = &vsi->eth_stats;
oes = &vsi->eth_stats_offsets;
- nsd = &pf->stats;
/* Gather up the stats that the hw collects */
ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
@@ -4432,6 +4384,9 @@
struct sysctl_oid *fec_node;
struct sysctl_oid_list *fec_list;
+ struct sysctl_oid *eee_node;
+ struct sysctl_oid_list *eee_list;
+
/* Set up sysctls */
SYSCTL_ADD_PROC(ctx, ctx_list,
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
@@ -4515,6 +4470,31 @@
OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
+ eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
+ OID_AUTO, "eee", CTLFLAG_RD, NULL,
+ "Energy Efficient Ethernet (EEE) Sysctls");
+ eee_list = SYSCTL_CHILDREN(eee_node);
+
+ SYSCTL_ADD_PROC(ctx, eee_list,
+ OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_sysctl_eee_enable, "I",
+ "Enable Energy Efficient Ethernet (EEE)");
+
+ SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
+ CTLFLAG_RD, &pf->stats.tx_lpi_status, 0,
+ "TX LPI status");
+
+ SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
+ CTLFLAG_RD, &pf->stats.rx_lpi_status, 0,
+ "RX LPI status");
+
+ SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
+ CTLFLAG_RD, &pf->stats.tx_lpi_count,
+ "TX LPI count");
+
+ SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
+ CTLFLAG_RD, &pf->stats.rx_lpi_count,
+ "RX LPI count");
/* Add sysctls meant to print debug information, but don't list them
* in "sysctl -a" output. */
debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
@@ -4577,6 +4557,10 @@
SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW,
pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
}
#ifdef PCI_IOV
@@ -4625,6 +4609,11 @@
error = sysctl_handle_int(oidp, &requested_fc, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
+ if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
+ device_printf(dev, "Interface is currently in FW recovery mode. "
+ "Setting flow control not supported\n");
+ return (EINVAL);
+ }
if (requested_fc < 0 || requested_fc > 3) {
device_printf(dev,
"Invalid fc mode; valid modes are 0 through 3\n");
@@ -4650,12 +4639,11 @@
return (0);
}
-char *
-ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
-{
- int index;
- char *speeds[] = {
+static const char *
+ixl_link_speed_string(u8 link_speed)
+{
+ const char * link_speed_str[] = {
"Unknown",
"100 Mbps",
"1 Gbps",
@@ -4663,34 +4651,43 @@
"40 Gbps",
"20 Gbps",
"25 Gbps",
+ "2.5 Gbps",
+ "5 Gbps"
};
+ int index;
switch (link_speed) {
- case I40E_LINK_SPEED_100MB:
- index = 1;
- break;
- case I40E_LINK_SPEED_1GB:
- index = 2;
- break;
- case I40E_LINK_SPEED_10GB:
- index = 3;
- break;
- case I40E_LINK_SPEED_40GB:
- index = 4;
- break;
- case I40E_LINK_SPEED_20GB:
- index = 5;
- break;
- case I40E_LINK_SPEED_25GB:
- index = 6;
- break;
- case I40E_LINK_SPEED_UNKNOWN:
- default:
- index = 0;
- break;
+ case I40E_LINK_SPEED_100MB:
+ index = 1;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ index = 2;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ index = 3;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ index = 4;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ index = 5;
+ break;
+ case I40E_LINK_SPEED_25GB:
+ index = 6;
+ break;
+ case I40E_LINK_SPEED_2_5GB:
+ index = 7;
+ break;
+ case I40E_LINK_SPEED_5GB:
+ index = 8;
+ break;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ index = 0;
+ break;
}
- return speeds[index];
+ return (link_speed_str[index]);
}
int
@@ -4703,8 +4700,10 @@
ixl_update_link_status(pf);
error = sysctl_handle_string(oidp,
- ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
+ __DECONST(void *,
+ ixl_link_speed_string(hw->phy.link_info.link_speed)),
8, req);
+
return (error);
}
@@ -4715,17 +4714,20 @@
static u8
ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
{
- static u16 speedmap[6] = {
+#define SPEED_MAP_SIZE 8
+ static u16 speedmap[SPEED_MAP_SIZE] = {
(I40E_LINK_SPEED_100MB | (0x1 << 8)),
(I40E_LINK_SPEED_1GB | (0x2 << 8)),
(I40E_LINK_SPEED_10GB | (0x4 << 8)),
(I40E_LINK_SPEED_20GB | (0x8 << 8)),
(I40E_LINK_SPEED_25GB | (0x10 << 8)),
- (I40E_LINK_SPEED_40GB | (0x20 << 8))
+ (I40E_LINK_SPEED_40GB | (0x20 << 8)),
+ (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
+ (I40E_LINK_SPEED_5GB | (0x80 << 8)),
};
u8 retval = 0;
- for (int i = 0; i < 6; i++) {
+ for (int i = 0; i < SPEED_MAP_SIZE; i++) {
if (to_aq)
retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
else
@@ -4768,7 +4770,8 @@
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
- config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info
+ & I40E_AQ_PHY_FEC_CONFIG_MASK;
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
@@ -4784,7 +4787,7 @@
}
/*
-** Supported link speedsL
+** Supported link speeds
** Flags:
** 0x1 - 100 Mb
** 0x2 - 1G
@@ -4792,6 +4795,8 @@
** 0x8 - 20G
** 0x10 - 25G
** 0x20 - 40G
+** 0x40 - 2.5G
+** 0x80 - 5G
*/
static int
ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
@@ -4811,6 +4816,8 @@
** 0x8 - advertise 20G
** 0x10 - advertise 25G
** 0x20 - advertise 40G
+** 0x40 - advertise 2.5G
+** 0x80 - advertise 5G
**
** Set to 0 to disable link
*/
@@ -4828,9 +4835,14 @@
error = sysctl_handle_int(oidp, &requested_ls, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
+ if ((pf->state & IXL_PF_STATE_RECOVERY_MODE) != 0) {
+ device_printf(dev, "Interface is currently in FW recovery mode. "
+ "Setting advertise speed not supported\n");
+ return (EINVAL);
+ }
/* Error out if bits outside of possible flag range are set */
- if ((requested_ls & ~((u8)0x3F)) != 0) {
+ if ((requested_ls & ~((u8)0xFF)) != 0) {
device_printf(dev, "Input advertised speed out of range; "
"valid flags are: 0x%02x\n",
ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
@@ -4869,6 +4881,10 @@
return IF_Gbps(20);
if (link_speeds & I40E_LINK_SPEED_10GB)
return IF_Gbps(10);
+ if (link_speeds & I40E_LINK_SPEED_5GB)
+ return IF_Gbps(5);
+ if (link_speeds & I40E_LINK_SPEED_2_5GB)
+ return IF_Mbps(2500);
if (link_speeds & I40E_LINK_SPEED_1GB)
return IF_Gbps(1);
if (link_speeds & I40E_LINK_SPEED_100MB)
@@ -5089,6 +5105,48 @@
return (perrno);
}
+int
+ixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *pf, struct ifreq *ifr)
+{
+ struct ifi2creq i2c;
+ int error = 0;
+ int i;
+
+ if (pf->read_i2c_byte == NULL)
+ return (EINVAL);
+
+#ifdef ifr_data
+ error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
+#else
+ error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
+#endif
+
+ if (error != 0)
+ return (error);
+ if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
+ error = EINVAL;
+ return (error);
+ }
+ if (i2c.len > sizeof(i2c.data)) {
+ error = EINVAL;
+ return (error);
+ }
+
+ for (i = 0; i < i2c.len; ++i) {
+ if (pf->read_i2c_byte(pf, i2c.offset + i,
+ i2c.dev_addr, &i2c.data[i]))
+ return (EIO);
+ }
+
+#ifdef ifr_data
+ error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
+#else
+ error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
+#endif
+
+ return (error);
+}
+
/*********************************************************************
*
* Media Ioctl callback
@@ -5149,6 +5207,14 @@
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
ifmr->ifm_active |= IFM_1000_T;
break;
+ /* 2.5 G */
+ case I40E_PHY_TYPE_2_5GBASE_T:
+ ifmr->ifm_active |= IFM_2500_T;
+ break;
+ /* 5 G */
+ case I40E_PHY_TYPE_5GBASE_T:
+ ifmr->ifm_active |= IFM_5000_T;
+ break;
/* 10 G */
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
ifmr->ifm_active |= IFM_10G_TWINAX;
@@ -5300,6 +5366,28 @@
#endif
int error = 0;
+ if ((atomic_load_acq_int(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) != 0) {
+ /* We are in recovery mode supporting only NVM update */
+ switch (command) {
+ case SIOCSDRVSPEC:
+ case SIOCGDRVSPEC:
+ IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
+ "Info)\n");
+
+ /* NVM update command */
+ if (ifd->ifd_cmd == I40E_NVM_ACCESS)
+ error = ixl_handle_nvmupd_cmd(pf, ifd);
+ else
+ error = EINVAL;
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+ }
+
switch (command) {
case SIOCSIFADDR:
@@ -5353,16 +5441,11 @@
(IFF_PROMISC | IFF_ALLMULTI)) {
ixl_set_promisc(vsi);
}
- } else {
- IXL_PF_UNLOCK(pf);
- ixl_init(pf);
- IXL_PF_LOCK(pf);
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- ixl_stop_locked(pf);
- }
- }
+ } else
+ ixl_init_locked(pf);
+ } else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ ixl_stop_locked(pf);
+
pf->if_flags = ifp->if_flags;
IXL_PF_UNLOCK(pf);
break;
@@ -5434,31 +5517,11 @@
#if __FreeBSD_version >= 1003000
case SIOCGI2C:
{
- struct ifi2creq i2c;
- int i;
-
IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
if (!pf->has_i2c)
return (ENOTTY);
- error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c));
- if (error != 0)
- break;
- if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
- error = EINVAL;
- break;
- }
- if (i2c.len > sizeof(i2c.data)) {
- error = EINVAL;
- break;
- }
-
- for (i = 0; i < i2c.len; i++)
- if (ixl_read_i2c_byte(pf, i2c.offset + i,
- i2c.dev_addr, &i2c.data[i]))
- return (EIO);
-
- error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c));
+ error = ixl_handle_i2c_eeprom_read_cmd(pf, ifr);
break;
}
#endif
@@ -5535,8 +5598,8 @@
"25GBASE-LR",
"25GBASE-AOC",
"25GBASE-ACC",
- "Reserved (6)",
- "Reserved (7)"
+ "2.5GBASE-T",
+ "5GBASE-T"
};
if (ext && bit_pos > 7) return "Invalid_Ext";
@@ -5545,6 +5608,15 @@
return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
}
+static char *
+ixl_phy_type_string_ls(u8 val)
+{
+ if (val >= 0x1F)
+ return ixl_phy_type_string(val - 0x1F, true);
+ else
+ return ixl_phy_type_string(val, false);
+}
+
int
ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
{
@@ -5571,15 +5643,6 @@
return (0);
}
-static char *
-ixl_phy_type_string_ls(u8 val)
-{
- if (val >= 0x1F)
- return ixl_phy_type_string(val - 0x1F, true);
- else
- return ixl_phy_type_string(val, false);
-}
-
static int
ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
{
@@ -5667,23 +5730,35 @@
for (int i = 0; i < 32; i++)
if ((1 << i) & abilities.phy_type)
sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
- sbuf_printf(buf, ">\n");
+ sbuf_printf(buf, ">");
}
- sbuf_printf(buf, "PHY Ext : %02x",
+ sbuf_printf(buf, "\nPHY Ext : %02x",
abilities.phy_type_ext);
if (abilities.phy_type_ext != 0) {
sbuf_printf(buf, "<");
for (int i = 0; i < 4; i++)
if ((1 << i) & abilities.phy_type_ext)
- sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
+ sbuf_printf(buf, "%s,",
+ ixl_phy_type_string(i, true));
sbuf_printf(buf, ">");
}
- sbuf_printf(buf, "\n");
- sbuf_printf(buf,
- "Speed : %02x\n"
+ sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
+ if (abilities.link_speed != 0) {
+ u8 link_speed;
+ sbuf_printf(buf, " <");
+ for (int i = 0; i < 8; i++) {
+ link_speed = (1 << i) & abilities.link_speed;
+ if (link_speed)
+ sbuf_printf(buf, "%s, ",
+ ixl_link_speed_string(link_speed));
+ }
+ sbuf_printf(buf, ">");
+ }
+
+ sbuf_printf(buf, "\n"
"Abilities: %02x\n"
"EEE cap : %04x\n"
"EEER reg : %08x\n"
@@ -5693,7 +5768,6 @@
"ModType E: %01x\n"
"FEC Cfg : %02x\n"
"Ext CC : %02x",
- abilities.link_speed,
abilities.abilities, abilities.eee_capability,
abilities.eeer_val, abilities.d3_lpan,
abilities.phy_id[0], abilities.phy_id[1],
@@ -6180,6 +6254,93 @@
return (0);
}
+/*
+ * Read some diagnostic data from a (Q)SFP+ module
+ *
+ * SFP A2 QSFP Lower Page
+ * Temperature 96-97 22-23
+ * Vcc 98-99 26-27
+ * TX power 102-103 34-35..40-41
+ * RX power 104-105 50-51..56-57
+ */
+static int
+ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ device_t dev = pf->dev;
+ struct sbuf *sbuf;
+ int error = 0;
+ u8 output;
+
+ if (req->oldptr == NULL) {
+ error = SYSCTL_OUT(req, 0, 128);
+ return (0);
+ }
+
+ error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
+ if (error) {
+ device_printf(dev, "Error reading from i2c\n");
+ return (error);
+ }
+
+ /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
+ if (output == 0x3) {
+ /*
+ * Check for:
+ * - Internally calibrated data
+ * - Diagnostic monitoring is implemented
+ */
+ pf->read_i2c_byte(pf, 92, 0xA0, &output);
+ if (!(output & 0x60)) {
+ device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
+ return (0);
+ }
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ for (u8 offset = 96; offset < 100; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA2, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ for (u8 offset = 102; offset < 106; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA2, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ } else if (output == 0xD || output == 0x11) {
+ /*
+ * QSFP+ modules are always internally calibrated, and must indicate
+ * what types of diagnostic monitoring are implemented
+ */
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ for (u8 offset = 22; offset < 24; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA0, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ for (u8 offset = 26; offset < 28; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA0, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ /* Read the data from the first lane */
+ for (u8 offset = 34; offset < 36; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA0, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ for (u8 offset = 50; offset < 52; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA0, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ } else {
+ device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
+ return (0);
+ }
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
/*
* Sysctl to read a byte from I2C bus.
*
@@ -6196,8 +6357,6 @@
device_t dev = pf->dev;
int input = -1, error = 0;
- device_printf(dev, "%s: start\n", __func__);
-
u8 dev_addr, offset, output;
/* Read in I2C read parameters */
@@ -6211,7 +6370,7 @@
}
offset = (input >> 8) & 0xFF;
- error = ixl_read_i2c_byte(pf, offset, dev_addr, &output);
+ error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
if (error)
return (error);
@@ -6250,7 +6409,7 @@
offset = (input >> 8) & 0xFF;
value = (input >> 16) & 0xFF;
- error = ixl_write_i2c_byte(pf, offset, dev_addr, value);
+ error = pf->write_i2c_byte(pf, offset, dev_addr, value);
if (error)
return (error);
@@ -6426,10 +6585,6 @@
/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE (1280 * 1024)
final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
- if (final_buff == NULL) {
- device_printf(dev, "Could not allocate memory for output.\n");
- goto out;
- }
int final_buff_len = 0;
u8 cluster_id = 1;
@@ -6488,7 +6643,6 @@
free_out:
free(final_buff, M_DEVBUF);
-out:
error = sbuf_finish(buf);
if (error)
device_printf(dev, "Error finishing sbuf: %d\n", error);
@@ -6498,14 +6652,80 @@
}
static int
-ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
+ixl_start_fw_lldp(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ enum i40e_status_code status;
+
+ status = i40e_aq_start_lldp(hw, false, NULL);
+ if (status != I40E_SUCCESS) {
+ switch (hw->aq.asq_last_status) {
+ case I40E_AQ_RC_EEXIST:
+ device_printf(pf->dev,
+ "FW LLDP agent is already running\n");
+ break;
+ case I40E_AQ_RC_EPERM:
+ device_printf(pf->dev,
+ "Device configuration forbids SW from starting "
+ "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
+ "attribute to \"Enabled\" to use this sysctl\n");
+ return (EINVAL);
+ default:
+ device_printf(pf->dev,
+ "Starting FW LLDP agent failed: error: %s, %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return (EINVAL);
+ }
+ }
+
+ atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+ return (0);
+}
+
+static int
+ixl_stop_fw_lldp(struct ixl_pf *pf)
{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
- int error = 0;
- int state, new_state;
enum i40e_status_code status;
+
+ if (hw->func_caps.npar_enable != 0) {
+ device_printf(dev,
+ "Disabling FW LLDP agent is not supported on this device\n");
+ return (EINVAL);
+ }
+
+ if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
+ device_printf(dev,
+ "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
+ return (EINVAL);
+ }
+
+ status = i40e_aq_stop_lldp(hw, true, false, NULL);
+ if (status != I40E_SUCCESS) {
+ if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
+ device_printf(dev,
+ "Disabling FW LLDP agent failed: error: %s, %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return (EINVAL);
+ }
+
+ device_printf(dev, "FW LLDP agent is already stopped\n");
+ }
+
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+ atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+ return (0);
+}
+
+static int
+ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int state, new_state, error = 0;
+
state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
/* Read in new mode */
@@ -6517,57 +6737,45 @@
if (new_state == state)
return (error);
- if (new_state == 0) {
- if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
- device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
- return (EINVAL);
- }
-
- if (pf->hw.aq.api_maj_ver < 1 ||
- (pf->hw.aq.api_maj_ver == 1 &&
- pf->hw.aq.api_min_ver < 7)) {
- device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
- return (EINVAL);
- }
-
- i40e_aq_stop_lldp(&pf->hw, true, NULL);
- i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
- atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
- } else {
- status = i40e_aq_start_lldp(&pf->hw, NULL);
- if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
- device_printf(dev, "FW LLDP agent is already running\n");
- atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
- }
+ if (new_state == 0)
+ return ixl_stop_fw_lldp(pf);
- return (0);
+ return ixl_start_fw_lldp(pf);
}
-/*
- * Get FW LLDP Agent status
- */
-int
-ixl_get_fw_lldp_status(struct ixl_pf *pf)
+static int
+ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
{
- enum i40e_status_code ret = I40E_SUCCESS;
- struct i40e_lldp_variables lldp_cfg;
- struct i40e_hw *hw = &pf->hw;
- u8 adminstatus = 0;
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int state, new_state;
+ int sysctl_handle_status = 0;
+ enum i40e_status_code cmd_status;
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
- if (ret)
- return ret;
+ /* Init states' values */
+ state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
- /* Get the LLDP AdminStatus for the current port */
- adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
- adminstatus &= 0xf;
+ /* Get requested mode */
+ sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
+ if ((sysctl_handle_status) || (req->newptr == NULL))
+ return (sysctl_handle_status);
- /* Check if LLDP agent is disabled */
- if (!adminstatus) {
- device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
- atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
- } else
- atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+ /* Check if state has changed */
+ if (new_state == state)
+ return (0);
+
+ /* Set new state */
+ cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
+
+ /* Save new state or report error */
+ if (!cmd_status) {
+ if (new_state == 0)
+ atomic_clear_int(&pf->state, IXL_PF_STATE_EEE_ENABLED);
+ else
+ atomic_set_int(&pf->state, IXL_PF_STATE_EEE_ENABLED);
+ } else if (cmd_status == I40E_ERR_CONFIG)
+ return (EPERM);
+ else
+ return (EIO);
return (0);
}
Index: sys/dev/ixl/ixl_pf_qmgr.h
===================================================================
--- sys/dev/ixl/ixl_pf_qmgr.h
+++ sys/dev/ixl/ixl_pf_qmgr.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/ixl_pf_qmgr.c
===================================================================
--- sys/dev/ixl/ixl_pf_qmgr.c
+++ sys/dev/ixl/ixl_pf_qmgr.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -83,8 +83,8 @@
qtag->qmgr = qmgr;
qtag->type = IXL_PF_QALLOC_CONTIGUOUS;
qtag->qidx[0] = block_start;
- qtag->num_allocated = num;
- qtag->num_active = alloc_size;
+ qtag->num_allocated = alloc_size;
+ qtag->num_active = num;
return (0);
}
Index: sys/dev/ixl/ixl_txrx.c
===================================================================
--- sys/dev/ixl/ixl_txrx.c
+++ sys/dev/ixl/ixl_txrx.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -67,9 +67,16 @@
static inline u32 ixl_get_tx_head(struct ixl_queue *que);
#ifdef DEV_NETMAP
-#include <dev/netmap/if_ixl_netmap.h>
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+#include <dev/netmap/netmap_kern.h>
#endif /* DEV_NETMAP */
+#ifdef IXL_DEBUG
+static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
+#endif
+
/*
* @key key is saved into this parameter
*/
@@ -100,14 +107,18 @@
return "OK";
case VIRTCHNL_ERR_PARAM:
return "VIRTCHNL_ERR_PARAM";
+ case VIRTCHNL_STATUS_ERR_NO_MEMORY:
+ return "VIRTCHNL_STATUS_ERR_NO_MEMORY";
case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
- case VIRTCHNL_STATUS_NOT_SUPPORTED:
- return "VIRTCHNL_STATUS_NOT_SUPPORTED";
+ case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
+ return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR";
+ case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
+ return "VIRTCHNL_STATUS_ERR_NOT_SUPPORTED";
}
snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
@@ -2072,25 +2083,207 @@
}
+void
+ixl_vsi_add_queues_stats(struct ixl_vsi * vsi)
+{
+ char queue_namebuf[IXL_QUEUE_NAME_LEN];
+ struct sysctl_oid_list *vsi_list, *queue_list;
+ struct ixl_queue *queues = vsi->queues;
+ struct sysctl_oid *queue_node;
+ struct sysctl_ctx_list *ctx;
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+ ctx = &vsi->sysctl_ctx;
+
+ /* Queue statistics */
+ for (int q = 0; q < vsi->num_queues; q++) {
+ snprintf(queue_namebuf, IXL_QUEUE_NAME_LEN, "que%d", q);
+ queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
+ OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ txr = &(queues[q].txr);
+ rxr = &(queues[q].rxr);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
+ CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
+ "m_defrag() failed");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(queues[q].irqs),
+ "irqs on this queue");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
+ CTLFLAG_RD, &(queues[q].tso),
+ "TSO");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
+ CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
+ "Driver tx dma failure in xmit");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small",
+ CTLFLAG_RD, &(queues[q].mss_too_small),
+ "TSO sends with an MSS less than 64");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+ CTLFLAG_RD, &(txr->no_desc),
+ "Queue No Descriptor Available");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+ CTLFLAG_RD, &(txr->total_packets),
+ "Queue Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+ CTLFLAG_RD, &(txr->tx_bytes),
+ "Queue Bytes Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+ CTLFLAG_RD, &(rxr->rx_packets),
+ "Queue Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+ CTLFLAG_RD, &(rxr->rx_bytes),
+ "Queue Bytes Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
+ CTLFLAG_RD, &(rxr->desc_errs),
+ "Queue Rx Descriptor Errors");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
+ CTLFLAG_RD, &(rxr->itr), 0,
+ "Queue Rx ITR Interval");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
+ CTLFLAG_RD, &(txr->itr), 0,
+ "Queue Tx ITR Interval");
+#ifdef IXL_DEBUG
+ SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog",
+ CTLFLAG_RD, &(txr->watchdog_timer), 0,
+ "Ticks before watchdog timer causes interface reinit");
+ SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail",
+ CTLFLAG_RD, &(txr->next_avail), 0,
+ "Next TX descriptor to be used");
+ SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean",
+ CTLFLAG_RD, &(txr->next_to_clean), 0,
+ "Next TX descriptor to be cleaned");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
+ CTLFLAG_RD, &(rxr->not_done),
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
+ CTLFLAG_RD, &(rxr->next_refresh), 0,
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
+ CTLFLAG_RD, &(rxr->next_check), 0,
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qrx_tail_handler, "IU",
+ "Queue Receive Descriptor Tail");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qtx_tail_handler, "IU",
+ "Queue Transmit Descriptor Tail");
+#endif
+ }
+
+}
+
+void
+ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child,
+ struct i40e_eth_stats *eth_stats)
+{
+ struct ixl_sysctl_info ctls[] =
+ {
+ {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
+ {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
+ "Unicast Packets Received"},
+ {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
+ "Multicast Packets Received"},
+ {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
+ "Broadcast Packets Received"},
+ {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
+ {&eth_stats->rx_unknown_protocol, "rx_unknown_proto",
+ "RX unknown protocol packets"},
+ {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
+ {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
+ {&eth_stats->tx_multicast, "mcast_pkts_txd",
+ "Multicast Packets Transmitted"},
+ {&eth_stats->tx_broadcast, "bcast_pkts_txd",
+ "Broadcast Packets Transmitted"},
+ {&eth_stats->tx_errors, "tx_errors", "TX packet errors"},
+ // end
+ {0,0,0}
+ };
+
+ struct ixl_sysctl_info *entry = ctls;
+
+ while (entry->stat != 0)
+ {
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
+ CTLFLAG_RD, entry->stat,
+ entry->description);
+ entry++;
+ }
+}
+
+#ifdef IXL_DEBUG
+/**
+ * ixl_sysctl_qtx_tail_handler
+ * Retrieves I40E_QTX_TAIL value from hardware
+ * for a sysctl.
+ */
+static int
+ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+ if (!que) return 0;
+
+ val = rd32(que->vsi->hw, que->txr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return error;
+ return (0);
+}
+
+/**
+ * ixl_sysctl_qrx_tail_handler
+ * Retrieves I40E_QRX_TAIL value from hardware
+ * for a sysctl.
+ */
+static int
+ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+ if (!que) return 0;
+
+ val = rd32(que->vsi->hw, que->rxr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return error;
+ return (0);
+}
+#endif
+
static void
ixl_queue_sw_irq(struct ixl_vsi *vsi, int qidx)
{
struct i40e_hw *hw = vsi->hw;
u32 reg, mask;
- if ((vsi->flags & IXL_FLAGS_IS_VF) != 0) {
+ if (IXL_VSI_IS_PF(vsi)) {
+ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
+
+ reg = ((vsi->flags & IXL_FLAGS_USES_MSIX) != 0) ?
+ I40E_PFINT_DYN_CTLN(qidx) : I40E_PFINT_DYN_CTL0;
+ } else {
mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
reg = I40E_VFINT_DYN_CTLN1(qidx);
- } else {
- mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
- I40E_PFINT_DYN_CTLN_ITR_INDX_MASK);
-
- reg = ((vsi->flags & IXL_FLAGS_USES_MSIX) != 0) ?
- I40E_PFINT_DYN_CTLN(qidx) : I40E_PFINT_DYN_CTL0;
}
wr32(hw, reg, mask);
Index: sys/dev/ixl/ixlv.h
===================================================================
--- sys/dev/ixl/ixlv.h
+++ sys/dev/ixl/ixlv.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -141,7 +141,6 @@
struct mtx mtx;
- u32 qbase;
u32 admvec;
struct timeout_task timeout;
struct task aq_irq;
Index: sys/dev/ixl/ixlv_vc_mgr.h
===================================================================
--- sys/dev/ixl/ixlv_vc_mgr.h
+++ sys/dev/ixl/ixlv_vc_mgr.h
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/ixlvc.c
===================================================================
--- sys/dev/ixl/ixlvc.c
+++ sys/dev/ixl/ixlvc.c
@@ -1,8 +1,8 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Index: sys/dev/ixl/virtchnl.h
===================================================================
--- sys/dev/ixl/virtchnl.h
+++ sys/dev/ixl/virtchnl.h
@@ -1,6 +1,6 @@
/******************************************************************************
- Copyright (c) 2013-2017, Intel Corporation
+ Copyright (c) 2013-2019, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -32,6 +32,7 @@
******************************************************************************/
/*$FreeBSD$*/
+
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
@@ -70,19 +71,27 @@
/* Error Codes */
enum virtchnl_status_code {
VIRTCHNL_STATUS_SUCCESS = 0,
- VIRTCHNL_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
- VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
};
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
enum virtchnl_link_speed {
VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
@@ -92,6 +101,8 @@
VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
};
/* for hsplit_0 field of Rx HMC context */
@@ -135,6 +146,7 @@
VIRTCHNL_OP_GET_STATS = 15,
VIRTCHNL_OP_RSVD = 16,
VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ /* opcode 19 is reserved */
VIRTCHNL_OP_IWARP = 20, /* advanced opcode */
VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, /* advanced opcode */
VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, /* advanced opcode */
@@ -145,16 +157,21 @@
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
VIRTCHNL_OP_REQUEST_QUEUES = 29,
-
+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
};
-/* This macro is used to generate a compilation error if a structure
+/* These macros are used to generate compilation errors if a structure/union
* is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
*/
#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
- {virtchnl_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0)}
+ { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
/* Virtual channel message descriptor. This overlays the admin queue
* descriptor. All other data is passed in external buffers.
@@ -169,7 +186,7 @@
VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-/* Message descriptions and data structures.*/
+/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
@@ -247,6 +264,7 @@
#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
@@ -254,7 +272,9 @@
#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
-
+#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
VIRTCHNL_VF_OFFLOAD_VLAN | \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -295,7 +315,13 @@
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
+ * PF configures requested queue and returns a status code. The
+ * crc_disable flag disables CRC stripping on the VF. Setting
+ * the crc_disable flag to 1 will disable CRC stripping for each
+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
+ * offload must have been set prior to sending this info or the PF
+ * will ignore the request. This flag should be set the same for
+ * all of the queues for a VF.
*/
/* Rx queue config info */
@@ -307,7 +333,8 @@
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
- u32 pad1;
+ u8 crc_disable;
+ u8 pad1[3];
u64 dma_ring_addr;
enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
u32 pad2;
@@ -316,11 +343,14 @@
VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
- * VF sends this message to set parameters for all active TX and RX queues
+ * VF sends this message to set parameters for active TX and RX queues
* associated with the specified VSI.
* PF configures queues and returns status.
* If the number of queues specified is greater than the number of queues
* associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
*/
struct virtchnl_queue_pair_info {
/* NOTE: vsi_id and queue_id should be identical for both queues. */
@@ -358,8 +388,13 @@
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
* are to be associated with the specified vector.
- * The "other" causes are always mapped to vector 0.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
* PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
*/
struct virtchnl_vector_map {
u16 vsi_id;
@@ -386,6 +421,9 @@
* (Currently, we only support 16 queues per VF, but we make the field
* u32 to allow for expansion.)
* PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
*/
struct virtchnl_queue_select {
u16 vsi_id;
@@ -465,9 +503,24 @@
* the virtchnl_queue_select struct to specify the VSI. The queue_id
* field is ignored by the PF.
*
- * PF replies with struct eth_stats in an external buffer.
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
*/
+struct virtchnl_eth_stats {
+ u64 rx_bytes; /* received bytes */
+ u64 rx_unicast; /* received unicast pkts */
+ u64 rx_multicast; /* received multicast pkts */
+ u64 rx_broadcast; /* received broadcast pkts */
+ u64 rx_discards;
+ u64 rx_unknown_protocol;
+ u64 tx_bytes; /* transmitted bytes */
+ u64 tx_unicast; /* transmitted unicast pkts */
+ u64 tx_multicast; /* transmitted multicast pkts */
+ u64 tx_broadcast; /* transmitted broadcast pkts */
+ u64 tx_discards;
+ u64 tx_errors;
+};
+
/* VIRTCHNL_OP_CONFIG_RSS_KEY
* VIRTCHNL_OP_CONFIG_RSS_LUT
* VF sends these messages to configure RSS. Only supported if both PF
@@ -506,6 +559,81 @@
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+ u16 count; /* number of queues in a channel */
+ u16 offset; /* queues in a channel start from 'offset' */
+ u32 pad;
+ u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+ u32 num_tc;
+ u32 pad;
+ struct virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ __be16 vlan_id;
+ __be16 pad; /* reserved for future use */
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+ struct virtchnl_l4_spec tcp_spec;
+ u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+};
+
+enum virtchnl_flow_type {
+ /* flow types */
+ VIRTCHNL_TCP_V4_FLOW = 0,
+ VIRTCHNL_TCP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+ union virtchnl_flow_spec data;
+ union virtchnl_flow_spec mask;
+ enum virtchnl_flow_type flow_type;
+ enum virtchnl_action action;
+ u32 action_meta;
+ u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
/* VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
@@ -526,10 +654,23 @@
struct virtchnl_pf_event {
enum virtchnl_event_codes event;
union {
+ /* If the PF driver does not support the new speed reporting
+ * capabilities then use link_event else use link_event_adv to
+ * get the speed and link information. The ability to understand
+ * new speeds is indicated by setting the capability flag
+ * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
+ * in virtchnl_vf_resource struct and can be used to determine
+ * which link event struct to use below.
+ */
struct {
enum virtchnl_link_speed link_speed;
- bool link_status;
+ u8 link_status;
} link_event;
+ struct {
+ /* link_speed provided in Mbps */
+ u32 link_speed;
+ u8 link_status;
+ } link_event_adv;
} event_data;
int severity;
@@ -549,14 +690,6 @@
* to a single vector.
* PF configures interrupt mapping and returns status.
*/
-
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define QUEUE_TYPE_PE_AEQ 0x80
-#define QUEUE_INVALID_IDX 0xFFFF
-
struct virtchnl_iwarp_qv_info {
u32 v_idx; /* msix_vector */
u16 ceq_idx;
@@ -574,6 +707,36 @@
VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_iwarp_qvlist_info);
+/* Since VF messages are limited by u16 size, precalculate the maximum possible
+ * values of nested elements in virtchnl structures that virtual channel can
+ * possibly handle in a single message.
+ */
+enum virtchnl_vector_limits {
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
+ sizeof(struct virtchnl_queue_pair_info),
+
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
+ sizeof(struct virtchnl_vector_map),
+
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr),
+
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
+ sizeof(u16),
+
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_iwarp_qvlist_info)) /
+ sizeof(struct virtchnl_iwarp_qv_info),
+
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
+ sizeof(struct virtchnl_channel_info),
+};
+
/* VF reset states - these are written into the RSTAT register:
* VFGEN_RSTAT on the VF
* When the PF initiates a reset, it writes 0
@@ -629,11 +792,16 @@
if (msglen >= valid_len) {
struct virtchnl_vsi_queue_config_info *vqc =
(struct virtchnl_vsi_queue_config_info *)msg;
+
+ if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
+ err_msg_format = TRUE;
+ break;
+ }
+
valid_len += (vqc->num_queue_pairs *
sizeof(struct
virtchnl_queue_pair_info));
- if (vqc->num_queue_pairs == 0)
- err_msg_format = TRUE;
}
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
@@ -641,10 +809,15 @@
if (msglen >= valid_len) {
struct virtchnl_irq_map_info *vimi =
(struct virtchnl_irq_map_info *)msg;
+
+ if (vimi->num_vectors == 0 || vimi->num_vectors >
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
+ err_msg_format = TRUE;
+ break;
+ }
+
valid_len += (vimi->num_vectors *
sizeof(struct virtchnl_vector_map));
- if (vimi->num_vectors == 0)
- err_msg_format = TRUE;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
@@ -657,10 +830,15 @@
if (msglen >= valid_len) {
struct virtchnl_ether_addr_list *veal =
(struct virtchnl_ether_addr_list *)msg;
+
+ if (veal->num_elements == 0 || veal->num_elements >
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
+ err_msg_format = TRUE;
+ break;
+ }
+
valid_len += veal->num_elements *
sizeof(struct virtchnl_ether_addr);
- if (veal->num_elements == 0)
- err_msg_format = TRUE;
}
break;
case VIRTCHNL_OP_ADD_VLAN:
@@ -669,9 +847,14 @@
if (msglen >= valid_len) {
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
- valid_len += vfl->num_elements * sizeof(u16);
- if (vfl->num_elements == 0)
+
+ if (vfl->num_elements == 0 || vfl->num_elements >
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
err_msg_format = TRUE;
+ break;
+ }
+
+ valid_len += vfl->num_elements * sizeof(u16);
}
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
@@ -697,10 +880,13 @@
if (msglen >= valid_len) {
struct virtchnl_iwarp_qvlist_info *qv =
(struct virtchnl_iwarp_qvlist_info *)msg;
- if (qv->num_vectors == 0) {
+
+ if (qv->num_vectors == 0 || qv->num_vectors >
+ VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP_MAX) {
err_msg_format = TRUE;
break;
}
+
valid_len += ((qv->num_vectors - 1) *
sizeof(struct virtchnl_iwarp_qv_info));
}
@@ -732,11 +918,34 @@
case VIRTCHNL_OP_REQUEST_QUEUES:
valid_len = sizeof(struct virtchnl_vf_res_request);
break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ valid_len = sizeof(struct virtchnl_tc_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_tc_info *vti =
+ (struct virtchnl_tc_info *)msg;
+
+ if (vti->num_tc == 0 || vti->num_tc >
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
+ err_msg_format = TRUE;
+ break;
+ }
+
+ valid_len += (vti->num_tc - 1) *
+ sizeof(struct virtchnl_channel_info);
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ /* fall through */
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
default:
- return VIRTCHNL_ERR_PARAM;
+ return VIRTCHNL_STATUS_ERR_PARAM;
}
/* few more checks */
if (err_msg_format || valid_len != msglen)

File Metadata

Mime Type
text/plain
Expires
Thu, Jan 1, 1:45 AM (9 h, 20 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
27426986
Default Alt Text
D20290.id57840.diff (179 KB)

Event Timeline