D26322.id76636.diff
Index: sys/conf/files.amd64
===================================================================
--- sys/conf/files.amd64
+++ sys/conf/files.amd64
@@ -177,7 +177,7 @@
dev/ice/ice_switch.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
- compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01030900 -mice_ddp -c${.TARGET}" \
+ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031000 -mice_ddp -c${.TARGET}" \
no-implicit-rule before-depend local \
clean "ice_ddp.c"
ice_ddp.fwo optional ice_ddp \
@@ -186,8 +186,8 @@
no-implicit-rule \
clean "ice_ddp.fwo"
ice_ddp.fw optional ice_ddp \
- dependency "$S/contrib/dev/ice/ice-1.3.9.0.pkg" \
- compile-with "${CP} $S/contrib/dev/ice/ice-1.3.9.0.pkg ice_ddp.fw" \
+ dependency "$S/contrib/dev/ice/ice-1.3.16.0.pkg" \
+ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.16.0.pkg ice_ddp.fw" \
no-obj no-implicit-rule \
clean "ice_ddp.fw"
dev/ioat/ioat.c optional ioat pci
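The version argument handed to fw_stub.awk packs the DDP package version one byte per component, so the bump from 0x01030900 to 0x01031000 tracks the rename from ice-1.3.9.0.pkg to ice-1.3.16.0.pkg (0x10 == 16). A minimal standalone sanity check of that encoding (illustrative only, not part of the build):

#include <stdio.h>

int main(void)
{
	unsigned int v = 0x01031000;	/* version word passed to fw_stub.awk */

	/* prints 1.3.16.0 */
	printf("%u.%u.%u.%u\n",
	    (v >> 24) & 0xff, (v >> 16) & 0xff,
	    (v >> 8) & 0xff, v & 0xff);
	return (0);
}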
Index: sys/conf/files.arm64
===================================================================
--- sys/conf/files.arm64
+++ sys/conf/files.arm64
@@ -288,7 +288,7 @@
dev/ice/ice_switch.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
- compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01030900 -mice_ddp -c${.TARGET}" \
+ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031000 -mice_ddp -c${.TARGET}" \
no-implicit-rule before-depend local \
clean "ice_ddp.c"
ice_ddp.fwo optional ice_ddp \
@@ -297,8 +297,8 @@
no-implicit-rule \
clean "ice_ddp.fwo"
ice_ddp.fw optional ice_ddp \
- dependency "$S/contrib/dev/ice/ice-1.3.9.0.pkg" \
- compile-with "${CP} $S/contrib/dev/ice/ice-1.3.9.0.pkg ice_ddp.fw" \
+ dependency "$S/contrib/dev/ice/ice-1.3.16.0.pkg" \
+ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.16.0.pkg ice_ddp.fw" \
no-obj no-implicit-rule \
clean "ice_ddp.fw"
dev/iicbus/sy8106a.c optional sy8106a fdt
Index: sys/dev/ice/ice_adminq_cmd.h
===================================================================
--- sys/dev/ice/ice_adminq_cmd.h
+++ sys/dev/ice/ice_adminq_cmd.h
@@ -156,12 +156,13 @@
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_MAX_MTU 0x0047
#define ICE_AQC_CAPS_NVM_VER 0x0048
+#define ICE_AQC_CAPS_OROM_VER 0x004A
+#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_CEM 0x00F2
#define ICE_AQC_CAPS_IWARP 0x0051
#define ICE_AQC_CAPS_LED 0x0061
#define ICE_AQC_CAPS_SDP 0x0062
#define ICE_AQC_CAPS_WR_CSR_PROT 0x0064
-#define ICE_AQC_CAPS_NO_DROP_POLICY 0x0065
#define ICE_AQC_CAPS_LOGI_TO_PHYSI_PORT_MAP 0x0073
#define ICE_AQC_CAPS_SKU 0x0074
#define ICE_AQC_CAPS_PORT_MAP 0x0075
@@ -281,13 +282,6 @@
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
-/* The response buffer is as follows. Note that the length of the
- * elements array varies with the length of the command response.
- */
-struct ice_aqc_get_sw_cfg_resp {
- struct ice_aqc_get_sw_cfg_resp_elem elements[1];
-};
-
/* Set Port parameters, (direct, 0x0203) */
struct ice_aqc_set_port_params {
__le16 cmd_flags;
@@ -338,8 +332,6 @@
#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM 0x49
#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID 0x50
#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM 0x51
-#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58
-#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
/* Resource types 0x62-67 are reserved for Hash profile builder */
@@ -372,15 +364,6 @@
__le16 total_free; /* Resources un-allocated/not reserved by any PF */
};
-/* Buffer for Get Resource command */
-struct ice_aqc_get_res_resp {
- /* Number of resource entries to be calculated using
- * datalen/sizeof(struct ice_aqc_cmd_resp)).
- * Value of 'datalen' gets updated as part of response.
- */
- struct ice_aqc_get_res_resp_elem elem[1];
-};
-
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
*/
@@ -406,7 +389,7 @@
#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \
(0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S)
__le16 num_elems;
- struct ice_aqc_res_elem elem[1];
+ struct ice_aqc_res_elem elem[STRUCT_HACK_VAR_LEN];
};
/* Get Allocated Resource Descriptors Command (indirect 0x020A) */
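This diff repeatedly replaces the old one-element-array tail hack (elem[1]) with STRUCT_HACK_VAR_LEN. Assuming, as in the ice shared code, that STRUCT_HACK_VAR_LEN expands to a C99 flexible array member, buffer sizing can then drop the error-prone "num - 1" adjustment. A self-contained sketch of the pattern (the stand-in types and the struct_size macro shape are assumptions modeled on the driver's ice_struct_size helper):

#include <stdio.h>
#include <stdlib.h>

struct elem { unsigned short id; };		/* stand-in element type */

struct resp {
	unsigned short num_elems;
	struct elem elem[];		/* what STRUCT_HACK_VAR_LEN expands to */
};

/* Same shape as the driver's ice_struct_size(ptr, field, num) helper. */
#define struct_size(ptr, field, num) \
	(sizeof(*(ptr)) + (num) * sizeof(*(ptr)->field))

int main(void)
{
	struct resp *buf = NULL;
	size_t len = struct_size(buf, elem, 4);	/* num, not num - 1 */

	buf = calloc(1, len);
	if (buf == NULL)
		return (1);
	buf->num_elems = 4;
	printf("%zu bytes for 4 elements\n", len);
	free(buf);
	return (0);
}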
@@ -428,10 +411,6 @@
__le32 addr_low;
};
-struct ice_aqc_get_allocd_res_desc_resp {
- struct ice_aqc_res_elem elem[1];
-};
-
/* Add VSI (indirect 0x0210)
* Update VSI (indirect 0x0211)
* Get VSI (indirect 0x0212)
@@ -758,7 +737,6 @@
__le32 addr_low;
};
-#pragma pack(1)
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
* This structures describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
@@ -841,9 +819,8 @@
* lookup-type
*/
__le16 hdr_len;
- u8 hdr[1];
+ u8 hdr[STRUCT_HACK_VAR_LEN];
};
-#pragma pack()
/* Add/Update/Remove large action command/response entry
* "index" is returned as part of a response to a successful Add command, and
@@ -852,7 +829,6 @@
struct ice_sw_rule_lg_act {
__le16 index; /* Index in large action table */
__le16 size;
- __le32 act[1]; /* array of size for actions */
/* Max number of large actions */
#define ICE_MAX_LG_ACT 4
/* Bit 0:1 - Action type */
@@ -903,6 +879,7 @@
#define ICE_LG_ACT_STAT_COUNT 0x7
#define ICE_LG_ACT_STAT_COUNT_S 3
#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
+ __le32 act[STRUCT_HACK_VAR_LEN]; /* array of size for actions */
};
/* Add/Update/Remove VSI list command/response entry
@@ -912,7 +889,7 @@
struct ice_sw_rule_vsi_list {
__le16 index; /* Index of VSI/Prune list */
__le16 number_vsi;
- __le16 vsi[1]; /* Array of number_vsi VSI numbers */
+ __le16 vsi[STRUCT_HACK_VAR_LEN]; /* Array of number_vsi VSI numbers */
};
#pragma pack(1)
@@ -977,8 +954,10 @@
struct ice_aqc_set_dcb_params {
u8 cmd_flags; /* unused in response */
#define ICE_AQC_LINK_UP_DCB_CFG BIT(0)
+#define ICE_AQC_PERSIST_DCB_CFG BIT(1)
u8 valid_flags; /* unused in response */
#define ICE_AQC_LINK_UP_DCB_CFG_VALID BIT(0)
+#define ICE_AQC_PERSIST_DCB_CFG_VALID BIT(1)
u8 rsvd[14];
};
@@ -1008,14 +987,6 @@
__le32 addr_low;
};
-/* This is the buffer for:
- * Suspend Nodes (indirect 0x0409)
- * Resume Nodes (indirect 0x040A)
- */
-struct ice_aqc_suspend_resume_elem {
- __le32 teid[1];
-};
-
struct ice_aqc_txsched_move_grp_info_hdr {
__le32 src_parent_teid;
__le32 dest_parent_teid;
@@ -1025,7 +996,7 @@
struct ice_aqc_move_elem {
struct ice_aqc_txsched_move_grp_info_hdr hdr;
- __le32 teid[1];
+ __le32 teid[STRUCT_HACK_VAR_LEN];
};
struct ice_aqc_elem_info_bw {
@@ -1078,15 +1049,7 @@
struct ice_aqc_add_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
- struct ice_aqc_txsched_elem_data generic[1];
-};
-
-struct ice_aqc_conf_elem {
- struct ice_aqc_txsched_elem_data generic[1];
-};
-
-struct ice_aqc_get_elem {
- struct ice_aqc_txsched_elem_data generic[1];
+ struct ice_aqc_txsched_elem_data generic[STRUCT_HACK_VAR_LEN];
};
struct ice_aqc_get_topo_elem {
@@ -1097,7 +1060,7 @@
struct ice_aqc_delete_elem {
struct ice_aqc_txsched_topo_grp_info_hdr hdr;
- __le32 teid[1];
+ __le32 teid[STRUCT_HACK_VAR_LEN];
};
/* Query Port ETS (indirect 0x040E)
@@ -1160,10 +1123,6 @@
__le16 rl_encode;
};
-struct ice_aqc_rl_profile_generic_elem {
- struct ice_aqc_rl_profile_elem generic[1];
-};
-
/* Configure L2 Node CGD (indirect 0x0414)
* This indirect command allows configuring a congestion domain for given L2
* node TEIDs in the scheduler topology.
@@ -1181,10 +1140,6 @@
u8 reserved[3];
};
-struct ice_aqc_cfg_l2_node_cgd_data {
- struct ice_aqc_cfg_l2_node_cgd_elem elem[1];
-};
-
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -1330,7 +1285,7 @@
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1381,6 +1336,7 @@
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
#define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
#define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define ICE_AQC_MOD_TYPE_IDENT 1
#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
@@ -1490,6 +1446,9 @@
#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
u8 link_cfg_err;
#define ICE_AQ_LINK_CFG_ERR BIT(0)
+#define ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define ICE_AQ_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define ICE_AQ_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
#define ICE_AQ_LINK_FAULT BIT(1)
@@ -1607,7 +1566,7 @@
u8 reserved[15];
};
-/* DNL Get Status command (indirect 0x680)
+/* DNL Get Status command (indirect 0x0680)
* Structure used for the response, the command uses the generic
* ice_aqc_generic struct to pass a buffer address to the FW.
*/
@@ -1667,7 +1626,7 @@
u32 sb_iosf_clk_cntr;
};
-/* DNL run command (direct 0x681) */
+/* DNL run command (direct 0x0681) */
struct ice_aqc_dnl_run_command {
u8 reserved0;
u8 command;
@@ -1686,7 +1645,7 @@
u8 reserved1[12];
};
-/* DNL call command (indirect 0x682)
+/* DNL call command (indirect 0x0682)
* Struct is used for both command and response
*/
struct ice_aqc_dnl_call_command {
@@ -1698,14 +1657,14 @@
__le32 addr_low;
};
-/* DNL call command/response buffer (indirect 0x682) */
+/* DNL call command/response buffer (indirect 0x0682) */
struct ice_aqc_dnl_call {
__le32 stores[4];
};
/* Used for both commands:
- * DNL read sto command (indirect 0x683)
- * DNL write sto command (indirect 0x684)
+ * DNL read sto command (indirect 0x0683)
+ * DNL write sto command (indirect 0x0684)
*/
struct ice_aqc_dnl_read_write_command {
u8 ctx;
@@ -1720,8 +1679,8 @@
};
/* Used for both command responses:
- * DNL read sto response (indirect 0x683)
- * DNL write sto response (indirect 0x684)
+ * DNL read sto response (indirect 0x0683)
+ * DNL write sto response (indirect 0x0684)
*/
struct ice_aqc_dnl_read_write_response {
u8 reserved;
@@ -1732,14 +1691,14 @@
__le32 addr_low; /* Reserved for write command */
};
-/* DNL set breakpoints command (indirect 0x686) */
+/* DNL set breakpoints command (indirect 0x0686) */
struct ice_aqc_dnl_set_breakpoints_command {
__le32 reserved[2];
__le32 addr_high;
__le32 addr_low;
};
-/* DNL set breakpoints data buffer structure (indirect 0x686) */
+/* DNL set breakpoints data buffer structure (indirect 0x0686) */
struct ice_aqc_dnl_set_breakpoints {
u8 ctx;
u8 ena; /* 0- disabled, 1- enabled */
@@ -1747,7 +1706,7 @@
__le16 activity_id;
};
-/* DNL read log data command(indirect 0x687) */
+/* DNL read log data command(indirect 0x0687) */
struct ice_aqc_dnl_read_log_command {
__le16 reserved0;
__le16 offset;
@@ -1757,7 +1716,7 @@
};
-/* DNL read log data response(indirect 0x687) */
+/* DNL read log data response(indirect 0x0687) */
struct ice_aqc_dnl_read_log_response {
__le16 reserved;
__le16 size;
@@ -1976,6 +1935,7 @@
struct ice_aqc_get_port_options_elem {
u8 pmd;
+#define ICE_AQC_PORT_INV_PORT_OPT 4
#define ICE_AQC_PORT_OPT_PMD_COUNT_S 0
#define ICE_AQC_PORT_OPT_PMD_COUNT_M (0xF << ICE_AQC_PORT_OPT_PMD_COUNT_S)
#define ICE_AQC_PORT_OPT_PMD_WIDTH_S 4
@@ -1995,13 +1955,6 @@
u8 phy_scid[2];
};
-/* The buffer for command 0x06EA contains port_options_count of options
- * in the option array.
- */
-struct ice_aqc_get_port_options_data {
- struct ice_aqc_get_port_options_elem option[1];
-};
-
/* Set Port Option (direct, 0x06EB) */
struct ice_aqc_set_port_option {
u8 lport_num;
@@ -2114,6 +2067,7 @@
#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M MAKEMASK(0x3FF, 0)
#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
@@ -2353,6 +2307,18 @@
u8 reserved[15];
};
+/* LLDP Filter Control (direct 0x0A0A) */
+struct ice_aqc_lldp_filter_ctrl {
+ u8 cmd_flags;
+#define ICE_AQC_LLDP_FILTER_ACTION_M MAKEMASK(3, 0)
+#define ICE_AQC_LLDP_FILTER_ACTION_ADD 0x0
+#define ICE_AQC_LLDP_FILTER_ACTION_DELETE 0x1
+#define ICE_AQC_LLDP_FILTER_ACTION_UPDATE 0x2
+ u8 reserved1;
+ __le16 vsi_num;
+ u8 reserved2[12];
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
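A hedged sketch of how the new LLDP filter control descriptor might be driven, matching the ice_lldp_fltr_add_remove() prototype this patch adds to ice_common.h (the body below is an illustration built only from identifiers visible in this diff, not the committed implementation):

enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = CPU_TO_LE16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}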
@@ -2389,7 +2355,7 @@
struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
__le16 vsi_id;
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
@@ -2450,7 +2416,7 @@
__le32 parent_teid;
u8 num_txqs;
u8 rsvd[3];
- struct ice_aqc_add_txqs_perq txqs[1];
+ struct ice_aqc_add_txqs_perq txqs[STRUCT_HACK_VAR_LEN];
};
/* Disable Tx LAN Queues (indirect 0x0C31) */
@@ -2483,23 +2449,20 @@
* added before the start of the next group, to allow correct
* alignment of the parent_teid field.
*/
+#pragma pack(1)
struct ice_aqc_dis_txq_item {
__le32 parent_teid;
u8 num_qs;
u8 rsvd;
/* The length of the q_id array varies according to num_qs */
- __le16 q_id[1];
- /* This only applies from F8 onward */
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \
(0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \
(1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
+ __le16 q_id[STRUCT_HACK_VAR_LEN];
};
-
-struct ice_aqc_dis_txq {
- struct ice_aqc_dis_txq_item qgrps[1];
-};
+#pragma pack()
/* Tx LAN Queues Cleanup Event (0x0C31) */
struct ice_aqc_txqs_cleanup {
@@ -2540,11 +2503,11 @@
struct ice_aqc_move_txqs_data {
__le32 src_teid;
__le32 dest_teid;
- struct ice_aqc_move_txqs_elem txqs[1];
+ struct ice_aqc_move_txqs_elem txqs[STRUCT_HACK_VAR_LEN];
};
/* Download Package (indirect 0x0C40) */
-/* Also used for Update Package (indirect 0x0C42) */
+/* Also used for Update Package (indirect 0x0C42 and 0x0C41) */
struct ice_aqc_download_pkg {
u8 flags;
#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
@@ -2593,7 +2556,7 @@
/* Get Package Info List response buffer format (0x0C43) */
struct ice_aqc_get_pkg_info_resp {
__le32 count;
- struct ice_aqc_get_pkg_info pkg_info[1];
+ struct ice_aqc_get_pkg_info pkg_info[STRUCT_HACK_VAR_LEN];
};
/* Driver Shared Parameters (direct, 0x0C90) */
@@ -2617,6 +2580,50 @@
u8 reserved[8];
};
+/* Set Health Status (direct 0xFF20) */
+struct ice_aqc_set_health_status_config {
+ u8 event_source;
+#define ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK BIT(0)
+#define ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK BIT(1)
+#define ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK BIT(2)
+ u8 reserved[15];
+};
+
+/* Get Health Status codes (indirect 0xFF21) */
+struct ice_aqc_get_supported_health_status_codes {
+ __le16 health_code_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Health Status (indirect 0xFF22) */
+struct ice_aqc_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Health Status event buffer entry, (0xFF22)
+ * repeated per reported health status
+ */
+struct ice_aqc_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+#define ICE_AQC_HEALTH_STATUS_PF (0x1)
+#define ICE_AQC_HEALTH_STATUS_PORT (0x2)
+#define ICE_AQC_HEALTH_STATUS_GLOBAL (0x3)
+ __le32 internal_data1;
+#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA (0xDEADBEEF)
+ __le32 internal_data2;
+};
+
+/* Clear Health Status (direct 0xFF23) */
+struct ice_aqc_clear_health_status {
+ __le32 reserved[4];
+};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
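An illustrative consumer of the new health status structures (an assumption, not driver code): after a Get Health Status (0xFF22) command fills a caller buffer, that buffer is a packed array of ice_aqc_health_status_elem entries which can be walked directly.

static void
ice_print_health_status(struct ice_hw *hw,
			struct ice_aqc_health_status_elem *buf, u16 count)
{
	u16 i;

	for (i = 0; i < count; i++)
		ice_debug(hw, ICE_DBG_INIT,
			  "health code 0x%04x (source 0x%x)\n",
			  LE16_TO_CPU(buf[i].health_status_code),
			  LE16_TO_CPU(buf[i].event_source));
}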
@@ -2706,6 +2713,7 @@
struct ice_aqc_lldp_start lldp_start;
struct ice_aqc_lldp_set_local_mib lldp_set_mib;
struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
+ struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -2727,6 +2735,12 @@
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_set_health_status_config
+ set_health_status_config;
+ struct ice_aqc_get_supported_health_status_codes
+ get_supported_health_status_codes;
+ struct ice_aqc_get_health_status get_health_status;
+ struct ice_aqc_clear_health_status clear_health_status;
} params;
};
@@ -2918,6 +2932,8 @@
ice_aqc_opc_nvm_sr_dump = 0x0707,
ice_aqc_opc_nvm_save_factory_settings = 0x0708,
ice_aqc_opc_nvm_update_empr = 0x0709,
+ ice_aqc_opc_nvm_pkg_data = 0x070A,
+ ice_aqc_opc_nvm_pass_component_tbl = 0x070B,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
@@ -2940,6 +2956,7 @@
ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
+ ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
@@ -2963,6 +2980,12 @@
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* SystemDiagnostic commands */
+ ice_aqc_opc_set_health_status_config = 0xFF20,
+ ice_aqc_opc_get_supported_health_status_codes = 0xFF21,
+ ice_aqc_opc_get_health_status = 0xFF22,
+ ice_aqc_opc_clear_health_status = 0xFF23
};
#endif /* _ICE_ADMINQ_CMD_H_ */
Index: sys/dev/ice/ice_bitops.h
===================================================================
--- sys/dev/ice/ice_bitops.h
+++ sys/dev/ice/ice_bitops.h
@@ -242,7 +242,7 @@
ice_bitmap_t mask;
u16 i;
- /* Handle all but last chunk*/
+ /* Handle all but last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
dst[i] = bmp1[i] | bmp2[i];
@@ -273,7 +273,7 @@
ice_bitmap_t mask;
u16 i;
- /* Handle all but last chunk*/
+ /* Handle all but last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
dst[i] = bmp1[i] ^ bmp2[i];
@@ -286,6 +286,37 @@
dst[i] = (dst[i] & ~mask) | ((bmp1[i] ^ bmp2[i]) & mask);
}
+/**
+ * ice_andnot_bitmap - bitwise ANDNOT 2 bitmaps and result in dst bitmap
+ * @dst: Destination bitmap that receive the result of the operation
+ * @bmp1: The first bitmap of ANDNOT operation
+ * @bmp2: The second bitmap to ANDNOT operation
+ * @size: Size of the bitmaps in bits
+ *
+ * This function performs a bitwise ANDNOT on two "source" bitmaps of the same
+ * size, and stores the result to "dst" bitmap. The "dst" bitmap must be of the
+ * same size as the "source" bitmaps to avoid buffer overflows.
+ */
+static inline void
+ice_andnot_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
+ const ice_bitmap_t *bmp2, u16 size)
+{
+ ice_bitmap_t mask;
+ u16 i;
+
+ /* Handle all but last chunk */
+ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
+ dst[i] = bmp1[i] & ~bmp2[i];
+
+ /* We want to only clear bits within the size. Furthermore, we also do
+ * not want to modify destination bits which are beyond the specified
+ * size. Use a bitmask to ensure that we only modify the bits that are
+ * within the specified size.
+ */
+ mask = LAST_CHUNK_MASK(size);
+ dst[i] = (dst[i] & ~mask) | ((bmp1[i] & ~bmp2[i]) & mask);
+}
+
/**
* ice_find_next_bit - Find the index of the next set bit of a bitmap
* @bitmap: the bitmap to scan
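A usage sketch for the new ANDNOT helper (illustrative; ice_declare_bitmap is the driver's bitmap declaration macro and is assumed here), e.g. removing in-use entries from a candidate set:

	ice_declare_bitmap(candidates, ICE_MAX_VSI);
	ice_declare_bitmap(in_use, ICE_MAX_VSI);
	ice_declare_bitmap(avail, ICE_MAX_VSI);

	/* avail = candidates & ~in_use, without touching bits past size */
	ice_andnot_bitmap(avail, candidates, in_use, ICE_MAX_VSI);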
@@ -343,6 +374,11 @@
return ice_find_next_bit(bitmap, size, 0);
}
+#define ice_for_each_set_bit(_bitpos, _addr, _maxlen) \
+ for ((_bitpos) = ice_find_first_bit((_addr), (_maxlen)); \
+ (_bitpos) < (_maxlen); \
+ (_bitpos) = ice_find_next_bit((_addr), (_maxlen), (_bitpos) + 1))
+
/**
 * ice_is_any_bit_set - Return true if any bit in the bitmap is set
* @bitmap: the bitmap to check
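A usage sketch for the new iterator macro (illustrative): it visits each set bit in ascending order, replacing manual ice_find_first_bit/ice_find_next_bit loops.

	u16 bit;

	ice_for_each_set_bit(bit, bmp, size)
		ice_debug(hw, ICE_DBG_INIT, "bit %u is set\n", bit);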
@@ -372,6 +408,48 @@
ICE_NONDMA_TO_NONDMA);
}
+/**
+ * ice_bitmap_set - set a number of bits in bitmap from a starting position
+ * @dst: bitmap destination
+ * @pos: first bit position to set
+ * @num_bits: number of bits to set
+ *
+ * This function sets bits in a bitmap from pos to (pos + num_bits) - 1.
+ * Note that this function assumes it is operating on a bitmap declared using
+ * ice_declare_bitmap.
+ */
+static inline void
+ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits)
+{
+ u16 i;
+
+ for (i = pos; i < pos + num_bits; i++)
+ ice_set_bit(i, dst);
+}
+
+/**
+ * ice_bitmap_hweight - hamming weight of bitmap
+ * @bm: bitmap pointer
+ * @size: size of bitmap (in bits)
+ *
+ * This function determines the number of set bits in a bitmap.
+ * Note that this function assumes it is operating on a bitmap declared using
+ * ice_declare_bitmap.
+ */
+static inline int
+ice_bitmap_hweight(ice_bitmap_t *bm, u16 size)
+{
+ int count = 0;
+ u16 bit = 0;
+
+ while (size > (bit = ice_find_next_bit(bm, size, bit))) {
+ count++;
+ bit++;
+ }
+
+ return count;
+}
+
/**
* ice_cmp_bitmaps - compares two bitmaps.
* @bmp1: the bitmap to compare
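A sketch combining the two new helpers above (illustrative; ice_zero_bitmap is the driver's clear helper and is assumed here):

	ice_declare_bitmap(bmp, 64);

	ice_zero_bitmap(bmp, 64);
	ice_bitmap_set(bmp, 8, 16);	/* sets bits 8..23 */
	/* ice_bitmap_hweight(bmp, 64) now returns 16 */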
@@ -386,12 +464,12 @@
ice_bitmap_t mask;
u16 i;
- /* Handle all but last chunk*/
+ /* Handle all but last chunk */
for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
if (bmp1[i] != bmp2[i])
return false;
- /* We want to only compare bits within the size.*/
+ /* We want to only compare bits within the size */
mask = LAST_CHUNK_MASK(size);
if ((bmp1[i] & mask) != (bmp2[i] & mask))
return false;
Index: sys/dev/ice/ice_common.h
===================================================================
--- sys/dev/ice/ice_common.h
+++ sys/dev/ice/ice_common.h
@@ -46,12 +46,18 @@
ICE_FW_MODE_ROLLBACK
};
+/* prototype for functions used for SW locks */
+void ice_free_list(struct LIST_HEAD_TYPE *list);
+void ice_init_lock(struct ice_lock *lock);
+void ice_acquire_lock(struct ice_lock *lock);
+void ice_release_lock(struct ice_lock *lock);
+void ice_destroy_lock(struct ice_lock *lock);
+void *ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m, u64 size);
+void ice_free_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m);
+
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq);
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
-enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -147,7 +153,8 @@
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info);
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
@@ -165,9 +172,6 @@
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
@@ -186,6 +190,7 @@
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
@@ -248,6 +253,7 @@
void ice_sched_replay_agg(struct ice_hw *hw);
enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
struct ice_q_ctx *
@@ -263,7 +269,6 @@
struct ice_eth_stats *cur_stats);
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
void ice_print_rollback_msg(struct ice_hw *hw);
-bool ice_is_generic_mac(struct ice_hw *hw);
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1);
@@ -276,10 +281,16 @@
enum ice_status ice_aq_alternate_clear(struct ice_hw *hw);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf);
+ struct ice_aqc_txsched_elem_data *buf);
enum ice_status
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
enum ice_status
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
-enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw);
+enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *ver);
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
+enum ice_status
+ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
#endif /* _ICE_COMMON_H_ */
Index: sys/dev/ice/ice_common.c
===================================================================
--- sys/dev/ice/ice_common.c
+++ sys/dev/ice/ice_common.c
@@ -115,7 +115,8 @@
* is returned in user specified buffer. Please interpret user specified
* buffer as "manage_mac_read" response.
* Response such as various MAC addresses are stored in HW struct (port.mac)
- * ice_aq_discover_caps is expected to be called before this function is called.
+ * ice_discover_dev_caps is expected to be called before this function is
+ * called.
*/
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
@@ -180,11 +181,13 @@
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
enum ice_status status;
+ struct ice_hw *hw;
cmd = &desc.params.get_phy;
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
return ICE_ERR_PARAM;
+ hw = pi->hw;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
@@ -192,11 +195,39 @@
cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
cmd->param0 |= CPU_TO_LE16(report_mode);
- status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
+ status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
+
+ ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
+ report_mode);
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
+ (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
+ (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
+ ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
+ ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ pcaps->low_power_ctrl_an);
+ ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
+ pcaps->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
+ pcaps->link_fec_options);
+ ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
+ pcaps->module_compliance_enforcement);
+ ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
+ pcaps->extended_compliance_code);
+ ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
+ pcaps->module_type[0]);
+ ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
+ pcaps->module_type[1]);
+ ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
+ pcaps->module_type[2]);
if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
+ ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
+ sizeof(pi->phy.link_info.module_type),
+ ICE_NONDMA_TO_NONDMA);
}
return status;
@@ -234,7 +265,7 @@
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
-/*
+/**
* ice_is_media_cage_present
* @pi: port information structure
*
@@ -269,6 +300,18 @@
return ICE_MEDIA_UNKNOWN;
if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+ ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+ ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ICE_MEDIA_DA;
+
switch (hw_link_info->phy_type_low) {
case ICE_PHY_TYPE_LOW_1000BASE_SX:
case ICE_PHY_TYPE_LOW_1000BASE_LX:
@@ -289,6 +332,15 @@
case ICE_PHY_TYPE_LOW_100GBASE_SR2:
case ICE_PHY_TYPE_LOW_100GBASE_DR:
return ICE_MEDIA_FIBER;
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
+ return ICE_MEDIA_FIBER;
case ICE_PHY_TYPE_LOW_100BASE_TX:
case ICE_PHY_TYPE_LOW_1000BASE_T:
case ICE_PHY_TYPE_LOW_2500BASE_T:
@@ -315,7 +367,7 @@
case ICE_PHY_TYPE_LOW_100G_AUI4:
case ICE_PHY_TYPE_LOW_100G_CAUI4:
if (ice_is_media_cage_present(pi))
- return ICE_MEDIA_DA;
+ return ICE_MEDIA_AUI;
/* fall-through */
case ICE_PHY_TYPE_LOW_1000BASE_KX:
case ICE_PHY_TYPE_LOW_2500BASE_KX:
@@ -335,11 +387,15 @@
} else {
switch (hw_link_info->phy_type_high) {
case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
if (ice_is_media_cage_present(pi))
- return ICE_MEDIA_DA;
+ return ICE_MEDIA_AUI;
/* fall-through */
case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
return ICE_MEDIA_BACKPLANE;
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ return ICE_MEDIA_FIBER;
}
}
return ICE_MEDIA_UNKNOWN;
@@ -420,18 +476,21 @@
li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
- ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
- ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, "get link info\n");
+ ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
(unsigned long long)li->phy_type_low);
- ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
(unsigned long long)li->phy_type_high);
- ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
- ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
- ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
- ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
- ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
- ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
- ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
+ ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
+ ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
+ ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
+ ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
+ ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
+ ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
+ li->max_frame_size);
+ ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
/* save link status information */
if (link)
@@ -443,6 +502,43 @@
return ICE_SUCCESS;
}
+/**
+ * ice_fill_tx_timer_and_fc_thresh
+ * @hw: pointer to the HW struct
+ * @cmd: pointer to MAC cfg structure
+ *
+ * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
+ * descriptor
+ */
+static void
+ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
+ struct ice_aqc_set_mac_cfg *cmd)
+{
+ u16 fc_thres_val, tx_timer_val;
+ u32 val;
+
+ /* We read back the transmit timer and fc threshold value of
+ * LFC. Thus, we will use index =
+ * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
+ *
+ * Also, because we are operating on transmit timer and fc
+ * threshold of LFC, we don't turn on any bit in tx_tmr_priority
+ */
+#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
+
+ /* Retrieve the transmit timer */
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
+ tx_timer_val = val &
+ PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
+
+ /* Retrieve the fc threshold */
+ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
+ fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+
+ cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
+}
+
/**
* ice_aq_set_mac_cfg
* @hw: pointer to the HW struct
@@ -454,10 +550,8 @@
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
- u16 fc_threshold_val, tx_timer_val;
struct ice_aqc_set_mac_cfg *cmd;
struct ice_aq_desc desc;
- u32 reg_val;
cmd = &desc.params.set_mac_cfg;
@@ -468,27 +562,7 @@
cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
- /* We read back the transmit timer and fc threshold value of
- * LFC. Thus, we will use index =
- * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
- *
- * Also, because we are opearating on transmit timer and fc
- * threshold of LFC, we don't turn on any bit in tx_tmr_priority
- */
-#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
-
- /* Retrieve the transmit timer */
- reg_val = rd32(hw,
- PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
- tx_timer_val = reg_val &
- PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
- cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
-
- /* Retrieve the fc threshold */
- reg_val = rd32(hw,
- PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
- fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0);
- cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val);
+ ice_fill_tx_timer_and_fc_thresh(hw, cmd);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -500,6 +574,7 @@
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
+ enum ice_status status;
hw->switch_info = (struct ice_switch_info *)
ice_malloc(hw, sizeof(*hw->switch_info));
@@ -510,28 +585,38 @@
return ICE_ERR_NO_MEMORY;
INIT_LIST_HEAD(&sw->vsi_list_map_head);
+ sw->prof_res_bm_init = 0;
- return ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
+ status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
+ if (status) {
+ ice_free(hw, hw->switch_info);
+ return status;
+ }
+ return ICE_SUCCESS;
}
/**
- * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
+ * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
* @hw: pointer to the HW struct
+ * @sw: pointer to switch info struct for which function clears filters
*/
-static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
+static void
+ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_vsi_list_map_info *v_pos_map;
struct ice_vsi_list_map_info *v_tmp_map;
struct ice_sw_recipe *recps;
u8 i;
+ if (!sw)
+ return;
+
LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
ice_vsi_list_map_info, list_entry) {
LIST_DEL(&v_pos_map->list_entry);
ice_free(hw, v_pos_map);
}
- recps = hw->switch_info->recp_list;
+ recps = sw->recp_list;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
@@ -571,11 +656,20 @@
if (recps[i].root_buf)
ice_free(hw, recps[i].root_buf);
}
- ice_rm_all_sw_replay_rule_info(hw);
+ ice_rm_sw_replay_rule_info(hw, sw);
ice_free(hw, sw->recp_list);
ice_free(hw, sw);
}
+/**
+ * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
+ * @hw: pointer to the HW struct
+ */
+static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
+{
+ ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
+}
+
/**
* ice_get_itr_intrl_gran
* @hw: pointer to the HW struct
@@ -610,13 +704,14 @@
void ice_print_rollback_msg(struct ice_hw *hw)
{
char nvm_str[ICE_NVM_VER_LEN] = { 0 };
- struct ice_nvm_info *nvm = &hw->nvm;
struct ice_orom_info *orom;
+ struct ice_nvm_info *nvm;
- orom = &nvm->orom;
+ orom = &hw->flash.orom;
+ nvm = &hw->flash.nvm;
SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
- nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
+ nvm->major, nvm->minor, nvm->eetrack, orom->major,
orom->build, orom->patch);
ice_warn(hw,
"Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
@@ -648,7 +743,6 @@
status = ice_reset(hw, ICE_RESET_PFR);
if (status)
return status;
-
ice_get_itr_intrl_gran(hw);
status = ice_create_all_ctrlq(hw);
@@ -691,8 +785,7 @@
/* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw);
if (status) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Failed to get scheduler allocated resources\n");
+ ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
goto err_unroll_alloc;
}
ice_sched_get_psm_clk_freq(hw);
@@ -701,7 +794,6 @@
status = ice_sched_init_port(hw->port_info);
if (status)
goto err_unroll_sched;
-
pcaps = (struct ice_aqc_get_phy_caps_data *)
ice_malloc(hw, sizeof(*pcaps));
if (!pcaps) {
@@ -714,7 +806,8 @@
ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
ice_free(hw, pcaps);
if (status)
- goto err_unroll_sched;
+ ice_debug(hw, ICE_DBG_PHY, "%s: Get PHY capabilities failed, continuing anyway\n",
+ __func__);
/* Initialize port_info struct with link information */
status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
@@ -730,7 +823,6 @@
/* Initialize max burst size */
if (!hw->max_burst_size)
ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
-
status = ice_init_fltr_mgmt_struct(hw);
if (status)
goto err_unroll_sched;
@@ -749,6 +841,10 @@
status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
ice_free(hw, mac_buf);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
+ /* enable jumbo frame support at MAC level */
+ status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
if (status)
goto err_unroll_fltr_mgmt_struct;
status = ice_init_hw_tbls(hw);
@@ -804,25 +900,24 @@
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay, uld_mask;
+ u32 cnt, reg = 0, grst_timeout, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
* Add 1sec for outstanding AQ commands that can take a long time.
*/
- grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
- GLGEN_RSTCTL_GRSTDEL_S) + 10;
+ grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+ GLGEN_RSTCTL_GRSTDEL_S) + 10;
- for (cnt = 0; cnt < grst_delay; cnt++) {
+ for (cnt = 0; cnt < grst_timeout; cnt++) {
ice_msec_delay(100, true);
reg = rd32(hw, GLGEN_RSTAT);
if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
break;
}
- if (cnt == grst_delay) {
- ice_debug(hw, ICE_DBG_INIT,
- "Global reset polling failed to complete.\n");
+ if (cnt == grst_timeout) {
+ ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
}
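A worked example of the renamed timeout (illustrative numbers): if the GRSTDEL field of GLGEN_RSTCTL reads 30, the device-reported delay is 30 * 100ms = 3s, and the "+ 10" adds ten more 100ms polls (1s) for outstanding AQ commands, so the loop above waits up to 4s before declaring ICE_ERR_RESET_FAILED.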
@@ -840,16 +935,14 @@
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, GLNVM_ULD) & uld_mask;
if (reg == uld_mask) {
- ice_debug(hw, ICE_DBG_INIT,
- "Global reset processes done. %d\n", cnt);
+ ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
break;
}
ice_msec_delay(10, true);
}
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
- ice_debug(hw, ICE_DBG_INIT,
- "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
+ ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg);
return ICE_ERR_RESET_FAILED;
}
@@ -887,7 +980,12 @@
wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
- for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ /* Wait for the PFR to complete. The wait time is the global config lock
+ * timeout plus the PFR timeout which will account for a possible reset
+ * that is occurring during a download package operation.
+ */
+ for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
+ ICE_PF_RESET_WAIT_COUNT; cnt++) {
reg = rd32(hw, PFGEN_CTRL);
if (!(reg & PFGEN_CTRL_PFSWR_M))
break;
@@ -896,8 +994,7 @@
}
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
- ice_debug(hw, ICE_DBG_INIT,
- "PF reset polling failed to complete.\n");
+ ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
return ICE_ERR_RESET_FAILED;
}
@@ -1021,7 +1118,7 @@
rlan_ctx->prefena = 1;
- ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
+ ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
@@ -1144,7 +1241,7 @@
{
u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
- ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
+ ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}
@@ -1235,7 +1332,8 @@
{
u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
- ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info);
+ ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
+ ice_tx_drbell_q_ctx_info);
return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}
@@ -1521,8 +1619,7 @@
goto ice_acquire_res_exit;
if (status)
- ice_debug(hw, ICE_DBG_RES,
- "resource %d acquire type %d failed.\n", res, access);
+ ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
/* If necessary, poll until the current lock owner timeouts */
timeout = time_left;
@@ -1545,11 +1642,9 @@
ice_acquire_res_exit:
if (status == ICE_ERR_AQ_NO_WORK) {
if (access == ICE_RES_WRITE)
- ice_debug(hw, ICE_DBG_RES,
- "resource indicates no work to do.\n");
+ ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
else
- ice_debug(hw, ICE_DBG_RES,
- "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+ ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
}
return status;
}
@@ -1634,9 +1729,8 @@
enum ice_status status;
u16 buf_len;
- buf_len = ice_struct_size(buf, elem, num - 1);
- buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(buf, elem, num);
+ buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -1652,7 +1746,7 @@
if (status)
goto ice_alloc_res_exit;
- ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
+ ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
ICE_NONDMA_TO_NONDMA);
ice_alloc_res_exit:
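Why the element size in the copy changed along with the allocation (illustrative): once elem becomes a flexible array member, sizeof(buf->elem) is no longer legal C because the member has incomplete type, whereas with the old elem[1] hack it happened to equal one element. The fix derives the element size from the pointed-to type instead:

	/* old: sizeof(buf->elem) * num   -- breaks once elem[] is flexible */
	/* new: sizeof(*buf->elem) * num  -- size of one element, times num */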
@@ -1667,14 +1761,13 @@
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status
-ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
enum ice_status status;
u16 buf_len;
- buf_len = ice_struct_size(buf, elem, num - 1);
+ buf_len = ice_struct_size(buf, elem, num);
buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -1682,7 +1775,7 @@
/* Prepare buffer to free resource. */
buf->num_elems = CPU_TO_LE16(num);
buf->res_type = CPU_TO_LE16(type);
- ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
+ ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
ICE_NONDMA_TO_NONDMA);
status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
@@ -1784,282 +1877,423 @@
}
/**
- * ice_parse_caps - parse function/device capabilities
+ * ice_parse_common_caps - parse common device/function capabilities
* @hw: pointer to the HW struct
- * @buf: pointer to a buffer containing function/device capability records
- * @cap_count: number of capability records in the list
- * @opc: type of capabilities list to parse
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
*
- * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Returns: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
+ struct ice_aqc_list_caps_elem *elem, const char *prefix)
+{
+ u32 logical_id = LE32_TO_CPU(elem->logical_id);
+ u32 phys_id = LE32_TO_CPU(elem->phys_id);
+ u32 number = LE32_TO_CPU(elem->number);
+ u16 cap = LE16_TO_CPU(elem->cap);
+ bool found = true;
+
+ switch (cap) {
+ case ICE_AQC_CAPS_SWITCHING_MODE:
+ caps->switching_mode = number;
+ ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
+ caps->switching_mode);
+ break;
+ case ICE_AQC_CAPS_MANAGEABILITY_MODE:
+ caps->mgmt_mode = number;
+ caps->mgmt_protocols_mctp = logical_id;
+ ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
+ caps->mgmt_mode);
+ ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
+ caps->mgmt_protocols_mctp);
+ break;
+ case ICE_AQC_CAPS_OS2BMC:
+ caps->os2bmc = number;
+ ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
+ break;
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
+ caps->valid_functions);
+ break;
+ case ICE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
+ caps->sr_iov_1_1);
+ break;
+ case ICE_AQC_CAPS_802_1QBG:
+ caps->evb_802_1_qbg = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
+ break;
+ case ICE_AQC_CAPS_802_1BR:
+ caps->evb_802_1_qbh = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
+ break;
+ case ICE_AQC_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
+ caps->active_tc_bitmap);
+ ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
+ break;
+ case ICE_AQC_CAPS_ISCSI:
+ caps->iscsi = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
+ break;
+ case ICE_AQC_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
+ caps->rss_table_size);
+ ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
+ caps->rss_table_entry_width);
+ break;
+ case ICE_AQC_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
+ caps->num_rxq);
+ ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
+ caps->rxq_first_id);
+ break;
+ case ICE_AQC_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
+ caps->num_txq);
+ ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
+ caps->txq_first_id);
+ break;
+ case ICE_AQC_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
+ caps->num_msix_vectors);
+ ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
+ caps->msix_vector_first_id);
+ break;
+ case ICE_AQC_CAPS_NVM_VER:
+ break;
+ case ICE_AQC_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
+ caps->nvm_unified_update);
+ break;
+ case ICE_AQC_CAPS_CEM:
+ caps->mgmt_cem = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
+ caps->mgmt_cem);
+ break;
+ case ICE_AQC_CAPS_LED:
+ if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
+ caps->led[phys_id] = true;
+ caps->led_pin_num++;
+ ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
+ }
+ break;
+ case ICE_AQC_CAPS_SDP:
+ if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
+ caps->sdp[phys_id] = true;
+ caps->sdp_pin_num++;
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
+ }
+ break;
+ case ICE_AQC_CAPS_WR_CSR_PROT:
+ caps->wr_csr_prot = number;
+ caps->wr_csr_prot |= (u64)logical_id << 32;
+ ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
+ (unsigned long long)caps->wr_csr_prot);
+ break;
+ case ICE_AQC_CAPS_WOL_PROXY:
+ caps->num_wol_proxy_fltr = number;
+ caps->wol_proxy_vsi_seid = logical_id;
+ caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
+ caps->acpi_prog_mthd = !!(phys_id &
+ ICE_ACPI_PROG_MTHD_M);
+ caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
+ caps->num_wol_proxy_fltr);
+ ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
+ caps->wol_proxy_vsi_seid);
+ break;
+ case ICE_AQC_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
+ prefix, caps->max_mtu);
+ break;
+ default:
+ /* Not one of the recognized common capabilities */
+ found = false;
+ }
+
+ return found;
+}
+
+/**
+ * ice_recalc_port_limited_caps - Recalculate port limited capabilities
+ * @hw: pointer to the HW structure
+ * @caps: pointer to capabilities structure to fix
+ *
+ * Re-calculate the capabilities that are dependent on the number of physical
+ * ports; i.e. some features are not supported or function differently on
+ * devices with more than 4 ports.
*/
static void
-ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
- enum ice_adminq_opc opc)
+ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
{
- struct ice_aqc_list_caps_elem *cap_resp;
- struct ice_hw_func_caps *func_p = NULL;
- struct ice_hw_dev_caps *dev_p = NULL;
- struct ice_hw_common_caps *caps;
- char const *prefix;
- u32 i;
-
- if (!buf)
- return;
-
- cap_resp = (struct ice_aqc_list_caps_elem *)buf;
-
- if (opc == ice_aqc_opc_list_dev_caps) {
- dev_p = &hw->dev_caps;
- caps = &dev_p->common_cap;
- prefix = "dev cap";
- } else if (opc == ice_aqc_opc_list_func_caps) {
- func_p = &hw->func_caps;
- caps = &func_p->common_cap;
- prefix = "func cap";
- } else {
- ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
- return;
- }
-
- for (i = 0; caps && i < cap_count; i++, cap_resp++) {
- u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
- u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
- u32 number = LE32_TO_CPU(cap_resp->number);
- u16 cap = LE16_TO_CPU(cap_resp->cap);
-
- switch (cap) {
- case ICE_AQC_CAPS_SWITCHING_MODE:
- caps->switching_mode = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: switching_mode = %d\n", prefix,
- caps->switching_mode);
- break;
- case ICE_AQC_CAPS_MANAGEABILITY_MODE:
- caps->mgmt_mode = number;
- caps->mgmt_protocols_mctp = logical_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: mgmt_mode = %d\n", prefix,
- caps->mgmt_mode);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: mgmt_protocols_mctp = %d\n", prefix,
- caps->mgmt_protocols_mctp);
- break;
- case ICE_AQC_CAPS_OS2BMC:
- caps->os2bmc = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: os2bmc = %d\n", prefix, caps->os2bmc);
- break;
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
- caps->valid_functions = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: valid_functions (bitmap) = %d\n", prefix,
- caps->valid_functions);
-
- /* store func count for resource management purposes */
- if (dev_p)
- dev_p->num_funcs = ice_hweight32(number);
- break;
- case ICE_AQC_CAPS_SRIOV:
- caps->sr_iov_1_1 = (number == 1);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: sr_iov_1_1 = %d\n", prefix,
- caps->sr_iov_1_1);
- break;
- case ICE_AQC_CAPS_VF:
- if (dev_p) {
- dev_p->num_vfs_exposed = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_vfs_exposed = %d\n", prefix,
- dev_p->num_vfs_exposed);
- } else if (func_p) {
- func_p->num_allocd_vfs = number;
- func_p->vf_base_id = logical_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_allocd_vfs = %d\n", prefix,
- func_p->num_allocd_vfs);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: vf_base_id = %d\n", prefix,
- func_p->vf_base_id);
- }
- break;
- case ICE_AQC_CAPS_802_1QBG:
- caps->evb_802_1_qbg = (number == 1);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: evb_802_1_qbg = %d\n", prefix, number);
- break;
- case ICE_AQC_CAPS_802_1BR:
- caps->evb_802_1_qbh = (number == 1);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: evb_802_1_qbh = %d\n", prefix, number);
- break;
- case ICE_AQC_CAPS_VSI:
- if (dev_p) {
- dev_p->num_vsi_allocd_to_host = number;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_vsi_allocd_to_host = %d\n",
- prefix,
- dev_p->num_vsi_allocd_to_host);
- } else if (func_p) {
- func_p->guar_num_vsi =
- ice_get_num_per_func(hw, ICE_MAX_VSI);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: guar_num_vsi (fw) = %d\n",
- prefix, number);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: guar_num_vsi = %d\n",
- prefix, func_p->guar_num_vsi);
- }
- break;
- case ICE_AQC_CAPS_DCB:
- caps->dcb = (number == 1);
- caps->active_tc_bitmap = logical_id;
- caps->maxtc = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: dcb = %d\n", prefix, caps->dcb);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: active_tc_bitmap = %d\n", prefix,
- caps->active_tc_bitmap);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: maxtc = %d\n", prefix, caps->maxtc);
- break;
- case ICE_AQC_CAPS_ISCSI:
- caps->iscsi = (number == 1);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: iscsi = %d\n", prefix, caps->iscsi);
- break;
- case ICE_AQC_CAPS_RSS:
- caps->rss_table_size = number;
- caps->rss_table_entry_width = logical_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rss_table_size = %d\n", prefix,
- caps->rss_table_size);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rss_table_entry_width = %d\n", prefix,
- caps->rss_table_entry_width);
- break;
- case ICE_AQC_CAPS_RXQS:
- caps->num_rxq = number;
- caps->rxq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_rxq = %d\n", prefix,
- caps->num_rxq);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: rxq_first_id = %d\n", prefix,
- caps->rxq_first_id);
- break;
- case ICE_AQC_CAPS_TXQS:
- caps->num_txq = number;
- caps->txq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_txq = %d\n", prefix,
- caps->num_txq);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: txq_first_id = %d\n", prefix,
- caps->txq_first_id);
- break;
- case ICE_AQC_CAPS_MSIX:
- caps->num_msix_vectors = number;
- caps->msix_vector_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_msix_vectors = %d\n", prefix,
- caps->num_msix_vectors);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: msix_vector_first_id = %d\n", prefix,
- caps->msix_vector_first_id);
- break;
- case ICE_AQC_CAPS_NVM_VER:
- break;
- case ICE_AQC_CAPS_NVM_MGMT:
- caps->nvm_unified_update =
- (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
- true : false;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: nvm_unified_update = %d\n", prefix,
- caps->nvm_unified_update);
- break;
- case ICE_AQC_CAPS_CEM:
- caps->mgmt_cem = (number == 1);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: mgmt_cem = %d\n", prefix,
- caps->mgmt_cem);
- break;
- case ICE_AQC_CAPS_LED:
- if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
- caps->led[phys_id] = true;
- caps->led_pin_num++;
- }
- break;
- case ICE_AQC_CAPS_SDP:
- if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
- caps->sdp[phys_id] = true;
- caps->sdp_pin_num++;
- }
- break;
- case ICE_AQC_CAPS_WR_CSR_PROT:
- caps->wr_csr_prot = number;
- caps->wr_csr_prot |= (u64)logical_id << 32;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: wr_csr_prot = 0x%llX\n", prefix,
- (unsigned long long)caps->wr_csr_prot);
- break;
- case ICE_AQC_CAPS_WOL_PROXY:
- caps->num_wol_proxy_fltr = number;
- caps->wol_proxy_vsi_seid = logical_id;
- caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
- caps->acpi_prog_mthd = !!(phys_id &
- ICE_ACPI_PROG_MTHD_M);
- caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: num_wol_proxy_fltr = %d\n", prefix,
- caps->num_wol_proxy_fltr);
- ice_debug(hw, ICE_DBG_INIT,
- "%s: wol_proxy_vsi_seid = %d\n", prefix,
- caps->wol_proxy_vsi_seid);
- break;
- case ICE_AQC_CAPS_MAX_MTU:
- caps->max_mtu = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
- prefix, caps->max_mtu);
- break;
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "%s: unknown capability[%d]: 0x%x\n", prefix,
- i, cap);
- break;
- }
- }
-
- ice_print_led_caps(hw, caps, prefix, true);
- ice_print_sdp_caps(hw, caps, prefix, true);
-
- /* Re-calculate capabilities that are dependent on the number of
- * physical ports; i.e. some features are not supported or function
- * differently on devices with more than 4 ports.
+ /* This assumes device capabilities are always scanned before function
+ * capabilities during the initialization flow.
*/
if (hw->dev_caps.num_funcs > 4) {
/* Max 4 TCs per port */
caps->maxtc = 4;
- ice_debug(hw, ICE_DBG_INIT,
- "%s: maxtc = %d (based on #ports)\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
caps->maxtc);
}
}
/**
- * ice_aq_discover_caps - query function/device capabilities
+ * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
* @hw: pointer to the HW struct
- * @buf: a virtual buffer to hold the capabilities
- * @buf_size: Size of the virtual buffer
- * @cap_count: cap count needed if AQ err==ENOMEM
- * @opc: capabilities type to discover - pass in the command opcode
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_VF.
+ */
+static void
+ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = LE32_TO_CPU(cap->number);
+ u32 logical_id = LE32_TO_CPU(cap->logical_id);
+
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+ ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
+ func_p->num_allocd_vfs);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
+ func_p->vf_base_id);
+}
+
+/**
+ * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_VSI.
+ */
+static void
+ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
+ LE32_TO_CPU(cap->number));
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
+ func_p->guar_num_vsi);
+}
+
+/**
+ * ice_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ice_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void
+ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ice_aqc_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+
+ ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = LE16_TO_CPU(cap_resp[i].cap);
+ bool found;
+
+ found = ice_parse_common_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case ICE_AQC_CAPS_VF:
+ ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case ICE_AQC_CAPS_VSI:
+ ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
+ i, cap);
+ break;
+ }
+ }
+
+ ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
+ ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
+
+ ice_recalc_port_limited_caps(hw, &func_p->common_cap);
+}
+
+/**
+ * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = LE32_TO_CPU(cap->number);
+
+ dev_p->num_funcs = ice_hweight32(number);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
+ dev_p->num_funcs);
+}
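Aside: ice_hweight32() is a 32-bit population count; the valid-functions capability reports a bitmap with one bit per enabled PF, so the PF count is the number of set bits. A minimal sketch of the idea, with an illustrative helper name that is not part of this patch:

static u32 popcount32(u32 v)
{
	u32 count = 0;

	while (v) {
		v &= v - 1;	/* clear the lowest set bit */
		count++;
	}
	return count;
}

For example, a bitmap of 0x55 (PFs 0, 2, 4 and 6 enabled) yields num_funcs = 4.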
+
+/**
+ * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VF for device capabilities.
+ */
+static void
+ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = LE32_TO_CPU(cap->number);
+
+ dev_p->num_vfs_exposed = number;
+ ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
+ dev_p->num_vfs_exposed);
+}
+
+/**
+ * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VSI for device capabilities.
+ */
+static void
+ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ u32 number = LE32_TO_CPU(cap->number);
+
+ dev_p->num_vsi_allocd_to_host = number;
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
+ dev_p->num_vsi_allocd_to_host);
+}
+
+/**
+ * ice_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ice_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void
+ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ice_aqc_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+
+ ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = LE16_TO_CPU(cap_resp[i].cap);
+ bool found;
+
+ found = ice_parse_common_caps(hw, &dev_p->common_cap,
+ &cap_resp[i], "dev caps");
+
+ switch (cap) {
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
+ break;
+ case ICE_AQC_CAPS_VF:
+ ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case ICE_AQC_CAPS_VSI:
+ ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
+ i, cap);
+ break;
+ }
+ }
+
+ ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
+ ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
+
+ ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
+}
+
+/**
+ * ice_aq_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
* @cd: pointer to command details structure or NULL
*
- * Get the function(0x000a)/device(0x000b) capabilities description from
- * the firmware.
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return ICE_AQ_RC_ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
+ * firmware could return) to avoid this.
*/
-enum ice_status
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+static enum ice_status
+ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
struct ice_aq_desc desc;
@@ -2072,59 +2306,78 @@
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
-
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
- if (!status)
- ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
- else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+
+ if (cap_count)
*cap_count = LE32_TO_CPU(cmd->count);
+
return status;
}
/**
- * ice_discover_caps - get info about the HW
+ * ice_discover_dev_caps - Read and extract device capabilities
* @hw: pointer to the hardware structure
- * @opc: capabilities type to discover - pass in the command opcode
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
*/
static enum ice_status
-ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
+ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
enum ice_status status;
- u32 cap_count;
- u16 cbuf_len;
- u8 retries;
+ u32 cap_count = 0;
+ void *cbuf;
- /* The driver doesn't know how many capabilities the device will return
- * so the buffer size required isn't known ahead of time. The driver
- * starts with cbuf_len and if this turns out to be insufficient, the
- * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
- * The driver then allocates the buffer based on the count and retries
- * the operation. So it follows that the retry count is 2.
+ cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
+ if (!cbuf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
*/
-#define ICE_GET_CAP_BUF_COUNT 40
-#define ICE_GET_CAP_RETRY_COUNT 2
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
- cap_count = ICE_GET_CAP_BUF_COUNT;
- retries = ICE_GET_CAP_RETRY_COUNT;
+ status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
+ ice_aqc_opc_list_dev_caps, NULL);
+ if (!status)
+ ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+ ice_free(hw, cbuf);
- do {
- void *cbuf;
+ return status;
+}
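Aside: the initial cap_count here is simply the element capacity of the buffer; ice_aq_list_caps() then overwrites it with the count firmware actually reports. Assuming a 4096-byte ICE_AQ_MAX_BUF_LEN and a 32-byte struct ice_aqc_list_caps_elem (sizes shown for illustration, not asserted by this patch), that is 4096 / 32 = 128 entries.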
- cbuf_len = (u16)(cap_count *
- sizeof(struct ice_aqc_list_caps_elem));
- cbuf = ice_malloc(hw, cbuf_len);
- if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+/**
+ * ice_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ */
+static enum ice_status
+ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
+{
+ enum ice_status status;
+ u32 cap_count = 0;
+ void *cbuf;
- status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
- opc, NULL);
- ice_free(hw, cbuf);
+ cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
+ if (!cbuf)
+ return ICE_ERR_NO_MEMORY;
- if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
- break;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
- /* If ENOMEM is returned, try again with bigger buffer */
- } while (--retries);
+ status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
+ ice_aqc_opc_list_func_caps, NULL);
+ if (!status)
+ ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
+ ice_free(hw, cbuf);
return status;
}
@@ -2137,26 +2390,25 @@
{
struct ice_hw_func_caps *func_caps = &hw->func_caps;
struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
- u32 valid_func, rxq_first_id, txq_first_id;
- u32 msix_vector_first_id, max_mtu;
+ struct ice_hw_common_caps cached_caps;
u32 num_funcs;
/* cache some func_caps values that should be restored after memset */
- valid_func = func_caps->common_cap.valid_functions;
- txq_first_id = func_caps->common_cap.txq_first_id;
- rxq_first_id = func_caps->common_cap.rxq_first_id;
- msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
- max_mtu = func_caps->common_cap.max_mtu;
+ cached_caps = func_caps->common_cap;
/* unset func capabilities */
memset(func_caps, 0, sizeof(*func_caps));
+#define ICE_RESTORE_FUNC_CAP(name) \
+ func_caps->common_cap.name = cached_caps.name
+
/* restore cached values */
- func_caps->common_cap.valid_functions = valid_func;
- func_caps->common_cap.txq_first_id = txq_first_id;
- func_caps->common_cap.rxq_first_id = rxq_first_id;
- func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
- func_caps->common_cap.max_mtu = max_mtu;
+ ICE_RESTORE_FUNC_CAP(valid_functions);
+ ICE_RESTORE_FUNC_CAP(txq_first_id);
+ ICE_RESTORE_FUNC_CAP(rxq_first_id);
+ ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
+ ICE_RESTORE_FUNC_CAP(max_mtu);
+ ICE_RESTORE_FUNC_CAP(nvm_unified_update);
/* one Tx and one Rx queue in safe mode */
func_caps->common_cap.num_rxq = 1;
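Aside: the restore macro only token-pastes the field name, so ICE_RESTORE_FUNC_CAP(max_mtu) expands to:

	func_caps->common_cap.max_mtu = cached_caps.max_mtu;

which keeps the cached-value list readable as more fields (such as the new nvm_unified_update) are added.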
@@ -2167,22 +2419,22 @@
func_caps->guar_num_vsi = 1;
/* cache some dev_caps values that should be restored after memset */
- valid_func = dev_caps->common_cap.valid_functions;
- txq_first_id = dev_caps->common_cap.txq_first_id;
- rxq_first_id = dev_caps->common_cap.rxq_first_id;
- msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
- max_mtu = dev_caps->common_cap.max_mtu;
+ cached_caps = dev_caps->common_cap;
num_funcs = dev_caps->num_funcs;
/* unset dev capabilities */
memset(dev_caps, 0, sizeof(*dev_caps));
+#define ICE_RESTORE_DEV_CAP(name) \
+ dev_caps->common_cap.name = cached_caps.name
+
/* restore cached values */
- dev_caps->common_cap.valid_functions = valid_func;
- dev_caps->common_cap.txq_first_id = txq_first_id;
- dev_caps->common_cap.rxq_first_id = rxq_first_id;
- dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
- dev_caps->common_cap.max_mtu = max_mtu;
+ ICE_RESTORE_DEV_CAP(valid_functions);
+ ICE_RESTORE_DEV_CAP(txq_first_id);
+ ICE_RESTORE_DEV_CAP(rxq_first_id);
+ ICE_RESTORE_DEV_CAP(msix_vector_first_id);
+ ICE_RESTORE_DEV_CAP(max_mtu);
+ ICE_RESTORE_DEV_CAP(nvm_unified_update);
dev_caps->num_funcs = num_funcs;
/* one Tx and one Rx queue per function in safe mode */
@@ -2201,11 +2453,11 @@
{
enum ice_status status;
- status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
- if (!status)
- status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+ status = ice_discover_dev_caps(hw, &hw->dev_caps);
+ if (status)
+ return status;
- return status;
+ return ice_discover_func_caps(hw, &hw->func_caps);
}
/**
@@ -2499,8 +2751,7 @@
/* Ensure that only valid bits of cfg->caps can be turned on. */
if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
- ice_debug(hw, ICE_DBG_PHY,
- "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
+ ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
cfg->caps);
cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
@@ -2510,19 +2761,24 @@
desc.params.set_phy.lport_num = pi->lport;
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
- ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
(unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
- ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
(unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
- ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
- ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl_an = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
+ ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
cfg->low_power_ctrl_an);
- ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
- ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
- ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+ ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
+ cfg->link_fec_opt);
status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ status = ICE_SUCCESS;
+
if (!status)
pi->phy.curr_user_phy_cfg = *cfg;
@@ -2559,10 +2815,6 @@
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
pcaps, NULL);
- if (status == ICE_SUCCESS)
- ice_memcpy(li->module_type, &pcaps->module_type,
- sizeof(li->module_type),
- ICE_NONDMA_TO_NONDMA);
ice_free(hw, pcaps);
}
@@ -2650,53 +2902,48 @@
}
/**
- * ice_set_fc
+ * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
* @pi: port information structure
- * @aq_failures: pointer to status code, specific to ice_set_fc routine
- * @ena_auto_link_update: enable automatic link update
- *
- * Set the requested flow control mode.
+ * @cfg: PHY configuration data in which to set the FC mode
+ * @req_mode: FC mode to configure
*/
-enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+static enum ice_status
+ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fc_mode req_mode)
{
- struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_phy_cache_mode_data cache_data;
- struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
u8 pause_mask = 0x0;
- struct ice_hw *hw;
- if (!pi || !aq_failures)
- return ICE_ERR_PARAM;
+ if (!pi || !cfg)
+ return ICE_ERR_BAD_PTR;
- hw = pi->hw;
- *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
-
- /* Cache user FC request */
- cache_data.data.curr_user_fc_req = pi->fc.req_mode;
- ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
-
- pcaps = (struct ice_aqc_get_phy_caps_data *)
- ice_malloc(hw, sizeof(*pcaps));
- if (!pcaps)
- return ICE_ERR_NO_MEMORY;
-
- switch (pi->fc.req_mode) {
+ switch (req_mode) {
case ICE_FC_AUTO:
+ {
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(pi->hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
/* Query the value of FC that both the NIC and attached media
* can do.
*/
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
pcaps, NULL);
if (status) {
- *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
- goto out;
+ ice_free(pi->hw, pcaps);
+ return status;
}
pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+
+ ice_free(pi->hw, pcaps);
break;
+ }
case ICE_FC_FULL:
pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
@@ -2711,8 +2958,48 @@
break;
}
+ /* clear the old pause settings */
+ cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
+ ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg->caps |= pause_mask;
+
+ /* Cache user FC request */
+ cache_data.data.curr_user_fc_req = req_mode;
+ ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_set_fc
+ * @pi: port information structure
+ * @aq_failures: pointer to status code, specific to ice_set_fc routine
+ * @ena_auto_link_update: enable automatic link update
+ *
+ * Set the requested flow control mode.
+ */
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+{
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!pi || !aq_failures)
+ return ICE_ERR_BAD_PTR;
+
+ *aq_failures = 0;
+ hw = pi->hw;
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
/* Get the current PHY config */
- ice_memset(pcaps, 0, sizeof(*pcaps), ICE_NONDMA_MEM);
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status) {
@@ -2722,12 +3009,14 @@
ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
- /* clear the old pause settings */
- cfg.caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
- ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+ /* Configure the set PHY data */
+ status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
+ if (status) {
+ if (status != ICE_ERR_BAD_PTR)
+ *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
- /* set the new capabilities */
- cfg.caps |= pause_mask;
+ goto out;
+ }
/* If the capabilities have changed, then set the new config */
if (cfg.caps != pcaps->caps) {
@@ -2871,6 +3160,9 @@
if (status)
goto out;
+ cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
+ cfg->link_fec_opt = pcaps->link_fec_options;
+
switch (fec) {
case ICE_FEC_BASER:
/* Clear RS bits, and AND BASE-R ability
@@ -2943,8 +3235,7 @@
status = ice_update_link_info(pi);
if (status)
- ice_debug(pi->hw, ICE_DBG_LINK,
- "get link status error, status = %d\n",
+ ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
status);
}
@@ -3339,10 +3630,10 @@
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
{
- u16 i, sum_header_size, sum_q_size = 0;
struct ice_aqc_add_tx_qgrp *list;
struct ice_aqc_add_txqs *cmd;
struct ice_aq_desc desc;
+ u16 i, sum_size = 0;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -3356,18 +3647,13 @@
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
return ICE_ERR_PARAM;
- sum_header_size = num_qgrps *
- (sizeof(*qg_list) - sizeof(*qg_list->txqs));
-
- list = qg_list;
- for (i = 0; i < num_qgrps; i++) {
- struct ice_aqc_add_txqs_perq *q = list->txqs;
-
- sum_q_size += list->num_txqs * sizeof(*q);
- list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
+ for (i = 0, list = qg_list; i < num_qgrps; i++) {
+ sum_size += ice_struct_size(list, txqs, list->num_txqs);
+ list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
+ list->num_txqs);
}
- if (buf_size != (sum_header_size + sum_q_size))
+ if (buf_size != sum_size)
return ICE_ERR_PARAM;
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
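Aside: this and the following hunks size variable-length admin queue buffers with ice_struct_size(). A sketch of what such a helper computes, assuming it follows the usual flexible-array idiom (the real macro lives in the driver's compatibility headers and may differ in spelling):

#include <stddef.h>	/* offsetof */

/* Sketch only: bytes needed for a struct that ends in a flexible
 * array member, i.e. the offset of the array plus n elements.
 */
#define ICE_STRUCT_SIZE_SKETCH(ptr, member, n) \
	(offsetof(__typeof__(*(ptr)), member) + \
	 (size_t)(n) * sizeof(*(ptr)->member))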
@@ -3395,6 +3681,7 @@
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
+ struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
enum ice_status status;
@@ -3445,16 +3732,16 @@
*/
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
- for (i = 0; i < num_qgrps; ++i) {
- /* Calculate the size taken up by the queue IDs in this group */
- sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
-
- /* Add the size of the group header */
- sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
+ for (i = 0, item = qg_list; i < num_qgrps; i++) {
+ u16 item_size = ice_struct_size(item, q_id, item->num_qs);
/* If the num of queues is even, add 2 bytes of padding */
- if ((qg_list[i].num_qs % 2) == 0)
- sz += 2;
+ if ((item->num_qs % 2) == 0)
+ item_size += 2;
+
+ sz += item_size;
+
+ item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
}
if (buf_size != sz)
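Aside: a worked example of the padding rule, assuming a 6-byte struct ice_aqc_dis_txq_item header and 2-byte queue IDs (sizes illustrative): an item with num_qs == 2 occupies 6 + 2 * 2 = 10 bytes plus 2 bytes of padding because the queue count is even, giving item_size == 12, while num_qs == 3 occupies 6 + 6 = 12 bytes with no padding.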
@@ -3733,12 +4020,14 @@
/**
* ice_set_ctx - set context bits in packed structure
+ * @hw: pointer to the hardware structure
* @src_ctx: pointer to a generic non-packed context structure
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
enum ice_status
-ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
+ const struct ice_ctx_ele *ce_info)
{
int f;
@@ -3747,6 +4036,11 @@
* using the correct size so that we are correct regardless
* of the endianness of the machine.
*/
+ if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
+ ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
+ f, ce_info[f].width, ce_info[f].size_of);
+ continue;
+ }
switch (ce_info[f].size_of) {
case sizeof(u8):
ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
@@ -4073,7 +4367,18 @@
* Without setting the generic section as valid in valid_sections, the
* Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
- buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+ buf->txqs[0].info.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->txqs[0].info.generic = 0;
+ buf->txqs[0].info.cir_bw.bw_profile_idx =
+ CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.cir_bw.bw_alloc =
+ CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
+ buf->txqs[0].info.eir_bw.bw_profile_idx =
+ CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->txqs[0].info.eir_bw.bw_alloc =
+ CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
/* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
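Aside: CIR and EIR are the committed and excess information rate limiters on the queue's scheduler node. Marking both sections valid and programming ICE_SCHED_DFLT_RL_PROF_ID with the default bandwidth weight explicitly resets the queue to the default rate-limiting profiles rather than leaving stale profile indices in place.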
@@ -4121,24 +4426,32 @@
struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
- struct ice_aqc_dis_txq_item qg_list;
+ struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
- u16 i;
+ struct ice_hw *hw;
+ u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
+ hw = pi->hw;
+
if (!num_queues) {
/* if queue is disabled already yet the disable queue command
* has to be sent to complete the VF reset, then call
* ice_aq_dis_lan_txq without any queue information
*/
if (rst_src)
- return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+ return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
vmvf_num, NULL);
return ICE_ERR_CFG;
}
+ buf_size = ice_struct_size(qg_list, q_id, 1);
+ qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
+ if (!qg_list)
+ return ICE_ERR_NO_MEMORY;
+
ice_acquire_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -4147,23 +4460,22 @@
node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
if (!node)
continue;
- q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
if (!q_ctx) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
q_handles[i]);
continue;
}
if (q_ctx->q_handle != q_handles[i]) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
q_ctx->q_handle, q_handles[i]);
continue;
}
- qg_list.parent_teid = node->info.parent_teid;
- qg_list.num_qs = 1;
- qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
- status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
- sizeof(qg_list), rst_src, vmvf_num,
- cd);
+ qg_list->parent_teid = node->info.parent_teid;
+ qg_list->num_qs = 1;
+ qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
+ status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
+ vmvf_num, cd);
if (status != ICE_SUCCESS)
break;
@@ -4171,6 +4483,7 @@
q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
}
ice_release_lock(&pi->sched_lock);
+ ice_free(hw, qg_list);
return status;
}
@@ -4231,19 +4544,34 @@
ICE_SCHED_NODE_OWNER_LAN);
}
+/**
+ * ice_is_main_vsi - checks whether the VSI is the main VSI
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ *
+ * Checks whether the VSI is the main VSI (the first PF VSI created on a
+ * given PF).
+ */
+static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+ return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
+}
+
/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the HW struct
+ * @sw: pointer to switch info struct for which function initializes filters
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
-static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+static enum ice_status
+ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
- struct ice_switch_info *sw = hw->switch_info;
+ enum ice_status status;
u8 i;
/* Delete old entries from replay filter list head if there is any */
- ice_rm_all_sw_replay_rule_info(hw);
+ ice_rm_sw_replay_rule_info(hw, sw);
/* In start of replay, move entries into replay_rules list, it
* will allow adding rules entries back to filt_rules list,
* which is operational list.
@@ -4253,6 +4581,10 @@
&sw->recp_list[i].filt_replay_rules);
ice_sched_replay_agg_vsi_preinit(hw);
+ status = ice_sched_replay_root_node_bw(hw->port_info);
+ if (status)
+ return status;
+
return ice_sched_replay_tc_node_bw(hw->port_info);
}
@@ -4266,14 +4598,16 @@
*/
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_port_info *pi = hw->port_info;
enum ice_status status;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
/* Replay pre-initialization if there is any */
- if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
- status = ice_replay_pre_init(hw);
+ if (ice_is_main_vsi(hw, vsi_handle)) {
+ status = ice_replay_pre_init(hw, sw);
if (status)
return status;
}
@@ -4282,7 +4616,7 @@
if (status)
return status;
/* Replay per VSI all filters */
- status = ice_replay_vsi_all_fltr(hw, vsi_handle);
+ status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
if (!status)
status = ice_replay_vsi_agg(hw, vsi_handle);
return status;
@@ -4561,14 +4895,14 @@
*/
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf)
+ struct ice_aqc_txsched_elem_data *buf)
{
u16 buf_size, num_elem_ret = 0;
enum ice_status status;
buf_size = sizeof(*buf);
ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
- buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
+ buf->node_teid = CPU_TO_LE32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
if (status != ICE_SUCCESS || num_elem_ret != 1)
@@ -4691,7 +5025,7 @@
loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
loc_data += loc_data_tmp;
- /* We need to skip LLDP configuration section length (2 bytes)*/
+ /* We need to skip LLDP configuration section length (2 bytes) */
loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
/* Read the LLDP Default Configure */
@@ -4713,88 +5047,6 @@
return ret;
}
-/**
- * ice_get_netlist_ver_info
- * @hw: pointer to the HW struct
- *
- * Get the netlist version information
- */
-enum ice_status
-ice_get_netlist_ver_info(struct ice_hw *hw)
-{
- struct ice_netlist_ver_info *ver = &hw->netlist_ver;
- enum ice_status ret;
- u32 id_blk_start;
- __le16 raw_data;
- u16 data, i;
- u16 *buff;
-
- ret = ice_acquire_nvm(hw, ICE_RES_READ);
- if (ret)
- return ret;
- buff = (u16 *)ice_calloc(hw, ICE_AQC_NVM_NETLIST_ID_BLK_LEN,
- sizeof(*buff));
- if (!buff) {
- ret = ICE_ERR_NO_MEMORY;
- goto exit_no_mem;
- }
-
- /* read module length */
- ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
- ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
- ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
- false, false, NULL);
- if (ret)
- goto exit_error;
-
- data = LE16_TO_CPU(raw_data);
- /* exit if length is = 0 */
- if (!data)
- goto exit_error;
-
- /* read node count */
- ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
- ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
- ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
- false, false, NULL);
- if (ret)
- goto exit_error;
- data = LE16_TO_CPU(raw_data);
-
- /* netlist ID block starts from offset 4 + node count * 2 */
- id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
-
- /* read the entire netlist ID block */
- ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
- id_blk_start * 2,
- ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
- false, NULL);
- if (ret)
- goto exit_error;
-
- for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
- buff[i] = LE16_TO_CPU(((_FORCE_ __le16 *)buff)[i]);
-
- ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
- ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
- ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
- ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
- ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
- /* Read the left most 4 bytes of SHA */
- ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
-
-exit_error:
- ice_free(hw, buff);
-exit_no_mem:
- ice_release_nvm(hw);
- return ret;
-}
-
/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
@@ -4834,8 +5086,7 @@
status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read link override TLV.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
return status;
}
@@ -4846,8 +5097,7 @@
/* link options first */
status = ice_read_sr_word(hw, tlv_start, &buf);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read override link options.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
return status;
}
ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
@@ -4858,8 +5108,7 @@
offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
status = ice_read_sr_word(hw, offset, &buf);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read override phy config.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
return status;
}
ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
@@ -4869,8 +5118,7 @@
for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
status = ice_read_sr_word(hw, (offset + i), &buf);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read override link options.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
return status;
}
/* shift 16 bits at a time to fill 64 bits */
@@ -4883,8 +5131,7 @@
for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
status = ice_read_sr_word(hw, (offset + i), &buf);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read override link options.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
return status;
}
/* shift 16 bits at a time to fill 64 bits */
@@ -4893,3 +5140,98 @@
return status;
}
+
+/**
+ * ice_is_phy_caps_an_enabled - check if autoneg is enabled in PHY caps
+ * @caps: PHY capability data to check
+ */
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
+{
+ if (caps->caps & ICE_AQC_PHY_AN_MODE ||
+ caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
+ ICE_AQC_PHY_AN_EN_CLAUSE73 |
+ ICE_AQC_PHY_AN_EN_CLAUSE37))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer holding the MIB block to set
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = CPU_TO_LE16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = CPU_TO_LE16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_fw_supports_lldp_fltr_ctrl - check if FW API version supports lldp_fltr_ctrl
+ * @hw: pointer to HW struct
+ */
+bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
+{
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
+ if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
+ return true;
+ if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
+ hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
+ return true;
+ } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
+ return true;
+ }
+ return false;
+}
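Aside: on E810 parts the branches above reduce to a lexicographic comparison of (maj, min, patch) against the required LLDP-filter API version. An equivalent sketch, with an illustrative helper name:

static bool api_ver_ge(u8 maj, u8 min, u8 patch,
		       u8 req_maj, u8 req_min, u8 req_patch)
{
	if (maj != req_maj)
		return maj > req_maj;
	if (min != req_min)
		return min > req_min;
	return patch >= req_patch;
}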
+
+/**
+ * ice_lldp_fltr_add_remove - add or remove an LLDP Rx switch filter
+ * @hw: pointer to HW struct
+ * @vsi_num: absolute HW index for VSI
+ * @add: boolean for if adding or removing a filter
+ */
+enum ice_status
+ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
+{
+ struct ice_aqc_lldp_filter_ctrl *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_filter_ctrl;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
+
+ if (add)
+ cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
+ else
+ cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
+
+ cmd->vsi_num = CPU_TO_LE16(vsi_num);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
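Aside: a minimal usage sketch tying the two new helpers together; vsi_num and the surrounding error handling are illustrative:

	/* Redirect LLDP frames to this VSI only when FW supports it. */
	if (ice_fw_supports_lldp_fltr_ctrl(hw))
		status = ice_lldp_fltr_add_remove(hw, vsi_num, true);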
Index: sys/dev/ice/ice_controlq.h
===================================================================
--- sys/dev/ice/ice_controlq.h
+++ sys/dev/ice/ice_controlq.h
@@ -63,6 +63,8 @@
/* Control Queue timeout settings - max delay 250ms */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
+#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
+#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
@@ -88,6 +90,7 @@
u32 bal;
u32 len_mask;
u32 len_ena_mask;
+ u32 len_crit_mask;
u32 head_mask;
};
Index: sys/dev/ice/ice_controlq.c
===================================================================
--- sys/dev/ice/ice_controlq.c
+++ sys/dev/ice/ice_controlq.c
@@ -41,6 +41,7 @@
(qinfo)->sq.bal = prefix##_ATQBAL; \
(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
+ (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \
(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
(qinfo)->rq.head = prefix##_ARQH; \
(qinfo)->rq.tail = prefix##_ARQT; \
@@ -49,6 +50,7 @@
(qinfo)->rq.bal = prefix##_ARQBAL; \
(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
+ (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \
(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
} while (0)
@@ -208,7 +210,9 @@
i--;
for (; i >= 0; i--)
ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
+ cq->rq.r.rq_bi = NULL;
ice_free(hw, cq->rq.dma_head);
+ cq->rq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -246,7 +250,9 @@
i--;
for (; i >= 0; i--)
ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
+ cq->sq.r.sq_bi = NULL;
ice_free(hw, cq->sq.dma_head);
+ cq->sq.dma_head = NULL;
return ICE_ERR_NO_MEMORY;
}
@@ -305,6 +311,24 @@
return ICE_SUCCESS;
}
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ /* free descriptors */ \
+ if ((qi)->ring.r.ring##_bi) { \
+ int i; \
+ \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) \
+ ice_free_dma_mem((hw), \
+ &(qi)->ring.r.ring##_bi[i]); \
+ } \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ ice_free(hw, (qi)->ring.cmd_buf); \
+ /* free DMA head */ \
+ ice_free(hw, (qi)->ring.dma_head); \
+} while (0)
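Aside: for reference, ICE_FREE_CQ_BUFS(hw, cq, sq) token-pastes ring as sq and expands to (reformatted):

	if (cq->sq.r.sq_bi) {
		int i;

		for (i = 0; i < cq->num_sq_entries; i++)
			if (cq->sq.r.sq_bi[i].pa)
				ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
	}
	if (cq->sq.cmd_buf)
		ice_free(hw, cq->sq.cmd_buf);
	ice_free(hw, cq->sq.dma_head);

The new NULL check on the buffer array pairs with the error paths above that now clear rq_bi/sq_bi and dma_head, making the teardown safe on a partially initialized queue.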
+
/**
* ice_init_sq - main initialization routine for Control ATQ
* @hw: pointer to the hardware structure
@@ -360,6 +384,7 @@
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
@@ -421,27 +446,13 @@
goto init_ctrlq_exit;
init_ctrlq_free_rings:
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
-#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
-do { \
- int i; \
- /* free descriptors */ \
- for (i = 0; i < (qi)->num_##ring##_entries; i++) \
- if ((qi)->ring.r.ring##_bi[i].pa) \
- ice_free_dma_mem((hw), \
- &(qi)->ring.r.ring##_bi[i]); \
- /* free the buffer info list */ \
- if ((qi)->ring.cmd_buf) \
- ice_free(hw, (qi)->ring.cmd_buf); \
- /* free DMA head */ \
- ice_free(hw, (qi)->ring.dma_head); \
-} while (0)
-
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@@ -650,73 +661,6 @@
return ret_code;
}
-/**
- * ice_init_all_ctrlq - main initialization routine for all control queues
- * @hw: pointer to the hardware structure
- *
- * Prior to calling this function, the driver MUST* set the following fields
- * in the cq->structure for all control queues:
- * - cq->num_sq_entries
- * - cq->num_rq_entries
- * - cq->rq_buf_size
- * - cq->sq_buf_size
- *
- * NOTE: this function does not initialize the controlq locks.
- */
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
-{
- enum ice_status status;
-
- ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
-
- /* Init FW admin queue */
- status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
- if (status)
- return status;
-
- status = ice_init_check_adminq(hw);
- if (status)
- return status;
- /* Init Mailbox queue */
- return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
-}
-
-/**
- * ice_init_ctrlq_locks - Initialize locks for a control queue
- * @cq: pointer to the control queue
- *
- * Initializes the send and receive queue locks for a given control queue.
- */
-static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
-{
- ice_init_lock(&cq->sq_lock);
- ice_init_lock(&cq->rq_lock);
-}
-
-/**
- * ice_create_all_ctrlq - main initialization routine for all control queues
- * @hw: pointer to the hardware structure
- *
- * Prior to calling this function, the driver *MUST* set the following fields
- * in the cq->structure for all control queues:
- * - cq->num_sq_entries
- * - cq->num_rq_entries
- * - cq->rq_buf_size
- * - cq->sq_buf_size
- *
- * This function creates all the control queue locks and then calls
- * ice_init_all_ctrlq. It should be called once during driver load. If the
- * driver needs to re-initialize control queues at run time it should call
- * ice_init_all_ctrlq instead.
- */
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
-{
- ice_init_ctrlq_locks(&hw->adminq);
- ice_init_ctrlq_locks(&hw->mailboxq);
-
- return ice_init_all_ctrlq(hw);
-}
-
/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
@@ -764,14 +708,90 @@
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
+/**
+ * ice_init_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
+ */
+enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+{
+ enum ice_status status;
+ u32 retry = 0;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Init FW admin queue */
+ do {
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ if (status)
+ return status;
+
+ status = ice_init_check_adminq(hw);
+ if (status != ICE_ERR_AQ_FW_CRITICAL)
+ break;
+
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
+ } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
+
+ if (status)
+ return status;
+ /* Init Mailbox queue */
+ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+}
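Aside: with ICE_CTL_Q_ADMIN_INIT_TIMEOUT == 10 and ICE_CTL_Q_ADMIN_INIT_MSEC == 100, a persistent ICE_ERR_AQ_FW_CRITICAL is retried for roughly one second of sleep time (about 10 x 100 ms) plus the per-attempt init and check cost before the error is finally propagated.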
+
+/**
+ * ice_init_ctrlq_locks - Initialize locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Initializes the send and receive queue locks for a given control queue.
+ */
+static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ ice_init_lock(&cq->sq_lock);
+ ice_init_lock(&cq->rq_lock);
+}
+
+/**
+ * ice_create_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * This function creates all the control queue locks and then calls
+ * ice_init_all_ctrlq. It should be called once during driver load. If the
+ * driver needs to re-initialize control queues at run time it should call
+ * ice_init_all_ctrlq instead.
+ */
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+{
+ ice_init_ctrlq_locks(&hw->adminq);
+ ice_init_ctrlq_locks(&hw->mailboxq);
+
+ return ice_init_all_ctrlq(hw);
+}
+
/**
* ice_destroy_ctrlq_locks - Destroy locks for a control queue
* @cq: pointer to the control queue
*
* Destroys the send and receive queue locks for a given control queue.
*/
-static void
-ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
ice_destroy_lock(&cq->sq_lock);
ice_destroy_lock(&cq->rq_lock);
@@ -813,8 +833,7 @@
details = ICE_CTL_Q_DETAILS(*sq, ntc);
while (rd32(hw, cq->sq.head) != ntc) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
+ ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
ntc++;
@@ -852,8 +871,7 @@
datalen = LE16_TO_CPU(cq_desc->datalen);
flags = LE16_TO_CPU(cq_desc->flags);
- ice_debug(hw, ICE_DBG_AQ_DESC,
- "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(cq_desc->opcode), flags, datalen,
LE16_TO_CPU(cq_desc->retval));
ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
@@ -926,8 +944,7 @@
cq->sq_last_status = ICE_AQ_RC_OK;
if (!cq->sq.count) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send queue not initialized.\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
status = ICE_ERR_AQ_EMPTY;
goto sq_send_command_error;
}
@@ -939,8 +956,7 @@
if (buf) {
if (buf_size > cq->sq_buf_size) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Invalid buffer size for Control Send queue: %d.\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
buf_size);
status = ICE_ERR_INVAL_SIZE;
goto sq_send_command_error;
@@ -953,8 +969,7 @@
val = rd32(hw, cq->sq.head);
if (val >= cq->num_sq_entries) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "head overrun at %d in the Control Send Queue ring\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
val);
status = ICE_ERR_AQ_EMPTY;
goto sq_send_command_error;
@@ -972,8 +987,7 @@
* called in a separate thread in case of asynchronous completions.
*/
if (ice_clean_sq(hw, cq) == 0) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Error: Control Send Queue is full.\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
status = ICE_ERR_AQ_FULL;
goto sq_send_command_error;
}
@@ -1002,8 +1016,7 @@
}
/* Debug desc and buffer */
- ice_debug(hw, ICE_DBG_AQ_DESC,
- "ATQ: Control Send queue desc and buffer:\n");
+ ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
@@ -1029,8 +1042,7 @@
u16 copy_size = LE16_TO_CPU(desc->datalen);
if (copy_size > buf_size) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Return len %d > than buf len %d\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
copy_size, buf_size);
status = ICE_ERR_AQ_ERROR;
} else {
@@ -1040,8 +1052,7 @@
}
retval = LE16_TO_CPU(desc->retval);
if (retval) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue command 0x%04X completed with error 0x%X\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
LE16_TO_CPU(desc->opcode),
retval);
@@ -1054,8 +1065,7 @@
cq->sq_last_status = (enum ice_aq_err)retval;
}
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "ATQ: desc and buffer writeback:\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
ice_debug_cq(hw, (void *)desc, buf, buf_size);
@@ -1066,9 +1076,14 @@
/* update the error if time out occurred */
if (!cmd_completed) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Send Queue Writeback timeout.\n");
- status = ICE_ERR_AQ_TIMEOUT;
+ if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
+ rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
+ status = ICE_ERR_AQ_FW_CRITICAL;
+ } else {
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
+ status = ICE_ERR_AQ_TIMEOUT;
+ }
}
sq_send_command_error:
@@ -1151,8 +1166,7 @@
ice_acquire_lock(&cq->rq_lock);
if (!cq->rq.count) {
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Receive queue not initialized.\n");
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
ret_code = ICE_ERR_AQ_EMPTY;
goto clean_rq_elem_err;
}
@@ -1174,8 +1188,7 @@
flags = LE16_TO_CPU(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
- ice_debug(hw, ICE_DBG_AQ_MSG,
- "Control Receive Queue Event 0x%04X received with error 0x%X\n",
+ ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
LE16_TO_CPU(desc->opcode),
cq->rq_last_status);
}
@@ -1188,8 +1201,7 @@
ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
- ice_debug_cq(hw, (void *)desc, e->msg_buf,
- cq->rq_buf_size);
+ ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size
Index: sys/dev/ice/ice_dcb.h
===================================================================
--- sys/dev/ice/ice_dcb.h
+++ sys/dev/ice/ice_dcb.h
@@ -131,17 +131,11 @@
#define ICE_IEEE_APP_TLV_LEN 11
#pragma pack(1)
-/* IEEE 802.1AB LLDP TLV structure */
-struct ice_lldp_generic_tlv {
- __be16 typelen;
- u8 tlvinfo[1];
-};
-
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
__be32 ouisubtype;
- u8 tlvinfo[1];
+ u8 tlvinfo[STRUCT_HACK_VAR_LEN];
};
#pragma pack()
@@ -164,7 +158,7 @@
#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
#define ICE_CEE_FEAT_TLV_ERR_M 0x20
u8 subtype;
- u8 tlvinfo[1];
+ u8 tlvinfo[STRUCT_HACK_VAR_LEN];
};
#pragma pack(1)
@@ -219,9 +213,6 @@
u16 buf_size, u16 old_len, u16 new_len, u16 offset,
u16 *mib_len, struct ice_sq_cd *cd);
enum ice_status
-ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
- struct ice_sq_cd *cd);
-enum ice_status
ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
struct ice_sq_cd *cd);
enum ice_status
Index: sys/dev/ice/ice_dcb.c
===================================================================
--- sys/dev/ice/ice_dcb.c
+++ sys/dev/ice/ice_dcb.c
@@ -265,39 +265,6 @@
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
-/**
- * ice_aq_set_lldp_mib - Set the LLDP MIB
- * @hw: pointer to the HW struct
- * @mib_type: Local, Remote or both Local and Remote MIBs
- * @buf: pointer to the caller-supplied buffer to store the MIB block
- * @buf_size: size of the buffer (in bytes)
- * @cd: pointer to command details structure or NULL
- *
- * Set the LLDP MIB. (0x0A08)
- */
-enum ice_status
-ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_lldp_set_local_mib *cmd;
- struct ice_aq_desc desc;
-
- cmd = &desc.params.lldp_set_mib;
-
- if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
-
- desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
- desc.datalen = CPU_TO_LE16(buf_size);
-
- cmd->type = mib_type;
- cmd->length = CPU_TO_LE16(buf_size);
-
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-}
-
/**
* ice_get_dcbx_status
* @hw: pointer to the HW struct
@@ -761,8 +728,7 @@
*
* Parse DCB configuration from the LLDPDU
*/
-enum ice_status
-ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
enum ice_status ret = ICE_SUCCESS;
@@ -1140,9 +1106,9 @@
return ICE_ERR_PARAM;
if (dcbx_mode == ICE_DCBX_MODE_IEEE)
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
else if (dcbx_mode == ICE_DCBX_MODE_CEE)
- dcbx_cfg = &pi->desired_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.desired_dcbx_cfg;
/* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
* or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
@@ -1153,7 +1119,7 @@
goto out;
/* Get Remote DCB Config */
- dcbx_cfg = &pi->remote_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
@@ -1182,14 +1148,14 @@
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (ret == ICE_SUCCESS) {
/* CEE mode */
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
dcbx_cfg->tlv_status = LE32_TO_CPU(cee_cfg.tlv_status);
ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
} else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
}
@@ -1206,26 +1172,26 @@
*/
enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
- struct ice_port_info *pi = hw->port_info;
+ struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
enum ice_status ret = ICE_SUCCESS;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
- pi->is_sw_lldp = true;
+ qos_cfg->is_sw_lldp = true;
/* Get DCBX status */
- pi->dcbx_status = ice_get_dcbx_status(hw);
+ qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
- if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
- pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
- pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+ if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DONE ||
+ qos_cfg->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+ qos_cfg->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
- ret = ice_get_dcb_cfg(pi);
+ ret = ice_get_dcb_cfg(hw->port_info);
if (ret)
return ret;
- pi->is_sw_lldp = false;
- } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
+ qos_cfg->is_sw_lldp = false;
+ } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
return ICE_ERR_NOT_READY;
}
@@ -1233,7 +1199,7 @@
if (enable_mib_change) {
ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
if (ret)
- pi->is_sw_lldp = true;
+ qos_cfg->is_sw_lldp = true;
}
return ret;
@@ -1248,21 +1214,21 @@
*/
enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
- struct ice_port_info *pi = hw->port_info;
+ struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
enum ice_status ret;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
/* Get DCBX status */
- pi->dcbx_status = ice_get_dcbx_status(hw);
+ qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
- if (pi->dcbx_status == ICE_DCBX_STATUS_DIS)
+ if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
return ICE_ERR_NOT_READY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret)
- pi->is_sw_lldp = !ena_mib;
+ qos_cfg->is_sw_lldp = !ena_mib;
return ret;
}
@@ -1559,7 +1525,7 @@
hw = pi->hw;
/* update the HW local config */
- dcbcfg = &pi->local_dcbx_cfg;
+ dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
/* Allocate the LLDPDU */
lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE);
if (!lldpmib)
@@ -1618,7 +1584,7 @@
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
- struct ice_aqc_get_elem elem;
+ struct ice_aqc_txsched_elem_data elem;
enum ice_status status = ICE_SUCCESS;
u32 teid1, teid2;
u8 i, j;
@@ -1660,7 +1626,7 @@
/* new TC */
status = ice_sched_query_elem(pi->hw, teid2, &elem);
if (!status)
- status = ice_sched_add_node(pi, 1, &elem.generic[0]);
+ status = ice_sched_add_node(pi, 1, &elem);
if (status)
break;
/* update the TC number */
Index: sys/dev/ice/ice_drv_info.h
===================================================================
--- sys/dev/ice/ice_drv_info.h
+++ sys/dev/ice/ice_drv_info.h
@@ -63,16 +63,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
-const char ice_driver_version[] = "0.26.0-k";
+const char ice_driver_version[] = "0.27.0-k";
const uint8_t ice_major_version = 0;
-const uint8_t ice_minor_version = 26;
+const uint8_t ice_minor_version = 27;
const uint8_t ice_patch_version = 0;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
- PVID(vendor, devid, name " - 0.26.0-k")
+ PVID(vendor, devid, name " - 0.27.0-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
- PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.26.0-k")
+ PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.27.0-k")
/**
* @var ice_vendor_info_array
@@ -113,6 +113,9 @@
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
ICE_INTEL_VENDOR_ID, 0x0008, 0,
"Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x000D, 0,
+ "Intel(R) Ethernet Network Adapter E810-L-Q2 for OCP3.0"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
"Intel(R) Ethernet Controller E810-C for QSFP"),
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
Index: sys/dev/ice/ice_flex_pipe.h
===================================================================
--- sys/dev/ice/ice_flex_pipe.h
+++ sys/dev/ice/ice_flex_pipe.h
@@ -50,9 +50,6 @@
enum ice_status
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
enum ice_status
-ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
-void ice_release_change_lock(struct ice_hw *hw);
-enum ice_status
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
enum ice_status
@@ -103,9 +100,9 @@
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-struct ice_prof_map *
+enum ice_status
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
-struct ice_prof_map *
+enum ice_status
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
enum ice_status
@@ -124,8 +121,4 @@
enum ice_status
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
-enum ice_status
-ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
- u16 len);
-
#endif /* _ICE_FLEX_PIPE_H_ */
Index: sys/dev/ice/ice_flex_pipe.c
===================================================================
--- sys/dev/ice/ice_flex_pipe.c
+++ sys/dev/ice/ice_flex_pipe.c
@@ -676,11 +676,11 @@
* This function generates a key from a value, a don't care mask and a never
* match mask.
* upd, dc, and nm are optional parameters, and can be NULL:
- * upd == NULL --> udp mask is all 1's (update all bits)
+ * upd == NULL --> upd mask is all 1's (update all bits)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
-enum ice_status
+static enum ice_status
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len)
{
@@ -740,8 +740,7 @@
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (status == ICE_ERR_AQ_NO_WORK)
- ice_debug(hw, ICE_DBG_PKG,
- "Global config lock: No work to do\n");
+ ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
return status;
}
@@ -764,7 +763,7 @@
*
* This function will request ownership of the change lock.
*/
-enum ice_status
+static enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -779,7 +778,7 @@
*
* This function will release the change lock using the proper Admin Command.
*/
-void ice_release_change_lock(struct ice_hw *hw)
+static void ice_release_change_lock(struct ice_hw *hw)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -970,8 +969,7 @@
last, &offset, &info, NULL);
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "Update pkg failed: err %d off %d inf %d\n",
+ ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
status, offset, info);
break;
}
@@ -1049,8 +1047,7 @@
/* Save AQ status from download package */
hw->pkg_dwnld_status = hw->adminq.sq_last_status;
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "Pkg download failed: err %d off %d inf %d\n",
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
status, offset, info);
break;
@@ -1148,8 +1145,7 @@
meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
meta_seg->pkg_name);
} else {
- ice_debug(hw, ICE_DBG_INIT,
- "Did not find metadata segment in driver package\n");
+ ice_debug(hw, ICE_DBG_INIT, "Did not find metadata segment in driver package\n");
return ICE_ERR_CFG;
}
@@ -1166,8 +1162,7 @@
seg_hdr->seg_format_ver.draft,
seg_hdr->seg_id);
} else {
- ice_debug(hw, ICE_DBG_INIT,
- "Did not find ice segment in driver package\n");
+ ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
return ICE_ERR_CFG;
}
@@ -1189,7 +1184,7 @@
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT - 1);
+ size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
if (!pkg_info)
return ICE_ERR_NO_MEMORY;
@@ -1285,7 +1280,7 @@
u32 seg_count;
u32 i;
- if (len < sizeof(*pkg))
+ if (len < ice_struct_size(pkg, seg_offset, 1))
return ICE_ERR_BUF_TOO_SHORT;
if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
@@ -1300,7 +1295,7 @@
return ICE_ERR_CFG;
/* make sure segment array fits in package length */
- if (len < ice_struct_size(pkg, seg_offset, seg_count - 1))
+ if (len < ice_struct_size(pkg, seg_offset, seg_count))
return ICE_ERR_BUF_TOO_SHORT;
/* all segments must fit within length */
@@ -1407,7 +1402,7 @@
}
/* Check if FW is compatible with the OS package */
- size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+ size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT);
pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
if (!pkg)
return ICE_ERR_NO_MEMORY;
@@ -1425,8 +1420,7 @@
(*seg)->hdr.seg_format_ver.minor >
pkg->pkg_info[i].ver.minor) {
status = ICE_ERR_FW_DDP_MISMATCH;
- ice_debug(hw, ICE_DBG_INIT,
- "OS package is not compatible with NVM.\n");
+ ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
}
/* done processing NVM package so break */
break;
@@ -1436,6 +1430,88 @@
return status;
}
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates offset field. "offset" is an index into the field vector table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section =
+ (struct ice_sw_fv_section *)section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= LE16_TO_CPU(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table need to add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = LE16_TO_CPU(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index for used profile
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function will get the max profile index for used profiles
+ * and store the index number in struct ice_switch_info *switch_info
+ * in hw for later use.
+ */
+static int ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* in profiles that are not used, the prot_id is set to 0xff
+ * and the off is set to 0x1ff for all the field vectors.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return ICE_SUCCESS;
+}
+
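
A profile's field vector counts as used as soon as any word departs from the invalid sentinel pair, which is what drives the max index computed above. A hedged sketch of that per-profile test in isolation (the helper name is illustrative; ICE_PROT_INVALID and ICE_FV_OFFSET_INVAL are the sentinels named in the comment above):

/* Sketch: the "is this field vector used?" test from
 * ice_get_prof_index_max(), factored out. fvw is the
 * field-vector width of the switch block.
 */
static bool ice_example_fv_in_use(const struct ice_fv *fv, u16 fvw)
{
	u16 j;

	for (j = 0; j < fvw; j++)
		if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
		    fv->ew[j].off != ICE_FV_OFFSET_INVAL)
			return true;

	return false;
}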
/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
@@ -1494,8 +1570,7 @@
ice_init_pkg_hints(hw, seg);
status = ice_download_pkg(hw, seg);
if (status == ICE_ERR_AQ_NO_WORK) {
- ice_debug(hw, ICE_DBG_INIT,
- "package previously loaded - no work.\n");
+ ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
status = ICE_SUCCESS;
}
@@ -1516,6 +1591,7 @@
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
+ ice_get_prof_index_max(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
status);
@@ -1592,38 +1668,6 @@
return bld;
}
-/**
- * ice_sw_fv_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the field vector entry to be returned
- * @offset: ptr to variable that receives the offset in the field vector table
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * This function treats the given section as of type ice_sw_fv_section and
- * enumerates offset field. "offset" is an index into the field vector
- * vector table.
- */
-static void *
-ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_sw_fv_section *fv_section =
- (struct ice_sw_fv_section *)section;
-
- if (!section || sect_type != ICE_SID_FLD_VEC_SW)
- return NULL;
- if (index >= LE16_TO_CPU(fv_section->count))
- return NULL;
- if (offset)
- /* "index" passed in to this function is relative to a given
- * 4k block. To get to the true index into the field vector
- * table need to add the relative index to the base_offset
- * field of this section
- */
- *offset = LE16_TO_CPU(fv_section->base_offset) + index;
- return fv_section->fv + index;
-}
-
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
@@ -1662,18 +1706,13 @@
struct ice_seg *ice_seg;
struct ice_fv *fv;
- ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
-
if (req_profs == ICE_PROF_ALL) {
- u16 i;
-
- for (i = 0; i < ICE_MAX_NUM_PROFILES; i++)
- ice_set_bit(i, bm);
+ ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
return;
}
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
-
ice_seg = hw->seg;
do {
enum ice_prof_type prof_type;
@@ -2228,14 +2267,14 @@
sect_rx = (struct ice_boost_tcam_section *)
ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
- sizeof(*sect_rx));
+ ice_struct_size(sect_rx, tcam, 1));
if (!sect_rx)
goto ice_create_tunnel_err;
sect_rx->count = CPU_TO_LE16(1);
sect_tx = (struct ice_boost_tcam_section *)
ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
- sizeof(*sect_tx));
+ ice_struct_size(sect_tx, tcam, 1));
if (!sect_tx)
goto ice_create_tunnel_err;
sect_tx->count = CPU_TO_LE16(1);
@@ -2313,7 +2352,7 @@
}
/* size of section - there is at least one entry */
- size = ice_struct_size(sect_rx, tcam, count - 1);
+ size = ice_struct_size(sect_rx, tcam, count);
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
@@ -2403,16 +2442,14 @@
hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */
status = ice_destroy_tunnel(hw, port, false);
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "ERR: 0x%x - destroy tunnel port 0x%x\n",
+ ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n",
status, port);
break;
}
status = ice_create_tunnel(hw, type, port);
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "ERR: 0x%x - create tunnel port 0x%x\n",
+ ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - create tunnel port 0x%x\n",
status, port);
break;
}
@@ -2471,7 +2508,8 @@
u16 index;
bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
- ICE_XLT1_SIZE(ICE_XLT1_CNT),
+ ice_struct_size(sect, value,
+ ICE_XLT1_CNT),
(void **)&sect);
if (!bld)
return ICE_ERR_NO_MEMORY;
@@ -2713,12 +2751,10 @@
u16 count = 0;
/* compare counts */
- LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) {
+ LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list)
count++;
- }
- LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) {
+ LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list)
chk_count++;
- }
if (!count || count != chk_count)
return false;
@@ -2760,7 +2796,7 @@
enum ice_status status;
bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
- sizeof(struct ice_xlt2_section),
+ ice_struct_size(sect, value, 1),
(void **)&sect);
if (!bld)
return ICE_ERR_NO_MEMORY;
@@ -2892,13 +2928,12 @@
struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
u16 i;
- for (i = 0; i < xlt2->count; i++) {
+ for (i = 0; i < xlt2->count; i++)
if (xlt2->vsig_tbl[i].in_use &&
ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
return ICE_SUCCESS;
}
- }
return ICE_ERR_DOES_NOT_EXIST;
}
@@ -3118,15 +3153,6 @@
static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
switch (blk) {
- case ICE_BLK_SW:
- *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
- break;
- case ICE_BLK_ACL:
- *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
- break;
- case ICE_BLK_FD:
- *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
- break;
case ICE_BLK_RSS:
*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
break;
@@ -3147,15 +3173,6 @@
static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
switch (blk) {
- case ICE_BLK_SW:
- *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
- break;
- case ICE_BLK_ACL:
- *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
- break;
- case ICE_BLK_FD:
- *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
- break;
case ICE_BLK_RSS:
*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
break;
@@ -3172,20 +3189,22 @@
* ice_alloc_tcam_ent - allocate hardware TCAM entry
* @hw: pointer to the HW struct
* @blk: the block to allocate the TCAM for
+ * @btm: true to allocate from bottom of table, false to allocate from top
* @tcam_idx: pointer to variable to receive the TCAM entry
*
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
static enum ice_status
-ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
+ u16 *tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM;
- return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
+ return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}
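
The new btm flag lets each caller choose which end of the Profile ID TCAM the entry comes from; the call sites updated below pass true, preserving the previous bottom-up behavior. A minimal usage sketch (error handling trimmed):

/* Sketch: allocate one RSS TCAM entry from the bottom of the
 * table, as the btm == true call sites in this patch do.
 */
u16 tcam_idx;
enum ice_status status;

status = ice_alloc_tcam_ent(hw, ICE_BLK_RSS, true, &tcam_idx);
if (status)
	return status;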
/**
@@ -3604,16 +3623,8 @@
ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
ice_flow_prof, l_entry) {
- struct ice_flow_entry *e, *t;
-
- LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
- ice_flow_entry, l_entry)
- ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
- ICE_FLOW_ENTRY_HNDL(e));
-
LIST_DEL(&p->l_entry);
- if (p->acts)
- ice_free(hw, p->acts);
+
ice_free(hw, p);
}
ice_release_lock(&hw->fl_profs_locks[blk_idx]);
@@ -3740,7 +3751,7 @@
prof_redir->count * sizeof(*prof_redir->t),
ICE_NONDMA_MEM);
- ice_memset(es->t, 0, es->count * sizeof(*es->t),
+ ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw,
ICE_NONDMA_MEM);
ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
ICE_NONDMA_MEM);
@@ -3848,10 +3859,15 @@
es->ref_count = (u16 *)
ice_calloc(hw, es->count, sizeof(*es->ref_count));
- es->written = (u8 *)
- ice_calloc(hw, es->count, sizeof(*es->written));
if (!es->ref_count)
goto err;
+
+ es->written = (u8 *)
+ ice_calloc(hw, es->count, sizeof(*es->written));
+
+ if (!es->written)
+ goto err;
+
}
return ICE_SUCCESS;
@@ -3923,7 +3939,7 @@
* @prof_id: profile ID
* @ptg: packet type group (PTG) portion of key
* @vsig: VSIG portion of key
- * @cdid: CDID: portion of key
+ * @cdid: CDID portion of key
* @flags: flag portion of key
* @vl_msk: valid mask
* @dc_msk: don't care mask
@@ -3990,13 +4006,11 @@
struct ice_vsig_prof *ent;
LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
- ice_vsig_prof, list) {
+ ice_vsig_prof, list)
if (ent->profile_cookie == hdl)
return true;
- }
- ice_debug(hw, ICE_DBG_INIT,
- "Characteristic list for VSI group %d not found.\n",
+ ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n",
vsig);
return false;
}
@@ -4015,7 +4029,7 @@
u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
struct ice_chs_chg *tmp;
- LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
struct ice_pkg_es *p;
@@ -4023,7 +4037,9 @@
id = ice_sect_id(blk, ICE_VEC_TBL);
p = (struct ice_pkg_es *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p, es,
+ 1) +
vec_size -
sizeof(p->es[0]));
@@ -4036,7 +4052,6 @@
ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
ICE_NONDMA_TO_NONDMA);
}
- }
return ICE_SUCCESS;
}
@@ -4054,14 +4069,17 @@
{
struct ice_chs_chg *tmp;
- LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
struct ice_prof_id_section *p;
u32 id;
id = ice_sect_id(blk, ICE_PROF_TCAM);
p = (struct ice_prof_id_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p,
+ entry,
+ 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -4075,7 +4093,6 @@
sizeof(hw->blk[blk].prof.t->key),
ICE_NONDMA_TO_NONDMA);
}
- }
return ICE_SUCCESS;
}
@@ -4092,14 +4109,17 @@
{
struct ice_chs_chg *tmp;
- LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry)
if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
struct ice_xlt1_section *p;
u32 id;
id = ice_sect_id(blk, ICE_XLT1);
p = (struct ice_xlt1_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p,
+ value,
+ 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -4108,7 +4128,6 @@
p->offset = CPU_TO_LE16(tmp->ptype);
p->value[0] = tmp->ptg;
}
- }
return ICE_SUCCESS;
}
@@ -4135,7 +4154,10 @@
case ICE_VSIG_REM:
id = ice_sect_id(blk, ICE_XLT2);
p = (struct ice_xlt2_section *)
- ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+ ice_pkg_buf_alloc_section(bld, id,
+ ice_struct_size(p,
+ value,
+ 1));
if (!p)
return ICE_ERR_MAX_LIMIT;
@@ -4314,37 +4336,30 @@
byte++;
continue;
}
+
/* Examine 8 bits per byte */
- for (bit = 0; bit < 8; bit++) {
- if (ptypes[byte] & BIT(bit)) {
- u16 ptype;
- u8 ptg;
- u8 m;
+ ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte],
+ BITS_PER_BYTE) {
+ u16 ptype;
+ u8 ptg;
- ptype = byte * BITS_PER_BYTE + bit;
+ ptype = byte * BITS_PER_BYTE + bit;
- /* The package should place all ptypes in a
- * non-zero PTG, so the following call should
- * never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+ continue;
- /* If PTG is already added, skip and continue */
- if (ice_is_bit_set(ptgs_used, ptg))
- continue;
+ /* If PTG is already added, skip and continue */
+ if (ice_is_bit_set(ptgs_used, ptg))
+ continue;
- ice_set_bit(ptg, ptgs_used);
- prof->ptg[prof->ptg_cnt] = ptg;
+ ice_set_bit(ptg, ptgs_used);
+ prof->ptg[prof->ptg_cnt] = ptg;
- if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
- break;
-
- /* nothing left in byte, then exit */
- m = ~(u8)((1 << (bit + 1)) - 1);
- if (!(ptypes[byte] & m))
- break;
- }
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ break;
}
bytes--;
@@ -4359,32 +4374,6 @@
return status;
}
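
The rewritten loop above leans on ice_for_each_set_bit, which visits only the set bits of a bitmap instead of probing all eight positions per byte. The idiom in isolation (variable names mirror the loop above):

/* Sketch: walk the set bits of one ptype byte; each set bit
 * maps to one packet type within that byte's group.
 */
u16 bit;

ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte], BITS_PER_BYTE) {
	u16 ptype = byte * BITS_PER_BYTE + bit;

	/* resolve ptype to its PTG as the loop above does */
}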
-/**
- * ice_search_prof_id_low - Search for a profile tracking ID low level
- * @hw: pointer to the HW struct
- * @blk: hardware block
- * @id: profile tracking ID
- *
- * This will search for a profile tracking ID which was previously added. This
- * version assumes that the caller has already acquired the prof map lock.
- */
-static struct ice_prof_map *
-ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
-{
- struct ice_prof_map *entry = NULL;
- struct ice_prof_map *map;
-
- LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map,
- list) {
- if (map->profile_cookie == id) {
- entry = map;
- break;
- }
- }
-
- return entry;
-}
-
/**
* ice_search_prof_id - Search for a profile tracking ID
* @hw: pointer to the HW struct
@@ -4392,15 +4381,19 @@
* @id: profile tracking ID
*
* This will search for a profile tracking ID which was previously added.
+ * The profile map lock should be held before calling this function.
*/
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
{
- struct ice_prof_map *entry;
+ struct ice_prof_map *entry = NULL;
+ struct ice_prof_map *map;
- ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
- entry = ice_search_prof_id_low(hw, blk, id);
- ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list)
+ if (map->profile_cookie == id) {
+ entry = map;
+ break;
+ }
return entry;
}
@@ -4412,16 +4405,20 @@
* @id: profile tracking ID
* @cntxt: context
*/
-struct ice_prof_map *
+enum ice_status
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
{
+ enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_prof_map *entry;
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
entry = ice_search_prof_id(hw, blk, id);
- if (entry)
+ if (entry) {
entry->context = cntxt;
-
- return entry;
+ status = ICE_SUCCESS;
+ }
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
/**
@@ -4431,16 +4428,20 @@
* @id: profile tracking ID
* @cntxt: pointer to variable to receive the context
*/
-struct ice_prof_map *
+enum ice_status
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
{
+ enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_prof_map *entry;
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
entry = ice_search_prof_id(hw, blk, id);
- if (entry)
+ if (entry) {
*cntxt = entry->context;
-
- return entry;
+ status = ICE_SUCCESS;
+ }
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
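
Both context accessors now take the profile-map lock themselves and return an ice_status instead of exposing the ice_prof_map pointer to callers. A hedged sketch of the new calling convention (ctx_val is illustrative):

/* Sketch: store a context for a profile tracking ID, then read
 * it back; ICE_ERR_DOES_NOT_EXIST means the ID was never added.
 */
u64 cntxt;

if (ice_set_prof_context(hw, ICE_BLK_RSS, prof_id, ctx_val))
	return ICE_ERR_DOES_NOT_EXIST;

if (!ice_get_prof_context(hw, ICE_BLK_RSS, prof_id, &cntxt))
	ice_debug(hw, ICE_DBG_FLOW, "profile context 0x%016llx\n",
		  (unsigned long long)cntxt);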
/**
@@ -4456,9 +4457,8 @@
struct ice_vsig_prof *p;
LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
- ice_vsig_prof, list) {
+ ice_vsig_prof, list)
count++;
- }
return count;
}
@@ -4503,7 +4503,7 @@
enum ice_status status;
u16 i;
- for (i = 0; i < prof->tcam_count; i++) {
+ for (i = 0; i < prof->tcam_count; i++)
if (prof->tcam[i].in_use) {
prof->tcam[i].in_use = false;
status = ice_rel_tcam_idx(hw, blk,
@@ -4511,7 +4511,6 @@
if (status)
return ICE_ERR_HW_TABLE;
}
- }
return ICE_SUCCESS;
}
@@ -4549,7 +4548,7 @@
/* If the VSIG has at least 1 VSI then iterate through the list
* and remove the VSIs before deleting the group.
*/
- if (vsi_cur) {
+ if (vsi_cur)
do {
struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
struct ice_chs_chg *p;
@@ -4567,7 +4566,6 @@
vsi_cur = tmp;
} while (vsi_cur);
- }
return ice_vsig_free(hw, blk, vsig);
}
@@ -4590,7 +4588,7 @@
LIST_FOR_EACH_ENTRY_SAFE(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
- ice_vsig_prof, list) {
+ ice_vsig_prof, list)
if (p->profile_cookie == hdl) {
if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
/* this is the last profile, remove the VSIG */
@@ -4603,7 +4601,6 @@
}
return status;
}
- }
return ICE_ERR_DOES_NOT_EXIST;
}
@@ -4618,13 +4615,13 @@
ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_chs_chg *del, *tmp;
- struct LIST_HEAD_TYPE chg;
enum ice_status status;
+ struct LIST_HEAD_TYPE chg;
u16 i;
INIT_LIST_HEAD(&chg);
- for (i = 1; i < ICE_MAX_VSIGS; i++) {
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
if (ice_has_prof_vsig(hw, blk, i, id)) {
status = ice_rem_prof_id_vsig(hw, blk, i, id,
@@ -4633,7 +4630,6 @@
goto err_ice_rem_flow_all;
}
}
- }
status = ice_upd_prof_hw(hw, blk, &chg);
@@ -4663,7 +4659,7 @@
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
- pmap = ice_search_prof_id_low(hw, blk, id);
+ pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
status = ICE_ERR_DOES_NOT_EXIST;
goto err_ice_rem_prof;
@@ -4696,21 +4692,27 @@
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct LIST_HEAD_TYPE *chg)
{
+ enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_chs_chg *p;
u16 i;
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_get_prof;
+ }
- for (i = 0; i < map->ptg_cnt; i++) {
+ for (i = 0; i < map->ptg_cnt; i++)
if (!hw->blk[blk].es.written[map->prof_id]) {
/* add ES to change list */
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_get_prof;
+ }
p->type = ICE_PTG_ES_ADD;
p->ptype = 0;
@@ -4724,13 +4726,11 @@
LIST_ADD(&p->list_entry, chg);
}
- }
-
- return ICE_SUCCESS;
err_ice_get_prof:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
@@ -4784,17 +4784,23 @@
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct LIST_HEAD_TYPE *lst, u64 hdl)
{
+ enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
u16 i;
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_to_lst;
+ }
p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
- if (!p)
- return ICE_ERR_NO_MEMORY;
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_prof_to_lst;
+ }
p->profile_cookie = map->profile_cookie;
p->prof_id = map->prof_id;
@@ -4808,7 +4814,9 @@
LIST_ADD(&p->list, lst);
- return ICE_SUCCESS;
+err_ice_add_prof_to_lst:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
/**
@@ -4861,12 +4869,11 @@
{
struct ice_chs_chg *pos, *tmp;
- LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) {
+ LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry)
if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
LIST_DEL(&tmp->list_entry);
ice_free(hw, tmp);
}
- }
}
/**
@@ -4907,7 +4914,7 @@
}
/* for re-enabling, reallocate a TCAM */
- status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx);
if (status)
return status;
@@ -5029,16 +5036,12 @@
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
- /* Get the details on the profile specified by the handle ID */
- map = ice_search_prof_id(hw, blk, hdl);
- if (!map)
- return ICE_ERR_DOES_NOT_EXIST;
-
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
return ICE_ERR_ALREADY_EXISTS;
@@ -5048,22 +5051,31 @@
if (!t)
return ICE_ERR_NO_MEMORY;
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_add_prof_id_vsig;
+ }
+
t->profile_cookie = map->profile_cookie;
t->prof_id = map->prof_id;
t->tcam_count = map->ptg_cnt;
/* create TCAM entries */
for (i = 0; i < map->ptg_cnt; i++) {
- enum ice_status status;
u16 tcam_idx;
/* add TCAM to change list */
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
- if (!p)
+ if (!p) {
+ status = ICE_ERR_NO_MEMORY;
goto err_ice_add_prof_id_vsig;
+ }
/* allocate the TCAM entry index */
- status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+ status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx);
if (status) {
ice_free(hw, p);
goto err_ice_add_prof_id_vsig;
@@ -5104,12 +5116,14 @@
LIST_ADD(&t->list,
&hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
- return ICE_SUCCESS;
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
err_ice_add_prof_id_vsig:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
/* let caller clean up the change list */
ice_free(hw, t);
- return ICE_ERR_NO_MEMORY;
+ return status;
}
/**
@@ -5211,8 +5225,8 @@
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
struct ice_vsig_prof *t;
- struct LIST_HEAD_TYPE lst;
enum ice_status status;
+ struct LIST_HEAD_TYPE lst;
INIT_LIST_HEAD(&lst);
@@ -5287,10 +5301,10 @@
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
- struct LIST_HEAD_TYPE union_lst;
struct ice_chs_chg *tmp, *del;
- struct LIST_HEAD_TYPE chg;
+ struct LIST_HEAD_TYPE union_lst;
enum ice_status status;
+ struct LIST_HEAD_TYPE chg;
u16 vsig;
INIT_LIST_HEAD(&union_lst);
@@ -5457,13 +5471,12 @@
{
struct ice_vsig_prof *ent, *tmp;
- LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) {
+ LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
if (ent->profile_cookie == hdl) {
LIST_DEL(&ent->list);
ice_free(hw, ent);
return ICE_SUCCESS;
}
- }
return ICE_ERR_DOES_NOT_EXIST;
}
@@ -5483,8 +5496,8 @@
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
- struct LIST_HEAD_TYPE chg, copy;
struct ice_chs_chg *tmp, *del;
+ struct LIST_HEAD_TYPE chg, copy;
enum ice_status status;
u16 vsig;
@@ -5514,7 +5527,7 @@
if (last_profile) {
/* If there are no profiles left for this VSIG,
- * then simply remove the the VSIG.
+ * then simply remove the VSIG.
*/
status = ice_rem_vsig(hw, blk, vsig, &chg);
if (status)
Index: sys/dev/ice/ice_flex_type.h
===================================================================
--- sys/dev/ice/ice_flex_type.h
+++ sys/dev/ice/ice_flex_type.h
@@ -55,7 +55,7 @@
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
- __le32 seg_offset[1];
+ __le32 seg_offset[STRUCT_HACK_VAR_LEN];
};
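
Throughout this header the one-element trailing arrays become STRUCT_HACK_VAR_LEN flexible members, and the matching size math switches from sizeof(struct) plus (n - 1) elements to ice_struct_size(ptr, member, count). A sketch of the sizing idiom for the structure above (count and the allocation call are illustrative):

/* Sketch: size a package header holding 'count' segment offsets.
 * ice_struct_size(hdr, seg_offset, count) yields the offset of
 * seg_offset plus count * sizeof(__le32), with no off-by-one.
 */
struct ice_pkg_hdr *hdr;
u32 size = ice_struct_size(hdr, seg_offset, count);

hdr = (struct ice_pkg_hdr *)ice_malloc(hw, size);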
/* generic segment */
@@ -86,12 +86,12 @@
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
- struct ice_device_id_entry device_table[1];
+ struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN];
};
struct ice_nvm_table {
__le32 table_count;
- __le32 vers[1];
+ __le32 vers[STRUCT_HACK_VAR_LEN];
};
struct ice_buf {
@@ -101,7 +101,7 @@
struct ice_buf_table {
__le32 buf_count;
- struct ice_buf buf_array[1];
+ struct ice_buf buf_array[STRUCT_HACK_VAR_LEN];
};
/* global metadata specific segment */
@@ -134,11 +134,12 @@
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
- struct ice_section_entry section_entry[1];
+ struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
- sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
+ ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
+ (ent_sz))
/* ice package section IDs */
#define ICE_SID_XLT0_SW 10
@@ -365,17 +366,17 @@
struct ice_label_section {
__le16 count;
- struct ice_label label[1];
+ struct ice_label label[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- sizeof(struct ice_label_section) - sizeof(struct ice_label), \
- sizeof(struct ice_label))
+ ice_struct_size((struct ice_label_section *)0, label, 1) - \
+ sizeof(struct ice_label), sizeof(struct ice_label))
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
- struct ice_fv fv[1];
+ struct ice_fv fv[STRUCT_HACK_VAR_LEN];
};
struct ice_sw_fv_list_entry {
@@ -420,43 +421,32 @@
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
- struct ice_boost_tcam_entry tcam[1];
+ struct ice_boost_tcam_entry tcam[STRUCT_HACK_VAR_LEN];
};
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- sizeof(struct ice_boost_tcam_section) - \
+ ice_struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
-#pragma pack(1)
struct ice_xlt1_section {
__le16 count;
__le16 offset;
- u8 value[1];
+ u8 value[STRUCT_HACK_VAR_LEN];
};
-#pragma pack()
-
-#define ICE_XLT1_SIZE(n) (sizeof(struct ice_xlt1_section) + \
- (sizeof(u8) * ((n) - 1)))
struct ice_xlt2_section {
__le16 count;
__le16 offset;
- __le16 value[1];
+ __le16 value[STRUCT_HACK_VAR_LEN];
};
-#define ICE_XLT2_SIZE(n) (sizeof(struct ice_xlt2_section) + \
- (sizeof(u16) * ((n) - 1)))
-
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
- u8 redir_value[1];
+ u8 redir_value[STRUCT_HACK_VAR_LEN];
};
-#define ICE_PROF_REDIR_SIZE(n) (sizeof(struct ice_prof_redir_section) + \
- (sizeof(u8) * ((n) - 1)))
-
/* package buffer building */
struct ice_buf_build {
@@ -513,7 +503,7 @@
struct ice_pkg_es {
__le16 count;
__le16 offset;
- struct ice_fv_word es[1];
+ struct ice_fv_word es[STRUCT_HACK_VAR_LEN];
};
struct ice_es {
@@ -664,12 +654,12 @@
u8 key[ICE_TCAM_KEY_SZ];
u8 prof_id;
};
+#pragma pack()
struct ice_prof_id_section {
__le16 count;
- struct ice_prof_tcam_entry entry[1];
+ struct ice_prof_tcam_entry entry[STRUCT_HACK_VAR_LEN];
};
-#pragma pack()
struct ice_prof_tcam {
u32 sid;
Index: sys/dev/ice/ice_flow.h
===================================================================
--- sys/dev/ice/ice_flow.h
+++ sys/dev/ice/ice_flow.h
@@ -34,6 +34,7 @@
#define _ICE_FLOW_H_
#include "ice_flex_type.h"
+
#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix)))
#define ICE_FLOW_PROF_ID_INVAL 0xfffffffffffffffful
#define ICE_FLOW_PROF_ID_BYPASS 0
@@ -85,6 +86,10 @@
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
ICE_FLOW_SEG_HDR_GRE = 0x00000200,
+ /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
+ * ICE_FLOW_SEG_HDR_IPV6, which includes the IPV4/IPV6 other PTYPEs
+ */
+ ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
};
enum ice_flow_field {
@@ -180,6 +185,19 @@
BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+enum ice_rss_hash_func {
+ ICE_RSS_HASH_TOEPLITZ = 0,
+ ICE_RSS_HASH_TOEPLITZ_SYMMETRIC = 1,
+ ICE_RSS_HASH_XOR = 2,
+ ICE_RSS_HASH_JHASH = 3,
+};
+
+struct ice_rss_hash_cfg {
+ u32 addl_hdrs;
+ u64 hash_flds;
+ enum ice_rss_hash_func hash_func;
+};
+
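
The new ice_rss_hash_cfg carries the protocol-header bits, the hash-field bitmap, and the hash function in one request, and composes with the additive ICE_FLOW_SEG_HDR_IPV_OTHER bit defined above. A hedged sketch for an IPv4 configuration that also matches the IPv4 "other" PTYPE group (the hash-field choice is illustrative):

/* Sketch: hash on IPv4 source/destination addresses, including
 * the IPv4 "other" PTYPEs, with the default Toeplitz function.
 */
struct ice_rss_hash_cfg cfg = {
	.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
	.hash_flds = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		     BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA),
	.hash_func = ICE_RSS_HASH_TOEPLITZ,
};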
enum ice_flow_dir {
ICE_FLOW_DIR_UNDEFINED = 0,
ICE_FLOW_TX = 0x01,
@@ -194,9 +212,7 @@
};
#define ICE_FLOW_SEG_MAX 2
-#define ICE_FLOW_SEG_RAW_FLD_MAX 2
#define ICE_FLOW_PROFILE_MAX 1024
-#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
#define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32
#define ICE_FLOW_FV_EXTRACT_SZ 2
@@ -234,40 +250,15 @@
struct ice_flow_seg_xtrct xtrct;
};
-struct ice_flow_seg_fld_raw {
- struct ice_flow_fld_info info;
- u16 off; /* Offset from the start of the segment */
-};
-
struct ice_flow_seg_info {
u32 hdrs; /* Bitmask indicating protocol headers present */
u64 match; /* Bitmask indicating header fields to be matched */
u64 range; /* Bitmask indicating header fields matched as ranges */
struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
-
- u8 raws_cnt; /* Number of raw fields to be matched */
- struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
-};
-
-/* This structure describes a flow entry, and is tracked only in this file */
-struct ice_flow_entry {
- struct LIST_ENTRY_TYPE l_entry;
-
- u64 id;
- struct ice_flow_prof *prof;
- /* Action list */
- struct ice_flow_action *acts;
- /* Flow entry's content */
- void *entry;
- enum ice_flow_priority priority;
- u16 vsi_handle;
- u16 entry_sz;
- u8 acts_cnt;
};
#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
-#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
struct ice_flow_prof {
struct LIST_ENTRY_TYPE l_entry;
@@ -275,11 +266,6 @@
u64 id;
enum ice_flow_dir dir;
u8 segs_cnt;
- u8 acts_cnt;
-
- /* Keep track of flow entries associated with this flow profile */
- struct ice_lock entries_lock;
- struct LIST_HEAD_TYPE entries;
struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
@@ -288,12 +274,7 @@
union {
/* struct sw_recipe */
- /* struct fd */
- u32 data;
} cfg;
-
- /* Default actions */
- struct ice_flow_action *acts;
};
struct ice_rss_cfg {
@@ -338,36 +319,14 @@
ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt);
enum ice_status
-ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
- u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
- struct ice_flow_action *acts, u8 acts_cnt,
- struct ice_flow_prof **prof);
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
-enum ice_status
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig);
enum ice_status
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u8 *hw_prof);
-
-u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
-enum ice_status
-ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
- u64 entry_id, u16 vsi, enum ice_flow_priority prio,
- void *data, struct ice_flow_action *acts, u8 acts_cnt,
- u64 *entry_h);
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
- u64 entry_h);
-void
-ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
- u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 prefix_loc, u8 prefix_sz);
-void
-ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
- u16 val_loc, u16 mask_loc);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
Index: sys/dev/ice/ice_flow.c
===================================================================
--- sys/dev/ice/ice_flow.c
+++ sys/dev/ice/ice_flow.c
@@ -74,7 +74,7 @@
/* ICE_FLOW_FIELD_IDX_C_VLAN */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
/* IPv4 / IPv6 */
/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 1, ICE_FLOW_FLD_SZ_IP_DSCP),
@@ -159,7 +159,9 @@
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-/* Packet types for packets with an Outer/First/Single IPv4 header */
+/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
+ * include IPV4 other PTYPEs
+ */
static const u32 ice_ptypes_ipv4_ofos[] = {
0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -171,6 +173,20 @@
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outer/First/Single IPv4 header, includes
+ * IPV4 other PTYPEs
+ */
+static const u32 ice_ptypes_ipv4_ofos_all[] = {
+ 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
@@ -183,7 +199,9 @@
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-/* Packet types for packets with an Outer/First/Single IPv6 header */
+/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
+ * include IPV6 other PTYPEs
+ */
static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -195,6 +213,20 @@
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outer/First/Single IPv6 header, includes
+ * IPV6 other PTYPEs
+ */
+static const u32 ice_ptypes_ipv6_ofos_all[] = {
+ 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Packet types for packets with an Innermost/Last IPv6 header */
static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
@@ -207,6 +239,54 @@
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
+static const u32 ice_ipv4_ofos_no_l4[] = {
+ 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
+static const u32 ice_ipv4_il_no_l4[] = {
+ 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
+ 0x00000008, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
+static const u32 ice_ipv6_ofos_no_l4[] = {
+ 0x00000000, 0x00000000, 0x43000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
+static const u32 ice_ipv6_il_no_l4[] = {
+ 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
+ 0x00000430, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
0x00000800, 0x00000000, 0x00000000, 0x00000000,
@@ -325,6 +405,9 @@
#define ICE_FLOW_SEG_HDRS_L4_MASK \
(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
ICE_FLOW_SEG_HDR_SCTP)
+/* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */
+#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
/**
* ice_flow_val_hdrs - validates packet segments for valid protocol headers
@@ -351,54 +434,6 @@
return ICE_SUCCESS;
}
-/* Sizes of fixed known protocol headers without header options */
-#define ICE_FLOW_PROT_HDR_SZ_MAC 14
-#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
-#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
-#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
-#define ICE_FLOW_PROT_HDR_SZ_ARP 28
-#define ICE_FLOW_PROT_HDR_SZ_ICMP 8
-#define ICE_FLOW_PROT_HDR_SZ_TCP 20
-#define ICE_FLOW_PROT_HDR_SZ_UDP 8
-#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
-
-/**
- * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
- * @params: information about the flow to be processed
- * @seg: index of packet segment whose header size is to be determined
- */
-static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
-{
- u16 sz;
-
- /* L2 headers */
- sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
- ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
-
- /* L3 headers */
- if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
- sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
- else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
- sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
- else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
- sz += ICE_FLOW_PROT_HDR_SZ_ARP;
- else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
- /* A L3 header is required if L4 is specified */
- return 0;
-
- /* L4 headers */
- if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
- sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
- else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
- sz += ICE_FLOW_PROT_HDR_SZ_TCP;
- else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
- sz += ICE_FLOW_PROT_HDR_SZ_UDP;
- else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
- sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
-
- return sz;
-}
-
/**
* ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
* @params: information about the flow to be processed
@@ -442,11 +477,37 @@
ICE_FLOW_PTYPE_MAX);
}
- if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = i ?
+ (const ice_bitmap_t *)ice_ptypes_ipv4_il :
+ (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
+ src = i ?
+ (const ice_bitmap_t *)ice_ptypes_ipv6_il :
+ (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
+ src = !i ? (const ice_bitmap_t *)ice_ipv4_ofos_no_l4 :
+ (const ice_bitmap_t *)ice_ipv4_il_no_l4;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
(const ice_bitmap_t *)ice_ptypes_ipv4_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
+ src = !i ? (const ice_bitmap_t *)ice_ipv6_ofos_no_l4 :
+ (const ice_bitmap_t *)ice_ipv6_il_no_l4;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
(const ice_bitmap_t *)ice_ptypes_ipv6_il;
@@ -454,12 +515,7 @@
ICE_FLOW_PTYPE_MAX);
}
- if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
- src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
- (const ice_bitmap_t *)ice_ptypes_icmp_il;
- ice_and_bitmap(params->ptypes, params->ptypes, src,
- ICE_FLOW_PTYPE_MAX);
- } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+ if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
src = (const ice_bitmap_t *)ice_ptypes_udp_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
@@ -471,6 +527,13 @@
src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
ice_and_bitmap(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
+ (const ice_bitmap_t *)ice_ptypes_icmp_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
if (!i) {
src = (const ice_bitmap_t *)ice_ptypes_gre_of;
@@ -483,42 +546,6 @@
return ICE_SUCCESS;
}
-/**
- * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
- * @hw: pointer to the HW struct
- * @params: information about the flow to be processed
- * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
- *
- * This function will allocate an extraction sequence entries for a DWORD size
- * chunk of the packet flags.
- */
-static enum ice_status
-ice_flow_xtract_pkt_flags(struct ice_hw *hw,
- struct ice_flow_prof_params *params,
- enum ice_flex_mdid_pkt_flags flags)
-{
- u8 fv_words = hw->blk[params->blk].es.fvw;
- u8 idx;
-
- /* Make sure the number of extraction sequence entries required does not
- * exceed the block's capacity.
- */
- if (params->es_cnt >= fv_words)
- return ICE_ERR_MAX_LIMIT;
-
- /* some blocks require a reversed field vector layout */
- if (hw->blk[params->blk].es.reverse)
- idx = fv_words - params->es_cnt - 1;
- else
- idx = params->es_cnt;
-
- params->es[idx].prot_id = ICE_PROT_META_ID;
- params->es[idx].off = flags;
- params->es_cnt++;
-
- return ICE_SUCCESS;
-}
-
/**
* ice_flow_xtract_fld - Create an extraction sequence entry for the given field
* @hw: pointer to the HW struct
@@ -539,7 +566,6 @@
u8 fv_words = hw->blk[params->blk].es.fvw;
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
- s16 adj = 0;
u16 off;
flds = params->prof->segs[seg].fields;
@@ -570,7 +596,7 @@
*/
if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
- else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
+ else
sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
break;
case ICE_FLOW_FIELD_IDX_IPV6_TTL:
@@ -583,7 +609,7 @@
*/
if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
- else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
+ else
sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
break;
case ICE_FLOW_FIELD_IDX_IPV4_SA:
@@ -639,7 +665,7 @@
flds[fld].xtrct.prot_id = prot_id;
flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
ICE_FLOW_FV_EXTRACT_SZ;
- flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
+ flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
flds[fld].xtrct.idx = params->es_cnt;
/* Adjust the next field-entry index after accommodating the number of
@@ -683,82 +709,6 @@
return ICE_SUCCESS;
}
-/**
- * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
- * @hw: pointer to the HW struct
- * @params: information about the flow to be processed
- * @seg: index of packet segment whose raw fields are to be be extracted
- */
-static enum ice_status
-ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
- u8 seg)
-{
- u16 fv_words;
- u16 hdrs_sz;
- u8 i;
-
- if (!params->prof->segs[seg].raws_cnt)
- return ICE_SUCCESS;
-
- if (params->prof->segs[seg].raws_cnt >
- ARRAY_SIZE(params->prof->segs[seg].raws))
- return ICE_ERR_MAX_LIMIT;
-
- /* Offsets within the segment headers are not supported */
- hdrs_sz = ice_flow_calc_seg_sz(params, seg);
- if (!hdrs_sz)
- return ICE_ERR_PARAM;
-
- fv_words = hw->blk[params->blk].es.fvw;
-
- for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
- struct ice_flow_seg_fld_raw *raw;
- u16 off, cnt, j;
-
- raw = &params->prof->segs[seg].raws[i];
-
- /* Storing extraction information */
- raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
- raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
- ICE_FLOW_FV_EXTRACT_SZ;
- raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
- BITS_PER_BYTE;
- raw->info.xtrct.idx = params->es_cnt;
-
- /* Determine the number of field vector entries this raw field
- * consumes.
- */
- cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
- (raw->info.src.last * BITS_PER_BYTE),
- (ICE_FLOW_FV_EXTRACT_SZ *
- BITS_PER_BYTE));
- off = raw->info.xtrct.off;
- for (j = 0; j < cnt; j++) {
- u16 idx;
-
- /* Make sure the number of extraction sequence required
- * does not exceed the block's capability
- */
- if (params->es_cnt >= hw->blk[params->blk].es.count ||
- params->es_cnt >= ICE_MAX_FV_WORDS)
- return ICE_ERR_MAX_LIMIT;
-
- /* some blocks require a reversed field vector layout */
- if (hw->blk[params->blk].es.reverse)
- idx = fv_words - params->es_cnt - 1;
- else
- idx = params->es_cnt;
-
- params->es[idx].prot_id = raw->info.xtrct.prot_id;
- params->es[idx].off = off;
- params->es_cnt++;
- off += ICE_FLOW_FV_EXTRACT_SZ;
- }
- }
-
- return ICE_SUCCESS;
-}
-
/**
* ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
* @hw: pointer to the HW struct
@@ -774,35 +724,17 @@
enum ice_status status = ICE_SUCCESS;
u8 i;
- /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
- * packet flags
- */
- if (params->blk == ICE_BLK_ACL) {
- status = ice_flow_xtract_pkt_flags(hw, params,
- ICE_RX_MDID_PKT_FLAGS_15_0);
- if (status)
- return status;
- }
-
for (i = 0; i < params->prof->segs_cnt; i++) {
u64 match = params->prof->segs[i].match;
enum ice_flow_field j;
- for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
- const u64 bit = BIT_ULL(j);
-
- if (match & bit) {
- status = ice_flow_xtract_fld(hw, params, i, j);
- if (status)
- return status;
- match &= ~bit;
- }
+ ice_for_each_set_bit(j, (ice_bitmap_t *)&match,
+ ICE_FLOW_FIELD_IDX_MAX) {
+ status = ice_flow_xtract_fld(hw, params, i, j);
+ if (status)
+ return status;
+ ice_clear_bit(j, (ice_bitmap_t *)&match);
}
-
- /* Process raw matching bytes */
- status = ice_flow_xtract_raws(hw, params, i);
- if (status)
- return status;
}
return status;
@@ -828,15 +760,8 @@
switch (params->blk) {
case ICE_BLK_RSS:
- /* Only header information is provided for RSS configuration.
- * No further processing is needed.
- */
status = ICE_SUCCESS;
break;
- case ICE_BLK_FD:
- status = ICE_SUCCESS;
- break;
- case ICE_BLK_SW:
default:
return ICE_ERR_NOT_IMPL;
}
@@ -866,7 +791,7 @@
struct ice_flow_prof *p, *prof = NULL;
ice_acquire_lock(&hw->fl_profs_locks[blk]);
- LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
segs_cnt && segs_cnt == p->segs_cnt) {
u8 i;
@@ -892,7 +817,6 @@
break;
}
}
- }
ice_release_lock(&hw->fl_profs_locks[blk]);
return prof;
@@ -929,55 +853,35 @@
{
struct ice_flow_prof *p;
- LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry)
if (p->id == prof_id)
return p;
- }
return NULL;
}
/**
- * ice_dealloc_flow_entry - Deallocate flow entry memory
- * @hw: pointer to the HW struct
- * @entry: flow entry to be removed
- */
-static void
-ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
-{
- if (!entry)
- return;
-
- if (entry->entry)
- ice_free(hw, entry->entry);
-
- if (entry->acts) {
- ice_free(hw, entry->acts);
- entry->acts = NULL;
- entry->acts_cnt = 0;
- }
-
- ice_free(hw, entry);
-}
-
-/**
- * ice_flow_rem_entry_sync - Remove a flow entry
+ * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
* @hw: pointer to the HW struct
* @blk: classification stage
- * @entry: flow entry to be removed
+ * @prof_id: the profile ID handle
+ * @hw_prof_id: pointer to variable to receive the HW profile ID
*/
-static enum ice_status
-ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __ALWAYS_UNUSED blk,
- struct ice_flow_entry *entry)
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u8 *hw_prof_id)
{
- if (!entry)
- return ICE_ERR_BAD_PTR;
+ enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
+ struct ice_prof_map *map;
- LIST_DEL(&entry->l_entry);
-
- ice_dealloc_flow_entry(hw, entry);
-
- return ICE_SUCCESS;
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+ map = ice_search_prof_id(hw, blk, prof_id);
+ if (map) {
+ *hw_prof_id = map->prof_id;
+ status = ICE_SUCCESS;
+ }
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
}
/**
@@ -1001,75 +905,64 @@
struct ice_flow_action *acts, u8 acts_cnt,
struct ice_flow_prof **prof)
{
- struct ice_flow_prof_params params;
+ struct ice_flow_prof_params *params;
enum ice_status status;
u8 i;
if (!prof || (acts_cnt && !acts))
return ICE_ERR_BAD_PTR;
- ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
- params.prof = (struct ice_flow_prof *)
- ice_malloc(hw, sizeof(*params.prof));
- if (!params.prof)
+ params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
+ if (!params)
return ICE_ERR_NO_MEMORY;
+ params->prof = (struct ice_flow_prof *)
+ ice_malloc(hw, sizeof(*params->prof));
+ if (!params->prof) {
+ status = ICE_ERR_NO_MEMORY;
+ goto free_params;
+ }
+
/* initialize extraction sequence to all invalid (0xff) */
for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
- params.es[i].prot_id = ICE_PROT_INVALID;
- params.es[i].off = ICE_FV_OFFSET_INVAL;
+ params->es[i].prot_id = ICE_PROT_INVALID;
+ params->es[i].off = ICE_FV_OFFSET_INVAL;
}
- params.blk = blk;
- params.prof->id = prof_id;
- params.prof->dir = dir;
- params.prof->segs_cnt = segs_cnt;
+ params->blk = blk;
+ params->prof->id = prof_id;
+ params->prof->dir = dir;
+ params->prof->segs_cnt = segs_cnt;
/* Make a copy of the segments that need to be persistent in the flow
* profile instance
*/
for (i = 0; i < segs_cnt; i++)
- ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
+ ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs),
ICE_NONDMA_TO_NONDMA);
- /* Make a copy of the actions that need to be persistent in the flow
- * profile instance.
- */
- if (acts_cnt) {
- params.prof->acts = (struct ice_flow_action *)
- ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
- ICE_NONDMA_TO_NONDMA);
-
- if (!params.prof->acts) {
- status = ICE_ERR_NO_MEMORY;
- goto out;
- }
- }
-
- status = ice_flow_proc_segs(hw, &params);
+ status = ice_flow_proc_segs(hw, params);
if (status) {
- ice_debug(hw, ICE_DBG_FLOW,
- "Error processing a flow's packet segments\n");
+ ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n");
goto out;
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ params->es);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
}
- INIT_LIST_HEAD(&params.prof->entries);
- ice_init_lock(&params.prof->entries_lock);
- *prof = params.prof;
+ *prof = params->prof;
out:
if (status) {
- if (params.prof->acts)
- ice_free(hw, params.prof->acts);
- ice_free(hw, params.prof);
+ ice_free(hw, params->prof);
}
+free_params:
+ ice_free(hw, params);
return status;
}
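
struct ice_flow_prof_params now lives on the heap rather than the kernel stack, a common change when a structure (here carrying a full extraction sequence) is large relative to the small kernel stack. The shape of the pattern, condensed from the hunk above:

	params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params));
	if (!params)
		return ICE_ERR_NO_MEMORY;
	/* ... inner allocations and work; failures goto out or
	 * free_params as appropriate ...
	 */
out:
	if (status)
		ice_free(hw, params->prof);	/* undo inner allocation on error */
free_params:
	ice_free(hw, params);			/* always release the wrapper */
	return status;

Error paths unwind in reverse order of allocation: a failure of the inner ice_malloc() jumps straight to free_params, past the prof cleanup.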
@@ -1088,29 +981,10 @@
{
enum ice_status status;
- /* Remove all remaining flow entries before removing the flow profile */
- if (!LIST_EMPTY(&prof->entries)) {
- struct ice_flow_entry *e, *t;
-
- ice_acquire_lock(&prof->entries_lock);
-
- LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
- l_entry) {
- status = ice_flow_rem_entry_sync(hw, blk, e);
- if (status)
- break;
- }
-
- ice_release_lock(&prof->entries_lock);
- }
-
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
if (!status) {
LIST_DEL(&prof->l_entry);
- ice_destroy_lock(&prof->entries_lock);
- if (prof->acts)
- ice_free(hw, prof->acts);
ice_free(hw, prof);
}
@@ -1169,8 +1043,7 @@
if (!status)
ice_set_bit(vsi_handle, prof->vsis);
else
- ice_debug(hw, ICE_DBG_FLOW,
- "HW profile add failed, %d\n",
+ ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n",
status);
}
@@ -1201,8 +1074,7 @@
if (!status)
ice_clear_bit(vsi_handle, prof->vsis);
else
- ice_debug(hw, ICE_DBG_FLOW,
- "HW profile remove failed, %d\n",
+ ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n",
status);
}
@@ -1221,7 +1093,7 @@
* @acts_cnt: number of default actions
* @prof: stores the returned flow profile added
*/
-enum ice_status
+static enum ice_status
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_action *acts, u8 acts_cnt,
@@ -1260,7 +1132,7 @@
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-enum ice_status
+static enum ice_status
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
@@ -1283,187 +1155,6 @@
return status;
}
-/**
- * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @prof_id: the profile ID handle
- * @hw_prof_id: pointer to variable to receive the HW profile ID
- */
-enum ice_status
-ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
- u8 *hw_prof_id)
-{
- struct ice_prof_map *map;
-
- map = ice_search_prof_id(hw, blk, prof_id);
- if (map) {
- *hw_prof_id = map->prof_id;
- return ICE_SUCCESS;
- }
-
- return ICE_ERR_DOES_NOT_EXIST;
-}
-
-/**
- * ice_flow_find_entry - look for a flow entry using its unique ID
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @entry_id: unique ID to identify this flow entry
- *
- * This function looks for the flow entry with the specified unique ID in all
- * flow profiles of the specified classification stage. If the entry is found,
- * and it returns the handle to the flow entry. Otherwise, it returns
- * ICE_FLOW_ENTRY_ID_INVAL.
- */
-u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
-{
- struct ice_flow_entry *found = NULL;
- struct ice_flow_prof *p;
-
- ice_acquire_lock(&hw->fl_profs_locks[blk]);
-
- LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
- struct ice_flow_entry *e;
-
- ice_acquire_lock(&p->entries_lock);
- LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
- if (e->id == entry_id) {
- found = e;
- break;
- }
- ice_release_lock(&p->entries_lock);
-
- if (found)
- break;
- }
-
- ice_release_lock(&hw->fl_profs_locks[blk]);
-
- return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
-}
-
-/**
- * ice_flow_add_entry - Add a flow entry
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @prof_id: ID of the profile to add a new flow entry to
- * @entry_id: unique ID to identify this flow entry
- * @vsi_handle: software VSI handle for the flow entry
- * @prio: priority of the flow entry
- * @data: pointer to a data buffer containing flow entry's match values/masks
- * @acts: arrays of actions to be performed on a match
- * @acts_cnt: number of actions
- * @entry_h: pointer to buffer that receives the new flow entry's handle
- */
-enum ice_status
-ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
- u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
- void *data, struct ice_flow_action *acts, u8 acts_cnt,
- u64 *entry_h)
-{
- struct ice_flow_entry *e = NULL;
- struct ice_flow_prof *prof;
- enum ice_status status = ICE_SUCCESS;
-
- /* ACL entries must indicate an action */
- if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
- return ICE_ERR_PARAM;
-
- /* No flow entry data is expected for RSS */
- if (!entry_h || (!data && blk != ICE_BLK_RSS))
- return ICE_ERR_BAD_PTR;
-
- if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
-
- ice_acquire_lock(&hw->fl_profs_locks[blk]);
-
- prof = ice_flow_find_prof_id(hw, blk, prof_id);
- if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
- } else {
- /* Allocate memory for the entry being added and associate
- * the VSI to the found flow profile
- */
- e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
- if (!e)
- status = ICE_ERR_NO_MEMORY;
- else
- status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
- }
-
- ice_release_lock(&hw->fl_profs_locks[blk]);
- if (status)
- goto out;
-
- e->id = entry_id;
- e->vsi_handle = vsi_handle;
- e->prof = prof;
- e->priority = prio;
-
- switch (blk) {
- case ICE_BLK_RSS:
- /* RSS will add only one entry per VSI per profile */
- break;
- case ICE_BLK_FD:
- break;
- case ICE_BLK_SW:
- case ICE_BLK_PE:
- default:
- status = ICE_ERR_NOT_IMPL;
- goto out;
- }
-
- if (blk != ICE_BLK_ACL) {
- /* ACL will handle the entry management */
- ice_acquire_lock(&prof->entries_lock);
- LIST_ADD(&e->l_entry, &prof->entries);
- ice_release_lock(&prof->entries_lock);
- }
-
- *entry_h = ICE_FLOW_ENTRY_HNDL(e);
-
-out:
- if (status && e) {
- if (e->entry)
- ice_free(hw, e->entry);
- ice_free(hw, e);
- }
-
- return status;
-}
-
-/**
- * ice_flow_rem_entry - Remove a flow entry
- * @hw: pointer to the HW struct
- * @blk: classification stage
- * @entry_h: handle to the flow entry to be removed
- */
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
- u64 entry_h)
-{
- struct ice_flow_entry *entry;
- struct ice_flow_prof *prof;
- enum ice_status status = ICE_SUCCESS;
-
- if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
- return ICE_ERR_PARAM;
-
- entry = ICE_FLOW_ENTRY_PTR(entry_h);
-
- /* Retain the pointer to the flow profile as the entry will be freed */
- prof = entry->prof;
-
- if (prof) {
- ice_acquire_lock(&prof->entries_lock);
- status = ice_flow_rem_entry_sync(hw, blk, entry);
- ice_release_lock(&prof->entries_lock);
- }
-
- return status;
-}
-
/**
* ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
* @seg: packet segment the field being set belongs to
@@ -1478,7 +1169,7 @@
*
* This helper function stores information of a field being matched, including
* the type of the field and the locations of the value to match, the mask, and
- * and the upper-bound value in the start of the input buffer for a flow entry.
+ * the upper-bound value in the start of the input buffer for a flow entry.
* This function should only be used for fixed-size data structures.
*
* This function also opportunistically determines the protocol headers to be
@@ -1526,7 +1217,7 @@
* create the content of a match entry. This function should only be used for
* fixed-size data structures.
*/
-void
+static void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
{
@@ -1565,48 +1256,11 @@
pref_loc, (u16)pref_sz);
}
-/**
- * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
- * @seg: packet segment the field being set belongs to
- * @off: offset of the raw field from the beginning of the segment in bytes
- * @len: length of the raw pattern to be matched
- * @val_loc: location of the value to match from entry's input buffer
- * @mask_loc: location of mask value from entry's input buffer
- *
- * This function specifies the offset of the raw field to be match from the
- * beginning of the specified packet segment, and the locations, in the form of
- * byte offsets from the start of the input buffer for a flow entry, from where
- * the value to match and the mask value to be extracted. These locations are
- * then stored in the flow profile. When adding flow entries to the associated
- * flow profile, these locations can be used to quickly extract the values to
- * create the content of a match entry. This function should only be used for
- * fixed-size data structures.
- */
-void
-ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
- u16 val_loc, u16 mask_loc)
-{
- if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
- seg->raws[seg->raws_cnt].off = off;
- seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
- seg->raws[seg->raws_cnt].info.src.val = val_loc;
- seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
- /* The "last" field is used to store the length of the field */
- seg->raws[seg->raws_cnt].info.src.last = len;
- }
-
- /* Overflows of "raws" will be handled as an error condition later in
- * the flow when this information is processed.
- */
- seg->raws_cnt++;
-}
-
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
- (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
- ICE_FLOW_SEG_HDR_SCTP)
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
@@ -1626,20 +1280,15 @@
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
{
- u64 val = hash_fields;
+ u64 val;
u8 i;
- for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
- u64 bit = BIT_ULL(i);
+ ice_for_each_set_bit(i, (ice_bitmap_t *)&hash_fields,
+ ICE_FLOW_FIELD_IDX_MAX)
+ ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
- if (val & bit) {
- ice_flow_set_fld(segs, (enum ice_flow_field)i,
- ICE_FLOW_FLD_OFF_INVAL,
- ICE_FLOW_FLD_OFF_INVAL,
- ICE_FLOW_FLD_OFF_INVAL, false);
- val &= ~bit;
- }
- }
ICE_FLOW_SET_HDRS(segs, flow_hdr);
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
@@ -1672,13 +1321,12 @@
ice_acquire_lock(&hw->rss_locks);
LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
- ice_rss_cfg, l_entry) {
+ ice_rss_cfg, l_entry)
if (ice_test_and_clear_bit(vsi_handle, r->vsis))
if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
LIST_DEL(&r->l_entry);
ice_free(hw, r);
}
- }
ice_release_lock(&hw->rss_locks);
}
@@ -1703,22 +1351,21 @@
if (LIST_EMPTY(&hw->fl_profs[blk]))
return ICE_SUCCESS;
- ice_acquire_lock(&hw->fl_profs_locks[blk]);
+ ice_acquire_lock(&hw->rss_locks);
LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
- l_entry) {
+ l_entry)
if (ice_is_bit_set(p->vsis, vsi_handle)) {
status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
if (status)
break;
if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
- status = ice_flow_rem_prof_sync(hw, blk, p);
+ status = ice_flow_rem_prof(hw, blk, p->id);
if (status)
break;
}
}
- }
- ice_release_lock(&hw->fl_profs_locks[blk]);
+ ice_release_lock(&hw->rss_locks);
return status;
}
@@ -1741,7 +1388,7 @@
* remove from the RSS entry list of the VSI context and delete entry.
*/
LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
- ice_rss_cfg, l_entry) {
+ ice_rss_cfg, l_entry)
if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
ice_clear_bit(vsi_handle, r->vsis);
@@ -1751,7 +1398,6 @@
}
return;
}
- }
}
/**
@@ -1853,7 +1499,7 @@
goto exit;
/* Check if a flow profile exists with the same protocol headers and
- * associated with the input VSI. If so disasscociate the VSI from
+ * associated with the input VSI. If so disassociate the VSI from
* this profile. The VSI will be added to a new profile created with
* the protocol header and new hash field configuration.
*/
@@ -2208,7 +1854,8 @@
*/
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
{
- struct ice_rss_cfg *r, *rss_cfg = NULL;
+ u64 rss_hash = ICE_HASH_INVALID;
+ struct ice_rss_cfg *r;
/* verify if the protocol header is non zero and VSI is valid */
if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
@@ -2219,10 +1866,10 @@
ice_rss_cfg, l_entry)
if (ice_is_bit_set(r->vsis, vsi_handle) &&
r->packet_hdr == hdrs) {
- rss_cfg = r;
+ rss_hash = r->hashed_flds;
break;
}
ice_release_lock(&hw->rss_locks);
- return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+ return rss_hash;
}
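
ice_get_rss_cfg() now copies the hash value while rss_locks is held, rather than saving a pointer into the list and dereferencing it after ice_release_lock(), where the node could be freed by a concurrent removal. The general shape:

	ice_acquire_lock(&hw->rss_locks);
	LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, ice_rss_cfg, l_entry)
		if (/* r matches the VSI and headers */) {
			rss_hash = r->hashed_flds;	/* copy under the lock */
			break;
		}
	ice_release_lock(&hw->rss_locks);
	return rss_hash;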
Index: sys/dev/ice/ice_hw_autogen.h
===================================================================
--- sys/dev/ice/ice_hw_autogen.h
+++ sys/dev/ice/ice_hw_autogen.h
@@ -5260,8 +5260,8 @@
#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0)
#define PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E3960 /* Reset Source: GLOBR */
#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_S 0
#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
Index: sys/dev/ice/ice_lan_tx_rx.h
===================================================================
--- sys/dev/ice/ice_lan_tx_rx.h
+++ sys/dev/ice/ice_lan_tx_rx.h
@@ -190,7 +190,9 @@
#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
+#define ICE_FXD_FLTR_QW1_FDID_PRI_ZERO 0x0ULL
#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
+#define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL
#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28
#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)
@@ -1049,7 +1051,7 @@
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal do not write */
+ u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
/* LAN Tx Completion Queue data */
Index: sys/dev/ice/ice_lib.h
===================================================================
--- sys/dev/ice/ice_lib.h
+++ sys/dev/ice/ice_lib.h
@@ -527,6 +527,7 @@
ICE_STATE_LINK_STATUS_REPORTED,
ICE_STATE_DETACHING,
ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
+ ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER,
/* This entry must be last */
ICE_STATE_LAST,
};
@@ -807,5 +808,6 @@
int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length);
int ice_alloc_intr_tracking(struct ice_softc *sc);
void ice_free_intr_tracking(struct ice_softc *sc);
+void ice_set_default_local_lldp_mib(struct ice_softc *sc);
#endif /* _ICE_LIB_H_ */
Index: sys/dev/ice/ice_lib.c
===================================================================
--- sys/dev/ice/ice_lib.c
+++ sys/dev/ice/ice_lib.c
@@ -143,8 +143,8 @@
ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
u64 *phy_type_high);
static int
-ice_intersect_media_types_with_caps(struct ice_softc *sc, u64 *phy_type_low,
- u64 *phy_type_high);
+ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds,
+ u64 *phy_type_low, u64 *phy_type_high);
static int
ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low,
u64 *phy_type_high);
@@ -1388,44 +1388,54 @@
int
ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
{
- struct ice_aqc_add_tx_qgrp qg = { 0 };
+ struct ice_aqc_add_tx_qgrp *qg;
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
enum ice_status status;
- int i, err;
- u16 pf_q;
+ int i;
+ int err = 0;
+ u16 qg_size, pf_q;
- qg.num_txqs = 1;
+ qg_size = ice_struct_size(qg, txqs, 1);
+ qg = (struct ice_aqc_add_tx_qgrp *)malloc(qg_size, M_ICE, M_NOWAIT|M_ZERO);
+ if (!qg)
+ return (ENOMEM);
+
+ qg->num_txqs = 1;
for (i = 0; i < vsi->num_tx_queues; i++) {
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_tx_queue *txq = &vsi->tx_queues[i];
pf_q = vsi->tx_qmap[txq->me];
- qg.txqs[0].txq_id = htole16(pf_q);
+ qg->txqs[0].txq_id = htole16(pf_q);
err = ice_setup_tx_ctx(txq, &tlan_ctx, pf_q);
if (err)
- return err;
+ goto free_txqg;
- ice_set_ctx((u8 *)&tlan_ctx, qg.txqs[0].txq_ctx,
+ ice_set_ctx(hw, (u8 *)&tlan_ctx, qg->txqs[0].txq_ctx,
ice_tlan_ctx_info);
status = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0,
- i, 1, &qg, sizeof(qg), NULL);
+ i, 1, qg, qg_size, NULL);
if (status) {
device_printf(dev,
"Failed to set LAN Tx queue context, err %s aq_err %s\n",
ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
- return (ENODEV);
+ err = ENODEV;
+ goto free_txqg;
}
/* Keep track of the Tx queue TEID */
- if (pf_q == le16toh(qg.txqs[0].txq_id))
- txq->q_teid = le32toh(qg.txqs[0].q_teid);
+ if (pf_q == le16toh(qg->txqs[0].txq_id))
+ txq->q_teid = le32toh(qg->txqs[0].q_teid);
}
- return (0);
+free_txqg:
+ free(qg, M_ICE);
+
+ return (err);
}
/**
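
ice_cfg_vsi_for_tx() gets the same treatment, sizing the queue group buffer with ice_struct_size() for the trailing per-queue array instead of embedding a fixed struct on the stack. Assuming ice_struct_size() follows the common struct_size() idiom (illustrative definition; the real macro may differ, e.g. by adding overflow checks):

	#define ice_struct_size(ptr, field, num) \
		(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))

	qg_size = ice_struct_size(qg, txqs, 1);	/* header + one txq entry */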
@@ -2196,9 +2206,9 @@
static void
ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf)
{
- struct ice_nvm_info *nvm = &hw->nvm;
- struct ice_orom_info *orom = &nvm->orom;
- struct ice_netlist_ver_info *netlist_ver = &hw->netlist_ver;
+ struct ice_nvm_info *nvm = &hw->flash.nvm;
+ struct ice_orom_info *orom = &hw->flash.orom;
+ struct ice_netlist_info *netlist = &hw->flash.netlist;
/* Note that the netlist versions are stored in packed Binary Coded
* Decimal format. The use of '%x' will correctly display these as
@@ -2210,10 +2220,10 @@
"fw %u.%u.%u api %u.%u nvm %x.%02x etid %08x netlist %x.%x.%x-%x.%x.%x.%04x oem %u.%u.%u",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
hw->api_maj_ver, hw->api_min_ver,
- nvm->major_ver, nvm->minor_ver, nvm->eetrack,
- netlist_ver->major, netlist_ver->minor,
- netlist_ver->type >> 16, netlist_ver->type & 0xFFFF,
- netlist_ver->rev, netlist_ver->cust_ver, netlist_ver->hash,
+ nvm->major, nvm->minor, nvm->eetrack,
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver, netlist->hash,
orom->major, orom->build, orom->patch);
}
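
As the retained comment notes, netlist versions are stored in packed BCD, which is why the format string uses %x. A quick illustration:

	u8 minor = 0x10;	/* packed BCD encoding of decimal 10 */
	printf("%x\n", minor);	/* prints "10", as intended */
	printf("%u\n", minor);	/* prints "16", which would be wrong */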
@@ -2343,6 +2353,10 @@
ICE_PF_STAT40(GLPRT_UPTC, eth.tx_unicast);
ICE_PF_STAT40(GLPRT_MPTC, eth.tx_multicast);
ICE_PF_STAT40(GLPRT_BPTC, eth.tx_broadcast);
+ /* This stat register doesn't have an lport */
+ ice_stat_update32(hw, PRTRPB_RDPC,
+ sc->stats.offsets_loaded,
+ &prev_ps->eth.rx_discards, &cur_ps->eth.rx_discards);
ICE_PF_STAT32(GLPRT_TDOLD, tx_dropped_link_down);
ICE_PF_STAT40(GLPRT_PRC64, rx_size_64);
@@ -2808,6 +2822,7 @@
/**
* ice_intersect_media_types_with_caps - Restrict input AQ PHY flags
* @sc: driver private structure
+ * @sysctl_speeds: current SW configuration of PHY types
* @phy_type_low: input/output flag set for low PHY types
* @phy_type_high: input/output flag set for high PHY types
*
@@ -2819,34 +2834,101 @@
* mode
*/
static int
-ice_intersect_media_types_with_caps(struct ice_softc *sc, u64 *phy_type_low,
- u64 *phy_type_high)
+ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds,
+ u64 *phy_type_low, u64 *phy_type_high)
{
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ struct ice_port_info *pi = sc->hw.port_info;
device_t dev = sc->dev;
enum ice_status status;
+ u64 temp_phy_low, temp_phy_high;
+ u64 final_phy_low, final_phy_high;
+ u16 topo_speeds;
- u64 new_phy_low, new_phy_high;
-
- status = ice_get_phy_types(sc, &new_phy_low, &new_phy_high);
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ &pcaps, NULL);
if (status != ICE_SUCCESS) {
- /* Function already prints appropriate error message */
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
return (EIO);
}
- ice_apply_supported_speed_filter(&new_phy_low, &new_phy_high);
+ final_phy_low = le64toh(pcaps.phy_type_low);
+ final_phy_high = le64toh(pcaps.phy_type_high);
- new_phy_low &= *phy_type_low;
- new_phy_high &= *phy_type_high;
+ topo_speeds = ice_aq_phy_types_to_sysctl_speeds(final_phy_low,
+ final_phy_high);
- if (new_phy_low == 0 && new_phy_high == 0) {
+ /*
+ * If the user specifies a subset of speeds the media is already
+ * capable of supporting, then we're good to go.
+ */
+ if ((sysctl_speeds & topo_speeds) == sysctl_speeds)
+ goto intersect_final;
+
+ temp_phy_low = final_phy_low;
+ temp_phy_high = final_phy_high;
+ /*
+ * Otherwise, we'll have to use the superset if Lenient Mode is
+ * supported.
+ */
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) {
+ /*
+ * Start with masks that _don't_ include the PHY types
+ * discovered by the TOPO_CAP.
+ */
+ ice_sysctl_speeds_to_aq_phy_types(topo_speeds, &final_phy_low,
+ &final_phy_high);
+ final_phy_low = ~final_phy_low;
+ final_phy_high = ~final_phy_high;
+
+ /* Get the PHY types the NVM says we can support */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP,
+ &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps (NVM_CAP) failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
+ return (status);
+ }
+
+ /*
+ * Clear out the unsupported PHY types, including those
+ * from TOPO_CAP.
+ */
+ final_phy_low &= le64toh(pcaps.phy_type_low);
+ final_phy_high &= le64toh(pcaps.phy_type_high);
+ /*
+ * Include PHY types from TOPO_CAP (which may be a subset
+ * of the types the NVM specifies).
+ */
+ final_phy_low |= temp_phy_low;
+ final_phy_high |= temp_phy_high;
+ }
+
+intersect_final:
+
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE))
+ ice_apply_supported_speed_filter(&final_phy_low, &final_phy_high);
+
+ ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &temp_phy_low,
+ &temp_phy_high);
+
+ final_phy_low &= temp_phy_low;
+ final_phy_high &= temp_phy_high;
+
+ if (final_phy_low == 0 && final_phy_high == 0) {
device_printf(dev,
"The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
return (EINVAL);
}
/* Overwrite input phy_type values and return */
- *phy_type_low = new_phy_low;
- *phy_type_high = new_phy_high;
+ *phy_type_low = final_phy_low;
+ *phy_type_high = final_phy_high;
return (0);
}
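
Condensed to one 64-bit mask (the driver applies the same algebra to both phy_type_low and phy_type_high), the rewritten intersection is roughly the following, with all names illustrative:

	u64 allowed = topo_caps;			/* PHY types the media reports */
	if (lenient_mode)
		allowed |= nvm_caps & ~topo_caps;	/* widen with NVM-only types */
	allowed &= requested;				/* the user's sysctl speed selection */
	if (allowed == 0)
		return (EINVAL);			/* speed unsupported by the media */

This is a simplification: the real code first expands the TOPO_CAP speeds back into PHY-type masks before complementing, so lenient mode adds NVM types whose speeds the media did not report, rather than raw NVM bits.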
@@ -2859,7 +2941,7 @@
*
* Retrieves a suitable set of PHY type flags to use for an "auto" speed
* setting by either using the NVM default overrides for speed, or retrieving
- * a default from the adapter using Get PHY capabilities in TOPO_CAPS mode.
+ * a default from the adapter using Get PHY capabilities in TOPO_CAPS mode.
*
* @returns 0 on success or EIO on AQ command failure
*/
@@ -2960,8 +3042,8 @@
/* Function already prints appropriate error message */
return (error);
} else {
- ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &phy_low, &phy_high);
- error = ice_intersect_media_types_with_caps(sc, &phy_low, &phy_high);
+ error = ice_intersect_media_types_with_caps(sc, sysctl_speeds,
+ &phy_low, &phy_high);
if (error)
/* Function already prints appropriate error message */
return (error);
@@ -2976,7 +3058,7 @@
cfg.phy_type_low = phy_low;
cfg.phy_type_high = phy_high;
- cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
if (status != ICE_SUCCESS) {
@@ -3752,9 +3834,10 @@
return (EIO);
}
ice_aq_set_dcb_parameters(hw, true, NULL);
- hw->port_info->is_sw_lldp = true;
+ hw->port_info->qos_cfg.is_sw_lldp = true;
ice_add_rx_lldp_filter(sc);
} else {
+ ice_del_rx_lldp_filter(sc);
retry_start_lldp:
status = ice_aq_start_lldp(hw, true, NULL);
if (status) {
@@ -3778,8 +3861,7 @@
return (EIO);
}
}
- hw->port_info->is_sw_lldp = false;
- ice_del_rx_lldp_filter(sc);
+ hw->port_info->qos_cfg.is_sw_lldp = false;
}
return (error);
@@ -3977,8 +4059,9 @@
* Adds statistics sysctls for the ethernet statistics of the MAC or a VSI.
* Will add them under the parent node specified.
*
- * Note that rx_discards and tx_errors are only meaningful for VSIs and not
- * the global MAC/PF statistics, so they are not included here.
+ * Note that tx_errors is only meaningful for VSIs and not the global MAC/PF
+ * statistics, so it is not included here. Similarly, rx_discards has different
+ * descriptions for VSIs and MAC/PF stats, so it is also not included here.
*/
void
ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
@@ -4195,7 +4278,7 @@
SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_discards",
CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards,
- 0, "Discarded Rx Packets");
+ 0, "Discarded Rx Packets (see rx_errors or rx_no_desc)");
SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_errors",
CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_errors,
@@ -4256,6 +4339,8 @@
{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
{&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"},
+ {&stats->eth.rx_discards, "rx_discards",
+ "Discarded Rx Packets by Port (shortage of storage space)"},
/* Packet Transmission Stats */
{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
@@ -6652,7 +6737,7 @@
break;
}
- reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x14;
+ reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x13;
switch (reg) {
case ice_pcie_speed_2_5GT:
@@ -6845,9 +6930,9 @@
return;
}
- hw->port_info->dcbx_status = ice_get_dcbx_status(hw);
- if (hw->port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
- hw->port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
+ hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw);
+ if (hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_DONE &&
+ hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
/*
* Start DCBX agent, but not LLDP. The return value isn't
* checked here because a more detailed dcbx agent status is
@@ -6856,7 +6941,7 @@
ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL);
}
- /* This sets hw->port_info->is_sw_lldp */
+ /* This sets hw->port_info->qos_cfg.is_sw_lldp */
status = ice_init_dcb(hw, true);
/* If there is an error, then FW LLDP is not in a usable state */
@@ -6871,10 +6956,10 @@
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
}
- hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
+ hw->port_info->qos_cfg.dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
}
- switch (hw->port_info->dcbx_status) {
+ switch (hw->port_info->qos_cfg.dcbx_status) {
case ICE_DCBX_STATUS_DIS:
ice_debug(hw, ICE_DBG_DCB, "DCBX disabled\n");
break;
@@ -6889,11 +6974,9 @@
}
/* LLDP disabled in FW */
- if (hw->port_info->is_sw_lldp) {
+ if (hw->port_info->qos_cfg.is_sw_lldp) {
ice_add_rx_lldp_filter(sc);
device_printf(dev, "Firmware LLDP agent disabled\n");
- } else {
- ice_del_rx_lldp_filter(sc);
}
}
@@ -7117,6 +7200,25 @@
device_t dev = sc->dev;
enum ice_status status;
int err;
+ u16 vsi_num;
+
+ /*
+ * If FW is new enough, use a direct AQ command to perform the filter
+ * addition.
+ */
+ if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
+ vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+ status = ice_lldp_fltr_add_remove(hw, vsi_num, true);
+ if (status) {
+ device_printf(dev,
+ "Failed to add Rx LLDP filter, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ } else
+ ice_set_state(&sc->state,
+ ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER);
+ return;
+ }
INIT_LIST_HEAD(&ethertype_list);
@@ -7132,13 +7234,17 @@
}
status = ice_add_eth_mac(hw, &ethertype_list);
- if (status == ICE_ERR_ALREADY_EXISTS) {
- ; /* Don't complain if we try to add a filter that already exists */
- } else if (status) {
+ if (status && status != ICE_ERR_ALREADY_EXISTS) {
device_printf(dev,
"Failed to add Rx LLDP filter, err %s aq_err %s\n",
ice_status_str(status),
ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ /*
+ * If status == ICE_ERR_ALREADY_EXISTS, we won't treat an
+ * already existing filter as an error case.
+ */
+ ice_set_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER);
}
free_ethertype_list:
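
Both the add and remove paths now prefer the direct LLDP filter control AQ command when the firmware supports it, falling back to a generic ethertype switch filter otherwise. Gates like ice_fw_supports_lldp_fltr_ctrl() typically compare the firmware API version against a cutoff; a sketch of the shape (the thresholds here are placeholders, not the driver's actual cutoff):

	static bool
	fw_supports_feature(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
	{
		if (hw->fw_maj_ver != maj)
			return (hw->fw_maj_ver > maj);
		if (hw->fw_min_ver != min)
			return (hw->fw_min_ver > min);
		return (hw->fw_patch >= patch);
	}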
@@ -7162,6 +7268,31 @@
device_t dev = sc->dev;
enum ice_status status;
int err;
+ u16 vsi_num;
+
+ /*
+ * Only in the scenario where the driver added the filter during
+ * this session (while the driver was loaded) would we be able to
+ * delete this filter.
+ */
+ if (!ice_test_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER))
+ return;
+
+ /*
+ * If FW is new enough, use a direct AQ command to perform the filter
+ * removal.
+ */
+ if (ice_fw_supports_lldp_fltr_ctrl(hw)) {
+ vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
+ status = ice_lldp_fltr_add_remove(hw, vsi_num, false);
+ if (status) {
+ device_printf(dev,
+ "Failed to remove Rx LLDP filter, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+ return;
+ }
INIT_LIST_HEAD(&ethertype_list);
@@ -7693,7 +7824,6 @@
struct ice_hw *hw = &sc->hw;
int error = 0, retries = 0;
enum ice_status status;
- u16 lport;
if (length > 16)
return (EINVAL);
@@ -7704,11 +7834,8 @@
if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
return (ENXIO);
- /* Set bit to indicate lport value is valid */
- lport = hw->port_info->lport | (0x1 << 8);
-
do {
- status = ice_aq_sff_eeprom(hw, lport, dev_addr,
+ status = ice_aq_sff_eeprom(hw, 0, dev_addr,
offset, 0, 0, data, length,
false, NULL);
if (!status) {
@@ -7998,3 +8125,50 @@
return (ICE_SUCCESS);
}
+
+/**
+ * ice_set_default_local_lldp_mib - Set Local LLDP MIB to default settings
+ * @sc: device softc structure
+ *
+ * This function needs to be called after link up; it makes sure the FW
+ * has certain PFC/DCB settings. This is intended to workaround a FW behavior
+ * where these settings seem to be cleared on link up.
+ */
+void
+ice_set_default_local_lldp_mib(struct ice_softc *sc)
+{
+ struct ice_dcbx_cfg *dcbcfg;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_port_info *pi;
+ device_t dev = sc->dev;
+ enum ice_status status;
+
+ pi = hw->port_info;
+ dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+
+ /* This value is only 3 bits; 8 TCs maps to 0 */
+ u8 maxtcs = hw->func_caps.common_cap.maxtc & ICE_IEEE_ETS_MAXTC_M;
+
+ /**
+ * Setup the default settings used by the driver for the Set Local
+ * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no
+ * PFC).
+ */
+ memset(dcbcfg, 0, sizeof(*dcbcfg));
+ dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.tcbwtable[0] = 100;
+ dcbcfg->etscfg.maxtcs = maxtcs;
+ dcbcfg->etsrec.willing = 1;
+ dcbcfg->etsrec.tcbwtable[0] = 100;
+ dcbcfg->etsrec.maxtcs = maxtcs;
+ dcbcfg->pfc.willing = 1;
+ dcbcfg->pfc.pfccap = maxtcs;
+
+ status = ice_set_dcb_cfg(pi);
+
+ if (status)
+ device_printf(dev,
+ "Error setting Local LLDP MIB: %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+}
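
A hypothetical call site, per the comment above (link_is_up stands in for the driver's actual link state check):

	if (link_is_up)
		ice_set_default_local_lldp_mib(sc);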
Index: sys/dev/ice/ice_nvm.h
===================================================================
--- sys/dev/ice/ice_nvm.h
+++ sys/dev/ice/ice_nvm.h
@@ -135,9 +135,16 @@
enum ice_status
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
enum ice_status
+ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command, u8 command_flags,
+ struct ice_sq_cd *cd);
+enum ice_status
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd);
enum ice_status
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
u16 elem_count, struct ice_sq_cd *cd);
+enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
+enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
#endif /* _ICE_NVM_H_ */
Index: sys/dev/ice/ice_nvm.c
===================================================================
--- sys/dev/ice/ice_nvm.c
+++ sys/dev/ice/ice_nvm.c
@@ -86,7 +86,7 @@
*
* Reads a portion of the NVM, as a flat memory space. This function correctly
* breaks read requests across Shadow RAM sectors and ensures that no single
- * read request exceeds the maximum 4Kb read for a single AdminQ command.
+ * read request exceeds the maximum 4KB read for a single AdminQ command.
*
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
@@ -105,19 +105,18 @@
*length = 0;
/* Verify the length of the read if this is for the Shadow RAM */
- if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
- ice_debug(hw, ICE_DBG_NVM,
- "NVM error: requested data is beyond Shadow RAM limit\n");
+ if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
+ ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM;
}
do {
u32 read_size, sector_offset;
- /* ice_aq_read_nvm cannot read more than 4Kb at a time.
+ /* ice_aq_read_nvm cannot read more than 4KB at a time.
* Additionally, a read from the Shadow RAM may not cross over
* a sector boundary. Conveniently, the sector size is also
- * 4Kb.
+ * 4KB.
*/
sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
@@ -157,7 +156,7 @@
*
* Update the NVM using the admin queue commands (0x0703)
*/
-static enum ice_status
+enum ice_status
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
@@ -294,16 +293,14 @@
static enum ice_status
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
- if ((offset + words) > hw->nvm.sr_words) {
- ice_debug(hw, ICE_DBG_NVM,
- "NVM error: offset beyond SR lmt.\n");
+ if ((offset + words) > hw->flash.sr_words) {
+ ice_debug(hw, ICE_DBG_NVM, "NVM error: offset beyond SR lmt.\n");
return ICE_ERR_PARAM;
}
if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
/* We can access only up to 4KB (one sector), in one AQ write */
- ice_debug(hw, ICE_DBG_NVM,
- "NVM error: tried to access %d words, limit is %d.\n",
+ ice_debug(hw, ICE_DBG_NVM, "NVM error: tried to access %d words, limit is %d.\n",
words, ICE_SR_SECTOR_SIZE_IN_WORDS);
return ICE_ERR_PARAM;
}
@@ -311,8 +308,7 @@
if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
(offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
/* A single access cannot spread over two sectors */
- ice_debug(hw, ICE_DBG_NVM,
- "NVM error: cannot spread over two sectors.\n");
+ ice_debug(hw, ICE_DBG_NVM, "NVM error: cannot spread over two sectors.\n");
return ICE_ERR_PARAM;
}
@@ -327,8 +323,7 @@
*
* Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
-enum ice_status
-ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
+enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
u32 bytes = sizeof(u16);
enum ice_status status;
@@ -341,7 +336,7 @@
* boundary
*/
status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
- (u8 *)&data_local, true);
+ (_FORCE_ u8 *)&data_local, true);
if (status)
return status;
@@ -393,7 +388,7 @@
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- /* ice_read_flat_nvm takes into account the 4Kb AdminQ and Shadow RAM
+ /* ice_read_flat_nvm takes into account the 4KB AdminQ and Shadow RAM
* sector restrictions necessary when reading from the NVM.
*/
status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
@@ -420,7 +415,7 @@
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- if (hw->nvm.blank_nvm_mode)
+ if (hw->flash.blank_nvm_mode)
return ICE_SUCCESS;
return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
@@ -436,7 +431,7 @@
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
- if (hw->nvm.blank_nvm_mode)
+ if (hw->flash.blank_nvm_mode)
return;
ice_release_res(hw, ICE_NVM_RES_ID);
@@ -569,16 +564,14 @@
*/
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
- ice_debug(hw, ICE_DBG_INIT,
- "Buffer too small for PBA data.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
return ICE_ERR_PARAM;
}
for (i = 0; i < pba_size; i++) {
status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
if (status != ICE_SUCCESS) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read PBA Block word %d.\n", i);
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
return status;
}
@@ -590,25 +583,63 @@
return status;
}
+/**
+ * ice_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ */
+static enum ice_status
+ice_get_nvm_ver_info(struct ice_hw *hw, struct ice_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ enum ice_status status;
+
+ status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read DEV starter version.\n");
+ return status;
+ }
+ nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+ nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
+
+ status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK lo.\n");
+ return status;
+ }
+ status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK hi.\n");
+ return status;
+ }
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ return ICE_SUCCESS;
+}
+
/**
* ice_get_orom_ver_info - Read Option ROM version information
* @hw: pointer to the HW struct
+ * @orom: pointer to Option ROM info structure
*
* Read the Combo Image version data from the Boot Configuration TLV and fill
* in the option ROM version data.
*/
-static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
+static enum ice_status
+ice_get_orom_ver_info(struct ice_hw *hw, struct ice_orom_info *orom)
{
u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
- struct ice_orom_info *orom = &hw->nvm.orom;
enum ice_status status;
u32 combo_ver;
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR);
if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read Boot Configuration Block TLV.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n");
return status;
}
@@ -616,8 +647,7 @@
* (Combo Image Version High and Combo Image Version Low)
*/
if (boot_cfg_tlv_len < 2) {
- ice_debug(hw, ICE_DBG_INIT,
- "Invalid Boot Configuration Block TLV size.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n");
return ICE_ERR_INVAL_SIZE;
}
@@ -646,6 +676,88 @@
return ICE_SUCCESS;
}
+/**
+ * ice_get_netlist_ver_info
+ * @hw: pointer to the HW struct
+ * @ver: pointer to netlist version info structure
+ *
+ * Get the netlist version information
+ */
+enum ice_status
+ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *ver)
+{
+ enum ice_status ret;
+ u32 id_blk_start;
+ __le16 raw_data;
+ u16 data, i;
+ u16 *buff;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+ buff = (u16 *)ice_calloc(hw, ICE_AQC_NVM_NETLIST_ID_BLK_LEN,
+ sizeof(*buff));
+ if (!buff) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto exit_no_mem;
+ }
+
+ /* read module length */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+
+ data = LE16_TO_CPU(raw_data);
+ /* exit if length is = 0 */
+ if (!data)
+ goto exit_error;
+
+ /* read node count */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+ data = LE16_TO_CPU(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
+
+ /* netlist ID block starts from offset 4 + node count * 2 */
+ id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
+
+ /* read the entire netlist ID block */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ id_blk_start * 2,
+ ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
+ false, NULL);
+ if (ret)
+ goto exit_error;
+
+ for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
+ buff[i] = LE16_TO_CPU(((_FORCE_ __le16 *)buff)[i]);
+
+ ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
+ ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
+ ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
+ ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
+ /* Read the left most 4 bytes of SHA */
+ ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
+
+exit_error:
+ ice_free(hw, buff);
+exit_no_mem:
+ ice_release_nvm(hw);
+ return ret;
+}
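/* Note: the offsets used above are 16-bit word offsets, while
 * ice_aq_read_nvm() takes byte offsets, hence the "* 2" on each read.
 * For example, with a node count of 8 the ID block starts at word
 * 4 + 8 * 2 = 20, so the read above passes byte offset 20 * 2 = 40.
 */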
+
/**
* ice_discover_flash_size - Discover the available flash size.
* @hw: pointer to the HW struct
@@ -673,14 +785,12 @@
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == ICE_ERR_AQ_ERROR &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
- ice_debug(hw, ICE_DBG_NVM,
- "%s: New upper bound of %u bytes\n",
+ ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
status = ICE_SUCCESS;
max_size = offset;
} else if (!status) {
- ice_debug(hw, ICE_DBG_NVM,
- "%s: New lower bound of %u bytes\n",
+ ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
__func__, offset);
min_size = offset;
} else {
@@ -689,10 +799,9 @@
}
}
- ice_debug(hw, ICE_DBG_NVM,
- "Predicted flash size is %u bytes\n", max_size);
+ ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size);
- hw->nvm.flash_size = max_size;
+ hw->flash.flash_size = max_size;
err_read_flat_nvm:
ice_release_nvm(hw);
@@ -700,6 +809,151 @@
return status;
}
+/**
+ * ice_read_sr_pointer - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ */
+static enum ice_status
+ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
+{
+ enum ice_status status;
+ u16 value;
+
+ status = ice_read_sr_word(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & ICE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~ICE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
+ else
+ *pointer = value * 2;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ */
+static enum ice_status
+ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
+{
+ enum ice_status status;
+ u16 value;
+
+ status = ice_read_sr_word(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * 4 * 1024;
+
+ return ICE_SUCCESS;
+}
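/* Worked examples of the decodings above (the highest bit of a 16-bit
 * pointer word, 0x8000, selects 4KB-sector units):
 *   pointer word 0x8002: (0x8002 & 0x7FFF) * 4 * 1024 = 8192 bytes
 *   pointer word 0x0100: word units, 0x0100 * 2 = 512 bytes
 *   area size word 0x0004: always 4KB units, 4 * 4 * 1024 = 16384 bytes
 */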
+
+/**
+ * ice_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ice_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ */
+static enum ice_status
+ice_determine_active_flash_banks(struct ice_hw *hw)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ enum ice_status status;
+ u16 ctrl_word;
+
+ status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read the Shadow RAM control word\n");
+ return status;
+ }
+
+ /* Check that the control word indicates validity */
+ if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
+ ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
+ return ICE_ERR_CFG;
+ }
+
+ if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = ICE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = ICE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & ICE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = ICE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = ICE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & ICE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = ICE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = ICE_2ND_FLASH_BANK;
+
+ status = ice_read_sr_pointer(hw, ICE_SR_1ST_NVM_BANK_PTR, &banks->nvm_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank pointer\n");
+ return status;
+ }
+
+ status = ice_read_sr_area_size(hw, ICE_SR_NVM_BANK_SIZE, &banks->nvm_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank area size\n");
+ return status;
+ }
+
+ status = ice_read_sr_pointer(hw, ICE_SR_1ST_OROM_BANK_PTR, &banks->orom_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank pointer\n");
+ return status;
+ }
+
+ status = ice_read_sr_area_size(hw, ICE_SR_OROM_BANK_SIZE, &banks->orom_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank area size\n");
+ return status;
+ }
+
+ status = ice_read_sr_pointer(hw, ICE_SR_NETLIST_BANK_PTR, &banks->netlist_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank pointer\n");
+ return status;
+ }
+
+ status = ice_read_sr_area_size(hw, ICE_SR_NETLIST_BANK_SIZE, &banks->netlist_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank area size\n");
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
+
/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
@@ -709,8 +963,7 @@
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
- struct ice_nvm_info *nvm = &hw->nvm;
- u16 eetrack_lo, eetrack_hi, ver;
+ struct ice_flash_info *flash = &hw->flash;
enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
@@ -724,78 +977,45 @@
sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
/* Switching to words (sr_size contains power of 2) */
- nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
+ flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, GLNVM_FLA);
if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
- nvm->blank_nvm_mode = false;
+ flash->blank_nvm_mode = false;
} else {
/* Blank programming mode */
- nvm->blank_nvm_mode = true;
- ice_debug(hw, ICE_DBG_NVM,
- "NVM init error: unsupported blank mode.\n");
+ flash->blank_nvm_mode = true;
+ ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
return ICE_ERR_NVM_BLANK_MODE;
}
- status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to read DEV starter version.\n");
- return status;
- }
- nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
- nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
-
- status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
- return status;
- }
- status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
- return status;
- }
-
- nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
-
status = ice_discover_flash_size(hw);
if (status) {
- ice_debug(hw, ICE_DBG_NVM,
- "NVM init error: failed to discover flash size.\n");
+ ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
return status;
}
- switch (hw->device_id) {
- /* the following devices do not have boot_cfg_tlv yet */
- case ICE_DEV_ID_E822C_BACKPLANE:
- case ICE_DEV_ID_E822C_QSFP:
- case ICE_DEV_ID_E822C_10G_BASE_T:
- case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822C_SFP:
- case ICE_DEV_ID_E822L_BACKPLANE:
- case ICE_DEV_ID_E822L_SFP:
- case ICE_DEV_ID_E822L_10G_BASE_T:
- case ICE_DEV_ID_E822L_SGMII:
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
+ status = ice_determine_active_flash_banks(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to determine active flash banks.\n");
return status;
- default:
- break;
}
- status = ice_get_orom_ver_info(hw);
+ status = ice_get_nvm_ver_info(hw, &flash->nvm);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
+ return status;
+ }
+
+ status = ice_get_orom_ver_info(hw, &flash->orom);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
return status;
}
/* read the netlist version information */
- status = ice_get_netlist_ver_info(hw);
+ status = ice_get_netlist_ver_info(hw, &flash->netlist);
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
return ICE_SUCCESS;
@@ -929,7 +1149,7 @@
/* Calculate SW checksum that covers the whole 64kB shadow RAM
* except the VPD and PCIe ALT Auto-load modules
*/
- for (i = 0; i < hw->nvm.sr_words; i++) {
+ for (i = 0; i < hw->flash.sr_words; i++) {
/* Read SR page */
if ((i % ICE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS;
@@ -943,12 +1163,12 @@
if (i == ICE_SR_SW_CHECKSUM_WORD)
continue;
/* Skip VPD module (convert byte size to word count) */
- if ((i >= (u32)vpd_module) &&
- (i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS)))
+ if (i >= (u32)vpd_module &&
+ i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS))
continue;
/* Skip PCIe ALT module (convert byte size to word count) */
- if ((i >= (u32)pcie_alt_module) &&
- (i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS)))
+ if (i >= (u32)pcie_alt_module &&
+ i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS))
continue;
checksum_local += data[i % ICE_SR_SECTOR_SIZE_IN_WORDS];
@@ -1072,7 +1292,7 @@
{
/* The provided data_size must be at least as large as our NVM
* features structure. A larger size should not be treated as an
- * error, to allow future extensions to to the features structure to
+ * error, to allow future extensions to the features structure to
* work on older drivers.
*/
if (cmd->data_size < sizeof(struct ice_nvm_features))
@@ -1239,8 +1459,7 @@
break;
}
- ice_debug(hw, ICE_DBG_NVM,
- "NVM access: writing register %08x with value %08x\n",
+ ice_debug(hw, ICE_DBG_NVM, "NVM access: writing register %08x with value %08x\n",
cmd->offset, data->regval);
/* Write the data field to the specified register */
Index: sys/dev/ice/ice_protocol_type.h
===================================================================
--- sys/dev/ice/ice_protocol_type.h
+++ sys/dev/ice/ice_protocol_type.h
@@ -79,8 +79,10 @@
ICE_NON_TUN = 0,
ICE_SW_TUN_AND_NON_TUN,
ICE_SW_TUN_VXLAN_GPE,
- ICE_SW_TUN_GENEVE,
- ICE_SW_TUN_VXLAN,
+ ICE_SW_TUN_GENEVE, /* GENEVE matches only non-VLAN pkts */
+ ICE_SW_TUN_GENEVE_VLAN, /* GENEVE matches both VLAN and non-VLAN pkts */
+ ICE_SW_TUN_VXLAN, /* VXLAN matches only non-VLAN pkts */
+ ICE_SW_TUN_VXLAN_VLAN, /* VXLAN matches both VLAN and non-VLAN pkts */
ICE_SW_TUN_NVGRE,
ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
* and GENEVE
@@ -141,6 +143,7 @@
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
+#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
#define ICE_IPV4_IL_HW 33
@@ -160,7 +163,9 @@
#define ICE_MDID_SIZE 2
#define ICE_TUN_FLAG_MDID 21
+#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_TUN_FLAG_VLAN_MASK 0x01
#define ICE_TUN_FLAG_FV_IND 2
#define ICE_PROTOCOL_MAX_ENTRIES 16
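
The new ICE_TUN_FLAG_MDID_OFF define folds the metadata-ID-to-byte-offset arithmetic into one place: each MDID occupies ICE_MDID_SIZE (2) bytes, so metadata ID 21 sits at byte offset 2 * 21 = 42.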
Index: sys/dev/ice/ice_sched.h
===================================================================
--- sys/dev/ice/ice_sched.h
+++ sys/dev/ice/ice_sched.h
@@ -103,15 +103,15 @@
/* FW AQ command calls */
enum ice_status
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
- struct ice_aqc_rl_profile_generic_elem *buf,
- u16 buf_size, struct ice_sq_cd *cd);
+ struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
- struct ice_aqc_cfg_l2_node_cgd_data *buf, u16 buf_size,
+ struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
@@ -145,7 +145,7 @@
bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node);
enum ice_status
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
struct ice_sq_cd *cd);
/* Tx scheduler rate limiter functions */
Index: sys/dev/ice/ice_sched.c
===================================================================
--- sys/dev/ice/ice_sched.c
+++ sys/dev/ice/ice_sched.c
@@ -158,7 +158,7 @@
*/
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
@@ -178,8 +178,8 @@
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info)
{
+ struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
- struct ice_aqc_get_elem elem;
struct ice_sched_node *node;
enum ice_status status;
struct ice_hw *hw;
@@ -193,13 +193,12 @@
parent = ice_sched_find_node_by_teid(pi->root,
LE32_TO_CPU(info->parent_teid));
if (!parent) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Parent Node not found for parent_teid=0x%x\n",
+ ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
LE32_TO_CPU(info->parent_teid));
return ICE_ERR_PARAM;
}
- /* query the current node information from FW before additing it
+ /* query the current node information from FW before adding it
* to the SW DB
*/
status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
@@ -222,7 +221,7 @@
node->parent = parent;
node->tx_sched_layer = layer;
parent->children[parent->num_children++] = node;
- node->info = elem.generic[0];
+ node->info = elem;
return ICE_SUCCESS;
}
@@ -265,7 +264,7 @@
enum ice_status status;
u16 buf_size;
- buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
+ buf_size = ice_struct_size(buf, teid, num_nodes);
buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
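
The removed expression sized the buffer as the struct plus (num_nodes - 1) trailing TEIDs, compensating for a one-element teid[1] array; ice_struct_size() expresses the same allocation directly over a trailing array, which is why this hunk and the later ones pass num_nodes rather than num_nodes - 1. A standalone sketch of the idiom, assuming ice_struct_size() expands to an offsetof-based computation (the struct and macro below are illustrative stand-ins, not the driver's definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct delete_elem {                    /* stand-in for ice_aqc_delete_elem */
	uint16_t num_elems;
	uint32_t teid[];                /* flexible array member */
};

/* assumed shape of ice_struct_size(): header offset + n trailing elements */
#define struct_size_sketch(ptr, member, count) \
	(offsetof(__typeof__(*(ptr)), member) + \
	 sizeof((ptr)->member[0]) * (size_t)(count))

int main(void)
{
	struct delete_elem *buf;
	size_t buf_size = struct_size_sketch(buf, teid, 4);

	buf = malloc(buf_size);         /* header plus room for 4 TEIDs */
	if (!buf)
		return 1;
	buf->num_elems = 4;
	free(buf);
	return 0;
}
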
@@ -450,7 +449,7 @@
*/
static enum ice_status
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_conf_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
@@ -491,8 +490,7 @@
* Suspend scheduling elements (0x0409)
*/
static enum ice_status
-ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_suspend_resume_elem *buf,
+ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
@@ -512,8 +510,7 @@
* resume scheduling elements (0x040A)
*/
static enum ice_status
-ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
- struct ice_aqc_suspend_resume_elem *buf,
+ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
@@ -554,18 +551,17 @@
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
- struct ice_aqc_suspend_resume_elem *buf;
u16 i, buf_size, num_elem_ret = 0;
enum ice_status status;
+ __le32 *buf;
buf_size = sizeof(*buf) * num_nodes;
- buf = (struct ice_aqc_suspend_resume_elem *)
- ice_malloc(hw, buf_size);
+ buf = (__le32 *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
for (i = 0; i < num_nodes; i++)
- buf->teid[i] = CPU_TO_LE32(node_teids[i]);
+ buf[i] = CPU_TO_LE32(node_teids[i]);
if (suspend)
status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
@@ -627,18 +623,18 @@
/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
- * @opcode:opcode for add, query, or remove profile(s)
+ * @opcode: opcode for add, query, or remove profile(s)
* @num_profiles: the number of profiles
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @num_processed: number of processed add or remove profile(s) to return
* @cd: pointer to command details structure
*
- * Rl profile function to add, query, or remove profile(s)
+ * RL profile function to add, query, or remove profile(s)
*/
static enum ice_status
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
- u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
@@ -669,13 +665,11 @@
*/
static enum ice_status
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
- struct ice_aqc_rl_profile_generic_elem *buf,
- u16 buf_size, u16 *num_profiles_added,
- struct ice_sq_cd *cd)
+ struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+ u16 *num_profiles_added, struct ice_sq_cd *cd)
{
- return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
- num_profiles, buf,
- buf_size, num_profiles_added, cd);
+ return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
+ buf, buf_size, num_profiles_added, cd);
}
/**
@@ -690,8 +684,8 @@
*/
enum ice_status
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
- struct ice_aqc_rl_profile_generic_elem *buf,
- u16 buf_size, struct ice_sq_cd *cd)
+ struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
{
return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
num_profiles, buf, buf_size, NULL, cd);
@@ -710,13 +704,12 @@
*/
static enum ice_status
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
- struct ice_aqc_rl_profile_generic_elem *buf,
- u16 buf_size, u16 *num_profiles_removed,
- struct ice_sq_cd *cd)
+ struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+ u16 *num_profiles_removed, struct ice_sq_cd *cd)
{
return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
- num_profiles, buf,
- buf_size, num_profiles_removed, cd);
+ num_profiles, buf, buf_size,
+ num_profiles_removed, cd);
}
/**
@@ -732,7 +725,7 @@
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
- struct ice_aqc_rl_profile_generic_elem *buf;
+ struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
enum ice_status status;
u16 num_profiles = 1;
@@ -741,8 +734,7 @@
return ICE_ERR_IN_USE;
/* Safe to remove profile ID */
- buf = (struct ice_aqc_rl_profile_generic_elem *)
- &rl_info->profile;
+ buf = &rl_info->profile;
status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&num_profiles_removed, NULL);
if (status || num_profiles_removed != num_profiles)
@@ -763,22 +755,21 @@
static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
{
u16 ln;
+ struct ice_hw *hw = pi->hw;
- for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
struct ice_aqc_rl_profile_info *rl_prof_elem;
struct ice_aqc_rl_profile_info *rl_prof_tmp;
LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
- &pi->rl_prof_list[ln],
+ &hw->rl_prof_list[ln],
ice_aqc_rl_profile_info, list_entry) {
- struct ice_hw *hw = pi->hw;
enum ice_status status;
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
if (status) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Remove rl profile failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
/* On error, free mem required */
LIST_DEL(&rl_prof_elem->list_entry);
ice_free(hw, rl_prof_elem);
@@ -817,7 +808,7 @@
}
/**
- * ice_sched_clear_tx_topo - clears the schduler tree nodes
+ * ice_sched_clear_tx_topo - clears the scheduler tree nodes
* @pi: port information structure
*
* This function removes all the nodes from HW as well as from SW DB.
@@ -888,7 +879,7 @@
*/
enum ice_status
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
- struct ice_aqc_cfg_l2_node_cgd_data *buf,
+ struct ice_aqc_cfg_l2_node_cgd_elem *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
struct ice_aqc_cfg_l2_node_cgd *cmd;
@@ -927,7 +918,7 @@
u16 buf_size;
u32 teid;
- buf_size = ice_struct_size(buf, generic, num_nodes - 1);
+ buf_size = ice_struct_size(buf, generic, num_nodes);
buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -965,8 +956,7 @@
for (i = 0; i < num_nodes; i++) {
status = ice_sched_add_node(pi, layer, &buf->generic[i]);
if (status != ICE_SUCCESS) {
- ice_debug(hw, ICE_DBG_SCHED,
- "add nodes in SW DB failed status =%d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
status);
break;
}
@@ -974,8 +964,7 @@
teid = LE32_TO_CPU(buf->generic[i].node_teid);
new_node = ice_sched_find_node_by_teid(parent, teid);
if (!new_node) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Node is missing for teid =%d\n", teid);
+ ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid);
break;
}
@@ -1299,7 +1288,7 @@
pi->port_state = ICE_SCHED_PORT_STATE_READY;
ice_init_lock(&pi->sched_lock);
for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- INIT_LIST_HEAD(&pi->rl_prof_list[i]);
+ INIT_LIST_HEAD(&hw->rl_prof_list[i]);
err_init_port:
if (status && pi->root) {
@@ -1332,8 +1321,7 @@
ice_release_lock(&pi->sched_lock);
if (!node)
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "Node not found for teid=0x%x\n", teid);
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid);
return node;
}
@@ -1469,6 +1457,53 @@
return false;
}
+/**
+ * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
+ * @pi: port information structure
+ * @vsi_node: software VSI handle
+ * @qgrp_node: first queue group node identified for scanning
+ * @owner: LAN or RDMA
+ *
+ * This function retrieves a free LAN or RDMA queue group node by scanning
+ * qgrp_node and its siblings for the queue group with the fewest number
+ * of queues currently assigned.
+ */
+static struct ice_sched_node *
+ice_sched_get_free_qgrp(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node,
+ struct ice_sched_node *qgrp_node, u8 owner)
+{
+ struct ice_sched_node *min_qgrp;
+ u8 min_children;
+
+ if (!qgrp_node)
+ return qgrp_node;
+ min_children = qgrp_node->num_children;
+ if (!min_children)
+ return qgrp_node;
+ min_qgrp = qgrp_node;
+	/* Scan all queue groups until we find a node that has fewer than the
+	 * current minimum number of children. This way all queue group nodes
+	 * get an equal share of queues and remain active, and the bandwidth
+	 * is distributed equally across all queues.
+	 */
+ while (qgrp_node) {
+ /* make sure the qgroup node is part of the VSI subtree */
+ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
+ if (qgrp_node->num_children < min_children &&
+ qgrp_node->owner == owner) {
+ /* replace the new min queue group node */
+ min_qgrp = qgrp_node;
+ min_children = min_qgrp->num_children;
+			/* break if it has no children */
+ if (!min_children)
+ break;
+ }
+ qgrp_node = qgrp_node->sibling;
+ }
+ return min_qgrp;
+}
+
/**
* ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
@@ -1482,7 +1517,7 @@
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
- struct ice_sched_node *vsi_node, *qgrp_node = NULL;
+ struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
@@ -1496,7 +1531,7 @@
vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI ID */
if (!vsi_node)
- goto lan_q_exit;
+ return NULL;
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
@@ -1509,8 +1544,8 @@
qgrp_node = qgrp_node->sibling;
}
-lan_q_exit:
- return qgrp_node;
+ /* Select the best queue group */
+ return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
/**
@@ -1583,7 +1618,7 @@
*/
static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
{
- struct ice_aqc_get_elem buf;
+ struct ice_aqc_txsched_elem_data buf;
enum ice_status status;
u32 node_teid;
@@ -1592,7 +1627,7 @@
if (status != ICE_SUCCESS)
return false;
- if (memcmp(buf.generic, &node->info, sizeof(*buf.generic))) {
+ if (memcmp(&buf, &node->info, sizeof(buf))) {
ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
node_teid);
return false;
@@ -1960,8 +1995,7 @@
* This function removes single aggregator VSI info entry from
* aggregator list.
*/
-static void
-ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *atmp;
@@ -2037,8 +2071,7 @@
continue;
if (ice_sched_is_leaf_node_present(vsi_node)) {
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "VSI has leaf nodes in TC %d\n", i);
+ ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
status = ICE_ERR_IN_USE;
goto exit_sched_rm_vsi_cfg;
}
@@ -2122,7 +2155,7 @@
*/
enum ice_status
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_node_to_root *cmd;
@@ -2142,7 +2175,7 @@
* This function validates aggregator ID. The function returns info if
* aggregator ID is present in list otherwise it returns null.
*/
-static struct ice_sched_agg_info*
+static struct ice_sched_agg_info *
ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
{
struct ice_sched_agg_info *agg_info;
@@ -2249,6 +2282,7 @@
struct ice_sched_node *node;
u16 i, grps_movd = 0;
struct ice_hw *hw;
+ u16 buf_len;
hw = pi->hw;
@@ -2256,11 +2290,12 @@
return ICE_ERR_PARAM;
/* Does parent have enough space */
- if (parent->num_children + num_items >=
+ if (parent->num_children + num_items >
hw->max_children[parent->tx_sched_layer])
return ICE_ERR_AQ_FULL;
- buf = (struct ice_aqc_move_elem *)ice_malloc(hw, sizeof(*buf));
+ buf_len = ice_struct_size(buf, teid, 1);
+ buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -2275,7 +2310,7 @@
buf->hdr.dest_parent_teid = parent->info.node_teid;
buf->teid[0] = node->info.node_teid;
buf->hdr.num_elems = CPU_TO_LE16(1);
- status = ice_aq_move_sched_elems(hw, 1, buf, sizeof(*buf),
+ status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL);
if (status && grps_movd != 1) {
status = ICE_ERR_CFG;
@@ -2324,6 +2359,10 @@
if (!vsi_node)
return ICE_ERR_DOES_NOT_EXIST;
+ /* Is this VSI already part of given aggregator? */
+ if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
+ return ICE_SUCCESS;
+
aggl = ice_sched_get_agg_layer(pi->hw);
vsil = ice_sched_get_vsi_layer(pi->hw);
@@ -2725,7 +2764,7 @@
* The function returns aggregator VSI info based on VSI handle. This function
* needs to be called with scheduler lock held.
*/
-static struct ice_sched_agg_vsi_info*
+static struct ice_sched_agg_vsi_info *
ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
@@ -2747,7 +2786,7 @@
* VSI has in this case a different aggregator than the default one. This
* function needs to be called with scheduler lock held.
*/
-static struct ice_sched_agg_info*
+static struct ice_sched_agg_info *
ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_sched_agg_info *agg_info;
@@ -2841,41 +2880,31 @@
if (status)
break;
- if (agg_id != ICE_DFLT_AGG_ID)
- ice_set_bit(tc, agg_vsi_info->tc_bitmap);
- else
- ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
- }
- /* If VSI moved back to default aggregator, delete agg_vsi_info. */
- if (!ice_is_any_bit_set(agg_vsi_info->tc_bitmap,
- ICE_MAX_TRAFFIC_CLASS)) {
- LIST_DEL(&agg_vsi_info->list_entry);
- ice_free(hw, agg_vsi_info);
+ ice_set_bit(tc, agg_vsi_info->tc_bitmap);
}
return status;
}
/**
* ice_sched_rm_unused_rl_prof - remove unused RL profile
- * @pi: port information structure
+ * @hw: pointer to the hardware structure
*
* This function removes unused rate limit profiles from the HW and
* SW DB. The caller needs to hold scheduler lock.
*/
-static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
+static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
{
u16 ln;
- for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ for (ln = 0; ln < hw->num_tx_sched_layers; ln++) {
struct ice_aqc_rl_profile_info *rl_prof_elem;
struct ice_aqc_rl_profile_info *rl_prof_tmp;
LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
- &pi->rl_prof_list[ln],
+ &hw->rl_prof_list[ln],
ice_aqc_rl_profile_info, list_entry) {
- if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "Removed rl profile\n");
+ if (!ice_sched_del_rl_profile(hw, rl_prof_elem))
+ ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n");
}
}
}
@@ -2886,7 +2915,7 @@
* @node: pointer to node
* @info: node info to update
*
- * It updates the HW DB, and local SW DB of node. It updates the scheduling
+ * Update the HW DB and the local SW DB of the node. Update the scheduling
* parameters of node from argument info data buffer (Info->data buf) and
* returns success or error on config sched element failure. The caller
* needs to hold scheduler lock.
@@ -2895,18 +2924,18 @@
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
- struct ice_aqc_conf_elem buf;
+ struct ice_aqc_txsched_elem_data buf;
enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
- buf.generic[0] = *info;
+ buf = *info;
/* Parent TEID is reserved field in this aq call */
- buf.generic[0].parent_teid = 0;
+ buf.parent_teid = 0;
/* Element type is reserved field in this aq call */
- buf.generic[0].data.elem_type = 0;
+ buf.data.elem_type = 0;
/* Flags is reserved field in this aq call */
- buf.generic[0].data.flags = 0;
+ buf.data.flags = 0;
/* Update HW DB */
/* Configure element node */
@@ -3021,7 +3050,7 @@
ice_free(pi->hw, agg_info);
/* Remove unused RL profile IDs from HW and SW DB */
- ice_sched_rm_unused_rl_prof(pi);
+ ice_sched_rm_unused_rl_prof(pi->hw);
exit_ice_rm_agg_cfg:
ice_release_lock(&pi->sched_lock);
@@ -3107,8 +3136,7 @@
*
* Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
@@ -3127,8 +3155,7 @@
*
* Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
@@ -3153,8 +3180,7 @@
*
* Save or clear shared bandwidth (BW) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
{
if (bw == ICE_SCHED_DFLT_BW) {
ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
@@ -3216,8 +3242,7 @@
*
* Save or clear priority (prio) in the passed param bw_t_info.
*/
-static void
-ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
+static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
{
bw_t_info->generic = prio;
if (bw_t_info->generic)
@@ -3499,7 +3524,6 @@
* @pi: port information structure
* @num_qs: number of VSI queues
* @q_ids: queue IDs array
- * @q_ids: queue IDs array
* @q_prio: queue priority array
*
* This function configures the queue node priority (Sibling Priority) of the
@@ -3847,7 +3871,7 @@
/**
* ice_sched_add_rl_profile - add RL profile
- * @pi: port information structure
+ * @hw: pointer to the hardware structure
* @rl_type: type of rate limit BW - min, max, or shared
* @bw: bandwidth in Kbps - Kilo bits per sec
* @layer_num: specifies in which layer to create profile
@@ -3859,14 +3883,13 @@
* The caller needs to hold the scheduler lock.
*/
static struct ice_aqc_rl_profile_info *
-ice_sched_add_rl_profile(struct ice_port_info *pi,
- enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
+ u32 bw, u8 layer_num)
{
- struct ice_aqc_rl_profile_generic_elem *buf;
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
+ struct ice_aqc_rl_profile_elem *buf;
enum ice_status status;
- struct ice_hw *hw;
u8 profile_type;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
@@ -3885,13 +3908,12 @@
return NULL;
}
- if (!pi)
+ if (!hw)
return NULL;
- hw = pi->hw;
- LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
- rl_prof_elem->bw == bw)
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type && rl_prof_elem->bw == bw)
/* Return existing profile ID info */
return rl_prof_elem;
@@ -3913,8 +3935,7 @@
rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);
/* Create new entry in HW DB */
- buf = (struct ice_aqc_rl_profile_generic_elem *)
- &rl_prof_elem->profile;
+ buf = &rl_prof_elem->profile;
status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&profiles_added, NULL);
if (status || profiles_added != num_profiles)
@@ -3922,7 +3943,7 @@
/* Good entry - add in the list */
rl_prof_elem->prof_id_ref = 0;
- LIST_ADD(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
+ LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]);
return rl_prof_elem;
exit_add_rl_prof:
@@ -4101,7 +4122,7 @@
/**
* ice_sched_rm_rl_profile - remove RL profile ID
- * @pi: port information structure
+ * @hw: pointer to the hardware structure
* @layer_num: layer number where profiles are saved
* @profile_type: profile type like EIR, CIR, or SRL
* @profile_id: profile ID to remove
@@ -4111,7 +4132,7 @@
* scheduler lock.
*/
static enum ice_status
-ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
+ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
u16 profile_id)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
@@ -4120,19 +4141,19 @@
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
return ICE_ERR_PARAM;
/* Check the existing list for RL profile */
- LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num],
ice_aqc_rl_profile_info, list_entry)
- if (rl_prof_elem->profile.flags == profile_type &&
+ if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
+ profile_type &&
LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
profile_id) {
if (rl_prof_elem->prof_id_ref)
rl_prof_elem->prof_id_ref--;
/* Remove old profile ID from database */
- status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
+ status = ice_sched_del_rl_profile(hw, rl_prof_elem);
if (status && status != ICE_ERR_IN_USE)
- ice_debug(pi->hw, ICE_DBG_SCHED,
- "Remove rl profile failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break;
}
if (status == ICE_ERR_IN_USE)
@@ -4192,7 +4213,7 @@
old_id == ICE_SCHED_INVAL_PROF_ID)
return ICE_SUCCESS;
- return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
+ return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id);
}
/**
@@ -4261,7 +4282,7 @@
struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id;
- rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
+ rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
if (!rl_prof_info)
return status;
@@ -4283,9 +4304,9 @@
old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
return ICE_SUCCESS;
- return ice_sched_rm_rl_profile(pi, layer_num,
- rl_prof_info->profile.flags,
- old_id);
+ return ice_sched_rm_rl_profile(hw, layer_num,
+ rl_prof_info->profile.flags &
+ ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
/**
@@ -4312,7 +4333,7 @@
return ICE_ERR_PARAM;
hw = pi->hw;
/* Remove unused RL profile IDs from HW and SW DB */
- ice_sched_rm_unused_rl_prof(pi);
+ ice_sched_rm_unused_rl_prof(hw);
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (layer_num >= hw->num_tx_sched_layers)
@@ -5156,7 +5177,7 @@
return ICE_SUCCESS;
}
-/*
+/**
* ice_sched_replay_node_prio - re-configure node priority
* @hw: pointer to the HW struct
* @node: sched node to configure
@@ -5320,7 +5341,7 @@
ice_acquire_lock(&pi->sched_lock);
LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
- list_entry) {
+ list_entry)
/* replay aggregator (re-create aggregator node) */
if (!ice_cmp_bitmap(agg_info->tc_bitmap,
agg_info->replay_tc_bitmap,
@@ -5349,7 +5370,6 @@
ice_info(hw, "Replay agg bw [id=%d] failed\n",
agg_info->agg_id);
}
- }
ice_release_lock(&pi->sched_lock);
}
@@ -5378,14 +5398,33 @@
ice_release_lock(&pi->sched_lock);
}
+/**
+ * ice_sched_replay_root_node_bw - replay root node BW
+ * @pi: port information structure
+ *
+ * Replay root node BW settings.
+ */
+enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi)
+{
+ enum ice_status status = ICE_SUCCESS;
+
+ if (!pi->hw)
+ return ICE_ERR_PARAM;
+ ice_acquire_lock(&pi->sched_lock);
+
+ status = ice_sched_replay_node_bw(pi->hw, pi->root,
+ &pi->root_node_bw_t_info);
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
/**
* ice_sched_replay_tc_node_bw - replay TC node(s) BW
* @pi: port information structure
*
 * This function replays TC nodes.
*/
-enum ice_status
-ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
+enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
{
enum ice_status status = ICE_SUCCESS;
u8 tc;
@@ -5508,8 +5547,7 @@
* This function replays association of VSI to aggregator type nodes, and
* node bandwidth information.
*/
-enum ice_status
-ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_port_info *pi = hw->port_info;
enum ice_status status;
Index: sys/dev/ice/ice_sriov.h
===================================================================
--- sys/dev/ice/ice_sriov.h
+++ sys/dev/ice/ice_sriov.h
@@ -33,16 +33,30 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
-#include "ice_common.h"
+#include "ice_type.h"
+#include "ice_controlq.h"
+
+/* Defining the mailbox message threshold as 63 asynchronous
+ * pending messages. Normal VF functionality does not require
+ * sending more than 63 asynchronous pending messages.
+ */
+#define ICE_ASYNC_VF_MSG_THRESHOLD 63
enum ice_status
ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
enum ice_status v_retval, u8 *msg, u16 msglen,
struct ice_sq_cd *cd);
-
enum ice_status
ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd);
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+enum ice_status
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ u16 vf_id, bool *is_mal_vf);
+enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
+void ice_mbx_deinit_snapshot(struct ice_hw *hw);
+enum ice_status
+ice_mbx_report_malvf(struct ice_hw *hw, ice_bitmap_t *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf);
#endif /* _ICE_SRIOV_H_ */
Index: sys/dev/ice/ice_sriov.c
===================================================================
--- sys/dev/ice/ice_sriov.c
+++ sys/dev/ice/ice_sriov.c
@@ -31,7 +31,6 @@
/*$FreeBSD$*/
#include "ice_common.h"
-#include "ice_adminq_cmd.h"
#include "ice_sriov.h"
/**
@@ -191,3 +190,365 @@
return speed;
}
+
+/* The mailbox overflow detection algorithm helps to check if there
+ * is a possibility of a malicious VF transmitting too many MBX messages to the
+ * PF.
+ * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
+ * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
+ * The struct ice_mbx_snapshot helps to track and traverse a static window of
+ * messages within the mailbox queue while looking for a malicious VF.
+ *
+ * 2. When the caller starts processing its mailbox queue in response to an
+ * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
+ * the algorithm can be run for the first time for that interrupt. This can be
+ * done via ice_mbx_reset_snapshot().
+ *
+ * 3. For every message read by the caller from the MBX Queue, the caller must
+ * call the detection algorithm's entry function ice_mbx_vf_state_handler().
+ * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
+ * filled as it is required to be passed to the algorithm.
+ *
+ * 4. Every time a message is read from the MBX queue, a VFId is received which
+ * is passed to the state handler. The boolean output is_malvf of the state
+ * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
+ * whether this VF is malicious or not.
+ *
+ * 5. When a VF is identified to be malicious, the caller can send a message
+ * to the system administrator. The caller can invoke ice_mbx_report_malvf()
+ * to help determine if a malicious VF is to be reported or not. This function
+ * requires the caller to maintain a global bitmap to track all malicious VFs
+ * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
+ * to be malicious by ice_mbx_vf_state_handler().
+ *
+ * 6. The global bitmap maintained by PF can be cleared completely if PF is in
+ * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
+ * When a VF is shut down and brought back up, we assume that the new VF
+ * is not malicious and will therefore report it again if it is
+ * found to be malicious.
+ *
+ * 7. The function ice_mbx_reset_snapshot() is called to reset the information
+ * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ *
+ * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
+ * when driver is unloaded.
+ */
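
Steps 2-5 above amount to the following caller-side flow. This is a hedged sketch only: read_one_mbx_msg() and notify_admin() are hypothetical stand-ins for the caller's receive and reporting paths, while the ice_mbx_*() calls are the functions added below. The snapshot starts in (and returns to) the NEW_SNAPSHOT state, which performs the step-2 reset internally.

/* Illustrative caller-side sketch (not part of this patch). */
static void
example_handle_mbx_interrupt(struct ice_hw *hw, ice_bitmap_t *malvf_bitmap,
			     u16 bitmap_len)
{
	struct ice_mbx_data mbx_data;
	bool is_malvf, report;
	u16 vf_id;

	/* read_one_mbx_msg() is hypothetical: it is assumed to dequeue one
	 * message, fill mbx_data, and identify the sending VF.
	 */
	while (read_one_mbx_msg(hw, &mbx_data, &vf_id)) {
		/* Steps 3/4: run the state handler on every message read */
		if (ice_mbx_vf_state_handler(hw, &mbx_data, vf_id, &is_malvf))
			break;
		/* Step 5: report a newly detected malicious VF only once */
		if (is_malvf &&
		    !ice_mbx_report_malvf(hw, malvf_bitmap, bitmap_len,
					  vf_id, &report) && report)
			notify_admin(vf_id);	/* hypothetical */
	}
}
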
+#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
+/* Use the highest unsigned 16-bit value, 0xFFFF, to indicate that the
+ * max messages check must be ignored in the algorithm.
+ */
+#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+
+/**
+ * ice_mbx_traverse - Pass through mailbox snapshot
+ * @hw: pointer to the HW struct
+ * @new_state: new algorithm state
+ *
+ * Traversing the mailbox static snapshot without checking
+ * for malicious VFs.
+ */
+static void
+ice_mbx_traverse(struct ice_hw *hw,
+ enum ice_mbx_snapshot_state *new_state)
+{
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ u32 num_iterations;
+
+ snap_buf = &hw->mbx_snapshot.mbx_buf;
+
+	/* Because the mailbox buffer is circular, apply a mask
+	 * to the incremented iteration count.
+	 */
+ num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
+
+	/* Check either of the below conditions to exit snapshot traversal:
+	 * Condition-1: The number of iterations in the mailbox is equal to
+	 * the mailbox head, which indicates that we have reached the end of
+	 * the static snapshot.
+	 * Condition-2: If the maximum number of messages serviced in the
+	 * mailbox for a given interrupt is the highest possible value, there
+	 * is no need to compare against it. Otherwise, exit if the number of
+	 * messages processed is greater than or equal to the maximum number
+	 * of mailbox entries serviced in the current work item.
+ */
+ if (num_iterations == snap_buf->head ||
+ (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
+ ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
+ *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
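
The masked increment is what lets num_iterations be compared against head even after the ring wraps. A standalone sketch of the same wraparound walk, assuming a 10-bit ring mask for illustration (the driver masks with PF_MBX_ARQH_ARQH_M):

#include <stdint.h>
#include <stdio.h>

#define RING_MASK 0x3FF                 /* assumed 10-bit ring: 1024 entries */
#define RQ_DATA_MASK(x) ((x) & RING_MASK)

int main(void)
{
	uint32_t head = RQ_DATA_MASK(5); /* head has wrapped past the end */
	uint32_t iter = 1022;            /* iterator still near the end */
	int steps = 0;

	/* Walk the ring as ice_mbx_traverse() does until head is reached. */
	while (RQ_DATA_MASK(++iter) != head)
		steps++;
	printf("reached head after %d steps\n", steps); /* prints 6 */
	return 0;
}
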
+
+/**
+ * ice_mbx_detect_malvf - Detect malicious VF in snapshot
+ * @hw: pointer to the HW struct
+ * @vf_id: relative virtual function ID
+ * @new_state: new algorithm state
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * This function tracks the number of asynchronous messages
+ * sent per VF and marks the VF as malicious if it exceeds
+ * the permissible number of messages to send.
+ */
+static enum ice_status
+ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
+ enum ice_mbx_snapshot_state *new_state,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ if (vf_id >= snap->mbx_vf.vfcntr_len)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* increment the message count in the VF array */
+ snap->mbx_vf.vf_cntr[vf_id]++;
+
+ if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
+ *is_malvf = true;
+
+ /* continue to iterate through the mailbox snapshot */
+ ice_mbx_traverse(hw, new_state);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
+ * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ *
+ * Reset the mailbox snapshot structure and clear VF counter array.
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+ u32 vfcntr_len;
+
+ if (!snap || !snap->mbx_vf.vf_cntr)
+ return;
+
+ /* Clear VF counters. */
+ vfcntr_len = snap->mbx_vf.vfcntr_len;
+ if (vfcntr_len)
+ ice_memset(snap->mbx_vf.vf_cntr, 0,
+ (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)),
+ ICE_NONDMA_MEM);
+
+ /* Reset mailbox snapshot for a new capture. */
+ ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf),
+ ICE_NONDMA_MEM);
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+ * @mbx_data: pointer to structure containing mailbox data
+ * @vf_id: relative virtual function (VF) ID
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * The function serves as an entry point for the malicious VF
+ * detection algorithm by handling the different states and state
+ * transitions of the algorithm:
+ * New snapshot: This state is entered when creating a new static
+ * snapshot. The data from any previous mailbox snapshot is
+ * cleared and a new capture of the mailbox head and tail is
+ * logged. This will be the new static snapshot to detect
+ * asynchronous messages sent by VFs. On capturing the snapshot
+ * and depending on whether the number of pending messages in that
+ * snapshot exceeds the watermark value, the state machine enters
+ * the traverse or detect state.
+ * Traverse: If the pending message count is below the watermark,
+ * iterate through the snapshot without taking any action on the VF.
+ * Detect: If the pending message count exceeds the watermark, traverse
+ * the static snapshot and look for a malicious VF.
+ */
+enum ice_status
+ice_mbx_vf_state_handler(struct ice_hw *hw,
+ struct ice_mbx_data *mbx_data, u16 vf_id,
+ bool *is_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+ enum ice_mbx_snapshot_state new_state;
+ enum ice_status status = ICE_SUCCESS;
+
+ if (!is_malvf || !mbx_data)
+ return ICE_ERR_BAD_PTR;
+
+	/* When entering the mailbox state machine, assume that the VF
+ * is not malicious until detected.
+ */
+ *is_malvf = false;
+
+	/* Check that the maximum number of messages allowed to be processed
+	 * while servicing the current interrupt exceeds the defined AVF
+	 * message threshold.
+	 */
+ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ return ICE_ERR_INVAL_SIZE;
+
+	/* The watermark value should not be less than the threshold limit
+	 * set for the number of asynchronous messages a VF can send to the
+	 * mailbox, nor should it be greater than the maximum number of
+	 * messages in the mailbox serviced in the current interrupt.
+ */
+ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
+ mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ return ICE_ERR_PARAM;
+
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ snap_buf = &snap->mbx_buf;
+
+ switch (snap_buf->state) {
+ case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
+ /* Clear any previously held data in mailbox snapshot structure. */
+ ice_mbx_reset_snapshot(snap);
+
+ /* Collect the pending ARQ count, number of messages processed and
+ * the maximum number of messages allowed to be processed from the
+		 * Mailbox for the current interrupt.
+ */
+ snap_buf->num_pending_arq = mbx_data->num_pending_arq;
+ snap_buf->num_msg_proc = mbx_data->num_msg_proc;
+ snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+
+		/* Capture a new static snapshot of the mailbox by logging the
+		 * head and tail of the snapshot, and set num_iterations to the
+		 * tail value to mark the start of the iteration through the
+		 * snapshot.
+ */
+ snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
+ mbx_data->num_pending_arq);
+ snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
+ snap_buf->num_iterations = snap_buf->tail;
+
+		/* The number of pending ARQ messages returned by
+		 * ice_clean_rq_elem is the difference between the head and
+		 * tail of the mailbox queue. Comparing this value against the
+		 * watermark helps to check if we potentially have malicious VFs.
+ */
+ if (snap_buf->num_pending_arq >=
+ mbx_data->async_watermark_val) {
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ } else {
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ }
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_DETECT:
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
+ break;
+
+ default:
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ status = ICE_ERR_CFG;
+ }
+
+ snap_buf->state = new_state;
+
+ return status;
+}
+
+/**
+ * ice_mbx_report_malvf - Track and note malicious VF
+ * @hw: pointer to the HW struct
+ * @all_malvfs: all malicious VFs tracked by PF
+ * @bitmap_len: length of bitmap in bits
+ * @vf_id: relative virtual function ID of the malicious VF
+ * @report_malvf: boolean to indicate if malicious VF must be reported
+ *
+ * This function will update a bitmap that keeps track of the malicious
+ * VFs attached to the PF. A malicious VF must be reported only once if
+ * discovered between VF resets or loading so the function checks
+ * the input vf_id against the bitmap to verify if the VF has been
+ * detected in any previous mailbox iterations.
+ */
+enum ice_status
+ice_mbx_report_malvf(struct ice_hw *hw, ice_bitmap_t *all_malvfs,
+ u16 bitmap_len, u16 vf_id, bool *report_malvf)
+{
+ if (!all_malvfs || !report_malvf)
+ return ICE_ERR_PARAM;
+
+ *report_malvf = false;
+
+ if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
+ return ICE_ERR_INVAL_SIZE;
+
+ if (vf_id >= bitmap_len)
+ return ICE_ERR_OUT_OF_RANGE;
+
+	/* If the vf_id is not already in the bitmap, set its bit and report it */
+ if (!ice_is_bit_set(all_malvfs, vf_id)) {
+ ice_set_bit(vf_id, all_malvfs);
+ ice_debug(hw, ICE_DBG_TRACE, "Malicious VF=%d found\n", vf_id);
+ *report_malvf = true;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ * @vf_count: number of VFs allocated on a PF
+ *
+ * Clear the mailbox snapshot structure and allocate memory
+ * for the VF counter array based on the number of VFs allocated
+ * on that PF.
+ *
+ * Assumption: This function will assume ice_get_caps() has already been
+ * called to ensure that the vf_count can be compared against the number
+ * of VFs supported as defined in the functional capabilities of the device.
+ */
+enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Ensure that the number of VFs allocated is non-zero and
+ * is not greater than the number of supported VFs defined in
+ * the functional capabilities of the PF.
+ */
+ if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
+ return ICE_ERR_INVAL_SIZE;
+
+ snap->mbx_vf.vf_cntr =
+ (u32 *)ice_calloc(hw, vf_count,
+ sizeof(*snap->mbx_vf.vf_cntr));
+ if (!snap->mbx_vf.vf_cntr)
+ return ICE_ERR_NO_MEMORY;
+
+	/* Set the VF counter length to the number of VFs allocated
+	 * per the given PF's functional capabilities.
+ */
+ snap->mbx_vf.vfcntr_len = vf_count;
+
+	/* Clear mbx_buf in the mailbox snapshot structure and set the
+	 * mailbox snapshot state to a new capture.
+ */
+ ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
+ * @hw: pointer to the hardware structure
+ *
+ * Clear the mailbox snapshot structure and free the VF counter array.
+ */
+void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ /* Free VF counter array and reset vf counter length */
+ ice_free(hw, snap->mbx_vf.vf_cntr);
+ snap->mbx_vf.vfcntr_len = 0;
+
+	/* Clear mbx_buf in the mailbox snapshot structure */
+ ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
+}
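
Per steps 1 and 8 of the overview comment, the snapshot memory is allocated once per PF and freed on unload. A minimal sketch of where the pair sits, with the attach/detach wrappers being hypothetical:

/* Illustrative lifecycle sketch (not part of this patch). */
static enum ice_status
example_pf_attach(struct ice_hw *hw, u16 num_vfs)
{
	/* Step 1: allocate per-VF counters once capabilities are known */
	return ice_mbx_init_snapshot(hw, num_vfs);
}

static void
example_pf_detach(struct ice_hw *hw)
{
	/* Step 8: release the counter array when the driver unloads */
	ice_mbx_deinit_snapshot(hw);
}
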
Index: sys/dev/ice/ice_status.h
===================================================================
--- sys/dev/ice/ice_status.h
+++ sys/dev/ice/ice_status.h
@@ -70,6 +70,7 @@
ICE_ERR_AQ_FULL = -102,
ICE_ERR_AQ_NO_WORK = -103,
ICE_ERR_AQ_EMPTY = -104,
+ ICE_ERR_AQ_FW_CRITICAL = -105,
};
#endif /* _ICE_STATUS_H_ */
Index: sys/dev/ice/ice_strings.c
===================================================================
--- sys/dev/ice/ice_strings.c
+++ sys/dev/ice/ice_strings.c
@@ -252,6 +252,9 @@
case ICE_ERR_HW_TABLE:
str = "ICE_ERR_HW_TABLE";
break;
+ case ICE_ERR_FW_DDP_MISMATCH:
+ str = "ICE_ERR_FW_DDP_MISMATCH";
+ break;
case ICE_ERR_DOES_NOT_EXIST:
str = "ICE_ERR_DOES_NOT_EXIST";
break;
@@ -270,8 +273,8 @@
case ICE_ERR_AQ_EMPTY:
str = "ICE_ERR_AQ_EMPTY";
break;
- case ICE_ERR_FW_DDP_MISMATCH:
- str = "ICE_ERR_FW_DDP_MISMATCH";
+ case ICE_ERR_AQ_FW_CRITICAL:
+ str = "ICE_ERR_AQ_FW_CRITICAL";
break;
}
@@ -987,6 +990,8 @@
return "DETACHING";
case ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING:
return "LINK_DEFAULT_OVERRIDE_PENDING";
+ case ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER:
+ return "LLDP_RX_FLTR_FROM_DRIVER";
case ICE_STATE_LAST:
return NULL;
}
Index: sys/dev/ice/ice_switch.h
===================================================================
--- sys/dev/ice/ice_switch.h
+++ sys/dev/ice/ice_switch.h
@@ -43,6 +43,20 @@
#define ICE_FLTR_TX BIT(1)
#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
+#define DUMMY_ETH_HDR_LEN 16
+#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
+ (DUMMY_ETH_HDR_LEN * \
+ sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
+#define ICE_SW_RULE_LG_ACT_SIZE(n) \
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
+ ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
+#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
+ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
+
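
These offsetof-based sizes replace the sizeof-arithmetic macros removed from ice_switch.c later in this diff. A standalone sketch showing the two computations agree for a simplified rule layout (the structs here are toys, not the driver's):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct toy_vsi_list {                   /* stand-in for ice_sw_rule_vsi_list */
	uint16_t index;
	uint16_t number_vsi;
	uint16_t vsi[1];                /* old style: one-element array */
};

struct toy_rules_elem {                 /* stand-in for ice_aqc_sw_rules_elem */
	uint16_t type;
	union {
		struct toy_vsi_list vsi_list;
	} pdata;
};

/* old style: whole struct, minus the union, plus the list header and
 * (n - 1) extra entries expressed as "- sizeof(vsi) + n * sizeof(vsi)"
 */
#define OLD_VSI_LIST_SIZE(n) \
	(sizeof(struct toy_rules_elem) - \
	 sizeof(((struct toy_rules_elem *)0)->pdata) + \
	 sizeof(struct toy_vsi_list) - \
	 sizeof(((struct toy_vsi_list *)0)->vsi) + \
	 ((n) * sizeof(((struct toy_vsi_list *)0)->vsi)))
/* new style: offset of the trailing array, plus n entries */
#define NEW_VSI_LIST_SIZE(n) \
	(offsetof(struct toy_rules_elem, pdata.vsi_list.vsi) + \
	 ((n) * sizeof(((struct toy_vsi_list *)0)->vsi[0])))

int main(void)
{
	assert(OLD_VSI_LIST_SIZE(3) == NEW_VSI_LIST_SIZE(3));
	return 0;
}
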
/* Worst case buffer length for ice_aqc_opc_get_res_alloc */
#define ICE_MAX_RES_TYPES 0x80
#define ICE_AQ_GET_RES_ALLOC_BUF_LEN \
@@ -254,8 +268,7 @@
/* Profiles this recipe is associated with */
u8 num_profs, *prof_ids;
- /* Possible result indexes are 44, 45, 46 and 47 */
-#define ICE_POSSIBLE_RES_IDX 0x0000F00000000000ULL
+ /* Bit map for possible result indexes */
ice_declare_bitmap(res_idxs, ICE_MAX_FV_WORDS);
/* This allows user to specify the recipe priority.
@@ -407,13 +420,13 @@
enum ice_status
ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id);
enum ice_status
-ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
- u16 buf_size, struct ice_sq_cd *cd);
+ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
+ struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
- struct ice_aqc_get_allocd_res_desc_resp *buf,
- u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
- struct ice_sq_cd *cd);
+ struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
+ bool res_shared, u16 *desc_id, struct ice_sq_cd *cd);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list);
enum ice_status
@@ -462,7 +475,9 @@
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
+ u16 vsi_handle);
+void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
-
#endif /* _ICE_SWITCH_H_ */
Index: sys/dev/ice/ice_switch.c
===================================================================
--- sys/dev/ice/ice_switch.c
+++ sys/dev/ice/ice_switch.c
@@ -54,32 +54,10 @@
* In case of Ether type filter it is treated as header without VLAN tag
* and byte 12 and 13 is used to program a given Ether type instead
*/
-#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
-#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
-#define ICE_SW_RULE_LG_ACT_SIZE(n) \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_lg_act) - \
- sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
- ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
-#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
- (sizeof(struct ice_aqc_sw_rules_elem) - \
- sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
- sizeof(struct ice_sw_rule_vsi_list) - \
- sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
- ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
-
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
@@ -121,7 +99,7 @@
* @num_elems: pointer to number of elements
* @cd: pointer to command details structure or NULL
*
- * Get switch configuration (0x0200) to be placed in 'buff'.
+ * Get switch configuration (0x0200) to be placed in buf.
* This admin command returns information such as initial VSI/port number
* and switch ID it belongs to.
*
@@ -138,13 +116,13 @@
* parsing the response buffer.
*/
static enum ice_status
-ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
+ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
+ enum ice_status status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
@@ -178,9 +156,8 @@
enum ice_status status;
u16 buf_len;
- buf_len = sizeof(*sw_buf);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
@@ -260,9 +237,8 @@
enum ice_status status, ret_status;
u16 buf_len;
- buf_len = sizeof(*sw_buf);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
@@ -300,8 +276,7 @@
status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
ice_aqc_opc_free_res, NULL);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "VEB counter resource could not be freed\n");
+ ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n");
ret_status = status;
}
@@ -697,8 +672,7 @@
return ICE_ERR_PARAM;
break;
default:
- ice_debug(hw, ICE_DBG_SW,
- "Error due to unsupported rule_type %u\n", rule_type);
+ ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type);
return ICE_ERR_OUT_OF_RANGE;
}
@@ -720,8 +694,7 @@
* than ICE_MAX_VSI, if not return with error.
*/
if (id >= ICE_MAX_VSI) {
- ice_debug(hw, ICE_DBG_SW,
- "Error VSI index (%u) out-of-range\n",
+ ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n",
id);
ice_free(hw, mr_list);
return ICE_ERR_OUT_OF_RANGE;
@@ -805,9 +778,8 @@
enum ice_status status;
u16 buf_len;
- buf_len = sizeof(*sw_buf);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
sw_buf->num_elems = CPU_TO_LE16(1);
@@ -850,7 +822,7 @@
* @hw: pointer to the HW struct
* @bcast_thresh: represents the upper threshold for broadcast storm control
* @mcast_thresh: represents the upper threshold for multicast storm control
- * @ctl_bitmask: storm control control knobs
+ * @ctl_bitmask: storm control knobs
*
* Sets the storm control configuration (0x0280)
*/
@@ -877,7 +849,7 @@
* @hw: pointer to the HW struct
* @bcast_thresh: represents the upper threshold for broadcast storm control
* @mcast_thresh: represents the upper threshold for multicast storm control
- * @ctl_bitmask: storm control control knobs
+ * @ctl_bitmask: storm control knobs
*
* Gets the storm control configuration (0x0281)
*/
@@ -923,6 +895,7 @@
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
+ enum ice_status status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -936,7 +909,12 @@
desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
desc.params.sw_rules.num_rules_fltr_entry_index =
CPU_TO_LE16(num_rules);
- return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+ if (opc != ice_aqc_opc_add_sw_rules &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ status = ICE_ERR_DOES_NOT_EXIST;
+
+ return status;
}
/* ice_init_port_info - Initialize port_info with switch configuration data
@@ -961,8 +939,7 @@
pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
- ice_debug(pi->hw, ICE_DBG_SW,
- "incorrect VSI/port type received\n");
+ ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
break;
}
}
@@ -972,7 +949,7 @@
*/
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
- struct ice_aqc_get_sw_cfg_resp *rbuf;
+ struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
enum ice_status status;
u8 num_total_ports;
u16 req_desc = 0;
@@ -982,7 +959,7 @@
num_total_ports = 1;
- rbuf = (struct ice_aqc_get_sw_cfg_resp *)
+ rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *)
ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
if (!rbuf)
@@ -994,19 +971,19 @@
* writing a non-zero value in req_desc
*/
do {
+ struct ice_aqc_get_sw_cfg_resp_elem *ele;
+
status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
&req_desc, &num_elems, NULL);
if (status)
break;
- for (i = 0; i < num_elems; i++) {
- struct ice_aqc_get_sw_cfg_resp_elem *ele;
+ for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
u16 pf_vf_num, swid, vsi_port_num;
bool is_vf = false;
u8 res_type;
- ele = rbuf[i].elements;
vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
@@ -1026,8 +1003,7 @@
case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
if (j == num_total_ports) {
- ice_debug(hw, ICE_DBG_SW,
- "more ports than expected\n");
+ ice_debug(hw, ICE_DBG_SW, "more ports than expected\n");
status = ICE_ERR_CFG;
goto out;
}
@@ -1043,7 +1019,7 @@
} while (req_desc && !status);
out:
- ice_free(hw, (void *)rbuf);
+ ice_free(hw, rbuf);
return status;
}
@@ -1294,8 +1270,7 @@
m_ent->fltr_info.fwd_id.hw_vsi_id;
act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
- act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
- ICE_LG_ACT_VSI_LIST_ID_M;
+ act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
@@ -1376,13 +1351,11 @@
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
- lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
- rules_size);
+ lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
if (!lg_act)
return ICE_ERR_NO_MEMORY;
- rx_tx = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)lg_act + lg_act_size);
+ rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
/* Fill in the first switch rule i.e. large action */
lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
@@ -1950,26 +1923,11 @@
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
- u16 s_rule_size;
-
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
- s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
- s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
-
/* Free the vsi_list resource that we allocated. It is assumed that the
* list is empty at this point.
*/
- status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
+ return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
ice_aqc_opc_free_res);
-
- ice_free(hw, s_rule);
- return status;
}
/**
@@ -2031,8 +1989,7 @@
tmp_fltr_info.vsi_handle = rem_vsi_handle;
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
tmp_fltr_info.fwd_id.hw_vsi_id, status);
return status;
}
@@ -2048,8 +2005,7 @@
/* Remove the VSI list since it is no longer used */
status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
if (status) {
- ice_debug(hw, ICE_DBG_SW,
- "Failed to remove VSI list %d, error %d\n",
+ ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
vsi_list_id, status);
return status;
}
@@ -2157,17 +2113,18 @@
* ice_aq_get_res_alloc - get allocated resources
* @hw: pointer to the HW struct
* @num_entries: pointer to u16 to store the number of resource entries returned
- * @buf: pointer to user-supplied buffer
- * @buf_size: size of buff
+ * @buf: pointer to buffer
+ * @buf_size: size of buf
* @cd: pointer to command details structure or NULL
*
- * The user-supplied buffer must be large enough to store the resource
+ * The caller-supplied buffer must be large enough to store the resource
* information for all resource types. Each resource type is an
- * ice_aqc_get_res_resp_data_elem structure.
+ * ice_aqc_get_res_resp_elem structure.
*/
enum ice_status
-ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
- u16 buf_size, struct ice_sq_cd *cd)
+ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
+ struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
{
struct ice_aqc_get_res_alloc *resp;
enum ice_status status;
@@ -2194,8 +2151,8 @@
* ice_aq_get_res_descs - get allocated resource descriptors
* @hw: pointer to the hardware structure
* @num_entries: number of resource entries in buffer
- * @buf: Indirect buffer to hold data parameters and response
- * @buf_size: size of buffer for indirect commands
+ * @buf: structure to hold response data buffer
+ * @buf_size: size of buffer
* @res_type: resource type
* @res_shared: is resource shared
* @desc_id: input - first desc ID to start; output - next desc ID
@@ -2203,9 +2160,8 @@
*/
enum ice_status
ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
- struct ice_aqc_get_allocd_res_desc_resp *buf,
- u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
- struct ice_sq_cd *cd)
+ struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
+ bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
{
struct ice_aqc_get_allocd_res_desc *cmd;
struct ice_aq_desc desc;
@@ -2398,8 +2354,7 @@
*
 * Function adds MAC rule for logical port from HW struct
*/
-enum ice_status
-ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
+enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
if (!m_list || !hw)
return ICE_ERR_PARAM;
@@ -2511,8 +2466,7 @@
*/
if (v_list_itr->vsi_count > 1 &&
v_list_itr->vsi_list_info->ref_cnt > 1) {
- ice_debug(hw, ICE_DBG_SW,
- "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
+ ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
status = ICE_ERR_CFG;
goto exit;
}
@@ -2597,8 +2551,7 @@
*
 * Function adds VLAN rule for logical port from HW struct
*/
-enum ice_status
-ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
+enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
{
if (!v_list || !hw)
return ICE_ERR_PARAM;
@@ -2644,7 +2597,6 @@
return ICE_SUCCESS;
}
-enum ice_status
/**
* ice_add_eth_mac - Add an ethertype-based filter rule
* @hw: pointer to the hardware structure
@@ -2652,6 +2604,7 @@
*
* Function adds ethertype rules for the logical port from the HW struct
*/
+enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
{
if (!em_list || !hw)
@@ -2773,7 +2726,8 @@
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+
s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
if (!s_rule)
return ICE_ERR_NO_MEMORY;
@@ -2930,8 +2884,7 @@
* @m_list: list of MAC addresses and forwarding information
*
*/
-enum ice_status
-ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
+enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
struct ice_sw_recipe *recp_list;
@@ -3116,17 +3069,17 @@
}
/**
- * ice_get_vsi_promisc - get promiscuous mode of given VSI
+ * _ice_get_vsi_promisc - get promiscuous mode of given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to retrieve info from
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
+ * @sw: pointer to the switch info struct for which the function adds the rule
*/
-enum ice_status
-ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
- u16 *vid)
+static enum ice_status
+_ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
+ u16 *vid, struct ice_switch_info *sw)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *itr;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
@@ -3156,17 +3109,32 @@
}
/**
- * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
+ * ice_get_vsi_promisc - get promiscuous mode of given VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to retrieve info from
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
*/
enum ice_status
-ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
- u16 *vid)
+ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
+ u16 *vid)
+{
+ return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask,
+ vid, hw->switch_info);
+}
+
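+
The wrappers introduced in this area all follow one refactor: the exported function keeps its old signature and simply forwards hw->switch_info (and, on the set paths, hw->port_info->lport) to a static, underscore-prefixed worker, so the worker can later be driven with a different switch_info. A self-contained sketch of that shape, with invented types and names:

#include <stdio.h>

struct sw_info { int n_rules; };
struct hw { struct sw_info *switch_info; };

/* Worker: operates on whichever switch info the caller supplies. */
static int _count_rules(struct hw *hw, struct sw_info *sw)
{
	(void)hw;		/* unused in this toy version */
	return sw->n_rules;
}

/* Public wrapper: preserves the old signature by defaulting to the
 * primary switch info hanging off the hw struct. */
static int count_rules(struct hw *hw)
{
	return _count_rules(hw, hw->switch_info);
}

int main(void)
{
	struct sw_info sw = { .n_rules = 3 };
	struct hw hw = { .switch_info = &sw };

	printf("%d rules\n", count_rules(&hw));
	return 0;
}
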
+/**
+ * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to retrieve info from
+ * @promisc_mask: pointer to mask to be filled in
+ * @vid: VLAN ID of promisc VLAN VSI
+ * @sw: pointer to the switch info struct for which the function adds the rule
+ */
+static enum ice_status
+_ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
+ u16 *vid, struct ice_switch_info *sw)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *itr;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
@@ -3195,6 +3163,21 @@
return ICE_SUCCESS;
}
+/**
+ * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to retrieve info from
+ * @promisc_mask: pointer to mask to be filled in
+ * @vid: VLAN ID of promisc VLAN VSI
+ */
+enum ice_status
+ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
+ u16 *vid)
+{
+ return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask,
+ vid, hw->switch_info);
+}
+
/**
* ice_remove_promisc - Remove promisc based filter rules
* @hw: pointer to the hardware structure
@@ -3220,17 +3203,17 @@
}
/**
- * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
+ * _ice_clear_vsi_promisc - clear specified promiscuous mode(s)
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to clear mode
* @promisc_mask: mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
+ * @sw: pointer to the switch info struct for which the function adds the rule
*/
-enum ice_status
-ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
- u16 vid)
+static enum ice_status
+_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid, struct ice_switch_info *sw)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_list_entry *fm_entry, *tmp;
struct LIST_HEAD_TYPE remove_list_head;
struct ice_fltr_mgmt_list_entry *itr;
@@ -3295,14 +3278,32 @@
}
/**
- * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
+ u8 promisc_mask, u16 vid)
+{
+ return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask,
+ vid, hw->switch_info);
+}
+
+/**
+ * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
+ * @lport: logical port number to configure promisc mode
+ * @sw: pointer to the switch info struct for which the function adds the rule
*/
-enum ice_status
-ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
+static enum ice_status
+_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid, u8 lport, struct ice_switch_info *sw)
{
enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
struct ice_fltr_list_entry f_list_entry;
@@ -3393,17 +3394,16 @@
new_fltr.src = hw_vsi_id;
} else {
new_fltr.flag |= ICE_FLTR_RX;
- new_fltr.src = hw->port_info->lport;
+ new_fltr.src = lport;
}
new_fltr.fltr_act = ICE_FWD_TO_VSI;
new_fltr.vsi_handle = vsi_handle;
new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
f_list_entry.fltr_info = new_fltr;
- recp_list = &hw->switch_info->recp_list[recipe_id];
+ recp_list = &sw->recp_list[recipe_id];
- status = ice_add_rule_internal(hw, recp_list,
- hw->port_info->lport,
+ status = ice_add_rule_internal(hw, recp_list, lport,
&f_list_entry);
if (status != ICE_SUCCESS)
goto set_promisc_exit;
@@ -3414,19 +3414,37 @@
}
/**
- * ice_set_vlan_vsi_promisc
+ * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid,
+ hw->port_info->lport,
+ hw->switch_info);
+}
+
+/**
+ * _ice_set_vlan_vsi_promisc
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to configure
* @promisc_mask: mask of promiscuous config bits
* @rm_vlan_promisc: Clear VLANs VSI promisc mode
+ * @lport: logical port number to configure promisc mode
+ * @sw: pointer to the switch info struct for which the function adds the rule
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
-enum ice_status
-ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
- bool rm_vlan_promisc)
+static enum ice_status
+_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc, u8 lport,
+ struct ice_switch_info *sw)
{
- struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_list_entry *list_itr, *tmp;
struct LIST_HEAD_TYPE vsi_list_head;
struct LIST_HEAD_TYPE *vlan_head;
@@ -3448,11 +3466,13 @@
list_entry) {
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
- status = ice_clear_vsi_promisc(hw, vsi_handle,
- promisc_mask, vlan_id);
+ status = _ice_clear_vsi_promisc(hw, vsi_handle,
+ promisc_mask,
+ vlan_id, sw);
else
- status = ice_set_vsi_promisc(hw, vsi_handle,
- promisc_mask, vlan_id);
+ status = _ice_set_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id,
+ lport, sw);
if (status)
break;
}
@@ -3466,6 +3486,24 @@
return status;
}
+/**
+ * ice_set_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @rm_vlan_promisc: Clear VLANs VSI promisc mode
+ *
+ * Configure VSI with all associated VLANs to given promiscuous mode(s)
+ */
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc)
+{
+ return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask,
+ rm_vlan_promisc, hw->port_info->lport,
+ hw->switch_info);
+}
+
/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
@@ -3514,8 +3552,7 @@
ice_remove_eth_mac(hw, &remove_list_head);
break;
case ICE_SW_LKUP_DFLT:
- ice_debug(hw, ICE_DBG_SW,
- "Remove filters for this lookup type hasn't been implemented yet\n");
+ ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n");
break;
case ICE_SW_LKUP_LAST:
ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
@@ -3586,9 +3623,8 @@
u16 buf_len;
/* Allocate resource */
- buf_len = sizeof(*buf);
- buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(buf, elem, 1);
+ buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -3625,9 +3661,8 @@
u16 buf_len;
/* Free resource */
- buf_len = sizeof(*buf);
- buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(buf, elem, 1);
+ buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!buf)
return ICE_ERR_NO_MEMORY;
@@ -3639,8 +3674,7 @@
status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
ice_aqc_opc_free_res, NULL);
if (status)
- ice_debug(hw, ICE_DBG_SW,
- "counter resource could not be freed\n");
+ ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
ice_free(hw, buf);
return status;
@@ -3687,9 +3721,8 @@
return ICE_ERR_PARAM;
/* Allocate resource for large action */
- buf_len = sizeof(*sw_buf);
- sw_buf = (struct ice_aqc_alloc_free_res_elem *)
- ice_malloc(hw, buf_len);
+ buf_len = ice_struct_size(sw_buf, elem, 1);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
if (!sw_buf)
return ICE_ERR_NO_MEMORY;
@@ -3944,6 +3977,7 @@
LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
list_entry) {
struct ice_fltr_list_entry f_entry;
+ u16 vsi_handle;
f_entry.fltr_info = itr->fltr_info;
if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
@@ -3955,12 +3989,8 @@
}
/* Add a filter per VSI separately */
- while (1) {
- u16 vsi_handle;
-
- vsi_handle =
- ice_find_first_bit(itr->vsi_list_info->vsi_map,
- ICE_MAX_VSI);
+ ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map,
+ ICE_MAX_VSI) {
if (!ice_is_vsi_valid(hw, vsi_handle))
break;
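
ice_for_each_set_bit replaces the open-coded while (1) / ice_find_first_bit loop with a bounded visit of every set bit. A standalone approximation of that idiom over a plain unsigned long (the driver's bitmaps are arrays of longs; this shows only the shape):

#include <stdio.h>

/* Toy version: visit each set-bit index in [0, size). The
 * continue/else form lets a trailing statement bind safely. */
#define for_each_set_bit(bit, map, size)			\
	for ((bit) = 0; (bit) < (size); (bit)++)		\
		if (!((map) & (1ul << (bit)))) continue; else

int main(void)
{
	unsigned long vsi_map = 0x29ul;	/* bits 0, 3 and 5 set */
	unsigned bit;

	for_each_set_bit(bit, vsi_map, 8)
		printf("vsi handle %u\n", bit);
	return 0;
}
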
@@ -4012,6 +4042,8 @@
/**
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
+ * @pi: pointer to port information structure
+ * @sw: pointer to the switch info struct for which the function replays filters
* @vsi_handle: driver VSI handle
* @recp_id: Recipe ID for which rules need to be replayed
* @list_head: list for which filters need to be replayed
@@ -4020,7 +4052,8 @@
* It is required to pass valid VSI handle.
*/
static enum ice_status
-ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
+ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
+ struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
struct LIST_HEAD_TYPE *list_head)
{
struct ice_fltr_mgmt_list_entry *itr;
@@ -4030,7 +4063,7 @@
if (LIST_EMPTY(list_head))
return status;
- recp_list = &hw->switch_info->recp_list[recp_id];
+ recp_list = &sw->recp_list[recp_id];
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
@@ -4044,7 +4077,7 @@
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
status = ice_add_rule_internal(hw, recp_list,
- hw->port_info->lport,
+ pi->lport,
&f_entry);
if (status != ICE_SUCCESS)
goto end;
@@ -4064,7 +4097,7 @@
status = ice_add_vlan_internal(hw, recp_list, &f_entry);
else
status = ice_add_rule_internal(hw, recp_list,
- hw->port_info->lport,
+ pi->lport,
&f_entry);
if (status != ICE_SUCCESS)
goto end;
@@ -4076,11 +4109,14 @@
/**
* ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
* @hw: pointer to the hardware structure
+ * @pi: pointer to port information structure
* @vsi_handle: driver VSI handle
*
* Replays filters for requested VSI via vsi_handle.
*/
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
+enum ice_status
+ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
+ u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
enum ice_status status = ICE_SUCCESS;
@@ -4092,7 +4128,8 @@
head = &sw->recp_list[i].filt_replay_rules;
if (!sw->recp_list[i].adv_rule)
- status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
+ head);
if (status != ICE_SUCCESS)
return status;
}
@@ -4101,14 +4138,14 @@
}
/**
- * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
+ * ice_rm_all_sw_replay_rule - helper function to delete filter replay rules
* @hw: pointer to the HW struct
+ * @sw: pointer to the switch info struct for which the function removes filters
*
- * Deletes the filter replay rules.
+ * Deletes the filter replay rules for given switch
*/
-void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
+void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw)
{
- struct ice_switch_info *sw = hw->switch_info;
u8 i;
if (!sw)
@@ -4124,3 +4161,15 @@
}
}
}
+
+/**
+ * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
+ * @hw: pointer to the HW struct
+ *
+ * Deletes the filter replay rules.
+ */
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
+{
+ ice_rm_sw_replay_rule_info(hw, hw->switch_info);
+}
+
Index: sys/dev/ice/ice_type.h
===================================================================
--- sys/dev/ice/ice_type.h
+++ sys/dev/ice/ice_type.h
@@ -56,6 +56,13 @@
#define IS_ASCII(_ch) ((_ch) < 0x80)
+#define STRUCT_HACK_VAR_LEN
+/**
+ * ice_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
#define ice_struct_size(ptr, field, num) \
(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
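
The macro yields the byte count for a structure that ends in a C99 flexible array member: the fixed header plus num trailing elements, which is exactly what the ice_malloc() call sites above switched to. A self-contained illustration (the struct is made up; note that sizeof never evaluates its operand, so using the pointer before assignment is well-defined here):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ice_struct_size(ptr, field, num) \
	(sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))

struct resp {
	unsigned short num_elems;
	unsigned short elem[];		/* flexible array member */
};

int main(void)
{
	struct resp *buf;
	size_t len = ice_struct_size(buf, elem, 4);	/* header + 4 elems */

	buf = malloc(len);
	if (buf == NULL)
		return 1;
	memset(buf, 0, len);
	buf->num_elems = 4;
	printf("allocated %zu bytes\n", len);
	free(buf);
	return 0;
}
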
@@ -231,6 +238,7 @@
ICE_MEDIA_BASET,
ICE_MEDIA_BACKPLANE,
ICE_MEDIA_DA,
+ ICE_MEDIA_AUI,
};
/* Software VSI types. */
@@ -468,15 +476,54 @@
u16 build; /* Build version of OROM */
};
-/* NVM Information */
+/* NVM version information */
struct ice_nvm_info {
+ u32 eetrack;
+ u8 major;
+ u8 minor;
+};
+
+/* netlist version information */
+struct ice_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ice_flash_bank {
+ ICE_INVALID_FLASH_BANK,
+ ICE_1ST_FLASH_BANK,
+ ICE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ice_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Pointer to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ice_flash_bank nvm_bank; /* Active NVM bank */
+ enum ice_flash_bank orom_bank; /* Active OROM bank */
+ enum ice_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ice_flash_info {
struct ice_orom_info orom; /* Option ROM version info */
- u32 eetrack; /* NVM data version */
+ struct ice_nvm_info nvm; /* NVM version information */
+ struct ice_netlist_info netlist;/* Netlist version info */
+ struct ice_bank_info banks; /* Flash Bank information */
u16 sr_words; /* Shadow RAM size in words */
u32 flash_size; /* Size of available flash in bytes */
- u8 major_ver; /* major version of dev starter */
- u8 minor_ver; /* minor version of dev starter */
- u8 blank_nvm_mode; /* is NVM empty (no FW present)*/
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
struct ice_link_default_override_tlv {
@@ -503,16 +550,6 @@
#define ICE_NVM_VER_LEN 32
-/* netlist version information */
-struct ice_netlist_ver_info {
- u32 major; /* major high/low */
- u32 minor; /* minor high/low */
- u32 type; /* type high/low */
- u32 rev; /* revision high/low */
- u32 hash; /* SHA-1 hash word */
- u16 cust_ver; /* customer version */
-};
-
/* Max number of port to queue branches w.r.t topology */
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@@ -587,7 +624,7 @@
#define ICE_SCHED_NO_BW_WT 0
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
-#define ICE_SCHED_DFLT_BW_WT 1
+#define ICE_SCHED_DFLT_BW_WT 4
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
@@ -720,6 +757,14 @@
#define ICE_DCBX_APPS_NON_WILLING 0x1
};
+struct ice_qos_cfg {
+ struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
+ struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
+ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
+ u8 dcbx_status : 3; /* see ICE_DCBX_STATUS_DIS */
+ u8 is_sw_lldp : 1;
+};
+
struct ice_port_info {
struct ice_sched_node *root; /* Root Node per Port */
struct ice_hw *hw; /* back pointer to HW instance */
@@ -741,16 +786,9 @@
struct ice_lock sched_lock; /* protect access to TXSched tree */
struct ice_sched_node *
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
- /* List contain profile ID(s) and other params per layer */
- struct LIST_HEAD_TYPE rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ struct ice_bw_type_info root_node_bw_t_info;
struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS];
- struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
- /* DCBX info */
- struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
- struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
- /* LLDP/DCBX Status */
- u8 dcbx_status:3; /* see ICE_DCBX_STATUS_DIS */
- u8 is_sw_lldp:1;
+ struct ice_qos_cfg qos_cfg;
u8 is_vf:1;
};
@@ -758,10 +796,85 @@
struct LIST_HEAD_TYPE vsi_list_map_head;
struct ice_sw_recipe *recp_list;
u16 prof_res_bm_init;
+ u16 max_used_prof_index;
ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
+/* Enum defining the different states of the mailbox snapshot in the
+ * PF-VF mailbox overflow detection algorithm. The snapshot can be in
+ * one of the following states:
+ * 1. ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot
+ * within the mailbox buffer.
+ * 2. ICE_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snapshot.
+ * 3. ICE_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the
+ * mailbox and mark any VFs sending more messages than the configured
+ * threshold limit.
+ * 4. ICE_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to 0xFFFFFFFF.
+ */
+enum ice_mbx_snapshot_state {
+ ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0,
+ ICE_MAL_VF_DETECT_STATE_TRAVERSE,
+ ICE_MAL_VF_DETECT_STATE_DETECT,
+ ICE_MAL_VF_DETECT_STATE_INVALID = 0xFFFFFFFF,
+};
+
+/* Structure to hold information of the static snapshot and the mailbox
+ * buffer data used to generate and track the snapshot.
+ * 1. state: the state of the mailbox snapshot in the malicious VF
+ * detection state handler ice_mbx_vf_state_handler()
+ * 2. head : head of the mailbox snapshot in a circular mailbox buffer
+ * 3. tail : tail of the mailbox snapshot in a circular mailbox buffer
+ * 4. num_iterations: number of messages traversed in circular mailbox buffer
+ * 5. num_msg_proc: number of messages processed in mailbox
+ * 6. num_pending_arq: number of pending asynchronous messages
+ * 7. max_num_msgs_mbx: maximum messages in mailbox for currently
+ * serviced work item or interrupt.
+ */
+struct ice_mbx_snap_buffer_data {
+ enum ice_mbx_snapshot_state state;
+ u32 head;
+ u32 tail;
+ u32 num_iterations;
+ u16 num_msg_proc;
+ u16 num_pending_arq;
+ u16 max_num_msgs_mbx;
+};
+
+/* Structure to track messages sent by VFs on mailbox:
+ * 1. vf_cntr : a counter array of VFs to track the number of
+ * asynchronous messages sent by each VF
+ * 2. vfcntr_len : number of entries in VF counter array
+ */
+struct ice_mbx_vf_counter {
+ u32 *vf_cntr;
+ u32 vfcntr_len;
+};
+
+/* Structure to hold data relevant to the captured static snapshot
+ * of the PF-VF mailbox.
+ */
+struct ice_mbx_snapshot {
+ struct ice_mbx_snap_buffer_data mbx_buf;
+ struct ice_mbx_vf_counter mbx_vf;
+};
+
+/* Structure to hold data to be used for capturing or updating a
+ * static snapshot.
+ * 1. num_msg_proc: number of messages processed in mailbox
+ * 2. num_pending_arq: number of pending asynchronous messages
+ * 3. max_num_msgs_mbx: maximum messages in mailbox for currently
+ * serviced work item or interrupt.
+ * 4. async_watermark_val: an upper threshold set by the caller to
+ * determine if the pending arq count is large enough to assume that
+ * there is the possibility of a malicious VF.
+ */
+struct ice_mbx_data {
+ u16 num_msg_proc;
+ u16 num_pending_arq;
+ u16 max_num_msgs_mbx;
+ u16 async_watermark_val;
+};
+
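+
Taken together these structures support a simple rate heuristic: count the asynchronous messages each VF contributes to the mailbox and flag a VF that exceeds a watermark. A toy standalone sketch of only that counting step (the real logic lives in the snapshot state handler, which this hunk does not show; NUM_VFS and the watermark below are arbitrary):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_VFS		4
#define WATERMARK	3u	/* arbitrary async_watermark_val */

struct vf_counter {
	uint32_t vf_cntr[NUM_VFS];
};

/* Account one mailbox message from vf_id; report whether that VF
 * has now crossed the watermark. */
static bool mbx_count_msg(struct vf_counter *c, unsigned vf_id)
{
	return ++c->vf_cntr[vf_id] > WATERMARK;
}

int main(void)
{
	struct vf_counter c = { { 0 } };

	for (unsigned i = 0; i < 5; i++)	/* five messages from VF 2 */
		if (mbx_count_msg(&c, 2))
			printf("VF 2 flagged as potentially malicious\n");
	return 0;
}
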
/* Port hardware description */
struct ice_hw {
u8 *hw_addr;
@@ -796,21 +909,21 @@
u8 sw_entry_point_layer;
u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct LIST_HEAD_TYPE agg_list; /* lists all aggregator */
+ /* List contain profile ID(s) and other params per layer */
+ struct LIST_HEAD_TYPE rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
u8 reset_ongoing; /* true if HW is in reset, false otherwise */
struct ice_bus_info bus;
- struct ice_nvm_info nvm;
+ struct ice_flash_info flash;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
struct ice_hw_func_caps func_caps; /* function capabilities */
- struct ice_netlist_ver_info netlist_ver; /* netlist version info */
struct ice_switch_info *switch_info; /* switch filter lists */
/* Control Queue info */
struct ice_ctl_q_info adminq;
struct ice_ctl_q_info mailboxq;
-
u8 api_branch; /* API branch version */
u8 api_maj_ver; /* API major version */
u8 api_min_ver; /* API minor version */
@@ -883,6 +996,7 @@
struct LIST_HEAD_TYPE fl_profs[ICE_BLK_COUNT];
struct ice_lock rss_locks; /* protect RSS configuration */
struct LIST_HEAD_TYPE rss_list_head;
+ struct ice_mbx_snapshot mbx_snapshot;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
@@ -1022,7 +1136,7 @@
#define ICE_SR_1ST_SCRATCH_PAD_PTR 0x41
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
#define ICE_SR_NVM_BANK_SIZE 0x43
-#define ICE_SR_1ND_OROM_BANK_PTR 0x44
+#define ICE_SR_1ST_OROM_BANK_PTR 0x44
#define ICE_SR_OROM_BANK_SIZE 0x45
#define ICE_SR_NETLIST_BANK_PTR 0x46
#define ICE_SR_NETLIST_BANK_SIZE 0x47
@@ -1037,6 +1151,12 @@
#define ICE_SR_PCIE_ALT_SIZE_WORDS 512
#define ICE_SR_CTRL_WORD_1_S 0x06
#define ICE_SR_CTRL_WORD_1_M (0x03 << ICE_SR_CTRL_WORD_1_S)
+#define ICE_SR_CTRL_WORD_VALID 0x1
+#define ICE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define ICE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define ICE_SR_CTRL_WORD_NVM_BANK BIT(5)
+
+#define ICE_SR_NVM_PTR_4KB_UNITS BIT(15)
/* Shadow RAM related */
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
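
With the new control word bits, finding the active bank for each flash module reduces to testing one bit. A hedged sketch of that decode; which polarity selects the first versus the second bank is an assumption for illustration only, since the NVM code that consumes these bits is not part of this hunk:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1u << (n))
#define ICE_SR_CTRL_WORD_OROM_BANK	BIT(3)
#define ICE_SR_CTRL_WORD_NETLIST_BANK	BIT(4)
#define ICE_SR_CTRL_WORD_NVM_BANK	BIT(5)

enum bank { FIRST_BANK, SECOND_BANK };

/* Assumed polarity: a set bit selects the second bank. */
static enum bank active_bank(uint16_t ctrl_word, uint16_t bank_bit)
{
	return (ctrl_word & bank_bit) ? SECOND_BANK : FIRST_BANK;
}

int main(void)
{
	uint16_t ctrl = ICE_SR_CTRL_WORD_NVM_BANK;	/* sample value */

	printf("NVM bank: %d\n", active_bank(ctrl, ICE_SR_CTRL_WORD_NVM_BANK));
	printf("OROM bank: %d\n", active_bank(ctrl, ICE_SR_CTRL_WORD_OROM_BANK));
	return 0;
}
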
@@ -1069,4 +1189,8 @@
#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_8KB 0x1
#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_64KB 0x2
+/* AQ API version for LLDP_FILTER_CONTROL */
+#define ICE_FW_API_LLDP_FLTR_MAJ 1
+#define ICE_FW_API_LLDP_FLTR_MIN 7
+#define ICE_FW_API_LLDP_FLTR_PATCH 1
#endif /* _ICE_TYPE_H_ */
Index: sys/dev/ice/if_ice_iflib.c
===================================================================
--- sys/dev/ice/if_ice_iflib.c
+++ sys/dev/ice/if_ice_iflib.c
@@ -82,6 +82,8 @@
static uint64_t ice_if_get_counter(if_ctx_t ctx, ift_counter counter);
static int ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
+static int ice_if_suspend(if_ctx_t ctx);
+static int ice_if_resume(if_ctx_t ctx);
static int ice_msix_que(void *arg);
static int ice_msix_admin(void *arg);
@@ -167,6 +169,8 @@
DEVMETHOD(ifdi_get_counter, ice_if_get_counter),
DEVMETHOD(ifdi_priv_ioctl, ice_if_priv_ioctl),
DEVMETHOD(ifdi_i2c_req, ice_if_i2c_req),
+ DEVMETHOD(ifdi_suspend, ice_if_suspend),
+ DEVMETHOD(ifdi_resume, ice_if_resume),
DEVMETHOD_END
};
@@ -278,7 +282,6 @@
MODULE_DEPEND(ice, pci, 1, 1, 1);
MODULE_DEPEND(ice, ether, 1, 1, 1);
MODULE_DEPEND(ice, iflib, 1, 1, 1);
-MODULE_DEPEND(ice, firmware, 1, 1, 1);
IFLIB_PNP_INFO(pci, ice, ice_vendor_info_array);
@@ -663,6 +666,7 @@
ice_update_link_status(struct ice_softc *sc, bool update_media)
{
struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
/* Never report link up when in recovery mode */
if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
@@ -673,6 +677,8 @@
if (sc->link_up) { /* link is up */
uint64_t baudrate = ice_aq_speed_to_rate(sc->hw.port_info);
+ ice_set_default_local_lldp_mib(sc);
+
iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
ice_link_up_msg(sc);
@@ -687,7 +693,7 @@
/* Update the supported media types */
if (update_media) {
- enum ice_status status = ice_add_media_types(sc, sc->media);
+ status = ice_add_media_types(sc, sc->media);
if (status)
device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
ice_status_str(status),
@@ -1827,6 +1833,16 @@
ASSERT_CTX_LOCKED(sc);
+ /*
+ * We've seen an issue with 11.3/12.1 where sideband routines are
+ * called after detach is called. This would call routines after
+ * if_stop, causing issues with the teardown process. This has
+ * apparently been fixed in STABLE snapshots, but it seems like a
+ * good idea to keep this guard here regardless.
+ */
+ if (ice_driver_is_detaching(sc))
+ return;
+
if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
return;
@@ -2573,7 +2589,7 @@
if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE))
return;
- /* Set capabilities that the driver supports */
+ /* Set capabilities that all devices support */
ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap);
@@ -2872,3 +2888,46 @@
return ice_handle_i2c_req(sc, req);
}
+/**
+ * ice_if_suspend - PCI device suspend handler for iflib
+ * @ctx: iflib context pointer
+ *
+ * Deinitializes the driver and clears HW resources in preparation for
+ * suspend or an FLR.
+ *
+ * @returns 0; this return value is ignored
+ */
+static int
+ice_if_suspend(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ /* At least a PFR is always going to happen after this;
+ * either via FLR or during the D3->D0 transition.
+ */
+ ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
+
+ ice_prepare_for_reset(sc);
+
+ return (0);
+}
+
+/**
+ * ice_if_resume - PCI device resume handler for iflib
+ * @ctx: iflib context pointer
+ *
+ * Reinitializes the driver and the HW after PCI resume or after
+ * an FLR. An init is performed by iflib after this function is finished.
+ *
+ * @returns 0; this return value is ignored
+ */
+static int
+ice_if_resume(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ ice_rebuild(sc);
+
+ return (0);
+}
+
Index: sys/dev/ice/virtchnl.h
===================================================================
--- sys/dev/ice/virtchnl.h
+++ sys/dev/ice/virtchnl.h
@@ -156,9 +156,11 @@
VIRTCHNL_OP_DISABLE_CHANNELS = 31,
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
- /* opcodes 34, 35, 36, 37 and 38 are reserved */
- /* opcodes 39, 40, 41 and 42 are reserved */
- /* opcode 42 is reserved */
+ /* opcode 34 is reserved */
+ /* opcodes 39, 40, 41, 42 and 43 are reserved */
+ /* opcode 44 is reserved */
+ /* opcodes 45, 46, 47, 48 and 49 are reserved */
+ VIRTCHNL_OP_MAX,
};
/* These macros are used to generate compilation errors if a structure/union
@@ -273,6 +275,9 @@
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000
#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
+ /* 0x04000000 is reserved */
+ /* 0X08000000 and 0X10000000 are reserved */
+ /* 0X20000000 is reserved */
/* 0X40000000 is reserved */
/* 0X80000000 is reserved */
@@ -449,9 +454,36 @@
* PF removes the filters and returns status.
*/
+/* VIRTCHNL_ETHER_ADDR_LEGACY
+ * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad
+ * bytes. Moving forward all VF drivers should not set type to
+ * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy
+ * behavior. The control plane function (i.e. PF) can use a best effort method
+ * of tracking the primary/device unicast in this case, but there is no
+ * guarantee and functionality depends on the implementation of the PF.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_PRIMARY
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the
+ * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and
+ * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane
+ * function (i.e. PF) to accurately track and use this MAC address for
+ * displaying on the host and for VM/function reset.
+ */
+
+/* VIRTCHNL_ETHER_ADDR_EXTRA
+ * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra
+ * unicast and/or multicast filters that are being added/deleted via
+ * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively.
+ */
struct virtchnl_ether_addr {
u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
- u8 pad[2];
+ u8 type;
+#define VIRTCHNL_ETHER_ADDR_LEGACY 0
+#define VIRTCHNL_ETHER_ADDR_PRIMARY 1
+#define VIRTCHNL_ETHER_ADDR_EXTRA 2
+#define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */
+ u8 pad;
};
VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
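
In practice the three comments above boil down to: tag the device's own unicast address PRIMARY and every other unicast/multicast filter EXTRA when building VIRTCHNL_OP_ADD_ETH_ADDR payloads. A minimal sketch of filling one entry, using standalone definitions that mirror the struct above:

#include <stdio.h>
#include <string.h>

#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS	6

struct virtchnl_ether_addr {
	unsigned char addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	unsigned char type;	/* LEGACY=0, PRIMARY=1, EXTRA=2 */
	unsigned char pad;
};

int main(void)
{
	const unsigned char dev_mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x2a };
	struct virtchnl_ether_addr e = { { 0 }, 0, 0 };

	memcpy(e.addr, dev_mac, sizeof(e.addr));
	e.type = 1;	/* VIRTCHNL_ETHER_ADDR_PRIMARY: device unicast MAC */
	printf("type=%u size=%zu\n", e.type, sizeof(e));	/* size stays 8 */
	return 0;
}
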
@@ -630,6 +662,11 @@
/* action types */
VIRTCHNL_ACTION_DROP = 0,
VIRTCHNL_ACTION_TC_REDIRECT,
+ VIRTCHNL_ACTION_PASSTHRU,
+ VIRTCHNL_ACTION_QUEUE,
+ VIRTCHNL_ACTION_Q_REGION,
+ VIRTCHNL_ACTION_MARK,
+ VIRTCHNL_ACTION_COUNT,
};
enum virtchnl_flow_type {
@@ -752,7 +789,7 @@
u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len = 0;
+ u32 valid_len = 0;
/* Validate message length. */
switch (v_opcode) {
Index: sys/dev/ice/virtchnl_inline_ipsec.h
===================================================================
--- sys/dev/ice/virtchnl_inline_ipsec.h
+++ sys/dev/ice/virtchnl_inline_ipsec.h
@@ -39,8 +39,11 @@
#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2
#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128
#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8
-#define VIRTCHNL_IPSEC_SELECTED_SA_DESTROY 0
-#define VIRTCHNL_IPSEC_ALL_SA_DESTROY 1
+#define VIRTCHNL_IPSEC_SA_DESTROY 0
+#define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_REQ_ID 0xFFFF
+#define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP 0xFFFFFFFF
+#define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP 0xFFFFFFFF
/* crypto type */
#define VIRTCHNL_AUTH 1
@@ -49,7 +52,7 @@
/* algorithm type */
/* Hash Algorithm */
-#define VIRTCHNL_NO_ALG 0 /* NULL algorithm */
+#define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */
#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */
#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */
#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */
@@ -65,13 +68,14 @@
#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */
#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */
/* Cipher Algorithm */
-#define VIRTCHNL_3DES_CBC 15 /* Triple DES algorithm in CBC mode */
-#define VIRTCHNL_AES_CBC 16 /* AES algorithm in CBC mode */
-#define VIRTCHNL_AES_CTR 17 /* AES algorithm in Counter mode */
+#define VIRTCHNL_CIPHER_NO_ALG 15 /* NULL algorithm */
+#define VIRTCHNL_3DES_CBC 16 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC 17 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR 18 /* AES algorithm in Counter mode */
/* AEAD Algorithm */
-#define VIRTCHNL_AES_CCM 18 /* AES algorithm in CCM mode */
-#define VIRTCHNL_AES_GCM 19 /* AES algorithm in GCM mode */
-#define VIRTCHNL_CHACHA20_POLY1305 20 /* algorithm of ChaCha20-Poly1305 */
+#define VIRTCHNL_AES_CCM 19 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM 20 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */
/* protocol type */
#define VIRTCHNL_PROTO_ESP 1
@@ -97,6 +101,32 @@
#define VIRTCHNL_IPV4 1
#define VIRTCHNL_IPV6 2
+/* for virtchnl_ipsec_resp */
+enum inline_ipsec_resp {
+ INLINE_IPSEC_SUCCESS = 0,
+ INLINE_IPSEC_FAIL = -1,
+ INLINE_IPSEC_ERR_FIFO_FULL = -2,
+ INLINE_IPSEC_ERR_NOT_READY = -3,
+ INLINE_IPSEC_ERR_VF_DOWN = -4,
+ INLINE_IPSEC_ERR_INVALID_PARAMS = -5,
+ INLINE_IPSEC_ERR_NO_MEM = -6,
+};
+
+/* Detailed opcodes for DPDK and IPsec use */
+enum inline_ipsec_ops {
+ INLINE_IPSEC_OP_GET_CAP = 0,
+ INLINE_IPSEC_OP_GET_STATUS = 1,
+ INLINE_IPSEC_OP_SA_CREATE = 2,
+ INLINE_IPSEC_OP_SA_UPDATE = 3,
+ INLINE_IPSEC_OP_SA_DESTROY = 4,
+ INLINE_IPSEC_OP_SP_CREATE = 5,
+ INLINE_IPSEC_OP_SP_DESTROY = 6,
+ INLINE_IPSEC_OP_SA_READ = 7,
+ INLINE_IPSEC_OP_EVENT = 8,
+ INLINE_IPSEC_OP_RESP = 9,
+};
+
+#pragma pack(1)
/* Not all fields are valid; if a certain field is invalid, set all its bits to 1 */
struct virtchnl_algo_cap {
u32 algo_type;
@@ -119,6 +149,7 @@
u16 max_aad_size;
u16 inc_aad_size;
};
+#pragma pack()
/* vf record the capability of crypto from the virtchnl */
struct virtchnl_sym_crypto_cap {
@@ -178,17 +209,7 @@
struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
};
-/* using desc_id to record the format of rx descriptor */
-struct virtchnl_rx_desc_fmt {
- u16 desc_id;
-};
-
-/* using desc_id to record the format of tx descriptor */
-struct virtchnl_tx_desc_fmt {
- u8 desc_num;
- u16 desc_ids[VIRTCHNL_IPSEC_MAX_TX_DESC_NUM];
-};
-
+#pragma pack(1)
/* configuration of crypto function */
struct virtchnl_ipsec_crypto_cfg_item {
u8 crypto_type;
@@ -201,18 +222,23 @@
/* Length of digest */
u16 digest_len;
+ /* SA salt */
+ u32 salt;
+
/* The length of the symmetric key */
u16 key_len;
/* key data buffer */
u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
};
+#pragma pack()
struct virtchnl_ipsec_sym_crypto_cfg {
struct virtchnl_ipsec_crypto_cfg_item
items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
};
+#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_CREATE
* VF send this SA configuration to PF using virtchnl;
* PF create SA as configuration and PF driver will return
@@ -249,9 +275,6 @@
/* outer dst ip address */
u8 dst_addr[16];
- /* SA salt */
- u32 salt;
-
/* SPD reference. Used to link an SA with its policy.
* PF drivers may ignore this field.
*/
@@ -311,6 +334,7 @@
/* crypto configuration */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
+#pragma pack()
/* VIRTCHNL_OP_IPSEC_SA_UPDATE
* VF send configuration of index of SA to PF
@@ -322,6 +346,7 @@
u32 esn_low; /* low 32 bits of esn */
};
+#pragma pack(1)
/* VIRTCHNL_OP_IPSEC_SA_DESTROY
* VF send configuration of index of SA to PF
* PF will destroy SA according to configuration
@@ -329,14 +354,12 @@
* be destroyed
*/
struct virtchnl_ipsec_sa_destroy {
- /* VIRTCHNL_SELECTED_SA_DESTROY: selected SA will be destroyed.
- * VIRTCHNL_ALL_SA_DESTROY: all SA will be destroyed.
+ /* An all-zero bitmap indicates that all SAs will be destroyed.
+ * A non-zero bitmap indicates that the selected SAs in the
+ * sa_index array will be destroyed.
*/
u8 flag;
- u8 pad1; /* pading */
- u16 pad2; /* pading */
-
/* selected SA index */
u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
};
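
So a VF asks for everything to be torn down by sending flag == 0, or selects entries by setting bit i of flag for each valid sa_index[i]; the bit-per-index reading of flag is inferred from the comment above, so treat it as an assumption. A standalone sketch of building both request flavors:

#include <stdint.h>
#include <stdio.h>

#define MAX_SA_DESTROY_NUM	8

struct sa_destroy {
	uint8_t flag;	/* 0 = destroy all; bit i selects sa_index[i] (assumed) */
	uint32_t sa_index[MAX_SA_DESTROY_NUM];
};

int main(void)
{
	struct sa_destroy all = { 0, { 0 } };	/* all-zero: destroy every SA */
	struct sa_destroy two = { 0, { 0 } };

	two.sa_index[0] = 17;			/* SA handles to destroy */
	two.sa_index[1] = 42;
	two.flag = (1u << 0) | (1u << 1);	/* mark slots 0 and 1 valid */

	printf("all: flag=%#x, selective: flag=%#x\n",
	    (unsigned)all.flag, (unsigned)two.flag);
	return 0;
}
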
@@ -445,5 +468,127 @@
/* crypto configuration. Salt and keys are set to 0 */
struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
};
+#pragma pack()
+
+/* Add whitelist entry in IES */
+struct virtchnl_ipsec_sp_cfg {
+ u32 spi;
+ u32 dip[4];
+
+ /* Drop frame if true or redirect to QAT if false. */
+ u8 drop;
+
+ /* Congestion domain. For future use. */
+ u8 cgd;
+
+ /* 0 for IPv4 table, 1 for IPv6 table. */
+ u8 table_id;
+
+ /* Set TC (congestion domain) if true. For future use. */
+ u8 set_tc;
+};
+
+#pragma pack(1)
+/* Delete whitelist entry in IES */
+struct virtchnl_ipsec_sp_destroy {
+ /* 0 for IPv4 table, 1 for IPv6 table. */
+ u8 table_id;
+ u32 rule_id;
+};
+#pragma pack()
+
+/* Response from IES to whitelist operations */
+struct virtchnl_ipsec_sp_cfg_resp {
+ u32 rule_id;
+};
+
+struct virtchnl_ipsec_sa_cfg_resp {
+ u32 sa_handle;
+};
+
+#define INLINE_IPSEC_EVENT_RESET 0x1
+#define INLINE_IPSEC_EVENT_CRYPTO_ON 0x2
+#define INLINE_IPSEC_EVENT_CRYPTO_OFF 0x4
+
+struct virtchnl_ipsec_event {
+ u32 ipsec_event_data;
+};
+
+#define INLINE_IPSEC_STATUS_AVAILABLE 0x1
+#define INLINE_IPSEC_STATUS_UNAVAILABLE 0x2
+
+struct virtchnl_ipsec_status {
+ u32 status;
+};
+
+struct virtchnl_ipsec_resp {
+ u32 resp;
+};
+
+/* Internal message descriptor for VF <-> IPsec communication */
+struct inline_ipsec_msg {
+ u16 ipsec_opcode;
+ u16 req_id;
+
+ union {
+ /* IPsec request */
+ struct virtchnl_ipsec_sa_cfg sa_cfg[0];
+ struct virtchnl_ipsec_sp_cfg sp_cfg[0];
+ struct virtchnl_ipsec_sa_update sa_update[0];
+ struct virtchnl_ipsec_sa_destroy sa_destroy[0];
+ struct virtchnl_ipsec_sp_destroy sp_destroy[0];
+
+ /* IPsec response */
+ struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0];
+ struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0];
+ struct virtchnl_ipsec_cap ipsec_cap[0];
+ struct virtchnl_ipsec_status ipsec_status[0];
+ /* response to del_sa, del_sp, update_sa */
+ struct virtchnl_ipsec_resp ipsec_resp[0];
+
+ /* IPsec event (no req_id is required) */
+ struct virtchnl_ipsec_event event[0];
+
+ /* Reserved */
+ struct virtchnl_ipsec_sa_read sa_read[0];
+ } ipsec_data;
+};
+
+static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode)
+{
+ u16 valid_len = sizeof(struct inline_ipsec_msg);
+
+ switch (opcode) {
+ case INLINE_IPSEC_OP_GET_CAP:
+ case INLINE_IPSEC_OP_GET_STATUS:
+ break;
+ case INLINE_IPSEC_OP_SA_CREATE:
+ valid_len += sizeof(struct virtchnl_ipsec_sa_cfg);
+ break;
+ case INLINE_IPSEC_OP_SP_CREATE:
+ valid_len += sizeof(struct virtchnl_ipsec_sp_cfg);
+ break;
+ case INLINE_IPSEC_OP_SA_UPDATE:
+ valid_len += sizeof(struct virtchnl_ipsec_sa_update);
+ break;
+ case INLINE_IPSEC_OP_SA_DESTROY:
+ valid_len += sizeof(struct virtchnl_ipsec_sa_destroy);
+ break;
+ case INLINE_IPSEC_OP_SP_DESTROY:
+ valid_len += sizeof(struct virtchnl_ipsec_sp_destroy);
+ break;
+ /* Only for msg length calculation of the response to the VF in
+ * case of an inline IPsec failure.
+ */
+ case INLINE_IPSEC_OP_RESP:
+ valid_len += sizeof(struct virtchnl_ipsec_resp);
+ break;
+ default:
+ valid_len = 0;
+ break;
+ }
+
+ return valid_len;
+}
#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
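
A receiver is expected to compare the incoming buffer length against what virtchnl_inline_ipsec_val_msg_len() returns for the opcode before touching the payload. A hedged, self-contained usage sketch; the stand-in types below are trimmed copies, and the reject-on-short-length policy is an assumption about how a PF would use the helper:

#include <stdint.h>
#include <stdio.h>

/* Trimmed stand-ins, just enough for the length check. */
struct inline_ipsec_msg { uint16_t ipsec_opcode; uint16_t req_id; };
struct sa_update { uint32_t sa_index, esn_hi, esn_low; };
#define OP_SA_UPDATE	3

static unsigned val_msg_len(uint16_t opcode)
{
	unsigned len = sizeof(struct inline_ipsec_msg);

	if (opcode == OP_SA_UPDATE)
		len += sizeof(struct sa_update);
	else
		len = 0;	/* unknown opcode: force a reject */
	return len;
}

int main(void)
{
	unsigned buflen = sizeof(struct inline_ipsec_msg) +
	    sizeof(struct sa_update);
	unsigned need = val_msg_len(OP_SA_UPDATE);

	if (need == 0 || buflen < need)
		printf("drop malformed message\n");
	else
		printf("length ok (%u bytes)\n", buflen);
	return 0;
}
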
Index: sys/modules/ice_ddp/Makefile
===================================================================
--- sys/modules/ice_ddp/Makefile
+++ sys/modules/ice_ddp/Makefile
@@ -1,6 +1,6 @@
# $FreeBSD$
KMOD= ice_ddp
-FIRMWS= ${SRCTOP}/sys/contrib/dev/ice/ice-1.3.9.0.pkg:ice_ddp:0x01030900
+FIRMWS= ${SRCTOP}/sys/contrib/dev/ice/ice-1.3.16.0.pkg:ice_ddp:0x01031000
.include <bsd.kmod.mk>
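
The hex tag passed alongside the package file packs the four dotted version fields one per byte, so 1.3.16.0 becomes 0x01031000 (0x10 == 16), matching the fw_stub.awk arguments in sys/conf/files.* above. A one-liner making the encoding explicit; the macro name is made up:

#include <stdio.h>

/* Hypothetical helper: pack a.b.c.d into one byte per field. */
#define DDP_PKG_VERSION(a, b, c, d) \
	(((unsigned)(a) << 24) | ((unsigned)(b) << 16) | \
	 ((unsigned)(c) << 8) | (unsigned)(d))

int main(void)
{
	/* 1.3.16.0 -> 0x01031000, the value FIRMWS uses above. */
	printf("0x%08x\n", DDP_PKG_VERSION(1, 3, 16, 0));
	return 0;
}
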
