Index: sys/dev/ice/ice_adminq_cmd.h =================================================================== --- sys/dev/ice/ice_adminq_cmd.h +++ sys/dev/ice/ice_adminq_cmd.h @@ -167,6 +167,7 @@ #define ICE_AQC_CAPS_SKU 0x0074 #define ICE_AQC_CAPS_PORT_MAP 0x0075 #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 +#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 u8 major_ver; @@ -412,6 +413,40 @@ __le32 addr_low; }; +/* Request buffer for Set VLAN Mode AQ command (indirect 0x020C) */ +struct ice_aqc_set_vlan_mode { + u8 reserved; + u8 l2tag_prio_tagging; +#define ICE_AQ_VLAN_PRIO_TAG_S 0 +#define ICE_AQ_VLAN_PRIO_TAG_M (0x7 << ICE_AQ_VLAN_PRIO_TAG_S) +#define ICE_AQ_VLAN_PRIO_TAG_NOT_SUPPORTED 0x0 +#define ICE_AQ_VLAN_PRIO_TAG_STAG 0x1 +#define ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG 0x2 +#define ICE_AQ_VLAN_PRIO_TAG_OUTER_VLAN 0x3 +#define ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG 0x4 +#define ICE_AQ_VLAN_PRIO_TAG_MAX 0x4 +#define ICE_AQ_VLAN_PRIO_TAG_ERROR 0x7 + u8 l2tag_reserved[64]; + u8 rdma_packet; +#define ICE_AQ_VLAN_RDMA_TAG_S 0 +#define ICE_AQ_VLAN_RDMA_TAG_M (0x3F << ICE_AQ_VLAN_RDMA_TAG_S) +#define ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING 0x10 +#define ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING 0x1A + u8 rdma_reserved[2]; + u8 mng_vlan_prot_id; +#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER 0x10 +#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER 0x11 + u8 prot_id_reserved[30]; +}; + +/* Response buffer for Get VLAN Mode AQ command (indirect 0x020D) */ +struct ice_aqc_get_vlan_mode { + u8 vlan_mode; +#define ICE_AQ_VLAN_MODE_DVM_ENA BIT(0) + u8 l2tag_prio_tagging; + u8 reserved[98]; +}; + /* Add VSI (indirect 0x0210) * Update VSI (indirect 0x0211) * Get VSI (indirect 0x0212) @@ -485,108 +520,114 @@ #define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7) u8 sw_flags2; #define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0 -#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \ - (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S) +#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S) #define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0) #define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4) u8 veb_stat_id; #define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0 -#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S) +#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S) #define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5) /* security section */ u8 sec_flags; #define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0) #define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2) -#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4 -#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S) +#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4 +#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S) #define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0) u8 sec_reserved; /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - u8 pvlan_reserved[2]; - u8 vlan_flags; -#define ICE_AQ_VSI_VLAN_MODE_S 0 -#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S) -#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1 -#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2 -#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3 -#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) -#define ICE_AQ_VSI_VLAN_EMOD_S 3 -#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) -#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S) -#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S) -#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S) -#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) - u8 
pvlan_reserved2[3]; + __le16 port_based_inner_vlan; /* VLANS include priority bits */ + u8 inner_vlan_reserved[2]; + u8 inner_vlan_flags; +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_S 0 +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_TX_MODE_S) +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1 +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED 0x2 +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL 0x3 +#define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3 +#define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_BLOCK_TX_DESC BIT(5) + u8 inner_vlan_reserved2[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ -#define ICE_AQ_VSI_UP_TABLE_UP0_S 0 -#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S) -#define ICE_AQ_VSI_UP_TABLE_UP1_S 3 -#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S) -#define ICE_AQ_VSI_UP_TABLE_UP2_S 6 -#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S) -#define ICE_AQ_VSI_UP_TABLE_UP3_S 9 -#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S) -#define ICE_AQ_VSI_UP_TABLE_UP4_S 12 -#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S) -#define ICE_AQ_VSI_UP_TABLE_UP5_S 15 -#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S) -#define ICE_AQ_VSI_UP_TABLE_UP6_S 18 -#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S) -#define ICE_AQ_VSI_UP_TABLE_UP7_S 21 -#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S) +#define ICE_AQ_VSI_UP_TABLE_UP0_S 0 +#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S) +#define ICE_AQ_VSI_UP_TABLE_UP1_S 3 +#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S) +#define ICE_AQ_VSI_UP_TABLE_UP2_S 6 +#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S) +#define ICE_AQ_VSI_UP_TABLE_UP3_S 9 +#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S) +#define ICE_AQ_VSI_UP_TABLE_UP4_S 12 +#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S) +#define ICE_AQ_VSI_UP_TABLE_UP5_S 15 +#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S) +#define ICE_AQ_VSI_UP_TABLE_UP6_S 18 +#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S) +#define ICE_AQ_VSI_UP_TABLE_UP7_S 21 +#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S) __le32 egress_table; /* same defines as for ingress table */ /* outer tags section */ - __le16 outer_tag; - u8 outer_tag_flags; -#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0 -#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S) -#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0 -#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1 -#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2 -#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2 -#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S) -#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0 -#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1 -#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2 -#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3 -#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4) -#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6) - u8 
outer_tag_reserved; + __le16 port_based_outer_vlan; + u8 outer_vlan_flags; +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_S 0 +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_EMODE_S) +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH 0x0 +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_UP 0x1 +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW 0x2 +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING 0x3 +#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2 +#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S) +#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0 +#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1 +#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2 +#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3 +#define ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT BIT(4) +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S 5 +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1 +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTTAGGED 0x2 +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL 0x3 +#define ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC BIT(7) + u8 outer_vlan_reserved; /* queue mapping section */ __le16 mapping_flags; -#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0 -#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0) +#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0 +#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0) __le16 q_mapping[16]; -#define ICE_AQ_VSI_Q_S 0 -#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S) +#define ICE_AQ_VSI_Q_S 0 +#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S) __le16 tc_mapping[8]; -#define ICE_AQ_VSI_TC_Q_OFFSET_S 0 -#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S) -#define ICE_AQ_VSI_TC_Q_NUM_S 11 -#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S) +#define ICE_AQ_VSI_TC_Q_OFFSET_S 0 +#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S) +#define ICE_AQ_VSI_TC_Q_NUM_S 11 +#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S) /* queueing option section */ u8 q_opt_rss; -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0 -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0 -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2 -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3 -#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2 -#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S) -#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6 -#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3 +#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2 +#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S) +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6 +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) u8 q_opt_tc; -#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0 -#define ICE_AQ_VSI_Q_OPT_TC_OVR_M 
(0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S) -#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7) +#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0 +#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S) +#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7) u8 q_opt_flags; -#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0) +#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0) u8 q_opt_reserved[3]; /* outer up section */ __le32 outer_up_table; /* same structure and defines as ingress tbl */ @@ -594,27 +635,27 @@ __le16 sect_10_reserved; /* flow director section */ __le16 fd_options; -#define ICE_AQ_VSI_FD_ENABLE BIT(0) -#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1) -#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3) +#define ICE_AQ_VSI_FD_ENABLE BIT(0) +#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1) +#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3) __le16 max_fd_fltr_dedicated; __le16 max_fd_fltr_shared; __le16 fd_def_q; -#define ICE_AQ_VSI_FD_DEF_Q_S 0 -#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S) -#define ICE_AQ_VSI_FD_DEF_GRP_S 12 -#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S) +#define ICE_AQ_VSI_FD_DEF_Q_S 0 +#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S) +#define ICE_AQ_VSI_FD_DEF_GRP_S 12 +#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S) __le16 fd_report_opt; -#define ICE_AQ_VSI_FD_REPORT_Q_S 0 -#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S) -#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12 -#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S) -#define ICE_AQ_VSI_FD_DEF_DROP BIT(15) +#define ICE_AQ_VSI_FD_REPORT_Q_S 0 +#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S) +#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12 +#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S) +#define ICE_AQ_VSI_FD_DEF_DROP BIT(15) /* PASID section */ __le32 pasid_id; -#define ICE_AQ_VSI_PASID_ID_S 0 -#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S) -#define ICE_AQ_VSI_PASID_ID_VALID BIT(31) +#define ICE_AQ_VSI_PASID_ID_S 0 +#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S) +#define ICE_AQ_VSI_PASID_ID_VALID BIT(31) u8 reserved[24]; }; @@ -992,7 +1033,8 @@ __le32 src_parent_teid; __le32 dest_parent_teid; __le16 num_elems; - __le16 reserved; + u8 flags; + u8 reserved; }; struct ice_aqc_move_elem { @@ -1197,16 +1239,18 @@ __le16 param0; /* 18.0 - Report qualified modules */ #define ICE_AQC_GET_PHY_RQM BIT(0) - /* 18.1 - 18.2 : Report mode - * 00b - Report NVM capabilities - * 01b - Report topology capabilities - * 10b - Report SW configured + /* 18.1 - 18.3 : Report mode + * 000b - Report NVM capabilities + * 001b - Report topology capabilities + * 010b - Report SW configured + * 100b - Report default capabilities */ -#define ICE_AQC_REPORT_MODE_S 1 -#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) -#define ICE_AQC_REPORT_NVM_CAP 0 -#define ICE_AQC_REPORT_TOPO_CAP BIT(1) -#define ICE_AQC_REPORT_SW_CFG BIT(2) +#define ICE_AQC_REPORT_MODE_S 1 +#define ICE_AQC_REPORT_MODE_M (7 << ICE_AQC_REPORT_MODE_S) +#define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0 +#define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1) +#define ICE_AQC_REPORT_ACTIVE_CFG BIT(2) +#define ICE_AQC_REPORT_DFLT_CFG BIT(3) __le32 reserved1; __le32 addr_high; __le32 addr_low; @@ -1446,11 +1490,13 @@ #define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6) #define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7) u8 link_cfg_err; -#define ICE_AQ_LINK_CFG_ERR BIT(0) -#define ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2) +#define ICE_AQ_LINK_CFG_ERR BIT(0) +#define 
ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2) #define ICE_AQ_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3) #define ICE_AQ_LINK_TOPO_CRITICAL_SDP_ERR BIT(4) #define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5) +#define ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6) +#define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT BIT(7) u8 link_info; #define ICE_AQ_LINK_UP BIT(0) /* Link Status */ #define ICE_AQ_LINK_FAULT BIT(1) @@ -1932,7 +1978,11 @@ #define ICE_AQC_PORT_OPT_ACTIVE_M (0xF << ICE_AQC_PORT_OPT_ACTIVE_S) #define ICE_AQC_PORT_OPT_FORCED BIT(6) #define ICE_AQC_PORT_OPT_VALID BIT(7) - u8 rsvd[3]; + u8 pending_port_option_status; +#define ICE_AQC_PENDING_PORT_OPT_IDX_S 0 +#define ICE_AQC_PENDING_PORT_OPT_IDX_M (0xF << ICE_AQC_PENDING_PORT_OPT_IDX_S) +#define ICE_AQC_PENDING_PORT_OPT_VALID BIT(7) + u8 rsvd[2]; __le32 addr_high; __le32 addr_low; }; @@ -1957,6 +2007,7 @@ #define ICE_AQC_PORT_OPT_MAX_LANE_100G 7 u8 global_scid[2]; u8 phy_scid[2]; + u8 pf2port_cid[2]; }; /* Set Port Option (direct, 0x06EB) */ @@ -2021,6 +2072,25 @@ u8 rsvd[12]; }; +/* Program topology device NVM (direct, 0x06F2) */ +struct ice_aqc_program_topology_device_nvm { + u8 lport_num; + u8 lport_num_valid; + u8 node_type_ctx; + u8 index; + u8 rsvd[12]; +}; + +/* Read topology device NVM (indirect, 0x06F3) */ +struct ice_aqc_read_topology_device_nvm { + u8 lport_num; + u8 lport_num_valid; + u8 node_type_ctx; + u8 index; + __le32 start_address; + u8 data_read[8]; +}; + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Write commands (indirect 0x0703) @@ -2050,6 +2120,7 @@ #define ICE_AQC_NVM_POR_FLAG 0 /* Used by NVM Write completion on ARQ */ #define ICE_AQC_NVM_PERST_FLAG 1 #define ICE_AQC_NVM_EMPR_FLAG 2 +#define ICE_AQC_NVM_EMPR_ENA BIT(0) __le16 module_typeid; __le16 length; #define ICE_AQC_NVM_ERASE_LEN 0xFFFF @@ -2667,6 +2738,50 @@ __le32 reserved[4]; }; +/* Set FW Logging configuration (indirect 0xFF30) + * Register for FW Logging (indirect 0xFF31) + * Query FW Logging (indirect 0xFF32) + * FW Log Event (indirect 0xFF33) + * Get FW Log (indirect 0xFF34) + * Clear FW Log (indirect 0xFF35) + */ + +struct ice_aqc_fw_log { + u8 cmd_flags; +#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0) +#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1) +#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3) +#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0) +#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2) +#define ICE_AQC_FW_LOG_PERSISTENT BIT(0) + u8 rsp_flag; +#define ICE_AQC_FW_LOG_MORE_DATA BIT(1) + __le16 fw_rt_msb; + union { + struct { + __le32 fw_rt_lsb; + } sync; + struct { + __le16 log_resolution; +#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1) +#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128) + __le16 mdl_cnt; + } cfg; + } ops; + __le32 addr_high; + __le32 addr_low; +}; + +/* Response Buffer for: + * Set Firmware Logging Configuration (0xFF30) + * Query FW Logging (0xFF32) + */ +struct ice_aqc_fw_log_cfg_resp { + __le16 module_identifier; + u8 log_level; + u8 rsvd0; +}; + /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -2718,10 +2833,13 @@ struct ice_aqc_dnl_read_log_command dnl_read_log; struct ice_aqc_dnl_read_log_response dnl_read_log_resp; struct ice_aqc_i2c read_write_i2c; + struct ice_aqc_read_i2c_resp read_i2c_resp; struct ice_aqc_mdio read_write_mdio; struct ice_aqc_gpio_by_func read_write_gpio_by_func; struct ice_aqc_gpio read_write_gpio; struct ice_aqc_set_led set_led; + struct ice_aqc_mdio read_mdio; + struct ice_aqc_mdio write_mdio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led 
set_port_id_led; struct ice_aqc_get_port_options get_port_options; @@ -2888,6 +3006,8 @@ ice_aqc_opc_alloc_res = 0x0208, ice_aqc_opc_free_res = 0x0209, ice_aqc_opc_get_allocd_res_desc = 0x020A, + ice_aqc_opc_set_vlan_mode_parameters = 0x020C, + ice_aqc_opc_get_vlan_mode_parameters = 0x020D, /* VSI commands */ ice_aqc_opc_add_vsi = 0x0210, @@ -2965,6 +3085,8 @@ ice_aqc_opc_sff_eeprom = 0x06EE, ice_aqc_opc_sw_set_gpio = 0x06EF, ice_aqc_opc_sw_get_gpio = 0x06F0, + ice_aqc_opc_program_topology_device_nvm = 0x06F2, + ice_aqc_opc_read_topology_device_nvm = 0x06F3, /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, @@ -3030,7 +3152,15 @@ ice_aqc_opc_set_health_status_config = 0xFF20, ice_aqc_opc_get_supported_health_status_codes = 0xFF21, ice_aqc_opc_get_health_status = 0xFF22, - ice_aqc_opc_clear_health_status = 0xFF23 + ice_aqc_opc_clear_health_status = 0xFF23, + + /* FW Logging Commands */ + ice_aqc_opc_fw_logs_config = 0xFF30, + ice_aqc_opc_fw_logs_register = 0xFF31, + ice_aqc_opc_fw_logs_query = 0xFF32, + ice_aqc_opc_fw_logs_event = 0xFF33, + ice_aqc_opc_fw_logs_get = 0xFF34, + ice_aqc_opc_fw_logs_clear = 0xFF35 }; #endif /* _ICE_ADMINQ_CMD_H_ */ Index: sys/dev/ice/ice_bitops.h =================================================================== --- sys/dev/ice/ice_bitops.h +++ sys/dev/ice/ice_bitops.h @@ -477,6 +477,51 @@ return true; } +/** + * ice_bitmap_from_array32 - copies u32 array source into bitmap destination + * @dst: the destination bitmap + * @src: the source u32 array + * @size: size of the bitmap (in bits) + * + * This function copies the src bitmap stored in a u32 array into the dst + * bitmap stored as an ice_bitmap_t. + */ +static inline void +ice_bitmap_from_array32(ice_bitmap_t *dst, u32 *src, u16 size) +{ + u32 remaining_bits, i; + +#define BITS_PER_U32 (sizeof(u32) * BITS_PER_BYTE) + /* clear bitmap so we only have to set when iterating */ + ice_zero_bitmap(dst, size); + + for (i = 0; i < (u32)(size / BITS_PER_U32); i++) { + u32 bit_offset = i * BITS_PER_U32; + u32 entry = src[i]; + u32 j; + + for (j = 0; j < BITS_PER_U32; j++) { + if (entry & BIT(j)) + ice_set_bit((u16)(j + bit_offset), dst); + } + } + + /* still need to check the leftover bits (i.e.
if size isn't evenly + * divisible by BITS_PER_U32) + */ + remaining_bits = size % BITS_PER_U32; + if (remaining_bits) { + u32 bit_offset = i * BITS_PER_U32; + u32 entry = src[i]; + u32 j; + + for (j = 0; j < remaining_bits; j++) { + if (entry & BIT(j)) + ice_set_bit((u16)(j + bit_offset), dst); + } + } +} + #undef BIT_CHUNK #undef BIT_IN_CHUNK #undef LAST_CHUNK_BITS Index: sys/dev/ice/ice_common.h =================================================================== --- sys/dev/ice/ice_common.h +++ sys/dev/ice/ice_common.h @@ -39,6 +39,9 @@ #include "virtchnl.h" #include "ice_switch.h" +#define ICE_SQ_SEND_DELAY_TIME_MS 10 +#define ICE_SQ_SEND_MAX_EXECUTE 3 + enum ice_fw_modes { ICE_FW_MODE_NORMAL, ICE_FW_MODE_DBG, @@ -219,6 +222,11 @@ bool write, struct ice_sq_cd *cd); enum ice_status +ice_aq_get_port_options(struct ice_hw *hw, + struct ice_aqc_get_port_options_elem *options, + u8 *option_count, u8 lport, bool lport_valid, + u8 *active_option_idx, bool *active_option_valid); +enum ice_status ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info); enum ice_status __ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data); @@ -275,4 +283,9 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); enum ice_status ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +enum ice_status +ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, + struct ice_sq_cd *cd); +bool ice_is_fw_health_report_supported(struct ice_hw *hw); +bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ Index: sys/dev/ice/ice_common.c =================================================================== --- sys/dev/ice/ice_common.c +++ sys/dev/ice/ice_common.c @@ -189,6 +189,10 @@ return ICE_ERR_PARAM; hw = pi->hw; + if (report_mode == ICE_AQC_REPORT_DFLT_CFG && + !ice_fw_supports_report_dflt_cfg(hw)) + return ICE_ERR_PARAM; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); if (qual_mods) @@ -222,7 +226,7 @@ ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n", pcaps->module_type[2]); - if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) { + if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low); pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high); ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type, @@ -454,6 +458,7 @@ li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high); *hw_media_type = ice_get_media_type(pi); li->link_info = link_data.link_info; + li->link_cfg_err = link_data.link_cfg_err; li->an_info = link_data.an_info; li->ext_info = link_data.ext_info; li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size); @@ -803,10 +808,11 @@ /* Initialize port_info struct with PHY capabilities */ status = ice_aq_get_phy_caps(hw->port_info, false, - ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); + ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); ice_free(hw, pcaps); if (status) - ice_debug(hw, ICE_DBG_PHY, "Get PHY capabilities failed, continuing anyway\n"); + ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n", + status); /* Initialize port_info struct with link information */ status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); @@ -851,8 +857,6 @@ goto err_unroll_fltr_mgmt_struct; ice_init_lock(&hw->tnl_lock); - ice_init_vlan_mode_ops(hw); - return ICE_SUCCESS; err_unroll_fltr_mgmt_struct: @@ -1363,6 +1367,97 @@ /* FW Admin Queue command wrappers */ +/** + * ice_should_retry_sq_send_cmd +
* @opcode: AQ opcode + * + * Decide if we should retry the send command routine for the ATQ, depending + * on the opcode. + */ +static bool ice_should_retry_sq_send_cmd(u16 opcode) +{ + switch (opcode) { + case ice_aqc_opc_dnl_get_status: + case ice_aqc_opc_dnl_run: + case ice_aqc_opc_dnl_call: + case ice_aqc_opc_dnl_read_sto: + case ice_aqc_opc_dnl_write_sto: + case ice_aqc_opc_dnl_set_breakpoints: + case ice_aqc_opc_dnl_read_log: + case ice_aqc_opc_get_link_topo: + case ice_aqc_opc_done_alt_write: + case ice_aqc_opc_lldp_stop: + case ice_aqc_opc_lldp_start: + case ice_aqc_opc_lldp_filter_ctrl: + return true; + } + + return false; +} + +/** + * ice_sq_send_cmd_retry - send command to Control Queue (ATQ) + * @hw: pointer to the HW struct + * @cq: pointer to the specific Control queue + * @desc: prefilled descriptor describing the command + * @buf: buffer to use for indirect commands (or NULL for direct commands) + * @buf_size: size of buffer for indirect commands (or 0 for direct commands) + * @cd: pointer to command details structure + * + * Retry sending the FW Admin Queue command, multiple times, to the FW Admin + * Queue if the EBUSY AQ error is returned. + */ +static enum ice_status +ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_aq_desc *desc, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc_cpy; + enum ice_status status; + bool is_cmd_for_retry; + u8 *buf_cpy = NULL; + u8 idx = 0; + u16 opcode; + + opcode = LE16_TO_CPU(desc->opcode); + is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); + ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM); + + if (is_cmd_for_retry) { + if (buf) { + buf_cpy = (u8 *)ice_malloc(hw, buf_size); + if (!buf_cpy) + return ICE_ERR_NO_MEMORY; + } + + ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy), + ICE_NONDMA_TO_NONDMA); + } + + do { + status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd); + + if (!is_cmd_for_retry || status == ICE_SUCCESS || + hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY) + break; + + if (buf_cpy) + ice_memcpy(buf, buf_cpy, buf_size, + ICE_NONDMA_TO_NONDMA); + + ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy), + ICE_NONDMA_TO_NONDMA); + + ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false); + + } while (++idx < ICE_SQ_SEND_MAX_EXECUTE); + + if (buf_cpy) + ice_free(hw, buf_cpy); + + return status; +} + /** * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue * @hw: pointer to the HW struct @@ -1377,7 +1472,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { - return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); + return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); } /** @@ -1817,15 +1912,15 @@ * @hw: pointer to the ice_hw instance * @caps: pointer to common caps instance * @prefix: string to prefix when printing - * @debug: set to indicate debug print + * @dbg: set to indicate debug print */ static void ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, - char const *prefix, bool debug) + char const *prefix, bool dbg) { u8 i; - if (debug) + if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix, caps->led_pin_num); else @@ -1836,7 +1931,7 @@ if (!caps->led[i]) continue; - if (debug) + if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n", prefix, i, caps->led[i]); else @@ -1850,15 +1945,15 @@ * @hw: pointer to the ice_hw instance * @caps: pointer to common caps instance * @prefix: string to prefix when printing - * @debug: 
set to indicate debug print + * @dbg: set to indicate debug print */ static void ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, - char const *prefix, bool debug) + char const *prefix, bool dbg) { u8 i; - if (debug) + if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix, caps->sdp_pin_num); else @@ -1869,7 +1964,7 @@ if (!caps->sdp[i]) continue; - if (debug) + if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n", prefix, i, caps->sdp[i]); else @@ -2825,7 +2920,7 @@ if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status == ICE_SUCCESS) @@ -2933,7 +3028,6 @@ if (!pi || !cfg) return ICE_ERR_BAD_PTR; - switch (req_mode) { case ICE_FC_AUTO: { @@ -2944,11 +3038,10 @@ ice_malloc(pi->hw, sizeof(*pcaps)); if (!pcaps) return ICE_ERR_NO_MEMORY; - /* Query the value of FC that both the NIC and attached media * can do. */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status) { ice_free(pi->hw, pcaps); @@ -3017,8 +3110,9 @@ return ICE_ERR_NO_MEMORY; /* Get the current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, - NULL); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, + pcaps, NULL); + if (status) { *aq_failures = ICE_SET_FC_AQ_FAIL_GET; goto out; @@ -3135,17 +3229,6 @@ cfg->link_fec_opt = caps->link_fec_options; cfg->module_compliance_enforcement = caps->module_compliance_enforcement; - - if (ice_fw_supports_link_override(pi->hw)) { - struct ice_link_default_override_tlv tlv; - - if (ice_get_link_default_override(&tlv, pi)) - return; - - if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) - cfg->module_compliance_enforcement |= - ICE_LINK_OVERRIDE_STRICT_MODE; - } } /** @@ -3172,8 +3255,11 @@ if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, - NULL); + status = ice_aq_get_phy_caps(pi, false, + (ice_fw_supports_report_dflt_cfg(hw) ? + ICE_AQC_REPORT_DFLT_CFG : + ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); + if (status) goto out; @@ -3212,7 +3298,8 @@ break; } - if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { + if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) && + !ice_fw_supports_report_dflt_cfg(pi->hw)) { struct ice_link_default_override_tlv tlv; if (ice_get_link_default_override(&tlv, pi)) @@ -5167,6 +5254,141 @@ return false; } +/** + * ice_is_fw_health_report_supported + * @hw: pointer to the hardware structure + * + * Return true if firmware supports health status reports, + * false otherwise + */ +bool ice_is_fw_health_report_supported(struct ice_hw *hw) +{ + if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ) + return true; + + if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) { + if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN) + return true; + if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN && + hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH) + return true; + } + + return false; +} + +/** + * ice_aq_set_health_status_config - Configure FW health events + * @hw: pointer to the HW struct + * @event_source: type of diagnostic events to enable + * @cd: pointer to command details structure or NULL + * + * Configure the health status event types that the firmware will send to this + * PF. 
The supported event types are: PF-specific, all PFs, and global + */ +enum ice_status +ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, + struct ice_sq_cd *cd) +{ + struct ice_aqc_set_health_status_config *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_health_status_config; + + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_set_health_status_config); + + cmd->event_source = event_source; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_get_port_options + * @hw: pointer to the hw struct + * @options: buffer for the resultant port options + * @option_count: input - size of the buffer in port options structures, + * output - number of returned port options + * @lport: logical port to call the command with (optional) + * @lport_valid: when false, FW uses port owned by the PF instead of lport, + * when PF owns more than 1 port it must be true + * @active_option_idx: index of active port option in returned buffer + * @active_option_valid: active option in returned buffer is valid + * + * Calls Get Port Options AQC (0x06ea) and verifies result. + */ +enum ice_status +ice_aq_get_port_options(struct ice_hw *hw, + struct ice_aqc_get_port_options_elem *options, + u8 *option_count, u8 lport, bool lport_valid, + u8 *active_option_idx, bool *active_option_valid) +{ + struct ice_aqc_get_port_options *cmd; + struct ice_aq_desc desc; + enum ice_status status; + u8 pmd_count; + u8 max_speed; + u8 i; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* options buffer shall be able to hold max returned options */ + if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) + return ICE_ERR_PARAM; + + cmd = &desc.params.get_port_options; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); + + if (lport_valid) + cmd->lport_num = lport; + cmd->lport_num_valid = lport_valid; + + status = ice_aq_send_cmd(hw, &desc, options, + *option_count * sizeof(*options), NULL); + if (status != ICE_SUCCESS) + return status; + + /* verify direct FW response & set output parameters */ + *option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M; + ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); + *active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID; + if (*active_option_valid) { + *active_option_idx = cmd->port_options & + ICE_AQC_PORT_OPT_ACTIVE_M; + if (*active_option_idx > (*option_count - 1)) + return ICE_ERR_OUT_OF_RANGE; + ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", + *active_option_idx); + } + + /* verify indirect FW response & mask output options fields */ + for (i = 0; i < *option_count; i++) { + options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M; + options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M; + pmd_count = options[i].pmd; + max_speed = options[i].max_lane_speed; + ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", + pmd_count, max_speed); + + /* check only entries containing valid max pmd speed values, + * other reserved values may be returned, when logical port + * used is unrelated to specific option + */ + if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) { + if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV) + return ICE_ERR_OUT_OF_RANGE; + if (pmd_count > 2 && + max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G) + return ICE_ERR_CFG; + if (pmd_count > 7 && + max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G) + return ICE_ERR_CFG; + } + } + + return ICE_SUCCESS; +} + /** * ice_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the HW struct @@ -5246,3 +5468,23 @@ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } + +/** 
+ * ice_fw_supports_report_dflt_cfg + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports reporting the default configuration + */ +bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) +{ + if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) { + if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN) + return true; + if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN && + hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH) + return true; + } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) { + return true; + } + return false; +} Index: sys/dev/ice/ice_common_sysctls.h =================================================================== --- sys/dev/ice/ice_common_sysctls.h +++ sys/dev/ice/ice_common_sysctls.h @@ -73,6 +73,18 @@ */ bool ice_enable_tx_lldp_filter = true; +/** + * @var ice_enable_health_events + * @brief boolean indicating if health status events from the FW should be reported + * + * Global sysctl variable indicating whether the Health Status events from the + * FW should be enabled. If true, when an event occurs, the driver will print out + * a message with a description of the event and possible actions to take. + * + * @remark each PF has a separate sysctl which can override this value. + */ +bool ice_enable_health_events = true; + /* sysctls marked as tunable, (i.e. with the CTLFLAG_TUN set) will * automatically load tunable values, without the need to manually create the * TUNABLE definition. @@ -89,6 +101,10 @@ static SYSCTL_NODE(_hw_ice, OID_AUTO, debug, ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 0, "ICE driver debug parameters"); +SYSCTL_BOOL(_hw_ice, OID_AUTO, enable_health_events, CTLFLAG_RDTUN, + &ice_enable_health_events, 0, + "Enable FW health event reporting globally"); + SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, &ice_enable_tx_fc_filter, 0, "Drop Ethertype 0x8808 control frames originating from non-HW sources"); Index: sys/dev/ice/ice_controlq.h =================================================================== --- sys/dev/ice/ice_controlq.h +++ sys/dev/ice/ice_controlq.h @@ -43,8 +43,8 @@ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) #define ICE_CTL_Q_DESC_UNUSED(R) \ - (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) + ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1)) /* Defines that help manage the driver vs FW API checks. * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
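A minimal usage sketch of the driver-vs-FW API check pattern described above, using ice_fw_supports_report_dflt_cfg() and the PHY caps report modes from this patch. The wrapper name below is hypothetical; it mirrors the report-mode selection hunks in ice_common.c earlier in this patch and is not part of the change itself:

static enum ice_status
ice_get_phy_caps_best_mode(struct ice_port_info *pi,
			   struct ice_aqc_get_phy_caps_data *pcaps)
{
	/* Prefer the FW default-configuration report when the running
	 * FW API version supports it; otherwise fall back to topology
	 * capabilities with media, as the ice_common.c hunks above do.
	 */
	u8 report_mode = ice_fw_supports_report_dflt_cfg(pi->hw) ?
	    ICE_AQC_REPORT_DFLT_CFG : ICE_AQC_REPORT_TOPO_CAP_MEDIA;

	return ice_aq_get_phy_caps(pi, false, report_mode, pcaps, NULL);
}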
@@ -112,7 +112,6 @@ /* Control Queue information */ struct ice_ctl_q_info { enum ice_ctl_q qtype; - enum ice_aq_err rq_last_status; /* last status on receive queue */ struct ice_ctl_q_ring rq; /* receive queue */ struct ice_ctl_q_ring sq; /* send queue */ u32 sq_cmd_timeout; /* send queue cmd write back timeout */ Index: sys/dev/ice/ice_controlq.c =================================================================== --- sys/dev/ice/ice_controlq.c +++ sys/dev/ice/ice_controlq.c @@ -1094,7 +1094,7 @@ * ice_sq_send_cmd - send command to Control Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue - * @desc: prefilled descriptor describing the command (non DMA mem) + * @desc: prefilled descriptor describing the command * @buf: buffer to use for indirect commands (or NULL for direct commands) * @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure @@ -1151,6 +1151,7 @@ struct ice_rq_event_info *e, u16 *pending) { u16 ntc = cq->rq.next_to_clean; + enum ice_aq_err rq_last_status; enum ice_status ret_code = ICE_SUCCESS; struct ice_aq_desc *desc; struct ice_dma_mem *bi; @@ -1184,13 +1185,12 @@ desc = ICE_CTL_Q_DESC(cq->rq, ntc); desc_idx = ntc; - cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval); + rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval); flags = LE16_TO_CPU(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { ret_code = ICE_ERR_AQ_ERROR; ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", - LE16_TO_CPU(desc->opcode), - cq->rq_last_status); + LE16_TO_CPU(desc->opcode), rq_last_status); } ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA); datalen = LE16_TO_CPU(desc->datalen); Index: sys/dev/ice/ice_drv_info.h =================================================================== --- sys/dev/ice/ice_drv_info.h +++ sys/dev/ice/ice_drv_info.h @@ -63,16 +63,16 @@ * @var ice_rc_version * @brief driver release candidate version number */ -const char ice_driver_version[] = "0.28.1-k"; +const char ice_driver_version[] = "0.29.4-k"; const uint8_t ice_major_version = 0; -const uint8_t ice_minor_version = 28; -const uint8_t ice_patch_version = 1; +const uint8_t ice_minor_version = 29; +const uint8_t ice_patch_version = 4; const uint8_t ice_rc_version = 0; #define PVIDV(vendor, devid, name) \ - PVID(vendor, devid, name " - 0.28.1-k") + PVID(vendor, devid, name " - 0.29.4-k") #define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \ - PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.28.1-k") + PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.29.4-k") /** * @var ice_vendor_info_array @@ -116,6 +116,9 @@ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x000D, 0, "Intel(R) Ethernet Network Adapter E810-L-Q2 for OCP3.0"), + PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, + ICE_INTEL_VENDOR_ID, 0x000E, 0, + "Intel(R) Ethernet Network Adapter E810-2C-Q2"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, "Intel(R) Ethernet Controller E810-C for QSFP"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, @@ -133,6 +136,9 @@ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0009, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 2.0"), + PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, + ICE_INTEL_VENDOR_ID, 0x000C, 0, + "Intel(R) Ethernet Network Adapter E810-XXV-4 for OCP 3.0"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, "Intel(R) Ethernet 
Controller E810-C for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE, Index: sys/dev/ice/ice_features.h =================================================================== --- sys/dev/ice/ice_features.h +++ sys/dev/ice/ice_features.h @@ -63,7 +63,9 @@ ICE_FEATURE_RDMA, ICE_FEATURE_SAFE_MODE, ICE_FEATURE_LENIENT_LINK_MODE, - ICE_FEATURE_DEFAULT_OVERRIDE, + ICE_FEATURE_LINK_MGMT_VER_1, + ICE_FEATURE_LINK_MGMT_VER_2, + ICE_FEATURE_HEALTH_STATUS, /* Must be last entry */ ICE_FEATURE_COUNT }; Index: sys/dev/ice/ice_flex_pipe.h =================================================================== --- sys/dev/ice/ice_flex_pipe.h +++ sys/dev/ice/ice_flex_pipe.h @@ -64,12 +64,11 @@ ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list); enum ice_status -ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, struct ice_sq_cd *cd); - -enum ice_status ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); +enum ice_status +ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd); bool ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, u16 *port); @@ -120,5 +119,10 @@ u64 id); enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); +struct ice_buf_build * +ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, + void **section); +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld); +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld); #endif /* _ICE_FLEX_PIPE_H_ */ Index: sys/dev/ice/ice_flex_pipe.c =================================================================== --- sys/dev/ice/ice_flex_pipe.c +++ sys/dev/ice/ice_flex_pipe.c @@ -38,6 +38,7 @@ /* To support tunneling entries by PF, the package will append the PF number to * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. */ +#define ICE_TNL_PRE "TNL_" static const struct ice_tunnel_type_scan tnls[] = { { TNL_VXLAN, "TNL_VXLAN_PF" }, { TNL_GENEVE, "TNL_GENEVE_PF" }, @@ -364,6 +365,7 @@ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) return NULL; + /* cppcheck-suppress nullPointer */ if (index > ICE_MAX_BST_TCAMS_IN_BUF) return NULL; @@ -435,6 +437,7 @@ if (!section) return NULL; + /* cppcheck-suppress nullPointer */ if (index > ICE_MAX_LABELS_IN_BUF) return NULL; @@ -480,6 +483,42 @@ return label->name; } +/** + * ice_add_tunnel_hint + * @hw: pointer to the HW structure + * @label_name: label text + * @val: value of the tunnel port boost entry + */ +static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) +{ + if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { + u16 i; + + for (i = 0; tnls[i].type != TNL_LAST; i++) { + size_t len = strlen(tnls[i].label_prefix); + + /* Look for matching label start, before continuing */ + if (strncmp(label_name, tnls[i].label_prefix, len)) + continue; + + /* Make sure this label matches our PF. Note that the PF + * character ('0' - '7') will be located where our + * prefix string's null terminator is located. 
+ */ + if ((label_name[len] - '0') == hw->pf_id) { + hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; + hw->tnl.tbl[hw->tnl.count].valid = false; + hw->tnl.tbl[hw->tnl.count].in_use = false; + hw->tnl.tbl[hw->tnl.count].marked = false; + hw->tnl.tbl[hw->tnl.count].boost_addr = val; + hw->tnl.tbl[hw->tnl.count].port = 0; + hw->tnl.count++; + break; + } + } + } +} + /** * ice_init_pkg_hints * @hw: pointer to the HW structure @@ -506,34 +545,15 @@ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, &val); - while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { - for (i = 0; tnls[i].type != TNL_LAST; i++) { - size_t len = strlen(tnls[i].label_prefix); - - /* Look for matching label start, before continuing */ - if (strncmp(label_name, tnls[i].label_prefix, len)) - continue; - - /* Make sure this label matches our PF. Note that the PF - * character ('0' - '7') will be located where our - * prefix string's null terminator is located. - */ - if ((label_name[len] - '0') == hw->pf_id) { - hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; - hw->tnl.tbl[hw->tnl.count].valid = false; - hw->tnl.tbl[hw->tnl.count].in_use = false; - hw->tnl.tbl[hw->tnl.count].marked = false; - hw->tnl.tbl[hw->tnl.count].boost_addr = val; - hw->tnl.tbl[hw->tnl.count].port = 0; - hw->tnl.count++; - break; - } - } + while (label_name) { + if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) + /* check for a tunnel entry */ + ice_add_tunnel_hint(hw, label_name, val); label_name = ice_enum_labels(NULL, 0, &state, &val); } - /* Cache the appropriate boost TCAM entry pointers */ + /* Cache the appropriate boost TCAM entry pointers for tunnels */ for (i = 0; i < hw->tnl.count; i++) { ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, &hw->tnl.tbl[i].boost_entry); @@ -943,6 +963,36 @@ return NULL; } +/** + * ice_update_pkg_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + */ +static enum ice_status +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status = ICE_SUCCESS; + u32 i; + + for (i = 0; i < count; i++) { + struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); + bool last = ((i + 1) == count); + u32 offset, info; + + status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), + last, &offset, &info, NULL); + + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", + status, offset, info); + break; + } + } + + return status; +} + /** * ice_update_pkg * @hw: pointer to the hardware structure @@ -955,25 +1005,12 @@ ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { enum ice_status status; - u32 offset, info, i; status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) return status; - for (i = 0; i < count; i++) { - struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); - bool last = ((i + 1) == count); - - status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), - last, &offset, &info, NULL); - - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", - status, offset, info); - break; - } - } + status = ice_update_pkg_no_lock(hw, bufs, count); ice_release_change_lock(hw); @@ -1102,6 +1139,7 @@ ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl; + enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", @@ -1119,8 
+1157,12 @@ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", LE32_TO_CPU(ice_buf_tbl->buf_count)); - return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - LE32_TO_CPU(ice_buf_tbl->buf_count)); + status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + LE32_TO_CPU(ice_buf_tbl->buf_count)); + + ice_cache_vlan_mode(hw); + + return status; } /** @@ -1882,7 +1924,7 @@ * * Frees a package buffer */ -static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) { ice_free(hw, bld); } @@ -1991,7 +2033,7 @@ * Allocates a package buffer with a single section. * Note: all package contents must be in Little Endian form. */ -static struct ice_buf_build * +struct ice_buf_build * ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, void **section) { @@ -2105,7 +2147,7 @@ * * Return a pointer to the buffer's header */ -static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) { if (!bld) return NULL; @@ -2342,7 +2384,7 @@ u16 count = 0; u16 index; u16 size; - u16 i; + u16 i, j; ice_acquire_lock(&hw->tnl_lock); @@ -2382,30 +2424,31 @@ size); if (!sect_rx) goto ice_destroy_tunnel_err; - sect_rx->count = CPU_TO_LE16(1); + sect_rx->count = CPU_TO_LE16(count); sect_tx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, size); if (!sect_tx) goto ice_destroy_tunnel_err; - sect_tx->count = CPU_TO_LE16(1); + sect_tx->count = CPU_TO_LE16(count); /* copy original boost entry to update package buffer, one copy to Rx * section, another copy to the Tx section */ - for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && (all || hw->tnl.tbl[i].port == port)) { - ice_memcpy(sect_rx->tcam + i, + ice_memcpy(sect_rx->tcam + j, hw->tnl.tbl[i].boost_entry, sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA); - ice_memcpy(sect_tx->tcam + i, + ice_memcpy(sect_tx->tcam + j, hw->tnl.tbl[i].boost_entry, sizeof(*sect_tx->tcam), ICE_NONDMA_TO_NONDMA); hw->tnl.tbl[i].marked = true; + j++; } status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); @@ -2768,6 +2811,7 @@ count++; LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) chk_count++; + /* cppcheck-suppress knownConditionTrueFalse */ if (!count || count != chk_count) return false; Index: sys/dev/ice/ice_flex_type.h =================================================================== --- sys/dev/ice/ice_flex_type.h +++ sys/dev/ice/ice_flex_type.h @@ -612,8 +612,8 @@ #define ICE_PF_NUM_S 13 #define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S) #define ICE_VSIG_VALUE(vsig, pf_id) \ - (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ - (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)) + ((u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ + (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))) #define ICE_DEFAULT_VSIG 0 /* XLT2 Table */ @@ -730,4 +730,30 @@ ICE_PROF_TUN_ALL = 0x6, ICE_PROF_ALL = 0xFF, }; + +/* Number of bits/bytes contained in meta init entry. Note, this should be a + * multiple of 32 bits. 
+ */ +#define ICE_META_INIT_BITS 192 +#define ICE_META_INIT_DW_CNT (ICE_META_INIT_BITS / (sizeof(__le32) * \ + BITS_PER_BYTE)) + +/* The meta init Flag field starts at this bit */ +#define ICE_META_FLAGS_ST 123 + +/* The entry and bit to check for Double VLAN Mode (DVM) support */ +#define ICE_META_VLAN_MODE_ENTRY 0 +#define ICE_META_FLAG_VLAN_MODE 60 +#define ICE_META_VLAN_MODE_BIT (ICE_META_FLAGS_ST + \ + ICE_META_FLAG_VLAN_MODE) + +struct ice_meta_init_entry { + __le32 bm[ICE_META_INIT_DW_CNT]; +}; + +struct ice_meta_init_section { + __le16 count; + __le16 offset; + struct ice_meta_init_entry entry[1]; +}; #endif /* _ICE_FLEX_TYPE_H_ */ Index: sys/dev/ice/ice_flow.c =================================================================== --- sys/dev/ice/ice_flow.c +++ sys/dev/ice/ice_flow.c @@ -1494,9 +1494,9 @@ * 3 for tunneled with outer ipv6 */ #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \ - (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ - (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ - (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)) + ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ + (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ + (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))) /** * ice_add_rss_cfg_sync - add an RSS configuration Index: sys/dev/ice/ice_iflib.h =================================================================== --- sys/dev/ice/ice_iflib.h +++ sys/dev/ice/ice_iflib.h @@ -274,6 +274,9 @@ /* Ethertype filters enabled */ bool enable_tx_fc_filter; bool enable_tx_lldp_filter; + + /* Other tunable flags */ + bool enable_health_events; int rebuild_ticks; Index: sys/dev/ice/ice_lan_tx_rx.h =================================================================== --- sys/dev/ice/ice_lan_tx_rx.h +++ sys/dev/ice/ice_lan_tx_rx.h @@ -1079,6 +1079,7 @@ #pragma pack(1) struct ice_tx_cmpltnq_ctx { u64 base; +#define ICE_TX_CMPLTNQ_CTX_BASE_S 7 u32 q_len; #define ICE_TX_CMPLTNQ_CTX_Q_LEN_S 4 u8 generation; @@ -1086,6 +1087,9 @@ u8 pf_num; u16 vmvf_num; u8 vmvf_type; +#define ICE_TX_CMPLTNQ_CTX_VMVF_TYPE_VF 0 +#define ICE_TX_CMPLTNQ_CTX_VMVF_TYPE_VMQ 1 +#define ICE_TX_CMPLTNQ_CTX_VMVF_TYPE_PF 2 u8 tph_desc_wr; u8 cpuid; u32 cmpltn_cache[16]; @@ -1115,10 +1119,15 @@ #pragma pack(1) struct ice_tx_drbell_q_ctx { u64 base; +#define ICE_TX_DRBELL_Q_CTX_BASE_S 7 u16 ring_len; +#define ICE_TX_DRBELL_Q_CTX_RING_LEN_S 4 u8 pf_num; u16 vf_num; u8 vmvf_type; +#define ICE_TX_DRBELL_Q_CTX_VMVF_TYPE_VF 0 +#define ICE_TX_DRBELL_Q_CTX_VMVF_TYPE_VMQ 1 +#define ICE_TX_DRBELL_Q_CTX_VMVF_TYPE_PF 2 u8 cpuid; u8 tph_desc_rd; u8 tph_desc_wr; @@ -1175,7 +1184,7 @@ /* L2 Packet types */ ICE_PTT_UNUSED_ENTRY(0), ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(2), ICE_PTT_UNUSED_ENTRY(3), ICE_PTT_UNUSED_ENTRY(4), ICE_PTT_UNUSED_ENTRY(5), @@ -1289,7 +1298,7 @@ /* Non Tunneled IPv6 */ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(91), ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), Index: sys/dev/ice/ice_lib.h =================================================================== --- sys/dev/ice/ice_lib.h +++ sys/dev/ice/ice_lib.h @@ -114,6 +114,9 @@ /* global sysctl indicating whether the Tx LLDP 
filter should be enabled */ extern bool ice_enable_tx_lldp_filter; +/* global sysctl indicating whether FW health status events should be enabled */ +extern bool ice_enable_health_events; + /** * @struct ice_bar_info * @brief PCI BAR mapping information @@ -243,6 +246,19 @@ #define ICE_DEFAULT_VF_QUEUES 4 +/* + * There are three settings that can be updated independently or + * altogether: Link speed, FEC, and Flow Control. These macros allow + * the caller to specify which setting(s) to update. + */ +#define ICE_APPLY_LS BIT(0) +#define ICE_APPLY_FEC BIT(1) +#define ICE_APPLY_FC BIT(2) +#define ICE_APPLY_LS_FEC (ICE_APPLY_LS | ICE_APPLY_FEC) +#define ICE_APPLY_LS_FC (ICE_APPLY_LS | ICE_APPLY_FC) +#define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC) +#define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC) + /** * @enum ice_dyn_idx_t * @brief Dynamic Control ITR indexes @@ -761,7 +777,7 @@ struct sysctl_oid *parent, struct ice_hw_port_stats *stats); void ice_configure_misc_interrupts(struct ice_softc *sc); -int ice_sync_multicast_filters(struct ice_softc *sc); +int ice_sync_multicast_filters(struct ice_softc *sc); enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid); enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid); void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent); @@ -789,7 +805,7 @@ const char *ice_fec_str(enum ice_fec_mode mode); const char *ice_fc_str(enum ice_fc_mode mode); const char *ice_fwd_act_str(enum ice_sw_fwd_act_type action); -const char * ice_state_to_str(enum ice_state state); +const char *ice_state_to_str(enum ice_state state); int ice_init_link_events(struct ice_softc *sc); void ice_configure_rx_itr(struct ice_vsi *vsi); void ice_configure_tx_itr(struct ice_vsi *vsi); @@ -797,17 +813,18 @@ void ice_handle_mdd_event(struct ice_softc *sc); void ice_init_dcb_setup(struct ice_softc *sc); int ice_send_version(struct ice_softc *sc); -int ice_cfg_pf_ethertype_filters(struct ice_softc *sc); +int ice_cfg_pf_ethertype_filters(struct ice_softc *sc); void ice_init_link_configuration(struct ice_softc *sc); void ice_init_saved_phy_cfg(struct ice_softc *sc); -void ice_apply_saved_phy_cfg(struct ice_softc *sc); +int ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings); void ice_set_link_management_mode(struct ice_softc *sc); -int ice_module_event_handler(module_t mod, int what, void *arg); -int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd); -int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req); -int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length); -int ice_alloc_intr_tracking(struct ice_softc *sc); +int ice_module_event_handler(module_t mod, int what, void *arg); +int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd); +int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req); +int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length); +int ice_alloc_intr_tracking(struct ice_softc *sc); void ice_free_intr_tracking(struct ice_softc *sc); void ice_set_default_local_lldp_mib(struct ice_softc *sc); +void ice_init_health_events(struct ice_softc *sc); #endif /* _ICE_LIB_H_ */ Index: sys/dev/ice/ice_lib.c =================================================================== --- sys/dev/ice/ice_lib.c +++ sys/dev/ice/ice_lib.c @@ -49,6 +49,7 @@ #include #include #include +#include /** * @var M_ICE @@ -119,21 +120,19 @@ enum ice_sw_fwd_act_type action); static void 
ice_add_rx_lldp_filter(struct ice_softc *sc); static void ice_del_rx_lldp_filter(struct ice_softc *sc); -static u16 ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, - u64 phy_type_high); -static void -ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, +static u16 ice_aq_phy_types_to_link_speeds(u64 phy_type_low, + u64 phy_type_high); +struct ice_phy_data; +static int +ice_intersect_phy_types_and_speeds(struct ice_softc *sc, + struct ice_phy_data *phy_data); +static int +ice_apply_saved_phy_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg); -static void -ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, +static int +ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg); static void -ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, - struct ice_aqc_set_phy_cfg_data *cfg); -static void ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg); static void @@ -142,16 +141,13 @@ static void ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, u64 *phy_type_high); -static int -ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds, - u64 *phy_type_low, u64 *phy_type_high); -static int -ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low, - u64 *phy_type_high); +static u16 ice_apply_supported_speed_filter(u16 report_speeds); static void -ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high); -static enum ice_status -ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high); +ice_handle_health_status_event(struct ice_softc *sc, + struct ice_rq_event_info *event); +static void +ice_print_health_status_string(device_t dev, + struct ice_aqc_health_status_elem *elem); static int ice_module_init(void); static int ice_module_exit(void); @@ -198,6 +194,7 @@ static int ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS); static int ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS); static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS); +static int ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS); /** * ice_map_bar - Map PCIe BAR memory @@ -465,13 +462,13 @@ /* Traffic from VSI can be sent to LAN */ ctx->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; /* Allow all packets untagged/tagged */ - ctx->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & - ICE_AQ_VSI_VLAN_MODE_M) >> - ICE_AQ_VSI_VLAN_MODE_S); + ctx->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL & + ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >> + ICE_AQ_VSI_INNER_VLAN_TX_MODE_S); /* Show VLAN/UP from packets in Rx descriptors */ - ctx->info.vlan_flags |= ((ICE_AQ_VSI_VLAN_EMOD_STR_BOTH & - ICE_AQ_VSI_VLAN_EMOD_M) >> - ICE_AQ_VSI_VLAN_EMOD_S); + ctx->info.inner_vlan_flags |= ((ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH & + ICE_AQ_VSI_INNER_VLAN_EMODE_M) >> + ICE_AQ_VSI_INNER_VLAN_EMODE_S); /* Have 1:1 UP mapping for both ingress/egress tables */ table |= ICE_UP_TABLE_TRANSLATE(0, 0); table |= ICE_UP_TABLE_TRANSLATE(1, 1); @@ -485,7 +482,7 @@ ctx->info.egress_table = CPU_TO_LE32(table); /* Have 1:1 UP mapping for outer to inner UP table */ ctx->info.outer_up_table = CPU_TO_LE32(table); - /* No Outer tag support, so outer_tag_flags remains zero */ + /* No Outer tag support, so outer_vlan_flags remains zero */ } /** @@ -959,8 +956,8 @@ * ice_phy_types_to_max_rate - Returns port's max supported baudrate * @pi: port info struct * - * 
ice_aq_get_phy_caps() w/ ICE_AQC_REPORT_TOPO_CAP parameter needs to have - * been called before this function for it to work. + * ice_aq_get_phy_caps() w/ ICE_AQC_REPORT_TOPO_CAP_MEDIA parameter needs + * to have been called before this function for it to work. */ static uint64_t ice_phy_types_to_max_rate(struct ice_port_info *pi) @@ -1081,6 +1078,8 @@ enum ice_status ice_add_media_types(struct ice_softc *sc, struct ifmedia *media) { + struct ice_aqc_get_phy_caps_data pcaps = { 0 }; + struct ice_port_info *pi = sc->hw.port_info; enum ice_status status; uint64_t phy_low, phy_high; int bit; @@ -1096,13 +1095,17 @@ /* Remove all previous media types */ ifmedia_removeall(media); - status = ice_get_phy_types(sc, &phy_low, &phy_high); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, + &pcaps, NULL); if (status != ICE_SUCCESS) { - /* Function already prints appropriate error - * message - */ + device_printf(sc->dev, + "%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n", + __func__, ice_status_str(status), + ice_aq_str(sc->hw.adminq.sq_last_status)); return (status); } + phy_low = le64toh(pcaps.phy_type_low); + phy_high = le64toh(pcaps.phy_type_high); /* make sure the added bitmap is zero'd */ memset(already_added, 0, sizeof(already_added)); @@ -1930,10 +1933,17 @@ "Possible mis-configuration of the Ethernet port detected; please use the Intel (R) Ethernet Port Configuration Tool utility to address the issue.\n"); if ((pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) && - !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP) && - !(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE)) - device_printf(dev, - "Link is disabled on this device because an unsupported module type was detected! Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) { + if (!(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE)) + device_printf(dev, + "Link is disabled on this device because an unsupported module type was detected! Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + if (pi->phy.link_info.link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) + device_printf(dev, + "The module's power requirements exceed the device's power supply. Cannot start link.\n"); + if (pi->phy.link_info.link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) + device_printf(dev, + "The installed module is incompatible with the device's NVM image. 
Cannot start link.\n"); + } if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) { @@ -1982,6 +1992,9 @@ case ice_aqc_opc_event_lan_overflow: ice_handle_lan_overflow_event(sc, event); break; + case ice_aqc_opc_get_health_status: + ice_handle_health_status_event(sc, event); + break; default: device_printf(sc->dev, "%s Receive Queue unhandled event 0x%04x ignored\n", @@ -2047,13 +2060,12 @@ if (status) { if (q_type == ICE_CTL_Q_ADMIN) device_printf(sc->dev, - "%s Receive Queue event error %s aq_err %s\n", - qname, ice_status_str(status), - ice_aq_str(cq->rq_last_status)); + "%s Receive Queue event error %s\n", + qname, ice_status_str(status)); else device_printf(sc->dev, - "%s Receive Queue event error %s cq_err %d\n", - qname, ice_status_str(status), cq->rq_last_status); + "%s Receive Queue event error %s\n", + qname, ice_status_str(status)); free(event.msg_buf, M_ICE); return (EIO); } @@ -2753,7 +2765,7 @@ ICE_PHY_TYPE_HIGH_100G_AUI2) /** - * ice_aq_phy_types_to_sysctl_speeds - Convert the PHY Types to speeds + * ice_aq_phy_types_to_link_speeds - Convert the PHY Types to speeds * @phy_type_low: lower 64-bit PHY Type bitmask * @phy_type_high: upper 64-bit PHY Type bitmask * @@ -2762,7 +2774,7 @@ * value will include the "ICE_AQ_LINK_SPEED_UNKNOWN" flag as well. */ static u16 -ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, u64 phy_type_high) +ice_aq_phy_types_to_link_speeds(u64 phy_type_low, u64 phy_type_high) { u16 sysctl_speeds = 0; int bit; @@ -2820,162 +2832,103 @@ } /** - * ice_intersect_media_types_with_caps - Restrict input AQ PHY flags - * @sc: driver private structure - * @sysctl_speeds: current SW configuration of PHY types - * @phy_type_low: input/output flag set for low PHY types - * @phy_type_high: input/output flag set for high PHY types + * @struct ice_phy_data + * @brief PHY caps and link speeds * - * Intersects the input PHY flags with PHY flags retrieved from the adapter to - * ensure the flags are compatible. + * Buffer providing report mode and user speeds; + * returning intersection of PHY types and speeds. 
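+ *
+ * A minimal usage sketch (assumes a valid softc pointer "sc"): the
+ * caller fills in report_mode and user_speeds_orig, then reads back
+ * the *_intr intersection fields, just as ice_sysctl_advertise_speed()
+ * does below:
+ *
+ *	struct ice_phy_data phy_data = { 0 };
+ *
+ *	phy_data.report_mode = ICE_AQC_REPORT_ACTIVE_CFG;
+ *	phy_data.user_speeds_orig = 0;	/* 0 is treated as "Auto" */
+ *	if (ice_intersect_phy_types_and_speeds(sc, &phy_data) == 0)
+ *		sysctl_speeds = phy_data.user_speeds_intr;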
+ */ +struct ice_phy_data { + u64 phy_low_orig; /* PHY low quad from report */ + u64 phy_high_orig; /* PHY high quad from report */ + u64 phy_low_intr; /* PHY low quad intersection with user speeds */ + u64 phy_high_intr; /* PHY high quad intersection with user speeds */ + u16 user_speeds_orig; /* Input from caller - See ICE_AQ_LINK_SPEED_* */ + u16 user_speeds_intr; /* Intersect with report speeds */ + u8 report_mode; /* See ICE_AQC_REPORT_* */ +}; + +/** + * ice_intersect_phy_types_and_speeds - Return intersection of link speeds + * @sc: device private structure + * @phy_data: device PHY data * - * @returns 0 on success, EIO if an AQ command fails, or EINVAL if input PHY - * types have no intersection with TOPO_CAPS and the adapter is in non-lenient - * mode + * On read: Displays the currently supported speeds + * On write: Sets the device's supported speeds + * Valid input flags: see ICE_SYSCTL_HELP_ADVERTISE_SPEED */ static int -ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds, - u64 *phy_type_low, u64 *phy_type_high) +ice_intersect_phy_types_and_speeds(struct ice_softc *sc, + struct ice_phy_data *phy_data) { struct ice_aqc_get_phy_caps_data pcaps = { 0 }; - struct ice_port_info *pi = sc->hw.port_info; - device_t dev = sc->dev; + const char *report_types[5] = { "w/o MEDIA", + "w/MEDIA", + "ACTIVE", + "EDOOFUS", /* Not used */ + "DFLT" }; + struct ice_hw *hw = &sc->hw; + struct ice_port_info *pi = hw->port_info; enum ice_status status; - u64 temp_phy_low, temp_phy_high; - u64 final_phy_low, final_phy_high; - u16 topo_speeds; + u16 report_speeds, temp_speeds; + u8 report_type; + bool apply_speed_filter = false; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, - &pcaps, NULL); + switch (phy_data->report_mode) { + case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA: + case ICE_AQC_REPORT_TOPO_CAP_MEDIA: + case ICE_AQC_REPORT_ACTIVE_CFG: + case ICE_AQC_REPORT_DFLT_CFG: + report_type = phy_data->report_mode >> 1; + break; + default: + device_printf(sc->dev, + "%s: phy_data.report_mode \"%u\" doesn't exist\n", + __func__, phy_data->report_mode); + return (EINVAL); + } + + /* 0 is treated as "Auto"; the driver will handle selecting the + * correct speeds. Including, in some cases, applying an override + * if provided. + */ + if (phy_data->user_speeds_orig == 0) + phy_data->user_speeds_orig = USHRT_MAX; + else if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) + apply_speed_filter = true; + + status = ice_aq_get_phy_caps(pi, false, phy_data->report_mode, &pcaps, NULL); if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), + device_printf(sc->dev, + "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", + __func__, report_types[report_type], + ice_status_str(status), ice_aq_str(sc->hw.adminq.sq_last_status)); return (EIO); } - final_phy_low = le64toh(pcaps.phy_type_low); - final_phy_high = le64toh(pcaps.phy_type_high); - - topo_speeds = ice_aq_phy_types_to_sysctl_speeds(final_phy_low, - final_phy_high); - - /* - * If the user specifies a subset of speeds the media is already - * capable of supporting, then we're good to go. - */ - if ((sysctl_speeds & topo_speeds) == sysctl_speeds) - goto intersect_final; - - temp_phy_low = final_phy_low; - temp_phy_high = final_phy_high; - /* - * Otherwise, we'll have to use the superset if Lenient Mode is - * supported. 
- */ - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { - /* - * Start with masks that _don't_ include the PHY types - * discovered by the TOPO_CAP. - */ - ice_sysctl_speeds_to_aq_phy_types(topo_speeds, &final_phy_low, - &final_phy_high); - final_phy_low = ~final_phy_low; - final_phy_high = ~final_phy_high; - - /* Get the PHY types the NVM says we can support */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (NVM_CAP) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(sc->hw.adminq.sq_last_status)); - return (status); + phy_data->phy_low_orig = le64toh(pcaps.phy_type_low); + phy_data->phy_high_orig = le64toh(pcaps.phy_type_high); + report_speeds = ice_aq_phy_types_to_link_speeds(phy_data->phy_low_orig, + phy_data->phy_high_orig); + if (apply_speed_filter) { + temp_speeds = ice_apply_supported_speed_filter(report_speeds); + if ((phy_data->user_speeds_orig & temp_speeds) == 0) { + device_printf(sc->dev, + "User-specified speeds (\"0x%04X\") not supported\n", + phy_data->user_speeds_orig); + return (EINVAL); } - - /* - * Clear out the unsupported PHY types, including those - * from TOPO_CAP. - */ - final_phy_low &= le64toh(pcaps.phy_type_low); - final_phy_high &= le64toh(pcaps.phy_type_high); - /* - * Include PHY types from TOPO_CAP (which may be a subset - * of the types the NVM specifies). - */ - final_phy_low |= temp_phy_low; - final_phy_high |= temp_phy_high; + report_speeds = temp_speeds; } - -intersect_final: - - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) - ice_apply_supported_speed_filter(&final_phy_low, &final_phy_high); - - ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &temp_phy_low, - &temp_phy_high); - - final_phy_low &= temp_phy_low; - final_phy_high &= temp_phy_high; - - if (final_phy_low == 0 && final_phy_high == 0) { - device_printf(dev, - "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); - return (EINVAL); - } - - /* Overwrite input phy_type values and return */ - *phy_type_low = final_phy_low; - *phy_type_high = final_phy_high; + ice_sysctl_speeds_to_aq_phy_types(phy_data->user_speeds_orig, + &phy_data->phy_low_intr, &phy_data->phy_high_intr); + phy_data->user_speeds_intr = phy_data->user_speeds_orig & report_speeds; + phy_data->phy_low_intr &= phy_data->phy_low_orig; + phy_data->phy_high_intr &= phy_data->phy_high_orig; return (0); -} - -/** - * ice_get_auto_speeds - Get PHY type flags for "auto" speed - * @sc: driver private structure - * @phy_type_low: output low PHY type flags - * @phy_type_high: output high PHY type flags - * - * Retrieves a suitable set of PHY type flags to use for an "auto" speed - * setting by either using the NVM default overrides for speed, or retrieving - * a default from the adapter using Get PHY capabilities in TOPO_CAPS mode. 
- * - * @returns 0 on success or EIO on AQ command failure - */ -static int -ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low, - u64 *phy_type_high) -{ - struct ice_aqc_get_phy_caps_data pcaps = { 0 }; - struct ice_hw *hw = &sc->hw; - struct ice_port_info *pi = hw->port_info; - device_t dev = sc->dev; - enum ice_status status; - - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE)) { - /* copy over speed settings from LDO TLV */ - *phy_type_low = CPU_TO_LE64(sc->ldo_tlv.phy_type_low); - *phy_type_high = CPU_TO_LE64(sc->ldo_tlv.phy_type_high); - } else { - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } - - *phy_type_low = le64toh(pcaps.phy_type_low); - *phy_type_high = le64toh(pcaps.phy_type_high); - } - - return (0); -} + } /** * ice_sysctl_advertise_speed - Display/change link speeds supported by port @@ -2992,15 +2945,11 @@ ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; - struct ice_aqc_get_phy_caps_data pcaps = { 0 }; - struct ice_aqc_set_phy_cfg_data cfg = { 0 }; - struct ice_hw *hw = &sc->hw; - struct ice_port_info *pi = hw->port_info; + struct ice_port_info *pi = sc->hw.port_info; + struct ice_phy_data phy_data = { 0 }; device_t dev = sc->dev; - enum ice_status status; - u64 phy_low, phy_high; - u16 sysctl_speeds = 0; - int error = 0; + u16 sysctl_speeds; + int ret; UNREFERENCED_PARAMETER(arg2); @@ -3008,23 +2957,18 @@ return (ESHUTDOWN); /* Get the current speeds from the adapter's "active" configuration. */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (SW_CFG) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); + phy_data.report_mode = ICE_AQC_REPORT_ACTIVE_CFG; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret) { + /* Error message already printed within function */ + return (ret); } - phy_low = le64toh(pcaps.phy_type_low); - phy_high = le64toh(pcaps.phy_type_high); - sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); + sysctl_speeds = phy_data.user_speeds_intr; - error = sysctl_handle_16(oidp, &sysctl_speeds, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_16(oidp, &sysctl_speeds, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (sysctl_speeds > 0x7FF) { device_printf(dev, @@ -3033,53 +2977,10 @@ return (EINVAL); } - /* 0 is treated as "Auto"; the driver will handle selecting the correct speeds, - * or apply an override if one is specified in the NVM. 
- */ - if (sysctl_speeds == 0) { - error = ice_get_auto_speeds(sc, &phy_low, &phy_high); - if (error) - /* Function already prints appropriate error message */ - return (error); - } else { - error = ice_intersect_media_types_with_caps(sc, sysctl_speeds, - &phy_low, &phy_high); - if (error) - /* Function already prints appropriate error message */ - return (error); - } - sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); - - /* Cache new user setting for speeds */ pi->phy.curr_user_speed_req = sysctl_speeds; - /* Setup new PHY config with new input PHY types */ - ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); - - cfg.phy_type_low = phy_low; - cfg.phy_type_high = phy_high; - cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; - - status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); - if (status != ICE_SUCCESS) { - /* Don't indicate failure if there's no media in the port -- the sysctl - * handler has saved the value and will apply it when media is inserted. - */ - if (status == ICE_ERR_AQ_ERROR && - hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { - device_printf(dev, - "%s: Setting will be applied when media is inserted\n", __func__); - return (0); - } else { - device_printf(dev, - "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } - } - - return (0); + /* Apply settings requested by user */ + return ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS); } #define ICE_SYSCTL_HELP_FEC_CONFIG \ @@ -3106,14 +3007,10 @@ { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_port_info *pi = sc->hw.port_info; - struct ice_aqc_get_phy_caps_data pcaps = { 0 }; - struct ice_aqc_set_phy_cfg_data cfg = { 0 }; - struct ice_hw *hw = &sc->hw; enum ice_fec_mode new_mode; - enum ice_status status; device_t dev = sc->dev; char req_fec[32]; - int error = 0; + int ret; UNREFERENCED_PARAMETER(arg2); @@ -3123,9 +3020,9 @@ bzero(req_fec, sizeof(req_fec)); strlcpy(req_fec, ice_requested_fec_mode(pi), sizeof(req_fec)); - error = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (strcmp(req_fec, "auto") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) { @@ -3149,74 +3046,8 @@ /* Cache user FEC mode for later link ups */ pi->phy.curr_user_fec_req = new_mode; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps failed (SW_CFG); status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } - - ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); - - /* Get link_fec_opt/AUTO_FEC mode from TOPO caps for base for new FEC mode */ - memset(&pcaps, 0, sizeof(pcaps)); - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps failed (TOPO_CAP); status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } - - /* Configure new FEC options using TOPO caps */ - cfg.link_fec_opt = pcaps.link_fec_options; - cfg.caps &= ~ICE_AQ_PHY_ENA_AUTO_FEC; - if (pcaps.caps & ICE_AQC_PHY_EN_AUTO_FEC) - cfg.caps |= ICE_AQ_PHY_ENA_AUTO_FEC; - - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE) && - 
new_mode == ICE_FEC_AUTO) { - /* copy over FEC settings from LDO TLV */ - cfg.link_fec_opt = sc->ldo_tlv.fec_options; - } else { - ice_cfg_phy_fec(pi, &cfg, new_mode); - - /* Check if the new mode is valid, and exit with an error if not */ - if (cfg.link_fec_opt && - !(cfg.link_fec_opt & pcaps.link_fec_options)) { - device_printf(dev, - "%s: The requested FEC mode, %s, is not supported by current media\n", - __func__, ice_fec_str(new_mode)); - return (ENOTSUP); - } - } - - cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); - if (status != ICE_SUCCESS) { - /* Don't indicate failure if there's no media in the port -- the sysctl - * handler has saved the value and will apply it when media is inserted. - */ - if (status == ICE_ERR_AQ_ERROR && - hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { - device_printf(dev, - "%s: Setting will be applied when media is inserted\n", __func__); - return (0); - } else { - device_printf(dev, - "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } - } - - return (0); + /* Apply settings requested by user */ + return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FEC); } /** @@ -3234,7 +3065,7 @@ struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; char neg_fec[32]; - int error; + int ret; UNREFERENCED_PARAMETER(arg2); @@ -3245,11 +3076,11 @@ bzero(neg_fec, sizeof(neg_fec)); strlcpy(neg_fec, ice_negotiated_fec_mode(hw->port_info), sizeof(neg_fec)); - error = sysctl_handle_string(oidp, neg_fec, 0, req); + ret = sysctl_handle_string(oidp, neg_fec, 0, req); if (req->newptr != NULL) return (EPERM); - return (error); + return (ret); } #define ICE_SYSCTL_HELP_FC_CONFIG \ @@ -3281,19 +3112,18 @@ struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; - int error = 0, fc_num; + int ret, fc_num; bool mode_set = false; struct sbuf buf; char *fc_str_end; char fc_str[32]; - u8 aq_failures; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, &pcaps, NULL); if (status != ICE_SUCCESS) { device_printf(dev, @@ -3321,9 +3151,9 @@ sbuf_finish(&buf); sbuf_delete(&buf); - error = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req); + if ((ret) || (req->newptr == NULL)) + return (ret); /* Try to parse input as a string, first */ if (strcasecmp(ice_fc_str(ICE_FC_FULL), fc_str) == 0) { @@ -3372,26 +3202,11 @@ } } - /* Finally, set the flow control mode in FW */ - hw->port_info->fc.req_mode = new_mode; - status = ice_set_fc(pi, &aq_failures, true); - if (status != ICE_SUCCESS) { - /* Don't indicate failure if there's no media in the port -- the sysctl - * handler has saved the value and will apply it when media is inserted. 
- */ - if (aq_failures == ICE_SET_FC_AQ_FAIL_SET && - hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { - device_printf(dev, - "%s: Setting will be applied when media is inserted\n", __func__); - return (0); - } else { - device_printf(dev, - "%s: ice_set_fc AQ failure = %d\n", __func__, aq_failures); - return (EIO); - } - } + /* Set the flow control mode in FW */ + pi->phy.curr_user_fc_req = new_mode; - return (0); + /* Apply settings requested by user */ + return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FC); } /** @@ -3443,14 +3258,14 @@ device_t dev = sc->dev; enum ice_status status; uint64_t types; - int error = 0; + int ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); - status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_SW_CFG, + status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_ACTIVE_CFG, &pcaps, NULL); if (status != ICE_SUCCESS) { device_printf(dev, @@ -3465,9 +3280,9 @@ else types = pcaps.phy_type_low; - error = sysctl_handle_64(oidp, &types, sizeof(types), req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_64(oidp, &types, sizeof(types), req); + if ((ret) || (req->newptr == NULL)) + return (ret); ice_copy_phy_caps_to_cfg(hw->port_info, &pcaps, &cfg); @@ -3542,13 +3357,13 @@ struct ice_port_info *pi = hw->port_info; device_t dev = sc->dev; enum ice_status status; - int error; + int ret; UNREFERENCED_PARAMETER(arg2); - error = priv_check(curthread, PRIV_DRIVER); - if (error) - return (error); + ret = priv_check(curthread, PRIV_DRIVER); + if (ret) + return (ret); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); @@ -3562,11 +3377,11 @@ return (EIO); } - error = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req); + ret = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req); if (req->newptr != NULL) return (EPERM); - return (error); + return (ret); } /** @@ -3583,7 +3398,7 @@ ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS) { return ice_sysctl_phy_caps(oidp, arg1, arg2, req, - ICE_AQC_REPORT_SW_CFG); + ICE_AQC_REPORT_ACTIVE_CFG); } /** @@ -3600,7 +3415,7 @@ ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS) { return ice_sysctl_phy_caps(oidp, arg1, arg2, req, - ICE_AQC_REPORT_NVM_CAP); + ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA); } /** @@ -3617,7 +3432,7 @@ ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS) { return ice_sysctl_phy_caps(oidp, arg1, arg2, req, - ICE_AQC_REPORT_TOPO_CAP); + ICE_AQC_REPORT_TOPO_CAP_MEDIA); } /** @@ -3641,7 +3456,7 @@ struct ice_aq_desc desc; device_t dev = sc->dev; enum ice_status status; - int error; + int ret; UNREFERENCED_PARAMETER(arg2); @@ -3649,9 +3464,9 @@ * Ensure that only contexts with driver privilege are allowed to * access this information */ - error = priv_check(curthread, PRIV_DRIVER); - if (error) - return (error); + ret = priv_check(curthread, PRIV_DRIVER); + if (ret) + return (ret); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); @@ -3669,11 +3484,11 @@ return (EIO); } - error = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req); + ret = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req); if (req->newptr != NULL) return (EPERM); - return (error); + return (ret); } /** @@ -3780,7 +3595,7 @@ struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; - int error = 0; + int ret; u32 old_state; u8 fw_lldp_enabled; bool retried_start_lldp = false; @@ -3813,9 +3628,9 @@ else fw_lldp_enabled = true; - error = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); 
+ ret = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (old_state == 0 && fw_lldp_enabled == false) return (0); @@ -3864,7 +3679,7 @@ hw->port_info->qos_cfg.is_sw_lldp = false; } - return (error); + return (ret); } /** @@ -4156,6 +3971,47 @@ return sysctl_handle_64(oidp, NULL, stat, req); } +/** + * ice_sysctl_rx_errors_stat - Display aggregate of Rx errors + * @oidp: sysctl oid structure + * @arg1: pointer to private data structure + * @arg2: unused + * @req: sysctl request pointer + * + * On read: Sums current values of Rx error statistics and + * displays it. + */ +static int +ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS) +{ + struct ice_vsi *vsi = (struct ice_vsi *)arg1; + struct ice_hw_port_stats *hs = &vsi->sc->stats.cur; + u64 stat = 0; + int i, type; + + UNREFERENCED_PARAMETER(arg2); + + if (ice_driver_is_detaching(vsi->sc)) + return (ESHUTDOWN); + + stat += hs->rx_undersize; + stat += hs->rx_fragments; + stat += hs->rx_oversize; + stat += hs->rx_jabber; + stat += hs->rx_len_errors; + stat += hs->crc_errors; + stat += hs->illegal_bytes; + + /* Checksum error stats */ + for (i = 0; i < vsi->num_rx_queues; i++) + for (type = ICE_CSO_STAT_RX_IP4_ERR; + type < ICE_CSO_STAT_RX_COUNT; + type++) + stat += vsi->rx_queues[i].stats.cso[type]; + + return sysctl_handle_64(oidp, NULL, stat, req); +} + /** * @struct ice_rx_cso_stat_info * @brief sysctl information for an Rx checksum offload statistic @@ -4280,9 +4136,10 @@ CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards, 0, "Discarded Rx Packets (see rx_errors or rx_no_desc)"); - SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_errors", - CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_errors, - 0, "Rx Packets Discarded Due To Error"); + SYSCTL_ADD_PROC(ctx, hw_list, OID_AUTO, "rx_errors", + CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, + vsi, 0, ice_sysctl_rx_errors_stat, "QU", + "Aggregate of all Rx errors"); SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_no_desc", CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_no_desc, @@ -4713,16 +4570,16 @@ { struct ice_vsi *vsi = (struct ice_vsi *)arg1; struct ice_softc *sc = vsi->sc; - int increment, error = 0; + int increment, ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); - error = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (vsi->rx_itr < 0) vsi->rx_itr = ICE_DFLT_RX_ITR; @@ -4765,16 +4622,16 @@ { struct ice_vsi *vsi = (struct ice_vsi *)arg1; struct ice_softc *sc = vsi->sc; - int increment, error = 0; + int increment, ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); - error = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); /* Allow configuring a negative value to reset to the default */ if (vsi->tx_itr < 0) @@ -4893,6 +4750,12 @@ struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); + sc->enable_health_events = ice_enable_health_events; + + SYSCTL_ADD_BOOL(ctx, ctx_list, OID_AUTO, "enable_health_events", + CTLFLAG_RDTUN, &sc->enable_health_events, 0, + "Enable FW health event reporting for this PF"); + /* Add a node to track VSI sysctls. 
Keep track of the node in the * softc so that we can hook other sysctls into it later. This * includes both the VSI statistics, as well as potentially dynamic @@ -5321,7 +5184,7 @@ enum ice_status status; enum ice_reset_req reset_type = ICE_RESET_INVAL; const char *reset_message; - int error = 0; + int ret; /* Buffer to store the requested reset string. Must contain enough * space to store the largest expected reset string, which currently * means 6 bytes of space. */ char reset[6] = ""; UNREFERENCED_PARAMETER(arg2); - error = priv_check(curthread, PRIV_DRIVER); - if (error) - return (error); + ret = priv_check(curthread, PRIV_DRIVER); + if (ret) + return (ret); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Read in the requested reset type. */ - error = sysctl_handle_string(oidp, reset, sizeof(reset), req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_string(oidp, reset, sizeof(reset), req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (strcmp(reset, "pfr") == 0) { reset_message = "Requesting a PF reset"; @@ -6439,7 +6302,7 @@ struct ice_aqc_get_phy_caps_data pcaps = { 0 }; enum ice_status status; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, &pcaps, NULL); if (status) /* Just report unknown if we can't get capabilities */ @@ -7363,7 +7226,7 @@ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA); /* Apply default link settings */ - ice_apply_saved_phy_cfg(sc); + ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC); } else { /* Set link down, and poll for media available in timer. This prevents the * driver from receiving spurious link-related events. */ @@ -7380,8 +7243,7 @@ /** * ice_apply_saved_phy_req_to_cfg -- Write saved user PHY settings to cfg data - * @pi: port info struct - * @pcaps: TOPO_CAPS capability data to use for defaults + * @sc: device private structure * @cfg: new PHY config data to be modified * * Applies user settings for advertised speeds to the PHY type fields in the * supplied PHY config struct. It uses the data from pcaps to check if the * saved settings are invalid and uses the pcaps data instead if they are * invalid.
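 *
 * A sketch of the intended calling sequence (mirroring
 * ice_apply_saved_phy_cfg() below; error handling elided):
 *
 *	ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
 *	if (ice_apply_saved_phy_req_to_cfg(sc, &cfg) == 0)
 *		(void)ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);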
*/ -static void -ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, +static int +ice_apply_saved_phy_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg) { + struct ice_phy_data phy_data = { 0 }; + struct ice_port_info *pi = sc->hw.port_info; u64 phy_low = 0, phy_high = 0; + u16 link_speeds; + int ret; - ice_update_phy_type(&phy_low, &phy_high, pi->phy.curr_user_speed_req); - cfg->phy_type_low = pcaps->phy_type_low & htole64(phy_low); - cfg->phy_type_high = pcaps->phy_type_high & htole64(phy_high); + link_speeds = pi->phy.curr_user_speed_req; - /* Can't use saved user speed request; use NVM default PHY capabilities */ - if (!cfg->phy_type_low && !cfg->phy_type_high) { - cfg->phy_type_low = pcaps->phy_type_low; - cfg->phy_type_high = pcaps->phy_type_high; + if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2)) { + memset(&phy_data, 0, sizeof(phy_data)); + phy_data.report_mode = ICE_AQC_REPORT_DFLT_CFG; + phy_data.user_speeds_orig = link_speeds; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret != 0) { + /* Error message already printed within function */ + return (ret); + } + phy_low = phy_data.phy_low_intr; + phy_high = phy_data.phy_high_intr; + + if (link_speeds == 0 || phy_data.user_speeds_intr) + goto finalize_link_speed; + if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { + memset(&phy_data, 0, sizeof(phy_data)); + phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA; + phy_data.user_speeds_orig = link_speeds; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret != 0) { + /* Error message already printed within function */ + return (ret); + } + phy_low = phy_data.phy_low_intr; + phy_high = phy_data.phy_high_intr; + + if (!phy_data.user_speeds_intr) { + phy_low = phy_data.phy_low_orig; + phy_high = phy_data.phy_high_orig; + } + goto finalize_link_speed; + } + /* If we're here, then it means the benefits of Version 2 + * link management aren't utilized. We fall through to + * handling Strict Link Mode the same as Version 1 link + * management. + */ } + + memset(&phy_data, 0, sizeof(phy_data)); + if ((link_speeds == 0) && + (sc->ldo_tlv.phy_type_low || sc->ldo_tlv.phy_type_high)) + phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA; + else + phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA; + phy_data.user_speeds_orig = link_speeds; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret != 0) { + /* Error message already printed within function */ + return (ret); + } + phy_low = phy_data.phy_low_intr; + phy_high = phy_data.phy_high_intr; + + if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { + if (phy_low == 0 && phy_high == 0) { + device_printf(sc->dev, + "The selected speed is not supported by the current media. 
Please select a link speed that is supported by the current media.\n"); + return (EINVAL); + } + } else { + if (link_speeds == 0) { + if (sc->ldo_tlv.phy_type_low & phy_low || + sc->ldo_tlv.phy_type_high & phy_high) { + phy_low &= sc->ldo_tlv.phy_type_low; + phy_high &= sc->ldo_tlv.phy_type_high; + } + } else if (phy_low == 0 && phy_high == 0) { + memset(&phy_data, 0, sizeof(phy_data)); + phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA; + phy_data.user_speeds_orig = link_speeds; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret != 0) { + /* Error message already printed within function */ + return (ret); + } + phy_low = phy_data.phy_low_intr; + phy_high = phy_data.phy_high_intr; + + if (!phy_data.user_speeds_intr) { + phy_low = phy_data.phy_low_orig; + phy_high = phy_data.phy_high_orig; + } + } + } + +finalize_link_speed: + + /* Cache new user settings for speeds */ + pi->phy.curr_user_speed_req = phy_data.user_speeds_intr; + cfg->phy_type_low = htole64(phy_low); + cfg->phy_type_high = htole64(phy_high); + + return (ret); } /** * ice_apply_saved_fec_req_to_cfg -- Write saved user FEC mode to cfg data - * @pi: port info struct - * @pcaps: TOPO_CAPS capability data to use for defaults + * @sc: device private structure * @cfg: new PHY config data to be modified * * Applies user setting for FEC mode to PHY config struct. It uses the data * from pcaps to check if the saved settings are invalid and uses the pcaps * data instead if they are invalid. */ -static void -ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, +static int +ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg) { - ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); + struct ice_port_info *pi = sc->hw.port_info; + enum ice_status status; - /* Can't use saved user FEC mode; use NVM default PHY capabilities */ - if (cfg->link_fec_opt && - !(cfg->link_fec_opt & pcaps->link_fec_options)) { - cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; - cfg->link_fec_opt = pcaps->link_fec_options; - } + cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; + status = ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); + if (status) + return (EIO); + + return (0); } /** @@ -7451,7 +7402,7 @@ switch (pi->phy.curr_user_fc_req) { case ICE_FC_FULL: cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | - ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; + ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; break; case ICE_FC_RX_PAUSE: cfg->caps |= ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; @@ -7465,81 +7416,102 @@ } } -/** - * ice_apply_saved_user_req_to_cfg -- Apply all saved user settings to AQ cfg data - * @pi: port info struct - * @pcaps: TOPO_CAPS capability data to use for defaults - * @cfg: new PHY config data to be modified - * - * Applies user settings for advertised speeds, FEC mode, and flow control - * mode to the supplied PHY config struct; it uses the data from pcaps to check - * if the saved settings are invalid and uses the pcaps data instead if they - * are invalid. 
- */ -static void -ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, - struct ice_aqc_set_phy_cfg_data *cfg) -{ - ice_apply_saved_phy_req_to_cfg(pi, pcaps, cfg); - ice_apply_saved_fec_req_to_cfg(pi, pcaps, cfg); - ice_apply_saved_fc_req_to_cfg(pi, cfg); -} - /** * ice_apply_saved_phy_cfg -- Re-apply user PHY config settings * @sc: device private structure + * @settings: which settings to apply * - * Takes the saved user PHY config settings, overwrites the NVM - * default with them if they're valid, and uses the Set PHY Config AQ command - * to apply them. + * Applies user settings for advertised speeds, FEC mode, and flow + * control mode to a PHY config struct; it uses the data from pcaps + * to check if the saved settings are invalid and uses the pcaps + * data instead if they are invalid. * - * Intended for use when media is inserted. - * - * @pre Port has media available + * For things like sysctls where only one setting needs to be + * updated, the bitmap allows the caller to specify which setting + * to update. */ -void -ice_apply_saved_phy_cfg(struct ice_softc *sc) +int +ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings) { struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_port_info *pi = sc->hw.port_info; struct ice_aqc_get_phy_caps_data pcaps = { 0 }; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; + u64 phy_low, phy_high; enum ice_status status; + enum ice_fec_mode dflt_fec_mode; + enum ice_fc_mode dflt_fc_mode; + u16 dflt_user_speed; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + if (!settings || settings > ICE_APPLY_LS_FEC_FC) { + ice_debug(hw, ICE_DBG_LINK, "Settings out-of-bounds: %u\n", + settings); + } + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, &pcaps, NULL); if (status != ICE_SUCCESS) { device_printf(dev, - "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", + "%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); - return; + return (EIO); } + phy_low = le64toh(pcaps.phy_type_low); + phy_high = le64toh(pcaps.phy_type_high); + + /* Save off initial config parameters */ + dflt_user_speed = ice_aq_phy_types_to_link_speeds(phy_low, phy_high); + dflt_fec_mode = ice_caps_to_fec_mode(pcaps.caps, pcaps.link_fec_options); + dflt_fc_mode = ice_caps_to_fc_mode(pcaps.caps); + /* Setup new PHY config */ ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); - /* Apply settings requested by user */ - ice_apply_saved_user_req_to_cfg(pi, &pcaps, &cfg); + /* On error, restore active configuration values */ + if ((settings & ICE_APPLY_LS) && + ice_apply_saved_phy_req_to_cfg(sc, &cfg)) { + pi->phy.curr_user_speed_req = dflt_user_speed; + cfg.phy_type_low = pcaps.phy_type_low; + cfg.phy_type_high = pcaps.phy_type_high; + } + if ((settings & ICE_APPLY_FEC) && + ice_apply_saved_fec_req_to_cfg(sc, &cfg)) { + pi->phy.curr_user_fec_req = dflt_fec_mode; + } + if (settings & ICE_APPLY_FC) { + /* No real error indicators for this process, + * so we'll just have to assume it works. */ + ice_apply_saved_fc_req_to_cfg(pi, &cfg); + } /* Enable link and re-negotiate it */ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); if (status != ICE_SUCCESS) { + /* Don't indicate failure if there's no media in the port. + * The settings have been saved and will apply when media + * is inserted. 
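+ *
+ * As a result, a caller cannot distinguish "applied immediately"
+ * from "deferred until media insertion" by the return value alone;
+ * both paths return 0 (sketch):
+ *
+ *	if (ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC) == 0)
+ *		; /* active now, or cached for the next media event */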
+ */ if ((status == ICE_ERR_AQ_ERROR) && - (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)) + (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)) { device_printf(dev, - "%s: User PHY cfg not applied; no media in port\n", + "%s: Setting will be applied when media is inserted\n", __func__); - else + return (0); + } else { device_printf(dev, "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); + return (EIO); + } } + + return (0); } /** @@ -7605,14 +7577,25 @@ (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE))) ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_en); + /* FW supports reporting a default configuration */ + if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LINK_MGMT_VER_2) && + ice_fw_supports_report_dflt_cfg(&sc->hw)) { + ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_en); + /* Knowing we're at a high enough firmware revision to + * support this link management configuration, we don't + * need to check/support earlier versions. + */ + return; + } + /* Default overrides only work if in lenient link mode */ - if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DEFAULT_OVERRIDE) && + if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LINK_MGMT_VER_1) && ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE) && (tlv.options & ICE_LINK_OVERRIDE_EN)) - ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_en); + ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_en); - /* Cache the LDO TLV structure in the driver, since it won't change - * during the driver's lifetime. + /* Cache the LDO TLV structure in the driver, since it + * won't change during the driver's lifetime. */ sc->ldo_tlv = tlv; } @@ -7638,13 +7621,17 @@ device_t dev = sc->dev; enum ice_status status; u64 phy_low, phy_high; + u8 report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, - &pcaps, NULL); + if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2)) + report_mode = ICE_AQC_REPORT_DFLT_CFG; + status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL); if (status != ICE_SUCCESS) { device_printf(dev, - "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), + "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", + __func__, + report_mode == ICE_AQC_REPORT_DFLT_CFG ? 
"DFLT" : "w/MEDIA", + ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return; } @@ -7654,7 +7641,7 @@ /* Save off initial config parameters */ pi->phy.curr_user_speed_req = - ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); + ice_aq_phy_types_to_link_speeds(phy_low, phy_high); pi->phy.curr_user_fec_req = ice_caps_to_fec_mode(pcaps.caps, pcaps.link_fec_options); pi->phy.curr_user_fc_req = ice_caps_to_fc_mode(pcaps.caps); @@ -7834,7 +7821,7 @@ ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length) { struct ice_hw *hw = &sc->hw; - int error = 0, retries = 0; + int ret = 0, retries = 0; enum ice_status status; if (length > 16) @@ -7851,18 +7838,18 @@ offset, 0, 0, data, length, false, NULL); if (!status) { - error = 0; + ret = 0; break; } if (status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { - error = EBUSY; + ret = EBUSY; continue; } if (status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EACCES) { /* FW says I2C access isn't supported */ - error = EACCES; + ret = EACCES; break; } if (status == ICE_ERR_AQ_ERROR && @@ -7870,24 +7857,24 @@ device_printf(sc->dev, "%s: Module pointer location specified in command does not permit the required operation.\n", __func__); - error = EPERM; + ret = EPERM; break; } else { device_printf(sc->dev, "%s: Error reading I2C data: err %s aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); - error = EIO; + ret = EIO; break; } } while (retries++ < ICE_I2C_MAX_RETRIES); - if (error == EBUSY) + if (ret == EBUSY) device_printf(sc->dev, "%s: Error reading I2C data after %d retries\n", __func__, ICE_I2C_MAX_RETRIES); - return (error); + return (ret); } /** @@ -7926,7 +7913,7 @@ struct ice_softc *sc = (struct ice_softc *)arg1; device_t dev = sc->dev; struct sbuf *sbuf; - int error = 0; + int ret; u8 data[16]; UNREFERENCED_PARAMETER(arg2); @@ -7936,13 +7923,13 @@ return (ESHUTDOWN); if (req->oldptr == NULL) { - error = SYSCTL_OUT(req, 0, 128); - return (error); + ret = SYSCTL_OUT(req, 0, 128); + return (ret); } - error = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1); - if (error) - return (error); + ret = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1); + if (ret) + return (ret); /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ if (data[0] == 0x3) { @@ -8070,72 +8057,234 @@ /** * ice_apply_supported_speed_filter - Mask off unsupported speeds - * @phy_type_low: bit-field for the low quad word of PHY types - * @phy_type_high: bit-field for the high quad word of PHY types + * @report_speeds: bit-field for the desired link speeds * - * Given the two quad words containing the supported PHY types, + * Given a bitmap of the desired lenient mode link speeds, * this function will mask off the speeds that are not currently * supported by the device. */ -static void -ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high) +static u16 +ice_apply_supported_speed_filter(u16 report_speeds) { - u64 phylow_mask; + u16 speed_mask; /* We won't offer anything lower than 1G for any part, * but we also won't offer anything under 25G for 100G - * parts. + * parts or under 10G for 50G parts. 
*/ - phylow_mask = ~(ICE_PHY_TYPE_LOW_1000BASE_T - 1); - if (*phy_type_high || - *phy_type_low & ~(ICE_PHY_TYPE_LOW_100GBASE_CR4 - 1)) - phylow_mask = ~(ICE_PHY_TYPE_LOW_25GBASE_T - 1); - *phy_type_low &= phylow_mask; + speed_mask = ~((u16)ICE_AQ_LINK_SPEED_1000MB - 1); + if (report_speeds & ICE_AQ_LINK_SPEED_50GB) + speed_mask = ~((u16)ICE_AQ_LINK_SPEED_10GB - 1); + if (report_speeds & ICE_AQ_LINK_SPEED_100GB) + speed_mask = ~((u16)ICE_AQ_LINK_SPEED_25GB - 1); + return (report_speeds & speed_mask); } /** - * ice_get_phy_types - Report appropriate PHY types - * @sc: device softc structure - * @phy_type_low: bit-field for the low quad word of PHY types - * @phy_type_high: bit-field for the high quad word of PHY types + * ice_init_health_events - Enable FW health event reporting + * @sc: device softc * - * Populate the two quad words with bits representing the PHY types - * supported by the device. This is really just a wrapper around - * the ice_aq_get_phy_caps() that chooses the appropriate report - * mode (lenient or strict) and reports back only the relevant PHY - * types. In lenient mode the capabilities are retrieved with the - * NVM_CAP report mode, otherwise they're retrieved using the - * TOPO_CAP report mode (NVM intersected with current media). - * - * @returns 0 on success, or an error code on failure. + * Will try to enable firmware health event reporting, but shouldn't + * cause any grief (to the caller) if this fails. */ -static enum ice_status -ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high) +void +ice_init_health_events(struct ice_softc *sc) { - struct ice_aqc_get_phy_caps_data pcaps = { 0 }; - struct ice_port_info *pi = sc->hw.port_info; - device_t dev = sc->dev; enum ice_status status; - u8 report_mode; + u8 health_mask; - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) - report_mode = ICE_AQC_REPORT_NVM_CAP; - else - report_mode = ICE_AQC_REPORT_TOPO_CAP; - status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", - __func__, (report_mode) ? "TOPO_CAP" : "NVM_CAP", + if ((!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HEALTH_STATUS)) || + (!sc->enable_health_events)) + return; + + health_mask = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK | + ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK; + + status = ice_aq_set_health_status_config(&sc->hw, health_mask, NULL); + if (status) + device_printf(sc->dev, + "Failed to enable firmware health events, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(sc->hw.adminq.sq_last_status)); - return (status); + else + ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_en); +} + +/** + * ice_print_health_status_string - Print message for given FW health event + * @dev: the PCIe device + * @elem: health status element containing status code + * + * A rather large list of possible health status codes and their associated + * messages. 
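+ *
+ * None of these strings are printed unless health status reporting
+ * was successfully enabled by ice_init_health_events() above, which
+ * in turn requires the enable_health_events tunable to be left on.
+ * Since that sysctl is CTLFLAG_RDTUN, it would be set from
+ * loader.conf before boot; the exact node path below is an
+ * assumption (unit 0 shown):
+ *
+ *	dev.ice.0.enable_health_events="0"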
+ */ +static void +ice_print_health_status_string(device_t dev, + struct ice_aqc_health_status_elem *elem) +{ + u16 status_code = le16toh(elem->health_status_code); + + switch (status_code) { + case ICE_AQC_HEALTH_STATUS_INFO_RECOVERY: + device_printf(dev, "The device is in firmware recovery mode.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS: + device_printf(dev, "The flash chip cannot be accessed.\n"); + device_printf(dev, "Possible Solution: If issue persists, call customer support.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH: + device_printf(dev, "NVM authentication failed.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH: + device_printf(dev, "Option ROM authentication failed.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH: + device_printf(dev, "DDP package failed.\n"); + device_printf(dev, "Possible Solution: Update to latest base driver and DDP package.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT: + device_printf(dev, "NVM image is incompatible.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT: + device_printf(dev, "Option ROM is incompatible.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB: + device_printf(dev, "Supplied MIB file is invalid. DCB reverted to default configuration.\n"); + device_printf(dev, "Possible Solution: Disable FW-LLDP and check DCBx system configuration.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT: + device_printf(dev, "An unsupported module was detected.\n"); + device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); + device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE: + device_printf(dev, "Module type is not supported.\n"); + device_printf(dev, "Possible Solution: Change or replace the module or cable.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL: + device_printf(dev, "Module is not qualified.\n"); + device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); + device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); + device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM: + device_printf(dev, "Device cannot communicate with the module.\n"); + device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); + device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); + device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT: + device_printf(dev, "Unresolved module conflict.\n"); + device_printf(dev, "Possible Solution 1: Manually set speed/duplex or use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + device_printf(dev, "Possible Solution 2: If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT: + device_printf(dev, "Module is not present.\n"); + device_printf(dev, "Possible Solution 1: Check 
that the module is inserted correctly.\n"); + device_printf(dev, "Possible Solution 2: If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.\n"); + break; + case ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED: + device_printf(dev, "Underutilized module.\n"); + device_printf(dev, "Possible Solution 1: Change or replace the module or cable.\n"); + device_printf(dev, "Possible Solution 2: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT: + device_printf(dev, "An unsupported module was detected.\n"); + device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); + device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); + device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG: + device_printf(dev, "Invalid link configuration.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS: + device_printf(dev, "Port hardware access error.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE: + device_printf(dev, "A port is unreachable.\n"); + device_printf(dev, "Possible Solution 1: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + device_printf(dev, "Possible Solution 2: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED: + device_printf(dev, "Port speed is limited due to module.\n"); + device_printf(dev, "Possible Solution: Change the module or use Intel(R) Ethernet Port Configuration Tool to configure the port option to match the current module speed.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT: + device_printf(dev, "A parallel fault was detected.\n"); + device_printf(dev, "Possible Solution: Check link partner connection and configuration.\n"); + break; + case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED: + device_printf(dev, "Port speed is limited by PHY capabilities.\n"); + device_printf(dev, "Possible Solution 1: Change the module to align to port option.\n"); + device_printf(dev, "Possible Solution 2: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO: + device_printf(dev, "LOM topology netlist is corrupted.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_NETLIST: + device_printf(dev, "Unrecoverable netlist error.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT: + device_printf(dev, "Port topology conflict.\n"); + device_printf(dev, "Possible Solution 1: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + device_printf(dev, "Possible Solution 2: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS: + device_printf(dev, "Unrecoverable hardware access error.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME: + device_printf(dev, "Unrecoverable runtime error.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT: + device_printf(dev, "Link management engine 
failed to initialize.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + default: + break; + } +} + +/** + * ice_handle_health_status_event - helper function to output health status + * @sc: device softc structure + * @event: event received on a control queue + * + * Prints out the appropriate string based on the given Health Status Event + * code. + */ +static void +ice_handle_health_status_event(struct ice_softc *sc, + struct ice_rq_event_info *event) +{ + struct ice_aqc_health_status_elem *health_info; + u16 status_count; + int i; + + if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_HEALTH_STATUS)) + return; + + health_info = (struct ice_aqc_health_status_elem *)event->msg_buf; + status_count = le16toh(event->desc.params.get_health_status.health_status_count); + + if (status_count > (event->buf_len / sizeof(*health_info))) { + device_printf(sc->dev, "Received a health status event with invalid event count\n"); + return; } - *phy_type_low = le64toh(pcaps.phy_type_low); - *phy_type_high = le64toh(pcaps.phy_type_high); - - return (ICE_SUCCESS); + for (i = 0; i < status_count; i++) { + ice_print_health_status_string(sc->dev, health_info); + health_info++; + } } /** Index: sys/dev/ice/ice_nvm.c =================================================================== --- sys/dev/ice/ice_nvm.c +++ sys/dev/ice/ice_nvm.c @@ -956,6 +956,7 @@ /* Verify that the simple checksum is zero */ for (i = 0; i < sizeof(tmp); i++) + /* cppcheck-suppress objectIndex */ sum += ((u8 *)&tmp)[i]; if (sum) { Index: sys/dev/ice/ice_protocol_type.h =================================================================== --- sys/dev/ice/ice_protocol_type.h +++ sys/dev/ice/ice_protocol_type.h @@ -96,6 +96,10 @@ ICE_SW_TUN_IPV6_GTP_IPV4_UDP, ICE_SW_TUN_IPV6_GTP_IPV6_TCP, ICE_SW_TUN_IPV6_GTP_IPV6_UDP, + ICE_SW_TUN_IPV4_GTPU_IPV4, + ICE_SW_TUN_IPV4_GTPU_IPV6, + ICE_SW_TUN_IPV6_GTPU_IPV4, + ICE_SW_TUN_IPV6_GTPU_IPV6, ICE_ALL_TUNNELS /* All tunnel types including NVGRE */ }; Index: sys/dev/ice/ice_sched.c =================================================================== --- sys/dev/ice/ice_sched.c +++ sys/dev/ice/ice_sched.c @@ -992,6 +992,50 @@ return status; } +/** + * ice_sched_add_nodes_to_hw_layer - Add nodes to hw layer + * @pi: port information structure + * @tc_node: pointer to TC node + * @parent: pointer to parent node + * @layer: layer number to add nodes + * @num_nodes: number of nodes to be added + * @first_node_teid: pointer to the first node TEID + * @num_nodes_added: pointer to number of nodes added + * + * Add nodes into specific hw layer. 
+ */ +static enum ice_status +ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, + struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, + u16 num_nodes, u32 *first_node_teid, + u16 *num_nodes_added) +{ + u16 max_child_nodes; + + *num_nodes_added = 0; + + if (!num_nodes) + return ICE_SUCCESS; + + if (!parent || layer < pi->hw->sw_entry_point_layer) + return ICE_ERR_PARAM; + + /* max children per node per layer */ + max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; + + /* current number of children + required nodes exceed max children */ + if ((parent->num_children + num_nodes) > max_child_nodes) { + /* Fail if the parent is a TC node */ + if (parent == tc_node) + return ICE_ERR_CFG; + return ICE_ERR_MAX_LIMIT; + } + + return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, + num_nodes_added, first_node_teid); +} + /** * ice_sched_add_nodes_to_layer - Add nodes to a given layer * @pi: port information structure @@ -1012,72 +1056,53 @@ u16 *num_nodes_added) { u32 *first_teid_ptr = first_node_teid; - u16 new_num_nodes, max_child_nodes; + u16 new_num_nodes = num_nodes; enum ice_status status = ICE_SUCCESS; - struct ice_hw *hw = pi->hw; - u16 num_added = 0; - u32 temp; *num_nodes_added = 0; + while (*num_nodes_added < num_nodes) { + u16 max_child_nodes, num_added = 0; + /* cppcheck-suppress unusedVariable */ + u32 temp; - if (!num_nodes) - return status; - - if (!parent || layer < hw->sw_entry_point_layer) - return ICE_ERR_PARAM; - - /* max children per node per layer */ - max_child_nodes = hw->max_children[parent->tx_sched_layer]; - - /* current number of children + required nodes exceed max children ? */ - if ((parent->num_children + num_nodes) > max_child_nodes) { - /* Fail if the parent is a TC node */ - if (parent == tc_node) - return ICE_ERR_CFG; - + status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, + layer, new_num_nodes, + first_teid_ptr, + &num_added); + if (status == ICE_SUCCESS) + *num_nodes_added += num_added; + /* added more nodes than requested ? */ + if (*num_nodes_added > num_nodes) { + ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, + *num_nodes_added); + status = ICE_ERR_CFG; + break; + } + /* break if all the nodes are added successfully */ + if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes)) + break; + /* break if the error is not max limit */ + if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT) + break; + /* Exceeded the max children */ + max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; /* utilize all the spaces if the parent is not full */ if (parent->num_children < max_child_nodes) { new_num_nodes = max_child_nodes - parent->num_children; - /* this recursion is intentional, and wouldn't - * go more than 2 calls + } else { + /* This parent is full, try the next sibling */ + parent = parent->sibling; + /* Don't modify the first node TEID memory if the + * first node was added already in the above call. + * Instead send some temp memory for all other + * recursive calls. */ - status = ice_sched_add_nodes_to_layer(pi, tc_node, - parent, layer, - new_num_nodes, - first_node_teid, - &num_added); - if (status != ICE_SUCCESS) - return status; + if (num_added) + first_teid_ptr = &temp; - *num_nodes_added += num_added; + new_num_nodes = num_nodes - *num_nodes_added; } - /* Don't modify the first node TEID memory if the first node was - * added already in the above call. Instead send some temp - * memory for all other recursive calls. 
- */ - if (num_added) - first_teid_ptr = &temp; - - new_num_nodes = num_nodes - num_added; - - /* This parent is full, try the next sibling */ - parent = parent->sibling; - - /* this recursion is intentional, for 1024 queues - * per VSI, it goes max of 16 iterations. - * 1024 / 8 = 128 layer 8 nodes - * 128 /8 = 16 (add 8 nodes per iteration) - */ - status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, - layer, new_num_nodes, - first_teid_ptr, - &num_added); - *num_nodes_added += num_added; - return status; } - - status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, - num_nodes_added, first_node_teid); return status; } @@ -1373,7 +1398,7 @@ ice_memdup(hw, buf->layer_props, (hw->num_tx_sched_layers * sizeof(*hw->layer_info)), - ICE_DMA_TO_DMA); + ICE_NONDMA_TO_NONDMA); if (!hw->layer_info) { status = ICE_ERR_NO_MEMORY; goto sched_query_out; @@ -4301,7 +4326,7 @@ ice_sched_rm_unused_rl_prof(hw); layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, - node->tx_sched_layer); + node->tx_sched_layer); if (layer_num >= hw->num_tx_sched_layers) return ICE_ERR_PARAM; Index: sys/dev/ice/ice_switch.h =================================================================== --- sys/dev/ice/ice_switch.h +++ sys/dev/ice/ice_switch.h @@ -137,6 +137,8 @@ } mac_vlan; struct { u16 vlan_id; + u16 tpid; + u8 tpid_valid; } vlan; /* Set lkup_type as ICE_SW_LKUP_ETHERTYPE * if just using ethertype as filter. Set lkup_type as @@ -159,7 +161,6 @@ */ u16 q_id:11; u16 hw_vsi_id:10; - u16 vsi_id:10; u16 vsi_list_id:10; } fwd_id; @@ -412,7 +413,6 @@ enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id); -/* Switch/bridge related commands */ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id); enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id); Index: sys/dev/ice/ice_switch.c =================================================================== --- sys/dev/ice/ice_switch.c +++ sys/dev/ice/ice_switch.c @@ -38,6 +38,7 @@ #define ICE_ETH_ETHTYPE_OFFSET 12 #define ICE_ETH_VLAN_TCI_OFFSET 14 #define ICE_MAX_VLAN_ID 0xFFF +#define ICE_ETH_P_8021Q 0x8100 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem * struct to configure any switch filter rules. 
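[Illustration, not part of the patch: the hunk that follows teaches ice_fill_sw_rule() to stamp a configurable TPID, in addition to the VLAN TCI, into this dummy Ethernet header at the fixed byte offsets defined above, in network byte order. A self-contained sketch of that byte-order technique, using invented demo_* names rather than the driver's helpers:]

#include <stdint.h>

#define DEMO_ETHTYPE_OFFSET	12	/* mirrors ICE_ETH_ETHTYPE_OFFSET */
#define DEMO_VLAN_TCI_OFFSET	14	/* mirrors ICE_ETH_VLAN_TCI_OFFSET */

/* Write TPID and TCI into a dummy L2 header, most-significant byte first */
static void demo_set_vlan(uint8_t *eth_hdr, uint16_t tpid, uint16_t tci)
{
	eth_hdr[DEMO_ETHTYPE_OFFSET] = (uint8_t)(tpid >> 8);
	eth_hdr[DEMO_ETHTYPE_OFFSET + 1] = (uint8_t)tpid;
	eth_hdr[DEMO_VLAN_TCI_OFFSET] = (uint8_t)(tci >> 8);
	eth_hdr[DEMO_VLAN_TCI_OFFSET + 1] = (uint8_t)tci;
}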
@@ -1158,6 +1159,7 @@ struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc) { u16 vlan_id = ICE_MAX_VLAN_ID + 1; + u16 vlan_tpid = ICE_ETH_P_8021Q; void *daddr = NULL; u16 eth_hdr_sz; u8 *eth_hdr; @@ -1230,6 +1232,8 @@ break; case ICE_SW_LKUP_VLAN: vlan_id = f_info->l_data.vlan.vlan_id; + if (f_info->l_data.vlan.tpid_valid) + vlan_tpid = f_info->l_data.vlan.tpid; if (f_info->fltr_act == ICE_FWD_TO_VSI || f_info->fltr_act == ICE_FWD_TO_VSI_LIST) { act |= ICE_SINGLE_ACT_PRUNE; @@ -1273,6 +1277,8 @@ if (!(vlan_id > ICE_MAX_VLAN_ID)) { off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); *off = CPU_TO_BE16(vlan_id); + off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); + *off = CPU_TO_BE16(vlan_tpid); } /* Create the switch rule with the final dummy Ethernet header */ @@ -1807,6 +1813,9 @@ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, vsi_list_id); + if (!m_entry->vsi_list_info) + return ICE_ERR_NO_MEMORY; + /* If this entry was large action then the large action needs * to be updated to point to FWD to VSI list */ @@ -3011,6 +3020,7 @@ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && fm_entry->fltr_info.vsi_handle == vsi_handle) || (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && + fm_entry->vsi_list_info && (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map, vsi_handle)))); } @@ -3085,14 +3095,12 @@ LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head, ice_fltr_mgmt_list_entry, list_entry) { - struct ice_fltr_info *fi; - - fi = &fm_entry->fltr_info; - if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) + if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) continue; status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, - vsi_list_head, fi); + vsi_list_head, + &fm_entry->fltr_info); if (status) return status; } @@ -3595,7 +3603,7 @@ &remove_list_head); ice_release_lock(rule_lock); if (status) - return; + goto free_fltr_list; switch (lkup) { case ICE_SW_LKUP_MAC: @@ -3623,6 +3631,7 @@ break; } +free_fltr_list: LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head, ice_fltr_list_entry, list_entry) { LIST_DEL(&fm_entry->list_entry); Index: sys/dev/ice/ice_type.h =================================================================== --- sys/dev/ice/ice_type.h +++ sys/dev/ice/ice_type.h @@ -259,6 +259,7 @@ u16 max_frame_size; u16 link_speed; u16 req_speeds; + u8 link_cfg_err; u8 lse_ena; /* Link Status Event notification */ u8 link_info; u8 an_info; @@ -661,6 +662,8 @@ #define ICE_TXSCHED_GET_RL_WAKEUP_MV(p) LE16_TO_CPU((p)->info.wake_up_calc) #define ICE_TXSCHED_GET_RL_ENCODE(p) LE16_TO_CPU((p)->info.rl_encode) +#define ICE_MAX_PORT_PER_PCI_DEV 8 + /* The following tree example shows the naming conventions followed under * ice_port_info struct for default scheduler tree topology. 
* @@ -1024,7 +1027,7 @@ struct ice_lock rss_locks; /* protect RSS configuration */ struct LIST_HEAD_TYPE rss_list_head; struct ice_mbx_snapshot mbx_snapshot; - struct ice_vlan_mode_ops vlan_mode_ops; + u8 dvm_ena; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ @@ -1277,4 +1280,14 @@ #define ICE_FW_API_LLDP_FLTR_MAJ 1 #define ICE_FW_API_LLDP_FLTR_MIN 7 #define ICE_FW_API_LLDP_FLTR_PATCH 1 + +/* AQ API version for report default configuration */ +#define ICE_FW_API_REPORT_DFLT_CFG_MAJ 1 +#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7 +#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3 + +/* AQ API version for FW health reports */ +#define ICE_FW_API_HEALTH_REPORT_MAJ 1 +#define ICE_FW_API_HEALTH_REPORT_MIN 7 +#define ICE_FW_API_HEALTH_REPORT_PATCH 6 #endif /* _ICE_TYPE_H_ */ Index: sys/dev/ice/ice_vlan_mode.h =================================================================== --- sys/dev/ice/ice_vlan_mode.h +++ sys/dev/ice/ice_vlan_mode.h @@ -33,28 +33,12 @@ #ifndef _ICE_VLAN_MODE_H_ #define _ICE_VLAN_MODE_H_ +#include "ice_osdep.h" + struct ice_hw; +bool ice_is_dvm_ena(struct ice_hw *hw); +void ice_cache_vlan_mode(struct ice_hw *hw); enum ice_status ice_set_vlan_mode(struct ice_hw *hw); -void ice_init_vlan_mode_ops(struct ice_hw *hw); - -/* This structure defines the VLAN mode configuration interface. It is used to set the VLAN mode. - * - * Note: These operations will be called while the global configuration lock is held. - * - * enum ice_status (*set_svm)(struct ice_hw *hw); - * This function is called when the DDP and/or Firmware don't support double VLAN mode (DVM) or - * if the set_dvm op is not implemented and/or returns failure. It will set the device in - * single VLAN mode (SVM). - * - * enum ice_status (*set_dvm)(struct ice_hw *hw); - * This function is called when the DDP and Firmware support double VLAN mode (DVM). It should - * be implemented to set double VLAN mode. If it fails or remains unimplemented, set_svm will - * be called as a fallback plan. 
- */
-struct ice_vlan_mode_ops {
-	enum ice_status (*set_svm)(struct ice_hw *hw);
-	enum ice_status (*set_dvm)(struct ice_hw *hw);
-};
 
 #endif /* _ICE_VLAN_MODE_H */
Index: sys/dev/ice/ice_vlan_mode.c
===================================================================
--- sys/dev/ice/ice_vlan_mode.c
+++ sys/dev/ice/ice_vlan_mode.c
@@ -30,28 +30,241 @@
  */
 /*$FreeBSD$*/
 
-#include "ice_vlan_mode.h"
 #include "ice_common.h"
 
+/**
+ * ice_pkg_get_supported_vlan_mode - determine if DDP supports Double VLAN mode (DVM)
+ * @hw: pointer to the HW struct
+ * @dvm: output variable to determine if DDP supports DVM (true) or SVM (false)
+ */
+static enum ice_status
+ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm)
+{
+	u16 meta_init_size = sizeof(struct ice_meta_init_section);
+	struct ice_meta_init_section *sect;
+	struct ice_buf_build *bld;
+	enum ice_status status;
+
+	/* if anything fails, we assume there is no DVM support */
+	*dvm = false;
+
+	bld = ice_pkg_buf_alloc_single_section(hw,
+					       ICE_SID_RXPARSER_METADATA_INIT,
+					       meta_init_size, (void **)&sect);
+	if (!bld)
+		return ICE_ERR_NO_MEMORY;
+
+	/* only need to read a single section */
+	sect->count = CPU_TO_LE16(1);
+	sect->offset = CPU_TO_LE16(ICE_META_VLAN_MODE_ENTRY);
+
+	status = ice_aq_upload_section(hw,
+				       (struct ice_buf_hdr *)ice_pkg_buf(bld),
+				       ICE_PKG_BUF_SIZE, NULL);
+	if (!status) {
+		ice_declare_bitmap(entry, ICE_META_INIT_BITS);
+		u32 arr[ICE_META_INIT_DW_CNT];
+		u16 i;
+
+		/* convert to host bitmap format */
+		for (i = 0; i < ICE_META_INIT_DW_CNT; i++)
+			arr[i] = LE32_TO_CPU(sect->entry[0].bm[i]);
+
+		ice_bitmap_from_array32(entry, arr, (u16)ICE_META_INIT_BITS);
+
+		/* check if DVM is supported */
+		*dvm = ice_is_bit_set(entry, ICE_META_VLAN_MODE_BIT);
+	}
+
+	ice_pkg_buf_free(hw, bld);
+
+	return status;
+}
+
+/**
+ * ice_aq_get_vlan_mode - get the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ * @get_params: structure FW fills in based on the current VLAN mode config
+ *
+ * Get VLAN Mode Parameters (0x020D)
+ */
+static enum ice_status
+ice_aq_get_vlan_mode(struct ice_hw *hw,
+		     struct ice_aqc_get_vlan_mode *get_params)
+{
+	struct ice_aq_desc desc;
+
+	if (!get_params)
+		return ICE_ERR_PARAM;
+
+	ice_fill_dflt_direct_cmd_desc(&desc,
+				      ice_aqc_opc_get_vlan_mode_parameters);
+
+	return ice_aq_send_cmd(hw, &desc, get_params, sizeof(*get_params),
+			       NULL);
+}
+
+/**
+ * ice_aq_is_dvm_ena - query FW to check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns true if the hardware/firmware is configured in double VLAN mode,
+ * else returns false, signaling that the hardware/firmware is configured in
+ * single VLAN mode.
+ *
+ * Also, return false if this call fails for any reason (i.e. firmware doesn't
+ * support this AQ call).
+ */
+static bool ice_aq_is_dvm_ena(struct ice_hw *hw)
+{
+	struct ice_aqc_get_vlan_mode get_params = { 0 };
+	enum ice_status status;
+
+	status = ice_aq_get_vlan_mode(hw, &get_params);
+	if (status) {
+		ice_debug(hw, ICE_DBG_AQ, "Failed to get VLAN mode, status %d\n",
+			  status);
+		return false;
+	}
+
+	return (get_params.vlan_mode & ICE_AQ_VLAN_MODE_DVM_ENA);
+}
+
+/**
+ * ice_is_dvm_ena - check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * The device is configured in single or double VLAN mode on initialization and
+ * this cannot be dynamically changed during runtime. Based on this there is no
+ * need to make an AQ call every time the driver needs to know the VLAN mode.
+ * Instead, use the cached VLAN mode.
+ */ +bool ice_is_dvm_ena(struct ice_hw *hw) +{ + return hw->dvm_ena; +} + +/** + * ice_cache_vlan_mode - cache VLAN mode after DDP is downloaded + * @hw: pointer to the HW structure + * + * This is only called after downloading the DDP and after the global + * configuration lock has been released because all ports on a device need to + * cache the VLAN mode. + */ +void ice_cache_vlan_mode(struct ice_hw *hw) +{ + hw->dvm_ena = ice_aq_is_dvm_ena(hw) ? true : false; +} + +/** + * ice_is_dvm_supported - check if Double VLAN Mode is supported + * @hw: pointer to the hardware structure + * + * Returns true if Double VLAN Mode (DVM) is supported and false if only Single + * VLAN Mode (SVM) is supported. In order for DVM to be supported the DDP and + * firmware must support it, otherwise only SVM is supported. This function + * should only be called while the global config lock is held and after the + * package has been successfully downloaded. + */ +static bool ice_is_dvm_supported(struct ice_hw *hw) +{ + struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 }; + enum ice_status status; + bool pkg_supports_dvm; + + status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm); + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Failed to get supported VLAN mode, status %d\n", + status); + return false; + } + + if (!pkg_supports_dvm) + return false; + + /* If firmware returns success, then it supports DVM, else it only + * supports SVM + */ + status = ice_aq_get_vlan_mode(hw, &get_vlan_mode); + if (status) { + ice_debug(hw, ICE_DBG_NVM, "Failed to get VLAN mode, status %d\n", + status); + return false; + } + + return true; +} + +/** + * ice_aq_set_vlan_mode - set the VLAN mode of the device + * @hw: pointer to the HW structure + * @set_params: requested VLAN mode configuration + * + * Set VLAN Mode Parameters (0x020C) + */ +static enum ice_status +ice_aq_set_vlan_mode(struct ice_hw *hw, + struct ice_aqc_set_vlan_mode *set_params) +{ + u8 rdma_packet, mng_vlan_prot_id; + struct ice_aq_desc desc; + + if (!set_params) + return ICE_ERR_PARAM; + + if (set_params->l2tag_prio_tagging > ICE_AQ_VLAN_PRIO_TAG_MAX) + return ICE_ERR_PARAM; + + rdma_packet = set_params->rdma_packet; + if (rdma_packet != ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING && + rdma_packet != ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING) + return ICE_ERR_PARAM; + + mng_vlan_prot_id = set_params->mng_vlan_prot_id; + if (mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER && + mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_set_vlan_mode_parameters); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params), + NULL); +} + /** * ice_set_svm - set single VLAN mode * @hw: pointer to the HW structure */ -static enum ice_status ice_set_svm_dflt(struct ice_hw *hw) +static enum ice_status ice_set_svm(struct ice_hw *hw) { - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + struct ice_aqc_set_vlan_mode *set_params; + enum ice_status status; - return ice_aq_set_port_params(hw->port_info, 0, false, false, false, NULL); -} + status = ice_aq_set_port_params(hw->port_info, 0, false, false, false, NULL); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to set port parameters for single VLAN mode\n"); + return status; + } -/** - * ice_init_vlan_mode_ops - initialize VLAN mode configuration ops - * @hw: pointer to the HW structure - */ -void ice_init_vlan_mode_ops(struct ice_hw *hw) -{ - hw->vlan_mode_ops.set_dvm = NULL; - 
hw->vlan_mode_ops.set_svm = ice_set_svm_dflt; + set_params = (struct ice_aqc_set_vlan_mode *) + ice_malloc(hw, sizeof(*set_params)); + if (!set_params) + return ICE_ERR_NO_MEMORY; + + /* default configuration for SVM configurations */ + set_params->l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG; + set_params->rdma_packet = ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING; + set_params->mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER; + + status = ice_aq_set_vlan_mode(hw, set_params); + if (status) + ice_debug(hw, ICE_DBG_INIT, "Failed to configure port in single VLAN mode\n"); + + ice_free(hw, set_params); + return status; } /** @@ -60,13 +273,9 @@ */ enum ice_status ice_set_vlan_mode(struct ice_hw *hw) { - enum ice_status status = ICE_ERR_NOT_IMPL; - if (hw->vlan_mode_ops.set_dvm) - status = hw->vlan_mode_ops.set_dvm(hw); + if (!ice_is_dvm_supported(hw)) + return ICE_SUCCESS; - if (status) - return hw->vlan_mode_ops.set_svm(hw); - - return status; + return ice_set_svm(hw); } Index: sys/dev/ice/if_ice_iflib.c =================================================================== --- sys/dev/ice/if_ice_iflib.c +++ sys/dev/ice/if_ice_iflib.c @@ -753,6 +753,9 @@ return err; } + /* Enable FW health event reporting */ + ice_init_health_events(sc); + /* Configure the main PF VSI for RSS */ err = ice_config_rss(&sc->pf_vsi); if (err) { @@ -1946,7 +1949,7 @@ enum ice_status status; /* Re-enable link and re-apply user link settings */ - ice_apply_saved_phy_cfg(sc); + ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC); /* Update the OS about changes in media capability */ status = ice_add_media_types(sc, sc->media); @@ -2424,6 +2427,9 @@ if (err) goto err_deinit_pf_vsi; + /* Re-enable FW health event reporting */ + ice_init_health_events(sc); + /* Reconfigure the main PF VSI for RSS */ err = ice_config_rss(&sc->pf_vsi); if (err) { @@ -2593,11 +2599,16 @@ ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap); ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap); ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap); - ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_cap); + ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_cap); + ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_cap); + ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap); /* Disable features due to hardware limitations... */ if (!sc->hw.func_caps.common_cap.rss_table_size) ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap); + /* Disable features due to firmware limitations... */ + if (!ice_is_fw_health_report_supported(&sc->hw)) + ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap); /* Disable capabilities not supported by the OS */ ice_disable_unsupported_features(sc->feat_cap); Index: sys/dev/ice/virtchnl.h =================================================================== --- sys/dev/ice/virtchnl.h +++ sys/dev/ice/virtchnl.h @@ -34,8 +34,9 @@ #define _VIRTCHNL_H_ /* Description: - * This header file describes the VF-PF communication protocol used - * by the drivers for all devices starting from our 40G product line + * This header file describes the Virtual Function (VF) - Physical Function + * (PF) communication protocol used by the drivers for all devices starting + * from our 40G product line * * Admin queue buffer usage: * desc->opcode is always aqc_opc_send_msg_to_pf @@ -49,8 +50,8 @@ * have a maximum of sixteen queues for all of its VSIs. * * The PF is required to return a status code in v_retval for all messages - * except RESET_VF, which does not require any response. The return value - * is of status_code type, defined in the shared type.h. 
+ * except RESET_VF, which does not require any response. The returned value + * is of virtchnl_status_code type, defined in the shared type.h. * * In general, VF driver initialization should roughly follow the order of * these opcodes. The VF driver must first validate the API version of the @@ -157,10 +158,20 @@ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, /* opcode 34 is reserved */ - /* opcodes 39, 40, 41, 42 and 43 are reserved */ + /* opcodes 38, 39, 40, 41, 42 and 43 are reserved */ /* opcode 44 is reserved */ /* opcode 45, 46, 47, 48 and 49 are reserved */ VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50, + VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51, + VIRTCHNL_OP_ADD_VLAN_V2 = 52, + VIRTCHNL_OP_DEL_VLAN_V2 = 53, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55, + VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56, + VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57, + VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58, + VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59, + /* opcodes 60 through 69 are reserved */ VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107, VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108, VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111, @@ -236,6 +247,24 @@ return "VIRTCHNL_OP_DISABLE_QUEUES_V2"; case VIRTCHNL_OP_MAP_QUEUE_VECTOR: return "VIRTCHNL_OP_MAP_QUEUE_VECTOR"; + case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: + return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS"; + case VIRTCHNL_OP_ADD_VLAN_V2: + return "VIRTCHNL_OP_ADD_VLAN_V2"; + case VIRTCHNL_OP_DEL_VLAN_V2: + return "VIRTCHNL_OP_DEL_VLAN_V2"; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2"; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2"; + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + return "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2"; + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + return "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2"; + case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2: + return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2"; + case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2: + return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2"; case VIRTCHNL_OP_MAX: return "VIRTCHNL_OP_MAX"; default: @@ -259,8 +288,12 @@ struct virtchnl_msg { u8 pad[8]; /* AQ flags/opcode/len/retval fields */ - enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ - enum virtchnl_status_code v_retval; /* ditto for desc->retval */ + + /* avoid confusion with desc->opcode */ + enum virtchnl_ops v_opcode; + + /* ditto for desc->retval */ + enum virtchnl_status_code v_retval; u32 vfid; /* used by PF when sending to VF */ }; @@ -282,6 +315,8 @@ */ #define VIRTCHNL_VERSION_MAJOR 1 #define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MAJOR_2 2 +#define VIRTCHNL_VERSION_MINOR_0 0 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 struct virtchnl_version_info { @@ -326,7 +361,9 @@ struct virtchnl_vsi_resource { u16 vsi_id; u16 num_queue_pairs; - enum virtchnl_vsi_type vsi_type; + + /* see enum virtchnl_vsi_type */ + s32 vsi_type; u16 qset_handle; u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; }; @@ -347,6 +384,7 @@ #define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080 /* 0X00000100 is reserved */ #define VIRTCHNL_VF_LARGE_NUM_QPAIRS 0x00000200 +#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 0x00008000 #define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 @@ -361,6 +399,7 @@ /* 0X08000000 and 0X10000000 are reserved */ /* 0X20000000 is reserved */ /* 0X40000000 is reserved */ + /* 0X80000000 is reserved */ 
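[Illustration, not part of the patch: a VF driver picks the VLAN_V2 path only when the PF echoes VIRTCHNL_VF_OFFLOAD_VLAN_V2 back in the negotiated capability flags. A minimal sketch, assuming the vf_cap_flags field of struct virtchnl_vf_resource; the helper name is invented:]

static bool demo_use_vlan_v2(const struct virtchnl_vf_resource *vf_res)
{
	/* prefer the richer VLAN_V2 negotiation when the PF offers it */
	if (vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2)
		return true;

	/* otherwise fall back to the legacy single-VLAN offload */
	return false;
}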
 /* Define below the capability flags that are not offloads */
 #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED	0x00000080
 
@@ -425,7 +464,9 @@
 	u8 crc_disable;
 	u8 pad1[3];
 	u64 dma_ring_addr;
-	enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+
+	/* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */
+	s32 rx_split_pos;
 	u32 pad2;
 };
 
@@ -628,6 +669,388 @@
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
 
+/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related
+ * structures and opcodes.
+ *
+ * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver
+ * populates it, the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype.
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
+ * would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this as meaning that VLAN filtering can be supported
+ * on both 0x8100 and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
+ * supported by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this as meaning that VLAN stripping can be supported
+ * on either 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping
+ * via VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will
+ * override the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
+ * certain VLAN capability can be toggled. For example, if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
+ * set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+	VIRTCHNL_VLAN_UNSUPPORTED =		0,
+	VIRTCHNL_VLAN_ETHERTYPE_8100 =		0x00000001,
+	VIRTCHNL_VLAN_ETHERTYPE_88A8 =		0x00000002,
+	VIRTCHNL_VLAN_ETHERTYPE_9100 =		0x00000004,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 =	0x00000100,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 =	0x00000200,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 =	0x00000400,
+	VIRTCHNL_VLAN_PRIO =			0x01000000,
+	VIRTCHNL_VLAN_FILTER_MASK =		0x10000000,
+	VIRTCHNL_VLAN_ETHERTYPE_AND =		0x20000000,
+	VIRTCHNL_VLAN_ETHERTYPE_XOR =		0x40000000,
+	VIRTCHNL_VLAN_TOGGLE =			0x80000000
+};
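[Illustration, not part of the patch: one way a VF driver could interpret the AND/XOR modifier bits above. The helper name and caps argument are invented; only the enum values come from this header:]

/* Can 0x8100 and 0x88A8 offloads be enabled at the same time? */
static bool demo_ethertypes_concurrent(u32 caps_field)
{
	bool has_8100 = (caps_field & VIRTCHNL_VLAN_ETHERTYPE_8100) != 0;
	bool has_88a8 = (caps_field & VIRTCHNL_VLAN_ETHERTYPE_88A8) != 0;

	/* AND: all advertised ethertypes may be active simultaneously */
	if (caps_field & VIRTCHNL_VLAN_ETHERTYPE_AND)
		return has_8100 && has_88a8;

	/* XOR (or no modifier): only one ethertype may be active at a time */
	return false;
}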
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer or inner are never
+ * referring to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+	u32 outer;
+	u32 inner;
+};
+
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN, as the only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If it exceeds this amount and tries to add another filter,
+ * then the request will be rejected by the PF. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+	struct virtchnl_vlan_supported_caps filtering_support;
+	u32 ethertype_init;
+	u16 max_filters;
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
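[Illustration, not part of the patch: a hypothetical VF-side guard that honors max_filters before sending VIRTCHNL_OP_ADD_VLAN_V2; the helper name and the active_filters counter are invented:]

static bool
demo_can_add_vlan_filter(const struct virtchnl_vlan_filtering_caps *caps,
			 u16 active_filters)
{
	/* at least one filtering field must be supported at all */
	if (caps->filtering_support.outer == VIRTCHNL_VLAN_UNSUPPORTED &&
	    caps->filtering_support.inner == VIRTCHNL_VLAN_UNSUPPORTED)
		return false;

	/* stay under the PF-imposed limit to avoid a rejected request */
	return active_filters < caps->max_filters;
}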
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping affect the ethertype(s) specified for insertion and vice versa
+ * as well. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields. See the definition of the
+ * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support
+ * enumeration.
+ */
+struct virtchnl_vlan_offload_caps {
+	struct virtchnl_vlan_supported_caps stripping_support;
+	struct virtchnl_vlan_supported_caps insertion_support;
+	u32 ethertype_init;
+	u8 ethertype_match;
+	u8 pad[3];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps);
+
+/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * VF sends this message to determine its VLAN capabilities.
+ *
+ * PF will mark which capabilities it supports based on hardware support and
+ * current configuration. For example, if a port VLAN is configured the PF will
+ * not allow outer VLAN filtering, stripping, or insertion to be configured so
+ * it will block these features from the VF.
+ *
+ * The VF will need to cross-reference its capabilities with the PF's
+ * capabilities in the response message from the PF to determine the VLAN
+ * support.
+ */
+struct virtchnl_vlan_caps {
+	struct virtchnl_vlan_filtering_caps filtering;
+	struct virtchnl_vlan_offload_caps offloads;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps);
+
+struct virtchnl_vlan {
+	u16 tci;	/* tci[15:13] = PCP and tci[11:0] = VID */
+	u16 tci_mask;	/* only valid if VIRTCHNL_VLAN_FILTER_MASK set in
+			 * filtering caps
+			 */
+	u16 tpid;	/* 0x8100, 0x88a8, etc. and only type(s) set in
+			 * filtering caps. Note that tpid here does not refer to
+			 * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the
+			 * actual 2-byte VLAN TPID
+			 */
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan);
+
+struct virtchnl_vlan_filter {
+	struct virtchnl_vlan inner;
+	struct virtchnl_vlan outer;
+	u8 pad[16];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter);
+
+/* VIRTCHNL_OP_ADD_VLAN_V2
+ * VIRTCHNL_OP_DEL_VLAN_V2
+ *
+ * VF sends these messages to add/del one or more VLAN tag filters for Rx
+ * traffic.
+ *
+ * The PF attempts to add the filters and returns status.
+ *
+ * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the
+ * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS.
+ */
+struct virtchnl_vlan_filter_list_v2 {
+	u16 vport_id;
+	u16 num_elements;
+	u8 pad[4];
+	struct virtchnl_vlan_filter filters[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2);
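[Illustration, not part of the patch: virtchnl_vlan_filter_list_v2 ends in a one-element array, so a message carrying n filters occupies the structure plus (n - 1) extra elements, which is exactly how the message validator later in this patch sizes it. A sketch with an invented helper name; the caller must guarantee num_elements >= 1:]

static size_t
demo_vlan_filter_list_v2_len(u16 num_elements)
{
	return sizeof(struct virtchnl_vlan_filter_list_v2) +
	       (num_elements - 1) * sizeof(struct virtchnl_vlan_filter);
}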
+
+/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ *
+ * VF sends this message to enable or disable VLAN stripping or insertion. It
+ * also needs to specify an ethertype. The VF knows which VLAN ethertypes are
+ * allowed and whether or not it's allowed to enable/disable the specific
+ * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.offloads fields to determine which offload
+ * messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable and/or disable 0x8100 inner
+ * VLAN insertion and/or stripping via the opcodes listed above. Inner in this
+ * case means the outer most or single VLAN from the VF's perspective. This is
+ * because no outer offloads are supported. See the comments above the
+ * virtchnl_vlan_supported_caps structure for more details.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * The reason that VLAN TPID(s) are not being used for the
+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's
+ * possible a device could support VLAN insertion and/or stripping offload on
+ * multiple ethertypes concurrently, so this method allows a VF to request
+ * multiple ethertypes in one message using the virtchnl_vlan_support
+ * enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled. So, for completeness, a
+ * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2
+ *
+ * VF sends this message to enable or disable VLAN filtering. It also needs to
+ * specify an ethertype. The VF knows which VLAN ethertypes are allowed and
+ * whether or not it's allowed to enable/disable filtering via the
+ * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.filtering fields to determine which, if any,
+ * filtering messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.filtering in the
+ * following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8
+ * outer VLAN filtering together. Note that the VIRTCHNL_VLAN_ETHERTYPE_AND
+ * means that all filtering ethertypes will be enabled and disabled together
+ * regardless of the request from the VF. This means that the underlying
+ * hardware only supports VLAN filtering for all of the specified ethertypes
+ * or none of them.
+ *
+ * virtchnl_vlan_caps.filtering.filtering_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100
+ * VLANs aren't supported by the VF driver), the VF would populate the
+ * virtchnl_vlan_setting structure in the following manner and send the
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. The same message format would be used
+ * to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.
+ * + * virtchnl_vlan_setting.outer_ethertype_setting = + * VIRTCHNL_VLAN_ETHERTYPE_8100 | + * VIRTCHNL_VLAN_ETHERTYPE_88A8; + * + */ +struct virtchnl_vlan_setting { + u32 outer_ethertype_setting; + u32 inner_ethertype_setting; + u16 vport_id; + u8 pad[6]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting); + /* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE * VF sends VSI id and flags. * PF returns status code in retval. @@ -790,8 +1213,12 @@ struct virtchnl_filter { union virtchnl_flow_spec data; union virtchnl_flow_spec mask; - enum virtchnl_flow_type flow_type; - enum virtchnl_action action; + + /* see enum virtchnl_flow_type */ + s32 flow_type; + + /* see enum virtchnl_action */ + s32 action; u32 action_meta; u8 field_flags; }; @@ -816,7 +1243,8 @@ #define PF_EVENT_SEVERITY_CERTAIN_DOOM 255 struct virtchnl_pf_event { - enum virtchnl_event_codes event; + /* see enum virtchnl_event_codes */ + s32 event; union { /* If the PF driver does not support the new speed reporting * capabilities then use link_event else use link_event_adv to @@ -828,16 +1256,25 @@ */ struct { enum virtchnl_link_speed link_speed; - u8 link_status; + bool link_status; + u8 pad[3]; } link_event; struct { /* link_speed provided in Mbps */ u32 link_speed; u8 link_status; + u8 pad[3]; } link_event_adv; + struct { + /* link_speed provided in Mbps */ + u32 link_speed; + u16 vport_id; + u8 link_status; + u8 pad; + } link_event_adv_vport; } event_data; - int severity; + s32 severity; }; VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event); @@ -875,7 +1312,8 @@ /* structure to specify a chunk of contiguous queues */ struct virtchnl_queue_chunk { - enum virtchnl_queue_type type; + /* see enum virtchnl_queue_type */ + s32 type; u16 start_queue_id; u16 num_queues; }; @@ -895,7 +1333,7 @@ * VIRTCHNL_OP_DISABLE_QUEUES_V2 * VIRTCHNL_OP_DEL_QUEUES * - * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES + * If VIRTCHNL version was negotiated in VIRTCHNL_OP_VERSION as 2.0 * then all of these ops are available. * * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES @@ -927,17 +1365,17 @@ u16 queue_id; u16 vector_id; u8 pad[4]; - enum virtchnl_itr_idx itr_idx; - enum virtchnl_queue_type queue_type; + + /* see enum virtchnl_itr_idx */ + s32 itr_idx; + + /* see enum virtchnl_queue_type */ + s32 queue_type; }; VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector); /* VIRTCHNL_OP_MAP_QUEUE_VECTOR - * VIRTCHNL_OP_UNMAP_QUEUE_VECTOR - * - * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES - * then all of these ops are available. * * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES * then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available. 
@@ -989,6 +1427,10 @@ VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX = ((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) / sizeof(struct virtchnl_queue_vector), + + VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX = + ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) / + sizeof(struct virtchnl_vlan_filter), }; /** @@ -1163,6 +1605,33 @@ case VIRTCHNL_OP_DEL_CLOUD_FILTER: valid_len = sizeof(struct virtchnl_filter); break; + case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: + break; + case VIRTCHNL_OP_ADD_VLAN_V2: + case VIRTCHNL_OP_DEL_VLAN_V2: + valid_len = sizeof(struct virtchnl_vlan_filter_list_v2); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list_v2 *vfl = + (struct virtchnl_vlan_filter_list_v2 *)msg; + + if (vfl->num_elements == 0 || vfl->num_elements > + VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) { + err_msg_format = true; + break; + } + + valid_len += (vfl->num_elements - 1) * + sizeof(struct virtchnl_vlan_filter); + } + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2: + case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2: + valid_len = sizeof(struct virtchnl_vlan_setting); + break; case VIRTCHNL_OP_ENABLE_QUEUES_V2: case VIRTCHNL_OP_DISABLE_QUEUES_V2: valid_len = sizeof(struct virtchnl_del_ena_dis_queues); Index: sys/dev/ice/virtchnl_inline_ipsec.h =================================================================== --- sys/dev/ice/virtchnl_inline_ipsec.h +++ sys/dev/ice/virtchnl_inline_ipsec.h @@ -470,7 +470,7 @@ }; #pragma pack() -/* Add whitelist entry in IES */ +/* Add allowlist entry in IES */ struct virtchnl_ipsec_sp_cfg { u32 spi; u32 dip[4]; @@ -489,7 +489,7 @@ }; #pragma pack(1) -/* Delete whitelist entry in IES */ +/* Delete allowlist entry in IES */ struct virtchnl_ipsec_sp_destroy { /* 0 for IPv4 table, 1 for IPv6 table. */ u8 table_id; @@ -497,7 +497,7 @@ }; #pragma pack() -/* Response from IES to whitelist operations */ +/* Response from IES to allowlist operations */ struct virtchnl_ipsec_sp_cfg_resp { u32 rule_id; }; Index: sys/dev/ice/virtchnl_lan_desc.h =================================================================== --- /dev/null +++ sys/dev/ice/virtchnl_lan_desc.h @@ -0,0 +1,549 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (c) 2021, Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _VIRTCHNL_LAN_DESC_H_
+#define _VIRTCHNL_LAN_DESC_H_
+
+/* Rx */
+/* For virtchnl_splitq_base_rx_flex desc members */
+#define VIRTCHNL_RXD_FLEX_PTYPE_S	0
+#define VIRTCHNL_RXD_FLEX_PTYPE_M	\
+	MAKEMASK(0x3FFUL, VIRTCHNL_RXD_FLEX_PTYPE_S)
+#define VIRTCHNL_RXD_FLEX_UMBCAST_S	10
+#define VIRTCHNL_RXD_FLEX_UMBCAST_M	\
+	MAKEMASK(0x3UL, VIRTCHNL_RXD_FLEX_UMBCAST_S)
+#define VIRTCHNL_RXD_FLEX_FF0_S	12
+#define VIRTCHNL_RXD_FLEX_FF0_M	MAKEMASK(0xFUL, VIRTCHNL_RXD_FLEX_FF0_S)
+#define VIRTCHNL_RXD_FLEX_LEN_PBUF_S	0
+#define VIRTCHNL_RXD_FLEX_LEN_PBUF_M	\
+	MAKEMASK(0x3FFFUL, VIRTCHNL_RXD_FLEX_LEN_PBUF_S)
+#define VIRTCHNL_RXD_FLEX_GEN_S	14
+#define VIRTCHNL_RXD_FLEX_GEN_M	BIT_ULL(VIRTCHNL_RXD_FLEX_GEN_S)
+#define VIRTCHNL_RXD_FLEX_BUFQ_ID_S	15
+#define VIRTCHNL_RXD_FLEX_BUFQ_ID_M	\
+	BIT_ULL(VIRTCHNL_RXD_FLEX_BUFQ_ID_S)
+#define VIRTCHNL_RXD_FLEX_LEN_HDR_S	0
+#define VIRTCHNL_RXD_FLEX_LEN_HDR_M	\
+	MAKEMASK(0x3FFUL, VIRTCHNL_RXD_FLEX_LEN_HDR_S)
+#define VIRTCHNL_RXD_FLEX_RSC_S	10
+#define VIRTCHNL_RXD_FLEX_RSC_M	BIT_ULL(VIRTCHNL_RXD_FLEX_RSC_S)
+#define VIRTCHNL_RXD_FLEX_SPH_S	11
+#define VIRTCHNL_RXD_FLEX_SPH_M	BIT_ULL(VIRTCHNL_RXD_FLEX_SPH_S)
+#define VIRTCHNL_RXD_FLEX_MISS_S	12
+#define VIRTCHNL_RXD_FLEX_MISS_M	\
+	BIT_ULL(VIRTCHNL_RXD_FLEX_MISS_S)
+#define VIRTCHNL_RXD_FLEX_FF1_S	13
+#define VIRTCHNL_RXD_FLEX_FF1_M	MAKEMASK(0x7UL, VIRTCHNL_RXD_FLEX_FF1_S)
+
+/* For virtchnl_singleq_base_rx_legacy desc members */
+#define VIRTCHNL_RXD_QW1_LEN_SPH_S	63
+#define VIRTCHNL_RXD_QW1_LEN_SPH_M	BIT_ULL(VIRTCHNL_RXD_QW1_LEN_SPH_S)
+#define VIRTCHNL_RXD_QW1_LEN_HBUF_S	52
+#define VIRTCHNL_RXD_QW1_LEN_HBUF_M	\
+	MAKEMASK(0x7FFULL, VIRTCHNL_RXD_QW1_LEN_HBUF_S)
+#define VIRTCHNL_RXD_QW1_LEN_PBUF_S	38
+#define VIRTCHNL_RXD_QW1_LEN_PBUF_M	\
+	MAKEMASK(0x3FFFULL, VIRTCHNL_RXD_QW1_LEN_PBUF_S)
+#define VIRTCHNL_RXD_QW1_PTYPE_S	30
+#define VIRTCHNL_RXD_QW1_PTYPE_M	\
+	MAKEMASK(0xFFULL, VIRTCHNL_RXD_QW1_PTYPE_S)
+#define VIRTCHNL_RXD_QW1_ERROR_S	19
+#define VIRTCHNL_RXD_QW1_ERROR_M	\
+	MAKEMASK(0xFFUL, VIRTCHNL_RXD_QW1_ERROR_S)
+#define VIRTCHNL_RXD_QW1_STATUS_S	0
+#define VIRTCHNL_RXD_QW1_STATUS_M	\
+	MAKEMASK(0x7FFFFUL, VIRTCHNL_RXD_QW1_STATUS_S)
+
+enum virtchnl_rx_flex_desc_status_error_0_qw1_bits {
+	/* Note: These are predefined bit offsets */
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_DD_S = 0,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_EOF_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_HBO_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_L3L4P_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
+};
+
+enum virtchnl_rx_flex_desc_status_error_0_qw0_bits {
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_LPBK_S = 0,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_RXE_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_CRCP_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
+	VIRTCHNL_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
+};
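[Illustration, not part of the patch: the *_S enumerators above are bit positions, not masks, so a consumer tests them with BIT(). A sketch, assuming the virtchnl_rx_flex_desc structure defined later in this file and the le16toh()/BIT() helpers used elsewhere in this driver; the function name is invented:]

static bool demo_rx_desc_done(const struct virtchnl_rx_flex_desc *desc)
{
	u16 status0 = le16toh(desc->status_error0);

	/* DD is set by hardware once the descriptor has been written back */
	return (status0 & BIT(VIRTCHNL_RX_FLEX_DESC_STATUS0_DD_S)) != 0;
}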
+
+enum virtchnl_rx_flex_desc_status_error_1_bits {
+	/* Note: These are predefined bit offsets */
+	VIRTCHNL_RX_FLEX_DESC_STATUS1_RSVD_S = 0, /* 2 bits */
+	VIRTCHNL_RX_FLEX_DESC_STATUS1_ATRAEFAIL_S = 2,
+	VIRTCHNL_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 3,
+	VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 4,
+	VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 5,
+	VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 6,
+	VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 7,
+	VIRTCHNL_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
+enum virtchnl_rx_base_desc_status_bits {
+	/* Note: These are predefined bit offsets */
+	VIRTCHNL_RX_BASE_DESC_STATUS_DD_S		= 0,
+	VIRTCHNL_RX_BASE_DESC_STATUS_EOF_S		= 1,
+	VIRTCHNL_RX_BASE_DESC_STATUS_L2TAG1P_S		= 2,
+	VIRTCHNL_RX_BASE_DESC_STATUS_L3L4P_S		= 3,
+	VIRTCHNL_RX_BASE_DESC_STATUS_CRCP_S		= 4,
+	VIRTCHNL_RX_BASE_DESC_STATUS_RSVD_S		= 5, /* 3 BITS */
+	VIRTCHNL_RX_BASE_DESC_STATUS_EXT_UDP_0_S	= 8,
+	VIRTCHNL_RX_BASE_DESC_STATUS_UMBCAST_S		= 9, /* 2 BITS */
+	VIRTCHNL_RX_BASE_DESC_STATUS_FLM_S		= 11,
+	VIRTCHNL_RX_BASE_DESC_STATUS_FLTSTAT_S		= 12, /* 2 BITS */
+	VIRTCHNL_RX_BASE_DESC_STATUS_LPBK_S		= 14,
+	VIRTCHNL_RX_BASE_DESC_STATUS_IPV6EXADD_S	= 15,
+	VIRTCHNL_RX_BASE_DESC_STATUS_RSVD1_S		= 16, /* 2 BITS */
+	VIRTCHNL_RX_BASE_DESC_STATUS_INT_UDP_0_S	= 18,
+	VIRTCHNL_RX_BASE_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+enum virtchnl_rx_desc_fltstat_values {
+	VIRTCHNL_RX_DESC_FLTSTAT_NO_DATA	= 0,
+	VIRTCHNL_RX_DESC_FLTSTAT_RSV_FD_ID	= 1, /* 16byte desc? FD_ID : RSV */
+	VIRTCHNL_RX_DESC_FLTSTAT_RSV		= 2,
+	VIRTCHNL_RX_DESC_FLTSTAT_RSS_HASH	= 3,
+};
+
+enum virtchnl_rx_base_desc_error_bits {
+	/* Note: These are predefined bit offsets */
+	VIRTCHNL_RX_BASE_DESC_ERROR_RXE_S	= 0,
+	VIRTCHNL_RX_BASE_DESC_ERROR_ATRAEFAIL_S	= 1,
+	VIRTCHNL_RX_BASE_DESC_ERROR_HBO_S	= 2,
+	VIRTCHNL_RX_BASE_DESC_ERROR_L3L4E_S	= 3, /* 3 BITS */
+	VIRTCHNL_RX_BASE_DESC_ERROR_IPE_S	= 3,
+	VIRTCHNL_RX_BASE_DESC_ERROR_L4E_S	= 4,
+	VIRTCHNL_RX_BASE_DESC_ERROR_EIPE_S	= 5,
+	VIRTCHNL_RX_BASE_DESC_ERROR_OVERSIZE_S	= 6,
+	VIRTCHNL_RX_BASE_DESC_ERROR_RSVD_S	= 7
+};
+
+/* Receive Descriptors */
+/* splitq buf
+ |                                     16|                                  0|
+ ----------------------------------------------------------------
+ |                 RSV                   |            Buffer ID              |
+ ----------------------------------------------------------------
+ |                       Rx packet buffer address                            |
+ ----------------------------------------------------------------
+ |                       Rx header buffer address                            |
+ ----------------------------------------------------------------
+ |                                  RSV                                      |
+ ----------------------------------------------------------------
+ |                                                                          0|
+ */
+struct virtchnl_splitq_rx_buf_desc {
+	struct {
+		__le16 buf_id; /* Buffer Identifier */
+		__le16 rsvd0;
+		__le32 rsvd1;
+	} qword0;
+	__le64 pkt_addr; /* Packet buffer address */
+	__le64 hdr_addr; /* Header buffer address */
+	__le64 rsvd2;
+}; /* read used with buffer queues */
+
+/* singleq buf
+ |                                                                          0|
+ ----------------------------------------------------------------
+ |                       Rx packet buffer address                            |
+ ----------------------------------------------------------------
+ |                       Rx header buffer address                            |
+ ----------------------------------------------------------------
+ |                                  RSV                                      |
+ ----------------------------------------------------------------
+ |                                  RSV                                      |
+ ----------------------------------------------------------------
+ |                                                                          0|
+ */
+struct virtchnl_singleq_rx_buf_desc {
+	__le64 pkt_addr; /* Packet buffer address */
+	__le64
+
+/* singleq buf
+ |                                                                0|
+ ------------------------------------------------------------------
+ | Rx packet buffer address                                       |
+ ------------------------------------------------------------------
+ | Rx header buffer address                                       |
+ ------------------------------------------------------------------
+ | RSV                                                            |
+ ------------------------------------------------------------------
+ | RSV                                                            |
+ ------------------------------------------------------------------
+ |                                                               0|
+ */
+struct virtchnl_singleq_rx_buf_desc {
+	__le64 pkt_addr; /* Packet buffer address */
+	__le64 hdr_addr; /* Header buffer address */
+	__le64 rsvd1;
+	__le64 rsvd2;
+}; /* read used with buffer queues */
+
+union virtchnl_rx_buf_desc {
+	struct virtchnl_singleq_rx_buf_desc read;
+	struct virtchnl_splitq_rx_buf_desc split_rd;
+};
+
+/* (0x00) singleq wb(compl) */
+struct virtchnl_singleq_base_rx_desc {
+	struct {
+		struct {
+			__le16 mirroring_status;
+			__le16 l2tag1;
+		} lo_dword;
+		union {
+			__le32 rss; /* RSS Hash */
+			__le32 fd_id; /* Flow Director filter id */
+		} hi_dword;
+	} qword0;
+	struct {
+		/* status/error/PTYPE/length */
+		__le64 status_error_ptype_len;
+	} qword1;
+	struct {
+		__le16 ext_status; /* extended status */
+		__le16 rsvd;
+		__le16 l2tag2_1;
+		__le16 l2tag2_2;
+	} qword2;
+	struct {
+		__le32 reserved;
+		__le32 fd_id;
+	} qword3;
+}; /* writeback */
+
+/* (0x01) singleq flex compl */
+struct virtchnl_rx_flex_desc {
+	/* Qword 0 */
+	u8 rxdid; /* descriptor builder profile id */
+	u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+	__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+	__le16 pkt_len; /* [15:14] are reserved */
+	__le16 hdr_len_sph_flex_flags1;	/* header=[10:0] */
+					/* sph=[11:11] */
+					/* ff1/ext=[15:12] */
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 flex_meta0;
+	__le16 flex_meta1;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 time_stamp_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le16 flex_meta2;
+	__le16 flex_meta3;
+	union {
+		struct {
+			__le16 flex_meta4;
+			__le16 flex_meta5;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
+/* (0x02) */
+struct virtchnl_rx_flex_desc_nic {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	union {
+		struct {
+			__le16 rsvd;
+			__le16 flow_id_ipv6;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
+/* Rx Flex Descriptor Switch Profile
+ * RxDID Profile Id 3
+ * Flex-field 0: Source Vsi
+ */
+struct virtchnl_rx_flex_desc_sw {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 src_vsi; /* [10:15] are reserved */
+	__le16 flex_md1_rsvd;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 rsvd; /* flex words 2-3 are reserved */
+	__le32 ts_high;
+};
+
+/* Rx Flex Descriptor NIC VEB Profile
+ * RxDID Profile Id 4
+ * Flex-field 0: Destination Vsi
+ */
+struct virtchnl_rx_flex_desc_nic_veb_dbg {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 dst_vsi;	/* [0:12]: destination vsi */
+			/* 13: vsi valid bit */
+			/* [14:15] are reserved */
+	__le16 flex_field_1;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 rsvd; /* flex words 2-3 are reserved */
+	__le32 ts_high;
+};
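+
+/*
+ * A dispatch sketch (hypothetical code, not part of the original
+ * interface): every flex profile shares the same Qword 0 prologue, so
+ * a consumer can read rxdid through any overlay and then reinterpret
+ * the descriptor; e.g. for the NIC profile, tagged (0x02) above.
+ * "rxd" is an assumed descriptor pointer:
+ *
+ *	const struct virtchnl_rx_flex_desc *base = rxd;
+ *
+ *	if (base->rxdid == 0x02) {
+ *		const struct virtchnl_rx_flex_desc_nic *nic =
+ *		    (const struct virtchnl_rx_flex_desc_nic *)rxd;
+ *		u32 rss = le32toh(nic->rss_hash);
+ *		...
+ *	}
+ */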
+
+/* Rx Flex Descriptor NIC ACL Profile
+ * RxDID Profile Id 5
+ * Flex-field 0: ACL Counter 0
+ * Flex-field 1: ACL Counter 1
+ * Flex-field 2: ACL Counter 2
+ */
+struct virtchnl_rx_flex_desc_nic_acl_dbg {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 acl_ctr0;
+	__le16 acl_ctr1;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le16 acl_ctr2;
+	__le16 rsvd; /* flex words 2-3 are reserved */
+	__le32 ts_high;
+};
+
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile Id 6
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow Id lower 16-bits
+ * Flex-field 3: Source Vsi
+ * Flex-field 4: reserved, Vlan id taken from L2Tag
+ */
+struct virtchnl_rx_flex_desc_nic_2 {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le16 flow_id;
+	__le16 src_vsi;
+	union {
+		struct {
+			__le16 rsvd;
+			__le16 flow_id_ipv6;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
+/* Rx Flex Descriptor Advanced (Split Queue Model)
+ * RxDID Profile Id 7
+ */
+struct virtchnl_rx_flex_desc_adv {
+	/* Qword 0 */
+	u8 rxdid_ucast;			/* profile_id=[3:0] */
+					/* rsvd=[5:4] */
+					/* ucast=[7:6] */
+	u8 status_err0_qw0;
+	__le16 ptype_err_fflags0;	/* ptype=[9:0] */
+					/* ip_hdr_err=[10:10] */
+					/* udp_len_err=[11:11] */
+					/* ff0=[15:12] */
+	__le16 pktlen_gen_bufq_id;	/* plen=[13:0] */
+					/* gen=[14:14] only in splitq */
+					/* bufq_id=[15:15] only in splitq */
+	__le16 hdrlen_flags;		/* header=[9:0] */
+					/* rsc=[10:10] only in splitq */
+					/* sph=[11:11] only in splitq */
+					/* ext_udp_0=[12:12] */
+					/* int_udp_0=[13:13] */
+					/* trunc_mirr=[14:14] */
+					/* miss_prepend=[15:15] */
+	/* Qword 1 */
+	u8 status_err0_qw1;
+	u8 status_err1;
+	u8 fflags1;
+	u8 ts_low;
+	__le16 fmd0;
+	__le16 fmd1;
+	/* Qword 2 */
+	__le16 fmd2;
+	u8 fflags2;
+	u8 hash3;
+	__le16 fmd3;
+	__le16 fmd4;
+	/* Qword 3 */
+	__le16 fmd5;
+	__le16 fmd6;
+	__le16 fmd7_0;
+	__le16 fmd7_1;
+}; /* writeback */
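+
+/*
+ * A decoding sketch (hypothetical code, not part of the original
+ * interface): the VIRTCHNL_RXD_FLEX_* masks at the top of this file
+ * carve up pktlen_gen_bufq_id in the advanced descriptor above; "adv"
+ * is an assumed struct virtchnl_rx_flex_desc_adv pointer:
+ *
+ *	u16 w = le16toh(adv->pktlen_gen_bufq_id);
+ *	u16 plen = (w & VIRTCHNL_RXD_FLEX_LEN_PBUF_M) >>
+ *	    VIRTCHNL_RXD_FLEX_LEN_PBUF_S;
+ *	bool gen = (w & VIRTCHNL_RXD_FLEX_GEN_M) != 0;		(splitq only)
+ *	bool bufq_id = (w & VIRTCHNL_RXD_FLEX_BUFQ_ID_M) != 0;	(splitq only)
+ *
+ * An assumption about usage: the generation bit lets the driver spot
+ * newly written-back descriptors as it polls the completion ring.
+ */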
+
+/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile
+ * RxDID Profile Id 8
+ * Flex-field 0: BufferID
+ * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
+ * Flex-field 2: Hash[15:0]
+ * Flex-flags 2: Hash[23:16]
+ * Flex-field 3: L2TAG2
+ * Flex-field 5: L2TAG1
+ * Flex-field 7: Timestamp (upper 32 bits)
+ */
+struct virtchnl_rx_flex_desc_adv_nic_3 {
+	/* Qword 0 */
+	u8 rxdid_ucast;			/* profile_id=[3:0] */
+					/* rsvd=[5:4] */
+					/* ucast=[7:6] */
+	u8 status_err0_qw0;
+	__le16 ptype_err_fflags0;	/* ptype=[9:0] */
+					/* ip_hdr_err=[10:10] */
+					/* udp_len_err=[11:11] */
+					/* ff0=[15:12] */
+	__le16 pktlen_gen_bufq_id;	/* plen=[13:0] */
+					/* gen=[14:14] only in splitq */
+					/* bufq_id=[15:15] only in splitq */
+	__le16 hdrlen_flags;		/* header=[9:0] */
+					/* rsc=[10:10] only in splitq */
+					/* sph=[11:11] only in splitq */
+					/* ext_udp_0=[12:12] */
+					/* int_udp_0=[13:13] */
+					/* trunc_mirr=[14:14] */
+					/* miss_prepend=[15:15] */
+	/* Qword 1 */
+	u8 status_err0_qw1;
+	u8 status_err1;
+	u8 fflags1;
+	u8 ts_low;
+	__le16 buf_id; /* only in splitq */
+	union {
+		__le16 raw_cs;
+		__le16 l2tag1;
+		__le16 rscseglen;
+	} misc;
+	/* Qword 2 */
+	__le16 hash1;
+	union {
+		u8 fflags2;
+		u8 mirrorid;
+		u8 hash2;
+	} ff2_mirrid_hash2;
+	u8 hash3;
+	__le16 l2tag2;
+	__le16 fmd4;
+	/* Qword 3 */
+	__le16 l2tag1;
+	__le16 fmd6;
+	__le32 ts_high;
+}; /* writeback */
+
+union virtchnl_rx_desc {
+	struct virtchnl_singleq_rx_buf_desc read;
+	struct virtchnl_singleq_base_rx_desc base_wb;
+	struct virtchnl_rx_flex_desc flex_wb;
+	struct virtchnl_rx_flex_desc_adv flex_wb_adv;
+};
+
+#endif /* _VIRTCHNL_LAN_DESC_H_ */
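+
+/*
+ * A closing usage sketch (hypothetical code, not part of the original
+ * interface): one ring entry can be viewed through union
+ * virtchnl_rx_desc either as the buffer descriptor software writes or
+ * as the completion hardware writes back; "ring", "i" and "buf_pa"
+ * are assumed driver-side names:
+ *
+ *	union virtchnl_rx_desc *rxd = &ring[i];
+ *
+ *	rxd->read.pkt_addr = htole64(buf_pa);	// posted by software
+ *	...
+ *	u8 rxdid = rxd->flex_wb.rxdid;		// read after writeback
+ */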