diff --git a/sys/dev/ice/ice_adminq_cmd.h b/sys/dev/ice/ice_adminq_cmd.h index 9179b0ce1226..5e8560dccb1e 100644 --- a/sys/dev/ice/ice_adminq_cmd.h +++ b/sys/dev/ice/ice_adminq_cmd.h @@ -1,3036 +1,3166 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_ADMINQ_CMD_H_ #define _ICE_ADMINQ_CMD_H_ /* This header file defines the Admin Queue commands, error codes and * descriptor format. It is shared between Firmware and Software. */ #define ICE_MAX_VSI 768 #define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 struct ice_aqc_generic { __le32 param0; __le32 param1; __le32 addr_high; __le32 addr_low; }; /* Get version (direct 0x0001) */ struct ice_aqc_get_ver { __le32 rom_ver; __le32 fw_build; u8 fw_branch; u8 fw_major; u8 fw_minor; u8 fw_patch; u8 api_branch; u8 api_major; u8 api_minor; u8 api_patch; }; /* Send driver version (indirect 0x0002) */ struct ice_aqc_driver_ver { u8 major_ver; u8 minor_ver; u8 build_ver; u8 subbuild_ver; u8 reserved[4]; __le32 addr_high; __le32 addr_low; }; /* Queue Shutdown (direct 0x0003) */ struct ice_aqc_q_shutdown { u8 driver_unloading; #define ICE_AQC_DRIVER_UNLOADING BIT(0) u8 reserved[15]; }; /* Get Expanded Error Code (0x0005, direct) */ struct ice_aqc_get_exp_err { __le32 reason; #define ICE_AQC_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF __le32 identifier; u8 rsvd[8]; }; /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ struct ice_aqc_req_res { __le16 res_id; #define ICE_AQC_RES_ID_NVM 1 #define ICE_AQC_RES_ID_SDP 2 #define ICE_AQC_RES_ID_CHNG_LOCK 3 #define ICE_AQC_RES_ID_GLBL_LOCK 4 __le16 access_type; #define ICE_AQC_RES_ACCESS_READ 1 #define ICE_AQC_RES_ACCESS_WRITE 2 /* Upon successful completion, FW writes this value and driver is * expected to release resource before timeout. This value is provided * in milliseconds. 
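 *
 * A minimal acquire/release sketch (illustrative only, not part of this
 * header: ice_fill_dflt_direct_cmd_desc(), ice_aq_send_cmd(), the
 * ice_aqc_opc_req_res/ice_aqc_opc_release_res opcodes and the
 * desc.params.res_owner union member are assumed driver-side helpers):
 *
 *	struct ice_aq_desc desc;
 *	struct ice_aqc_req_res *cmd = &desc.params.res_owner;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
 *	cmd->res_id = CPU_TO_LE16(ICE_AQC_RES_ID_NVM);
 *	cmd->access_type = CPU_TO_LE16(ICE_AQC_RES_ACCESS_READ);
 *	if (!ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) {
 *		// FW filled cmd->timeout (ms); finish and release before then
 *		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
 *		cmd->res_id = CPU_TO_LE16(ICE_AQC_RES_ID_NVM);
 *		ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 *	}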
*/ __le32 timeout; #define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000 #define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000 #define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000 #define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000 /* For SDP: pin ID of the SDP */ __le32 res_number; /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */ __le16 status; #define ICE_AQ_RES_GLBL_SUCCESS 0 #define ICE_AQ_RES_GLBL_IN_PROG 1 #define ICE_AQ_RES_GLBL_DONE 2 u8 reserved[2]; }; /* Get function capabilities (indirect 0x000A) * Get device capabilities (indirect 0x000B) */ struct ice_aqc_list_caps { u8 cmd_flags; u8 pf_index; u8 reserved[2]; __le32 count; __le32 addr_high; __le32 addr_low; }; /* Device/Function buffer entry, repeated per reported capability */ struct ice_aqc_list_caps_elem { __le16 cap; #define ICE_AQC_CAPS_SWITCHING_MODE 0x0001 #define ICE_AQC_CAPS_MANAGEABILITY_MODE 0x0002 #define ICE_AQC_CAPS_OS2BMC 0x0004 #define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005 #define ICE_AQC_MAX_VALID_FUNCTIONS 0x8 #define ICE_AQC_CAPS_ALTERNATE_RAM 0x0006 #define ICE_AQC_CAPS_WOL_PROXY 0x0008 #define ICE_AQC_CAPS_SRIOV 0x0012 #define ICE_AQC_CAPS_VF 0x0013 #define ICE_AQC_CAPS_802_1QBG 0x0015 #define ICE_AQC_CAPS_802_1BR 0x0016 #define ICE_AQC_CAPS_VSI 0x0017 #define ICE_AQC_CAPS_DCB 0x0018 #define ICE_AQC_CAPS_RSVD 0x0021 #define ICE_AQC_CAPS_ISCSI 0x0022 #define ICE_AQC_CAPS_RSS 0x0040 #define ICE_AQC_CAPS_RXQS 0x0041 #define ICE_AQC_CAPS_TXQS 0x0042 #define ICE_AQC_CAPS_MSIX 0x0043 #define ICE_AQC_CAPS_MAX_MTU 0x0047 #define ICE_AQC_CAPS_NVM_VER 0x0048 #define ICE_AQC_CAPS_OROM_VER 0x004A #define ICE_AQC_CAPS_NET_VER 0x004C #define ICE_AQC_CAPS_CEM 0x00F2 #define ICE_AQC_CAPS_IWARP 0x0051 #define ICE_AQC_CAPS_LED 0x0061 #define ICE_AQC_CAPS_SDP 0x0062 #define ICE_AQC_CAPS_WR_CSR_PROT 0x0064 #define ICE_AQC_CAPS_LOGI_TO_PHYSI_PORT_MAP 0x0073 #define ICE_AQC_CAPS_SKU 0x0074 #define ICE_AQC_CAPS_PORT_MAP 0x0075 #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 +#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 u8 major_ver; u8 minor_ver; /* Number of resources described by this capability */ __le32 number; /* Only meaningful for some types of resources */ __le32 logical_id; /* Only meaningful for some types of resources */ __le32 phys_id; __le64 rsvd1; __le64 rsvd2; }; /* Manage MAC address, read command - indirect (0x0107) * This struct is also used for the response */ struct ice_aqc_manage_mac_read { __le16 flags; /* Zeroed by device driver */ #define ICE_AQC_MAN_MAC_LAN_ADDR_VALID BIT(4) #define ICE_AQC_MAN_MAC_SAN_ADDR_VALID BIT(5) #define ICE_AQC_MAN_MAC_PORT_ADDR_VALID BIT(6) #define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7) #define ICE_AQC_MAN_MAC_MC_MAG_EN BIT(8) #define ICE_AQC_MAN_MAC_WOL_PRESERVE_ON_PFR BIT(9) #define ICE_AQC_MAN_MAC_READ_S 4 #define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S) u8 rsvd[2]; u8 num_addr; /* Used in response */ u8 rsvd1[3]; __le32 addr_high; __le32 addr_low; }; /* Response buffer format for manage MAC read command */ struct ice_aqc_manage_mac_read_resp { u8 lport_num; u8 addr_type; #define ICE_AQC_MAN_MAC_ADDR_TYPE_LAN 0 #define ICE_AQC_MAN_MAC_ADDR_TYPE_WOL 1 u8 mac_addr[ETH_ALEN]; }; /* Manage MAC address, write command - direct (0x0108) */ struct ice_aqc_manage_mac_write { u8 rsvd; u8 flags; #define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0) #define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1) #define ICE_AQC_MAN_MAC_WR_S 6 #define ICE_AQC_MAN_MAC_WR_M MAKEMASK(3, ICE_AQC_MAN_MAC_WR_S) #define ICE_AQC_MAN_MAC_UPDATE_LAA 0 #define 
ICE_AQC_MAN_MAC_UPDATE_LAA_WOL BIT(ICE_AQC_MAN_MAC_WR_S) /* byte stream in network order */ u8 mac_addr[ETH_ALEN]; __le32 addr_high; __le32 addr_low; }; /* Clear PXE Command and response (direct 0x0110) */ struct ice_aqc_clear_pxe { u8 rx_cnt; #define ICE_AQC_CLEAR_PXE_RX_CNT 0x2 u8 reserved[15]; }; /* Configure No-Drop Policy Command (direct 0x0112) */ struct ice_aqc_config_no_drop_policy { u8 opts; #define ICE_AQC_FORCE_NO_DROP BIT(0) u8 rsvd[15]; }; /* Get switch configuration (0x0200) */ struct ice_aqc_get_sw_cfg { /* Reserved for command and copy of request flags for response */ __le16 flags; /* First desc in case of command and next_elem in case of response * In case of response, if it is not zero, means all the configuration * was not returned and new command shall be sent with this value in * the 'first desc' field */ __le16 element; /* Reserved for command, only used for response */ __le16 num_elems; __le16 rsvd; __le32 addr_high; __le32 addr_low; }; /* Each entry in the response buffer is of the following type: */ struct ice_aqc_get_sw_cfg_resp_elem { /* VSI/Port Number */ __le16 vsi_port_num; #define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S 0 #define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M \ (0x3FF << ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S) #define ICE_AQC_GET_SW_CONF_RESP_TYPE_S 14 #define ICE_AQC_GET_SW_CONF_RESP_TYPE_M (0x3 << ICE_AQC_GET_SW_CONF_RESP_TYPE_S) #define ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT 0 #define ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT 1 #define ICE_AQC_GET_SW_CONF_RESP_VSI 2 /* SWID VSI/Port belongs to */ __le16 swid; /* Bit 14..0 : PF/VF number VSI belongs to * Bit 15 : VF indication bit */ __le16 pf_vf_num; #define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S 0 #define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M \ (0x7FFF << ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S) #define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15) }; /* Set Port parameters, (direct, 0x0203) */ struct ice_aqc_set_port_params { __le16 cmd_flags; #define ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS BIT(0) #define ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS BIT(1) #define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2) __le16 bad_frame_vsi; #define ICE_AQC_SET_P_PARAMS_VSI_S 0 #define ICE_AQC_SET_P_PARAMS_VSI_M (0x3FF << ICE_AQC_SET_P_PARAMS_VSI_S) #define ICE_AQC_SET_P_PARAMS_VSI_VALID BIT(15) __le16 swid; #define ICE_AQC_SET_P_PARAMS_SWID_S 0 #define ICE_AQC_SET_P_PARAMS_SWID_M (0xFF << ICE_AQC_SET_P_PARAMS_SWID_S) #define ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S 8 #define ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_M \ (0x3F << ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S) #define ICE_AQC_SET_P_PARAMS_IS_LOGI_PORT BIT(14) #define ICE_AQC_SET_P_PARAMS_SWID_VALID BIT(15) u8 reserved[10]; }; /* These resource type defines are used for all switch resource * commands where a resource type is required, such as: * Get Resource Allocation command (indirect 0x0204) * Allocate Resources command (indirect 0x0208) * Free Resources command (indirect 0x0209) * Get Allocated Resource Descriptors Command (indirect 0x020A) */ #define ICE_AQC_RES_TYPE_VEB_COUNTER 0x00 #define ICE_AQC_RES_TYPE_VLAN_COUNTER 0x01 #define ICE_AQC_RES_TYPE_MIRROR_RULE 0x02 #define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03 #define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04 #define ICE_AQC_RES_TYPE_RECIPE 0x05 #define ICE_AQC_RES_TYPE_PROFILE 0x06 #define ICE_AQC_RES_TYPE_SWID 0x07 #define ICE_AQC_RES_TYPE_VSI 0x08 #define ICE_AQC_RES_TYPE_FLU 0x09 #define ICE_AQC_RES_TYPE_WIDE_TABLE_1 0x0A #define ICE_AQC_RES_TYPE_WIDE_TABLE_2 0x0B #define ICE_AQC_RES_TYPE_WIDE_TABLE_4 0x0C #define 
ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH 0x20 #define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21 #define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22 #define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23 #define ICE_AQC_RES_TYPE_FLEX_DESC_PROG 0x30 #define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID 0x48 #define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM 0x49 #define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID 0x50 #define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM 0x51 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61 /* Resource types 0x62-67 are reserved for Hash profile builder */ #define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID 0x68 #define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM 0x69 #define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7) #define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12) #define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13) #define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00 #define ICE_AQC_RES_TYPE_S 0 #define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S) /* Get Resource Allocation command (indirect 0x0204) */ struct ice_aqc_get_res_alloc { __le16 resp_elem_num; /* Used in response, reserved in command */ u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Get Resource Allocation Response Buffer per response */ struct ice_aqc_get_res_resp_elem { __le16 res_type; /* Types defined above cmd 0x0204 */ __le16 total_capacity; /* Resources available to all PF's */ __le16 total_function; /* Resources allocated for a PF */ __le16 total_shared; /* Resources allocated as shared */ __le16 total_free; /* Resources un-allocated/not reserved by any PF */ }; /* Allocate Resources command (indirect 0x0208) * Free Resources command (indirect 0x0209) */ struct ice_aqc_alloc_free_res_cmd { __le16 num_entries; /* Number of Resource entries */ u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Resource descriptor */ struct ice_aqc_res_elem { union { __le16 sw_resp; __le16 flu_resp; } e; }; /* Buffer for Allocate/Free Resources commands */ struct ice_aqc_alloc_free_res_elem { __le16 res_type; /* Types defined above cmd 0x0204 */ #define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S 8 #define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \ (0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S) __le16 num_elems; struct ice_aqc_res_elem elem[STRUCT_HACK_VAR_LEN]; }; /* Get Allocated Resource Descriptors Command (indirect 0x020A) */ struct ice_aqc_get_allocd_res_desc { union { struct { __le16 res; /* Types defined above cmd 0x0204 */ __le16 first_desc; __le32 reserved; } cmd; struct { __le16 res; __le16 next_desc; __le16 num_desc; __le16 reserved; } resp; } ops; __le32 addr_high; __le32 addr_low; }; +/* Request buffer for Set VLAN Mode AQ command (indirect 0x020C) */ +struct ice_aqc_set_vlan_mode { + u8 reserved; + u8 l2tag_prio_tagging; +#define ICE_AQ_VLAN_PRIO_TAG_S 0 +#define ICE_AQ_VLAN_PRIO_TAG_M (0x7 << ICE_AQ_VLAN_PRIO_TAG_S) +#define ICE_AQ_VLAN_PRIO_TAG_NOT_SUPPORTED 0x0 +#define ICE_AQ_VLAN_PRIO_TAG_STAG 0x1 +#define ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG 0x2 +#define ICE_AQ_VLAN_PRIO_TAG_OUTER_VLAN 0x3 +#define ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG 0x4 +#define ICE_AQ_VLAN_PRIO_TAG_MAX 0x4 +#define ICE_AQ_VLAN_PRIO_TAG_ERROR 0x7 + u8 l2tag_reserved[64]; + u8 rdma_packet; +#define ICE_AQ_VLAN_RDMA_TAG_S 0 +#define ICE_AQ_VLAN_RDMA_TAG_M (0x3F << ICE_AQ_VLAN_RDMA_TAG_S) +#define ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING 0x10 +#define ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING 0x1A + u8 rdma_reserved[2]; + u8 mng_vlan_prot_id; +#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER 0x10 +#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER 0x11 + u8 
prot_id_reserved[30]; +}; + +/* Response buffer for Get VLAN Mode AQ command (indirect 0x020D) */ +struct ice_aqc_get_vlan_mode { + u8 vlan_mode; +#define ICE_AQ_VLAN_MODE_DVM_ENA BIT(0) + u8 l2tag_prio_tagging; + u8 reserved[98]; +}; + /* Add VSI (indirect 0x0210) * Update VSI (indirect 0x0211) * Get VSI (indirect 0x0212) * Free VSI (indirect 0x0213) */ struct ice_aqc_add_get_update_free_vsi { __le16 vsi_num; #define ICE_AQ_VSI_NUM_S 0 #define ICE_AQ_VSI_NUM_M (0x03FF << ICE_AQ_VSI_NUM_S) #define ICE_AQ_VSI_IS_VALID BIT(15) __le16 cmd_flags; #define ICE_AQ_VSI_KEEP_ALLOC 0x1 u8 vf_id; u8 reserved; __le16 vsi_flags; #define ICE_AQ_VSI_TYPE_S 0 #define ICE_AQ_VSI_TYPE_M (0x3 << ICE_AQ_VSI_TYPE_S) #define ICE_AQ_VSI_TYPE_VF 0x0 #define ICE_AQ_VSI_TYPE_VMDQ2 0x1 #define ICE_AQ_VSI_TYPE_PF 0x2 #define ICE_AQ_VSI_TYPE_EMP_MNG 0x3 __le32 addr_high; __le32 addr_low; }; /* Response descriptor for: * Add VSI (indirect 0x0210) * Update VSI (indirect 0x0211) * Free VSI (indirect 0x0213) */ struct ice_aqc_add_update_free_vsi_resp { __le16 vsi_num; __le16 ext_status; __le16 vsi_used; __le16 vsi_free; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_get_vsi_resp { __le16 vsi_num; u8 vf_id; /* The vsi_flags field uses the ICE_AQ_VSI_TYPE_* defines for values. * These are found above in struct ice_aqc_add_get_update_free_vsi. */ u8 vsi_flags; __le16 vsi_used; __le16 vsi_free; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_vsi_props { __le16 valid_sections; #define ICE_AQ_VSI_PROP_SW_VALID BIT(0) #define ICE_AQ_VSI_PROP_SECURITY_VALID BIT(1) #define ICE_AQ_VSI_PROP_VLAN_VALID BIT(2) #define ICE_AQ_VSI_PROP_OUTER_TAG_VALID BIT(3) #define ICE_AQ_VSI_PROP_INGRESS_UP_VALID BIT(4) #define ICE_AQ_VSI_PROP_EGRESS_UP_VALID BIT(5) #define ICE_AQ_VSI_PROP_RXQ_MAP_VALID BIT(6) #define ICE_AQ_VSI_PROP_Q_OPT_VALID BIT(7) #define ICE_AQ_VSI_PROP_OUTER_UP_VALID BIT(8) #define ICE_AQ_VSI_PROP_FLOW_DIR_VALID BIT(11) #define ICE_AQ_VSI_PROP_PASID_VALID BIT(12) /* switch section */ u8 sw_id; u8 sw_flags; #define ICE_AQ_VSI_SW_FLAG_ALLOW_LB BIT(5) #define ICE_AQ_VSI_SW_FLAG_LOCAL_LB BIT(6) #define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7) u8 sw_flags2; #define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0 -#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \ - (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S) +#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S) #define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0) #define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4) u8 veb_stat_id; #define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0 -#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S) +#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S) #define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5) /* security section */ u8 sec_flags; #define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0) #define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2) -#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4 -#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S) +#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4 +#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S) #define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0) u8 sec_reserved; /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - u8 pvlan_reserved[2]; - u8 vlan_flags; -#define ICE_AQ_VSI_VLAN_MODE_S 0 -#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S) -#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1 -#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2 -#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3 -#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2) -#define 
ICE_AQ_VSI_VLAN_EMOD_S 3 -#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) -#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S) -#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S) -#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S) -#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S) - u8 pvlan_reserved2[3]; + __le16 port_based_inner_vlan; /* VLANS include priority bits */ + u8 inner_vlan_reserved[2]; + u8 inner_vlan_flags; +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_S 0 +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_TX_MODE_S) +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1 +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED 0x2 +#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL 0x3 +#define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3 +#define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) +#define ICE_AQ_VSI_INNER_VLAN_BLOCK_TX_DESC BIT(5) + u8 inner_vlan_reserved2[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ -#define ICE_AQ_VSI_UP_TABLE_UP0_S 0 -#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S) -#define ICE_AQ_VSI_UP_TABLE_UP1_S 3 -#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S) -#define ICE_AQ_VSI_UP_TABLE_UP2_S 6 -#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S) -#define ICE_AQ_VSI_UP_TABLE_UP3_S 9 -#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S) -#define ICE_AQ_VSI_UP_TABLE_UP4_S 12 -#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S) -#define ICE_AQ_VSI_UP_TABLE_UP5_S 15 -#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S) -#define ICE_AQ_VSI_UP_TABLE_UP6_S 18 -#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S) -#define ICE_AQ_VSI_UP_TABLE_UP7_S 21 -#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S) +#define ICE_AQ_VSI_UP_TABLE_UP0_S 0 +#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S) +#define ICE_AQ_VSI_UP_TABLE_UP1_S 3 +#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S) +#define ICE_AQ_VSI_UP_TABLE_UP2_S 6 +#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S) +#define ICE_AQ_VSI_UP_TABLE_UP3_S 9 +#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S) +#define ICE_AQ_VSI_UP_TABLE_UP4_S 12 +#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S) +#define ICE_AQ_VSI_UP_TABLE_UP5_S 15 +#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S) +#define ICE_AQ_VSI_UP_TABLE_UP6_S 18 +#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S) +#define ICE_AQ_VSI_UP_TABLE_UP7_S 21 +#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S) __le32 egress_table; /* same defines as for ingress table */ /* outer tags section */ - __le16 outer_tag; - u8 outer_tag_flags; -#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0 -#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S) -#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0 -#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1 -#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2 -#define 
ICE_AQ_VSI_OUTER_TAG_TYPE_S 2 -#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S) -#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0 -#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1 -#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2 -#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3 -#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4) -#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6) - u8 outer_tag_reserved; + __le16 port_based_outer_vlan; + u8 outer_vlan_flags; +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_S 0 +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_EMODE_S) +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH 0x0 +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_UP 0x1 +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW 0x2 +#define ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING 0x3 +#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2 +#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S) +#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0 +#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1 +#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2 +#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3 +#define ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT BIT(4) +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S 5 +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1 +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTTAGGED 0x2 +#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL 0x3 +#define ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC BIT(7) + u8 outer_vlan_reserved; /* queue mapping section */ __le16 mapping_flags; -#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0 -#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0) +#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0 +#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0) __le16 q_mapping[16]; -#define ICE_AQ_VSI_Q_S 0 -#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S) +#define ICE_AQ_VSI_Q_S 0 +#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S) __le16 tc_mapping[8]; -#define ICE_AQ_VSI_TC_Q_OFFSET_S 0 -#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S) -#define ICE_AQ_VSI_TC_Q_NUM_S 11 -#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S) +#define ICE_AQ_VSI_TC_Q_OFFSET_S 0 +#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S) +#define ICE_AQ_VSI_TC_Q_NUM_S 11 +#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S) /* queueing option section */ u8 q_opt_rss; -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0 -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0 -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2 -#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3 -#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2 -#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S) -#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6 -#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) -#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2 +#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3 +#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2 +#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S) +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6 +#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define 
ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) +#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) u8 q_opt_tc; -#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0 -#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S) -#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7) +#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0 +#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S) +#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7) u8 q_opt_flags; -#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0) +#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0) u8 q_opt_reserved[3]; /* outer up section */ __le32 outer_up_table; /* same structure and defines as ingress tbl */ /* section 10 */ __le16 sect_10_reserved; /* flow director section */ __le16 fd_options; -#define ICE_AQ_VSI_FD_ENABLE BIT(0) -#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1) -#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3) +#define ICE_AQ_VSI_FD_ENABLE BIT(0) +#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1) +#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3) __le16 max_fd_fltr_dedicated; __le16 max_fd_fltr_shared; __le16 fd_def_q; -#define ICE_AQ_VSI_FD_DEF_Q_S 0 -#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S) -#define ICE_AQ_VSI_FD_DEF_GRP_S 12 -#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S) +#define ICE_AQ_VSI_FD_DEF_Q_S 0 +#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S) +#define ICE_AQ_VSI_FD_DEF_GRP_S 12 +#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S) __le16 fd_report_opt; -#define ICE_AQ_VSI_FD_REPORT_Q_S 0 -#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S) -#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12 -#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S) -#define ICE_AQ_VSI_FD_DEF_DROP BIT(15) +#define ICE_AQ_VSI_FD_REPORT_Q_S 0 +#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S) +#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12 +#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S) +#define ICE_AQ_VSI_FD_DEF_DROP BIT(15) /* PASID section */ __le32 pasid_id; -#define ICE_AQ_VSI_PASID_ID_S 0 -#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S) -#define ICE_AQ_VSI_PASID_ID_VALID BIT(31) +#define ICE_AQ_VSI_PASID_ID_S 0 +#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S) +#define ICE_AQ_VSI_PASID_ID_VALID BIT(31) u8 reserved[24]; }; /* Add/update mirror rule - direct (0x0260) */ #define ICE_AQC_RULE_ID_VALID_S 7 #define ICE_AQC_RULE_ID_VALID_M (0x1 << ICE_AQC_RULE_ID_VALID_S) #define ICE_AQC_RULE_ID_S 0 #define ICE_AQC_RULE_ID_M (0x3F << ICE_AQC_RULE_ID_S) /* Following defines to be used while processing caller specified mirror list * of VSI indexes. */ /* Action: Byte.bit (1.7) * 0 = Remove VSI from mirror rule * 1 = Add VSI to mirror rule */ #define ICE_AQC_RULE_ACT_S 15 #define ICE_AQC_RULE_ACT_M (0x1 << ICE_AQC_RULE_ACT_S) /* Action: 1.2:0.0 = Mirrored VSI */ #define ICE_AQC_RULE_MIRRORED_VSI_S 0 #define ICE_AQC_RULE_MIRRORED_VSI_M (0x7FF << ICE_AQC_RULE_MIRRORED_VSI_S) /* This is to be used by add/update mirror rule Admin Queue command. * In case of add mirror rule - if rule ID is specified as * INVAL_MIRROR_RULE_ID, new rule ID is allocated from shared pool. * If specified rule_id is valid, then it is used. If specified rule_id * is in use then new mirroring rule is added. 
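 *
 * A sketch of filling the caller-provided buffer of __le16 entries for an
 * add (illustrative only; 'buf', 'vsi_list' and 'num_vsi' are assumed
 * caller-side names, not part of this interface):
 *
 *	for (i = 0; i < num_vsi; i++)	// bit 15 set: add VSI to the rule
 *		buf[i] = CPU_TO_LE16(ICE_AQC_RULE_ACT_M |
 *				     ((vsi_list[i] << ICE_AQC_RULE_MIRRORED_VSI_S) &
 *				      ICE_AQC_RULE_MIRRORED_VSI_M));
 *	cmd->rule_id = CPU_TO_LE16(ICE_INVAL_MIRROR_RULE_ID); // allocate new ID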
*/ #define ICE_INVAL_MIRROR_RULE_ID 0xFFFF struct ice_aqc_add_update_mir_rule { __le16 rule_id; __le16 rule_type; #define ICE_AQC_RULE_TYPE_S 0 #define ICE_AQC_RULE_TYPE_M (0x7 << ICE_AQC_RULE_TYPE_S) /* VPORT ingress/egress */ #define ICE_AQC_RULE_TYPE_VPORT_INGRESS 0x1 #define ICE_AQC_RULE_TYPE_VPORT_EGRESS 0x2 /* Physical port ingress mirroring. * All traffic received by this port */ #define ICE_AQC_RULE_TYPE_PPORT_INGRESS 0x6 /* Physical port egress mirroring. All traffic sent by this port */ #define ICE_AQC_RULE_TYPE_PPORT_EGRESS 0x7 /* Number of mirrored entries. * The values are in the command buffer */ __le16 num_entries; /* Destination VSI */ __le16 dest; __le32 addr_high; __le32 addr_low; }; /* Delete mirror rule - direct (0x0261) */ struct ice_aqc_delete_mir_rule { __le16 rule_id; __le16 rsvd; /* Byte.bit: 20.0 = Keep allocation. If set VSI stays part of * the PF allocated resources, otherwise it is returned to the * shared pool */ #define ICE_AQC_FLAG_KEEP_ALLOCD_S 0 #define ICE_AQC_FLAG_KEEP_ALLOCD_M (0x1 << ICE_AQC_FLAG_KEEP_ALLOCD_S) __le16 flags; u8 reserved[10]; }; /* Set/Get storm config - (direct 0x0280, 0x0281) */ /* This structure holds the get storm configuration response, and the same * structure is used to perform set_storm_cfg */ struct ice_aqc_storm_cfg { __le32 bcast_thresh_size; __le32 mcast_thresh_size; /* Bit 18:0 - Traffic upper threshold size * Bit 31:19 - Reserved */ #define ICE_AQ_THRESHOLD_S 0 #define ICE_AQ_THRESHOLD_M (0x7FFFF << ICE_AQ_THRESHOLD_S) __le32 storm_ctrl_ctrl; /* Bit 0: MDIPW - Drop Multicast packets in previous window * Bit 1: MDICW - Drop multicast packets in current window * Bit 2: BDIPW - Drop broadcast packets in previous window * Bit 3: BDICW - Drop broadcast packets in current window */ #define ICE_AQ_STORM_CTRL_MDIPW_DROP_MULTICAST BIT(0) #define ICE_AQ_STORM_CTRL_MDICW_DROP_MULTICAST BIT(1) #define ICE_AQ_STORM_CTRL_BDIPW_DROP_MULTICAST BIT(2) #define ICE_AQ_STORM_CTRL_BDICW_DROP_MULTICAST BIT(3) /* Bit 7:5 : Reserved */ /* Bit 27:8 : Interval - BSC/MSC Time-interval specification: The * interval size for applying ingress broadcast or multicast storm * control. */ #define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S 8 #define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_M \ (0xFFFFF << ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S) __le32 reserved; }; #define ICE_MAX_NUM_RECIPES 64 /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ struct ice_aqc_sw_rules { /* ops: add switch rules, referring to the number of rules. * ops: update switch rules, referring to the number of filters * ops: remove switch rules, referring to the entry index. * ops: get switch rules, referring to the number of filters. */ __le16 num_rules_fltr_entry_index; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Add/Update/Get/Remove lookup Rx/Tx command/response entry * This structure describes the lookup rules and associated actions. "index" * is returned as part of a response to a successful Add command, and can be * used to identify the rule for Update/Get/Remove commands. 
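 *
 * Example of composing the 'act' word below for a forward-to-VSI rule (a
 * sketch; 'vsi_id' is an assumed caller-supplied hardware VSI number):
 *
 *	u32 act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT |
 *		  ICE_SINGLE_ACT_LAN_ENABLE |
 *		  ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M);
 *	rule->act = CPU_TO_LE32(act);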
*/ struct ice_sw_rule_lkup_rx_tx { __le16 recipe_id; #define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10 /* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */ __le16 src; __le32 act; /* Bit 0:1 - Action type */ #define ICE_SINGLE_ACT_TYPE_S 0x00 #define ICE_SINGLE_ACT_TYPE_M (0x3 << ICE_SINGLE_ACT_TYPE_S) /* Bit 2 - Loop back enable * Bit 3 - LAN enable */ #define ICE_SINGLE_ACT_LB_ENABLE BIT(2) #define ICE_SINGLE_ACT_LAN_ENABLE BIT(3) /* Action type = 0 - Forward to VSI or VSI list */ #define ICE_SINGLE_ACT_VSI_FORWARDING 0x0 #define ICE_SINGLE_ACT_VSI_ID_S 4 #define ICE_SINGLE_ACT_VSI_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_ID_S) #define ICE_SINGLE_ACT_VSI_LIST_ID_S 4 #define ICE_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_LIST_ID_S) /* This bit needs to be set if action is forward to VSI list */ #define ICE_SINGLE_ACT_VSI_LIST BIT(14) #define ICE_SINGLE_ACT_VALID_BIT BIT(17) #define ICE_SINGLE_ACT_DROP BIT(18) /* Action type = 1 - Forward to Queue of Queue group */ #define ICE_SINGLE_ACT_TO_Q 0x1 #define ICE_SINGLE_ACT_Q_INDEX_S 4 #define ICE_SINGLE_ACT_Q_INDEX_M (0x7FF << ICE_SINGLE_ACT_Q_INDEX_S) #define ICE_SINGLE_ACT_Q_REGION_S 15 #define ICE_SINGLE_ACT_Q_REGION_M (0x7 << ICE_SINGLE_ACT_Q_REGION_S) #define ICE_SINGLE_ACT_Q_PRIORITY BIT(18) /* Action type = 2 - Prune */ #define ICE_SINGLE_ACT_PRUNE 0x2 #define ICE_SINGLE_ACT_EGRESS BIT(15) #define ICE_SINGLE_ACT_INGRESS BIT(16) #define ICE_SINGLE_ACT_PRUNET BIT(17) /* Bit 18 should be set to 0 for this action */ /* Action type = 2 - Pointer */ #define ICE_SINGLE_ACT_PTR 0x2 #define ICE_SINGLE_ACT_PTR_VAL_S 4 #define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S) /* Bit 18 should be set to 1 */ #define ICE_SINGLE_ACT_PTR_BIT BIT(18) /* Action type = 3 - Other actions. Last two bits * are other action identifier */ #define ICE_SINGLE_ACT_OTHER_ACTS 0x3 #define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17 #define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \ (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S) /* Bit 17:18 - Defines other actions */ /* Other action = 0 - Mirror VSI */ #define ICE_SINGLE_OTHER_ACT_MIRROR 0 #define ICE_SINGLE_ACT_MIRROR_VSI_ID_S 4 #define ICE_SINGLE_ACT_MIRROR_VSI_ID_M \ (0x3FF << ICE_SINGLE_ACT_MIRROR_VSI_ID_S) /* Other action = 3 - Set Stat count */ #define ICE_SINGLE_OTHER_ACT_STAT_COUNT 3 #define ICE_SINGLE_ACT_STAT_COUNT_INDEX_S 4 #define ICE_SINGLE_ACT_STAT_COUNT_INDEX_M \ (0x7F << ICE_SINGLE_ACT_STAT_COUNT_INDEX_S) __le16 index; /* The index of the rule in the lookup table */ /* Length and values of the header to be matched per recipe or * lookup-type */ __le16 hdr_len; u8 hdr[STRUCT_HACK_VAR_LEN]; }; /* Add/Update/Remove large action command/response entry * "index" is returned as part of a response to a successful Add command, and * can be used to identify the action for Update/Get/Remove commands. 
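 *
 * Example of packing one generic-value action word (a sketch; 'val' and
 * 'offset' are assumed caller-chosen metadata value and offset):
 *
 *	u32 act = ICE_LG_ACT_GENERIC |
 *		  ((val << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M) |
 *		  ((offset << ICE_LG_ACT_GENERIC_OFFSET_S) &
 *		   ICE_LG_ACT_GENERIC_OFFSET_M);
 *	lg_act->act[0] = CPU_TO_LE32(act);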
*/ struct ice_sw_rule_lg_act { __le16 index; /* Index in large action table */ __le16 size; /* Max number of large actions */ #define ICE_MAX_LG_ACT 4 /* Bit 0:2 - Action type */ #define ICE_LG_ACT_TYPE_S 0 #define ICE_LG_ACT_TYPE_M (0x7 << ICE_LG_ACT_TYPE_S) /* Action type = 0 - Forward to VSI or VSI list */ #define ICE_LG_ACT_VSI_FORWARDING 0 #define ICE_LG_ACT_VSI_ID_S 3 #define ICE_LG_ACT_VSI_ID_M (0x3FF << ICE_LG_ACT_VSI_ID_S) #define ICE_LG_ACT_VSI_LIST_ID_S 3 #define ICE_LG_ACT_VSI_LIST_ID_M (0x3FF << ICE_LG_ACT_VSI_LIST_ID_S) /* This bit needs to be set if action is forward to VSI list */ #define ICE_LG_ACT_VSI_LIST BIT(13) #define ICE_LG_ACT_VALID_BIT BIT(16) /* Action type = 1 - Forward to Queue of Queue group */ #define ICE_LG_ACT_TO_Q 0x1 #define ICE_LG_ACT_Q_INDEX_S 3 #define ICE_LG_ACT_Q_INDEX_M (0x7FF << ICE_LG_ACT_Q_INDEX_S) #define ICE_LG_ACT_Q_REGION_S 14 #define ICE_LG_ACT_Q_REGION_M (0x7 << ICE_LG_ACT_Q_REGION_S) #define ICE_LG_ACT_Q_PRIORITY_SET BIT(17) /* Action type = 2 - Prune */ #define ICE_LG_ACT_PRUNE 0x2 #define ICE_LG_ACT_EGRESS BIT(14) #define ICE_LG_ACT_INGRESS BIT(15) #define ICE_LG_ACT_PRUNET BIT(16) /* Action type = 3 - Mirror VSI */ #define ICE_LG_OTHER_ACT_MIRROR 0x3 #define ICE_LG_ACT_MIRROR_VSI_ID_S 3 #define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) /* Action type = 5 - Generic Value */ #define ICE_LG_ACT_GENERIC 0x5 #define ICE_LG_ACT_GENERIC_VALUE_S 3 #define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) #define ICE_LG_ACT_GENERIC_OFFSET_S 19 #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) #define ICE_LG_ACT_GENERIC_PRIORITY_S 22 #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) #define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7 /* Action = 7 - Set Stat count */ #define ICE_LG_ACT_STAT_COUNT 0x7 #define ICE_LG_ACT_STAT_COUNT_S 3 #define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S) __le32 act[STRUCT_HACK_VAR_LEN]; /* array of 'size' actions */ }; /* Add/Update/Remove VSI list command/response entry * "index" is returned as part of a response to a successful Add command, and * can be used to identify the VSI list for Update/Get/Remove commands. */ struct ice_sw_rule_vsi_list { __le16 index; /* Index of VSI/Prune list */ __le16 number_vsi; __le16 vsi[STRUCT_HACK_VAR_LEN]; /* Array of number_vsi VSI numbers */ }; #pragma pack(1) /* Query VSI list command/response entry */ struct ice_sw_rule_vsi_list_query { __le16 index; ice_declare_bitmap(vsi_list, ICE_MAX_VSI); }; #pragma pack() #pragma pack(1) /* Add switch rule response: * Content of the return buffer is the same as the input buffer. The status * field and LUT index are updated as part of the response 
*/ #define ICE_AQC_SW_RULES_T_LKUP_RX 0x0 #define ICE_AQC_SW_RULES_T_LKUP_TX 0x1 #define ICE_AQC_SW_RULES_T_LG_ACT 0x2 #define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3 #define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4 #define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5 #define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6 __le16 status; union { struct ice_sw_rule_lkup_rx_tx lkup_tx_rx; struct ice_sw_rule_lg_act lg_act; struct ice_sw_rule_vsi_list vsi_list; struct ice_sw_rule_vsi_list_query vsi_list_query; } pdata; }; #pragma pack() /* PFC Ignore (direct 0x0301) * The command and response use the same descriptor structure */ struct ice_aqc_pfc_ignore { u8 tc_bitmap; u8 cmd_flags; /* unused in response */ #define ICE_AQC_PFC_IGNORE_SET BIT(7) #define ICE_AQC_PFC_IGNORE_CLEAR 0 u8 reserved[14]; }; /* Set PFC Mode (direct 0x0303) * Query PFC Mode (direct 0x0302) */ struct ice_aqc_set_query_pfc_mode { u8 pfc_mode; /* For Set Command response, reserved in all other cases */ #define ICE_AQC_PFC_NOT_CONFIGURED 0 /* For Query Command response, reserved in all other cases */ #define ICE_AQC_DCB_DIS 0 #define ICE_AQC_PFC_VLAN_BASED_PFC 1 #define ICE_AQC_PFC_DSCP_BASED_PFC 2 u8 rsvd[15]; }; /* Set DCB Parameters (direct 0x0306) */ struct ice_aqc_set_dcb_params { u8 cmd_flags; /* unused in response */ #define ICE_AQC_LINK_UP_DCB_CFG BIT(0) #define ICE_AQC_PERSIST_DCB_CFG BIT(1) u8 valid_flags; /* unused in response */ #define ICE_AQC_LINK_UP_DCB_CFG_VALID BIT(0) #define ICE_AQC_PERSIST_DCB_CFG_VALID BIT(1) u8 rsvd[14]; }; /* Get Default Topology (indirect 0x0400) */ struct ice_aqc_get_topo { u8 port_num; u8 num_branches; __le16 reserved1; __le32 reserved2; __le32 addr_high; __le32 addr_low; }; /* Update TSE (indirect 0x0403) * Get TSE (indirect 0x0404) * Add TSE (indirect 0x0401) * Delete TSE (indirect 0x040F) * Move TSE (indirect 0x0408) * Suspend Nodes (indirect 0x0409) * Resume Nodes (indirect 0x040A) */ struct ice_aqc_sched_elem_cmd { __le16 num_elem_req; /* Used by commands */ __le16 num_elem_resp; /* Used by responses */ __le32 reserved; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_txsched_move_grp_info_hdr { __le32 src_parent_teid; __le32 dest_parent_teid; __le16 num_elems; - __le16 reserved; + u8 flags; + u8 reserved; }; struct ice_aqc_move_elem { struct ice_aqc_txsched_move_grp_info_hdr hdr; __le32 teid[STRUCT_HACK_VAR_LEN]; }; struct ice_aqc_elem_info_bw { __le16 bw_profile_idx; __le16 bw_alloc; }; struct ice_aqc_txsched_elem { u8 elem_type; /* Special field, reserved for some aq calls */ #define ICE_AQC_ELEM_TYPE_UNDEFINED 0x0 #define ICE_AQC_ELEM_TYPE_ROOT_PORT 0x1 #define ICE_AQC_ELEM_TYPE_TC 0x2 #define ICE_AQC_ELEM_TYPE_SE_GENERIC 0x3 #define ICE_AQC_ELEM_TYPE_ENTRY_POINT 0x4 #define ICE_AQC_ELEM_TYPE_LEAF 0x5 #define ICE_AQC_ELEM_TYPE_SE_PADDED 0x6 u8 valid_sections; #define ICE_AQC_ELEM_VALID_GENERIC BIT(0) #define ICE_AQC_ELEM_VALID_CIR BIT(1) #define ICE_AQC_ELEM_VALID_EIR BIT(2) #define ICE_AQC_ELEM_VALID_SHARED BIT(3) u8 generic; #define ICE_AQC_ELEM_GENERIC_MODE_M 0x1 #define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1 #define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S) #define ICE_AQC_ELEM_GENERIC_SP_S 0x4 #define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S) #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5 #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \ (0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S) u8 flags; /* Special field, reserved for some aq calls */ #define ICE_AQC_ELEM_FLAG_SUSPEND_M 0x1 struct ice_aqc_elem_info_bw cir_bw; struct ice_aqc_elem_info_bw eir_bw; __le16 
srl_id; __le16 reserved2; }; struct ice_aqc_txsched_elem_data { __le32 parent_teid; __le32 node_teid; struct ice_aqc_txsched_elem data; }; struct ice_aqc_txsched_topo_grp_info_hdr { __le32 parent_teid; __le16 num_elems; __le16 reserved2; }; struct ice_aqc_add_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; struct ice_aqc_txsched_elem_data generic[STRUCT_HACK_VAR_LEN]; }; struct ice_aqc_get_topo_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; struct ice_aqc_txsched_elem_data generic[ICE_AQC_TOPO_MAX_LEVEL_NUM]; }; struct ice_aqc_delete_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; __le32 teid[STRUCT_HACK_VAR_LEN]; }; /* Query Port ETS (indirect 0x040E) * * This indirect command is used to query port TC node configuration. */ struct ice_aqc_query_port_ets { __le32 port_teid; __le32 reserved; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_port_ets_elem { u8 tc_valid_bits; u8 reserved[3]; /* 3 bits for UP per TC 0-7, 4th byte reserved */ __le32 up2tc; u8 tc_bw_share[8]; __le32 port_eir_prof_id; __le32 port_cir_prof_id; /* 3 bits per Node priority to TC 0-7, 4th byte reserved */ __le32 tc_node_prio; #define ICE_TC_NODE_PRIO_S 0x4 u8 reserved1[4]; __le32 tc_node_teid[8]; /* Used for response, reserved in command */ }; /* Rate limiting profile for * Add RL profile (indirect 0x0410) * Query RL profile (indirect 0x0411) * Remove RL profile (indirect 0x0415) * These indirect commands act on single or multiple * RL profiles with specified data. */ struct ice_aqc_rl_profile { __le16 num_profiles; __le16 num_processed; /* Only for response. Reserved in command. */ u8 reserved[4]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_rl_profile_elem { u8 level; u8 flags; #define ICE_AQC_RL_PROFILE_TYPE_S 0x0 #define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S) #define ICE_AQC_RL_PROFILE_TYPE_CIR 0 #define ICE_AQC_RL_PROFILE_TYPE_EIR 1 #define ICE_AQC_RL_PROFILE_TYPE_SRL 2 /* The following flag is used for Query RL Profile Data */ #define ICE_AQC_RL_PROFILE_INVAL_S 0x7 #define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S) __le16 profile_id; __le16 max_burst_size; __le16 rl_multiply; __le16 wake_up_calc; __le16 rl_encode; }; /* Configure L2 Node CGD (indirect 0x0414) * This indirect command allows configuring a congestion domain for the given * L2 node TEIDs in the scheduler topology. */ struct ice_aqc_cfg_l2_node_cgd { __le16 num_l2_nodes; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_cfg_l2_node_cgd_elem { __le32 node_teid; u8 cgd; u8 reserved[3]; }; /* Query Scheduler Resource Allocation (indirect 0x0412) * This indirect command retrieves the scheduler resources allocated by * EMP Firmware to the given PF. */ struct ice_aqc_query_txsched_res { u8 reserved[8]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_generic_sched_props { __le16 phys_levels; __le16 logical_levels; u8 flattening_bitmap; u8 max_device_cgds; u8 max_pf_cgds; u8 rsvd0; __le16 rdma_qsets; u8 rsvd1[22]; }; struct ice_aqc_layer_props { u8 logical_layer; u8 chunk_size; __le16 max_device_nodes; __le16 max_pf_nodes; u8 rsvd0[4]; __le16 max_sibl_grp_sz; __le16 max_cir_rl_profiles; __le16 max_eir_rl_profiles; __le16 max_srl_profiles; u8 rsvd1[14]; }; struct ice_aqc_query_txsched_res_resp { struct ice_aqc_generic_sched_props sched_props; struct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM]; }; /* Query Node to Root Topology (indirect 0x0413) * This command uses ice_aqc_get_elem as its data buffer. 
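 *
 * A sketch of issuing the query (illustrative; ice_aq_send_cmd(), the
 * ice_aqc_opc_query_node_to_root opcode and the desc.params member name are
 * assumed driver-side details):
 *
 *	struct ice_aq_desc desc;
 *	struct ice_aqc_query_node_to_root *cmd = &desc.params.query_node_to_root;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
 *	cmd->teid = CPU_TO_LE32(node_teid);
 *	// 'buf' receives ice_aqc_txsched_elem_data entries; on return,
 *	// cmd->num_nodes says how many FW wrote
 *	ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);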
*/ struct ice_aqc_query_node_to_root { __le32 teid; __le32 num_nodes; /* Response only */ __le32 addr_high; __le32 addr_low; }; /* Get PHY capabilities (indirect 0x0600) */ struct ice_aqc_get_phy_caps { u8 lport_num; u8 reserved; __le16 param0; /* 18.0 - Report qualified modules */ #define ICE_AQC_GET_PHY_RQM BIT(0) - /* 18.1 - 18.2 : Report mode - * 00b - Report NVM capabilities - * 01b - Report topology capabilities - * 10b - Report SW configured + /* 18.1 - 18.3 : Report mode + * 000b - Report NVM capabilities + * 001b - Report topology capabilities + * 010b - Report SW configured + * 100b - Report default capabilities */ -#define ICE_AQC_REPORT_MODE_S 1 -#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) -#define ICE_AQC_REPORT_NVM_CAP 0 -#define ICE_AQC_REPORT_TOPO_CAP BIT(1) -#define ICE_AQC_REPORT_SW_CFG BIT(2) +#define ICE_AQC_REPORT_MODE_S 1 +#define ICE_AQC_REPORT_MODE_M (7 << ICE_AQC_REPORT_MODE_S) +#define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0 +#define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1) +#define ICE_AQC_REPORT_ACTIVE_CFG BIT(2) +#define ICE_AQC_REPORT_DFLT_CFG BIT(3) __le32 reserved1; __le32 addr_high; __le32 addr_low; }; /* This is #define of PHY type (Extended): * The first set of defines is for phy_type_low. */ #define ICE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0) #define ICE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1) #define ICE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2) #define ICE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3) #define ICE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4) #define ICE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5) #define ICE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6) #define ICE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7) #define ICE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8) #define ICE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9) #define ICE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10) #define ICE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11) #define ICE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12) #define ICE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13) #define ICE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14) #define ICE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15) #define ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16) #define ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17) #define ICE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18) #define ICE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19) #define ICE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20) #define ICE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21) #define ICE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22) #define ICE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23) #define ICE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24) #define ICE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25) #define ICE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26) #define ICE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27) #define ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28) #define ICE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29) #define ICE_PHY_TYPE_LOW_40GBASE_CR4 BIT_ULL(30) #define ICE_PHY_TYPE_LOW_40GBASE_SR4 BIT_ULL(31) #define ICE_PHY_TYPE_LOW_40GBASE_LR4 BIT_ULL(32) #define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33) #define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34) #define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35) #define ICE_PHY_TYPE_LOW_50GBASE_CR2 BIT_ULL(36) #define ICE_PHY_TYPE_LOW_50GBASE_SR2 BIT_ULL(37) #define ICE_PHY_TYPE_LOW_50GBASE_LR2 BIT_ULL(38) #define ICE_PHY_TYPE_LOW_50GBASE_KR2 BIT_ULL(39) #define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC BIT_ULL(40) #define ICE_PHY_TYPE_LOW_50G_LAUI2 BIT_ULL(41) #define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC BIT_ULL(42) #define ICE_PHY_TYPE_LOW_50G_AUI2 BIT_ULL(43) #define ICE_PHY_TYPE_LOW_50GBASE_CP BIT_ULL(44) #define ICE_PHY_TYPE_LOW_50GBASE_SR BIT_ULL(45) #define ICE_PHY_TYPE_LOW_50GBASE_FR BIT_ULL(46) #define 
ICE_PHY_TYPE_LOW_50GBASE_LR BIT_ULL(47) #define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 BIT_ULL(48) #define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC BIT_ULL(49) #define ICE_PHY_TYPE_LOW_50G_AUI1 BIT_ULL(50) #define ICE_PHY_TYPE_LOW_100GBASE_CR4 BIT_ULL(51) #define ICE_PHY_TYPE_LOW_100GBASE_SR4 BIT_ULL(52) #define ICE_PHY_TYPE_LOW_100GBASE_LR4 BIT_ULL(53) #define ICE_PHY_TYPE_LOW_100GBASE_KR4 BIT_ULL(54) #define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC BIT_ULL(55) #define ICE_PHY_TYPE_LOW_100G_CAUI4 BIT_ULL(56) #define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC BIT_ULL(57) #define ICE_PHY_TYPE_LOW_100G_AUI4 BIT_ULL(58) #define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 BIT_ULL(59) #define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 BIT_ULL(60) #define ICE_PHY_TYPE_LOW_100GBASE_CP2 BIT_ULL(61) #define ICE_PHY_TYPE_LOW_100GBASE_SR2 BIT_ULL(62) #define ICE_PHY_TYPE_LOW_100GBASE_DR BIT_ULL(63) #define ICE_PHY_TYPE_LOW_MAX_INDEX 63 /* The second set of defines is for phy_type_high. */ #define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0) #define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1) #define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2) #define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3) #define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4) #define ICE_PHY_TYPE_HIGH_MAX_INDEX 5 struct ice_aqc_get_phy_caps_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ u8 caps; #define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0) #define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1) #define ICE_AQC_PHY_LOW_POWER_MODE BIT(2) #define ICE_AQC_PHY_EN_LINK BIT(3) #define ICE_AQC_PHY_AN_MODE BIT(4) #define ICE_AQC_PHY_EN_MOD_QUAL BIT(5) #define ICE_AQC_PHY_EN_LESM BIT(6) #define ICE_AQC_PHY_EN_AUTO_FEC BIT(7) #define ICE_AQC_PHY_CAPS_MASK MAKEMASK(0xff, 0) u8 low_power_ctrl_an; #define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) #define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1) #define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2) #define ICE_AQC_PHY_AN_EN_CLAUSE37 BIT(3) __le16 eee_cap; #define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0) #define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1) #define ICE_AQC_PHY_EEE_EN_10GBASE_T BIT(2) #define ICE_AQC_PHY_EEE_EN_1000BASE_KX BIT(3) #define ICE_AQC_PHY_EEE_EN_10GBASE_KR BIT(4) #define ICE_AQC_PHY_EEE_EN_25GBASE_KR BIT(5) #define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6) #define ICE_AQC_PHY_EEE_EN_50GBASE_KR2 BIT(7) #define ICE_AQC_PHY_EEE_EN_50GBASE_KR_PAM4 BIT(8) #define ICE_AQC_PHY_EEE_EN_100GBASE_KR4 BIT(9) #define ICE_AQC_PHY_EEE_EN_100GBASE_KR2_PAM4 BIT(10) __le16 eeer_value; u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */ u8 phy_fw_ver[8]; u8 link_fec_options; #define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0) #define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1) #define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2) #define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3) #define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4) #define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) #define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) #define ICE_AQC_PHY_FEC_MASK MAKEMASK(0xdf, 0) u8 module_compliance_enforcement; #define ICE_AQC_MOD_ENFORCE_STRICT_MODE BIT(0) u8 extended_compliance_code; #define ICE_MODULE_TYPE_TOTAL_BYTE 3 u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; #define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0 #define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80 #define ICE_AQC_MOD_TYPE_IDENT 1 #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0) #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1) #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4) #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5) #define 
ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6) #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7) #define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0 #define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86 u8 qualified_module_count; u8 rsvd2[7]; /* Bytes 47:41 reserved */ #define ICE_AQC_QUAL_MOD_COUNT_MAX 16 struct { u8 v_oui[3]; u8 rsvd3; u8 v_part[16]; __le32 v_rev; __le64 rsvd4; } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX]; }; /* Set PHY capabilities (direct 0x0601) * NOTE: This command must be followed by setup link and restart auto-neg */ struct ice_aqc_set_phy_cfg { u8 lport_num; u8 reserved[7]; __le32 addr_high; __le32 addr_low; }; /* Set PHY config command data structure */ struct ice_aqc_set_phy_cfg_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ u8 caps; #define ICE_AQ_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0) #define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) #define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) #define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) #define ICE_AQ_PHY_ENA_LINK BIT(3) #define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) #define ICE_AQ_PHY_ENA_LESM BIT(6) #define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7) u8 low_power_ctrl_an; __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ __le16 eeer_value; u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */ u8 module_compliance_enforcement; }; /* Set MAC Config command data structure (direct 0x0603) */ struct ice_aqc_set_mac_cfg { __le16 max_frame_size; u8 params; #define ICE_AQ_SET_MAC_PACE_S 3 #define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S) #define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7) #define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0 #define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M u8 tx_tmr_priority; __le16 tx_tmr_value; __le16 fc_refresh_threshold; u8 drop_opts; #define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0) #define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0 #define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0) u8 reserved[7]; }; /* Restart AN command data structure (direct 0x0605) * Also used for response, with only the lport_num field present. 
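 *
 * A sketch of restarting autonegotiation with link enabled (illustrative;
 * the ice_aqc_opc_restart_an opcode and desc.params.restart_an member are
 * assumed from the driver, and 'pi' is an assumed port-info context):
 *
 *	struct ice_aq_desc desc;
 *	struct ice_aqc_restart_an *cmd = &desc.params.restart_an;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
 *	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART |
 *			 ICE_AQC_RESTART_AN_LINK_ENABLE;
 *	cmd->lport_num = pi->lport;
 *	ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);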
*/ struct ice_aqc_restart_an { u8 lport_num; u8 reserved; u8 cmd_flags; #define ICE_AQC_RESTART_AN_LINK_RESTART BIT(1) #define ICE_AQC_RESTART_AN_LINK_ENABLE BIT(2) u8 reserved2[13]; }; /* Get link status (indirect 0x0607), also used for Link Status Event */ struct ice_aqc_get_link_status { u8 lport_num; u8 reserved; __le16 cmd_flags; #define ICE_AQ_LSE_M 0x3 #define ICE_AQ_LSE_NOP 0x0 #define ICE_AQ_LSE_DIS 0x2 #define ICE_AQ_LSE_ENA 0x3 /* only response uses this flag */ #define ICE_AQ_LSE_IS_ENABLED 0x1 __le32 reserved2; __le32 addr_high; __le32 addr_low; }; /* Get link status response data structure, also used for Link Status Event */ struct ice_aqc_get_link_status_data { u8 topo_media_conflict; #define ICE_AQ_LINK_TOPO_CONFLICT BIT(0) #define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1) #define ICE_AQ_LINK_TOPO_CORRUPT BIT(2) #define ICE_AQ_LINK_TOPO_UNREACH_PRT BIT(4) #define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5) #define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6) #define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7) u8 link_cfg_err; -#define ICE_AQ_LINK_CFG_ERR BIT(0) -#define ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2) +#define ICE_AQ_LINK_CFG_ERR BIT(0) +#define ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2) #define ICE_AQ_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3) #define ICE_AQ_LINK_TOPO_CRITICAL_SDP_ERR BIT(4) #define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5) +#define ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6) +#define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT BIT(7) u8 link_info; #define ICE_AQ_LINK_UP BIT(0) /* Link Status */ #define ICE_AQ_LINK_FAULT BIT(1) #define ICE_AQ_LINK_FAULT_TX BIT(2) #define ICE_AQ_LINK_FAULT_RX BIT(3) #define ICE_AQ_LINK_FAULT_REMOTE BIT(4) #define ICE_AQ_LINK_UP_PORT BIT(5) /* External Port Link Status */ #define ICE_AQ_MEDIA_AVAILABLE BIT(6) #define ICE_AQ_SIGNAL_DETECT BIT(7) u8 an_info; #define ICE_AQ_AN_COMPLETED BIT(0) #define ICE_AQ_LP_AN_ABILITY BIT(1) #define ICE_AQ_PD_FAULT BIT(2) /* Parallel Detection Fault */ #define ICE_AQ_FEC_EN BIT(3) #define ICE_AQ_PHY_LOW_POWER BIT(4) /* Low Power State */ #define ICE_AQ_LINK_PAUSE_TX BIT(5) #define ICE_AQ_LINK_PAUSE_RX BIT(6) #define ICE_AQ_QUALIFIED_MODULE BIT(7) u8 ext_info; #define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0) #define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */ /* Port Tx Suspended */ #define ICE_AQ_LINK_TX_S 2 #define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S) #define ICE_AQ_LINK_TX_ACTIVE 0 #define ICE_AQ_LINK_TX_DRAINED 1 #define ICE_AQ_LINK_TX_FLUSHED 3 u8 lb_status; #define ICE_AQ_LINK_LB_PHY_LCL BIT(0) #define ICE_AQ_LINK_LB_PHY_RMT BIT(1) #define ICE_AQ_LINK_LB_MAC_LCL BIT(2) #define ICE_AQ_LINK_LB_PHY_IDX_S 3 #define ICE_AQ_LINK_LB_PHY_IDX_M (0x7 << ICE_AQ_LINK_LB_PHY_IDX_S) __le16 max_frame_size; u8 cfg; #define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0) #define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1) #define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2) #define ICE_AQ_FEC_MASK MAKEMASK(0x7, 0) /* Pacing Config */ #define ICE_AQ_CFG_PACING_S 3 #define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S) #define ICE_AQ_CFG_PACING_TYPE_M BIT(7) #define ICE_AQ_CFG_PACING_TYPE_AVG 0 #define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M /* External Device Power Ability */ u8 power_desc; #define ICE_AQ_PWR_CLASS_M 0x3F #define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0 #define ICE_AQ_LINK_PWR_BASET_HIGH 1 #define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0 #define ICE_AQ_LINK_PWR_QSFP_CLASS_2 1 #define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2 #define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3 __le16 link_speed; #define ICE_AQ_LINK_SPEED_M 0x7FF #define ICE_AQ_LINK_SPEED_10MB 
BIT(0) #define ICE_AQ_LINK_SPEED_100MB BIT(1) #define ICE_AQ_LINK_SPEED_1000MB BIT(2) #define ICE_AQ_LINK_SPEED_2500MB BIT(3) #define ICE_AQ_LINK_SPEED_5GB BIT(4) #define ICE_AQ_LINK_SPEED_10GB BIT(5) #define ICE_AQ_LINK_SPEED_20GB BIT(6) #define ICE_AQ_LINK_SPEED_25GB BIT(7) #define ICE_AQ_LINK_SPEED_40GB BIT(8) #define ICE_AQ_LINK_SPEED_50GB BIT(9) #define ICE_AQ_LINK_SPEED_100GB BIT(10) #define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15) __le32 reserved3; /* Aligns next field to 8-byte boundary */ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ }; /* Set event mask command (direct 0x0613) */ struct ice_aqc_set_event_mask { u8 lport_num; u8 reserved[7]; __le16 event_mask; #define ICE_AQ_LINK_EVENT_UPDOWN BIT(1) #define ICE_AQ_LINK_EVENT_MEDIA_NA BIT(2) #define ICE_AQ_LINK_EVENT_LINK_FAULT BIT(3) #define ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM BIT(4) #define ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS BIT(5) #define ICE_AQ_LINK_EVENT_SIGNAL_DETECT BIT(6) #define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7) #define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8) #define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9) #define ICE_AQ_LINK_EVENT_TOPO_CONFLICT BIT(10) #define ICE_AQ_LINK_EVENT_MEDIA_CONFLICT BIT(11) u8 reserved1[6]; }; /* Set PHY Loopback command (direct 0x0619) */ struct ice_aqc_set_phy_lb { u8 lport_num; u8 lport_num_valid; #define ICE_AQ_PHY_LB_PORT_NUM_VALID BIT(0) u8 phy_index; u8 lb_mode; #define ICE_AQ_PHY_LB_EN BIT(0) #define ICE_AQ_PHY_LB_TYPE_M BIT(1) #define ICE_AQ_PHY_LB_TYPE_LOCAL 0 #define ICE_AQ_PHY_LB_TYPE_REMOTE ICE_AQ_PHY_LB_TYPE_M #define ICE_AQ_PHY_LB_LEVEL_M BIT(2) #define ICE_AQ_PHY_LB_LEVEL_PMD 0 #define ICE_AQ_PHY_LB_LEVEL_PCS ICE_AQ_PHY_LB_LEVEL_M u8 reserved2[12]; }; /* Set MAC Loopback command (direct 0x0620) */ struct ice_aqc_set_mac_lb { u8 lb_mode; #define ICE_AQ_MAC_LB_EN BIT(0) #define ICE_AQ_MAC_LB_OSC_CLK BIT(1) u8 reserved[15]; }; /* DNL Get Status command (indirect 0x0680) * Structure used for the response, the command uses the generic * ice_aqc_generic struct to pass a buffer address to the FW. 
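 *
 * A sketch of the call (illustrative; the ice_aqc_opc_dnl_get_status opcode
 * and helpers are assumed from the driver):
 *
 *	struct ice_aq_desc desc;
 *	struct ice_aqc_dnl_get_status_data buf;	// FW writes this buffer
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_get_status);
 *	ice_aq_send_cmd(hw, &desc, &buf, sizeof(buf), NULL);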
*/ struct ice_aqc_dnl_get_status { u8 ctx; u8 status; #define ICE_AQ_DNL_STATUS_IDLE 0x0 #define ICE_AQ_DNL_STATUS_RESERVED 0x1 #define ICE_AQ_DNL_STATUS_STOPPED 0x2 #define ICE_AQ_DNL_STATUS_FATAL 0x3 /* Fatal DNL engine error */ #define ICE_AQ_DNL_SRC_S 3 #define ICE_AQ_DNL_SRC_M (0x3 << ICE_AQ_DNL_SRC_S) #define ICE_AQ_DNL_SRC_NVM (0x0 << ICE_AQ_DNL_SRC_S) #define ICE_AQ_DNL_SRC_NVM_SCRATCH (0x1 << ICE_AQ_DNL_SRC_S) u8 stack_ptr; #define ICE_AQ_DNL_ST_PTR_S 0x0 #define ICE_AQ_DNL_ST_PTR_M (0x7 << ICE_AQ_DNL_ST_PTR_S) u8 engine_flags; #define ICE_AQ_DNL_FLAGS_ERROR BIT(2) #define ICE_AQ_DNL_FLAGS_NEGATIVE BIT(3) #define ICE_AQ_DNL_FLAGS_OVERFLOW BIT(4) #define ICE_AQ_DNL_FLAGS_ZERO BIT(5) #define ICE_AQ_DNL_FLAGS_CARRY BIT(6) #define ICE_AQ_DNL_FLAGS_JUMP BIT(7) __le16 pc; __le16 activity_id; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_dnl_get_status_data { __le16 activity_err_code; __le16 act_err_code; #define ICE_AQ_DNL_ACT_ERR_SUCCESS 0x0000 /* no error */ #define ICE_AQ_DNL_ACT_ERR_PARSE 0x8001 /* NVM parse error */ #define ICE_AQ_DNL_ACT_ERR_UNSUPPORTED 0x8002 /* unsupported action */ #define ICE_AQ_DNL_ACT_ERR_NOT_FOUND 0x8003 /* activity not found */ #define ICE_AQ_DNL_ACT_ERR_BAD_JUMP 0x8004 /* an illegal jump */ #define ICE_AQ_DNL_ACT_ERR_PSTO_OVER 0x8005 /* persistent store overflow */ #define ICE_AQ_DNL_ACT_ERR_ST_OVERFLOW 0x8006 /* stack overflow */ #define ICE_AQ_DNL_ACT_ERR_TIMEOUT 0x8007 /* activity timeout */ #define ICE_AQ_DNL_ACT_ERR_BREAK 0x0008 /* stopped at breakpoint */ #define ICE_AQ_DNL_ACT_ERR_INVAL_ARG 0x0101 /* invalid action argument */ __le32 execution_time; /* in nanoseconds */ __le16 lib_ver; u8 psto_local_sz; u8 psto_global_sz; u8 stack_sz; #define ICE_AQ_DNL_STACK_SZ_S 0 #define ICE_AQ_DNL_STACK_SZ_M (0xF << ICE_AQ_DNL_STACK_SZ_S) u8 port_count; #define ICE_AQ_DNL_PORT_CNT_S 0 #define ICE_AQ_DNL_PORT_CNT_M (0x1F << ICE_AQ_DNL_PORT_CNT_S) __le16 act_cache_cntr; u32 i2c_clk_cntr; u32 mdio_clk_cntr; u32 sb_iosf_clk_cntr; }; /* DNL run command (direct 0x0681) */ struct ice_aqc_dnl_run_command { u8 reserved0; u8 command; #define ICE_AQ_DNL_CMD_S 0 #define ICE_AQ_DNL_CMD_M (0x7 << ICE_AQ_DNL_CMD_S) #define ICE_AQ_DNL_CMD_RESET 0x0 #define ICE_AQ_DNL_CMD_RUN 0x1 #define ICE_AQ_DNL_CMD_STEP 0x3 #define ICE_AQ_DNL_CMD_ABORT 0x4 #define ICE_AQ_DNL_CMD_SET_PC 0x7 #define ICE_AQ_DNL_CMD_SRC_S 3 #define ICE_AQ_DNL_CMD_SRC_M (0x3 << ICE_AQ_DNL_CMD_SRC_S) #define ICE_AQ_DNL_CMD_SRC_DNL 0x0 #define ICE_AQ_DNL_CMD_SRC_SCRATCH 0x1 __le16 new_pc; u8 reserved1[12]; }; /* DNL call command (indirect 0x0682) * Struct is used for both command and response */ struct ice_aqc_dnl_call_command { u8 ctx; /* Used in command, reserved in response */ u8 reserved; __le16 activity_id; __le32 reserved1; __le32 addr_high; __le32 addr_low; }; /* DNL call command/response buffer (indirect 0x0682) */ struct ice_aqc_dnl_call { __le32 stores[4]; }; /* Used for both commands: * DNL read sto command (indirect 0x0683) * DNL write sto command (indirect 0x0684) */ struct ice_aqc_dnl_read_write_command { u8 ctx; u8 sto_sel; /* STORE select */ #define ICE_AQC_DNL_STORE_SELECT_STORE 0x0 #define ICE_AQC_DNL_STORE_SELECT_PSTO 0x1 #define ICE_AQC_DNL_STORE_SELECT_STACK 0x2 __le16 offset; __le32 data; /* Used for write sto only */ __le32 addr_high; /* Used for read sto only */ __le32 addr_low; /* Used for read sto only */ }; /* Used for both command responses: * DNL read sto response (indirect 0x0683) * DNL write sto response (indirect 0x0684) */ struct ice_aqc_dnl_read_write_response { u8 
reserved; u8 status; /* Reserved for read command */ __le16 size; /* Reserved for write command */ __le32 data; /* Reserved for write command */ __le32 addr_high; /* Reserved for write command */ __le32 addr_low; /* Reserved for write command */ }; /* DNL set breakpoints command (indirect 0x0686) */ struct ice_aqc_dnl_set_breakpoints_command { __le32 reserved[2]; __le32 addr_high; __le32 addr_low; }; /* DNL set breakpoints data buffer structure (indirect 0x0686) */ struct ice_aqc_dnl_set_breakpoints { u8 ctx; u8 ena; /* 0- disabled, 1- enabled */ __le16 offset; __le16 activity_id; }; /* DNL read log data command(indirect 0x0687) */ struct ice_aqc_dnl_read_log_command { __le16 reserved0; __le16 offset; __le32 reserved1; __le32 addr_high; __le32 addr_low; }; /* DNL read log data response(indirect 0x0687) */ struct ice_aqc_dnl_read_log_response { __le16 reserved; __le16 size; __le32 data; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_link_topo_addr { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0) u8 node_type_ctx; #define ICE_AQC_LINK_TOPO_NODE_TYPE_S 0 #define ICE_AQC_LINK_TOPO_NODE_TYPE_M (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S) #define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY 0 #define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1 #define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL 2 #define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL 3 #define ICE_AQC_LINK_TOPO_NODE_TYPE_LED 4 #define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL 5 #define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6 #define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7 #define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 #define ICE_AQC_LINK_TOPO_NODE_CTX_S 4 #define ICE_AQC_LINK_TOPO_NODE_CTX_M \ (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S) #define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL 0 #define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD 1 #define ICE_AQC_LINK_TOPO_NODE_CTX_PORT 2 #define ICE_AQC_LINK_TOPO_NODE_CTX_NODE 3 #define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4 #define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5 u8 index; __le16 handle; #define ICE_AQC_LINK_TOPO_HANDLE_S 0 #define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) /* Used to decode the handle field */ #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 #define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0 /* In case of a Mezzanine type */ #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \ (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S 6 #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S) /* In case of a LOM type */ #define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M \ (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) }; /* Get Link Topology Handle (direct, 0x06E0) */ struct ice_aqc_get_link_topo { struct ice_aqc_link_topo_addr addr; u8 node_part_num; u8 rsvd[9]; }; /* Get Link Topology Pin (direct, 0x06E1) */ struct ice_aqc_get_link_topo_pin { struct ice_aqc_link_topo_addr addr; u8 input_io_params; #define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S 0 #define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_M \ (0x1F << ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S) #define ICE_AQC_LINK_TOPO_IO_FUNC_GPIO 0 #define ICE_AQC_LINK_TOPO_IO_FUNC_RESET_N 1 #define ICE_AQC_LINK_TOPO_IO_FUNC_INT_N 2 #define ICE_AQC_LINK_TOPO_IO_FUNC_PRESENT_N 3 #define ICE_AQC_LINK_TOPO_IO_FUNC_TX_DIS 4 #define ICE_AQC_LINK_TOPO_IO_FUNC_MODSEL_N 5 #define ICE_AQC_LINK_TOPO_IO_FUNC_LPMODE 6 #define ICE_AQC_LINK_TOPO_IO_FUNC_TX_FAULT 7 #define ICE_AQC_LINK_TOPO_IO_FUNC_RX_LOSS 8 #define ICE_AQC_LINK_TOPO_IO_FUNC_RS0 9 
#define ICE_AQC_LINK_TOPO_IO_FUNC_RS1 10 #define ICE_AQC_LINK_TOPO_IO_FUNC_EEPROM_WP 11 /* 12 repeats intentionally due to two different uses depending on context */ #define ICE_AQC_LINK_TOPO_IO_FUNC_LED 12 #define ICE_AQC_LINK_TOPO_IO_FUNC_RED_LED 12 #define ICE_AQC_LINK_TOPO_IO_FUNC_GREEN_LED 13 #define ICE_AQC_LINK_TOPO_IO_FUNC_BLUE_LED 14 #define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S 5 #define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_M \ (0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S) /* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */ u8 output_io_params; #define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_S 0 #define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_M \ (0x1F << \ ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_S) /* Use ICE_AQC_LINK_TOPO_IO_FUNC_* for the non-numerical options */ #define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_S 5 #define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_M \ (0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S) /* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */ u8 output_io_flags; #define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S 0 #define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_M \ (0x7 << ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S) #define ICE_AQC_LINK_TOPO_OUTPUT_INT_S 3 #define ICE_AQC_LINK_TOPO_OUTPUT_INT_M \ (0x3 << ICE_AQC_LINK_TOPO_OUTPUT_INT_S) #define ICE_AQC_LINK_TOPO_OUTPUT_POLARITY BIT(5) #define ICE_AQC_LINK_TOPO_OUTPUT_VALUE BIT(6) #define ICE_AQC_LINK_TOPO_OUTPUT_DRIVEN BIT(7) u8 rsvd[7]; }; /* Read/Write I2C (direct, 0x06E2/0x06E3) */ struct ice_aqc_i2c { struct ice_aqc_link_topo_addr topo_addr; __le16 i2c_addr; u8 i2c_params; #define ICE_AQC_I2C_DATA_SIZE_S 0 #define ICE_AQC_I2C_DATA_SIZE_M (0xF << ICE_AQC_I2C_DATA_SIZE_S) #define ICE_AQC_I2C_ADDR_TYPE_M BIT(4) #define ICE_AQC_I2C_ADDR_TYPE_7BIT 0 #define ICE_AQC_I2C_ADDR_TYPE_10BIT ICE_AQC_I2C_ADDR_TYPE_M #define ICE_AQC_I2C_DATA_OFFSET_S 5 #define ICE_AQC_I2C_DATA_OFFSET_M (0x3 << ICE_AQC_I2C_DATA_OFFSET_S) #define ICE_AQC_I2C_USE_REPEATED_START BIT(7) u8 rsvd; __le16 i2c_bus_addr; #define ICE_AQC_I2C_ADDR_7BIT_MASK 0x7F #define ICE_AQC_I2C_ADDR_10BIT_MASK 0x3FF u8 i2c_data[4]; /* Used only by write command, reserved in read. */ }; /* Read I2C Response (direct, 0x06E2) */ struct ice_aqc_read_i2c_resp { u8 i2c_data[16]; }; /* Read/Write MDIO (direct, 0x06E4/0x06E5) */ struct ice_aqc_mdio { struct ice_aqc_link_topo_addr topo_addr; u8 mdio_device_addr; #define ICE_AQC_MDIO_DEV_S 0 #define ICE_AQC_MDIO_DEV_M (0x1F << ICE_AQC_MDIO_DEV_S) #define ICE_AQC_MDIO_CLAUSE_22 BIT(5) #define ICE_AQC_MDIO_CLAUSE_45 BIT(6) u8 mdio_bus_address; #define ICE_AQC_MDIO_BUS_ADDR_S 0 #define ICE_AQC_MDIO_BUS_ADDR_M (0x1F << ICE_AQC_MDIO_BUS_ADDR_S) __le16 offset; __le16 data; /* Input in write cmd, output in read cmd. */ u8 rsvd1[4]; }; /* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */ struct ice_aqc_gpio_by_func { struct ice_aqc_link_topo_addr topo_addr; u8 io_func_num; #define ICE_AQC_GPIO_FUNC_S 0 #define ICE_AQC_GPIO_FUNC_M (0x1F << ICE_AQC_GPIO_FUNC_S) u8 io_value; /* Input in write cmd, output in read cmd.
*/ #define ICE_AQC_GPIO_ON BIT(0) #define ICE_AQC_GPIO_OFF 0 u8 rsvd[8]; }; /* Set LED (direct, 0x06E8) */ struct ice_aqc_set_led { struct ice_aqc_link_topo_addr topo_addr; u8 color_and_blink; #define ICE_AQC_LED_COLOR_S 0 #define ICE_AQC_LED_COLOR_M (0x7 << ICE_AQC_LED_COLOR_S) #define ICE_AQC_LED_COLOR_SKIP 0 #define ICE_AQC_LED_COLOR_RED 1 #define ICE_AQC_LED_COLOR_ORANGE 2 #define ICE_AQC_LED_COLOR_YELLOW 3 #define ICE_AQC_LED_COLOR_GREEN 4 #define ICE_AQC_LED_COLOR_BLUE 5 #define ICE_AQC_LED_COLOR_PURPLE 6 #define ICE_AQC_LED_BLINK_S 3 #define ICE_AQC_LED_BLINK_M (0x7 << ICE_AQC_LED_BLINK_S) #define ICE_AQC_LED_BLINK_NONE 0 #define ICE_AQC_LED_BLINK_SLOW 1 #define ICE_AQC_LED_BLINK_SLOW_MAC 2 #define ICE_AQC_LED_BLINK_SLOW_FLTR 3 #define ICE_AQC_LED_BLINK_FAST 5 #define ICE_AQC_LED_BLINK_FAST_MAC 6 #define ICE_AQC_LED_BLINK_FAST_FLTR 7 u8 rsvd[9]; }; /* Set Port Identification LED (direct, 0x06E9) */ struct ice_aqc_set_port_id_led { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_PORT_ID_PORT_NUM_VALID BIT(0) u8 ident_mode; #define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0) #define ICE_AQC_PORT_IDENT_LED_ORIG 0 u8 rsvd[13]; }; /* Get Port Options (indirect, 0x06EA) */ struct ice_aqc_get_port_options { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_PORT_OPT_PORT_NUM_VALID BIT(0) u8 port_options_count; #define ICE_AQC_PORT_OPT_COUNT_S 0 #define ICE_AQC_PORT_OPT_COUNT_M (0xF << ICE_AQC_PORT_OPT_COUNT_S) u8 innermost_phy_index; u8 port_options; #define ICE_AQC_PORT_OPT_ACTIVE_S 0 #define ICE_AQC_PORT_OPT_ACTIVE_M (0xF << ICE_AQC_PORT_OPT_ACTIVE_S) #define ICE_AQC_PORT_OPT_FORCED BIT(6) #define ICE_AQC_PORT_OPT_VALID BIT(7) - u8 rsvd[3]; + u8 pending_port_option_status; +#define ICE_AQC_PENDING_PORT_OPT_IDX_S 0 +#define ICE_AQC_PENDING_PORT_OPT_IDX_M (0xF << ICE_AQC_PENDING_PORT_OPT_IDX_S) +#define ICE_AQC_PENDING_PORT_OPT_VALID BIT(7) + u8 rsvd[2]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_get_port_options_elem { u8 pmd; #define ICE_AQC_PORT_INV_PORT_OPT 4 #define ICE_AQC_PORT_OPT_PMD_COUNT_S 0 #define ICE_AQC_PORT_OPT_PMD_COUNT_M (0xF << ICE_AQC_PORT_OPT_PMD_COUNT_S) #define ICE_AQC_PORT_OPT_PMD_WIDTH_S 4 #define ICE_AQC_PORT_OPT_PMD_WIDTH_M (0xF << ICE_AQC_PORT_OPT_PMD_WIDTH_S) u8 max_lane_speed; #define ICE_AQC_PORT_OPT_MAX_LANE_S 0 #define ICE_AQC_PORT_OPT_MAX_LANE_M (0xF << ICE_AQC_PORT_OPT_MAX_LANE_S) #define ICE_AQC_PORT_OPT_MAX_LANE_100M 0 #define ICE_AQC_PORT_OPT_MAX_LANE_1G 1 #define ICE_AQC_PORT_OPT_MAX_LANE_2500M 2 #define ICE_AQC_PORT_OPT_MAX_LANE_5G 3 #define ICE_AQC_PORT_OPT_MAX_LANE_10G 4 #define ICE_AQC_PORT_OPT_MAX_LANE_25G 5 #define ICE_AQC_PORT_OPT_MAX_LANE_50G 6 #define ICE_AQC_PORT_OPT_MAX_LANE_100G 7 u8 global_scid[2]; u8 phy_scid[2]; + u8 pf2port_cid[2]; }; /* Set Port Option (direct, 0x06EB) */ struct ice_aqc_set_port_option { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_SET_PORT_OPT_PORT_NUM_VALID BIT(0) u8 selected_port_option; u8 rsvd[13]; }; /* Set/Get GPIO (direct, 0x06EC/0x06ED) */ struct ice_aqc_gpio { __le16 gpio_ctrl_handle; #define ICE_AQC_GPIO_HANDLE_S 0 #define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S) u8 gpio_num; u8 gpio_val; u8 rsvd[12]; }; /* Read/Write SFF EEPROM command (indirect 0x06EE) */ struct ice_aqc_sff_eeprom { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_SFF_PORT_NUM_VALID BIT(0) __le16 i2c_bus_addr; #define ICE_AQC_SFF_I2CBUS_7BIT_M 0x7F #define ICE_AQC_SFF_I2CBUS_10BIT_M 0x3FF #define ICE_AQC_SFF_I2CBUS_TYPE_M BIT(10) #define ICE_AQC_SFF_I2CBUS_TYPE_7BIT 0 #define ICE_AQC_SFF_I2CBUS_TYPE_10BIT 
ICE_AQC_SFF_I2CBUS_TYPE_M #define ICE_AQC_SFF_SET_EEPROM_PAGE_S 11 #define ICE_AQC_SFF_SET_EEPROM_PAGE_M (0x3 << ICE_AQC_SFF_SET_EEPROM_PAGE_S) #define ICE_AQC_SFF_NO_PAGE_CHANGE 0 #define ICE_AQC_SFF_SET_23_ON_MISMATCH 1 #define ICE_AQC_SFF_SET_22_ON_MISMATCH 2 #define ICE_AQC_SFF_IS_WRITE BIT(15) __le16 i2c_mem_addr; __le16 eeprom_page; #define ICE_AQC_SFF_EEPROM_BANK_S 0 #define ICE_AQC_SFF_EEPROM_BANK_M (0xFF << ICE_AQC_SFF_EEPROM_BANK_S) #define ICE_AQC_SFF_EEPROM_PAGE_S 8 #define ICE_AQC_SFF_EEPROM_PAGE_M (0xFF << ICE_AQC_SFF_EEPROM_PAGE_S) __le32 addr_high; __le32 addr_low; }; /* SW Set GPIO command (indirect 0x6EF) * SW Get GPIO command (indirect 0x6F0) */ struct ice_aqc_sw_gpio { __le16 gpio_ctrl_handle; #define ICE_AQC_SW_GPIO_CONTROLLER_HANDLE_S 0 #define ICE_AQC_SW_GPIO_CONTROLLER_HANDLE_M (0x3FF << ICE_AQC_SW_GPIO_CONTROLLER_HANDLE_S) u8 gpio_num; #define ICE_AQC_SW_GPIO_NUMBER_S 0 #define ICE_AQC_SW_GPIO_NUMBER_M (0x1F << ICE_AQC_SW_GPIO_NUMBER_S) u8 gpio_params; #define ICE_AQC_SW_GPIO_PARAMS_DIRECTION BIT(1) #define ICE_AQC_SW_GPIO_PARAMS_VALUE BIT(0) u8 rsvd[12]; }; +/* Program topology device NVM (direct, 0x06F2) */ +struct ice_aqc_program_topology_device_nvm { + u8 lport_num; + u8 lport_num_valid; + u8 node_type_ctx; + u8 index; + u8 rsvd[12]; +}; + +/* Read topology device NVM (indirect, 0x06F3) */ +struct ice_aqc_read_topology_device_nvm { + u8 lport_num; + u8 lport_num_valid; + u8 node_type_ctx; + u8 index; + __le32 start_address; + u8 data_read[8]; +}; + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Write commands (indirect 0x0703) * NVM Write Activate commands (direct 0x0707) * NVM Shadow RAM Dump commands (direct 0x0707) */ struct ice_aqc_nvm { #define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF __le16 offset_low; u8 offset_high; /* For Write Activate offset_high is used as flags2 */ u8 cmd_flags; #define ICE_AQC_NVM_LAST_CMD BIT(0) #define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */ #define ICE_AQC_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */ #define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_ALL BIT(1) #define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */ #define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4) #define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5) #define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6) #define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */ #define ICE_AQC_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) #define ICE_AQC_NVM_POR_FLAG 0 /* Used by NVM Write completion on ARQ */ #define ICE_AQC_NVM_PERST_FLAG 1 #define ICE_AQC_NVM_EMPR_FLAG 2 +#define ICE_AQC_NVM_EMPR_ENA BIT(0) __le16 module_typeid; __le16 length; #define ICE_AQC_NVM_ERASE_LEN 0xFFFF __le32 addr_high; __le32 addr_low; };
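/* Flash offsets in struct ice_aqc_nvm are 24 bits wide, split across
 * offset_low and offset_high. A minimal sketch of filling them follows;
 * the helper name is illustrative only, and CPU_TO_LE16 is assumed to be
 * the byte-order helper provided by the osdep layer.
 */
static inline void
ice_aqc_nvm_fill_offset(struct ice_aqc_nvm *cmd, u32 offset)
{
	/* offset must not exceed ICE_AQC_NVM_MAX_OFFSET (24 bits) */
	cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
	cmd->offset_high = (offset >> 16) & 0xFF;
}

/* NVM Module_Type ID, needed offset and read_len for struct ice_aqc_nvm.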
*/ #define ICE_AQC_NVM_SECTOR_UNIT 4096 /* In Bytes */ #define ICE_AQC_NVM_WORD_UNIT 2 /* In Bytes */ #define ICE_AQC_NVM_START_POINT 0 #define ICE_AQC_NVM_EMP_SR_PTR_OFFSET 0x90 #define ICE_AQC_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */ #define ICE_AQC_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0) #define ICE_AQC_NVM_EMP_SR_PTR_TYPE_S 15 #define ICE_AQC_NVM_EMP_SR_PTR_TYPE_M BIT(15) #define ICE_AQC_NVM_EMP_SR_PTR_TYPE_SECTOR 1 #define ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET 0x46 #define ICE_AQC_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */ #define ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */ #define ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID 0x129 #define ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */ #define ICE_AQC_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0) #define ICE_AQC_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */ #define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */ #define ICE_AQC_NVM_MINSREV_MOD_ID 0x130 /* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the * type field is excluded from the section when reading and writing from * a module using the module_typeid field with these AQ commands. */ struct ice_aqc_nvm_minsrev { __le16 length; __le16 validity; #define ICE_AQC_NVM_MINSREV_NVM_VALID BIT(0) #define ICE_AQC_NVM_MINSREV_OROM_VALID BIT(1) __le16 nvm_minsrev_l; __le16 nvm_minsrev_h; __le16 orom_minsrev_l; __le16 orom_minsrev_h; }; /* Used for 0x0704 as well as for 0x0705 commands */ struct ice_aqc_nvm_cfg { u8 cmd_flags; #define ICE_AQC_ANVM_MULTIPLE_ELEMS BIT(0) #define ICE_AQC_ANVM_IMMEDIATE_FIELD BIT(1) #define ICE_AQC_ANVM_NEW_CFG BIT(2) u8 reserved; __le16 count; __le16 id; u8 reserved1[2]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_nvm_cfg_data { __le16 field_id; __le16 field_options; __le16 field_value; }; /* NVM Checksum Command (direct, 0x0706) */ struct ice_aqc_nvm_checksum { u8 flags; #define ICE_AQC_NVM_CHECKSUM_VERIFY BIT(0) #define ICE_AQC_NVM_CHECKSUM_RECALC BIT(1) u8 rsvd; __le16 checksum; /* Used only by response */ #define ICE_AQC_NVM_CHECKSUM_CORRECT 0xBABA u8 rsvd2[12]; }; /* * Send to PF command (indirect 0x0801) ID is only used by PF * * Send to VF command (indirect 0x0802) ID is only used by PF * */ struct ice_aqc_pf_vf_msg { __le32 id; u32 reserved; __le32 addr_high; __le32 addr_low; }; /* Write/Read Alternate - Direct (direct 0x0900/0x0902) */ struct ice_aqc_read_write_alt_direct { __le32 dword0_addr; __le32 dword0_value; __le32 dword1_addr; __le32 dword1_value; }; /* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */ struct ice_aqc_read_write_alt_indirect { __le32 base_dword_addr; __le32 num_dwords; __le32 addr_high; __le32 addr_low; }; /* Done Alternate Write (direct 0x0904) */ struct ice_aqc_done_alt_write { u8 flags; #define ICE_AQC_CMD_UEFI_BIOS_MODE BIT(0) #define ICE_AQC_RESP_RESET_NEEDED BIT(1) u8 reserved[15]; }; /* Clear Port Alternate Write (direct 0x0906) */ struct ice_aqc_clear_port_alt_write { u8 reserved[16]; }; /* Get LLDP MIB (indirect 0x0A00) * Note: This is also used by the LLDP MIB Change Event (0x0A01) * as the format is the same. 
*/ struct ice_aqc_lldp_get_mib { u8 type; #define ICE_AQ_LLDP_MIB_TYPE_S 0 #define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S) #define ICE_AQ_LLDP_MIB_LOCAL 0 #define ICE_AQ_LLDP_MIB_REMOTE 1 #define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2 #define ICE_AQ_LLDP_BRID_TYPE_S 2 #define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S) #define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0 #define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1 /* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */ #define ICE_AQ_LLDP_TX_S 0x4 #define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S) #define ICE_AQ_LLDP_TX_ACTIVE 0 #define ICE_AQ_LLDP_TX_SUSPENDED 1 #define ICE_AQ_LLDP_TX_FLUSHED 3 /* The following bytes are reserved in the Get LLDP MIB command (0x0A00) * and in the LLDP MIB Change Event (0x0A01); they are valid only in the * Get LLDP MIB (0x0A00) response. */ u8 reserved1; __le16 local_len; __le16 remote_len; u8 reserved2[2]; __le32 addr_high; __le32 addr_low; }; /* Configure LLDP MIB Change Event (direct 0x0A01) */ /* For MIB Change Event use ice_aqc_lldp_get_mib structure above */ struct ice_aqc_lldp_set_mib_change { u8 command; #define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1 u8 reserved[15]; }; /* Add LLDP TLV (indirect 0x0A02) * Delete LLDP TLV (indirect 0x0A04) */ struct ice_aqc_lldp_add_delete_tlv { u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ u8 reserved1[1]; __le16 len; u8 reserved2[4]; __le32 addr_high; __le32 addr_low; }; /* Update LLDP TLV (indirect 0x0A03) */ struct ice_aqc_lldp_update_tlv { u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ u8 reserved; __le16 old_len; __le16 new_offset; __le16 new_len; __le32 addr_high; __le32 addr_low; }; /* Stop LLDP (direct 0x0A05) */ struct ice_aqc_lldp_stop { u8 command; #define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0) #define ICE_AQ_LLDP_AGENT_STOP 0x0 #define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK #define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1) u8 reserved[15]; }; /* Start LLDP (direct 0x0A06) */ struct ice_aqc_lldp_start { u8 command; #define ICE_AQ_LLDP_AGENT_START BIT(0) #define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1) u8 reserved[15]; }; /* Get CEE DCBX Oper Config (0x0A07) * The command uses the generic descriptor struct and * returns the struct below as an indirect response. */ struct ice_aqc_get_cee_dcb_cfg_resp { u8 oper_num_tc; u8 oper_prio_tc[4]; u8 oper_tc_bw[8]; u8 oper_pfc_en; __le16 oper_app_prio; #define ICE_AQC_CEE_APP_FCOE_S 0 #define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S) #define ICE_AQC_CEE_APP_ISCSI_S 3 #define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S) #define ICE_AQC_CEE_APP_FIP_S 8 #define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S) __le32 tlv_status; #define ICE_AQC_CEE_PG_STATUS_S 0 #define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S) #define ICE_AQC_CEE_PFC_STATUS_S 3 #define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S) #define ICE_AQC_CEE_FCOE_STATUS_S 8 #define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S) #define ICE_AQC_CEE_ISCSI_STATUS_S 11 #define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S) #define ICE_AQC_CEE_FIP_STATUS_S 16 #define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S) u8 reserved[12]; };
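/* The shift/mask pairs above decode per-application priorities from
 * oper_app_prio. A short sketch extracting the iSCSI priority; the
 * function name is illustrative only, and LE16_TO_CPU is assumed to be
 * the byte-order helper provided by the osdep layer.
 */
static inline u8
ice_cee_get_iscsi_app_prio(struct ice_aqc_get_cee_dcb_cfg_resp *resp)
{
	u16 app_prio = LE16_TO_CPU(resp->oper_app_prio);

	return (app_prio & ICE_AQC_CEE_APP_ISCSI_M) >>
		ICE_AQC_CEE_APP_ISCSI_S;
}

/* Set Local LLDP MIB (indirect 0x0A08)
 * Used to replace the local MIB of a given LLDP agent, e.g.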
DCBX */ struct ice_aqc_lldp_set_local_mib { u8 type; #define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0) #define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0 #define SET_LOCAL_MIB_TYPE_CEE_M BIT(1) #define SET_LOCAL_MIB_TYPE_CEE_WILLING 0 #define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M u8 reserved0; __le16 length; u8 reserved1[4]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_lldp_set_local_mib_resp { u8 status; #define SET_LOCAL_MIB_RESP_EVENT_M BIT(0) #define SET_LOCAL_MIB_RESP_MIB_CHANGE_SILENT 0 #define SET_LOCAL_MIB_RESP_MIB_CHANGE_EVENT SET_LOCAL_MIB_RESP_EVENT_M u8 reserved[15]; }; /* Stop/Start LLDP Agent (direct 0x0A09) * Used for stopping/starting a specific LLDP agent, e.g. DCBX. * The same structure is used for the response, with the command field * being used as the status field. */ struct ice_aqc_lldp_stop_start_specific_agent { u8 command; #define ICE_AQC_START_STOP_AGENT_M BIT(0) #define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0 #define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M u8 reserved[15]; }; /* LLDP Filter Control (direct 0x0A0A) */ struct ice_aqc_lldp_filter_ctrl { u8 cmd_flags; #define ICE_AQC_LLDP_FILTER_ACTION_M MAKEMASK(3, 0) #define ICE_AQC_LLDP_FILTER_ACTION_ADD 0x0 #define ICE_AQC_LLDP_FILTER_ACTION_DELETE 0x1 #define ICE_AQC_LLDP_FILTER_ACTION_UPDATE 0x2 u8 reserved1; __le16 vsi_num; u8 reserved2[12]; }; /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ struct ice_aqc_get_set_rss_key { #define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) #define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0 #define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S) __le16 vsi_id; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; #define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28 #define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC #define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \ (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)
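/* A brief sketch of encoding the VSI number into the Get/Set RSS key
 * descriptor above; the helper name is illustrative only, and CPU_TO_LE16
 * is assumed to be the byte-order helper provided by the osdep layer.
 */
static inline void
ice_aqc_rss_key_set_vsi(struct ice_aqc_get_set_rss_key *cmd, u16 vsi_id)
{
	cmd->vsi_id = CPU_TO_LE16(((vsi_id << ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
				   ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				  ICE_AQC_GSET_RSS_KEY_VSI_VALID);
}

/**
 * struct ice_aqc_get_set_rss_keys - Get/Set RSS hash key command buffer
 * @standard_rss_key: 40 most significant bytes of hash key
 * @extended_hash_key: 12 least significant bytes of hash key
 *
 * Set/Get 40 byte hash key using standard_rss_key field, and set
 * extended_hash_key field to zero. Set/Get 52 byte hash key using
 * standard_rss_key field for 40 most significant bytes and the
 * extended_hash_key field for the 12 least significant bytes of hash key.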
*/ struct ice_aqc_get_set_rss_keys { u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE]; }; /* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */ struct ice_aqc_get_set_rss_lut { #define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) #define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0 #define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) __le16 vsi_id; #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2 #define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4 #define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \ (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) __le16 flags; __le32 reserved; __le32 addr_high; __le32 addr_low; }; /* Add Tx LAN Queues (indirect 0x0C30) */ struct ice_aqc_add_txqs { u8 num_qgrps; u8 reserved[3]; __le32 reserved1; __le32 addr_high; __le32 addr_low; }; /* This is the descriptor of each queue entry for the Add Tx LAN Queues * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp. */ struct ice_aqc_add_txqs_perq { __le16 txq_id; u8 rsvd[2]; __le32 q_teid; u8 txq_ctx[22]; u8 rsvd2[2]; struct ice_aqc_txsched_elem info; }; /* The format of the command buffer for Add Tx LAN Queues (0x0C30) * is an array of the following structs. Please note that the length of * each struct ice_aqc_add_tx_qgrp is variable due * to the variable number of queues in each group! */ struct ice_aqc_add_tx_qgrp { __le32 parent_teid; u8 num_txqs; u8 rsvd[3]; struct ice_aqc_add_txqs_perq txqs[STRUCT_HACK_VAR_LEN]; }; /* Disable Tx LAN Queues (indirect 0x0C31) */ struct ice_aqc_dis_txqs { u8 cmd_type; #define ICE_AQC_Q_DIS_CMD_S 0 #define ICE_AQC_Q_DIS_CMD_M (0x3 << ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_NO_FUNC_RESET (0 << ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_VM_RESET BIT(ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_VF_RESET (2 << ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_PF_RESET (3 << ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_SUBSEQ_CALL BIT(2) #define ICE_AQC_Q_DIS_CMD_FLUSH_PIPE BIT(3) u8 num_entries; __le16 vmvf_and_timeout; #define ICE_AQC_Q_DIS_VMVF_NUM_S 0 #define ICE_AQC_Q_DIS_VMVF_NUM_M (0x3FF << ICE_AQC_Q_DIS_VMVF_NUM_S) #define ICE_AQC_Q_DIS_TIMEOUT_S 10 #define ICE_AQC_Q_DIS_TIMEOUT_M (0x3F << ICE_AQC_Q_DIS_TIMEOUT_S) __le32 blocked_cgds; __le32 addr_high; __le32 addr_low; }; /* The buffer for Disable Tx LAN Queues (indirect 0x0C31) * contains the following structures, arrayed one after the * other. * Note: Since the q_id is 16 bits wide, if the * number of queues is even, then 2 bytes of alignment MUST be * added before the start of the next group, to allow correct * alignment of the parent_teid field. 
*/ #pragma pack(1) struct ice_aqc_dis_txq_item { __le32 parent_teid; u8 num_qs; u8 rsvd; /* The length of the q_id array varies according to num_qs */ #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15 #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \ (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \ (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) __le16 q_id[STRUCT_HACK_VAR_LEN]; }; #pragma pack() /* Tx LAN Queues Cleanup Event (0x0C31) */ struct ice_aqc_txqs_cleanup { __le16 caller_opc; __le16 cmd_tag; u8 reserved[12]; }; /* Move / Reconfigure Tx Queues (indirect 0x0C32) */ struct ice_aqc_move_txqs { u8 cmd_type; #define ICE_AQC_Q_CMD_TYPE_S 0 #define ICE_AQC_Q_CMD_TYPE_M (0x3 << ICE_AQC_Q_CMD_TYPE_S) #define ICE_AQC_Q_CMD_TYPE_MOVE 1 #define ICE_AQC_Q_CMD_TYPE_TC_CHANGE 2 #define ICE_AQC_Q_CMD_TYPE_MOVE_AND_TC 3 #define ICE_AQC_Q_CMD_SUBSEQ_CALL BIT(2) #define ICE_AQC_Q_CMD_FLUSH_PIPE BIT(3) u8 num_qs; u8 rsvd; u8 timeout; #define ICE_AQC_Q_CMD_TIMEOUT_S 2 #define ICE_AQC_Q_CMD_TIMEOUT_M (0x3F << ICE_AQC_Q_CMD_TIMEOUT_S) __le32 blocked_cgds; __le32 addr_high; __le32 addr_low; }; /* Per-queue data buffer for the Move Tx LAN Queues command/response */ struct ice_aqc_move_txqs_elem { __le16 txq_id; u8 q_cgd; u8 rsvd; __le32 q_teid; }; /* Indirect data buffer for the Move Tx LAN Queues command/response */ struct ice_aqc_move_txqs_data { __le32 src_teid; __le32 dest_teid; struct ice_aqc_move_txqs_elem txqs[STRUCT_HACK_VAR_LEN]; }; /* Download Package (indirect 0x0C40) */ /* Also used for Update Package (indirect 0x0C42 and 0x0C41) */ struct ice_aqc_download_pkg { u8 flags; #define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01 u8 reserved[3]; __le32 reserved1; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_download_pkg_resp { __le32 error_offset; __le32 error_info; __le32 addr_high; __le32 addr_low; }; /* Get Package Info List (indirect 0x0C43) */ struct ice_aqc_get_pkg_info_list { __le32 reserved1; __le32 reserved2; __le32 addr_high; __le32 addr_low; }; /* Version format for packages */ struct ice_pkg_ver { u8 major; u8 minor; u8 update; u8 draft; }; #define ICE_PKG_NAME_SIZE 32 #define ICE_SEG_ID_SIZE 28 #define ICE_SEG_NAME_SIZE 28 struct ice_aqc_get_pkg_info { struct ice_pkg_ver ver; char name[ICE_SEG_NAME_SIZE]; __le32 track_id; u8 is_in_nvm; u8 is_active; u8 is_active_at_boot; u8 is_modified; }; /* Get Package Info List response buffer format (0x0C43) */ struct ice_aqc_get_pkg_info_resp { __le32 count; struct ice_aqc_get_pkg_info pkg_info[STRUCT_HACK_VAR_LEN]; }; /* Driver Shared Parameters (direct, 0x0C90) */ struct ice_aqc_driver_shared_params { u8 set_or_get_op; #define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0) #define ICE_AQC_DRIVER_PARAM_SET 0 #define ICE_AQC_DRIVER_PARAM_GET 1 u8 param_indx; #define ICE_AQC_DRIVER_PARAM_MAX_IDX 15 u8 rsvd[2]; __le32 param_val; __le32 addr_high; __le32 addr_low; }; /* Lan Queue Overflow Event (direct, 0x1001) */ struct ice_aqc_event_lan_overflow { __le32 prtdcb_ruptq; __le32 qtx_ctl; u8 reserved[8]; }; /* Set Health Status (direct 0xFF20) */ struct ice_aqc_set_health_status_config { u8 event_source; #define ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK BIT(0) #define ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK BIT(1) #define ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK BIT(2) u8 reserved[15]; }; #define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT 0x101 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE 0x102 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL 0x103 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM 0x104 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT 0x105 #define 
ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106 #define ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107 #define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108 #define ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B #define ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C #define ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D #define ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED 0x10F #define ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT 0x110 #define ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED 0x111 #define ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO 0x112 #define ICE_AQC_HEALTH_STATUS_ERR_NETLIST 0x113 #define ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT 0x114 #define ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS 0x115 #define ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME 0x116 #define ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT 0x117 #define ICE_AQC_HEALTH_STATUS_INFO_RECOVERY 0x500 #define ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS 0x501 #define ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH 0x502 #define ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH 0x503 #define ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH 0x504 #define ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT 0x505 #define ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT 0x506 #define ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB 0x509 /* Get Health Status codes (indirect 0xFF21) */ struct ice_aqc_get_supported_health_status_codes { __le16 health_code_count; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Get Health Status (indirect 0xFF22) */ struct ice_aqc_get_health_status { __le16 health_status_count; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Get Health Status event buffer entry, (0xFF22) * repeated per reported health status */ struct ice_aqc_health_status_elem { __le16 health_status_code; __le16 event_source; #define ICE_AQC_HEALTH_STATUS_PF (0x1) #define ICE_AQC_HEALTH_STATUS_PORT (0x2) #define ICE_AQC_HEALTH_STATUS_GLOBAL (0x3) __le32 internal_data1; #define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA (0xDEADBEEF) __le32 internal_data2; }; /* Clear Health Status (direct 0xFF23) */ struct ice_aqc_clear_health_status { __le32 reserved[4]; }; +/* Set FW Logging configuration (indirect 0xFF30) + * Register for FW Logging (indirect 0xFF31) + * Query FW Logging (indirect 0xFF32) + * FW Log Event (indirect 0xFF33) + * Get FW Log (indirect 0xFF34) + * Clear FW Log (indirect 0xFF35) + */ + +struct ice_aqc_fw_log { + u8 cmd_flags; +#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0) +#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1) +#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3) +#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0) +#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2) +#define ICE_AQC_FW_LOG_PERSISTENT BIT(0) + u8 rsp_flag; +#define ICE_AQC_FW_LOG_MORE_DATA BIT(1) + __le16 fw_rt_msb; + union { + struct { + __le32 fw_rt_lsb; + } sync; + struct { + __le16 log_resolution; +#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1) +#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128) + __le16 mdl_cnt; + } cfg; + } ops; + __le32 addr_high; + __le32 addr_low; +}; + +/* Response Buffer for: + * Set Firmware Logging Configuration (0xFF30) + * Query FW Logging (0xFF32) + */ +struct ice_aqc_fw_log_cfg_resp { + __le16 module_identifier; + u8 log_level; + u8 rsvd0; +}; + /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags * @opcode: AQ command opcode * @datalen: length in bytes of indirect/external data buffer * @retval: return value from firmware * @cookie_high: opaque data high-half * @cookie_low: opaque data low-half * @params: command-specific parameters * * Descriptor format for commands the driver posts on the Admin Transmit 
Queue * (ATQ). The firmware writes back onto the command descriptor and returns * the result of the command. Asynchronous events that are not an immediate * result of the command are written to the Admin Receive Queue (ARQ) using * the same descriptor format. Descriptors are in little-endian notation with * 32-bit words. */ struct ice_aq_desc { __le16 flags; __le16 opcode; __le16 datalen; __le16 retval; __le32 cookie_high; __le32 cookie_low; union { u8 raw[16]; struct ice_aqc_generic generic; struct ice_aqc_get_ver get_ver; struct ice_aqc_driver_ver driver_ver; struct ice_aqc_q_shutdown q_shutdown; struct ice_aqc_get_exp_err exp_err; struct ice_aqc_req_res res_owner; struct ice_aqc_manage_mac_read mac_read; struct ice_aqc_manage_mac_write mac_write; struct ice_aqc_clear_pxe clear_pxe; struct ice_aqc_config_no_drop_policy no_drop; struct ice_aqc_add_update_mir_rule add_update_rule; struct ice_aqc_delete_mir_rule del_rule; struct ice_aqc_list_caps get_cap; struct ice_aqc_get_phy_caps get_phy; struct ice_aqc_set_phy_cfg set_phy; struct ice_aqc_restart_an restart_an; struct ice_aqc_dnl_get_status get_status; struct ice_aqc_dnl_run_command dnl_run; struct ice_aqc_dnl_call_command dnl_call; struct ice_aqc_dnl_read_write_command dnl_read_write; struct ice_aqc_dnl_read_write_response dnl_read_write_resp; struct ice_aqc_dnl_set_breakpoints_command dnl_set_brk; struct ice_aqc_dnl_read_log_command dnl_read_log; struct ice_aqc_dnl_read_log_response dnl_read_log_resp; struct ice_aqc_i2c read_write_i2c; + struct ice_aqc_read_i2c_resp read_i2c_resp; struct ice_aqc_mdio read_write_mdio; struct ice_aqc_gpio_by_func read_write_gpio_by_func; struct ice_aqc_gpio read_write_gpio; struct ice_aqc_set_led set_led; + struct ice_aqc_mdio read_mdio; + struct ice_aqc_mdio write_mdio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; struct ice_aqc_get_port_options get_port_options; struct ice_aqc_set_port_option set_port_option; struct ice_aqc_get_sw_cfg get_sw_conf; struct ice_aqc_set_port_params set_port_params; struct ice_aqc_sw_rules sw_rules; struct ice_aqc_storm_cfg storm_conf; struct ice_aqc_get_topo get_topo; struct ice_aqc_sched_elem_cmd sched_elem_cmd; struct ice_aqc_query_txsched_res query_sched_res; struct ice_aqc_query_node_to_root query_node_to_root; struct ice_aqc_cfg_l2_node_cgd cfg_l2_node_cgd; struct ice_aqc_query_port_ets port_ets; struct ice_aqc_rl_profile rl_profile; struct ice_aqc_nvm nvm; struct ice_aqc_nvm_cfg nvm_cfg; struct ice_aqc_nvm_checksum nvm_checksum; struct ice_aqc_pf_vf_msg virt; struct ice_aqc_read_write_alt_direct read_write_alt_direct; struct ice_aqc_read_write_alt_indirect read_write_alt_indirect; struct ice_aqc_done_alt_write done_alt_write; struct ice_aqc_clear_port_alt_write clear_port_alt_write; struct ice_aqc_pfc_ignore pfc_ignore; struct ice_aqc_set_query_pfc_mode set_query_pfc_mode; struct ice_aqc_set_dcb_params set_dcb_params; struct ice_aqc_lldp_get_mib lldp_get_mib; struct ice_aqc_lldp_set_mib_change lldp_set_event; struct ice_aqc_lldp_add_delete_tlv lldp_add_delete_tlv; struct ice_aqc_lldp_update_tlv lldp_update_tlv; struct ice_aqc_lldp_stop lldp_stop; struct ice_aqc_lldp_start lldp_start; struct ice_aqc_lldp_set_local_mib lldp_set_mib; struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl; struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl; struct ice_aqc_get_set_rss_lut get_set_rss_lut; struct ice_aqc_get_set_rss_key get_set_rss_key; struct ice_aqc_add_txqs add_txqs; struct ice_aqc_dis_txqs dis_txqs; struct 
ice_aqc_move_txqs move_txqs; struct ice_aqc_txqs_cleanup txqs_cleanup; struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; struct ice_aqc_get_vsi_resp get_vsi_resp; struct ice_aqc_download_pkg download_pkg; struct ice_aqc_get_pkg_info_list get_pkg_info_list; struct ice_aqc_driver_shared_params drv_shared_params; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_get_res_alloc get_res; struct ice_aqc_get_allocd_res_desc get_res_desc; struct ice_aqc_set_mac_cfg set_mac_cfg; struct ice_aqc_set_event_mask set_event_mask; struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; struct ice_aqc_get_link_topo get_link_topo; struct ice_aqc_set_health_status_config set_health_status_config; struct ice_aqc_get_supported_health_status_codes get_supported_health_status_codes; struct ice_aqc_get_health_status get_health_status; struct ice_aqc_clear_health_status clear_health_status; } params; }; /* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ #define ICE_AQ_LG_BUF 512 /* Flags sub-structure * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | */ /* command flags and offsets */ #define ICE_AQ_FLAG_DD_S 0 #define ICE_AQ_FLAG_CMP_S 1 #define ICE_AQ_FLAG_ERR_S 2 #define ICE_AQ_FLAG_VFE_S 3 #define ICE_AQ_FLAG_LB_S 9 #define ICE_AQ_FLAG_RD_S 10 #define ICE_AQ_FLAG_VFC_S 11 #define ICE_AQ_FLAG_BUF_S 12 #define ICE_AQ_FLAG_SI_S 13 #define ICE_AQ_FLAG_EI_S 14 #define ICE_AQ_FLAG_FE_S 15 #define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */ #define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */ #define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */ #define ICE_AQ_FLAG_VFE BIT(ICE_AQ_FLAG_VFE_S) /* 0x8 */ #define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */ #define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */ #define ICE_AQ_FLAG_VFC BIT(ICE_AQ_FLAG_VFC_S) /* 0x800 */ #define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */ #define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */ #define ICE_AQ_FLAG_EI BIT(ICE_AQ_FLAG_EI_S) /* 0x4000 */ #define ICE_AQ_FLAG_FE BIT(ICE_AQ_FLAG_FE_S) /* 0x8000 */ /* error codes */ enum ice_aq_err { ICE_AQ_RC_OK = 0, /* Success */ ICE_AQ_RC_EPERM = 1, /* Operation not permitted */ ICE_AQ_RC_ENOENT = 2, /* No such element */ ICE_AQ_RC_ESRCH = 3, /* Bad opcode */ ICE_AQ_RC_EINTR = 4, /* Operation interrupted */ ICE_AQ_RC_EIO = 5, /* I/O error */ ICE_AQ_RC_ENXIO = 6, /* No such resource */ ICE_AQ_RC_E2BIG = 7, /* Arg too long */ ICE_AQ_RC_EAGAIN = 8, /* Try again */ ICE_AQ_RC_ENOMEM = 9, /* Out of memory */ ICE_AQ_RC_EACCES = 10, /* Permission denied */ ICE_AQ_RC_EFAULT = 11, /* Bad address */ ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ ICE_AQ_RC_EEXIST = 13, /* Object already exists */ ICE_AQ_RC_EINVAL = 14, /* Invalid argument */ ICE_AQ_RC_ENOTTY = 15, /* Not a typewriter */ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */ ICE_AQ_RC_ERANGE = 18, /* Parameter out of range */ ICE_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ ICE_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ ICE_AQ_RC_EFBIG = 22, /* File too big */ ICE_AQ_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */ ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */ ICE_AQ_RC_EBADSIG = 25, /* 
Bad RSA signature */ ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */ ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */ ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */ ICE_AQ_RC_EACCES_BMCU = 29, /* BMC Update in progress */ }; /* Admin Queue command opcodes */ enum ice_adminq_opc { /* AQ commands */ ice_aqc_opc_get_ver = 0x0001, ice_aqc_opc_driver_ver = 0x0002, ice_aqc_opc_q_shutdown = 0x0003, ice_aqc_opc_get_exp_err = 0x0005, /* resource ownership */ ice_aqc_opc_req_res = 0x0008, ice_aqc_opc_release_res = 0x0009, /* device/function capabilities */ ice_aqc_opc_list_func_caps = 0x000A, ice_aqc_opc_list_dev_caps = 0x000B, /* manage MAC address */ ice_aqc_opc_manage_mac_read = 0x0107, ice_aqc_opc_manage_mac_write = 0x0108, /* PXE */ ice_aqc_opc_clear_pxe_mode = 0x0110, ice_aqc_opc_config_no_drop_policy = 0x0112, /* internal switch commands */ ice_aqc_opc_get_sw_cfg = 0x0200, ice_aqc_opc_set_port_params = 0x0203, /* Alloc/Free/Get Resources */ ice_aqc_opc_get_res_alloc = 0x0204, ice_aqc_opc_alloc_res = 0x0208, ice_aqc_opc_free_res = 0x0209, ice_aqc_opc_get_allocd_res_desc = 0x020A, + ice_aqc_opc_set_vlan_mode_parameters = 0x020C, + ice_aqc_opc_get_vlan_mode_parameters = 0x020D, /* VSI commands */ ice_aqc_opc_add_vsi = 0x0210, ice_aqc_opc_update_vsi = 0x0211, ice_aqc_opc_get_vsi_params = 0x0212, ice_aqc_opc_free_vsi = 0x0213, /* Mirroring rules - add/update, delete */ ice_aqc_opc_add_update_mir_rule = 0x0260, ice_aqc_opc_del_mir_rule = 0x0261, /* storm configuration */ ice_aqc_opc_set_storm_cfg = 0x0280, ice_aqc_opc_get_storm_cfg = 0x0281, /* switch rules population commands */ ice_aqc_opc_add_sw_rules = 0x02A0, ice_aqc_opc_update_sw_rules = 0x02A1, ice_aqc_opc_remove_sw_rules = 0x02A2, ice_aqc_opc_get_sw_rules = 0x02A3, ice_aqc_opc_clear_pf_cfg = 0x02A4, /* DCB commands */ ice_aqc_opc_pfc_ignore = 0x0301, ice_aqc_opc_query_pfc_mode = 0x0302, ice_aqc_opc_set_pfc_mode = 0x0303, ice_aqc_opc_set_dcb_params = 0x0306, /* transmit scheduler commands */ ice_aqc_opc_get_dflt_topo = 0x0400, ice_aqc_opc_add_sched_elems = 0x0401, ice_aqc_opc_cfg_sched_elems = 0x0403, ice_aqc_opc_get_sched_elems = 0x0404, ice_aqc_opc_move_sched_elems = 0x0408, ice_aqc_opc_suspend_sched_elems = 0x0409, ice_aqc_opc_resume_sched_elems = 0x040A, ice_aqc_opc_query_port_ets = 0x040E, ice_aqc_opc_delete_sched_elems = 0x040F, ice_aqc_opc_add_rl_profiles = 0x0410, ice_aqc_opc_query_rl_profiles = 0x0411, ice_aqc_opc_query_sched_res = 0x0412, ice_aqc_opc_query_node_to_root = 0x0413, ice_aqc_opc_cfg_l2_node_cgd = 0x0414, ice_aqc_opc_remove_rl_profiles = 0x0415, /* PHY commands */ ice_aqc_opc_get_phy_caps = 0x0600, ice_aqc_opc_set_phy_cfg = 0x0601, ice_aqc_opc_set_mac_cfg = 0x0603, ice_aqc_opc_restart_an = 0x0605, ice_aqc_opc_get_link_status = 0x0607, ice_aqc_opc_set_event_mask = 0x0613, ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_dnl_get_status = 0x0680, ice_aqc_opc_dnl_run = 0x0681, ice_aqc_opc_dnl_call = 0x0682, ice_aqc_opc_dnl_read_sto = 0x0683, ice_aqc_opc_dnl_write_sto = 0x0684, ice_aqc_opc_dnl_set_breakpoints = 0x0686, ice_aqc_opc_dnl_read_log = 0x0687, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_get_link_topo_pin = 0x06E1, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, ice_aqc_opc_read_mdio = 0x06E4, ice_aqc_opc_write_mdio = 0x06E5, ice_aqc_opc_set_gpio_by_func = 0x06E6, ice_aqc_opc_get_gpio_by_func = 0x06E7, ice_aqc_opc_set_led = 0x06E8, ice_aqc_opc_set_port_id_led = 0x06E9, ice_aqc_opc_get_port_options = 0x06EA, ice_aqc_opc_set_port_option = 0x06EB, ice_aqc_opc_set_gpio = 
0x06EC, ice_aqc_opc_get_gpio = 0x06ED, ice_aqc_opc_sff_eeprom = 0x06EE, ice_aqc_opc_sw_set_gpio = 0x06EF, ice_aqc_opc_sw_get_gpio = 0x06F0, + ice_aqc_opc_program_topology_device_nvm = 0x06F2, + ice_aqc_opc_read_topology_device_nvm = 0x06F3, /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, ice_aqc_opc_nvm_erase = 0x0702, ice_aqc_opc_nvm_write = 0x0703, ice_aqc_opc_nvm_cfg_read = 0x0704, ice_aqc_opc_nvm_cfg_write = 0x0705, ice_aqc_opc_nvm_checksum = 0x0706, ice_aqc_opc_nvm_write_activate = 0x0707, ice_aqc_opc_nvm_sr_dump = 0x0707, ice_aqc_opc_nvm_save_factory_settings = 0x0708, ice_aqc_opc_nvm_update_empr = 0x0709, ice_aqc_opc_nvm_pkg_data = 0x070A, ice_aqc_opc_nvm_pass_component_tbl = 0x070B, /* PF/VF mailbox commands */ ice_mbx_opc_send_msg_to_pf = 0x0801, ice_mbx_opc_send_msg_to_vf = 0x0802, /* Alternate Structure Commands */ ice_aqc_opc_write_alt_direct = 0x0900, ice_aqc_opc_write_alt_indirect = 0x0901, ice_aqc_opc_read_alt_direct = 0x0902, ice_aqc_opc_read_alt_indirect = 0x0903, ice_aqc_opc_done_alt_write = 0x0904, ice_aqc_opc_clear_port_alt_write = 0x0906, /* LLDP commands */ ice_aqc_opc_lldp_get_mib = 0x0A00, ice_aqc_opc_lldp_set_mib_change = 0x0A01, ice_aqc_opc_lldp_add_tlv = 0x0A02, ice_aqc_opc_lldp_update_tlv = 0x0A03, ice_aqc_opc_lldp_delete_tlv = 0x0A04, ice_aqc_opc_lldp_stop = 0x0A05, ice_aqc_opc_lldp_start = 0x0A06, ice_aqc_opc_get_cee_dcb_cfg = 0x0A07, ice_aqc_opc_lldp_set_local_mib = 0x0A08, ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09, ice_aqc_opc_lldp_filter_ctrl = 0x0A0A, /* RSS commands */ ice_aqc_opc_set_rss_key = 0x0B02, ice_aqc_opc_set_rss_lut = 0x0B03, ice_aqc_opc_get_rss_key = 0x0B04, ice_aqc_opc_get_rss_lut = 0x0B05, /* Tx queue handling commands/events */ ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, ice_aqc_opc_txqs_cleanup = 0x0C31, ice_aqc_opc_move_recfg_txqs = 0x0C32, /* package commands */ ice_aqc_opc_download_pkg = 0x0C40, ice_aqc_opc_upload_section = 0x0C41, ice_aqc_opc_update_pkg = 0x0C42, ice_aqc_opc_get_pkg_info_list = 0x0C43, ice_aqc_opc_driver_shared_params = 0x0C90, /* Standalone Commands/Events */ ice_aqc_opc_event_lan_overflow = 0x1001, /* SystemDiagnostic commands */ ice_aqc_opc_set_health_status_config = 0xFF20, ice_aqc_opc_get_supported_health_status_codes = 0xFF21, ice_aqc_opc_get_health_status = 0xFF22, - ice_aqc_opc_clear_health_status = 0xFF23 + ice_aqc_opc_clear_health_status = 0xFF23, + + /* FW Logging Commands */ + ice_aqc_opc_fw_logs_config = 0xFF30, + ice_aqc_opc_fw_logs_register = 0xFF31, + ice_aqc_opc_fw_logs_query = 0xFF32, + ice_aqc_opc_fw_logs_event = 0xFF33, + ice_aqc_opc_fw_logs_get = 0xFF34, + ice_aqc_opc_fw_logs_clear = 0xFF35 }; #endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h index a7f729060b78..0e04cab87be9 100644 --- a/sys/dev/ice/ice_bitops.h +++ b/sys/dev/ice/ice_bitops.h @@ -1,485 +1,530 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. 
Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_BITOPS_H_ #define _ICE_BITOPS_H_ /* Define the size of the bitmap chunk */ typedef u32 ice_bitmap_t; /* Number of bits per bitmap chunk */ #define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t)) /* Determine which chunk a bit belongs in */ #define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK) /* How many chunks are required to store this many bits */ #define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz), BITS_PER_CHUNK) /* Which bit inside a chunk this bit corresponds to */ #define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK) /* How many bits are valid in the last chunk, assumes nr > 0 */ #define LAST_CHUNK_BITS(nr) ((((nr) - 1) % BITS_PER_CHUNK) + 1) /* Generate a bitmask of valid bits in the last chunk, assumes nr > 0 */ #define LAST_CHUNK_MASK(nr) (((ice_bitmap_t)~0) >> \ (BITS_PER_CHUNK - LAST_CHUNK_BITS(nr))) #define ice_declare_bitmap(A, sz) \ ice_bitmap_t A[BITS_TO_CHUNKS(sz)] static inline bool ice_is_bit_set_internal(u16 nr, const ice_bitmap_t *bitmap) { return !!(*bitmap & BIT(nr)); } /* * If atomic version of the bitops are required, each specific OS * implementation will need to implement OS/platform specific atomic * version of the functions below: * * ice_clear_bit_internal * ice_set_bit_internal * ice_test_and_clear_bit_internal * ice_test_and_set_bit_internal * * and define macro ICE_ATOMIC_BITOPS to overwrite the default non-atomic * implementation. */ static inline void ice_clear_bit_internal(u16 nr, ice_bitmap_t *bitmap) { *bitmap &= ~BIT(nr); } static inline void ice_set_bit_internal(u16 nr, ice_bitmap_t *bitmap) { *bitmap |= BIT(nr); } static inline bool ice_test_and_clear_bit_internal(u16 nr, ice_bitmap_t *bitmap) { if (ice_is_bit_set_internal(nr, bitmap)) { ice_clear_bit_internal(nr, bitmap); return true; } return false; } static inline bool ice_test_and_set_bit_internal(u16 nr, ice_bitmap_t *bitmap) { if (ice_is_bit_set_internal(nr, bitmap)) return true; ice_set_bit_internal(nr, bitmap); return false; } /** * ice_is_bit_set - Check state of a bit in a bitmap * @bitmap: the bitmap to check * @nr: the bit to check * * Returns true if bit nr of bitmap is set. False otherwise. Assumes that nr * is less than the size of the bitmap. */ static inline bool ice_is_bit_set(const ice_bitmap_t *bitmap, u16 nr) { return ice_is_bit_set_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /** * ice_clear_bit - Clear a bit in a bitmap * @bitmap: the bitmap to change * @nr: the bit to change * * Clears the bit nr in bitmap. Assumes that nr is less than the size of the * bitmap. 
*/ static inline void ice_clear_bit(u16 nr, ice_bitmap_t *bitmap) { ice_clear_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /** * ice_set_bit - Set a bit in a bitmap * @bitmap: the bitmap to change * @nr: the bit to change * * Sets the bit nr in bitmap. Assumes that nr is less than the size of the * bitmap. */ static inline void ice_set_bit(u16 nr, ice_bitmap_t *bitmap) { ice_set_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /** * ice_test_and_clear_bit - Atomically clear a bit and return the old bit value * @nr: the bit to change * @bitmap: the bitmap to change * * Check and clear the bit nr in bitmap. Assumes that nr is less than the size * of the bitmap. */ static inline bool ice_test_and_clear_bit(u16 nr, ice_bitmap_t *bitmap) { return ice_test_and_clear_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /** * ice_test_and_set_bit - Atomically set a bit and return the old bit value * @nr: the bit to change * @bitmap: the bitmap to change * * Check and set the bit nr in bitmap. Assumes that nr is less than the size of * the bitmap. */ static inline bool ice_test_and_set_bit(u16 nr, ice_bitmap_t *bitmap) { return ice_test_and_set_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /* ice_zero_bitmap - set bits of bitmap to zero. * @bmp: bitmap to set zeros * @size: Size of the bitmaps in bits * * Set all of the bits in a bitmap to zero. Note that this function assumes it * operates on an ice_bitmap_t which was declared using ice_declare_bitmap. It * will zero every bit in the last chunk, even if those bits are beyond the * size. */ static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size) { ice_memset(bmp, 0, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t), ICE_NONDMA_MEM); } /** * ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap * @dst: Destination bitmap that receives the result of the operation * @bmp1: The first bitmap to intersect * @bmp2: The second bitmap to intersect with the first * @size: Size of the bitmaps in bits * * This function performs a bitwise AND on two "source" bitmaps of the same size * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same * size as the "source" bitmaps to avoid buffer overflows. This function returns * a non-zero value if at least one bit location from both "source" bitmaps is * non-zero. */ static inline int ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, const ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t res = 0, mask; u16 i; /* Handle all but the last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) { dst[i] = bmp1[i] & bmp2[i]; res |= dst[i]; } /* We want to take care not to modify any bits outside of the bitmap * size, even in the destination bitmap. Thus, we won't directly * assign the last bitmap, but instead use a bitmask to ensure we only * modify bits which are within the size, and leave any bits above the * size value alone. */ mask = LAST_CHUNK_MASK(size); dst[i] = (dst[i] & ~mask) | ((bmp1[i] & bmp2[i]) & mask); res |= dst[i] & mask; return res != 0; }
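/* A short usage sketch for the helpers above (names illustrative only):
 * declare two bitmaps, set a shared bit, and use ice_and_bitmap's return
 * value to test whether the intersection is non-empty.
 */
static inline bool
ice_example_bitmaps_intersect(void)
{
	ice_declare_bitmap(a, 64);
	ice_declare_bitmap(b, 64);
	ice_declare_bitmap(res, 64);

	ice_zero_bitmap(a, 64);
	ice_zero_bitmap(b, 64);
	ice_set_bit(3, a);
	ice_set_bit(3, b);
	ice_set_bit(40, b);

	/* Bit 3 is set in both inputs, so this returns non-zero */
	return ice_and_bitmap(res, a, b, 64) != 0;
}

/**
 * ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
 * @dst: Destination bitmap that receives the result of the operation
 * @bmp1: The first bitmap to OR
 * @bmp2: The second bitmap to OR with the first
 * @size: Size of the bitmaps in bits
 *
 * This function performs a bitwise OR on two "source" bitmaps of the same size
 * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
 * size as the "source" bitmaps to avoid buffer overflows.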
*/ static inline void ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, const ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t mask; u16 i; /* Handle all but last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) dst[i] = bmp1[i] | bmp2[i]; /* We want to only OR bits within the size. Furthermore, we also do * not want to modify destination bits which are beyond the specified * size. Use a bitmask to ensure that we only modify the bits that are * within the specified size. */ mask = LAST_CHUNK_MASK(size); dst[i] = (dst[i] & ~mask) | ((bmp1[i] | bmp2[i]) & mask); } /** * ice_xor_bitmap - bitwise XOR 2 bitmaps and store result in dst bitmap * @dst: Destination bitmap that receives the result of the operation * @bmp1: The first bitmap of the XOR operation * @bmp2: The second bitmap to XOR with the first * @size: Size of the bitmaps in bits * * This function performs a bitwise XOR on two "source" bitmaps of the same size * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same * size as the "source" bitmaps to avoid buffer overflows. */ static inline void ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, const ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t mask; u16 i; /* Handle all but last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) dst[i] = bmp1[i] ^ bmp2[i]; /* We want to only XOR bits within the size. Furthermore, we also do * not want to modify destination bits which are beyond the specified * size. Use a bitmask to ensure that we only modify the bits that are * within the specified size. */ mask = LAST_CHUNK_MASK(size); dst[i] = (dst[i] & ~mask) | ((bmp1[i] ^ bmp2[i]) & mask); } /** * ice_andnot_bitmap - bitwise ANDNOT 2 bitmaps and store result in dst bitmap * @dst: Destination bitmap that receives the result of the operation * @bmp1: The first bitmap of the ANDNOT operation * @bmp2: The second bitmap of the ANDNOT operation * @size: Size of the bitmaps in bits * * This function performs a bitwise ANDNOT (dst = bmp1 & ~bmp2) on two "source" * bitmaps of the same size, and stores the result to "dst" bitmap. The "dst" * bitmap must be of the same size as the "source" bitmaps to avoid buffer * overflows. */ static inline void ice_andnot_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, const ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t mask; u16 i; /* Handle all but last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) dst[i] = bmp1[i] & ~bmp2[i]; /* We want to only clear bits within the size. Furthermore, we also do * not want to modify destination bits which are beyond the specified * size. Use a bitmask to ensure that we only modify the bits that are * within the specified size. */ mask = LAST_CHUNK_MASK(size); dst[i] = (dst[i] & ~mask) | ((bmp1[i] & ~bmp2[i]) & mask); } /** * ice_find_next_bit - Find the index of the next set bit of a bitmap * @bitmap: the bitmap to scan * @size: the size in bits of the bitmap * @offset: the offset to start at * * Scans the bitmap and returns the index of the first set bit which is equal * to or after the specified offset. Will return size if no bits are set.
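 *
 * For example, every set bit can be visited with the loop below (a sketch
 * equivalent to the ice_for_each_set_bit() helper defined later in this
 * header; use_bit() is a hypothetical consumer):
 *
 *	u16 bit;
 *
 *	for (bit = ice_find_next_bit(bm, size, 0); bit < size;
 *	     bit = ice_find_next_bit(bm, size, bit + 1))
 *		use_bit(bit);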
*/ static inline u16 ice_find_next_bit(const ice_bitmap_t *bitmap, u16 size, u16 offset) { u16 i, j; if (offset >= size) return size; /* Since the starting position may not be directly on a chunk * boundary, we need to be careful to handle the first chunk specially */ i = BIT_CHUNK(offset); if (bitmap[i] != 0) { u16 off = i * BITS_PER_CHUNK; for (j = offset % BITS_PER_CHUNK; j < BITS_PER_CHUNK; j++) { if (ice_is_bit_set(bitmap, off + j)) return min(size, (u16)(off + j)); } } /* Now we handle the remaining chunks, if any */ for (i++; i < BITS_TO_CHUNKS(size); i++) { if (bitmap[i] != 0) { u16 off = i * BITS_PER_CHUNK; for (j = 0; j < BITS_PER_CHUNK; j++) { if (ice_is_bit_set(bitmap, off + j)) return min(size, (u16)(off + j)); } } } return size; } /** * ice_find_first_bit - Find the index of the first set bit of a bitmap * @bitmap: the bitmap to scan * @size: the size in bits of the bitmap * * Scans the bitmap and returns the index of the first set bit. Will return * size if no bits are set. */ static inline u16 ice_find_first_bit(const ice_bitmap_t *bitmap, u16 size) { return ice_find_next_bit(bitmap, size, 0); } #define ice_for_each_set_bit(_bitpos, _addr, _maxlen) \ for ((_bitpos) = ice_find_first_bit((_addr), (_maxlen)); \ (_bitpos) < (_maxlen); \ (_bitpos) = ice_find_next_bit((_addr), (_maxlen), (_bitpos) + 1)) /** * ice_is_any_bit_set - Return true if any bit in the bitmap is set * @bitmap: the bitmap to check * @size: the size of the bitmap * * Equivalent to checking if ice_find_first_bit returns a value less than the * bitmap size. */ static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, u16 size) { return ice_find_first_bit(bitmap, size) < size; } /** * ice_cp_bitmap - copy bitmaps. * @dst: bitmap destination * @src: bitmap to copy from * @size: Size of the bitmaps in bits * * This function copies the bitmap from src to dst. Note that this function * assumes it is operating on a bitmap declared using ice_declare_bitmap. It * will copy the entire last chunk even if it contains bits beyond the size. */ static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, u16 size) { ice_memcpy(dst, src, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t), ICE_NONDMA_TO_NONDMA); } /** * ice_bitmap_set - set a number of bits in bitmap from a starting position * @dst: bitmap destination * @pos: first bit position to set * @num_bits: number of bits to set * * This function sets bits in a bitmap from pos to (pos + num_bits) - 1. * Note that this function assumes it is operating on a bitmap declared using * ice_declare_bitmap. */ static inline void ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits) { u16 i; for (i = pos; i < pos + num_bits; i++) ice_set_bit(i, dst); } /** * ice_bitmap_hweight - Hamming weight of bitmap * @bm: bitmap pointer * @size: size of bitmap (in bits) * * This function determines the number of set bits in a bitmap. * Note that this function assumes it is operating on a bitmap declared using * ice_declare_bitmap. */ static inline int ice_bitmap_hweight(ice_bitmap_t *bm, u16 size) { int count = 0; u16 bit = 0; while (size > (bit = ice_find_next_bit(bm, size, bit))) { count++; bit++; } return count; } /** * ice_cmp_bitmap - compares two bitmaps. * @bmp1: the bitmap to compare * @bmp2: the bitmap to compare with bmp1 * @size: Size of the bitmaps in bits * * This function compares two bitmaps, and returns true if they are equal and * false otherwise.
*/ static inline bool ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t mask; u16 i; /* Handle all but last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) if (bmp1[i] != bmp2[i]) return false; /* We want to only compare bits within the size */ mask = LAST_CHUNK_MASK(size); if ((bmp1[i] & mask) != (bmp2[i] & mask)) return false; return true; } +/** + * ice_bitmap_from_array32 - copies u32 array source into bitmap destination + * @dst: the destination bitmap + * @src: the source u32 array + * @size: size of the bitmap (in bits) + * + * This function copies the src bitmap stored in an u32 array into the dst + * bitmap stored as an ice_bitmap_t. + */ +static inline void +ice_bitmap_from_array32(ice_bitmap_t *dst, u32 *src, u16 size) +{ + u32 remaining_bits, i; + +#define BITS_PER_U32 (sizeof(u32) * BITS_PER_BYTE) + /* clear bitmap so we only have to set when iterating */ + ice_zero_bitmap(dst, size); + + for (i = 0; i < (u32)(size / BITS_PER_U32); i++) { + u32 bit_offset = i * BITS_PER_U32; + u32 entry = src[i]; + u32 j; + + for (j = 0; j < BITS_PER_U32; j++) { + if (entry & BIT(j)) + ice_set_bit((u16)(j + bit_offset), dst); + } + } + + /* still need to check the leftover bits (i.e. if size isn't evenly + * divisible by BITS_PER_U32 + **/ + remaining_bits = size % BITS_PER_U32; + if (remaining_bits) { + u32 bit_offset = i * BITS_PER_U32; + u32 entry = src[i]; + u32 j; + + for (j = 0; j < remaining_bits; j++) { + if (entry & BIT(j)) + ice_set_bit((u16)(j + bit_offset), dst); + } + } +} + #undef BIT_CHUNK #undef BIT_IN_CHUNK #undef LAST_CHUNK_BITS #undef LAST_CHUNK_MASK #endif /* _ICE_BITOPS_H_ */ diff --git a/sys/dev/ice/ice_common.c b/sys/dev/ice/ice_common.c index 5e2e4340c75c..1e5d6dcb7d30 100644 --- a/sys/dev/ice/ice_common.c +++ b/sys/dev/ice/ice_common.c @@ -1,5248 +1,5490 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include "ice_common.h" #include "ice_sched.h" #include "ice_adminq_cmd.h" #include "ice_flow.h" #include "ice_switch.h" #define ICE_PF_RESET_WAIT_COUNT 300 /** * ice_set_mac_type - Sets MAC type * @hw: pointer to the HW structure * * This function sets the MAC type of the adapter based on the * vendor ID and device ID stored in the HW structure. */ enum ice_status ice_set_mac_type(struct ice_hw *hw) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (hw->vendor_id != ICE_INTEL_VENDOR_ID) return ICE_ERR_DEVICE_NOT_SUPPORTED; switch (hw->device_id) { case ICE_DEV_ID_E810C_BACKPLANE: case ICE_DEV_ID_E810C_QSFP: case ICE_DEV_ID_E810C_SFP: case ICE_DEV_ID_E810_XXV_BACKPLANE: case ICE_DEV_ID_E810_XXV_QSFP: case ICE_DEV_ID_E810_XXV_SFP: hw->mac_type = ICE_MAC_E810; break; case ICE_DEV_ID_E822C_10G_BASE_T: case ICE_DEV_ID_E822C_BACKPLANE: case ICE_DEV_ID_E822C_QSFP: case ICE_DEV_ID_E822C_SFP: case ICE_DEV_ID_E822C_SGMII: case ICE_DEV_ID_E822L_10G_BASE_T: case ICE_DEV_ID_E822L_BACKPLANE: case ICE_DEV_ID_E822L_SFP: case ICE_DEV_ID_E822L_SGMII: case ICE_DEV_ID_E823L_10G_BASE_T: case ICE_DEV_ID_E823L_1GBE: case ICE_DEV_ID_E823L_BACKPLANE: case ICE_DEV_ID_E823L_QSFP: case ICE_DEV_ID_E823L_SFP: hw->mac_type = ICE_MAC_GENERIC; break; default: hw->mac_type = ICE_MAC_UNKNOWN; break; } ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type); return ICE_SUCCESS; } /** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure * * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port * configuration, flow director filters, etc.). */ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /** * ice_aq_manage_mac_read - manage MAC address read command * @hw: pointer to the HW struct * @buf: a virtual buffer to hold the manage MAC read response * @buf_size: Size of the virtual buffer * @cd: pointer to command details structure or NULL * * This function is used to return per PF station MAC address (0x0107). * NOTE: Upon successful completion of this command, MAC address information * is returned in user specified buffer. Please interpret user specified * buffer as "manage_mac_read" response. * Response such as various MAC addresses are stored in HW struct (port.mac) * ice_discover_dev_caps is expected to be called before this function is * called. 
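 *
 * A minimal calling sketch; the two-entry buffer mirrors the sizing used by
 * ice_init_hw() later in this file, since a single port can report up to two
 * (LAN and WoL) addresses:
 *
 *	struct ice_aqc_manage_mac_read_resp buf[2];
 *	enum ice_status status;
 *
 *	status = ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);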
*/ enum ice_status ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_manage_mac_read_resp *resp; struct ice_aqc_manage_mac_read *cmd; struct ice_aq_desc desc; enum ice_status status; u16 flags; u8 i; cmd = &desc.params.mac_read; if (buf_size < sizeof(*resp)) return ICE_ERR_BUF_TOO_SHORT; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (status) return status; resp = (struct ice_aqc_manage_mac_read_resp *)buf; flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M; if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); return ICE_ERR_CFG; } /* A single port can report up to two (LAN and WoL) addresses */ for (i = 0; i < cmd->num_addr; i++) if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { ice_memcpy(hw->port_info->mac.lan_addr, resp[i].mac_addr, ETH_ALEN, ICE_DMA_TO_NONDMA); ice_memcpy(hw->port_info->mac.perm_addr, resp[i].mac_addr, ETH_ALEN, ICE_DMA_TO_NONDMA); break; } return ICE_SUCCESS; } /** * ice_aq_get_phy_caps - returns PHY capabilities * @pi: port information structure * @qual_mods: report qualified modules * @report_mode: report mode capabilities * @pcaps: structure for PHY capabilities to be filled * @cd: pointer to command details structure or NULL * * Returns the various PHY capabilities supported on the Port (0x0600) */ enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd) { struct ice_aqc_get_phy_caps *cmd; u16 pcaps_size = sizeof(*pcaps); struct ice_aq_desc desc; enum ice_status status; struct ice_hw *hw; cmd = &desc.params.get_phy; if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) return ICE_ERR_PARAM; hw = pi->hw; + if (report_mode == ICE_AQC_REPORT_DFLT_CFG && + !ice_fw_supports_report_dflt_cfg(hw)) + return ICE_ERR_PARAM; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); if (qual_mods) cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM); cmd->param0 |= CPU_TO_LE16(report_mode); status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd); ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n", report_mode); ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low)); ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high)); ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps); ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", pcaps->low_power_ctrl_an); ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap); ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", pcaps->eeer_value); ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n", pcaps->link_fec_options); ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n", pcaps->module_compliance_enforcement); ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n", pcaps->extended_compliance_code); ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n", pcaps->module_type[0]); ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n", pcaps->module_type[1]); ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n", pcaps->module_type[2]); - if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) { + if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low); 
pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high); ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type, sizeof(pi->phy.link_info.module_type), ICE_NONDMA_TO_NONDMA); } return status; } /** * ice_aq_get_link_topo_handle - get link topology node return status * @pi: port information structure * @node_type: requested node type * @cd: pointer to command details structure or NULL * * Get link topology node return status for specified node type (0x06E0) * * Node type cage can be used to determine if cage is present. If AQC * returns error (ENOENT), then no cage present. If no cage present, then * connection type is backplane or BASE-T. */ static enum ice_status ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, struct ice_sq_cd *cd) { struct ice_aqc_get_link_topo *cmd; struct ice_aq_desc desc; cmd = &desc.params.get_link_topo; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S); /* set node type */ cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); } /** * ice_is_media_cage_present * @pi: port information structure * * Returns true if media cage is present, else false. If no cage, then * media type is backplane or BASE-T. */ static bool ice_is_media_cage_present(struct ice_port_info *pi) { /* Node type cage can be used to determine if cage is present. If AQC * returns error (ENOENT), then no cage present. If no cage present then * connection type is backplane or BASE-T. */ return !ice_aq_get_link_topo_handle(pi, ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE, NULL); } /** * ice_get_media_type - Gets media type * @pi: port information structure */ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) { struct ice_link_status *hw_link_info; if (!pi) return ICE_MEDIA_UNKNOWN; hw_link_info = &pi->phy.link_info; if (hw_link_info->phy_type_low && hw_link_info->phy_type_high) /* If more than one media type is selected, report unknown */ return ICE_MEDIA_UNKNOWN; if (hw_link_info->phy_type_low) { /* 1G SGMII is a special case where some DA cable PHYs * may show this as an option when it really shouldn't * be since SGMII is meant to be between a MAC and a PHY * in a backplane. 
Try to detect this case and handle it */ if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII && (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE || hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE)) return ICE_MEDIA_DA; switch (hw_link_info->phy_type_low) { case ICE_PHY_TYPE_LOW_1000BASE_SX: case ICE_PHY_TYPE_LOW_1000BASE_LX: case ICE_PHY_TYPE_LOW_10GBASE_SR: case ICE_PHY_TYPE_LOW_10GBASE_LR: case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: case ICE_PHY_TYPE_LOW_40GBASE_SR4: case ICE_PHY_TYPE_LOW_40GBASE_LR4: case ICE_PHY_TYPE_LOW_50GBASE_SR2: case ICE_PHY_TYPE_LOW_50GBASE_LR2: case ICE_PHY_TYPE_LOW_50GBASE_SR: case ICE_PHY_TYPE_LOW_50GBASE_FR: case ICE_PHY_TYPE_LOW_50GBASE_LR: case ICE_PHY_TYPE_LOW_100GBASE_SR4: case ICE_PHY_TYPE_LOW_100GBASE_LR4: case ICE_PHY_TYPE_LOW_100GBASE_SR2: case ICE_PHY_TYPE_LOW_100GBASE_DR: return ICE_MEDIA_FIBER; case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: return ICE_MEDIA_FIBER; case ICE_PHY_TYPE_LOW_100BASE_TX: case ICE_PHY_TYPE_LOW_1000BASE_T: case ICE_PHY_TYPE_LOW_2500BASE_T: case ICE_PHY_TYPE_LOW_5GBASE_T: case ICE_PHY_TYPE_LOW_10GBASE_T: case ICE_PHY_TYPE_LOW_25GBASE_T: return ICE_MEDIA_BASET; case ICE_PHY_TYPE_LOW_10G_SFI_DA: case ICE_PHY_TYPE_LOW_25GBASE_CR: case ICE_PHY_TYPE_LOW_25GBASE_CR_S: case ICE_PHY_TYPE_LOW_25GBASE_CR1: case ICE_PHY_TYPE_LOW_40GBASE_CR4: case ICE_PHY_TYPE_LOW_50GBASE_CR2: case ICE_PHY_TYPE_LOW_50GBASE_CP: case ICE_PHY_TYPE_LOW_100GBASE_CR4: case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: case ICE_PHY_TYPE_LOW_100GBASE_CP2: return ICE_MEDIA_DA; case ICE_PHY_TYPE_LOW_25G_AUI_C2C: case ICE_PHY_TYPE_LOW_40G_XLAUI: case ICE_PHY_TYPE_LOW_50G_LAUI2: case ICE_PHY_TYPE_LOW_50G_AUI2: case ICE_PHY_TYPE_LOW_50G_AUI1: case ICE_PHY_TYPE_LOW_100G_AUI4: case ICE_PHY_TYPE_LOW_100G_CAUI4: if (ice_is_media_cage_present(pi)) return ICE_MEDIA_AUI; /* fall-through */ case ICE_PHY_TYPE_LOW_1000BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_X: case ICE_PHY_TYPE_LOW_5GBASE_KR: case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: case ICE_PHY_TYPE_LOW_25GBASE_KR: case ICE_PHY_TYPE_LOW_25GBASE_KR1: case ICE_PHY_TYPE_LOW_25GBASE_KR_S: case ICE_PHY_TYPE_LOW_40GBASE_KR4: case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: case ICE_PHY_TYPE_LOW_50GBASE_KR2: case ICE_PHY_TYPE_LOW_100GBASE_KR4: case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: return ICE_MEDIA_BACKPLANE; } } else { switch (hw_link_info->phy_type_high) { case ICE_PHY_TYPE_HIGH_100G_AUI2: case ICE_PHY_TYPE_HIGH_100G_CAUI2: if (ice_is_media_cage_present(pi)) return ICE_MEDIA_AUI; /* fall-through */ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: return ICE_MEDIA_BACKPLANE; case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: return ICE_MEDIA_FIBER; } } return ICE_MEDIA_UNKNOWN; } /** * ice_aq_get_link_info * @pi: port information structure * @ena_lse: enable/disable LinkStatusEvent reporting * @link: pointer to link status structure - optional * @cd: pointer to command details structure or NULL * * Get Link Status (0x607). Returns the link status of the adapter. 
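 *
 * A typical polling sketch (ICE_AQ_LINK_UP is the link-up flag from
 * ice_adminq_cmd.h; handle_link_up() is a hypothetical caller hook):
 *
 *	struct ice_link_status link = { 0 };
 *
 *	if (!ice_aq_get_link_info(pi, false, &link, NULL) &&
 *	    (link.link_info & ICE_AQ_LINK_UP))
 *		handle_link_up(&link);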
*/ enum ice_status ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { struct ice_aqc_get_link_status_data link_data = { 0 }; struct ice_aqc_get_link_status *resp; struct ice_link_status *li_old, *li; enum ice_media_type *hw_media_type; struct ice_fc_info *hw_fc_info; bool tx_pause, rx_pause; struct ice_aq_desc desc; enum ice_status status; struct ice_hw *hw; u16 cmd_flags; if (!pi) return ICE_ERR_PARAM; hw = pi->hw; li_old = &pi->phy.link_info_old; hw_media_type = &pi->phy.media_type; li = &pi->phy.link_info; hw_fc_info = &pi->fc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS; resp = &desc.params.get_link_status; resp->cmd_flags = CPU_TO_LE16(cmd_flags); resp->lport_num = pi->lport; status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); if (status != ICE_SUCCESS) return status; /* save off old link status information */ *li_old = *li; /* update current link status information */ li->link_speed = LE16_TO_CPU(link_data.link_speed); li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low); li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high); *hw_media_type = ice_get_media_type(pi); li->link_info = link_data.link_info; + li->link_cfg_err = link_data.link_cfg_err; li->an_info = link_data.an_info; li->ext_info = link_data.ext_info; li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size); li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; li->topo_media_conflict = link_data.topo_media_conflict; li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M | ICE_AQ_CFG_PACING_TYPE_M); /* update fc info */ tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX); if (tx_pause && rx_pause) hw_fc_info->current_mode = ICE_FC_FULL; else if (tx_pause) hw_fc_info->current_mode = ICE_FC_TX_PAUSE; else if (rx_pause) hw_fc_info->current_mode = ICE_FC_RX_PAUSE; else hw_fc_info->current_mode = ICE_FC_NONE; li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED)); ice_debug(hw, ICE_DBG_LINK, "get link info\n"); ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed); ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", (unsigned long long)li->phy_type_low); ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", (unsigned long long)li->phy_type_high); ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type); ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info); ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info); ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info); ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info); ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena); ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n", li->max_frame_size); ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing); /* save link status information */ if (link) *link = *li; /* flag cleared so calling functions don't call AQ again */ pi->phy.get_link_info = false; return ICE_SUCCESS; } /** * ice_fill_tx_timer_and_fc_thresh * @hw: pointer to the HW struct * @cmd: pointer to MAC cfg structure * * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command * descriptor */ static void ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, struct ice_aqc_set_mac_cfg *cmd) { u16 fc_thres_val, tx_timer_val; u32 val; /* We read back the transmit timer and fc threshold value of * LFC. 
Thus, we will use index = * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. * * Also, because we are operating on transmit timer and fc * threshold of LFC, we don't turn on any bit in tx_tmr_priority */ #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX /* Retrieve the transmit timer */ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC)); tx_timer_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M; cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val); /* Retrieve the fc threshold */ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC)); fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M; cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val); } /** * ice_aq_set_mac_cfg * @hw: pointer to the HW struct * @max_frame_size: Maximum Frame Size to be supported * @cd: pointer to command details structure or NULL * * Set MAC configuration (0x0603) */ enum ice_status ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_cfg *cmd; struct ice_aq_desc desc; cmd = &desc.params.set_mac_cfg; if (max_frame_size == 0) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); cmd->max_frame_size = CPU_TO_LE16(max_frame_size); ice_fill_tx_timer_and_fc_thresh(hw, cmd); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_init_fltr_mgmt_struct - initializes filter management list and locks * @hw: pointer to the HW struct */ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; enum ice_status status; hw->switch_info = (struct ice_switch_info *) ice_malloc(hw, sizeof(*hw->switch_info)); sw = hw->switch_info; if (!sw) return ICE_ERR_NO_MEMORY; INIT_LIST_HEAD(&sw->vsi_list_map_head); sw->prof_res_bm_init = 0; status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list); if (status) { ice_free(hw, hw->switch_info); return status; } return ICE_SUCCESS; } /** * ice_cleanup_fltr_mgmt_single - clears a single filter management struct * @hw: pointer to the HW struct * @sw: pointer to switch info struct for which function clears filters */ static void ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw) { struct ice_vsi_list_map_info *v_pos_map; struct ice_vsi_list_map_info *v_tmp_map; struct ice_sw_recipe *recps; u8 i; if (!sw) return; LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, ice_vsi_list_map_info, list_entry) { LIST_DEL(&v_pos_map->list_entry); ice_free(hw, v_pos_map); } recps = sw->recp_list; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { struct ice_recp_grp_entry *rg_entry, *tmprg_entry; recps[i].root_rid = i; LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry, &recps[i].rg_list, ice_recp_grp_entry, l_entry) { LIST_DEL(&rg_entry->l_entry); ice_free(hw, rg_entry); } if (recps[i].adv_rule) { struct ice_adv_fltr_mgmt_list_entry *tmp_entry; struct ice_adv_fltr_mgmt_list_entry *lst_itr; ice_destroy_lock(&recps[i].filt_rule_lock); LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, &recps[i].filt_rules, ice_adv_fltr_mgmt_list_entry, list_entry) { LIST_DEL(&lst_itr->list_entry); ice_free(hw, lst_itr->lkups); ice_free(hw, lst_itr); } } else { struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; ice_destroy_lock(&recps[i].filt_rule_lock); LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, &recps[i].filt_rules, ice_fltr_mgmt_list_entry, list_entry) { LIST_DEL(&lst_itr->list_entry); ice_free(hw, lst_itr); } } if (recps[i].root_buf) ice_free(hw, recps[i].root_buf); } ice_rm_sw_replay_rule_info(hw, sw); ice_free(hw,
sw->recp_list); ice_free(hw, sw); } /** * ice_cleanup_all_fltr_mgmt - cleanup filter management list and locks * @hw: pointer to the HW struct */ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) { ice_cleanup_fltr_mgmt_single(hw, hw->switch_info); } /** * ice_get_itr_intrl_gran * @hw: pointer to the HW struct * * Determines the ITR/INTRL granularities based on the maximum aggregate * bandwidth according to the device's configuration during power-on. */ static void ice_get_itr_intrl_gran(struct ice_hw *hw) { u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> GL_PWR_MODE_CTL_CAR_MAX_BW_S; switch (max_agg_bw) { case ICE_MAX_AGG_BW_200G: case ICE_MAX_AGG_BW_100G: case ICE_MAX_AGG_BW_50G: hw->itr_gran = ICE_ITR_GRAN_ABOVE_25; hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25; break; case ICE_MAX_AGG_BW_25G: hw->itr_gran = ICE_ITR_GRAN_MAX_25; hw->intrl_gran = ICE_INTRL_GRAN_MAX_25; break; } } /** * ice_print_rollback_msg - print FW rollback message * @hw: pointer to the hardware structure */ void ice_print_rollback_msg(struct ice_hw *hw) { char nvm_str[ICE_NVM_VER_LEN] = { 0 }; struct ice_orom_info *orom; struct ice_nvm_info *nvm; orom = &hw->flash.orom; nvm = &hw->flash.nvm; SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor, nvm->eetrack, orom->major, orom->build, orom->patch); ice_warn(hw, "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n", nvm_str, hw->fw_maj_ver, hw->fw_min_ver); } /** * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ enum ice_status ice_init_hw(struct ice_hw *hw) { struct ice_aqc_get_phy_caps_data *pcaps; enum ice_status status; u16 mac_buf_len; void *mac_buf; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Set MAC type based on DeviceID */ status = ice_set_mac_type(hw); if (status) return status; hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNCTION_NUMBER_M) >> PF_FUNC_RID_FUNCTION_NUMBER_S; status = ice_reset(hw, ICE_RESET_PFR); if (status) return status; ice_get_itr_intrl_gran(hw); status = ice_create_all_ctrlq(hw); if (status) goto err_unroll_cqinit; status = ice_init_nvm(hw); if (status) goto err_unroll_cqinit; if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK) ice_print_rollback_msg(hw); status = ice_clear_pf_cfg(hw); if (status) goto err_unroll_cqinit; ice_clear_pxe_mode(hw); status = ice_get_caps(hw); if (status) goto err_unroll_cqinit; hw->port_info = (struct ice_port_info *) ice_malloc(hw, sizeof(*hw->port_info)); if (!hw->port_info) { status = ICE_ERR_NO_MEMORY; goto err_unroll_cqinit; } /* set the back pointer to HW */ hw->port_info->hw = hw; /* Initialize port_info struct with switch configuration data */ status = ice_get_initial_sw_cfg(hw); if (status) goto err_unroll_alloc; hw->evb_veb = true; /* Query the allocated resources for Tx scheduler */ status = ice_sched_query_res_alloc(hw); if (status) { ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n"); goto err_unroll_alloc; } ice_sched_get_psm_clk_freq(hw); /* Initialize port_info struct with scheduler data */ status = ice_sched_init_port(hw->port_info); if (status) goto err_unroll_sched; pcaps = (struct ice_aqc_get_phy_caps_data *) ice_malloc(hw, sizeof(*pcaps)); if (!pcaps) { status = ICE_ERR_NO_MEMORY; goto err_unroll_sched; } /* Initialize port_info struct with PHY capabilities */ status = 
ice_aq_get_phy_caps(hw->port_info, false, - ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); + ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); ice_free(hw, pcaps); if (status) - ice_debug(hw, ICE_DBG_PHY, "Get PHY capabilities failed, continuing anyway\n"); + ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n", + status); /* Initialize port_info struct with link information */ status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); if (status) goto err_unroll_sched; /* need a valid SW entry point to build a Tx tree */ if (!hw->sw_entry_point_layer) { ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); status = ICE_ERR_CFG; goto err_unroll_sched; } INIT_LIST_HEAD(&hw->agg_list); /* Initialize max burst size */ if (!hw->max_burst_size) ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE); status = ice_init_fltr_mgmt_struct(hw); if (status) goto err_unroll_sched; /* Get MAC information */ /* A single port can report up to two (LAN and WoL) addresses */ mac_buf = ice_calloc(hw, 2, sizeof(struct ice_aqc_manage_mac_read_resp)); mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { status = ICE_ERR_NO_MEMORY; goto err_unroll_fltr_mgmt_struct; } status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL); ice_free(hw, mac_buf); if (status) goto err_unroll_fltr_mgmt_struct; /* enable jumbo frame support at MAC level */ status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); if (status) goto err_unroll_fltr_mgmt_struct; status = ice_init_hw_tbls(hw); if (status) goto err_unroll_fltr_mgmt_struct; ice_init_lock(&hw->tnl_lock); - ice_init_vlan_mode_ops(hw); - return ICE_SUCCESS; err_unroll_fltr_mgmt_struct: ice_cleanup_fltr_mgmt_struct(hw); err_unroll_sched: ice_sched_cleanup_all(hw); err_unroll_alloc: ice_free(hw, hw->port_info); hw->port_info = NULL; err_unroll_cqinit: ice_destroy_all_ctrlq(hw); return status; } /** * ice_deinit_hw - unroll initialization operations done by ice_init_hw * @hw: pointer to the hardware structure * * This should be called only during nominal operation, not as a result of * ice_init_hw() failing since ice_init_hw() will take care of unrolling * applicable initializations if it fails for any reason. */ void ice_deinit_hw(struct ice_hw *hw) { ice_cleanup_fltr_mgmt_struct(hw); ice_sched_cleanup_all(hw); ice_sched_clear_agg(hw); ice_free_seg(hw); ice_free_hw_tbls(hw); ice_destroy_lock(&hw->tnl_lock); if (hw->port_info) { ice_free(hw, hw->port_info); hw->port_info = NULL; } ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ ice_clear_all_vsi_ctx(hw); } /** * ice_check_reset - Check to see if a global reset is complete * @hw: pointer to the hardware structure */ enum ice_status ice_check_reset(struct ice_hw *hw) { u32 cnt, reg = 0, grst_timeout, uld_mask; /* Poll for Device Active state in case a recent CORER, GLOBR, * or EMPR has occurred. The grst delay value is in 100ms units. * Add 1sec for outstanding AQ commands that can take a long time. 
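 *
 * As a worked example: a GLGEN_RSTCTL.GRSTDEL value of 35 means 3.5 seconds
 * of reset delay, so the first poll loop below runs for up to 35 + 10 = 45
 * iterations of 100ms each (the extra 10 iterations being the added 1sec of
 * headroom for outstanding AQ commands).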
*/ grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> GLGEN_RSTCTL_GRSTDEL_S) + 10; for (cnt = 0; cnt < grst_timeout; cnt++) { ice_msec_delay(100, true); reg = rd32(hw, GLGEN_RSTAT); if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) break; } if (cnt == grst_timeout) { ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; } #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ GLNVM_ULD_PCIER_DONE_1_M |\ GLNVM_ULD_CORER_DONE_M |\ GLNVM_ULD_GLOBR_DONE_M |\ GLNVM_ULD_POR_DONE_M |\ GLNVM_ULD_POR_DONE_1_M |\ GLNVM_ULD_PCIER_DONE_2_M) uld_mask = ICE_RESET_DONE_MASK; /* Device is Active; check Global Reset processes are done */ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, GLNVM_ULD) & uld_mask; if (reg == uld_mask) { ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); break; } ice_msec_delay(10, true); } if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", reg); return ICE_ERR_RESET_FAILED; } return ICE_SUCCESS; } /** * ice_pf_reset - Reset the PF * @hw: pointer to the hardware structure * * If a global reset has been triggered, this function checks * for its completion and then issues the PF reset */ static enum ice_status ice_pf_reset(struct ice_hw *hw) { u32 cnt, reg; /* If at function entry a global reset was already in progress, i.e. * state is not 'device active' or any of the reset done bits are not * set in GLNVM_ULD, there is no need for a PF Reset; poll until the * global reset is done. */ if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) || (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { /* poll on global reset currently in progress until done */ if (ice_check_reset(hw)) return ICE_ERR_RESET_FAILED; return ICE_SUCCESS; } /* Reset the PF */ reg = rd32(hw, PFGEN_CTRL); wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M)); /* Wait for the PFR to complete. The wait time is the global config lock * timeout plus the PFR timeout which will account for a possible reset * that is occurring during a download package operation. */ for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, PFGEN_CTRL); if (!(reg & PFGEN_CTRL_PFSWR_M)) break; ice_msec_delay(1, true); } if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; } return ICE_SUCCESS; } /** * ice_reset - Perform different types of reset * @hw: pointer to the hardware structure * @req: reset request * * This function triggers a reset as specified by the req parameter. * * Note: * If anything other than a PF reset is triggered, PXE mode is restored. * This has to be cleared using ice_clear_pxe_mode again, once the AQ * interface has been restored in the rebuild flow. 
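 *
 * A minimal usage sketch for a driver-requested core reset; the rebuild step
 * is a placeholder for the caller's own flow, and the ordering follows the
 * note above:
 *
 *	status = ice_reset(hw, ICE_RESET_CORER);
 *	if (status)
 *		return status;
 *	// ... rebuild the AQ interface ...
 *	ice_clear_pxe_mode(hw);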
*/ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) { u32 val = 0; switch (req) { case ICE_RESET_PFR: return ice_pf_reset(hw); case ICE_RESET_CORER: ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n"); val = GLGEN_RTRIG_CORER_M; break; case ICE_RESET_GLOBR: ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); val = GLGEN_RTRIG_GLOBR_M; break; default: return ICE_ERR_PARAM; } val |= rd32(hw, GLGEN_RTRIG); wr32(hw, GLGEN_RTRIG, val); ice_flush(hw); /* wait for the FW to be ready */ return ice_check_reset(hw); } /** * ice_copy_rxq_ctx_to_hw * @hw: pointer to the hardware structure * @ice_rxq_ctx: pointer to the rxq context * @rxq_index: the index of the Rx queue * * Copies rxq context from dense structure to HW register space */ static enum ice_status ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) { u8 i; if (!ice_rxq_ctx) return ICE_ERR_BAD_PTR; if (rxq_index > QRX_CTRL_MAX_INDEX) return ICE_ERR_PARAM; /* Copy each dword separately to HW */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { wr32(hw, QRX_CONTEXT(i, rxq_index), *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); } return ICE_SUCCESS; } /* LAN Rx Queue Context */ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89), ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102), ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109), ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114), ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116), ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117), ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119), ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120), ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124), ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127), ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174), ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193), ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194), ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), { 0 } }; /** * ice_write_rxq_ctx * @hw: pointer to the hardware structure * @rlan_ctx: pointer to the rxq context * @rxq_index: the index of the Rx queue * * Converts rxq context from sparse to dense structure and then writes * it to HW register space and enables the hardware to prefetch descriptors * instead of only fetching them on demand */ enum ice_status ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; if (!rlan_ctx) return ICE_ERR_BAD_PTR; rlan_ctx->prefena = 1; ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); } /** * ice_clear_rxq_ctx * @hw: pointer to the hardware structure * @rxq_index: the index of the Rx queue to clear * * Clears rxq context in HW register space */ enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index) { u8 i; if (rxq_index > QRX_CTRL_MAX_INDEX) return ICE_ERR_PARAM; /* Clear each dword register separately */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) wr32(hw, QRX_CONTEXT(i, rxq_index), 0); return ICE_SUCCESS; } /* LAN Tx Queue Context */ const struct ice_ctx_ele ice_tlan_ctx_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), ICE_CTX_STORE(ice_tlan_ctx, 
port_num, 3, 57), ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60), ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65), ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68), ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78), ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80), ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90), ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91), ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92), ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93), ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101), ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102), ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103), ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104), ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105), ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114), ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128), ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129), ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135), ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148), ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152), ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153), ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164), ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), { 0 } }; /** * ice_copy_tx_cmpltnq_ctx_to_hw * @hw: pointer to the hardware structure * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context * @tx_cmpltnq_index: the index of the completion queue * * Copies Tx completion queue context from dense structure to HW register space */ static enum ice_status ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx, u32 tx_cmpltnq_index) { u8 i; if (!ice_tx_cmpltnq_ctx) return ICE_ERR_BAD_PTR; if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX) return ICE_ERR_PARAM; /* Copy each dword separately to HW */ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) { wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32))))); ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i, *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32))))); } return ICE_SUCCESS; } /* LAN Tx Completion Queue Context */ static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192), { 0 } }; /** * ice_write_tx_cmpltnq_ctx * @hw: pointer to the hardware structure * @tx_cmpltnq_ctx: pointer to the completion queue context * @tx_cmpltnq_index: the index of the completion queue * * Converts completion queue context from sparse to dense structure and then * writes it to HW register space */ enum ice_status ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx, u32 tx_cmpltnq_index) { u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 }; ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info); return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index); } /** * ice_clear_tx_cmpltnq_ctx * @hw: pointer to the hardware structure * @tx_cmpltnq_index: the 
index of the completion queue to clear * * Clears Tx completion queue context in HW register space */ enum ice_status ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index) { u8 i; if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX) return ICE_ERR_PARAM; /* Clear each dword register separately */ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0); return ICE_SUCCESS; } /** * ice_copy_tx_drbell_q_ctx_to_hw * @hw: pointer to the hardware structure * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context * @tx_drbell_q_index: the index of the doorbell queue * * Copies doorbell queue context from dense structure to HW register space */ static enum ice_status ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx, u32 tx_drbell_q_index) { u8 i; if (!ice_tx_drbell_q_ctx) return ICE_ERR_BAD_PTR; if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX) return ICE_ERR_PARAM; /* Copy each dword separately to HW */ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) { wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32))))); ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i, *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32))))); } return ICE_SUCCESS; } /* LAN Tx Doorbell Queue Context info */ static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0), ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64), ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80), ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84), ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94), ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96), ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104), ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108), ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112), ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128), ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144), { 0 } }; /** * ice_write_tx_drbell_q_ctx * @hw: pointer to the hardware structure * @tx_drbell_q_ctx: pointer to the doorbell queue context * @tx_drbell_q_index: the index of the doorbell queue * * Converts doorbell queue context from sparse to dense structure and then * writes it to HW register space */ enum ice_status ice_write_tx_drbell_q_ctx(struct ice_hw *hw, struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx, u32 tx_drbell_q_index) { u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 }; ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info); return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index); } /** * ice_clear_tx_drbell_q_ctx * @hw: pointer to the hardware structure * @tx_drbell_q_index: the index of the doorbell queue to clear * * Clears doorbell queue context in HW register space */ enum ice_status ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index) { u8 i; if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX) return ICE_ERR_PARAM; /* Clear each dword register separately */ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0); return ICE_SUCCESS; } /* FW Admin Queue command wrappers */ +/** + * ice_should_retry_sq_send_cmd + * @opcode: AQ opcode + * + * Decide if we should retry the send command routine for the ATQ, depending + * on the opcode. 
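+ *
+ * For example, ice_aqc_opc_get_link_topo is in the list below, so a
+ * transient ICE_AQ_RC_EBUSY seen by ice_is_media_cage_present() (which
+ * sends that opcode through ice_aq_send_cmd()) is retried up to
+ * ICE_SQ_SEND_MAX_EXECUTE times rather than surfaced to the caller.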
+ */ +static bool ice_should_retry_sq_send_cmd(u16 opcode) +{ + switch (opcode) { + case ice_aqc_opc_dnl_get_status: + case ice_aqc_opc_dnl_run: + case ice_aqc_opc_dnl_call: + case ice_aqc_opc_dnl_read_sto: + case ice_aqc_opc_dnl_write_sto: + case ice_aqc_opc_dnl_set_breakpoints: + case ice_aqc_opc_dnl_read_log: + case ice_aqc_opc_get_link_topo: + case ice_aqc_opc_done_alt_write: + case ice_aqc_opc_lldp_stop: + case ice_aqc_opc_lldp_start: + case ice_aqc_opc_lldp_filter_ctrl: + return true; + } + + return false; +} + +/** + * ice_sq_send_cmd_retry - send command to Control Queue (ATQ) + * @hw: pointer to the HW struct + * @cq: pointer to the specific Control queue + * @desc: prefilled descriptor describing the command + * @buf: buffer to use for indirect commands (or NULL for direct commands) + * @buf_size: size of buffer for indirect commands (or 0 for direct commands) + * @cd: pointer to command details structure + * + * Retry sending the FW Admin Queue command, multiple times, to the FW Admin + * Queue if the EBUSY AQ error is returned. + */ +static enum ice_status +ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, + struct ice_aq_desc *desc, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc_cpy; + enum ice_status status; + bool is_cmd_for_retry; + u8 *buf_cpy = NULL; + u8 idx = 0; + u16 opcode; + + opcode = LE16_TO_CPU(desc->opcode); + is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); + ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM); + + if (is_cmd_for_retry) { + if (buf) { + buf_cpy = (u8 *)ice_malloc(hw, buf_size); + if (!buf_cpy) + return ICE_ERR_NO_MEMORY; + } + + ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy), + ICE_NONDMA_TO_NONDMA); + } + + do { + status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd); + + if (!is_cmd_for_retry || status == ICE_SUCCESS || + hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY) + break; + + if (buf_cpy) + ice_memcpy(buf, buf_cpy, buf_size, + ICE_NONDMA_TO_NONDMA); + + ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy), + ICE_NONDMA_TO_NONDMA); + + ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false); + + } while (++idx < ICE_SQ_SEND_MAX_EXECUTE); + + if (buf_cpy) + ice_free(hw, buf_cpy); + + return status; +} + /** * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue * @hw: pointer to the HW struct * @desc: descriptor describing the command * @buf: buffer to use for indirect commands (NULL for direct commands) * @buf_size: size of buffer for indirect commands (0 for direct commands) * @cd: pointer to command details structure * * Helper function to send FW Admin Queue commands to the FW Admin Queue. 
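 *
 * Direct (no-buffer) usage sketch, mirroring ice_clear_pf_cfg() earlier in
 * this file:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);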
*/ enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { - return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); + return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); } /** * ice_aq_get_fw_ver * @hw: pointer to the HW struct * @cd: pointer to command details structure or NULL * * Get the firmware version (0x0001) from the admin queue commands */ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) { struct ice_aqc_get_ver *resp; struct ice_aq_desc desc; enum ice_status status; resp = &desc.params.get_ver; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (!status) { hw->fw_branch = resp->fw_branch; hw->fw_maj_ver = resp->fw_major; hw->fw_min_ver = resp->fw_minor; hw->fw_patch = resp->fw_patch; hw->fw_build = LE32_TO_CPU(resp->fw_build); hw->api_branch = resp->api_branch; hw->api_maj_ver = resp->api_major; hw->api_min_ver = resp->api_minor; hw->api_patch = resp->api_patch; } return status; } /** * ice_aq_send_driver_ver * @hw: pointer to the HW struct * @dv: driver's major, minor version * @cd: pointer to command details structure or NULL * * Send the driver version (0x0002) to the firmware */ enum ice_status ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd) { struct ice_aqc_driver_ver *cmd; struct ice_aq_desc desc; u16 len; cmd = &desc.params.driver_ver; if (!dv) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->major_ver = dv->major_ver; cmd->minor_ver = dv->minor_ver; cmd->build_ver = dv->build_ver; cmd->subbuild_ver = dv->subbuild_ver; len = 0; while (len < sizeof(dv->driver_string) && IS_ASCII(dv->driver_string[len]) && dv->driver_string[len]) len++; return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); } /** * ice_aq_q_shutdown * @hw: pointer to the HW struct * @unloading: is the driver unloading itself * * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well (0x0003). */ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) { struct ice_aqc_q_shutdown *cmd; struct ice_aq_desc desc; cmd = &desc.params.q_shutdown; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); if (unloading) cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /** * ice_aq_req_res * @hw: pointer to the HW struct * @res: resource ID * @access: access type * @sdp_number: resource number * @timeout: the maximum time in ms that the driver may hold the resource * @cd: pointer to command details structure or NULL * * Requests common resource using the admin queue commands (0x0008). * When attempting to acquire the Global Config Lock, the driver can * learn of three states: * 1) ICE_SUCCESS - acquired lock, and can perform download package * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has * successfully downloaded the package; the driver does * not have to download the package and can continue * loading * * Note that if the caller is in an acquire lock, perform action, release lock * phase of operation, it is possible that the FW may detect a timeout and issue * a CORER. In this case, the driver will receive a CORER interrupt and will * have to determine its cause. 
The calling thread that is handling this flow * will likely get an error propagated back to it indicating the Download * Package, Update Package or the Release Resource AQ commands timed out. */ static enum ice_status ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd_resp; struct ice_aq_desc desc; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd_resp = &desc.params.res_owner; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); cmd_resp->res_id = CPU_TO_LE16(res); cmd_resp->access_type = CPU_TO_LE16(access); cmd_resp->res_number = CPU_TO_LE32(sdp_number); cmd_resp->timeout = CPU_TO_LE32(*timeout); *timeout = 0; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. */ /* Global config lock response utilizes an additional status field. * * If the Global config lock resource is held by some other driver, the * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field * and the timeout field indicates the maximum time the current owner * of the resource has to free it. */ if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { *timeout = LE32_TO_CPU(cmd_resp->timeout); return ICE_SUCCESS; } else if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_IN_PROG) { *timeout = LE32_TO_CPU(cmd_resp->timeout); return ICE_ERR_AQ_ERROR; } else if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_DONE) { return ICE_ERR_AQ_NO_WORK; } /* invalid FW response, force a timeout immediately */ *timeout = 0; return ICE_ERR_AQ_ERROR; } /* If the resource is held by some other driver, the command completes * with a busy return value and the timeout field indicates the maximum * time the current owner of the resource has to free it. */ if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) *timeout = LE32_TO_CPU(cmd_resp->timeout); return status; } /** * ice_aq_release_res * @hw: pointer to the HW struct * @res: resource ID * @sdp_number: resource number * @cd: pointer to command details structure or NULL * * release common resource using the admin queue commands (0x0009) */ static enum ice_status ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd; struct ice_aq_desc desc; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.res_owner; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); cmd->res_id = CPU_TO_LE16(res); cmd->res_number = CPU_TO_LE32(sdp_number); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_acquire_res * @hw: pointer to the HW structure * @res: resource ID * @access: access type (read or write) * @timeout: timeout in milliseconds * * This function will attempt to acquire the ownership of a resource. 
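 *
 * The canonical acquire/act/release pattern looks like the sketch below
 * (ICE_NVM_RES_ID and ICE_NVM_TIMEOUT come from the NVM access code and are
 * shown here only as an assumed example):
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ,
 *				 ICE_NVM_TIMEOUT);
 *	if (!status) {
 *		// ... perform the NVM read ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}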
*/ enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; u32 time_left = timeout; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has * previously acquired the resource and performed any necessary updates; * in this case the caller does not obtain the resource and has no * further work to do. */ if (status == ICE_ERR_AQ_NO_WORK) goto ice_acquire_res_exit; if (status) ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); /* If necessary, poll until the current lock owner times out */ timeout = time_left; while (status && timeout && time_left) { ice_msec_delay(delay, true); timeout = (timeout > delay) ? timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); if (status == ICE_ERR_AQ_NO_WORK) /* lock free, but no work to do */ break; if (!status) /* lock acquired */ break; } if (status && status != ICE_ERR_AQ_NO_WORK) ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); ice_acquire_res_exit: if (status == ICE_ERR_AQ_NO_WORK) { if (access == ICE_RES_WRITE) ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); else ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); } return status; } /** * ice_release_res * @hw: pointer to the HW structure * @res: resource ID * * This function will release a resource using the proper Admin Command. */ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) { enum ice_status status; u32 total_delay = 0; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_aq_release_res(hw, res, 0, NULL); /* there are some rare cases when trying to release the resource * results in an admin queue timeout, so handle them correctly */ while ((status == ICE_ERR_AQ_TIMEOUT) && (total_delay < hw->adminq.sq_cmd_timeout)) { ice_msec_delay(1, true); status = ice_aq_release_res(hw, res, 0, NULL); total_delay++; } } /** * ice_aq_alloc_free_res - command to allocate/free resources * @hw: pointer to the HW struct * @num_entries: number of resource entries in buffer * @buf: Indirect buffer to hold data parameters and response * @buf_size: size of buffer for indirect commands * @opc: pass in the command opcode * @cd: pointer to command details structure or NULL * * Helper function to allocate/free resources using the admin queue commands */ enum ice_status ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_alloc_free_res_cmd *cmd; struct ice_aq_desc desc; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.sw_res_ctrl; if (!buf) return ICE_ERR_PARAM; if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries)) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, opc); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->num_entries = CPU_TO_LE16(num_entries); return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); } /** * ice_alloc_hw_res - allocate resource * @hw: pointer to the HW struct * @type: type of resource * @num: number of resources to allocate * @btm: allocate from bottom * @res: pointer to array that will receive the resources */ enum ice_status ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) { struct
ice_aqc_alloc_free_res_elem *buf; enum ice_status status; u16 buf_len; buf_len = ice_struct_size(buf, elem, num); buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!buf) return ICE_ERR_NO_MEMORY; /* Prepare buffer to allocate resource. */ buf->num_elems = CPU_TO_LE16(num); buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); if (btm) buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, ice_aqc_opc_alloc_res, NULL); if (status) goto ice_alloc_res_exit; ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num, ICE_NONDMA_TO_NONDMA); ice_alloc_res_exit: ice_free(hw, buf); return status; } /** * ice_free_hw_res - free allocated HW resource * @hw: pointer to the HW struct * @type: type of resource to free * @num: number of resources * @res: pointer to array that contains the resources to free */ enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; enum ice_status status; u16 buf_len; buf_len = ice_struct_size(buf, elem, num); buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!buf) return ICE_ERR_NO_MEMORY; /* Prepare buffer to free resource. */ buf->num_elems = CPU_TO_LE16(num); buf->res_type = CPU_TO_LE16(type); ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num, ICE_NONDMA_TO_NONDMA); status = ice_aq_alloc_free_res(hw, num, buf, buf_len, ice_aqc_opc_free_res, NULL); if (status) ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); ice_free(hw, buf); return status; } /** * ice_get_num_per_func - determine number of resources per PF * @hw: pointer to the HW structure * @max: value to be evenly split between each PF * * Determine the number of valid functions by going through the bitmap returned * from parsing capabilities and use this to calculate the number of resources * per PF based on the max value passed in. 
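 *
 * For example, with 8 valid PFs and max = 768 (the device-wide VSI
 * count), each PF would be granted 768 / 8 = 96 resources.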
*/ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) { u8 funcs; #define ICE_CAPS_VALID_FUNCS_M 0xFF funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions & ICE_CAPS_VALID_FUNCS_M); if (!funcs) return 0; return max / funcs; } /** * ice_print_led_caps - print LED capabilities * @hw: pointer to the ice_hw instance * @caps: pointer to common caps instance * @prefix: string to prefix when printing - * @debug: set to indicate debug print + * @dbg: set to indicate debug print */ static void ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, - char const *prefix, bool debug) + char const *prefix, bool dbg) { u8 i; - if (debug) + if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix, caps->led_pin_num); else ice_info(hw, "%s: led_pin_num = %d\n", prefix, caps->led_pin_num); for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) { if (!caps->led[i]) continue; - if (debug) + if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n", prefix, i, caps->led[i]); else ice_info(hw, "%s: led[%d] = %d\n", prefix, i, caps->led[i]); } } /** * ice_print_sdp_caps - print SDP capabilities * @hw: pointer to the ice_hw instance * @caps: pointer to common caps instance * @prefix: string to prefix when printing - * @debug: set to indicate debug print + * @dbg: set to indicate debug print */ static void ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, - char const *prefix, bool debug) + char const *prefix, bool dbg) { u8 i; - if (debug) + if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix, caps->sdp_pin_num); else ice_info(hw, "%s: sdp_pin_num = %d\n", prefix, caps->sdp_pin_num); for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) { if (!caps->sdp[i]) continue; - if (debug) + if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n", prefix, i, caps->sdp[i]); else ice_info(hw, "%s: sdp[%d] = %d\n", prefix, i, caps->sdp[i]); } } /** * ice_parse_common_caps - parse common device/function capabilities * @hw: pointer to the HW struct * @caps: pointer to common capabilities structure * @elem: the capability element to parse * @prefix: message prefix for tracing capabilities * * Given a capability element, extract relevant details into the common * capability structure. * * Returns: true if the capability matches one of the common capability ids, * false otherwise. 
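 *
 * For example, an element with cap == ICE_AQC_CAPS_MAX_MTU stores the
 * reported number in caps->max_mtu and returns true, while an element
 * with an unrecognized capability ID leaves caps untouched and returns
 * false.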
*/ static bool ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, struct ice_aqc_list_caps_elem *elem, const char *prefix) { u32 logical_id = LE32_TO_CPU(elem->logical_id); u32 phys_id = LE32_TO_CPU(elem->phys_id); u32 number = LE32_TO_CPU(elem->number); u16 cap = LE16_TO_CPU(elem->cap); bool found = true; switch (cap) { case ICE_AQC_CAPS_SWITCHING_MODE: caps->switching_mode = number; ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix, caps->switching_mode); break; case ICE_AQC_CAPS_MANAGEABILITY_MODE: caps->mgmt_mode = number; caps->mgmt_protocols_mctp = logical_id; ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix, caps->mgmt_mode); ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix, caps->mgmt_protocols_mctp); break; case ICE_AQC_CAPS_OS2BMC: caps->os2bmc = number; ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc); break; case ICE_AQC_CAPS_VALID_FUNCTIONS: caps->valid_functions = number; ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, caps->valid_functions); break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, caps->sr_iov_1_1); break; case ICE_AQC_CAPS_802_1QBG: caps->evb_802_1_qbg = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number); break; case ICE_AQC_CAPS_802_1BR: caps->evb_802_1_qbh = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number); break; case ICE_AQC_CAPS_DCB: caps->dcb = (number == 1); caps->active_tc_bitmap = logical_id; caps->maxtc = phys_id; ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, caps->active_tc_bitmap); ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); break; case ICE_AQC_CAPS_ISCSI: caps->iscsi = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi); break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, caps->rss_table_size); ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, caps->rss_table_entry_width); break; case ICE_AQC_CAPS_RXQS: caps->num_rxq = number; caps->rxq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, caps->num_rxq); ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, caps->rxq_first_id); break; case ICE_AQC_CAPS_TXQS: caps->num_txq = number; caps->txq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, caps->num_txq); ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, caps->txq_first_id); break; case ICE_AQC_CAPS_MSIX: caps->num_msix_vectors = number; caps->msix_vector_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, caps->num_msix_vectors); ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; case ICE_AQC_CAPS_NVM_VER: break; case ICE_AQC_CAPS_NVM_MGMT: caps->sec_rev_disabled = (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ? true : false; ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix, caps->sec_rev_disabled); caps->update_disabled = (number & ICE_NVM_MGMT_UPDATE_DISABLED) ? 
true : false; ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix, caps->update_disabled); caps->nvm_unified_update = (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? true : false; ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, caps->nvm_unified_update); break; case ICE_AQC_CAPS_CEM: caps->mgmt_cem = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix, caps->mgmt_cem); break; case ICE_AQC_CAPS_LED: if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) { caps->led[phys_id] = true; caps->led_pin_num++; ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id); } break; case ICE_AQC_CAPS_SDP: if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) { caps->sdp[phys_id] = true; caps->sdp_pin_num++; ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id); } break; case ICE_AQC_CAPS_WR_CSR_PROT: caps->wr_csr_prot = number; caps->wr_csr_prot |= (u64)logical_id << 32; ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix, (unsigned long long)caps->wr_csr_prot); break; case ICE_AQC_CAPS_WOL_PROXY: caps->num_wol_proxy_fltr = number; caps->wol_proxy_vsi_seid = logical_id; caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M); caps->acpi_prog_mthd = !!(phys_id & ICE_ACPI_PROG_MTHD_M); caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M); ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix, caps->num_wol_proxy_fltr); ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix, caps->wol_proxy_vsi_seid); break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", prefix, caps->max_mtu); break; default: /* Not one of the recognized common capabilities */ found = false; } return found; } /** * ice_recalc_port_limited_caps - Recalculate port limited capabilities * @hw: pointer to the HW structure * @caps: pointer to capabilities structure to fix * * Re-calculate the capabilities that are dependent on the number of physical * ports; i.e. some features are not supported or function differently on * devices with more than 4 ports. */ static void ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) { /* This assumes device capabilities are always scanned before function * capabilities during the initialization flow. */ if (hw->dev_caps.num_funcs > 4) { /* Max 4 TCs per port */ caps->maxtc = 4; ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", caps->maxtc); } } /** * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps * @hw: pointer to the HW struct * @func_p: pointer to function capabilities structure * @cap: pointer to the capability element to parse * * Extract function capabilities for ICE_AQC_CAPS_VF. */ static void ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, struct ice_aqc_list_caps_elem *cap) { u32 number = LE32_TO_CPU(cap->number); u32 logical_id = LE32_TO_CPU(cap->logical_id); func_p->num_allocd_vfs = number; func_p->vf_base_id = logical_id; ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", func_p->num_allocd_vfs); ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", func_p->vf_base_id); } /** * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps * @hw: pointer to the HW struct * @func_p: pointer to function capabilities structure * @cap: pointer to the capability element to parse * * Extract function capabilities for ICE_AQC_CAPS_VSI. 
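 *
 * Note that guar_num_vsi is derived from the per-PF split of ICE_MAX_VSI
 * via ice_get_num_per_func() rather than taken directly from the
 * firmware-reported number; both values are traced for comparison.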
*/ static void ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, struct ice_aqc_list_caps_elem *cap) { func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", LE32_TO_CPU(cap->number)); ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", func_p->guar_num_vsi); } /** * ice_parse_func_caps - Parse function capabilities * @hw: pointer to the HW struct * @func_p: pointer to function capabilities structure * @buf: buffer containing the function capability records * @cap_count: the number of capabilities * * Helper function to parse function (0x000A) capabilities list. For * capabilities shared between device and function, this relies on * ice_parse_common_caps. * * Loop through the list of provided capabilities and extract the relevant * data into the function capabilities structure. */ static void ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, void *buf, u32 cap_count) { struct ice_aqc_list_caps_elem *cap_resp; u32 i; cap_resp = (struct ice_aqc_list_caps_elem *)buf; ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM); for (i = 0; i < cap_count; i++) { u16 cap = LE16_TO_CPU(cap_resp[i].cap); bool found; found = ice_parse_common_caps(hw, &func_p->common_cap, &cap_resp[i], "func caps"); switch (cap) { case ICE_AQC_CAPS_VF: ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); break; case ICE_AQC_CAPS_VSI: ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); break; default: /* Don't list common capabilities as unknown */ if (!found) ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", i, cap); break; } } ice_print_led_caps(hw, &func_p->common_cap, "func caps", true); ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true); ice_recalc_port_limited_caps(hw, &func_p->common_cap); } /** * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure * @cap: capability element to parse * * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. */ static void ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, struct ice_aqc_list_caps_elem *cap) { u32 number = LE32_TO_CPU(cap->number); dev_p->num_funcs = ice_hweight32(number); ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", dev_p->num_funcs); } /** * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure * @cap: capability element to parse * * Parse ICE_AQC_CAPS_VF for device capabilities. */ static void ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, struct ice_aqc_list_caps_elem *cap) { u32 number = LE32_TO_CPU(cap->number); dev_p->num_vfs_exposed = number; ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", dev_p->num_vfs_exposed); } /** * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure * @cap: capability element to parse * * Parse ICE_AQC_CAPS_VSI for device capabilities.
*/ static void ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, struct ice_aqc_list_caps_elem *cap) { u32 number = LE32_TO_CPU(cap->number); dev_p->num_vsi_allocd_to_host = number; ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", dev_p->num_vsi_allocd_to_host); } /** * ice_parse_dev_caps - Parse device capabilities * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure * @buf: buffer containing the device capability records * @cap_count: the number of capabilities * * Helper function to parse device (0x000B) capabilities list. For * capabilities shared between device and function, this relies on * ice_parse_common_caps. * * Loop through the list of provided capabilities and extract the relevant * data into the device capabilities structure. */ static void ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, void *buf, u32 cap_count) { struct ice_aqc_list_caps_elem *cap_resp; u32 i; cap_resp = (struct ice_aqc_list_caps_elem *)buf; ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM); for (i = 0; i < cap_count; i++) { u16 cap = LE16_TO_CPU(cap_resp[i].cap); bool found; found = ice_parse_common_caps(hw, &dev_p->common_cap, &cap_resp[i], "dev caps"); switch (cap) { case ICE_AQC_CAPS_VALID_FUNCTIONS: ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); break; case ICE_AQC_CAPS_VF: ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); break; case ICE_AQC_CAPS_VSI: ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); break; default: /* Don't list common capabilities as unknown */ if (!found) ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", i, cap); break; } } ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true); ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true); ice_recalc_port_limited_caps(hw, &dev_p->common_cap); } /** * ice_aq_list_caps - query function/device capabilities * @hw: pointer to the HW struct * @buf: a buffer to hold the capabilities * @buf_size: size of the buffer * @cap_count: if not NULL, set to the number of capabilities reported * @opc: capabilities type to discover, device or function * @cd: pointer to command details structure or NULL * * Get the function (0x000A) or device (0x000B) capabilities description from * firmware and store it in the buffer. * * If the cap_count pointer is not NULL, then it is set to the number of * capabilities firmware will report. Note that if the buffer size is too * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The * cap_count will still be updated in this case. It is recommended that the * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that * firmware could return) to avoid this. */ static enum ice_status ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_list_caps *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.get_cap; if (opc != ice_aqc_opc_list_func_caps && opc != ice_aqc_opc_list_dev_caps) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, opc); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (cap_count) *cap_count = LE32_TO_CPU(cmd->count); return status; } /** * ice_discover_dev_caps - Read and extract device capabilities * @hw: pointer to the hardware structure * @dev_caps: pointer to device capabilities structure * * Read the device capabilities and extract them into the dev_caps structure * for later use.
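 *
 * Normally reached via ice_get_caps(), which discovers device
 * capabilities before function capabilities so that
 * ice_recalc_port_limited_caps() can rely on dev_caps.num_funcs.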
*/ static enum ice_status ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) { enum ice_status status; u32 cap_count = 0; void *cbuf; cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); if (!cbuf) return ICE_ERR_NO_MEMORY; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum * possible size that firmware can return. */ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, ice_aqc_opc_list_dev_caps, NULL); if (!status) ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); ice_free(hw, cbuf); return status; } /** * ice_discover_func_caps - Read and extract function capabilities * @hw: pointer to the hardware structure * @func_caps: pointer to function capabilities structure * * Read the function capabilities and extract them into the func_caps structure * for later use. */ static enum ice_status ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) { enum ice_status status; u32 cap_count = 0; void *cbuf; cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); if (!cbuf) return ICE_ERR_NO_MEMORY; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum * possible size that firmware can return. */ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, ice_aqc_opc_list_func_caps, NULL); if (!status) ice_parse_func_caps(hw, func_caps, cbuf, cap_count); ice_free(hw, cbuf); return status; } /** * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode * @hw: pointer to the hardware structure */ void ice_set_safe_mode_caps(struct ice_hw *hw) { struct ice_hw_func_caps *func_caps = &hw->func_caps; struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; struct ice_hw_common_caps cached_caps; u32 num_funcs; /* cache some func_caps values that should be restored after memset */ cached_caps = func_caps->common_cap; /* unset func capabilities */ memset(func_caps, 0, sizeof(*func_caps)); #define ICE_RESTORE_FUNC_CAP(name) \ func_caps->common_cap.name = cached_caps.name /* restore cached values */ ICE_RESTORE_FUNC_CAP(valid_functions); ICE_RESTORE_FUNC_CAP(txq_first_id); ICE_RESTORE_FUNC_CAP(rxq_first_id); ICE_RESTORE_FUNC_CAP(msix_vector_first_id); ICE_RESTORE_FUNC_CAP(max_mtu); ICE_RESTORE_FUNC_CAP(nvm_unified_update); /* one Tx and one Rx queue in safe mode */ func_caps->common_cap.num_rxq = 1; func_caps->common_cap.num_txq = 1; /* two MSIX vectors, one for traffic and one for misc causes */ func_caps->common_cap.num_msix_vectors = 2; func_caps->guar_num_vsi = 1; /* cache some dev_caps values that should be restored after memset */ cached_caps = dev_caps->common_cap; num_funcs = dev_caps->num_funcs; /* unset dev capabilities */ memset(dev_caps, 0, sizeof(*dev_caps)); #define ICE_RESTORE_DEV_CAP(name) \ dev_caps->common_cap.name = cached_caps.name /* restore cached values */ ICE_RESTORE_DEV_CAP(valid_functions); ICE_RESTORE_DEV_CAP(txq_first_id); ICE_RESTORE_DEV_CAP(rxq_first_id); ICE_RESTORE_DEV_CAP(msix_vector_first_id); ICE_RESTORE_DEV_CAP(max_mtu); ICE_RESTORE_DEV_CAP(nvm_unified_update); dev_caps->num_funcs = num_funcs; /* one Tx and one Rx queue per function in safe mode */ dev_caps->common_cap.num_rxq = num_funcs; dev_caps->common_cap.num_txq = num_funcs; /* two MSIX vectors per function */ dev_caps->common_cap.num_msix_vectors = 2 * 
num_funcs; } /** * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure */ enum ice_status ice_get_caps(struct ice_hw *hw) { enum ice_status status; status = ice_discover_dev_caps(hw, &hw->dev_caps); if (status) return status; return ice_discover_func_caps(hw, &hw->func_caps); } /** * ice_aq_manage_mac_write - manage MAC address write command * @hw: pointer to the HW struct * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address * @flags: flags to control write behavior * @cd: pointer to command details structure or NULL * * This function is used to write MAC address to the NVM (0x0108). */ enum ice_status ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd) { struct ice_aqc_manage_mac_write *cmd; struct ice_aq_desc desc; cmd = &desc.params.mac_write; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); cmd->flags = flags; ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_clear_pxe_mode * @hw: pointer to the HW struct * * Tell the firmware that the driver is taking over from PXE (0x0110). */ static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /** * ice_clear_pxe_mode - clear pxe operations mode * @hw: pointer to the HW struct * * Make sure all PXE mode settings are cleared, including things * like descriptor fetch/write-back mode. */ void ice_clear_pxe_mode(struct ice_hw *hw) { if (ice_check_sq_alive(hw, &hw->adminq)) ice_aq_clear_pxe_mode(hw); } /** * ice_aq_set_port_params - set physical port parameters. * @pi: pointer to the port info struct * @bad_frame_vsi: defines the VSI to which bad frames are forwarded * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded * @double_vlan: if set double VLAN is enabled * @cd: pointer to command details structure or NULL * * Set Physical port parameters (0x0203) */ enum ice_status ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct ice_sq_cd *cd) { struct ice_aqc_set_port_params *cmd; struct ice_hw *hw = pi->hw; struct ice_aq_desc desc; u16 cmd_flags = 0; cmd = &desc.params.set_port_params; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi); if (save_bad_pac) cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS; if (pad_short_pac) cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS; if (double_vlan) cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; cmd->cmd_flags = CPU_TO_LE16(cmd_flags); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_get_link_speed_based_on_phy_type - returns link speed * @phy_type_low: lower part of phy_type * @phy_type_high: higher part of phy_type * * This helper function will convert an entry in PHY type structure * [phy_type_low, phy_type_high] to its corresponding link speed. * Note: In the structure of [phy_type_low, phy_type_high], there should * be one bit set, as this function will convert one PHY type to its * speed. 
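 * For example, phy_type_low == ICE_PHY_TYPE_LOW_25GBASE_CR (with
 * phy_type_high == 0) maps to ICE_AQ_LINK_SPEED_25GB.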
* If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned */ static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) { u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; switch (phy_type_low) { case ICE_PHY_TYPE_LOW_100BASE_TX: case ICE_PHY_TYPE_LOW_100M_SGMII: speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; break; case ICE_PHY_TYPE_LOW_1000BASE_T: case ICE_PHY_TYPE_LOW_1000BASE_SX: case ICE_PHY_TYPE_LOW_1000BASE_LX: case ICE_PHY_TYPE_LOW_1000BASE_KX: case ICE_PHY_TYPE_LOW_1G_SGMII: speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; break; case ICE_PHY_TYPE_LOW_2500BASE_T: case ICE_PHY_TYPE_LOW_2500BASE_X: case ICE_PHY_TYPE_LOW_2500BASE_KX: speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; break; case ICE_PHY_TYPE_LOW_5GBASE_T: case ICE_PHY_TYPE_LOW_5GBASE_KR: speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; break; case ICE_PHY_TYPE_LOW_10GBASE_T: case ICE_PHY_TYPE_LOW_10G_SFI_DA: case ICE_PHY_TYPE_LOW_10GBASE_SR: case ICE_PHY_TYPE_LOW_10GBASE_LR: case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: case ICE_PHY_TYPE_LOW_10G_SFI_C2C: speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; break; case ICE_PHY_TYPE_LOW_25GBASE_T: case ICE_PHY_TYPE_LOW_25GBASE_CR: case ICE_PHY_TYPE_LOW_25GBASE_CR_S: case ICE_PHY_TYPE_LOW_25GBASE_CR1: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: case ICE_PHY_TYPE_LOW_25GBASE_KR: case ICE_PHY_TYPE_LOW_25GBASE_KR_S: case ICE_PHY_TYPE_LOW_25GBASE_KR1: case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: case ICE_PHY_TYPE_LOW_25G_AUI_C2C: speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; break; case ICE_PHY_TYPE_LOW_40GBASE_CR4: case ICE_PHY_TYPE_LOW_40GBASE_SR4: case ICE_PHY_TYPE_LOW_40GBASE_LR4: case ICE_PHY_TYPE_LOW_40GBASE_KR4: case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: case ICE_PHY_TYPE_LOW_40G_XLAUI: speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; break; case ICE_PHY_TYPE_LOW_50GBASE_CR2: case ICE_PHY_TYPE_LOW_50GBASE_SR2: case ICE_PHY_TYPE_LOW_50GBASE_LR2: case ICE_PHY_TYPE_LOW_50GBASE_KR2: case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_LAUI2: case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI2: case ICE_PHY_TYPE_LOW_50GBASE_CP: case ICE_PHY_TYPE_LOW_50GBASE_SR: case ICE_PHY_TYPE_LOW_50GBASE_FR: case ICE_PHY_TYPE_LOW_50GBASE_LR: case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI1: speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; break; case ICE_PHY_TYPE_LOW_100GBASE_CR4: case ICE_PHY_TYPE_LOW_100GBASE_SR4: case ICE_PHY_TYPE_LOW_100GBASE_LR4: case ICE_PHY_TYPE_LOW_100GBASE_KR4: case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_CAUI4: case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_AUI4: case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: case ICE_PHY_TYPE_LOW_100GBASE_CP2: case ICE_PHY_TYPE_LOW_100GBASE_SR2: case ICE_PHY_TYPE_LOW_100GBASE_DR: speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; break; default: speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; break; } switch (phy_type_high) { case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: case ICE_PHY_TYPE_HIGH_100G_CAUI2: case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: case ICE_PHY_TYPE_HIGH_100G_AUI2: speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; break; default: speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; break; } if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN
&& speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) return ICE_AQ_LINK_SPEED_UNKNOWN; else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) return ICE_AQ_LINK_SPEED_UNKNOWN; else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) return speed_phy_type_low; else return speed_phy_type_high; } /** * ice_update_phy_type * @phy_type_low: pointer to the lower part of phy_type * @phy_type_high: pointer to the higher part of phy_type * @link_speeds_bitmap: targeted link speeds bitmap * * Note: The format of link_speeds_bitmap is documented at * [ice_aqc_get_link_status->link_speed]. The caller can pass in a * link_speeds_bitmap that includes multiple speeds. * * Each entry in this [phy_type_low, phy_type_high] structure will * represent a certain link speed. This helper function will turn on bits * in [phy_type_low, phy_type_high] structure based on the value of * link_speeds_bitmap input parameter. */ void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap) { u64 pt_high; u64 pt_low; int index; u16 speed; /* We first check with low part of phy_type */ for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { pt_low = BIT_ULL(index); speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); if (link_speeds_bitmap & speed) *phy_type_low |= BIT_ULL(index); } /* We then check with high part of phy_type */ for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { pt_high = BIT_ULL(index); speed = ice_get_link_speed_based_on_phy_type(0, pt_high); if (link_speeds_bitmap & speed) *phy_type_high |= BIT_ULL(index); } } /** * ice_aq_set_phy_cfg * @hw: pointer to the HW struct * @pi: port info structure of the interested logical port * @cfg: structure with PHY configuration data to be set * @cd: pointer to command details structure or NULL * * Set the various PHY configuration parameters supported on the Port. * One or more of the Set PHY config parameters may be ignored in an MFP * mode as the PF may not have the privilege to set some of the PHY Config * parameters. This status will be indicated by the command response (0x0601). */ enum ice_status ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) { struct ice_aq_desc desc; enum ice_status status; if (!cfg) return ICE_ERR_PARAM; /* Ensure that only valid bits of cfg->caps can be turned on.
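 * Invalid bits are masked off (with a debug trace) rather than failing
 * the command.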
*/ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", cfg->caps); cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; } ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); desc.params.set_phy.lport_num = pi->lport; desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", (unsigned long long)LE64_TO_CPU(cfg->phy_type_low)); ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", (unsigned long long)LE64_TO_CPU(cfg->phy_type_high)); ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", cfg->low_power_ctrl_an); ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", cfg->link_fec_opt); status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) status = ICE_SUCCESS; if (!status) pi->phy.curr_user_phy_cfg = *cfg; return status; } /** * ice_update_link_info - update status of the HW network link * @pi: port info structure of the interested logical port */ enum ice_status ice_update_link_info(struct ice_port_info *pi) { struct ice_link_status *li; enum ice_status status; if (!pi) return ICE_ERR_PARAM; li = &pi->phy.link_info; status = ice_aq_get_link_info(pi, true, NULL, NULL); if (status) return status; if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { struct ice_aqc_get_phy_caps_data *pcaps; struct ice_hw *hw; hw = pi->hw; pcaps = (struct ice_aqc_get_phy_caps_data *) ice_malloc(hw, sizeof(*pcaps)); if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status == ICE_SUCCESS) ice_memcpy(li->module_type, &pcaps->module_type, sizeof(li->module_type), ICE_NONDMA_TO_NONDMA); ice_free(hw, pcaps); } return status; } /** * ice_cache_phy_user_req * @pi: port information structure * @cache_data: PHY logging data * @cache_mode: PHY logging mode * * Log the user request on (FC, FEC, SPEED) for later use.
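 *
 * Illustrative use, mirroring the caching done in ice_cfg_phy_fc()
 * below:
 *
 *	cache_data.data.curr_user_fc_req = req_mode;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);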
*/ static void ice_cache_phy_user_req(struct ice_port_info *pi, struct ice_phy_cache_mode_data cache_data, enum ice_phy_cache_mode cache_mode) { if (!pi) return; switch (cache_mode) { case ICE_FC_MODE: pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; break; case ICE_SPEED_MODE: pi->phy.curr_user_speed_req = cache_data.data.curr_user_speed_req; break; case ICE_FEC_MODE: pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; break; default: break; } } /** * ice_caps_to_fc_mode * @caps: PHY capabilities * * Convert PHY FC capabilities to ice FC mode */ enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) { if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) return ICE_FC_FULL; if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) return ICE_FC_TX_PAUSE; if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) return ICE_FC_RX_PAUSE; return ICE_FC_NONE; } /** * ice_caps_to_fec_mode * @caps: PHY capabilities * @fec_options: Link FEC options * * Convert PHY FEC capabilities to ice FEC mode */ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) { if (caps & ICE_AQC_PHY_EN_AUTO_FEC) return ICE_FEC_AUTO; if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | ICE_AQC_PHY_FEC_25G_KR_REQ)) return ICE_FEC_BASER; if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | ICE_AQC_PHY_FEC_25G_RS_544_REQ | ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) return ICE_FEC_RS; return ICE_FEC_NONE; } /** * ice_cfg_phy_fc - Configure PHY FC data based on FC mode * @pi: port information structure * @cfg: PHY configuration data to set FC mode * @req_mode: FC mode to configure */ static enum ice_status ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) { struct ice_phy_cache_mode_data cache_data; u8 pause_mask = 0x0; if (!pi || !cfg) return ICE_ERR_BAD_PTR; - switch (req_mode) { case ICE_FC_AUTO: { struct ice_aqc_get_phy_caps_data *pcaps; enum ice_status status; pcaps = (struct ice_aqc_get_phy_caps_data *) ice_malloc(pi->hw, sizeof(*pcaps)); if (!pcaps) return ICE_ERR_NO_MEMORY; - /* Query the value of FC that both the NIC and attached media * can do. */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status) { ice_free(pi->hw, pcaps); return status; } pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE; pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE; ice_free(pi->hw, pcaps); break; } case ICE_FC_FULL: pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; break; case ICE_FC_RX_PAUSE: pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; break; case ICE_FC_TX_PAUSE: pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; break; default: break; } /* clear the old pause settings */ cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | ICE_AQC_PHY_EN_RX_LINK_PAUSE); /* set the new capabilities */ cfg->caps |= pause_mask; /* Cache user FC request */ cache_data.data.curr_user_fc_req = req_mode; ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); return ICE_SUCCESS; } /** * ice_set_fc * @pi: port information structure * @aq_failures: pointer to status code, specific to ice_set_fc routine * @ena_auto_link_update: enable automatic link update * * Set the requested flow control mode. 
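 *
 * Illustrative caller sketch, not part of this change (aq_fail is a
 * hypothetical local): set pi->fc.req_mode first, then inspect
 * *aq_failures on error:
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_fail, true);
 *	if (status)
 *		... aq_fail tells which AQ step failed ...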
*/ enum ice_status ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_aqc_get_phy_caps_data *pcaps; enum ice_status status; struct ice_hw *hw; if (!pi || !aq_failures) return ICE_ERR_BAD_PTR; *aq_failures = 0; hw = pi->hw; pcaps = (struct ice_aqc_get_phy_caps_data *) ice_malloc(hw, sizeof(*pcaps)); if (!pcaps) return ICE_ERR_NO_MEMORY; /* Get the current PHY config */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, - NULL); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, + pcaps, NULL); + if (status) { *aq_failures = ICE_SET_FC_AQ_FAIL_GET; goto out; } ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); /* Configure the set PHY data */ status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); if (status) { if (status != ICE_ERR_BAD_PTR) *aq_failures = ICE_SET_FC_AQ_FAIL_GET; goto out; } /* If the capabilities have changed, then set the new config */ if (cfg.caps != pcaps->caps) { int retry_count, retry_max = 10; /* Auto restart link so settings take effect */ if (ena_auto_link_update) cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); if (status) { *aq_failures = ICE_SET_FC_AQ_FAIL_SET; goto out; } /* Update the link info * It sometimes takes a really long time for link to * come back from the atomic reset. Thus, we wait a * little bit. */ for (retry_count = 0; retry_count < retry_max; retry_count++) { status = ice_update_link_info(pi); if (status == ICE_SUCCESS) break; ice_msec_delay(100, true); } if (status) *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; } out: ice_free(hw, pcaps); return status; } /** * ice_phy_caps_equals_cfg * @phy_caps: PHY capabilities * @phy_cfg: PHY configuration * * Helper function to determine if PHY capabilities matches PHY * configuration */ bool ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, struct ice_aqc_set_phy_cfg_data *phy_cfg) { u8 caps_mask, cfg_mask; if (!phy_caps || !phy_cfg) return false; /* These bits are not common between capabilities and configuration. * Do not use them to determine equality. 
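 * (AN mode and module-qualification enable on the capabilities side; the
 * auto-link-update request bit on the configuration side.)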
*/ caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | ICE_AQC_PHY_EN_MOD_QUAL); cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; if (phy_caps->phy_type_low != phy_cfg->phy_type_low || phy_caps->phy_type_high != phy_cfg->phy_type_high || ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || phy_caps->eee_cap != phy_cfg->eee_cap || phy_caps->eeer_value != phy_cfg->eeer_value || phy_caps->link_fec_options != phy_cfg->link_fec_opt) return false; return true; } /** * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data * @pi: port information structure * @caps: PHY ability structure to copy data from * @cfg: PHY configuration structure to copy data to * * Helper function to copy AQC PHY get ability data to PHY set configuration * data structure */ void ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg) { if (!pi || !caps || !cfg) return; ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM); cfg->phy_type_low = caps->phy_type_low; cfg->phy_type_high = caps->phy_type_high; cfg->caps = caps->caps; cfg->low_power_ctrl_an = caps->low_power_ctrl_an; cfg->eee_cap = caps->eee_cap; cfg->eeer_value = caps->eeer_value; cfg->link_fec_opt = caps->link_fec_options; cfg->module_compliance_enforcement = caps->module_compliance_enforcement; - - if (ice_fw_supports_link_override(pi->hw)) { - struct ice_link_default_override_tlv tlv; - - if (ice_get_link_default_override(&tlv, pi)) - return; - - if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) - cfg->module_compliance_enforcement |= - ICE_LINK_OVERRIDE_STRICT_MODE; - } } /** * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode * @pi: port information structure * @cfg: PHY configuration data to set FEC mode * @fec: FEC mode to configure */ enum ice_status ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) { struct ice_aqc_get_phy_caps_data *pcaps; enum ice_status status = ICE_SUCCESS; struct ice_hw *hw; if (!pi || !cfg) return ICE_ERR_BAD_PTR; hw = pi->hw; pcaps = (struct ice_aqc_get_phy_caps_data *) ice_malloc(hw, sizeof(*pcaps)); if (!pcaps) return ICE_ERR_NO_MEMORY; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, - NULL); + status = ice_aq_get_phy_caps(pi, false, + (ice_fw_supports_report_dflt_cfg(hw) ? + ICE_AQC_REPORT_DFLT_CFG : + ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); + if (status) goto out; cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC); cfg->link_fec_opt = pcaps->link_fec_options; switch (fec) { case ICE_FEC_BASER: /* Clear RS bits, and AND BASE-R ability * bits and OR request bits. */ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | ICE_AQC_PHY_FEC_25G_KR_REQ; break; case ICE_FEC_RS: /* Clear BASE-R bits, and AND RS ability * bits and OR request bits. */ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | ICE_AQC_PHY_FEC_25G_RS_544_REQ; break; case ICE_FEC_NONE: /* Clear all FEC option bits. */ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; break; case ICE_FEC_AUTO: /* AND auto FEC bit, and all caps bits.
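 * That is, keep only the valid capability bits and merge in every FEC
 * option the PHY reports.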
*/ cfg->caps &= ICE_AQC_PHY_CAPS_MASK; cfg->link_fec_opt |= pcaps->link_fec_options; break; default: status = ICE_ERR_PARAM; break; } - if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { + if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) && + !ice_fw_supports_report_dflt_cfg(pi->hw)) { struct ice_link_default_override_tlv tlv; if (ice_get_link_default_override(&tlv, pi)) goto out; if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && (tlv.options & ICE_LINK_OVERRIDE_EN)) cfg->link_fec_opt = tlv.fec_options; } out: ice_free(hw, pcaps); return status; } /** * ice_get_link_status - get status of the HW network link * @pi: port information structure * @link_up: pointer to bool (true/false = linkup/linkdown) * * Variable link_up is true if link is up, false if link is down. * The variable link_up is invalid if status is non zero. As a * result of this call, link status reporting becomes enabled */ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) { struct ice_phy_info *phy_info; enum ice_status status = ICE_SUCCESS; if (!pi || !link_up) return ICE_ERR_PARAM; phy_info = &pi->phy; if (phy_info->get_link_info) { status = ice_update_link_info(pi); if (status) ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", status); } *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; return status; } /** * ice_aq_set_link_restart_an * @pi: pointer to the port information structure * @ena_link: if true: enable link, if false: disable link * @cd: pointer to command details structure or NULL * * Sets up the link and restarts the Auto-Negotiation over the link. */ enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd) { struct ice_aqc_restart_an *cmd; struct ice_aq_desc desc; cmd = &desc.params.restart_an; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; cmd->lport_num = pi->lport; if (ena_link) cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; else cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); } /** * ice_aq_set_event_mask * @hw: pointer to the HW struct * @port_num: port number of the physical function * @mask: event mask to be set * @cd: pointer to command details structure or NULL * * Set event mask (0x0613) */ enum ice_status ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd) { struct ice_aqc_set_event_mask *cmd; struct ice_aq_desc desc; cmd = &desc.params.set_event_mask; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); cmd->lport_num = port_num; cmd->event_mask = CPU_TO_LE16(mask); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_set_mac_loopback * @hw: pointer to the HW struct * @ena_lpbk: Enable or Disable loopback * @cd: pointer to command details structure or NULL * * Enable/disable loopback on a given port */ enum ice_status ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_lb *cmd; struct ice_aq_desc desc; cmd = &desc.params.set_mac_lb; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); if (ena_lpbk) cmd->lb_mode = ICE_AQ_MAC_LB_EN; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_set_port_id_led * @pi: pointer to the port information * @is_orig_mode: is this LED set to original mode (by the net-list) * @cd: pointer to command details structure or NULL * * Set LED value for the given port (0x06e9) */ enum 
ice_status ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd) { struct ice_aqc_set_port_id_led *cmd; struct ice_hw *hw = pi->hw; struct ice_aq_desc desc; cmd = &desc.params.set_port_id_led; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); if (is_orig_mode) cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; else cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_sff_eeprom * @hw: pointer to the HW struct * @lport: bits [7:0] = logical port, bit [8] = logical port valid * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. * @page: QSFP page * @set_page: set or ignore the page * @data: pointer to data buffer to be read/written to the I2C device. * @length: 1-16 for read, 1 for write. * @write: 0 read, 1 for write. * @cd: pointer to command details structure or NULL * * Read/Write SFF EEPROM (0x06EE) */ enum ice_status ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd) { struct ice_aqc_sff_eeprom *cmd; struct ice_aq_desc desc; enum ice_status status; if (!data || (mem_addr & 0xff00)) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); cmd = &desc.params.read_write_sff_param; desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->lport_num = (u8)(lport & 0xff); cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) & ICE_AQC_SFF_I2CBUS_7BIT_M) | ((set_page << ICE_AQC_SFF_SET_EEPROM_PAGE_S) & ICE_AQC_SFF_SET_EEPROM_PAGE_M)); cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff); cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); if (write) cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE); status = ice_aq_send_cmd(hw, &desc, data, length, cd); return status; } /** * __ice_aq_get_set_rss_lut * @hw: pointer to the hardware structure * @params: RSS LUT parameters * @set: set true to set the table, false to get the table * * Internal function to get (0x0B05) or set (0x0B03) RSS look up table */ static enum ice_status __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) { u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; struct ice_aqc_get_set_rss_lut *cmd_resp; struct ice_aq_desc desc; enum ice_status status; u8 *lut; if (!params) return ICE_ERR_PARAM; vsi_handle = params->vsi_handle; lut = params->lut; if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) return ICE_ERR_PARAM; lut_size = params->lut_size; lut_type = params->lut_type; glob_lut_idx = params->global_lut_id; vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); cmd_resp = &desc.params.get_set_rss_lut; if (set) { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); } else { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); } cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | ICE_AQC_GSET_RSS_LUT_VSI_VALID); switch (lut_type) { case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); break; default: status = ICE_ERR_PARAM; goto ice_aq_get_set_rss_lut_exit; } if (lut_type == 
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); if (!set) goto ice_aq_get_set_rss_lut_send; } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { if (!set) goto ice_aq_get_set_rss_lut_send; } else { goto ice_aq_get_set_rss_lut_send; } /* LUT size is only valid for Global and PF table types */ switch (lut_size) { case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; break; case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; break; case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; break; } /* fall-through */ default: status = ICE_ERR_PARAM; goto ice_aq_get_set_rss_lut_exit; } ice_aq_get_set_rss_lut_send: cmd_resp->flags = CPU_TO_LE16(flags); status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); ice_aq_get_set_rss_lut_exit: return status; } /** * ice_aq_get_rss_lut * @hw: pointer to the hardware structure * @get_params: RSS LUT parameters used to specify which RSS LUT to get * * get the RSS lookup table, PF or VSI type */ enum ice_status ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) { return __ice_aq_get_set_rss_lut(hw, get_params, false); } /** * ice_aq_set_rss_lut * @hw: pointer to the hardware structure * @set_params: RSS LUT parameters used to specify how to set the RSS LUT * * set the RSS lookup table, PF or VSI type */ enum ice_status ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) { return __ice_aq_get_set_rss_lut(hw, set_params, true); } /** * __ice_aq_get_set_rss_key * @hw: pointer to the HW struct * @vsi_id: VSI FW index * @key: pointer to key info struct * @set: set true to set the key, false to get the key * * get (0x0B04) or set (0x0B02) the RSS key per VSI */ static enum ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, struct ice_aqc_get_set_rss_keys *key, bool set) { struct ice_aqc_get_set_rss_key *cmd_resp; u16 key_size = sizeof(*key); struct ice_aq_desc desc; cmd_resp = &desc.params.get_set_rss_key; if (set) { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); } else { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); } cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id << ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | ICE_AQC_GSET_RSS_KEY_VSI_VALID); return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); } /** * ice_aq_get_rss_key * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * @key: pointer to key info struct * * get the RSS key per VSI */ enum ice_status ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *key) { if (!ice_is_vsi_valid(hw, vsi_handle) || !key) return ICE_ERR_PARAM; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), key, false); } /** * ice_aq_set_rss_key * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * @keys: pointer to key info struct * * set the RSS key per VSI */ enum ice_status ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys) { if 
(!ice_is_vsi_valid(hw, vsi_handle) || !keys) return ICE_ERR_PARAM; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), keys, true); } /** * ice_aq_add_lan_txq * @hw: pointer to the hardware structure * @num_qgrps: Number of added queue groups * @qg_list: list of queue groups to be added * @buf_size: size of buffer for indirect command * @cd: pointer to command details structure or NULL * * Add Tx LAN queue (0x0C30) * * NOTE: * Prior to calling add Tx LAN queue: * Initialize the following as part of the Tx queue context: * Completion queue ID if the queue uses Completion queue, Quanta profile, * Cache profile and Packet shaper profile. * * After add Tx LAN queue AQ command is completed: * Interrupts should be associated with specific queues, * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue * flow. */ enum ice_status ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_add_tx_qgrp *list; struct ice_aqc_add_txqs *cmd; struct ice_aq_desc desc; u16 i, sum_size = 0; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.add_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); if (!qg_list) return ICE_ERR_PARAM; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) return ICE_ERR_PARAM; for (i = 0, list = qg_list; i < num_qgrps; i++) { sum_size += ice_struct_size(list, txqs, list->num_txqs); list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + list->num_txqs); } if (buf_size != sum_size) return ICE_ERR_PARAM; desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->num_qgrps = num_qgrps; return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); } /** * ice_aq_dis_lan_txq * @hw: pointer to the hardware structure * @num_qgrps: number of groups in the list * @qg_list: the list of groups to disable * @buf_size: the total size of the qg_list buffer in bytes * @rst_src: if called due to reset, specifies the reset source * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * Disable LAN Tx queue (0x0C31) */ static enum ice_status ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { struct ice_aqc_dis_txq_item *item; struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; enum ice_status status; u16 i, sz = 0; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.dis_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); /* qg_list can be NULL only in VM/VF reset flow */ if (!qg_list && !rst_src) return ICE_ERR_PARAM; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) return ICE_ERR_PARAM; cmd->num_entries = num_qgrps; cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & ICE_AQC_Q_DIS_TIMEOUT_M); switch (rst_src) { case ICE_VM_RESET: cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; cmd->vmvf_and_timeout |= CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); break; case ICE_VF_RESET: cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; /* In this case, FW expects vmvf_num to be absolute VF ID */ cmd->vmvf_and_timeout |= CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) & ICE_AQC_Q_DIS_VMVF_NUM_M); break; case ICE_NO_RESET: default: break; } /* flush pipe on time out */ cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; /* If no queue group info, we are in a reset flow. 
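 * (qg_list may be NULL only when rst_src was supplied; see the parameter
 * check above.)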
Issue the AQ */ if (!qg_list) goto do_aq; /* set RD bit to indicate that command buffer is provided by the driver * and it needs to be read by the firmware */ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); for (i = 0, item = qg_list; i < num_qgrps; i++) { u16 item_size = ice_struct_size(item, q_id, item->num_qs); /* If the num of queues is even, add 2 bytes of padding */ if ((item->num_qs % 2) == 0) item_size += 2; sz += item_size; item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); } if (buf_size != sz) return ICE_ERR_PARAM; do_aq: status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); if (status) { if (!qg_list) ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", vmvf_num, hw->adminq.sq_last_status); else ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", LE16_TO_CPU(qg_list[0].q_id[0]), hw->adminq.sq_last_status); } return status; } /** * ice_aq_move_recfg_lan_txq * @hw: pointer to the hardware structure * @num_qs: number of queues to move/reconfigure * @is_move: true if this operation involves node movement * @is_tc_change: true if this operation involves a TC change * @subseq_call: true if this operation is a subsequent call * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN * @timeout: timeout in units of 100 usec (valid values 0-50) * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN * @buf: struct containing src/dest TEID and per-queue info * @buf_size: size of buffer for indirect command * @txqs_moved: out param, number of queues successfully moved * @cd: pointer to command details structure or NULL * * Move / Reconfigure Tx LAN queues (0x0C32) */ enum ice_status ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, bool is_tc_change, bool subseq_call, bool flush_pipe, u8 timeout, u32 *blocked_cgds, struct ice_aqc_move_txqs_data *buf, u16 buf_size, u8 *txqs_moved, struct ice_sq_cd *cd) { struct ice_aqc_move_txqs *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.move_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs); #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX) return ICE_ERR_PARAM; if (is_tc_change && !flush_pipe && !blocked_cgds) return ICE_ERR_PARAM; if (!is_move && !is_tc_change) return ICE_ERR_PARAM; desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); if (is_move) cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE; if (is_tc_change) cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE; if (subseq_call) cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL; if (flush_pipe) cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE; cmd->num_qs = num_qs; cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) & ICE_AQC_Q_CMD_TIMEOUT_M); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status && txqs_moved) *txqs_moved = cmd->num_qs; if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN && is_tc_change && !flush_pipe) *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds); return status; } /* End of FW Admin Queue command wrappers */ /** * ice_write_byte - write a byte to a packed context structure * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u8 src_byte, dest_byte, mask; u8 *from, *dest; u16 shift_width; /* copy from the next struct field */ from = src_ctx + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = (u8)(BIT(ce_info->width) - 1); 
src_byte = *from; src_byte &= mask; /* shift to correct alignment */ mask <<= shift_width; src_byte <<= shift_width; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA); dest_byte &= ~mask; /* get the bits not changing */ dest_byte |= src_byte; /* add in the new bits */ /* put it all back */ ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); } /** * ice_write_word - write a word to a packed context structure * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u16 src_word, mask; __le16 dest_word; u8 *from, *dest; u16 shift_width; /* copy from the next struct field */ from = src_ctx + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = BIT(ce_info->width) - 1; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_word = *(u16 *)from; src_word &= mask; /* shift to correct alignment */ mask <<= shift_width; src_word <<= shift_width; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA); dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */ /* put it all back */ ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); } /** * ice_write_dword - write a dword to a packed context structure * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u32 src_dword, mask; __le32 dest_dword; u8 *from, *dest; u16 shift_width; /* copy from the next struct field */ from = src_ctx + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 32 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) mask = BIT(ce_info->width) - 1; else mask = (u32)~0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_dword = *(u32 *)from; src_dword &= mask; /* shift to correct alignment */ mask <<= shift_width; src_dword <<= shift_width; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA); dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */ /* put it all back */ ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA); } /** * ice_write_qword - write a qword to a packed context structure * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u64 src_qword, mask; __le64 dest_qword; u8 *from, *dest; u16 shift_width; /* copy from the next struct field */ from = src_ctx + ce_info->offset; /* prepare the bits and 
mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 64 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) mask = BIT_ULL(ce_info->width) - 1; else mask = (u64)~0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_qword = *(u64 *)from; src_qword &= mask; /* shift to correct alignment */ mask <<= shift_width; src_qword <<= shift_width; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA); dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */ /* put it all back */ ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); } /** * ice_set_ctx - set context bits in packed structure * @hw: pointer to the hardware structure * @src_ctx: pointer to a generic non-packed context structure * @dest_ctx: pointer to memory for the packed structure * @ce_info: a description of the structure to be transformed */ enum ice_status ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { int f; for (f = 0; ce_info[f].width; f++) { /* We have to deal with each element of the FW response * using the correct size so that we are correct regardless * of the endianness of the machine. */ if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", f, ce_info[f].width, ce_info[f].size_of); continue; } switch (ce_info[f].size_of) { case sizeof(u8): ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); break; case sizeof(u16): ice_write_word(src_ctx, dest_ctx, &ce_info[f]); break; case sizeof(u32): ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); break; case sizeof(u64): ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); break; default: return ICE_ERR_INVAL_SIZE; } } return ICE_SUCCESS; } /** * ice_read_byte - read context byte into struct * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { u8 dest_byte, mask; u8 *src, *target; u16 shift_width; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = (u8)(BIT(ce_info->width) - 1); /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = src_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA); dest_byte &= ~(mask); dest_byte >>= shift_width; /* get the address from the struct field */ target = dest_ctx + ce_info->offset; /* put it back in the struct */ ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); } /** * ice_read_word - read context word into struct * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { u16 dest_word, mask; u8 *src, *target; __le16 src_word; u16 shift_width; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = BIT(ce_info->width) - 1; /* shift to correct alignment */ mask <<= shift_width; /* get the 
current bits from the src bit string */ src = src_ctx + (ce_info->lsb / 8); ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA); /* the data in the memory is stored as little endian so mask it * correctly */ src_word &= ~(CPU_TO_LE16(mask)); /* get the data back into host order before shifting */ dest_word = LE16_TO_CPU(src_word); dest_word >>= shift_width; /* get the address from the struct field */ target = dest_ctx + ce_info->offset; /* put it back in the struct */ ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); } /** * ice_read_dword - read context dword into struct * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { u32 dest_dword, mask; __le32 src_dword; u8 *src, *target; u16 shift_width; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 32 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) mask = BIT(ce_info->width) - 1; else mask = (u32)~0; /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = src_ctx + (ce_info->lsb / 8); ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA); /* the data in the memory is stored as little endian so mask it * correctly */ src_dword &= ~(CPU_TO_LE32(mask)); /* get the data back into host order before shifting */ dest_dword = LE32_TO_CPU(src_dword); dest_dword >>= shift_width; /* get the address from the struct field */ target = dest_ctx + ce_info->offset; /* put it back in the struct */ ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA); } /** * ice_read_qword - read context qword into struct * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { u64 dest_qword, mask; __le64 src_qword; u8 *src, *target; u16 shift_width; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 64 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) mask = BIT_ULL(ce_info->width) - 1; else mask = (u64)~0; /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = src_ctx + (ce_info->lsb / 8); ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA); /* the data in the memory is stored as little endian so mask it * correctly */ src_qword &= ~(CPU_TO_LE64(mask)); /* get the data back into host order before shifting */ dest_qword = LE64_TO_CPU(src_qword); dest_qword >>= shift_width; /* get the address from the struct field */ target = dest_ctx + ce_info->offset; /* put it back in the struct */ ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); } /** * ice_get_ctx - extract context bits from a packed structure * @src_ctx: pointer to a generic packed context structure * @dest_ctx: pointer to a generic non-packed context structure * @ce_info: a description of the structure to be read from */ enum ice_status ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { int f; for (f = 0; 
ce_info[f].width; f++) { switch (ce_info[f].size_of) { case 1: ice_read_byte(src_ctx, dest_ctx, &ce_info[f]); break; case 2: ice_read_word(src_ctx, dest_ctx, &ce_info[f]); break; case 4: ice_read_dword(src_ctx, dest_ctx, &ce_info[f]); break; case 8: ice_read_qword(src_ctx, dest_ctx, &ce_info[f]); break; default: /* nothing to do, just keep going */ break; } } return ICE_SUCCESS; } /** * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * @tc: TC number * @q_handle: software queue handle */ struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) { struct ice_vsi_ctx *vsi; struct ice_q_ctx *q_ctx; vsi = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi) return NULL; if (q_handle >= vsi->num_lan_q_entries[tc]) return NULL; if (!vsi->lan_q_ctx[tc]) return NULL; q_ctx = vsi->lan_q_ctx[tc]; return &q_ctx[q_handle]; } /** * ice_ena_vsi_txq * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number * @q_handle: software queue handle * @num_qgrps: Number of added queue groups * @buf: list of queue groups to be added * @buf_size: size of buffer for indirect command * @cd: pointer to command details structure or NULL * * This function adds one LAN queue */ enum ice_status ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_sched_node *parent; struct ice_q_ctx *q_ctx; enum ice_status status; struct ice_hw *hw; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; if (num_qgrps > 1 || buf->num_txqs > 1) return ICE_ERR_MAX_LIMIT; hw = pi->hw; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); if (!q_ctx) { ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", q_handle); status = ICE_ERR_PARAM; goto ena_txq_exit; } /* find a parent node */ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_LAN); if (!parent) { status = ICE_ERR_PARAM; goto ena_txq_exit; } buf->parent_teid = parent->info.node_teid; node.parent_teid = parent->info.node_teid; /* Mark the values in the "generic" section as valid. The default * value in the "generic" section is zero. This means that: * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. * - 0 priority among siblings, indicated by Bit 1-3. * - WFQ, indicated by Bit 4. * - 0 Adjustment value is used in PSM credit update flow, indicated by * Bit 5-6. * - Bit 7 is reserved. * Without setting the generic section as valid in valid_sections, the * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
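 * For illustration only (a sketch derived from the bit list above, not
 * part of this change): leaving the byte at zero selects BPS scheduling,
 * sibling priority 0, WFQ, and a zero PSM adjustment, while a hypothetical
 * strict-priority node at sibling priority 3 would presumably encode
 * generic = (3 << 1) | BIT(4) = 0x16; in every case valid_sections must
 * include ICE_AQC_ELEM_VALID_GENERIC for the firmware to accept it.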
*/ buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR; buf->txqs[0].info.generic = 0; buf->txqs[0].info.cir_bw.bw_profile_idx = CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); buf->txqs[0].info.cir_bw.bw_alloc = CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); buf->txqs[0].info.eir_bw.bw_profile_idx = CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); buf->txqs[0].info.eir_bw.bw_alloc = CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); /* add the LAN queue */ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", LE16_TO_CPU(buf->txqs[0].txq_id), hw->adminq.sq_last_status); goto ena_txq_exit; } node.node_teid = buf->txqs[0].q_teid; node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; q_ctx->q_handle = q_handle; q_ctx->q_teid = LE32_TO_CPU(node.node_teid); /* add a leaf node into scheduler tree queue layer */ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); if (!status) status = ice_sched_replay_q_bw(pi, q_ctx); ena_txq_exit: ice_release_lock(&pi->sched_lock); return status; } /** * ice_dis_vsi_txq * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number * @num_queues: number of queues * @q_handles: pointer to software queue handle array * @q_ids: pointer to the q_id array * @q_teids: pointer to queue node teids * @rst_src: if called due to reset, specifies the reset source * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * This function removes queues and their corresponding nodes in SW DB */ enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handles, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item *qg_list; struct ice_q_ctx *q_ctx; struct ice_hw *hw; u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; hw = pi->hw; if (!num_queues) { /* if queue is disabled already yet the disable queue command * has to be sent to complete the VF reset, then call * ice_aq_dis_lan_txq without any queue information */ if (rst_src) return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, vmvf_num, NULL); return ICE_ERR_CFG; } buf_size = ice_struct_size(qg_list, q_id, 1); qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size); if (!qg_list) return ICE_ERR_NO_MEMORY; ice_acquire_lock(&pi->sched_lock); for (i = 0; i < num_queues; i++) { struct ice_sched_node *node; node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); if (!node) continue; q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); if (!q_ctx) { ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", q_handles[i]); continue; } if (q_ctx->q_handle != q_handles[i]) { ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", q_ctx->q_handle, q_handles[i]); continue; } qg_list->parent_teid = node->info.parent_teid; qg_list->num_qs = 1; qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]); status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, vmvf_num, cd); if (status != ICE_SUCCESS) break; ice_free_sched_node(pi, node); q_ctx->q_handle = ICE_INVAL_Q_HANDLE; } ice_release_lock(&pi->sched_lock); ice_free(hw, qg_list); return status; } /** * ice_cfg_vsi_qs - configure the new/existing VSI queues * @pi: port information structure * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @maxqs: max queues array 
per TC * @owner: LAN or RDMA * * This function adds/updates the VSI queues per TC. */ static enum ice_status ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *maxqs, u8 owner) { enum ice_status status = ICE_SUCCESS; u8 i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); ice_for_each_traffic_class(i) { /* configuration is possible only if TC node is present */ if (!ice_sched_get_tc_node(pi, i)) continue; status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, ice_is_tc_ena(tc_bitmap, i)); if (status) break; } ice_release_lock(&pi->sched_lock); return status; } /** * ice_cfg_vsi_lan - configure VSI LAN queues * @pi: port information structure * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @max_lanqs: max LAN queues array per TC * * This function adds/updates the VSI LAN queues per TC. */ enum ice_status ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_lanqs) { return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, ICE_SCHED_NODE_OWNER_LAN); } /** * ice_is_main_vsi - checks whether the VSI is main VSI * @hw: pointer to the HW struct * @vsi_handle: VSI handle * * Checks whether the VSI is the main VSI (the first PF VSI created on * given PF). */ static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle) { return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle]; } /** * ice_replay_pre_init - replay pre initialization * @hw: pointer to the HW struct * @sw: pointer to switch info struct for which function initializes filters * * Initializes required config data for VSI, FD, ACL, and RSS before replay. */ static enum ice_status ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw) { enum ice_status status; u8 i; /* Delete old entries from replay filter list head if there is any */ ice_rm_sw_replay_rule_info(hw, sw); /* In start of replay, move entries into replay_rules list, it * will allow adding rules entries back to filt_rules list, * which is operational list. */ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules, &sw->recp_list[i].filt_replay_rules); ice_sched_replay_agg_vsi_preinit(hw); status = ice_sched_replay_root_node_bw(hw->port_info); if (status) return status; return ice_sched_replay_tc_node_bw(hw->port_info); } /** * ice_replay_vsi - replay VSI configuration * @hw: pointer to the HW struct * @vsi_handle: driver VSI handle * * Restore all VSI configuration after reset. It is required to call this * function with main VSI first. */ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) { struct ice_switch_info *sw = hw->switch_info; struct ice_port_info *pi = hw->port_info; enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; /* Replay pre-initialization if there is any */ if (ice_is_main_vsi(hw, vsi_handle)) { status = ice_replay_pre_init(hw, sw); if (status) return status; } /* Replay per VSI all RSS configurations */ status = ice_replay_rss_cfg(hw, vsi_handle); if (status) return status; /* Replay per VSI all filters */ status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle); if (!status) status = ice_replay_vsi_agg(hw, vsi_handle); return status; } /** * ice_replay_post - post replay configuration cleanup * @hw: pointer to the HW struct * * Post replay cleanup. 
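 * As a usage sketch (hedged; the iteration over the driver's VSI handles
 * is assumed here and not defined in this file), a reset-recovery path
 * would look roughly like:
 *
 *	ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);	(main VSI first)
 *	for each other valid vsi_handle:
 *		ice_replay_vsi(hw, vsi_handle);
 *	ice_replay_post(hw);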
*/ void ice_replay_post(struct ice_hw *hw) { /* Delete old entries from replay filter list head */ ice_rm_all_sw_replay_rule_info(hw); ice_sched_replay_agg(hw); } /** * ice_stat_update40 - read 40 bit stat from the chip and update stat values * @hw: ptr to the hardware info * @reg: offset of 64 bit HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) { u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); /* device stats are not reset at PFR, they likely will not be zeroed * when the driver starts. Thus, save the value from the first read * without adding to the statistic value so that we report stats which * count up from zero. */ if (!prev_stat_loaded) { *prev_stat = new_data; return; } /* Calculate the difference between the new and old values, and then * add it to the software stat value. */ if (new_data >= *prev_stat) *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; /* Update the previously stored value to prepare for next read */ *prev_stat = new_data; } /** * ice_stat_update32 - read 32 bit stat from the chip and update stat values * @hw: ptr to the hardware info * @reg: offset of HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) { u32 new_data; new_data = rd32(hw, reg); /* device stats are not reset at PFR, they likely will not be zeroed * when the driver starts. Thus, save the value from the first read * without adding to the statistic value so that we report stats which * count up from zero. */ if (!prev_stat_loaded) { *prev_stat = new_data; return; } /* Calculate the difference between the new and old values, and then * add it to the software stat value. */ if (new_data >= *prev_stat) *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; /* Update the previously stored value to prepare for next read */ *prev_stat = new_data; } /** * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values * @hw: ptr to the hardware info * @vsi_handle: VSI handle * @prev_stat_loaded: bool to specify if the previous stat values are loaded * @cur_stats: ptr to current stats structure * * The GLV_REPC statistic register actually tracks two 16bit statistics, and * thus cannot be read using the normal ice_stat_update32 function. * * Read the GLV_REPC register associated with the given VSI, and update the * rx_no_desc and rx_error values in the ice_eth_stats structure. * * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be * cleared each time it's read. * * Note that the GLV_RDPC register also counts the causes that would trigger * GLV_REPC. However, it does not give the finer grained detail about why the * packets are being dropped. The GLV_REPC values can be used to distinguish * whether Rx packets are dropped due to errors or due to no available * descriptors. 
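 * Because of the read-and-clear behavior, the deltas below are simply
 * accumulated and no roll-over correction is needed. Contrast this with
 * the 32/40 bit helpers above, which keep the previous raw reading and
 * correct for wrap: for example, with *prev_stat = 0xFFFFFFF0 and a new
 * 32-bit reading of 0x10, (0x10 + 2^32) - 0xFFFFFFF0 = 0x20 is added to
 * the software counter.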
*/ void ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded, struct ice_eth_stats *cur_stats) { u16 vsi_num, no_desc, error_cnt; u32 repc; if (!ice_is_vsi_valid(hw, vsi_handle)) return; vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); /* If we haven't loaded stats yet, just clear the current value */ if (!prev_stat_loaded) { wr32(hw, GLV_REPC(vsi_num), 0); return; } repc = rd32(hw, GLV_REPC(vsi_num)); no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S; error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S; /* Clear the count by writing to the stats register */ wr32(hw, GLV_REPC(vsi_num), 0); cur_stats->rx_no_desc += no_desc; cur_stats->rx_errors += error_cnt; } /** * ice_aq_alternate_write * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be written * @reg_val0: value to be written under 'reg_addr0' * @reg_addr1: address of second dword to be written * @reg_val1: value to be written under 'reg_addr1' * * Write one or two dwords to alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. */ enum ice_status ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1) { struct ice_aqc_read_write_alt_direct *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.read_write_alt_direct; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct); cmd->dword0_addr = CPU_TO_LE32(reg_addr0); cmd->dword1_addr = CPU_TO_LE32(reg_addr1); cmd->dword0_value = CPU_TO_LE32(reg_val0); cmd->dword1_value = CPU_TO_LE32(reg_val1); status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); return status; } /** * ice_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read * @reg_val0: pointer for data read from 'reg_addr0' * @reg_addr1: address of second dword to be read * @reg_val1: pointer for data read from 'reg_addr1' * * Read one or two dwords from alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer * is not passed then only register at 'reg_addr0' is read. */ enum ice_status ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1) { struct ice_aqc_read_write_alt_direct *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.read_write_alt_direct; if (!reg_val0) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct); cmd->dword0_addr = CPU_TO_LE32(reg_addr0); cmd->dword1_addr = CPU_TO_LE32(reg_addr1); status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); if (status == ICE_SUCCESS) { *reg_val0 = LE32_TO_CPU(cmd->dword0_value); if (reg_val1) *reg_val1 = LE32_TO_CPU(cmd->dword1_value); } return status; } /** * ice_aq_alternate_write_done * @hw: pointer to the HW structure. * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS * @reset_needed: indicates the SW should trigger GLOBAL reset * * Indicates to the FW that alternate structures have been changed. 
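 * A plausible calling sequence (sketch only; addr0/val0/addr1/val1 and
 * the reset trigger are placeholders, not names from this driver):
 *
 *	bool reset_needed;
 *
 *	ice_aq_alternate_write(hw, addr0, val0, addr1, val1);
 *	if (!ice_aq_alternate_write_done(hw, bios_mode, &reset_needed) &&
 *	    reset_needed)
 *		(request a GLOBAL reset here)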
*/ enum ice_status ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed) { struct ice_aqc_done_alt_write *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.done_alt_write; if (!reset_needed) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write); cmd->flags = bios_mode; status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); if (!status) *reset_needed = (LE16_TO_CPU(cmd->flags) & ICE_AQC_RESP_RESET_NEEDED) != 0; return status; } /** * ice_aq_alternate_clear * @hw: pointer to the HW structure. * * Clear the alternate structures of the port from which the function * is called. */ enum ice_status ice_aq_alternate_clear(struct ice_hw *hw) { struct ice_aq_desc desc; enum ice_status status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write); status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); return status; } /** * ice_sched_query_elem - query element information from HW * @hw: pointer to the HW struct * @node_teid: node TEID to be queried * @buf: buffer to element information * * This function queries HW element information */ enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf) { u16 buf_size, num_elem_ret = 0; enum ice_status status; buf_size = sizeof(*buf); ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM); buf->node_teid = CPU_TO_LE32(node_teid); status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, NULL); if (status != ICE_SUCCESS || num_elem_ret != 1) ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); return status; } /** * ice_get_fw_mode - returns FW mode * @hw: pointer to the HW struct */ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw) { #define ICE_FW_MODE_DBG_M BIT(0) #define ICE_FW_MODE_REC_M BIT(1) #define ICE_FW_MODE_ROLLBACK_M BIT(2) u32 fw_mode; /* check the current FW mode */ fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M; if (fw_mode & ICE_FW_MODE_DBG_M) return ICE_FW_MODE_DBG; else if (fw_mode & ICE_FW_MODE_REC_M) return ICE_FW_MODE_REC; else if (fw_mode & ICE_FW_MODE_ROLLBACK_M) return ICE_FW_MODE_ROLLBACK; else return ICE_FW_MODE_NORMAL; } /** * ice_get_cur_lldp_persist_status * @hw: pointer to the HW struct * @lldp_status: return value of LLDP persistent status * * Get the current LLDP persistent status */ enum ice_status ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status) { struct ice_port_info *pi = hw->port_info; enum ice_status ret; __le32 raw_data; u32 data, mask; if (!lldp_status) return ICE_ERR_BAD_PTR; ret = ice_acquire_nvm(hw, ICE_RES_READ); if (ret) return ret; ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID, ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET, ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false, true, NULL); if (!ret) { data = LE32_TO_CPU(raw_data); mask = ICE_AQC_NVM_LLDP_STATUS_M << (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport); data = data & mask; *lldp_status = data >> (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport); } ice_release_nvm(hw); return ret; } /** * ice_get_dflt_lldp_persist_status * @hw: pointer to the HW struct * @lldp_status: return value of LLDP persistent status * * Get the default LLDP persistent status */ enum ice_status ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status) { struct ice_port_info *pi = hw->port_info; u32 data, mask, loc_data, loc_data_tmp; enum ice_status ret; __le16 loc_raw_data; __le32 raw_data; if (!lldp_status) return ICE_ERR_BAD_PTR; ret = ice_acquire_nvm(hw, ICE_RES_READ); if (ret)
return ret; /* Read the offset of EMP_SR_PTR */ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, ICE_AQC_NVM_EMP_SR_PTR_OFFSET, ICE_AQC_NVM_EMP_SR_PTR_RD_LEN, &loc_raw_data, false, true, NULL); if (ret) goto exit; loc_data = LE16_TO_CPU(loc_raw_data); if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) { loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M; loc_data *= ICE_AQC_NVM_SECTOR_UNIT; } else { loc_data *= ICE_AQC_NVM_WORD_UNIT; } /* Read the offset of LLDP configuration pointer */ loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET; ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data, ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data, false, true, NULL); if (ret) goto exit; loc_data_tmp = LE16_TO_CPU(loc_raw_data); loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT; loc_data += loc_data_tmp; /* We need to skip LLDP configuration section length (2 bytes) */ loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN; /* Read the LLDP Default Configure */ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data, ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false, true, NULL); if (!ret) { data = LE32_TO_CPU(raw_data); mask = ICE_AQC_NVM_LLDP_STATUS_M << (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport); data = data & mask; *lldp_status = data >> (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport); } exit: ice_release_nvm(hw); return ret; } /** * ice_fw_supports_link_override * @hw: pointer to the hardware structure * * Checks if the firmware supports link override */ bool ice_fw_supports_link_override(struct ice_hw *hw) { if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) return true; if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH) return true; } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { return true; } return false; } /** * ice_get_link_default_override * @ldo: pointer to the link default override struct * @pi: pointer to the port info struct * * Gets the link default override for a port */ enum ice_status ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi) { u16 i, tlv, tlv_len, tlv_start, buf, offset; struct ice_hw *hw = pi->hw; enum ice_status status; status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); return status; } /* Each port has its own config; calculate for our port */ tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + ICE_SR_PFA_LINK_OVERRIDE_OFFSET; /* link options first */ status = ice_read_sr_word(hw, tlv_start, &buf); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> ICE_LINK_OVERRIDE_PHY_CFG_S; /* link PHY config */ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; status = ice_read_sr_word(hw, offset, &buf); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); return status; } ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; /* PHY types low */ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { status = ice_read_sr_word(hw, (offset + i), &buf); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } /* shift 16 bits at a time to fill 64 bits */ ldo->phy_type_low |= ((u64)buf << (i * 16)); } /* PHY types high */ offset = 
tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { status = ice_read_sr_word(hw, (offset + i), &buf); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } /* shift 16 bits at a time to fill 64 bits */ ldo->phy_type_high |= ((u64)buf << (i * 16)); } return status; } /** * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled * @caps: get PHY capability data */ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) { if (caps->caps & ICE_AQC_PHY_AN_MODE || caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 | ICE_AQC_PHY_AN_EN_CLAUSE73 | ICE_AQC_PHY_AN_EN_CLAUSE37)) return true; return false; } +/** + * ice_is_fw_health_report_supported + * @hw: pointer to the hardware structure + * + * Return true if firmware supports health status reports, + * false otherwise + */ +bool ice_is_fw_health_report_supported(struct ice_hw *hw) +{ + if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ) + return true; + + if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) { + if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN) + return true; + if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN && + hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH) + return true; + } + + return false; +} + +/** + * ice_aq_set_health_status_config - Configure FW health events + * @hw: pointer to the HW struct + * @event_source: type of diagnostic events to enable + * @cd: pointer to command details structure or NULL + * + * Configure the health status event types that the firmware will send to this + * PF. The supported event types are: PF-specific, all PFs, and global + */ +enum ice_status +ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, + struct ice_sq_cd *cd) +{ + struct ice_aqc_set_health_status_config *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_health_status_config; + + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_set_health_status_config); + + cmd->event_source = event_source; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + +/** + * ice_aq_get_port_options + * @hw: pointer to the hw struct + * @options: buffer for the resultant port options + * @option_count: input - size of the buffer in port options structures, + * output - number of returned port options + * @lport: logical port to call the command with (optional) + * @lport_valid: when false, FW uses port owned by the PF instead of lport, + * when PF owns more than 1 port it must be true + * @active_option_idx: index of active port option in returned buffer + * @active_option_valid: active option in returned buffer is valid + * + * Calls Get Port Options AQC (0x06ea) and verifies result. 
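+ *
+ * A minimal caller sketch (hedged; the buffer is sized to the maximum
+ * option count implied by the *option_count check below):
+ *
+ *	struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_COUNT_M];
+ *	u8 count = ICE_AQC_PORT_OPT_COUNT_M, active;
+ *	bool active_valid;
+ *
+ *	status = ice_aq_get_port_options(hw, opts, &count, 0, false,
+ *					 &active, &active_valid);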
+ */ +enum ice_status +ice_aq_get_port_options(struct ice_hw *hw, + struct ice_aqc_get_port_options_elem *options, + u8 *option_count, u8 lport, bool lport_valid, + u8 *active_option_idx, bool *active_option_valid) +{ + struct ice_aqc_get_port_options *cmd; + struct ice_aq_desc desc; + enum ice_status status; + u8 pmd_count; + u8 max_speed; + u8 i; + + ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + + /* options buffer shall be able to hold max returned options */ + if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) + return ICE_ERR_PARAM; + + cmd = &desc.params.get_port_options; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); + + if (lport_valid) + cmd->lport_num = lport; + cmd->lport_num_valid = lport_valid; + + status = ice_aq_send_cmd(hw, &desc, options, + *option_count * sizeof(*options), NULL); + if (status != ICE_SUCCESS) + return status; + + /* verify direct FW response & set output parameters */ + *option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M; + ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); + *active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID; + if (*active_option_valid) { + *active_option_idx = cmd->port_options & + ICE_AQC_PORT_OPT_ACTIVE_M; + if (*active_option_idx > (*option_count - 1)) + return ICE_ERR_OUT_OF_RANGE; + ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", + *active_option_idx); + } + + /* verify indirect FW response & mask output options fields */ + for (i = 0; i < *option_count; i++) { + options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M; + options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M; + pmd_count = options[i].pmd; + max_speed = options[i].max_lane_speed; + ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", + pmd_count, max_speed); + + /* check only entries containing valid max pmd speed values, + * other reserved values may be returned, when logical port + * used is unrelated to specific option + */ + if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) { + if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV) + return ICE_ERR_OUT_OF_RANGE; + if (pmd_count > 2 && + max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G) + return ICE_ERR_CFG; + if (pmd_count > 7 && + max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G) + return ICE_ERR_CFG; + } + } + + return ICE_SUCCESS; +} + /** * ice_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the HW struct * @mib_type: Local, Remote or both Local and Remote MIBs * @buf: pointer to the caller-supplied buffer to store the MIB block * @buf_size: size of the buffer (in bytes) * @cd: pointer to command details structure or NULL * * Set the LLDP MIB. 
(0x0A08) */ enum ice_status ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_lldp_set_local_mib *cmd; struct ice_aq_desc desc; cmd = &desc.params.lldp_set_mib; if (buf_size == 0 || !buf) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD); desc.datalen = CPU_TO_LE16(buf_size); cmd->type = mib_type; cmd->length = CPU_TO_LE16(buf_size); return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); } /** * ice_fw_supports_lldp_fltr_ctrl - check if FW API version supports lldp_fltr_ctrl * @hw: pointer to HW struct */ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) { if (hw->mac_type != ICE_MAC_E810) return false; if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) { if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN) return true; if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN && hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH) return true; } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) { return true; } return false; } /** * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter * @hw: pointer to HW struct * @vsi_num: absolute HW index for VSI * @add: boolean for if adding or removing a filter */ enum ice_status ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) { struct ice_aqc_lldp_filter_ctrl *cmd; struct ice_aq_desc desc; cmd = &desc.params.lldp_filter_ctrl; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); if (add) cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD; else cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; cmd->vsi_num = CPU_TO_LE16(vsi_num); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } + +/** + * ice_fw_supports_report_dflt_cfg + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports report default configuration + */ +bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) +{ + if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) { + if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN) + return true; + if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN && + hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH) + return true; + } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) { + return true; + } + return false; +} diff --git a/sys/dev/ice/ice_common.h b/sys/dev/ice/ice_common.h index 3a28816fb131..3f5cb9c7e901 100644 --- a/sys/dev/ice/ice_common.h +++ b/sys/dev/ice/ice_common.h @@ -1,278 +1,291 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_COMMON_H_ #define _ICE_COMMON_H_ #include "ice_type.h" #include "ice_nvm.h" #include "ice_flex_pipe.h" #include "virtchnl.h" #include "ice_switch.h" +#define ICE_SQ_SEND_DELAY_TIME_MS 10 +#define ICE_SQ_SEND_MAX_EXECUTE 3 + enum ice_fw_modes { ICE_FW_MODE_NORMAL, ICE_FW_MODE_DBG, ICE_FW_MODE_REC, ICE_FW_MODE_ROLLBACK }; void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq); bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq); enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); void ice_shutdown_all_ctrlq(struct ice_hw *hw); void ice_destroy_all_ctrlq(struct ice_hw *hw); enum ice_status ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up); enum ice_status ice_update_link_info(struct ice_port_info *pi); enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); enum ice_status ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res); enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res); enum ice_status ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd); enum ice_status ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); void ice_clear_pxe_mode(struct ice_hw *hw); enum ice_status ice_get_caps(struct ice_hw *hw); void ice_set_safe_mode_caps(struct ice_hw *hw); enum ice_status ice_set_mac_type(struct ice_hw *hw); /* Define a macro that will align a pointer to point to the next memory address * that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For * example, given the variable pointer = 0x1006, then after the following call: * * pointer = ICE_ALIGN(pointer, 4) * * ... the value of pointer would equal 0x1008, since 0x1008 is the next * address after 0x1006 which is divisible by 4. 
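 * An already-aligned pointer is returned unchanged; for example,
 * ICE_ALIGN(0x1008, 4) == 0x1008, since (0x1008 + 3) & ~3 is 0x1008.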
*/ #define ICE_ALIGN(ptr, align) (((ptr) + ((align) - 1)) & ~((align) - 1)) enum ice_status ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index); enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index); enum ice_status ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index); enum ice_status ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx, u32 tx_cmpltnq_index); enum ice_status ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index); enum ice_status ice_write_tx_drbell_q_ctx(struct ice_hw *hw, struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx, u32 tx_drbell_q_index); enum ice_status ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params); enum ice_status ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params); enum ice_status ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); enum ice_status ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); enum ice_status ice_aq_add_lan_txq(struct ice_hw *hw, u8 count, struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, bool is_tc_change, bool subseq_call, bool flush_pipe, u8 timeout, u32 *blocked_cgds, struct ice_aqc_move_txqs_data *buf, u16 buf_size, u8 *txqs_moved, struct ice_sq_cd *cd); bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; enum ice_status ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); enum ice_status ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd); enum ice_status ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct ice_sq_cd *cd); enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap); enum ice_status ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); enum ice_status ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); bool ice_fw_supports_link_override(struct ice_hw *hw); enum ice_status ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); enum ice_status ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); bool ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps, struct 
ice_aqc_set_phy_cfg_data *cfg); void ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); enum ice_status ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec); enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); enum ice_status ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd); enum ice_status ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); enum ice_status ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd); enum ice_status ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); enum ice_status ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd); enum ice_status +ice_aq_get_port_options(struct ice_hw *hw, + struct ice_aqc_get_port_options_elem *options, + u8 *option_count, u8 lport, bool lport_valid, + u8 *active_option_idx, bool *active_option_valid); +enum ice_status ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info); enum ice_status __ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data); enum ice_status __ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data); enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handle, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd); enum ice_status ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_lanqs); enum ice_status ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); void ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded, struct ice_eth_stats *cur_stats); enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw); void ice_print_rollback_msg(struct ice_hw *hw); enum ice_status ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1); enum ice_status ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1); enum ice_status ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed); enum ice_status ice_aq_alternate_clear(struct ice_hw *hw); enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); enum ice_status ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status); enum ice_status ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status); enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist); enum ice_status ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 
buf_size, struct ice_sq_cd *cd); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); enum ice_status ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +enum ice_status +ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, + struct ice_sq_cd *cd); +bool ice_is_fw_health_report_supported(struct ice_hw *hw); +bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/sys/dev/ice/ice_common_sysctls.h b/sys/dev/ice/ice_common_sysctls.h index 17c2fce72ee7..f1a23ce828ff 100644 --- a/sys/dev/ice/ice_common_sysctls.h +++ b/sys/dev/ice/ice_common_sysctls.h @@ -1,100 +1,116 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_common_sysctls.h * @brief driver wide sysctls not related to the iflib stack * * Contains static sysctl values which are driver wide and configure all * devices of the driver at once. * * Device specific sysctls are setup by functions in ice_lib.c */ #ifndef _ICE_COMMON_SYSCTLS_H_ #define _ICE_COMMON_SYSCTLS_H_ #include /** * @var ice_enable_tx_fc_filter * @brief boolean indicating if the Tx Flow Control filter should be enabled * * Global sysctl variable indicating whether the Tx Flow Control filters * should be enabled. If true, Ethertype 0x8808 packets will be dropped if * they come from non-HW sources. If false, packets coming from software will * not be dropped. Leave this on if unless you must send flow control frames * (or other control frames) from software. * * @remark each PF has a separate sysctl which can override this value. */ bool ice_enable_tx_fc_filter = true; /** * @var ice_enable_tx_lldp_filter * @brief boolean indicating if the Tx LLDP filter should be enabled * * Global sysctl variable indicating whether the Tx Flow Control filters * should be enabled. If true, Ethertype 0x88cc packets will be dropped if * they come from non-HW sources. If false, packets coming from software will * not be dropped. 
Leave this on unless you must send LLDP frames from * software. * * @remark each PF has a separate sysctl which can override this value. */ bool ice_enable_tx_lldp_filter = true; +/** + * @var ice_enable_health_events + * @brief boolean indicating if health status events from the FW should be reported + * + * Global sysctl variable indicating whether the Health Status events from the + * FW should be enabled. If true, when an event occurs, the driver will print out + * a message with a description of the event and possible actions to take. + * + * @remark each PF has a separate sysctl which can override this value. + */ +bool ice_enable_health_events = true; + /* sysctls marked as tunable (i.e. with the CTLFLAG_TUN set) will * automatically load tunable values, without the need to manually create the * TUNABLE definition. * * This works since at least FreeBSD 11, and was backported into FreeBSD 10 * before the FreeBSD 10.1-RELEASE. * * If the tunable needs a custom loader, mark the SYSCTL as CTLFLAG_NOFETCH, * and create the tunable manually. */ static SYSCTL_NODE(_hw, OID_AUTO, ice, CTLFLAG_RD, 0, "ICE driver parameters"); static SYSCTL_NODE(_hw_ice, OID_AUTO, debug, ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 0, "ICE driver debug parameters"); +SYSCTL_BOOL(_hw_ice, OID_AUTO, enable_health_events, CTLFLAG_RDTUN, + &ice_enable_health_events, 0, + "Enable FW health event reporting globally"); + SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, &ice_enable_tx_fc_filter, 0, "Drop Ethertype 0x8808 control frames originating from non-HW sources"); SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_lldp_filter, CTLFLAG_RDTUN, &ice_enable_tx_lldp_filter, 0, "Drop Ethertype 0x88cc LLDP frames originating from non-HW sources"); #endif /* _ICE_COMMON_SYSCTLS_H_ */ diff --git a/sys/dev/ice/ice_controlq.c b/sys/dev/ice/ice_controlq.c index ee8d7f5c3e8f..636c2da3e4a1 100644 --- a/sys/dev/ice/ice_controlq.c +++ b/sys/dev/ice/ice_controlq.c @@ -1,1239 +1,1239 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "ice_common.h" #define ICE_CQ_INIT_REGS(qinfo, prefix) \ do { \ (qinfo)->sq.head = prefix##_ATQH; \ (qinfo)->sq.tail = prefix##_ATQT; \ (qinfo)->sq.len = prefix##_ATQLEN; \ (qinfo)->sq.bah = prefix##_ATQBAH; \ (qinfo)->sq.bal = prefix##_ATQBAL; \ (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \ (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \ (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \ (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \ (qinfo)->rq.head = prefix##_ARQH; \ (qinfo)->rq.tail = prefix##_ARQT; \ (qinfo)->rq.len = prefix##_ARQLEN; \ (qinfo)->rq.bah = prefix##_ARQBAH; \ (qinfo)->rq.bal = prefix##_ARQBAL; \ (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \ (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \ (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \ (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ } while (0) /** * ice_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure * * This assumes the alloc_sq and alloc_rq functions have already been called */ static void ice_adminq_init_regs(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->adminq; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ICE_CQ_INIT_REGS(cq, PF_FW); } /** * ice_mailbox_init_regs - Initialize Mailbox registers * @hw: pointer to the hardware structure * * This assumes the alloc_sq and alloc_rq functions have already been called */ static void ice_mailbox_init_regs(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->mailboxq; ICE_CQ_INIT_REGS(cq, PF_MBX); } /** * ice_check_sq_alive * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * * Returns true if Queue is enabled else false. 
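 * (Descriptive note on the check below: the queue counts as alive when the * length register still reports the configured entry count with the enable * bit set; a reset or shutdown clears that register, so a stale * ice_ctl_q_info structure is detected cheaply.)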
*/ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) { /* check both queue-length and queue-enable fields */ if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | cq->sq.len_ena_mask)) == (cq->num_sq_entries | cq->sq.len_ena_mask); return false; } /** * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ static enum ice_status ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size); if (!cq->sq.desc_buf.va) return ICE_ERR_NO_MEMORY; cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries, sizeof(struct ice_sq_cd)); if (!cq->sq.cmd_buf) { ice_free_dma_mem(hw, &cq->sq.desc_buf); return ICE_ERR_NO_MEMORY; } return ICE_SUCCESS; } /** * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ static enum ice_status ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size); if (!cq->rq.desc_buf.va) return ICE_ERR_NO_MEMORY; return ICE_SUCCESS; } /** * ice_free_cq_ring - Free control queue ring * @hw: pointer to the hardware structure * @ring: pointer to the specific control queue ring * * This assumes the posted buffers have already been cleaned * and de-allocated */ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) { ice_free_dma_mem(hw, &ring->desc_buf); } /** * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ static enum ice_status ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; /* We'll be allocating the buffer info memory first, then we can * allocate the mapped buffers for the event processing */ cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries, sizeof(cq->rq.desc_buf)); if (!cq->rq.dma_head) return ICE_ERR_NO_MEMORY; cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; /* allocate the mapped buffers */ for (i = 0; i < cq->num_rq_entries; i++) { struct ice_aq_desc *desc; struct ice_dma_mem *bi; bi = &cq->rq.r.rq_bi[i]; bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size); if (!bi->va) goto unwind_alloc_rq_bufs; /* now configure the descriptors for use */ desc = ICE_CTL_Q_DESC(cq->rq, i); desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF); if (cq->rq_buf_size > ICE_AQ_LG_BUF) desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); desc->opcode = 0; /* This is in accordance with Admin queue design, there is no * register for buffer size configuration */ desc->datalen = CPU_TO_LE16(bi->size); desc->retval = 0; desc->cookie_high = 0; desc->cookie_low = 0; desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa)); desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa)); desc->params.generic.param0 = 0; desc->params.generic.param1 = 0; } return ICE_SUCCESS; unwind_alloc_rq_bufs: /* don't try to free the one that failed... 
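 * (its va pointer is NULL); step back and release only the buffers that * were successfully mapped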
*/ i--; for (; i >= 0; i--) ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]); cq->rq.r.rq_bi = NULL; ice_free(hw, cq->rq.dma_head); cq->rq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } /** * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ static enum ice_status ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; /* No mapped memory needed yet, just the buffer info structures */ cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries, sizeof(cq->sq.desc_buf)); if (!cq->sq.dma_head) return ICE_ERR_NO_MEMORY; cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; /* allocate the mapped buffers */ for (i = 0; i < cq->num_sq_entries; i++) { struct ice_dma_mem *bi; bi = &cq->sq.r.sq_bi[i]; bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size); if (!bi->va) goto unwind_alloc_sq_bufs; } return ICE_SUCCESS; unwind_alloc_sq_bufs: /* don't try to free the one that failed... */ i--; for (; i >= 0; i--) ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]); cq->sq.r.sq_bi = NULL; ice_free(hw, cq->sq.dma_head); cq->sq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } static enum ice_status ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) { /* Clear Head and Tail */ wr32(hw, ring->head, 0); wr32(hw, ring->tail, 0); /* set starting point */ wr32(hw, ring->len, (num_entries | ring->len_ena_mask)); wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa)); wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa)); /* Check one register to verify that config was applied */ if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa)) return ICE_ERR_AQ_ERROR; return ICE_SUCCESS; } /** * ice_cfg_sq_regs - configure Control ATQ registers * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * Configure base address and length registers for the transmit queue */ static enum ice_status ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); } /** * ice_cfg_rq_regs - configure Control ARQ register * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * Configure base address and length registers for the receive (event queue) */ static enum ice_status ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status status; status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); if (status) return status; /* Update tail in the HW to post pre-allocated buffers */ wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1)); return ICE_SUCCESS; } #define ICE_FREE_CQ_BUFS(hw, qi, ring) \ do { \ /* free descriptors */ \ if ((qi)->ring.r.ring##_bi) { \ int i; \ \ for (i = 0; i < (qi)->num_##ring##_entries; i++) \ if ((qi)->ring.r.ring##_bi[i].pa) \ ice_free_dma_mem((hw), \ &(qi)->ring.r.ring##_bi[i]); \ } \ /* free the buffer info list */ \ if ((qi)->ring.cmd_buf) \ ice_free(hw, (qi)->ring.cmd_buf); \ /* free DMA head */ \ ice_free(hw, (qi)->ring.dma_head); \ } while (0) /** * ice_init_sq - main initialization routine for Control ATQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * This is the main initialization routine for the Control Send Queue * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->sq_buf_size * * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe */ static enum 
ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status ret_code; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (cq->sq.count > 0) { /* queue already initialized */ ret_code = ICE_ERR_NOT_READY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_sq_entries || !cq->sq_buf_size) { ret_code = ICE_ERR_CFG; goto init_ctrlq_exit; } cq->sq.next_to_use = 0; cq->sq.next_to_clean = 0; /* allocate the ring memory */ ret_code = ice_alloc_ctrlq_sq_ring(hw, cq); if (ret_code) goto init_ctrlq_exit; /* allocate buffers in the rings */ ret_code = ice_alloc_sq_bufs(hw, cq); if (ret_code) goto init_ctrlq_free_rings; /* initialize base registers */ ret_code = ice_cfg_sq_regs(hw, cq); if (ret_code) goto init_ctrlq_free_rings; /* success! */ cq->sq.count = cq->num_sq_entries; goto init_ctrlq_exit; init_ctrlq_free_rings: ICE_FREE_CQ_BUFS(hw, cq, sq); ice_free_cq_ring(hw, &cq->sq); init_ctrlq_exit: return ret_code; } /** * ice_init_rq - initialize ARQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * The main initialization routine for the Admin Receive (Event) Queue. * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_rq_entries * - cq->rq_buf_size * * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe */ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status ret_code; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (cq->rq.count > 0) { /* queue already initialized */ ret_code = ICE_ERR_NOT_READY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->rq_buf_size) { ret_code = ICE_ERR_CFG; goto init_ctrlq_exit; } cq->rq.next_to_use = 0; cq->rq.next_to_clean = 0; /* allocate the ring memory */ ret_code = ice_alloc_ctrlq_rq_ring(hw, cq); if (ret_code) goto init_ctrlq_exit; /* allocate buffers in the rings */ ret_code = ice_alloc_rq_bufs(hw, cq); if (ret_code) goto init_ctrlq_free_rings; /* initialize base registers */ ret_code = ice_cfg_rq_regs(hw, cq); if (ret_code) goto init_ctrlq_free_rings; /* success! */ cq->rq.count = cq->num_rq_entries; goto init_ctrlq_exit; init_ctrlq_free_rings: ICE_FREE_CQ_BUFS(hw, cq, rq); ice_free_cq_ring(hw, &cq->rq); init_ctrlq_exit: return ret_code; } /** * ice_shutdown_sq - shutdown the Control ATQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * The main shutdown routine for the Control Transmit Queue */ static enum ice_status ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status ret_code = ICE_SUCCESS; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_acquire_lock(&cq->sq_lock); if (!cq->sq.count) { ret_code = ICE_ERR_NOT_READY; goto shutdown_sq_out; } /* Stop firmware AdminQ processing */ wr32(hw, cq->sq.head, 0); wr32(hw, cq->sq.tail, 0); wr32(hw, cq->sq.len, 0); wr32(hw, cq->sq.bal, 0); wr32(hw, cq->sq.bah, 0); cq->sq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers and the ring itself */ ICE_FREE_CQ_BUFS(hw, cq, sq); ice_free_cq_ring(hw, &cq->sq); shutdown_sq_out: ice_release_lock(&cq->sq_lock); return ret_code; } /** * ice_aq_ver_check - Check the reported AQ API version. * @hw: pointer to the hardware structure * * Checks if the driver should load on a given AQ API version. * * Return: 'true' iff the driver should attempt to load. 'false' otherwise. 
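 * For example, against the expected AQ API version 1.5 from ice_controlq.h * (EXP_FW_API_VER_MAJOR/EXP_FW_API_VER_MINOR): API 1.8 loads with a "newer * than expected" message, API 1.2 loads with a "please update the NVM" * message, and API 2.0 refuses to load.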
*/ static bool ice_aq_ver_check(struct ice_hw *hw) { if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { /* Major API version is newer than expected, don't load */ ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); return false; } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); } else { /* Major API version is older than expected, log a warning */ ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); } return true; } /** * ice_shutdown_rq - shutdown Control ARQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * The main shutdown routine for the Control Receive Queue */ static enum ice_status ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status ret_code = ICE_SUCCESS; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_acquire_lock(&cq->rq_lock); if (!cq->rq.count) { ret_code = ICE_ERR_NOT_READY; goto shutdown_rq_out; } /* Stop Control Queue processing */ wr32(hw, cq->rq.head, 0); wr32(hw, cq->rq.tail, 0); wr32(hw, cq->rq.len, 0); wr32(hw, cq->rq.bal, 0); wr32(hw, cq->rq.bah, 0); /* set rq.count to 0 to indicate uninitialized queue */ cq->rq.count = 0; /* free ring buffers and the ring itself */ ICE_FREE_CQ_BUFS(hw, cq, rq); ice_free_cq_ring(hw, &cq->rq); shutdown_rq_out: ice_release_lock(&cq->rq_lock); return ret_code; } /** * ice_idle_aq - stop ARQ/ATQ processing momentarily * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { wr32(hw, cq->sq.len, 0); wr32(hw, cq->rq.len, 0); ice_msec_delay(2, false); } /** * ice_init_check_adminq - Check version for Admin Queue to know if it's alive * @hw: pointer to the hardware structure */ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->adminq; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_aq_get_fw_ver(hw, NULL); if (status) goto init_ctrlq_free_rq; if (!ice_aq_ver_check(hw)) { status = ICE_ERR_FW_API_VER; goto init_ctrlq_free_rq; } return ICE_SUCCESS; init_ctrlq_free_rq: ice_shutdown_rq(hw, cq); ice_shutdown_sq(hw, cq); return status; } /** * ice_init_ctrlq - main initialization routine for any control Queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type * * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size * * NOTE: this function does not initialize the controlq locks */ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { struct ice_ctl_q_info *cq; enum ice_status ret_code; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); switch (q_type) { case ICE_CTL_Q_ADMIN: ice_adminq_init_regs(hw); cq = &hw->adminq; break; case ICE_CTL_Q_MAILBOX: ice_mailbox_init_regs(hw); cq = &hw->mailboxq; break; default: return ICE_ERR_PARAM; }
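/* Illustrative caller sketch (the sizes here are examples, not the driver's * actual choices): before this point the driver is expected to have set, * e.g., hw->adminq.num_sq_entries = 256; hw->adminq.num_rq_entries = 256; * hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; and * hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; so the validation just below * passes. */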
cq->qtype = q_type; /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->num_sq_entries || !cq->rq_buf_size || !cq->sq_buf_size) { return ICE_ERR_CFG; } /* setup SQ command write back timeout */ cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; /* allocate the ATQ */ ret_code = ice_init_sq(hw, cq); if (ret_code) return ret_code; /* allocate the ARQ */ ret_code = ice_init_rq(hw, cq); if (ret_code) goto init_ctrlq_free_sq; /* success! */ return ICE_SUCCESS; init_ctrlq_free_sq: ice_shutdown_sq(hw, cq); return ret_code; } /** * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type * * NOTE: this function does not destroy the control queue locks. */ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { struct ice_ctl_q_info *cq; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); switch (q_type) { case ICE_CTL_Q_ADMIN: cq = &hw->adminq; if (ice_check_sq_alive(hw, cq)) ice_aq_q_shutdown(hw, true); break; case ICE_CTL_Q_MAILBOX: cq = &hw->mailboxq; break; default: return; } ice_shutdown_sq(hw, cq); ice_shutdown_rq(hw, cq); } /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure * * NOTE: this function does not destroy the control queue locks. The driver * may call this at runtime to shutdown and later restart control queues, such * as in response to a reset event. */ void ice_shutdown_all_ctrlq(struct ice_hw *hw) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Shutdown FW admin queue */ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); /* Shutdown PF-VF Mailbox */ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); } /** * ice_init_all_ctrlq - main initialization routine for all control queues * @hw: pointer to the hardware structure * * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure for all control queues: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size * * NOTE: this function does not initialize the controlq locks. */ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) { enum ice_status status; u32 retry = 0; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Init FW admin queue */ do { status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN); if (status) return status; status = ice_init_check_adminq(hw); if (status != ICE_ERR_AQ_FW_CRITICAL) break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true); } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); if (status) return status; /* Init Mailbox queue */ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); } /** * ice_init_ctrlq_locks - Initialize locks for a control queue * @cq: pointer to the control queue * * Initializes the send and receive queue locks for a given control queue. */ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) { ice_init_lock(&cq->sq_lock); ice_init_lock(&cq->rq_lock); } /** * ice_create_all_ctrlq - main initialization routine for all control queues * @hw: pointer to the hardware structure * * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure for all control queues: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size * * This function creates all the control queue locks and then calls * ice_init_all_ctrlq. It should be called once during driver load.
If the * driver needs to re-initialize control queues at run time it should call * ice_init_all_ctrlq instead. */ enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) { ice_init_ctrlq_locks(&hw->adminq); ice_init_ctrlq_locks(&hw->mailboxq); return ice_init_all_ctrlq(hw); } /** * ice_destroy_ctrlq_locks - Destroy locks for a control queue * @cq: pointer to the control queue * * Destroys the send and receive queue locks for a given control queue. */ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) { ice_destroy_lock(&cq->sq_lock); ice_destroy_lock(&cq->rq_lock); } /** * ice_destroy_all_ctrlq - exit routine for all control queues * @hw: pointer to the hardware structure * * This function shuts down all the control queues and then destroys the * control queue locks. It should be called once during driver unload. The * driver should call ice_shutdown_all_ctrlq if it needs to shut down and * reinitialize control queues, such as in response to a reset event. */ void ice_destroy_all_ctrlq(struct ice_hw *hw) { /* shut down all the control queues first */ ice_shutdown_all_ctrlq(hw); ice_destroy_ctrlq_locks(&hw->adminq); ice_destroy_ctrlq_locks(&hw->mailboxq); } /** * ice_clean_sq - cleans Admin send queue (ATQ) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * returns the number of free desc */ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { struct ice_ctl_q_ring *sq = &cq->sq; u16 ntc = sq->next_to_clean; struct ice_sq_cd *details; struct ice_aq_desc *desc; desc = ICE_CTL_Q_DESC(*sq, ntc); details = ICE_CTL_Q_DETAILS(*sq, ntc); while (rd32(hw, cq->sq.head) != ntc) { ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); ntc++; if (ntc == sq->count) ntc = 0; desc = ICE_CTL_Q_DESC(*sq, ntc); details = ICE_CTL_Q_DETAILS(*sq, ntc); } sq->next_to_clean = ntc; return ICE_CTL_Q_DESC_UNUSED(sq); } /** * ice_debug_cq * @hw: pointer to the hardware structure * @desc: pointer to control queue descriptor * @buf: pointer to command buffer * @buf_len: max length of buf * * Dumps debug log about control command with descriptor contents. */ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) { struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc; u16 datalen, flags; if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask)) return; if (!desc) return; datalen = LE16_TO_CPU(cq_desc->datalen); flags = LE16_TO_CPU(cq_desc->flags); ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", LE16_TO_CPU(cq_desc->opcode), flags, datalen, LE16_TO_CPU(cq_desc->retval)); ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(cq_desc->cookie_high), LE32_TO_CPU(cq_desc->cookie_low)); ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n", LE32_TO_CPU(cq_desc->params.generic.param0), LE32_TO_CPU(cq_desc->params.generic.param1)); ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(cq_desc->params.generic.addr_high), LE32_TO_CPU(cq_desc->params.generic.addr_low)); /* Dump buffer iff 1) one exists and 2) is either a response indicated * by the DD and/or CMP flag set or a command with the RD flag set. 
*/ if (buf && cq_desc->datalen != 0 && (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) || flags & ICE_AQ_FLAG_RD)) { ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n"); ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, MIN_T(u16, buf_len, datalen)); } } /** * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * * Returns true if the firmware has processed all descriptors on the * admin send queue. Returns false if there are still requests pending. */ bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) { /* AQ designers suggest use of head for better * timing reliability than DD bit */ return rd32(hw, cq->sq.head) == cq->sq.next_to_use; } /** * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @desc: prefilled descriptor describing the command (non DMA mem) * @buf: buffer to use for indirect commands (or NULL for direct commands) * @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure * * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. */ static enum ice_status ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_dma_mem *dma_buf = NULL; struct ice_aq_desc *desc_on_ring; bool cmd_completed = false; enum ice_status status = ICE_SUCCESS; struct ice_sq_cd *details; u32 total_delay = 0; u16 retval = 0; u32 val = 0; /* if reset is in progress return a soft error */ if (hw->reset_ongoing) return ICE_ERR_RESET_ONGOING; cq->sq_last_status = ICE_AQ_RC_OK; if (!cq->sq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); status = ICE_ERR_AQ_EMPTY; goto sq_send_command_error; } if ((buf && !buf_size) || (!buf && buf_size)) { status = ICE_ERR_PARAM; goto sq_send_command_error; } if (buf) { if (buf_size > cq->sq_buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", buf_size); status = ICE_ERR_INVAL_SIZE; goto sq_send_command_error; } desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF); if (buf_size > ICE_AQ_LG_BUF) desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); } val = rd32(hw, cq->sq.head); if (val >= cq->num_sq_entries) { ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", val); status = ICE_ERR_AQ_EMPTY; goto sq_send_command_error; } details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); if (cd) *details = *cd; else ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); /* Call clean and check queue available function to reclaim the * descriptors that were processed by FW/MBX; the function returns the * number of desc available. The clean function called here could be * called in a separate thread in case of asynchronous completions. 
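 * For example, with a 32-entry ring and next_to_use == next_to_clean == 7 * after cleaning, ICE_CTL_Q_DESC_UNUSED() yields 32 + 7 - 7 - 1 = 31 free * descriptors; a return of 0 means the ring is full and the command is * rejected below.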
*/ if (ice_clean_sq(hw, cq) == 0) { ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); status = ICE_ERR_AQ_FULL; goto sq_send_command_error; } /* initialize the temp desc pointer with the right desc */ desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use); /* if the desc is available copy the temp desc to the right place */ ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring), ICE_NONDMA_TO_DMA); /* if buf is not NULL assume indirect command */ if (buf) { dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use]; /* copy the user buf into the respective DMA buf */ ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA); desc_on_ring->datalen = CPU_TO_LE16(buf_size); /* Update the address values in the desc with the pa value * for respective buffer */ desc_on_ring->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa)); desc_on_ring->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa)); } /* Debug desc and buffer */ ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n"); ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); (cq->sq.next_to_use)++; if (cq->sq.next_to_use == cq->sq.count) cq->sq.next_to_use = 0; wr32(hw, cq->sq.tail, cq->sq.next_to_use); do { if (ice_sq_done(hw, cq)) break; ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false); total_delay++; } while (total_delay < cq->sq_cmd_timeout); /* if ready, copy the desc back to temp */ if (ice_sq_done(hw, cq)) { ice_memcpy(desc, desc_on_ring, sizeof(*desc), ICE_DMA_TO_NONDMA); if (buf) { /* get returned length to copy */ u16 copy_size = LE16_TO_CPU(desc->datalen); if (copy_size > buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", copy_size, buf_size); status = ICE_ERR_AQ_ERROR; } else { ice_memcpy(buf, dma_buf->va, copy_size, ICE_DMA_TO_NONDMA); } } retval = LE16_TO_CPU(desc->retval); if (retval) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n", LE16_TO_CPU(desc->opcode), retval); /* strip off FW internal code */ retval &= 0xff; } cmd_completed = true; if (!status && retval != ICE_AQ_RC_OK) status = ICE_ERR_AQ_ERROR; cq->sq_last_status = (enum ice_aq_err)retval; } ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n"); ice_debug_cq(hw, (void *)desc, buf, buf_size); /* save writeback AQ if requested */ if (details->wb_desc) ice_memcpy(details->wb_desc, desc_on_ring, sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA); /* update the error if time out occurred */ if (!cmd_completed) { if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n"); status = ICE_ERR_AQ_FW_CRITICAL; } else { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); status = ICE_ERR_AQ_TIMEOUT; } } sq_send_command_error: return status; } /** * ice_sq_send_cmd - send command to Control Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue - * @desc: prefilled descriptor describing the command (non DMA mem) + * @desc: prefilled descriptor describing the command * @buf: buffer to use for indirect commands (or NULL for direct commands) * @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure * * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. 
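 * * This wrapper serializes callers by taking sq_lock around * ice_sq_send_cmd_nolock(); the polling loop there waits up to * sq_cmd_timeout * ICE_CTL_Q_SQ_CMD_USEC, i.e. 10000 * 100us = 1s, for * firmware to advance the queue head before declaring a timeout.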
*/ enum ice_status ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { enum ice_status status = ICE_SUCCESS; /* if reset is in progress return a soft error */ if (hw->reset_ongoing) return ICE_ERR_RESET_ONGOING; ice_acquire_lock(&cq->sq_lock); status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd); ice_release_lock(&cq->sq_lock); return status; } /** * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) * @opcode: the opcode can be used to decide which flags to turn off or on * * Fill the desc with default values */ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) { /* zero out the desc */ ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM); desc->opcode = CPU_TO_LE16(opcode); desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI); } /** * ice_clean_rq_elem * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @e: event info from the receive descriptor, includes any buffers * @pending: number of events that could be left to process * * This function cleans one Admin Receive Queue element and returns * the contents through e. It can also return how many events are * left to process through 'pending'. */ enum ice_status ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending) { u16 ntc = cq->rq.next_to_clean; + enum ice_aq_err rq_last_status; enum ice_status ret_code = ICE_SUCCESS; struct ice_aq_desc *desc; struct ice_dma_mem *bi; u16 desc_idx; u16 datalen; u16 flags; u16 ntu; /* pre-clean the event info */ ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM); /* take the lock before we start messing with the ring */ ice_acquire_lock(&cq->rq_lock); if (!cq->rq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); ret_code = ICE_ERR_AQ_EMPTY; goto clean_rq_elem_err; } /* set next_to_use to head */ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = ICE_ERR_AQ_NO_WORK; goto clean_rq_elem_out; } /* now clean the next descriptor */ desc = ICE_CTL_Q_DESC(cq->rq, ntc); desc_idx = ntc; - cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval); + rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval); flags = LE16_TO_CPU(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { ret_code = ICE_ERR_AQ_ERROR; ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", - LE16_TO_CPU(desc->opcode), - cq->rq_last_status); + LE16_TO_CPU(desc->opcode), rq_last_status); } ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA); datalen = LE16_TO_CPU(desc->datalen); e->msg_len = MIN_T(u16, datalen, e->buf_len); if (e->msg_buf && e->msg_len) ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len, ICE_DMA_TO_NONDMA); ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n"); ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message size */ bi = &cq->rq.r.rq_bi[ntc]; ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF); if (cq->rq_buf_size > ICE_AQ_LG_BUF) desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); desc->datalen = CPU_TO_LE16(bi->size); desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa)); desc->params.generic.addr_low = 
CPU_TO_LE32(ICE_LO_DWORD(bi->pa)); /* set tail = the last cleaned desc index. */ wr32(hw, cq->rq.tail, ntc); /* ntc is updated to tail + 1 */ ntc++; if (ntc == cq->num_rq_entries) ntc = 0; cq->rq.next_to_clean = ntc; cq->rq.next_to_use = ntu; clean_rq_elem_out: /* Set pending if needed, unlock and return */ if (pending) { /* re-read HW head to calculate actual pending messages */ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); } clean_rq_elem_err: ice_release_lock(&cq->rq_lock); return ret_code; } diff --git a/sys/dev/ice/ice_controlq.h b/sys/dev/ice/ice_controlq.h index 947f1d6d1767..4ed0809f2bad 100644 --- a/sys/dev/ice/ice_controlq.h +++ b/sys/dev/ice/ice_controlq.h @@ -1,128 +1,127 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_CONTROLQ_H_ #define _ICE_CONTROLQ_H_ #include "ice_adminq_cmd.h" /* Maximum buffer lengths for all control queue types */ #define ICE_AQ_MAX_BUF_LEN 4096 #define ICE_MBXQ_MAX_BUF_LEN 4096 #define ICE_CTL_Q_DESC(R, i) \ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) #define ICE_CTL_Q_DESC_UNUSED(R) \ - (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) + ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1)) /* Defines that help manage the driver vs FW API checks. * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. */ #define EXP_FW_API_VER_BRANCH 0x00 #define EXP_FW_API_VER_MAJOR 0x01 #define EXP_FW_API_VER_MINOR 0x05 /* Different control queue types: These are mainly for SW consumption. 
*/ enum ice_ctl_q { ICE_CTL_Q_UNKNOWN = 0, ICE_CTL_Q_ADMIN, ICE_CTL_Q_MAILBOX, }; /* Control Queue timeout settings - max delay 1s */ #define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */ #define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ struct ice_ctl_q_ring { void *dma_head; /* Virtual address to DMA head */ struct ice_dma_mem desc_buf; /* descriptor ring memory */ void *cmd_buf; /* command buffer memory */ union { struct ice_dma_mem *sq_bi; struct ice_dma_mem *rq_bi; } r; u16 count; /* Number of descriptors */ /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; /* used for queue tracking */ u32 head; u32 tail; u32 len; u32 bah; u32 bal; u32 len_mask; u32 len_ena_mask; u32 len_crit_mask; u32 head_mask; }; /* sq transaction details */ struct ice_sq_cd { struct ice_aq_desc *wb_desc; }; #define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i])) /* rq event information */ struct ice_rq_event_info { struct ice_aq_desc desc; u16 msg_len; u16 buf_len; u8 *msg_buf; }; /* Control Queue information */ struct ice_ctl_q_info { enum ice_ctl_q qtype; - enum ice_aq_err rq_last_status; /* last status on receive queue */ struct ice_ctl_q_ring rq; /* receive queue */ struct ice_ctl_q_ring sq; /* send queue */ u32 sq_cmd_timeout; /* send queue cmd write back timeout */ u16 num_rq_entries; /* receive queue depth */ u16 num_sq_entries; /* send queue depth */ u16 rq_buf_size; /* receive queue buffer size */ u16 sq_buf_size; /* send queue buffer size */ enum ice_aq_err sq_last_status; /* last status on send queue */ struct ice_lock sq_lock; /* Send queue lock */ struct ice_lock rq_lock; /* Receive queue lock */ }; #endif /* _ICE_CONTROLQ_H_ */ diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h index 340d53e4a671..b344c010a35f 100644 --- a/sys/dev/ice/ice_drv_info.h +++ b/sys/dev/ice/ice_drv_info.h @@ -1,186 +1,192 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_drv_info.h * @brief device IDs and driver version * * Contains the device IDs tables and the driver version string. * * This file contains static or constant definitions intended to be included * exactly once in the main driver interface file. It implicitly depends on * the main driver header file. * * These definitions could be placed directly in the interface file, but are * kept separate for organizational purposes. */ /** * @var ice_driver_version * @brief driver version string * * Driver version information, used for display as part of an informational * sysctl, and as part of the driver information sent to the firmware at load. * * @var ice_major_version * @brief driver major version number * * @var ice_minor_version * @brief driver minor version number * * @var ice_patch_version * @brief driver patch version number * * @var ice_rc_version * @brief driver release candidate version number */ -const char ice_driver_version[] = "0.28.1-k"; +const char ice_driver_version[] = "0.29.4-k"; const uint8_t ice_major_version = 0; -const uint8_t ice_minor_version = 28; -const uint8_t ice_patch_version = 1; +const uint8_t ice_minor_version = 29; +const uint8_t ice_patch_version = 4; const uint8_t ice_rc_version = 0; #define PVIDV(vendor, devid, name) \ - PVID(vendor, devid, name " - 0.28.1-k") + PVID(vendor, devid, name " - 0.29.4-k") #define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \ - PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.28.1-k") + PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.29.4-k") /** * @var ice_vendor_info_array * @brief array of PCI devices supported by this driver * * Array of PCI devices which are supported by this driver. Used to determine * whether a given device should be loaded by this driver. This information is * also exported as part of the module information for other tools to analyze. * * @remark Each type of device ID needs to be listed from most-specific entry * to most-generic entry; e.g. PVIDV_OEM()s for a device ID must come before * the PVIDV() for it. 
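 * In the table below, for instance, all of the PVIDV_OEM() entries for * ICE_DEV_ID_E810C_QSFP precede the generic PVIDV() entry for that device * ID, so branded adapters match their marketing name first.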
*/ static pci_vendor_info_t ice_vendor_info_array[] = { PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE, "Intel(R) Ethernet Controller E810-C for backplane"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0001, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0002, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0003, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0004, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0005, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0006, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0007, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0008, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x000D, 0, "Intel(R) Ethernet Network Adapter E810-L-Q2 for OCP3.0"), + PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, + ICE_INTEL_VENDOR_ID, 0x000E, 0, + "Intel(R) Ethernet Network Adapter E810-2C-Q2"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, "Intel(R) Ethernet Controller E810-C for QSFP"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0005, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0006, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0007, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0008, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0009, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 2.0"), + PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, + ICE_INTEL_VENDOR_ID, 0x000C, 0, + "Intel(R) Ethernet Network Adapter E810-XXV-4 for OCP 3.0"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, "Intel(R) Ethernet Controller E810-C for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE, "Intel(R) Ethernet Connection E822-C for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP, "Intel(R) Ethernet Connection E822-C for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP, "Intel(R) Ethernet Connection E822-C for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T, "Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII, "Intel(R) Ethernet Connection E822-C 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE, "Intel(R) Ethernet Connection E822-L for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP, "Intel(R) Ethernet Connection E822-L for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T, "Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII, "Intel(R) Ethernet Connection E822-L 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, 
ICE_DEV_ID_E823L_BACKPLANE, "Intel(R) Ethernet Connection E823-L for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP, "Intel(R) Ethernet Connection E823-L for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP, "Intel(R) Ethernet Connection E823-L for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T, "Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE, "Intel(R) Ethernet Connection E823-L 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE, "Intel(R) Ethernet Controller E810-XXV for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP, "Intel(R) Ethernet Controller E810-XXV for QSFP"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0003, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0004, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0005, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0006, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, "Intel(R) Ethernet Controller E810-XXV for SFP"), PVID_END }; diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h index dcb096509f73..93f57b23170d 100644 --- a/sys/dev/ice/ice_features.h +++ b/sys/dev/ice/ice_features.h @@ -1,91 +1,93 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_features.h * @brief device feature controls * * Contains a list of various device features which could be enabled or * disabled. */ #ifndef _ICE_FEATURES_H_ #define _ICE_FEATURES_H_ /** * @enum feat_list * @brief driver feature enumeration * * Enumeration of possible device driver features that can be enabled or * disabled. 
Each possible value represents a different feature which can be * enabled or disabled. * * The driver stores a bitmap of the features that the device and OS are * capable of, as well as another bitmap indicating which features are * currently enabled for that device. */ enum feat_list { ICE_FEATURE_SRIOV, ICE_FEATURE_RSS, ICE_FEATURE_NETMAP, ICE_FEATURE_FDIR, ICE_FEATURE_MSI, ICE_FEATURE_MSIX, ICE_FEATURE_RDMA, ICE_FEATURE_SAFE_MODE, ICE_FEATURE_LENIENT_LINK_MODE, - ICE_FEATURE_DEFAULT_OVERRIDE, + ICE_FEATURE_LINK_MGMT_VER_1, + ICE_FEATURE_LINK_MGMT_VER_2, + ICE_FEATURE_HEALTH_STATUS, /* Must be last entry */ ICE_FEATURE_COUNT }; /** * ice_disable_unsupported_features - Disable features not enabled by OS * @bitmap: the feature bitmap * * Check for OS support of various driver features. Clear the feature bit for * any feature which is not enabled by the OS. This should be called early * during driver attach after setting up the feature bitmap. * * @remark the bitmap parameter is marked as unused in order to avoid an * unused parameter warning in case none of the features need to be disabled. */ static inline void ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap) { ice_clear_bit(ICE_FEATURE_SRIOV, bitmap); #ifndef DEV_NETMAP ice_clear_bit(ICE_FEATURE_NETMAP, bitmap); #endif } #endif diff --git a/sys/dev/ice/ice_flex_pipe.c b/sys/dev/ice/ice_flex_pipe.c index e8e4403a23fe..0d0dc3c8c80d 100644 --- a/sys/dev/ice/ice_flex_pipe.c +++ b/sys/dev/ice/ice_flex_pipe.c @@ -1,5656 +1,5700 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "ice_common.h" #include "ice_flex_pipe.h" #include "ice_protocol_type.h" #include "ice_flow.h" /* To support tunneling entries by PF, the package will append the PF number to * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. 
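 * A given PF matches the label that carries its own function number; PF 2, * for example, looks for "TNL_VXLAN_PF2" when locating its VXLAN boost TCAM * entries (the scan code itself appears later in this file).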
*/ +#define ICE_TNL_PRE "TNL_" static const struct ice_tunnel_type_scan tnls[] = { { TNL_VXLAN, "TNL_VXLAN_PF" }, { TNL_GENEVE, "TNL_GENEVE_PF" }, { TNL_LAST, "" } }; static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = { /* SWITCH */ { ICE_SID_XLT0_SW, ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT1_SW, ICE_SID_XLT2_SW, ICE_SID_PROFID_TCAM_SW, ICE_SID_PROFID_REDIR_SW, ICE_SID_FLD_VEC_SW, ICE_SID_CDID_KEY_BUILDER_SW, ICE_SID_CDID_REDIR_SW }, /* ACL */ { ICE_SID_XLT0_ACL, ICE_SID_XLT_KEY_BUILDER_ACL, ICE_SID_XLT1_ACL, ICE_SID_XLT2_ACL, ICE_SID_PROFID_TCAM_ACL, ICE_SID_PROFID_REDIR_ACL, ICE_SID_FLD_VEC_ACL, ICE_SID_CDID_KEY_BUILDER_ACL, ICE_SID_CDID_REDIR_ACL }, /* FD */ { ICE_SID_XLT0_FD, ICE_SID_XLT_KEY_BUILDER_FD, ICE_SID_XLT1_FD, ICE_SID_XLT2_FD, ICE_SID_PROFID_TCAM_FD, ICE_SID_PROFID_REDIR_FD, ICE_SID_FLD_VEC_FD, ICE_SID_CDID_KEY_BUILDER_FD, ICE_SID_CDID_REDIR_FD }, /* RSS */ { ICE_SID_XLT0_RSS, ICE_SID_XLT_KEY_BUILDER_RSS, ICE_SID_XLT1_RSS, ICE_SID_XLT2_RSS, ICE_SID_PROFID_TCAM_RSS, ICE_SID_PROFID_REDIR_RSS, ICE_SID_FLD_VEC_RSS, ICE_SID_CDID_KEY_BUILDER_RSS, ICE_SID_CDID_REDIR_RSS }, /* PE */ { ICE_SID_XLT0_PE, ICE_SID_XLT_KEY_BUILDER_PE, ICE_SID_XLT1_PE, ICE_SID_XLT2_PE, ICE_SID_PROFID_TCAM_PE, ICE_SID_PROFID_REDIR_PE, ICE_SID_FLD_VEC_PE, ICE_SID_CDID_KEY_BUILDER_PE, ICE_SID_CDID_REDIR_PE } }; /** * ice_sect_id - returns section ID * @blk: block type * @sect: section type * * This helper function returns the proper section ID given a block type and a * section type. */ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect) { return ice_sect_lkup[blk][sect]; } /** * ice_pkg_val_buf * @buf: pointer to the ice buffer * * This helper function validates a buffer's header. */ static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) { struct ice_buf_hdr *hdr; u16 section_count; u16 data_end; hdr = (struct ice_buf_hdr *)buf->buf; /* verify data */ section_count = LE16_TO_CPU(hdr->section_count); if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) return NULL; data_end = LE16_TO_CPU(hdr->data_end); if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) return NULL; return hdr; } /** * ice_find_buf_table * @ice_seg: pointer to the ice segment * * Returns the address of the buffer table within the ice segment. */ static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) { struct ice_nvm_table *nvms; nvms = (struct ice_nvm_table *) (ice_seg->device_table + LE32_TO_CPU(ice_seg->device_table_count)); return (_FORCE_ struct ice_buf_table *) (nvms->vers + LE32_TO_CPU(nvms->table_count)); } /** * ice_pkg_enum_buf * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) * @state: pointer to the enum state * * This function will enumerate all the buffers in the ice segment. The first * call is made with the ice_seg parameter non-NULL; on subsequent calls, * ice_seg is set to NULL which continues the enumeration. When the function * returns a NULL pointer, then the end of the buffers has been reached, or an * unexpected value has been detected (for example an invalid section count or * an invalid buffer end value). 
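 * All of the ice_pkg_enum_* helpers below share this first-call/continuation * convention; ice_find_boost_entry() shows the typical loop, passing ice_seg * once and then NULL until NULL comes back.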
*/ static struct ice_buf_hdr * ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) { if (ice_seg) { state->buf_table = ice_find_buf_table(ice_seg); if (!state->buf_table) return NULL; state->buf_idx = 0; return ice_pkg_val_buf(state->buf_table->buf_array); } if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count)) return ice_pkg_val_buf(state->buf_table->buf_array + state->buf_idx); else return NULL; } /** * ice_pkg_advance_sect * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) * @state: pointer to the enum state * * This helper function will advance the section within the ice segment, * also advancing the buffer if needed. */ static bool ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) { if (!ice_seg && !state->buf) return false; if (!ice_seg && state->buf) if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count)) return true; state->buf = ice_pkg_enum_buf(ice_seg, state); if (!state->buf) return false; /* start of new buffer, reset section index */ state->sect_idx = 0; return true; } /** * ice_pkg_enum_section * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) * @state: pointer to the enum state * @sect_type: section type to enumerate * * This function will enumerate all the sections of a particular type in the * ice segment. The first call is made with the ice_seg parameter non-NULL; * on subsequent calls, ice_seg is set to NULL which continues the enumeration. * When the function returns a NULL pointer, then the end of the matching * sections has been reached. */ static void * ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type) { u16 offset, size; if (ice_seg) state->type = sect_type; if (!ice_pkg_advance_sect(ice_seg, state)) return NULL; /* scan for next matching section */ while (state->buf->section_entry[state->sect_idx].type != CPU_TO_LE32(state->type)) if (!ice_pkg_advance_sect(NULL, state)) return NULL; /* validate section */ offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) return NULL; size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size); if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) return NULL; /* make sure the section fits in the buffer */ if (offset + size > ICE_PKG_BUF_SIZE) return NULL; state->sect_type = LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type); /* calc pointer to this section */ state->sect = ((u8 *)state->buf) + LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); return state->sect; } /** * ice_pkg_enum_entry * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) * @state: pointer to the enum state * @sect_type: section type to enumerate * @offset: pointer to variable that receives the offset in the table (optional) * @handler: function that handles access to the entries into the section type * * This function will enumerate all the entries in a particular section type in * the ice segment. The first call is made with the ice_seg parameter non-NULL; * on subsequent calls, ice_seg is set to NULL which continues the enumeration. * When the function returns a NULL pointer, then the end of the entries has * been reached. * * Since each section may have a different header and entry size, the handler * function is needed to determine the number and location of entries in each * section. * * The offset parameter is optional, but should be used for sections that * contain an offset for each section table.
For such cases, the section handler * function must return the appropriate offset + index to give the absolute * offset for each entry. For example, if a section's header * indicates a base offset of 10, and the index for the entry is 2, then the * section handler function should set the offset to 10 + 2 = 12. */ static void * ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type, u32 *offset, void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset)) { void *entry; if (ice_seg) { if (!handler) return NULL; if (!ice_pkg_enum_section(ice_seg, state, sect_type)) return NULL; state->entry_idx = 0; state->handler = handler; } else { state->entry_idx++; } if (!state->handler) return NULL; /* get entry */ entry = state->handler(state->sect_type, state->sect, state->entry_idx, offset); if (!entry) { /* end of a section, look for another section of this type */ if (!ice_pkg_enum_section(NULL, state, 0)) return NULL; state->entry_idx = 0; entry = state->handler(state->sect_type, state->sect, state->entry_idx, offset); } return entry; } /** * ice_boost_tcam_handler * @sect_type: section type * @section: pointer to section * @index: index of the boost TCAM entry to be returned * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections * * This is a callback function that can be passed to ice_pkg_enum_entry. * Handles enumeration of individual boost TCAM entries. */ static void * ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) { struct ice_boost_tcam_section *boost; if (!section) return NULL; if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) return NULL; + /* cppcheck-suppress nullPointer */ if (index > ICE_MAX_BST_TCAMS_IN_BUF) return NULL; if (offset) *offset = 0; boost = (struct ice_boost_tcam_section *)section; if (index >= LE16_TO_CPU(boost->count)) return NULL; return boost->tcam + index; } /** * ice_find_boost_entry * @ice_seg: pointer to the ice segment (non-NULL) * @addr: Boost TCAM address of entry to search for * @entry: returns pointer to the entry * * Finds a particular Boost TCAM entry and returns a pointer to that entry * if it is found. The ice_seg parameter must not be NULL since the first call * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. */ static enum ice_status ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, struct ice_boost_tcam_entry **entry) { struct ice_boost_tcam_entry *tcam; struct ice_pkg_enum state; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!ice_seg) return ICE_ERR_PARAM; do { tcam = (struct ice_boost_tcam_entry *) ice_pkg_enum_entry(ice_seg, &state, ICE_SID_RXPARSER_BOOST_TCAM, NULL, ice_boost_tcam_handler); if (tcam && LE16_TO_CPU(tcam->addr) == addr) { *entry = tcam; return ICE_SUCCESS; } ice_seg = NULL; } while (tcam); *entry = NULL; return ICE_ERR_CFG; } /** * ice_label_enum_handler * @sect_type: section type * @section: pointer to section * @index: index of the label entry to be returned * @offset: pointer to receive absolute offset, always zero for label sections * * This is a callback function that can be passed to ice_pkg_enum_entry. * Handles enumeration of individual label entries.
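* Returning NULL from this handler tells ice_pkg_enum_entry() to advance to the next section of the same type via ice_pkg_enum_section().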
*/ static void * ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index, u32 *offset) { struct ice_label_section *labels; if (!section) return NULL; + /* cppcheck-suppress nullPointer */ if (index > ICE_MAX_LABELS_IN_BUF) return NULL; if (offset) *offset = 0; labels = (struct ice_label_section *)section; if (index >= LE16_TO_CPU(labels->count)) return NULL; return labels->label + index; } /** * ice_enum_labels * @ice_seg: pointer to the ice segment (NULL on subsequent calls) * @type: the section type that will contain the label (0 on subsequent calls) * @state: ice_pkg_enum structure that will hold the state of the enumeration * @value: pointer to a value that will return the label's value if found * * Enumerates a list of labels in the package. The caller will call * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL * the end of the list has been reached. */ static char * ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, u16 *value) { struct ice_label *label; /* Check for valid label section on first call */ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) return NULL; label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type, NULL, ice_label_enum_handler); if (!label) return NULL; *value = LE16_TO_CPU(label->value); return label->name; } /** - * ice_init_pkg_hints + * ice_add_tunnel_hint * @hw: pointer to the HW structure - * @ice_seg: pointer to the segment of the package scan (non-NULL) - * - * This function will scan the package and save off relevant information - * (hints or metadata) for driver use. The ice_seg parameter must not be NULL - * since the first call to ice_enum_labels requires a pointer to an actual - * ice_seg structure. + * @label_name: label text + * @val: value of the tunnel port boost entry */ -static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) +static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) { - struct ice_pkg_enum state; - char *label_name; - u16 val; - int i; - - ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM); - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!ice_seg) - return; - - label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, - &val); + if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { + u16 i; - while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { for (i = 0; tnls[i].type != TNL_LAST; i++) { size_t len = strlen(tnls[i].label_prefix); /* Look for matching label start, before continuing */ if (strncmp(label_name, tnls[i].label_prefix, len)) continue; /* Make sure this label matches our PF. Note that the PF * character ('0' - '7') will be located where our * prefix string's null terminator is located. */ if ((label_name[len] - '0') == hw->pf_id) { hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; hw->tnl.tbl[hw->tnl.count].valid = false; hw->tnl.tbl[hw->tnl.count].in_use = false; hw->tnl.tbl[hw->tnl.count].marked = false; hw->tnl.tbl[hw->tnl.count].boost_addr = val; hw->tnl.tbl[hw->tnl.count].port = 0; hw->tnl.count++; break; } } + } +} + +/** + * ice_init_pkg_hints + * @hw: pointer to the HW structure + * @ice_seg: pointer to the segment of the package scan (non-NULL) + * + * This function will scan the package and save off relevant information + * (hints or metadata) for driver use. 
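Currently the only hints gathered are the tunnel port + * labels, recorded via ice_add_tunnel_hint().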
The ice_seg parameter must not be NULL + * since the first call to ice_enum_labels requires a pointer to an actual + * ice_seg structure. + */ +static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_pkg_enum state; + char *label_name; + u16 val; + int i; + + ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM); + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ice_seg) + return; + + label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, + &val); + + while (label_name) { + if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) + /* check for a tunnel entry */ + ice_add_tunnel_hint(hw, label_name, val); label_name = ice_enum_labels(NULL, 0, &state, &val); } - /* Cache the appropriate boost TCAM entry pointers */ + /* Cache the appropriate boost TCAM entry pointers for tunnels */ for (i = 0; i < hw->tnl.count; i++) { ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, &hw->tnl.tbl[i].boost_entry); if (hw->tnl.tbl[i].boost_entry) hw->tnl.tbl[i].valid = true; } } /* Key creation */ #define ICE_DC_KEY 0x1 /* don't care */ #define ICE_DC_KEYINV 0x1 #define ICE_NM_KEY 0x0 /* never match */ #define ICE_NM_KEYINV 0x0 #define ICE_0_KEY 0x1 /* match 0 */ #define ICE_0_KEYINV 0x0 #define ICE_1_KEY 0x0 /* match 1 */ #define ICE_1_KEYINV 0x1 /** * ice_gen_key_word - generate 16-bits of a key/mask word * @val: the value * @valid: valid bits mask (change only the valid bits) * @dont_care: don't care mask * @nvr_mtch: never match mask * @key: pointer to an array of where the resulting key portion * @key_inv: pointer to an array of where the resulting key invert portion * * This function generates 16-bits from a 8-bit value, an 8-bit don't care mask * and an 8-bit never match mask. The 16-bits of output are divided into 8 bits * of key and 8 bits of key invert. * * '0' = b01, always match a 0 bit * '1' = b10, always match a 1 bit * '?' = b11, don't care bit (always matches) * '~' = b00, never match bit * * Input: * val: b0 1 0 1 0 1 * dont_care: b0 0 1 1 0 0 * never_mtch: b0 0 0 0 1 1 * ------------------------------ * Result: key: b01 10 11 11 00 00 */ static enum ice_status ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, u8 *key_inv) { u8 in_key = *key, in_key_inv = *key_inv; u8 i; /* 'dont_care' and 'nvr_mtch' masks cannot overlap */ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) return ICE_ERR_CFG; *key = 0; *key_inv = 0; /* encode the 8 bits into 8-bit key and 8-bit key invert */ for (i = 0; i < 8; i++) { *key >>= 1; *key_inv >>= 1; if (!(valid & 0x1)) { /* change only valid bits */ *key |= (in_key & 0x1) << 7; *key_inv |= (in_key_inv & 0x1) << 7; } else if (dont_care & 0x1) { /* don't care bit */ *key |= ICE_DC_KEY << 7; *key_inv |= ICE_DC_KEYINV << 7; } else if (nvr_mtch & 0x1) { /* never match bit */ *key |= ICE_NM_KEY << 7; *key_inv |= ICE_NM_KEYINV << 7; } else if (val & 0x01) { /* exact 1 match */ *key |= ICE_1_KEY << 7; *key_inv |= ICE_1_KEYINV << 7; } else { /* exact 0 match */ *key |= ICE_0_KEY << 7; *key_inv |= ICE_0_KEYINV << 7; } dont_care >>= 1; nvr_mtch >>= 1; valid >>= 1; val >>= 1; in_key >>= 1; in_key_inv >>= 1; } return ICE_SUCCESS; } /** * ice_bits_max_set - determine if the number of bits set is within a maximum * @mask: pointer to the byte array which is the mask * @size: the number of bytes in the mask * @max: the max number of set bits * * This function determines if there are at most 'max' number of bits set in an * array. 
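* For example, a mask of {0x00, 0x05} has two bits set, so it passes a check with max = 2 but fails with max = 1.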
Returns true if the number of bits set is <= max, and false * otherwise. */ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) { u16 count = 0; u16 i; /* check each byte */ for (i = 0; i < size; i++) { /* if 0, go to next byte */ if (!mask[i]) continue; /* We know there is at least one set bit in this byte because of * the above check; if we already have found 'max' number of * bits set, then we can return failure now. */ if (count == max) return false; /* count the bits in this byte, checking threshold */ count += ice_hweight8(mask[i]); if (count > max) return false; } return true; } /** * ice_set_key - generate a variable sized key with multiples of 16-bits * @key: pointer to where the key will be stored * @size: the size of the complete key in bytes (must be even) * @val: array of 8-bit values that makes up the value portion of the key * @upd: array of 8-bit masks that determine what key portion to update * @dc: array of 8-bit masks that make up the don't care mask * @nm: array of 8-bit masks that make up the never match mask * @off: the offset of the first byte in the key to update * @len: the number of bytes in the key update * * This function generates a key from a value, a don't care mask and a never * match mask. * upd, dc, and nm are optional parameters, and can be NULL: * upd == NULL --> upd mask is all 1's (update all bits) * dc == NULL --> dc mask is all 0's (no don't care bits) * nm == NULL --> nm mask is all 0's (no never match bits) */ static enum ice_status ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, u16 len) { u16 half_size; u16 i; /* size must be a multiple of 2 bytes. */ if (size % 2) return ICE_ERR_CFG; half_size = size / 2; if (off + len > half_size) return ICE_ERR_CFG; /* Make sure at most one bit is set in the never match mask. Having more * than one never match mask bit set will cause HW to consume excessive * power otherwise; this is a power management efficiency check. */ #define ICE_NVR_MTCH_BITS_MAX 1 if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) return ICE_ERR_CFG; for (i = 0; i < len; i++) if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, dc ? dc[i] : 0, nm ? nm[i] : 0, key + off + i, key + half_size + off + i)) return ICE_ERR_CFG; return ICE_SUCCESS; } /** * ice_acquire_global_cfg_lock * @hw: pointer to the HW structure * @access: access type (read or write) * * This function will request ownership of the global config lock for reading * or writing of the package. When attempting to obtain write access, the * caller must check for the following two return values: * * ICE_SUCCESS - Means the caller has acquired the global config lock * and can perform writing of the package. * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the * package or has found that no update was necessary; in * this case, the caller can just skip performing any * update of the package. */ static enum ice_status ice_acquire_global_cfg_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, ICE_GLOBAL_CFG_LOCK_TIMEOUT); if (status == ICE_ERR_AQ_NO_WORK) ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); return status; } /** * ice_release_global_cfg_lock * @hw: pointer to the HW structure * * This function will release the global config lock.
*/ static void ice_release_global_cfg_lock(struct ice_hw *hw) { ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); } /** * ice_acquire_change_lock * @hw: pointer to the HW structure * @access: access type (read or write) * * This function will request ownership of the change lock. */ static enum ice_status ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, ICE_CHANGE_LOCK_TIMEOUT); } /** * ice_release_change_lock * @hw: pointer to the HW structure * * This function will release the change lock using the proper Admin Command. */ static void ice_release_change_lock(struct ice_hw *hw) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); } /** * ice_aq_download_pkg * @hw: pointer to the hardware structure * @pkg_buf: the package buffer to transfer * @buf_size: the size of the package buffer * @last_buf: last buffer indicator * @error_offset: returns error offset * @error_info: returns error information * @cd: pointer to command details structure or NULL * * Download Package (0x0C40) */ static enum ice_status ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (error_offset) *error_offset = 0; if (error_info) *error_info = 0; cmd = &desc.params.download_pkg; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); if (last_buf) cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); if (status == ICE_ERR_AQ_ERROR) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; if (error_offset) *error_offset = LE32_TO_CPU(resp->error_offset); if (error_info) *error_info = LE32_TO_CPU(resp->error_info); } return status; } /** * ice_aq_upload_section * @hw: pointer to the hardware structure * @pkg_buf: the package buffer which will receive the section * @buf_size: the size of the package buffer * @cd: pointer to command details structure or NULL * * Upload Section (0x0C41) */ enum ice_status ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); } /** * ice_aq_update_pkg * @hw: pointer to the hardware structure * @pkg_buf: the package cmd buffer * @buf_size: the size of the package cmd buffer * @last_buf: last buffer indicator * @error_offset: returns error offset * @error_info: returns error information * @cd: pointer to command details structure or NULL * * Update Package (0x0C42) */ static enum ice_status ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, bool last_buf, u32 *error_offset, u32 *error_info, struct ice_sq_cd *cd) { struct ice_aqc_download_pkg *cmd; struct ice_aq_desc desc; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (error_offset) *error_offset = 0; if (error_info) *error_info = 0; cmd = &desc.params.download_pkg; 
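/* The Update Package command reuses the Download Package descriptor and response layout; only the opcode programmed below differs. */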
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); if (last_buf) cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); if (status == ICE_ERR_AQ_ERROR) { /* Read error from buffer only when the FW returned an error */ struct ice_aqc_download_pkg_resp *resp; resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; if (error_offset) *error_offset = LE32_TO_CPU(resp->error_offset); if (error_info) *error_info = LE32_TO_CPU(resp->error_info); } return status; } /** * ice_find_seg_in_pkg * @hw: pointer to the hardware structure * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) * @pkg_hdr: pointer to the package header to be searched * * This function searches a package file for a particular segment type. On * success it returns a pointer to the segment header, otherwise it will * return NULL. */ static struct ice_generic_seg_hdr * ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, struct ice_pkg_hdr *pkg_hdr) { u32 i; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, pkg_hdr->pkg_format_ver.update, pkg_hdr->pkg_format_ver.draft); /* Search all package segments for the requested segment type */ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { struct ice_generic_seg_hdr *seg; seg = (struct ice_generic_seg_hdr *) ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i])); if (LE32_TO_CPU(seg->seg_type) == seg_type) return seg; } return NULL; } /** - * ice_update_pkg + * ice_update_pkg_no_lock * @hw: pointer to the hardware structure * @bufs: pointer to an array of buffers * @count: the number of buffers in the array - * - * Obtains change lock and updates package. */ -enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +static enum ice_status +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { - enum ice_status status; - u32 offset, info, i; - - status = ice_acquire_change_lock(hw, ICE_RES_WRITE); - if (status) - return status; + enum ice_status status = ICE_SUCCESS; + u32 i; for (i = 0; i < count; i++) { struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); bool last = ((i + 1) == count); + u32 offset, info; status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), last, &offset, &info, NULL); if (status) { ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", status, offset, info); break; } } + return status; +} + +/** + * ice_update_pkg + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains change lock and updates package. + */ +enum ice_status +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status; + + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; + + status = ice_update_pkg_no_lock(hw, bufs, count); + ice_release_change_lock(hw); return status; } /** * ice_dwnld_cfg_bufs * @hw: pointer to the hardware structure * @bufs: pointer to an array of buffers * @count: the number of buffers in the array * * Obtains global config lock and downloads the package configuration buffers * to the firmware. Metadata buffers are skipped, and the first metadata buffer * found indicates that the rest of the buffers are all metadata buffers. 
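* If the global config lock is not acquired because another instance has already downloaded the package (ICE_ERR_AQ_NO_WORK), pkg_dwnld_status is set to ICE_AQ_RC_EEXIST and no buffers are sent.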
*/ static enum ice_status ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { enum ice_status status; struct ice_buf_hdr *bh; u32 offset, info, i; if (!bufs || !count) return ICE_ERR_PARAM; /* If the first buffer's first section has its metadata bit set * then there are no buffers to be downloaded, and the operation is * considered a success. */ bh = (struct ice_buf_hdr *)bufs; if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) return ICE_SUCCESS; /* reset pkg_dwnld_status in case this function is called in the * reset/rebuild flow */ hw->pkg_dwnld_status = ICE_AQ_RC_OK; status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); if (status) { if (status == ICE_ERR_AQ_NO_WORK) hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; else hw->pkg_dwnld_status = hw->adminq.sq_last_status; return status; } for (i = 0; i < count; i++) { bool last = ((i + 1) == count); if (!last) { /* check next buffer for metadata flag */ bh = (struct ice_buf_hdr *)(bufs + i + 1); /* A set metadata flag in the next buffer will signal * that the current buffer will be the last buffer * downloaded */ if (LE16_TO_CPU(bh->section_count)) if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) last = true; } bh = (struct ice_buf_hdr *)(bufs + i); status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, &offset, &info, NULL); /* Save AQ status from download package */ hw->pkg_dwnld_status = hw->adminq.sq_last_status; if (status) { ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", status, offset, info); break; } if (last) break; } if (!status) { status = ice_set_vlan_mode(hw); if (status) ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", status); } ice_release_global_cfg_lock(hw); return status; } /** * ice_aq_get_pkg_info_list * @hw: pointer to the hardware structure * @pkg_info: the buffer which will receive the information list * @buf_size: the size of the pkg_info information buffer * @cd: pointer to command details structure or NULL * * Get Package Info List (0x0C43) */ static enum ice_status ice_aq_get_pkg_info_list(struct ice_hw *hw, struct ice_aqc_get_pkg_info_resp *pkg_info, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); } /** * ice_download_pkg * @hw: pointer to the hardware structure * @ice_seg: pointer to the segment of the package to be downloaded * * Handles the download of a complete package. 
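* The segment's buffers are located via ice_find_buf_table() and downloaded with ice_dwnld_cfg_bufs(); the VLAN mode is then cached regardless of the download status.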
*/ static enum ice_status ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) { struct ice_buf_table *ice_buf_tbl; + enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", ice_seg->hdr.seg_format_ver.major, ice_seg->hdr.seg_format_ver.minor, ice_seg->hdr.seg_format_ver.update, ice_seg->hdr.seg_format_ver.draft); ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", LE32_TO_CPU(ice_seg->hdr.seg_type), LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); ice_buf_tbl = ice_find_buf_table(ice_seg); ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", LE32_TO_CPU(ice_buf_tbl->buf_count)); - return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - LE32_TO_CPU(ice_buf_tbl->buf_count)); + status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + LE32_TO_CPU(ice_buf_tbl->buf_count)); + + ice_cache_vlan_mode(hw); + + return status; } /** * ice_init_pkg_info * @hw: pointer to the hardware structure * @pkg_hdr: pointer to the driver's package hdr * * Saves off the package details into the HW structure. */ static enum ice_status ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { struct ice_generic_seg_hdr *seg_hdr; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (!pkg_hdr) return ICE_ERR_PARAM; seg_hdr = (struct ice_generic_seg_hdr *) ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr); if (seg_hdr) { struct ice_meta_sect *meta; struct ice_pkg_enum state; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); /* Get package information from the Metadata Section */ meta = (struct ice_meta_sect *) ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, ICE_SID_METADATA); if (!meta) { ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); return ICE_ERR_CFG; } hw->pkg_ver = meta->ver; ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name), ICE_NONDMA_TO_NONDMA); ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", meta->ver.major, meta->ver.minor, meta->ver.update, meta->ver.draft, meta->name); hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA); ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", seg_hdr->seg_format_ver.major, seg_hdr->seg_format_ver.minor, seg_hdr->seg_format_ver.update, seg_hdr->seg_format_ver.draft, seg_hdr->seg_id); } else { ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); return ICE_ERR_CFG; } return ICE_SUCCESS; } /** * ice_get_pkg_info * @hw: pointer to the hardware structure * * Store details of the package currently loaded in HW into the HW structure. 
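* The debug output below encodes per-package flags: 'A' = active, 'B' = active at boot, 'M' = modified, 'N' = present in NVM.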
*/ static enum ice_status ice_get_pkg_info(struct ice_hw *hw) { struct ice_aqc_get_pkg_info_resp *pkg_info; enum ice_status status; u16 size; u32 i; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT); pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); if (!pkg_info) return ICE_ERR_NO_MEMORY; status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); if (status) goto init_pkg_free_alloc; for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) { #define ICE_PKG_FLAG_COUNT 4 char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; u8 place = 0; if (pkg_info->pkg_info[i].is_active) { flags[place++] = 'A'; hw->active_pkg_ver = pkg_info->pkg_info[i].ver; hw->active_track_id = LE32_TO_CPU(pkg_info->pkg_info[i].track_id); ice_memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name, sizeof(pkg_info->pkg_info[i].name), ICE_NONDMA_TO_NONDMA); hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; } if (pkg_info->pkg_info[i].is_active_at_boot) flags[place++] = 'B'; if (pkg_info->pkg_info[i].is_modified) flags[place++] = 'M'; if (pkg_info->pkg_info[i].is_in_nvm) flags[place++] = 'N'; ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i, pkg_info->pkg_info[i].ver.major, pkg_info->pkg_info[i].ver.minor, pkg_info->pkg_info[i].ver.update, pkg_info->pkg_info[i].ver.draft, pkg_info->pkg_info[i].name, flags); } init_pkg_free_alloc: ice_free(hw, pkg_info); return status; } /** * ice_find_label_value * @ice_seg: pointer to the ice segment (non-NULL) * @name: name of the label to search for * @type: the section type that will contain the label * @value: pointer to a value that will return the label's value if found * * Finds a label's value given the label name and the section type to search. * The ice_seg parameter must not be NULL since the first call to * ice_enum_labels requires a pointer to an actual ice_seg structure. */ enum ice_status ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, u16 *value) { struct ice_pkg_enum state; char *label_name; u16 val; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!ice_seg) return ICE_ERR_PARAM; do { label_name = ice_enum_labels(ice_seg, type, &state, &val); if (label_name && !strcmp(label_name, name)) { *value = val; return ICE_SUCCESS; } ice_seg = NULL; } while (label_name); return ICE_ERR_CFG; } /** * ice_verify_pkg - verify package * @pkg: pointer to the package buffer * @len: size of the package buffer * * Verifies various attributes of the package file, including length, format * version, and the requirement of at least one segment. 
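* The checks, in order: a minimum header length, an exact package format version match, at least one segment, the segment offset array fitting within len, and each segment header and body fitting within len.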
*/ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) { u32 seg_count; u32 i; if (len < ice_struct_size(pkg, seg_offset, 1)) return ICE_ERR_BUF_TOO_SHORT; if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) return ICE_ERR_CFG; /* pkg must have at least one segment */ seg_count = LE32_TO_CPU(pkg->seg_count); if (seg_count < 1) return ICE_ERR_CFG; /* make sure segment array fits in package length */ if (len < ice_struct_size(pkg, seg_offset, seg_count)) return ICE_ERR_BUF_TOO_SHORT; /* all segments must fit within length */ for (i = 0; i < seg_count; i++) { u32 off = LE32_TO_CPU(pkg->seg_offset[i]); struct ice_generic_seg_hdr *seg; /* segment header must fit */ if (len < off + sizeof(*seg)) return ICE_ERR_BUF_TOO_SHORT; seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); /* segment body must fit */ if (len < off + LE32_TO_CPU(seg->seg_size)) return ICE_ERR_BUF_TOO_SHORT; } return ICE_SUCCESS; } /** * ice_free_seg - free package segment pointer * @hw: pointer to the hardware structure * * Frees the package segment pointer in the proper manner, depending on if the * segment was allocated or just the passed in pointer was stored. */ void ice_free_seg(struct ice_hw *hw) { if (hw->pkg_copy) { ice_free(hw, hw->pkg_copy); hw->pkg_copy = NULL; hw->pkg_size = 0; } hw->seg = NULL; } /** * ice_init_pkg_regs - initialize additional package registers * @hw: pointer to the hardware structure */ static void ice_init_pkg_regs(struct ice_hw *hw) { #define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF #define ICE_SW_BLK_INP_MASK_H 0x0000FFFF #define ICE_SW_BLK_IDX 0 /* setup Switch block input mask, which is 48-bits in two parts */ wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); } /** * ice_chk_pkg_version - check package version for compatibility with driver * @pkg_ver: pointer to a version structure to check * * Check to make sure that the package about to be downloaded is compatible with * the driver. To be compatible, the major and minor components of the package * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR * definitions. 
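* For example, with supported version values of 1 and 3 (illustrative numbers only), a 1.3.x.x package would be accepted, while a 1.2.x.x or 2.3.x.x package would be rejected with ICE_ERR_NOT_SUPPORTED.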
*/ static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) { if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) return ICE_ERR_NOT_SUPPORTED; return ICE_SUCCESS; } /** * ice_chk_pkg_compat * @hw: pointer to the hardware structure * @ospkg: pointer to the package hdr * @seg: pointer to the package segment hdr * * This function checks the package version compatibility with the driver and * NVM */ static enum ice_status ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, struct ice_seg **seg) { struct ice_aqc_get_pkg_info_resp *pkg; enum ice_status status; u16 size; u32 i; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Check package version compatibility */ status = ice_chk_pkg_version(&hw->pkg_ver); if (status) { ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); return status; } /* find ICE segment in given package */ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, ospkg); if (!*seg) { ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); return ICE_ERR_CFG; } /* Check if FW is compatible with the OS package */ size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT); pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); if (!pkg) return ICE_ERR_NO_MEMORY; status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); if (status) goto fw_ddp_compat_free_alloc; for (i = 0; i < LE32_TO_CPU(pkg->count); i++) { /* loop till we find the NVM package */ if (!pkg->pkg_info[i].is_in_nvm) continue; if ((*seg)->hdr.seg_format_ver.major != pkg->pkg_info[i].ver.major || (*seg)->hdr.seg_format_ver.minor > pkg->pkg_info[i].ver.minor) { status = ICE_ERR_FW_DDP_MISMATCH; ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); } /* done processing NVM package so break */ break; } fw_ddp_compat_free_alloc: ice_free(hw, pkg); return status; } /** * ice_sw_fv_handler * @sect_type: section type * @section: pointer to section * @index: index of the field vector entry to be returned * @offset: ptr to variable that receives the offset in the field vector table * * This is a callback function that can be passed to ice_pkg_enum_entry. * This function treats the given section as of type ice_sw_fv_section and * enumerates offset field. "offset" is an index into the field vector table. */ static void * ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) { struct ice_sw_fv_section *fv_section = (struct ice_sw_fv_section *)section; if (!section || sect_type != ICE_SID_FLD_VEC_SW) return NULL; if (index >= LE16_TO_CPU(fv_section->count)) return NULL; if (offset) /* "index" passed in to this function is relative to a given * 4k block. To get to the true index into the field vector * table, we need to add the relative index to the base_offset * field of this section */ *offset = LE16_TO_CPU(fv_section->base_offset) + index; return fv_section->fv + index; } /** * ice_get_prof_index_max - get the maximum profile index in use * @hw: pointer to the HW struct * * Calling this function will find the maximum profile index among the * profiles currently in use and store that index in * hw->switch_info->max_used_prof_index for later use.
*/ static int ice_get_prof_index_max(struct ice_hw *hw) { u16 prof_index = 0, j, max_prof_index = 0; struct ice_pkg_enum state; struct ice_seg *ice_seg; bool flag = false; struct ice_fv *fv; u32 offset; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!hw->seg) return ICE_ERR_PARAM; ice_seg = hw->seg; do { fv = (struct ice_fv *) ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, &offset, ice_sw_fv_handler); if (!fv) break; ice_seg = NULL; /* in profiles that are not in use, the prot_id is set to 0xff * and the off is set to 0x1ff for all the field vectors. */ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) if (fv->ew[j].prot_id != ICE_PROT_INVALID || fv->ew[j].off != ICE_FV_OFFSET_INVAL) flag = true; if (flag && prof_index > max_prof_index) max_prof_index = prof_index; prof_index++; flag = false; } while (fv); hw->switch_info->max_used_prof_index = max_prof_index; return ICE_SUCCESS; } /** * ice_init_pkg - initialize/download package * @hw: pointer to the hardware structure * @buf: pointer to the package buffer * @len: size of the package buffer * * This function initializes a package. The package contains HW tables * required to do packet processing. First, the function extracts package * information such as version. Then it finds the ice configuration segment * within the package; this function then saves a copy of the segment pointer * within the supplied package buffer. Next, the function will cache any hints * from the package, followed by downloading the package itself. Note that if * a previous PF driver has already downloaded the package successfully, then * the current driver will not have to download the package again. * * The local package contents will be used to query default behavior and to * update specific sections of the HW's version of the package (e.g. to update * the parse graph to understand new protocols). * * This function stores a pointer to the package buffer memory, and it is * expected that the supplied buffer will not be freed immediately. If the * package buffer needs to be freed, such as when read from a file, use * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this * case. */ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) { struct ice_pkg_hdr *pkg; enum ice_status status; struct ice_seg *seg; if (!buf || !len) return ICE_ERR_PARAM; pkg = (struct ice_pkg_hdr *)buf; status = ice_verify_pkg(pkg, len); if (status) { ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", status); return status; } /* initialize package info */ status = ice_init_pkg_info(hw, pkg); if (status) return status; /* before downloading the package, check package version for * compatibility with driver */ status = ice_chk_pkg_compat(hw, pkg, &seg); if (status) return status; /* initialize package hints and then download package */ ice_init_pkg_hints(hw, seg); status = ice_download_pkg(hw, seg); if (status == ICE_ERR_AQ_NO_WORK) { ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); status = ICE_SUCCESS; } /* Get information on the package currently loaded in HW, then make sure * the driver is compatible with this version. */ if (!status) { status = ice_get_pkg_info(hw); if (!status) status = ice_chk_pkg_version(&hw->active_pkg_ver); } if (!status) { hw->seg = seg; /* on successful package download, update other required * registers to support the package and fill HW tables * with package content.
*/ ice_init_pkg_regs(hw); ice_fill_blk_tbls(hw); ice_get_prof_index_max(hw); } else { ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", status); } return status; } /** * ice_copy_and_init_pkg - initialize/download a copy of the package * @hw: pointer to the hardware structure * @buf: pointer to the package buffer * @len: size of the package buffer * * This function copies the package buffer, and then calls ice_init_pkg() to * initialize the copied package contents. * * The copy is necessary when the supplied package buffer is constant, or when * the memory it occupies may disappear shortly after this call, such as a * buffer read from a file. If the package buffer resides in a modifiable, * long-lived data segment, the caller is free to use ice_init_pkg() directly * instead. * * The caller may destroy the original package buffer as soon as this function * returns, since the copy is managed by this function and related routines. */ enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) { enum ice_status status; u8 *buf_copy; if (!buf || !len) return ICE_ERR_PARAM; buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA); status = ice_init_pkg(hw, buf_copy, len); if (status) { /* Free the copy, since we failed to initialize the package */ ice_free(hw, buf_copy); } else { /* Track the copied pkg so we can free it later */ hw->pkg_copy = buf_copy; hw->pkg_size = len; } return status; } /** * ice_pkg_buf_alloc * @hw: pointer to the HW structure * * Allocates a package buffer and returns a pointer to the buffer header. * Note: all package contents must be in Little Endian form.
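* * Typical build flow (see ice_create_tunnel() later in this file for a complete example): allocate with ice_pkg_buf_alloc(), reserve section table entries with ice_pkg_buf_reserve_section(), carve out section space with ice_pkg_buf_alloc_section(), then hand ice_pkg_buf() to ice_update_pkg() and free the build with ice_pkg_buf_free().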
*/ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) { struct ice_buf_build *bld; struct ice_buf_hdr *buf; bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld)); if (!bld) return NULL; buf = (struct ice_buf_hdr *)bld; buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr, section_entry)); return bld; } /** * ice_get_sw_prof_type - determine switch profile type * @hw: pointer to the HW structure * @fv: pointer to the switch field vector */ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv) { u16 i; for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && fv->ew[i].off == ICE_VNI_OFFSET) return ICE_PROF_TUN_UDP; /* GRE tunnel will have GRE protocol */ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) return ICE_PROF_TUN_GRE; } return ICE_PROF_NON_TUN; } /** * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type * @hw: pointer to hardware structure * @req_profs: type of profiles requested * @bm: pointer to memory for returning the bitmap of field vectors */ void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, ice_bitmap_t *bm) { struct ice_pkg_enum state; struct ice_seg *ice_seg; struct ice_fv *fv; if (req_profs == ICE_PROF_ALL) { ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); return; } ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); ice_seg = hw->seg; do { enum ice_prof_type prof_type; u32 offset; fv = (struct ice_fv *) ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, &offset, ice_sw_fv_handler); ice_seg = NULL; if (fv) { /* Determine field vector type */ prof_type = ice_get_sw_prof_type(hw, fv); if (req_profs & prof_type) ice_set_bit((u16)offset, bm); } } while (fv); } /** * ice_get_sw_fv_list * @hw: pointer to the HW structure * @prot_ids: field vector to search for with a given protocol ID * @ids_cnt: lookup/protocol count * @bm: bitmap of field vectors to consider * @fv_list: Head of a list * * Finds all the field vector entries from switch block that contain * a given protocol ID and returns a list of structures of type * "ice_sw_fv_list_entry". Every structure in the list has a field vector * definition and profile ID information * NOTE: The caller of the function is responsible for freeing the memory * allocated for every list entry. */ enum ice_status ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) { struct ice_sw_fv_list_entry *fvl; struct ice_sw_fv_list_entry *tmp; struct ice_pkg_enum state; struct ice_seg *ice_seg; struct ice_fv *fv; u32 offset; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!ids_cnt || !hw->seg) return ICE_ERR_PARAM; ice_seg = hw->seg; do { u16 i; fv = (struct ice_fv *) ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, &offset, ice_sw_fv_handler); if (!fv) break; ice_seg = NULL; /* If field vector is not in the bitmap list, then skip this * profile. */ if (!ice_is_bit_set(bm, (u16)offset)) continue; for (i = 0; i < ids_cnt; i++) { int j; /* This code assumes that if a switch field vector line * has a matching protocol, then this line will contain * the entries necessary to represent every field in * that protocol header. 
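* If no word in this line matches prot_ids[i], the line is skipped entirely.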
*/ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) if (fv->ew[j].prot_id == prot_ids[i]) break; if (j >= hw->blk[ICE_BLK_SW].es.fvw) break; if (i + 1 == ids_cnt) { fvl = (struct ice_sw_fv_list_entry *) ice_malloc(hw, sizeof(*fvl)); if (!fvl) goto err; fvl->fv_ptr = fv; fvl->profile_id = offset; LIST_ADD(&fvl->list_entry, fv_list); break; } } } while (fv); if (LIST_EMPTY(fv_list)) return ICE_ERR_CFG; return ICE_SUCCESS; err: LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry, list_entry) { LIST_DEL(&fvl->list_entry); ice_free(hw, fvl); } return ICE_ERR_NO_MEMORY; } /** * ice_init_prof_result_bm - Initialize the profile result index bitmap * @hw: pointer to hardware structure */ void ice_init_prof_result_bm(struct ice_hw *hw) { struct ice_pkg_enum state; struct ice_seg *ice_seg; struct ice_fv *fv; ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); if (!hw->seg) return; ice_seg = hw->seg; do { u32 off; u16 i; fv = (struct ice_fv *) ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, &off, ice_sw_fv_handler); ice_seg = NULL; if (!fv) break; ice_zero_bitmap(hw->switch_info->prof_res_bm[off], ICE_MAX_FV_WORDS); /* Determine empty field vector indices, these can be * used for recipe results. Skip index 0, since it is * always used for Switch ID. */ for (i = 1; i < ICE_MAX_FV_WORDS; i++) if (fv->ew[i].prot_id == ICE_PROT_INVALID && fv->ew[i].off == ICE_FV_OFFSET_INVAL) ice_set_bit(i, hw->switch_info->prof_res_bm[off]); } while (fv); } /** * ice_pkg_buf_free * @hw: pointer to the HW structure * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * * Frees a package buffer */ -static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) { ice_free(hw, bld); } /** * ice_pkg_buf_reserve_section * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * @count: the number of sections to reserve * * Reserves one or more section table entries in a package buffer. This routine * can be called multiple times, as long as all calls are made before the first * call to ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has * been called, the number of sections that can be allocated can no longer be * increased; leaving reserved sections unused is fine, but will result in some * wasted space in the buffer. * Note: all package contents must be in Little Endian form. */ static enum ice_status ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) { struct ice_buf_hdr *buf; u16 section_count; u16 data_end; if (!bld) return ICE_ERR_PARAM; buf = (struct ice_buf_hdr *)&bld->buf; /* already an active section, can't increase table size */ section_count = LE16_TO_CPU(buf->section_count); if (section_count > 0) return ICE_ERR_CFG; if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) return ICE_ERR_CFG; bld->reserved_section_table_entries += count; data_end = LE16_TO_CPU(buf->data_end) + FLEX_ARRAY_SIZE(buf, section_entry, count); buf->data_end = CPU_TO_LE16(data_end); return ICE_SUCCESS; } /** * ice_pkg_buf_alloc_section * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * @type: the section type value * @size: the size of the section to reserve (in bytes) * * Reserves memory in the buffer for a section's content and updates the * buffer's status accordingly. This routine returns a pointer to the first * byte of the section start within the buffer, which is used to fill in the * section contents. * Note: all package contents must be in Little Endian form.
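* The section start is aligned to a 4 byte boundary; NULL is returned if the buffer does not have enough room or no reserved section table entries remain.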
*/ static void * ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) { struct ice_buf_hdr *buf; u16 sect_count; u16 data_end; if (!bld || !type || !size) return NULL; buf = (struct ice_buf_hdr *)&bld->buf; /* check for enough space left in buffer */ data_end = LE16_TO_CPU(buf->data_end); /* section start must align on 4 byte boundary */ data_end = ICE_ALIGN(data_end, 4); if ((data_end + size) > ICE_MAX_S_DATA_END) return NULL; /* check for more available section table entries */ sect_count = LE16_TO_CPU(buf->section_count); if (sect_count < bld->reserved_section_table_entries) { void *section_ptr = ((u8 *)buf) + data_end; buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end); buf->section_entry[sect_count].size = CPU_TO_LE16(size); buf->section_entry[sect_count].type = CPU_TO_LE32(type); data_end += size; buf->data_end = CPU_TO_LE16(data_end); buf->section_count = CPU_TO_LE16(sect_count + 1); return section_ptr; } /* no free section table entries */ return NULL; } /** * ice_pkg_buf_alloc_single_section * @hw: pointer to the HW structure * @type: the section type value * @size: the size of the section to reserve (in bytes) * @section: returns pointer to the section * * Allocates a package buffer with a single section. * Note: all package contents must be in Little Endian form. */ -static struct ice_buf_build * +struct ice_buf_build * ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, void **section) { struct ice_buf_build *buf; if (!section) return NULL; buf = ice_pkg_buf_alloc(hw); if (!buf) return NULL; if (ice_pkg_buf_reserve_section(buf, 1)) goto ice_pkg_buf_alloc_single_section_err; *section = ice_pkg_buf_alloc_section(buf, type, size); if (!*section) goto ice_pkg_buf_alloc_single_section_err; return buf; ice_pkg_buf_alloc_single_section_err: ice_pkg_buf_free(hw, buf); return NULL; } /** * ice_pkg_buf_unreserve_section * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * @count: the number of sections to unreserve * * Unreserves one or more section table entries in a package buffer, releasing * space that can be used for section data. This routine can be called * multiple times, as long as all calls are made before the first call to * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been * called, the number of sections that can be allocated can no longer be * increased; leaving reserved sections unused is fine, but will result in some * wasted space in the buffer. * Note: all package contents must be in Little Endian form. */ enum ice_status ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) { struct ice_buf_hdr *buf; u16 section_count; u16 data_end; if (!bld) return ICE_ERR_PARAM; buf = (struct ice_buf_hdr *)&bld->buf; /* already an active section, can't decrease table size */ section_count = LE16_TO_CPU(buf->section_count); if (section_count > 0) return ICE_ERR_CFG; if (count > bld->reserved_section_table_entries) return ICE_ERR_CFG; bld->reserved_section_table_entries -= count; data_end = LE16_TO_CPU(buf->data_end) - FLEX_ARRAY_SIZE(buf, section_entry, count); buf->data_end = CPU_TO_LE16(data_end); return ICE_SUCCESS; } /** * ice_pkg_buf_get_free_space * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * * Returns the number of free bytes remaining in the buffer. * Note: all package contents must be in Little Endian form.
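* Free space is measured against ICE_MAX_S_DATA_END rather than the raw buffer size, since section data may not extend past that offset.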
*/ u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld) { struct ice_buf_hdr *buf; if (!bld) return 0; buf = (struct ice_buf_hdr *)&bld->buf; return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end); } /** * ice_pkg_buf_get_active_sections * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * * Returns the number of active sections. Before using the package buffer * in an update package command, the caller should make sure that there is at * least one active section - otherwise, the buffer is not legal and should * not be used. * Note: all package contents must be in Little Endian form. */ static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) { struct ice_buf_hdr *buf; if (!bld) return 0; buf = (struct ice_buf_hdr *)&bld->buf; return LE16_TO_CPU(buf->section_count); } /** * ice_pkg_buf * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) * * Return a pointer to the buffer's header */ -static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) { if (!bld) return NULL; return &bld->buf; } /** * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage * @hw: pointer to the HW structure * @port: port to search for * @index: optionally returns index * * Returns whether a port is already in use as a tunnel, and optionally its * index */ static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index) { u16 i; for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { if (index) *index = i; return true; } return false; } /** * ice_tunnel_port_in_use * @hw: pointer to the HW structure * @port: port to search for * @index: optionally returns index * * Returns whether a port is already in use as a tunnel, and optionally its * index */ bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) { bool res; ice_acquire_lock(&hw->tnl_lock); res = ice_tunnel_port_in_use_hlpr(hw, port, index); ice_release_lock(&hw->tnl_lock); return res; } /** * ice_tunnel_get_type * @hw: pointer to the HW structure * @port: port to search for * @type: returns tunnel type * * For a given port number, will return the type of tunnel.
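* Returns false when the port is not an active tunnel. Illustrative use: enum ice_tunnel_type type; if (ice_tunnel_get_type(hw, port, &type) && type == TNL_VXLAN) { ... }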
*/ bool ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type) { bool res = false; u16 i; ice_acquire_lock(&hw->tnl_lock); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { *type = hw->tnl.tbl[i].type; res = true; break; } ice_release_lock(&hw->tnl_lock); return res; } /** * ice_find_free_tunnel_entry * @hw: pointer to the HW structure * @type: tunnel type * @index: optionally returns index * * Returns whether there is a free tunnel entry, and optionally its index */ static bool ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type, u16 *index) { u16 i; for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].type == type) { if (index) *index = i; return true; } return false; } /** * ice_get_open_tunnel_port - retrieve an open tunnel port * @hw: pointer to the HW structure * @type: tunnel type (TNL_ALL will return any open port) * @port: returns open port */ bool ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, u16 *port) { bool res = false; u16 i; ice_acquire_lock(&hw->tnl_lock); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && (type == TNL_ALL || hw->tnl.tbl[i].type == type)) { *port = hw->tnl.tbl[i].port; res = true; break; } ice_release_lock(&hw->tnl_lock); return res; } /** * ice_create_tunnel * @hw: pointer to the HW structure * @type: type of tunnel * @port: port of tunnel to create * * Create a tunnel by updating the parse graph in the parser. We do that by * creating a package buffer with the tunnel info and issuing an update package * command. */ enum ice_status ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) { struct ice_boost_tcam_section *sect_rx, *sect_tx; enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; u16 index; ice_acquire_lock(&hw->tnl_lock); if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) { hw->tnl.tbl[index].ref++; status = ICE_SUCCESS; goto ice_create_tunnel_end; } if (!ice_find_free_tunnel_entry(hw, type, &index)) { status = ICE_ERR_OUT_OF_RANGE; goto ice_create_tunnel_end; } bld = ice_pkg_buf_alloc(hw); if (!bld) { status = ICE_ERR_NO_MEMORY; goto ice_create_tunnel_end; } /* allocate 2 sections, one for Rx parser, one for Tx parser */ if (ice_pkg_buf_reserve_section(bld, 2)) goto ice_create_tunnel_err; sect_rx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, ice_struct_size(sect_rx, tcam, 1)); if (!sect_rx) goto ice_create_tunnel_err; sect_rx->count = CPU_TO_LE16(1); sect_tx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, ice_struct_size(sect_tx, tcam, 1)); if (!sect_tx) goto ice_create_tunnel_err; sect_tx->count = CPU_TO_LE16(1); /* copy original boost entry to update package buffer */ ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry, sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA); /* over-write the never-match dest port key bits with the encoded port * bits */ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key), (u8 *)&port, NULL, NULL, NULL, (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key), sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key)); /* exact copy of entry to Tx section entry */ ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam), ICE_NONDMA_TO_NONDMA); status = ice_update_pkg(hw,
ice_pkg_buf(bld), 1); if (!status) { hw->tnl.tbl[index].port = port; hw->tnl.tbl[index].in_use = true; hw->tnl.tbl[index].ref = 1; } ice_create_tunnel_err: ice_pkg_buf_free(hw, bld); ice_create_tunnel_end: ice_release_lock(&hw->tnl_lock); return status; } /** * ice_destroy_tunnel * @hw: pointer to the HW structure * @port: port of tunnel to destroy (ignored if the all parameter is true) * @all: flag that states to destroy all tunnels * * Destroys a tunnel or all tunnels by creating an update package buffer * targeting the specific updates requested and then performing an update * package. */ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) { struct ice_boost_tcam_section *sect_rx, *sect_tx; enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; u16 count = 0; u16 index; u16 size; - u16 i; + u16 i, j; ice_acquire_lock(&hw->tnl_lock); if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index)) if (hw->tnl.tbl[index].ref > 1) { hw->tnl.tbl[index].ref--; status = ICE_SUCCESS; goto ice_destroy_tunnel_end; } /* determine count */ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && (all || hw->tnl.tbl[i].port == port)) count++; if (!count) { status = ICE_ERR_PARAM; goto ice_destroy_tunnel_end; } /* size of section - there is at least one entry */ size = ice_struct_size(sect_rx, tcam, count); bld = ice_pkg_buf_alloc(hw); if (!bld) { status = ICE_ERR_NO_MEMORY; goto ice_destroy_tunnel_end; } /* allocate 2 sections, one for Rx parser, one for Tx parser */ if (ice_pkg_buf_reserve_section(bld, 2)) goto ice_destroy_tunnel_err; sect_rx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, size); if (!sect_rx) goto ice_destroy_tunnel_err; - sect_rx->count = CPU_TO_LE16(1); + sect_rx->count = CPU_TO_LE16(count); sect_tx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, size); if (!sect_tx) goto ice_destroy_tunnel_err; - sect_tx->count = CPU_TO_LE16(1); + sect_tx->count = CPU_TO_LE16(count); /* copy original boost entry to update package buffer, one copy to Rx * section, another copy to the Tx section */ - for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) + for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && (all || hw->tnl.tbl[i].port == port)) { - ice_memcpy(sect_rx->tcam + i, + ice_memcpy(sect_rx->tcam + j, hw->tnl.tbl[i].boost_entry, sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA); - ice_memcpy(sect_tx->tcam + i, + ice_memcpy(sect_tx->tcam + j, hw->tnl.tbl[i].boost_entry, sizeof(*sect_tx->tcam), ICE_NONDMA_TO_NONDMA); hw->tnl.tbl[i].marked = true; + j++; } status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); if (!status) for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].marked) { hw->tnl.tbl[i].ref = 0; hw->tnl.tbl[i].port = 0; hw->tnl.tbl[i].in_use = false; hw->tnl.tbl[i].marked = false; } ice_destroy_tunnel_err: ice_pkg_buf_free(hw, bld); ice_destroy_tunnel_end: ice_release_lock(&hw->tnl_lock); return status; } /** * ice_replay_tunnels * @hw: pointer to the HW structure * * Replays all tunnels */ enum ice_status ice_replay_tunnels(struct ice_hw *hw) { enum ice_status status = ICE_SUCCESS; u16 i; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) { enum ice_tunnel_type type = hw->tnl.tbl[i].type; u16 refs = hw->tnl.tbl[i].ref; u16 
port = hw->tnl.tbl[i].port; if (!hw->tnl.tbl[i].in_use) continue; /* Replay tunnels one at a time by destroying them, then * recreating them */ hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */ status = ice_destroy_tunnel(hw, port, false); if (status) { ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n", status, port); break; } status = ice_create_tunnel(hw, type, port); if (status) { ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - create tunnel port 0x%x\n", status, port); break; } /* reset to original ref count */ hw->tnl.tbl[i].ref = refs; } return status; } /** * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index * @hw: pointer to the hardware structure * @blk: hardware block * @prof: profile ID * @fv_idx: field vector word index * @prot: variable to receive the protocol ID * @off: variable to receive the protocol offset */ enum ice_status ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off) { struct ice_fv_word *fv_ext; if (prof >= hw->blk[blk].es.count) return ICE_ERR_PARAM; if (fv_idx >= hw->blk[blk].es.fvw) return ICE_ERR_PARAM; fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); *prot = fv_ext[fv_idx].prot_id; *off = fv_ext[fv_idx].off; return ICE_SUCCESS; } /* PTG Management */ /** * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table * @hw: pointer to the hardware structure * @blk: HW block * * This function will update the XLT1 hardware table to reflect the new * packet type group configuration. */ enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk) { struct ice_xlt1_section *sect; struct ice_buf_build *bld; enum ice_status status; u16 index; bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1), ice_struct_size(sect, value, ICE_XLT1_CNT), (void **)&sect); if (!bld) return ICE_ERR_NO_MEMORY; sect->count = CPU_TO_LE16(ICE_XLT1_CNT); sect->offset = CPU_TO_LE16(0); for (index = 0; index < ICE_XLT1_CNT; index++) sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg; status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); ice_pkg_buf_free(hw, bld); return status; } /** * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) * @hw: pointer to the hardware structure * @blk: HW block * @ptype: the ptype to search for * @ptg: pointer to variable that receives the PTG * * This function will search the PTGs for a particular ptype, returning the * PTG ID that contains it through the PTG parameter, with the value of * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG. */ static enum ice_status ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) { if (ptype >= ICE_XLT1_CNT || !ptg) return ICE_ERR_PARAM; *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; return ICE_SUCCESS; } /** * ice_ptg_alloc_val - Allocates a new packet type group ID by value * @hw: pointer to the hardware structure * @blk: HW block * @ptg: the PTG to allocate * * This function allocates a given packet type group ID specified by the PTG * parameter. */ static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) { hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; } /** * ice_ptg_free - Frees a packet type group * @hw: pointer to the hardware structure * @blk: HW block * @ptg: the PTG ID to free * * This function frees a packet type group, and returns all the current ptypes * within it to the default PTG.
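 *
 * Usage sketch (hypothetical): a PTG previously claimed with
 * ice_ptg_alloc_val() is handed back and every ptype linked to it
 * rejoins ICE_DEFAULT_PTG:
 *
 *	ice_ptg_alloc_val(hw, ICE_BLK_RSS, ptg);
 *	ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, ptype, ptg);
 *	ice_ptg_free(hw, ICE_BLK_RSS, ptg);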
*/ void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg) { struct ice_ptg_ptype *p, *temp; hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false; p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; while (p) { p->ptg = ICE_DEFAULT_PTG; temp = p->next_ptype; p->next_ptype = NULL; p = temp; } hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL; } /** * ice_ptg_remove_ptype - Removes ptype from a particular packet type group * @hw: pointer to the hardware structure * @blk: HW block * @ptype: the ptype to remove * @ptg: the PTG to remove the ptype from * * This function will remove the ptype from the specific PTG, and move it to * the default PTG (ICE_DEFAULT_PTG). */ static enum ice_status ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { struct ice_ptg_ptype **ch; struct ice_ptg_ptype *p; if (ptype > ICE_XLT1_CNT - 1) return ICE_ERR_PARAM; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) return ICE_ERR_DOES_NOT_EXIST; /* Should not happen if .in_use is set, bad config */ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) return ICE_ERR_CFG; /* find the ptype within this PTG, and bypass the link over it */ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; while (p) { if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { *ch = p->next_ptype; break; } ch = &p->next_ptype; p = p->next_ptype; } hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; return ICE_SUCCESS; } /** * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group * @hw: pointer to the hardware structure * @blk: HW block * @ptype: the ptype to add or move * @ptg: the PTG to add or move the ptype to * * This function will either add or move a ptype to a particular PTG depending * on if the ptype is already part of another group. Note that using * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the * default PTG. */ static enum ice_status ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { enum ice_status status; u8 original_ptg; if (ptype > ICE_XLT1_CNT - 1) return ICE_ERR_PARAM; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) return ICE_ERR_DOES_NOT_EXIST; status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); if (status) return status; /* Is ptype already in the correct PTG? */ if (original_ptg == ptg) return ICE_SUCCESS; /* Remove from original PTG and move back to the default PTG */ if (original_ptg != ICE_DEFAULT_PTG) ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); /* Moving to default PTG?
Then we're done with this request */ if (ptg == ICE_DEFAULT_PTG) return ICE_SUCCESS; /* Add ptype to PTG at beginning of list */ hw->blk[blk].xlt1.ptypes[ptype].next_ptype = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = &hw->blk[blk].xlt1.ptypes[ptype]; hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; hw->blk[blk].xlt1.t[ptype] = ptg; return ICE_SUCCESS; } /* Block / table size info */ struct ice_blk_size_details { u16 xlt1; /* # XLT1 entries */ u16 xlt2; /* # XLT2 entries */ u16 prof_tcam; /* # profile ID TCAM entries */ u16 prof_id; /* # profile IDs */ u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ u16 prof_redir; /* # profile redirection entries */ u16 es; /* # extraction sequence entries */ u16 fvw; /* # field vector words */ u8 overwrite; /* overwrite existing entries allowed */ u8 reverse; /* reverse FV order */ }; static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { /** * Table Definitions * XLT1 - Number of entries in XLT1 table * XLT2 - Number of entries in XLT2 table * TCAM - Number of entries Profile ID TCAM table * CDID - Control Domain ID of the hardware block * PRED - Number of entries in the Profile Redirection Table * FV - Number of entries in the Field Vector * FVW - Width (in WORDs) of the Field Vector * OVR - Overwrite existing table entries * REV - Reverse FV */ /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ /* Overwrite , Reverse FV */ /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, false, false }, /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, false, false }, /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, false, true }, /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, true, true }, /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, false, false }, }; enum ice_sid_all { ICE_SID_XLT1_OFF = 0, ICE_SID_XLT2_OFF, ICE_SID_PR_OFF, ICE_SID_PR_REDIR_OFF, ICE_SID_ES_OFF, ICE_SID_OFF_COUNT, }; /* Characteristic handling */ /** * ice_match_prop_lst - determine if properties of two lists match * @list1: first properties list * @list2: second properties list * * Count, cookies and the order must match in order to be considered equivalent. */ static bool ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2) { struct ice_vsig_prof *tmp1; struct ice_vsig_prof *tmp2; u16 chk_count = 0; u16 count = 0; /* compare counts */ LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) count++; LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) chk_count++; + /* cppcheck-suppress knownConditionTrueFalse */ if (!count || count != chk_count) return false; tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list); tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list); /* profile cookies must compare, and in the exact same order to take * into account priority */ while (count--) { if (tmp2->profile_cookie != tmp1->profile_cookie) return false; tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list); tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list); } return true; } /* VSIG Management */ /** * ice_vsig_update_xlt2_sect - update one section of XLT2 table * @hw: pointer to the hardware structure * @blk: HW block * @vsi: HW VSI number to program * @vsig: VSIG for the VSI * * This function will update the XLT2 hardware table with the input VSI * group configuration. 
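 *
 * The update is expressed as a one-entry XLT2 section: offset selects the
 * VSI slot being rewritten and value[0] carries its new VSIG, mirroring
 * the body below:
 *
 *	sect->count = CPU_TO_LE16(1);
 *	sect->offset = CPU_TO_LE16(vsi);
 *	sect->value[0] = CPU_TO_LE16(vsig);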
*/ static enum ice_status ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_xlt2_section *sect; struct ice_buf_build *bld; enum ice_status status; bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2), ice_struct_size(sect, value, 1), (void **)&sect); if (!bld) return ICE_ERR_NO_MEMORY; sect->count = CPU_TO_LE16(1); sect->offset = CPU_TO_LE16(vsi); sect->value[0] = CPU_TO_LE16(vsig); status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); ice_pkg_buf_free(hw, bld); return status; } /** * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration * @hw: pointer to the hardware structure * @blk: HW block * * This function will update the XLT2 hardware table with the input VSI * group configuration of used VSIs. */ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk) { u16 vsi; for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) { /* update only VSIs that have been changed */ if (hw->blk[blk].xlt2.vsis[vsi].changed) { enum ice_status status; u16 vsig; vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig); if (status) return status; hw->blk[blk].xlt2.vsis[vsi].changed = 0; } } return ICE_SUCCESS; } /** * ice_vsig_find_vsi - find a VSIG that contains a specified VSI * @hw: pointer to the hardware structure * @blk: HW block * @vsi: VSI of interest * @vsig: pointer to receive the VSI group * * This function will lookup the VSI entry in the XLT2 list and return * the VSI group it is associated with. */ enum ice_status ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) { if (!vsig || vsi >= ICE_MAX_VSI) return ICE_ERR_PARAM; /* As long as there's a default or valid VSIG associated with the input * VSI, the function returns success. Any handling of VSIG will be * done by the following add, update or remove functions. */ *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; return ICE_SUCCESS; } /** * ice_vsig_alloc_val - allocate a new VSIG by value * @hw: pointer to the hardware structure * @blk: HW block * @vsig: the VSIG to allocate * * This function will allocate a given VSIG specified by the VSIG parameter. */ static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) { u16 idx = vsig & ICE_VSIG_IDX_M; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; } return ICE_VSIG_VALUE(idx, hw->pf_id); } /** * ice_vsig_alloc - Finds a free entry and allocates a new VSIG * @hw: pointer to the hardware structure * @blk: HW block * * This function will iterate through the VSIG list and mark the first * unused entry for the new VSIG entry as used and return that value. */ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) { u16 i; for (i = 1; i < ICE_MAX_VSIGS; i++) if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use) return ice_vsig_alloc_val(hw, blk, i); return ICE_DEFAULT_VSIG; } /** * ice_find_dup_props_vsig - find VSI group with a specified set of properties * @hw: pointer to the hardware structure * @blk: HW block * @chs: characteristic list * @vsig: returns the VSIG with the matching profiles, if found * * Each VSIG is associated with a characteristic set; i.e. all VSIs under * a group have the same characteristic set. To check if there exists a VSIG * which has the same characteristics as the input characteristics, this * function will iterate through the XLT2 list and return the VSIG that has a * matching configuration.
In order to make sure that priorities are accounted * for, the list must match exactly, including the order in which the * characteristics are listed. */ static enum ice_status ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, struct LIST_HEAD_TYPE *chs, u16 *vsig) { struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2; u16 i; for (i = 0; i < xlt2->count; i++) if (xlt2->vsig_tbl[i].in_use && ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) { *vsig = ICE_VSIG_VALUE(i, hw->pf_id); return ICE_SUCCESS; } return ICE_ERR_DOES_NOT_EXIST; } /** * ice_vsig_free - free VSI group * @hw: pointer to the hardware structure * @blk: HW block * @vsig: VSIG to remove * * The function will remove all VSIs associated with the input VSIG and move * them to the DEFAULT_VSIG and mark the VSIG available. */ static enum ice_status ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) { struct ice_vsig_prof *dtmp, *del; struct ice_vsig_vsi *vsi_cur; u16 idx; idx = vsig & ICE_VSIG_IDX_M; if (idx >= ICE_MAX_VSIGS) return ICE_ERR_PARAM; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) return ICE_ERR_DOES_NOT_EXIST; hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; /* If the VSIG has at least 1 VSI then iterate through the * list and remove the VSIs before deleting the group. */ if (vsi_cur) { /* remove all vsis associated with this VSIG XLT2 entry */ do { struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; vsi_cur->vsig = ICE_DEFAULT_VSIG; vsi_cur->changed = 1; vsi_cur->next_vsi = NULL; vsi_cur = tmp; } while (vsi_cur); /* NULL terminate head of VSI list */ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL; } /* free characteristic list */ LIST_FOR_EACH_ENTRY_SAFE(del, dtmp, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) { LIST_DEL(&del->list); ice_free(hw, del); } /* if VSIG characteristic list was cleared for reset * re-initialize the list head */ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); return ICE_SUCCESS; } /** * ice_vsig_remove_vsi - remove VSI from VSIG * @hw: pointer to the hardware structure * @blk: HW block * @vsi: VSI to remove * @vsig: VSI group to remove from * * The function will remove the input VSI from its VSI group and move it * to the DEFAULT_VSIG. */ static enum ice_status ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; u16 idx; idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) return ICE_ERR_PARAM; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) return ICE_ERR_DOES_NOT_EXIST; /* entry already in default VSIG, don't have to remove */ if (idx == ICE_DEFAULT_VSIG) return ICE_SUCCESS; vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; if (!(*vsi_head)) return ICE_ERR_CFG; vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; vsi_cur = (*vsi_head); /* iterate the VSI list, skip over the entry to be removed */ while (vsi_cur) { if (vsi_tgt == vsi_cur) { (*vsi_head) = vsi_cur->next_vsi; break; } vsi_head = &vsi_cur->next_vsi; vsi_cur = vsi_cur->next_vsi; } /* verify if VSI was removed from group list */ if (!vsi_cur) return ICE_ERR_DOES_NOT_EXIST; vsi_cur->vsig = ICE_DEFAULT_VSIG; vsi_cur->changed = 1; vsi_cur->next_vsi = NULL; return ICE_SUCCESS; } /** * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group * @hw: pointer to the hardware structure * @blk: HW block * @vsi: VSI to move * @vsig: destination VSI group * * This function will move or add the input VSI to the target VSIG. 
* The function will find the original VSIG the VSI belongs to and * move the entry to the DEFAULT_VSIG, update the original VSIG and * then move entry to the new VSIG. */ static enum ice_status ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi *tmp; enum ice_status status; u16 orig_vsig, idx; idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) return ICE_ERR_PARAM; /* if VSIG not in use and VSIG is not default type this VSIG * doesn't exist. */ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use && vsig != ICE_DEFAULT_VSIG) return ICE_ERR_DOES_NOT_EXIST; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (status) return status; /* no update required if vsigs match */ if (orig_vsig == vsig) return ICE_SUCCESS; if (orig_vsig != ICE_DEFAULT_VSIG) { /* remove entry from orig_vsig and add to default VSIG */ status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig); if (status) return status; } if (idx == ICE_DEFAULT_VSIG) return ICE_SUCCESS; /* Create VSI entry and add VSIG and prop_mask values */ hw->blk[blk].xlt2.vsis[vsi].vsig = vsig; hw->blk[blk].xlt2.vsis[vsi].changed = 1; /* Add new entry to the head of the VSIG list */ tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = &hw->blk[blk].xlt2.vsis[vsi]; hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp; hw->blk[blk].xlt2.t[vsi] = vsig; return ICE_SUCCESS; } /** * ice_find_prof_id - find profile ID for a given field vector * @hw: pointer to the hardware structure * @blk: HW block * @fv: field vector to search for * @prof_id: receives the profile ID */ static enum ice_status ice_find_prof_id(struct ice_hw *hw, enum ice_block blk, struct ice_fv_word *fv, u8 *prof_id) { struct ice_es *es = &hw->blk[blk].es; u16 off; u8 i; for (i = 0; i < (u8)es->count; i++) { off = i * es->fvw; if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv))) continue; *prof_id = i; return ICE_SUCCESS; } return ICE_ERR_DOES_NOT_EXIST; } /** * ice_prof_id_rsrc_type - get profile ID resource type for a block type * @blk: the block type * @rsrc_type: pointer to variable to receive the resource type */ static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type) { switch (blk) { case ICE_BLK_RSS: *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID; break; case ICE_BLK_PE: *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID; break; default: return false; } return true; } /** * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type * @blk: the block type * @rsrc_type: pointer to variable to receive the resource type */ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type) { switch (blk) { case ICE_BLK_RSS: *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM; break; case ICE_BLK_PE: *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM; break; default: return false; } return true; } /** * ice_alloc_tcam_ent - allocate hardware TCAM entry * @hw: pointer to the HW struct * @blk: the block to allocate the TCAM for * @btm: true to allocate from bottom of table, false to allocate from top * @tcam_idx: pointer to variable to receive the TCAM entry * * This function allocates a new entry in a Profile ID TCAM for a specific * block. 
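 *
 * Caller sketch (hypothetical; btm = true requests an entry from the
 * bottom of the table):
 *
 *	u16 tcam_idx;
 *	enum ice_status status;
 *
 *	status = ice_alloc_tcam_ent(hw, ICE_BLK_RSS, true, &tcam_idx);
 *
 * On success tcam_idx names a free Profile ID TCAM slot; release it with
 * ice_free_tcam_ent(hw, ICE_BLK_RSS, tcam_idx) when it is no longer
 * needed.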
*/ static enum ice_status ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, u16 *tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); } /** * ice_free_tcam_ent - free hardware TCAM entry * @hw: pointer to the HW struct * @blk: the block from which to free the TCAM entry * @tcam_idx: the TCAM entry to free * * This function frees an entry in a Profile ID TCAM for a specific block. */ static enum ice_status ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; return ice_free_hw_res(hw, res_type, 1, &tcam_idx); } /** * ice_alloc_prof_id - allocate profile ID * @hw: pointer to the HW struct * @blk: the block to allocate the profile ID for * @prof_id: pointer to variable to receive the profile ID * * This function allocates a new profile ID, which also corresponds to a Field * Vector (Extraction Sequence) entry. */ static enum ice_status ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) { enum ice_status status; u16 res_type; u16 get_prof; if (!ice_prof_id_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); if (!status) *prof_id = (u8)get_prof; return status; } /** * ice_free_prof_id - free profile ID * @hw: pointer to the HW struct * @blk: the block from which to free the profile ID * @prof_id: the profile ID to free * * This function frees a profile ID, which also corresponds to a Field Vector. */ static enum ice_status ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { u16 tmp_prof_id = (u16)prof_id; u16 res_type; if (!ice_prof_id_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); } /** * ice_prof_inc_ref - increment reference count for profile * @hw: pointer to the HW struct * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to increment the reference count */ static enum ice_status ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) return ICE_ERR_PARAM; hw->blk[blk].es.ref_count[prof_id]++; return ICE_SUCCESS; } /** * ice_write_es - write an extraction sequence to hardware * @hw: pointer to the HW struct * @blk: the block in which to write the extraction sequence * @prof_id: the profile ID to write * @fv: pointer to the extraction sequence to write - NULL to clear extraction */ static void ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, struct ice_fv_word *fv) { u16 off; off = prof_id * hw->blk[blk].es.fvw; if (!fv) { ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw * sizeof(*fv), ICE_NONDMA_MEM); hw->blk[blk].es.written[prof_id] = false; } else { ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * sizeof(*fv), ICE_NONDMA_TO_NONDMA); } } /** * ice_prof_dec_ref - decrement reference count for profile * @hw: pointer to the HW struct * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to decrement the reference count */ static enum ice_status ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) return ICE_ERR_PARAM; if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { ice_write_es(hw, blk, prof_id, NULL); return ice_free_prof_id(hw, blk, prof_id); } } return ICE_SUCCESS; } /* 
Block / table section IDs */ static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = { /* SWITCH */ { ICE_SID_XLT1_SW, ICE_SID_XLT2_SW, ICE_SID_PROFID_TCAM_SW, ICE_SID_PROFID_REDIR_SW, ICE_SID_FLD_VEC_SW }, /* ACL */ { ICE_SID_XLT1_ACL, ICE_SID_XLT2_ACL, ICE_SID_PROFID_TCAM_ACL, ICE_SID_PROFID_REDIR_ACL, ICE_SID_FLD_VEC_ACL }, /* FD */ { ICE_SID_XLT1_FD, ICE_SID_XLT2_FD, ICE_SID_PROFID_TCAM_FD, ICE_SID_PROFID_REDIR_FD, ICE_SID_FLD_VEC_FD }, /* RSS */ { ICE_SID_XLT1_RSS, ICE_SID_XLT2_RSS, ICE_SID_PROFID_TCAM_RSS, ICE_SID_PROFID_REDIR_RSS, ICE_SID_FLD_VEC_RSS }, /* PE */ { ICE_SID_XLT1_PE, ICE_SID_XLT2_PE, ICE_SID_PROFID_TCAM_PE, ICE_SID_PROFID_REDIR_PE, ICE_SID_FLD_VEC_PE } }; /** * ice_init_sw_xlt1_db - init software XLT1 database from HW tables * @hw: pointer to the hardware structure * @blk: the HW block to initialize */ static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk) { u16 pt; for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) { u8 ptg; ptg = hw->blk[blk].xlt1.t[pt]; if (ptg != ICE_DEFAULT_PTG) { ice_ptg_alloc_val(hw, blk, ptg); ice_ptg_add_mv_ptype(hw, blk, pt, ptg); } } } /** * ice_init_sw_xlt2_db - init software XLT2 database from HW tables * @hw: pointer to the hardware structure * @blk: the HW block to initialize */ static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk) { u16 vsi; for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) { u16 vsig; vsig = hw->blk[blk].xlt2.t[vsi]; if (vsig) { ice_vsig_alloc_val(hw, blk, vsig); ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); /* no changes at this time, since this has been * initialized from the original package */ hw->blk[blk].xlt2.vsis[vsi].changed = 0; } } } /** * ice_init_sw_db - init software database from HW tables * @hw: pointer to the hardware structure */ static void ice_init_sw_db(struct ice_hw *hw) { u16 i; for (i = 0; i < ICE_BLK_COUNT; i++) { ice_init_sw_xlt1_db(hw, (enum ice_block)i); ice_init_sw_xlt2_db(hw, (enum ice_block)i); } } /** * ice_fill_tbl - Reads content of a single table type into database * @hw: pointer to the hardware structure * @block_id: Block ID of the table to copy * @sid: Section ID of the table to copy * * Will attempt to read the entire content of a given table of a single block * into the driver database. We assume that the buffer will always * be as large or larger than the data contained in the package. If * this condition is not met, there is most likely an error in the package * contents. */ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid) { u32 dst_len, sect_len, offset = 0; struct ice_prof_redir_section *pr; struct ice_prof_id_section *pid; struct ice_xlt1_section *xlt1; struct ice_xlt2_section *xlt2; struct ice_sw_fv_section *es; struct ice_pkg_enum state; u8 *src, *dst; void *sect; /* if the HW segment pointer is null then the first iteration of * ice_pkg_enum_section() will fail. In this case the HW tables will * not be filled and return success. 
*/ if (!hw->seg) { ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); return; } ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); sect = ice_pkg_enum_section(hw->seg, &state, sid); while (sect) { switch (sid) { case ICE_SID_XLT1_SW: case ICE_SID_XLT1_FD: case ICE_SID_XLT1_RSS: case ICE_SID_XLT1_ACL: case ICE_SID_XLT1_PE: xlt1 = (struct ice_xlt1_section *)sect; src = xlt1->value; sect_len = LE16_TO_CPU(xlt1->count) * sizeof(*hw->blk[block_id].xlt1.t); dst = hw->blk[block_id].xlt1.t; dst_len = hw->blk[block_id].xlt1.count * sizeof(*hw->blk[block_id].xlt1.t); break; case ICE_SID_XLT2_SW: case ICE_SID_XLT2_FD: case ICE_SID_XLT2_RSS: case ICE_SID_XLT2_ACL: case ICE_SID_XLT2_PE: xlt2 = (struct ice_xlt2_section *)sect; src = (_FORCE_ u8 *)xlt2->value; sect_len = LE16_TO_CPU(xlt2->count) * sizeof(*hw->blk[block_id].xlt2.t); dst = (u8 *)hw->blk[block_id].xlt2.t; dst_len = hw->blk[block_id].xlt2.count * sizeof(*hw->blk[block_id].xlt2.t); break; case ICE_SID_PROFID_TCAM_SW: case ICE_SID_PROFID_TCAM_FD: case ICE_SID_PROFID_TCAM_RSS: case ICE_SID_PROFID_TCAM_ACL: case ICE_SID_PROFID_TCAM_PE: pid = (struct ice_prof_id_section *)sect; src = (u8 *)pid->entry; sect_len = LE16_TO_CPU(pid->count) * sizeof(*hw->blk[block_id].prof.t); dst = (u8 *)hw->blk[block_id].prof.t; dst_len = hw->blk[block_id].prof.count * sizeof(*hw->blk[block_id].prof.t); break; case ICE_SID_PROFID_REDIR_SW: case ICE_SID_PROFID_REDIR_FD: case ICE_SID_PROFID_REDIR_RSS: case ICE_SID_PROFID_REDIR_ACL: case ICE_SID_PROFID_REDIR_PE: pr = (struct ice_prof_redir_section *)sect; src = pr->redir_value; sect_len = LE16_TO_CPU(pr->count) * sizeof(*hw->blk[block_id].prof_redir.t); dst = hw->blk[block_id].prof_redir.t; dst_len = hw->blk[block_id].prof_redir.count * sizeof(*hw->blk[block_id].prof_redir.t); break; case ICE_SID_FLD_VEC_SW: case ICE_SID_FLD_VEC_FD: case ICE_SID_FLD_VEC_RSS: case ICE_SID_FLD_VEC_ACL: case ICE_SID_FLD_VEC_PE: es = (struct ice_sw_fv_section *)sect; src = (u8 *)es->fv; sect_len = (u32)(LE16_TO_CPU(es->count) * hw->blk[block_id].es.fvw) * sizeof(*hw->blk[block_id].es.t); dst = (u8 *)hw->blk[block_id].es.t; dst_len = (u32)(hw->blk[block_id].es.count * hw->blk[block_id].es.fvw) * sizeof(*hw->blk[block_id].es.t); break; default: return; } /* if the section offset exceeds destination length, terminate * table fill. */ if (offset > dst_len) return; /* if the sum of section size and offset exceed destination size * then we are out of bounds of the HW table size for that PF. * Changing section length to fill the remaining table space * of that PF. */ if ((offset + sect_len) > dst_len) sect_len = dst_len - offset; ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA); offset += sect_len; sect = ice_pkg_enum_section(NULL, &state, sid); } } /** * ice_fill_blk_tbls - Read package context for tables * @hw: pointer to the hardware structure * * Reads the current package contents and populates the driver * database with the data iteratively for all advanced feature * blocks. Assume that the HW tables have been allocated. 
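 *
 * Rough ordering sketch (hypothetical init flow; the exact call sites
 * live in the package download/init paths):
 *
 *	ice_init_hw_tbls(hw);	(allocate the shadow tables)
 *	(load the DDP package)
 *	ice_fill_blk_tbls(hw);	(copy package sections into the tables)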
*/ void ice_fill_blk_tbls(struct ice_hw *hw) { u8 i; for (i = 0; i < ICE_BLK_COUNT; i++) { enum ice_block blk_id = (enum ice_block)i; ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); } ice_init_sw_db(hw); } /** * ice_free_prof_map - free profile map * @hw: pointer to the hardware structure * @blk_idx: HW block index */ static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx) { struct ice_es *es = &hw->blk[blk_idx].es; struct ice_prof_map *del, *tmp; ice_acquire_lock(&es->prof_map_lock); LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map, ice_prof_map, list) { LIST_DEL(&del->list); ice_free(hw, del); } INIT_LIST_HEAD(&es->prof_map); ice_release_lock(&es->prof_map_lock); } /** * ice_free_flow_profs - free flow profile entries * @hw: pointer to the hardware structure * @blk_idx: HW block index */ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) { struct ice_flow_prof *p, *tmp; ice_acquire_lock(&hw->fl_profs_locks[blk_idx]); LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx], ice_flow_prof, l_entry) { LIST_DEL(&p->l_entry); ice_free(hw, p); } ice_release_lock(&hw->fl_profs_locks[blk_idx]); /* if driver is in reset and tables are being cleared * re-initialize the flow profile list heads */ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); } /** * ice_free_vsig_tbl - free complete VSIG table entries * @hw: pointer to the hardware structure * @blk: the HW block on which to free the VSIG table entries */ static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk) { u16 i; if (!hw->blk[blk].xlt2.vsig_tbl) return; for (i = 1; i < ICE_MAX_VSIGS; i++) if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) ice_vsig_free(hw, blk, i); } /** * ice_free_hw_tbls - free hardware table memory * @hw: pointer to the hardware structure */ void ice_free_hw_tbls(struct ice_hw *hw) { struct ice_rss_cfg *r, *rt; u8 i; for (i = 0; i < ICE_BLK_COUNT; i++) { if (hw->blk[i].is_list_init) { struct ice_es *es = &hw->blk[i].es; ice_free_prof_map(hw, i); ice_destroy_lock(&es->prof_map_lock); ice_free_flow_profs(hw, i); ice_destroy_lock(&hw->fl_profs_locks[i]); hw->blk[i].is_list_init = false; } ice_free_vsig_tbl(hw, (enum ice_block)i); ice_free(hw, hw->blk[i].xlt1.ptypes); ice_free(hw, hw->blk[i].xlt1.ptg_tbl); ice_free(hw, hw->blk[i].xlt1.t); ice_free(hw, hw->blk[i].xlt2.t); ice_free(hw, hw->blk[i].xlt2.vsig_tbl); ice_free(hw, hw->blk[i].xlt2.vsis); ice_free(hw, hw->blk[i].prof.t); ice_free(hw, hw->blk[i].prof_redir.t); ice_free(hw, hw->blk[i].es.t); ice_free(hw, hw->blk[i].es.ref_count); ice_free(hw, hw->blk[i].es.written); } LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head, ice_rss_cfg, l_entry) { LIST_DEL(&r->l_entry); ice_free(hw, r); } ice_destroy_lock(&hw->rss_locks); ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM); } /** * ice_init_flow_profs - init flow profile locks and list heads * @hw: pointer to the hardware structure * @blk_idx: HW block index */ static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) { ice_init_lock(&hw->fl_profs_locks[blk_idx]); INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); } /** * ice_clear_hw_tbls - clear HW tables and flow profiles * @hw: pointer to the hardware structure */ void ice_clear_hw_tbls(struct ice_hw *hw) { u8 i; for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; struct ice_prof_tcam *prof = 
&hw->blk[i].prof; struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; struct ice_es *es = &hw->blk[i].es; if (hw->blk[i].is_list_init) { ice_free_prof_map(hw, i); ice_free_flow_profs(hw, i); } ice_free_vsig_tbl(hw, (enum ice_block)i); ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes), ICE_NONDMA_MEM); ice_memset(xlt1->ptg_tbl, 0, ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl), ICE_NONDMA_MEM); ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t), ICE_NONDMA_MEM); ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis), ICE_NONDMA_MEM); ice_memset(xlt2->vsig_tbl, 0, xlt2->count * sizeof(*xlt2->vsig_tbl), ICE_NONDMA_MEM); ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t), ICE_NONDMA_MEM); ice_memset(prof->t, 0, prof->count * sizeof(*prof->t), ICE_NONDMA_MEM); ice_memset(prof_redir->t, 0, prof_redir->count * sizeof(*prof_redir->t), ICE_NONDMA_MEM); ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw, ICE_NONDMA_MEM); ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count), ICE_NONDMA_MEM); ice_memset(es->written, 0, es->count * sizeof(*es->written), ICE_NONDMA_MEM); } } /** * ice_init_hw_tbls - init hardware table memory * @hw: pointer to the hardware structure */ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) { u8 i; ice_init_lock(&hw->rss_locks); INIT_LIST_HEAD(&hw->rss_list_head); for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; struct ice_prof_tcam *prof = &hw->blk[i].prof; struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; struct ice_es *es = &hw->blk[i].es; u16 j; if (hw->blk[i].is_list_init) continue; ice_init_flow_profs(hw, i); ice_init_lock(&es->prof_map_lock); INIT_LIST_HEAD(&es->prof_map); hw->blk[i].is_list_init = true; hw->blk[i].overwrite = blk_sizes[i].overwrite; es->reverse = blk_sizes[i].reverse; xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; xlt1->count = blk_sizes[i].xlt1; xlt1->ptypes = (struct ice_ptg_ptype *) ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes)); if (!xlt1->ptypes) goto err; xlt1->ptg_tbl = (struct ice_ptg_entry *) ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl)); if (!xlt1->ptg_tbl) goto err; xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t)); if (!xlt1->t) goto err; xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; xlt2->count = blk_sizes[i].xlt2; xlt2->vsis = (struct ice_vsig_vsi *) ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis)); if (!xlt2->vsis) goto err; xlt2->vsig_tbl = (struct ice_vsig_entry *) ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl)); if (!xlt2->vsig_tbl) goto err; for (j = 0; j < xlt2->count; j++) INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t)); if (!xlt2->t) goto err; prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; prof->count = blk_sizes[i].prof_tcam; prof->max_prof_id = blk_sizes[i].prof_id; prof->cdid_bits = blk_sizes[i].prof_cdid_bits; prof->t = (struct ice_prof_tcam_entry *) ice_calloc(hw, prof->count, sizeof(*prof->t)); if (!prof->t) goto err; prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; prof_redir->count = blk_sizes[i].prof_redir; prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count, sizeof(*prof_redir->t)); if (!prof_redir->t) goto err; es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; es->count = blk_sizes[i].es; es->fvw = blk_sizes[i].fvw; es->t = (struct ice_fv_word *) ice_calloc(hw, (u32)(es->count * es->fvw), sizeof(*es->t)); if (!es->t) goto err; es->ref_count = (u16 *) ice_calloc(hw, es->count, 
sizeof(*es->ref_count)); if (!es->ref_count) goto err; es->written = (u8 *) ice_calloc(hw, es->count, sizeof(*es->written)); if (!es->written) goto err; } return ICE_SUCCESS; err: ice_free_hw_tbls(hw); return ICE_ERR_NO_MEMORY; } /** * ice_prof_gen_key - generate profile ID key * @hw: pointer to the HW struct * @blk: the block in which to write profile ID to * @ptg: packet type group (PTG) portion of key * @vsig: VSIG portion of key * @cdid: CDID portion of key * @flags: flag portion of key * @vl_msk: valid mask * @dc_msk: don't care mask * @nm_msk: never match mask * @key: output of profile ID key */ static enum ice_status ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], u8 key[ICE_TCAM_KEY_SZ]) { struct ice_prof_id_key inkey; inkey.xlt1 = ptg; inkey.xlt2_cdid = CPU_TO_LE16(vsig); inkey.flags = CPU_TO_LE16(flags); switch (hw->blk[blk].prof.cdid_bits) { case 0: break; case 2: #define ICE_CD_2_M 0xC000U #define ICE_CD_2_S 14 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M); inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S); break; case 4: #define ICE_CD_4_M 0xF000U #define ICE_CD_4_S 12 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M); inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S); break; case 8: #define ICE_CD_8_M 0xFF00U #define ICE_CD_8_S 16 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M); inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S); break; default: ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n"); break; } return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk, nm_msk, 0, ICE_TCAM_KEY_SZ / 2); } /** * ice_tcam_write_entry - write TCAM entry * @hw: pointer to the HW struct * @blk: the block in which to write profile ID to * @idx: the entry index to write to * @prof_id: profile ID * @ptg: packet type group (PTG) portion of key * @vsig: VSIG portion of key * @cdid: CDID portion of key * @flags: flag portion of key * @vl_msk: valid mask * @dc_msk: don't care mask * @nm_msk: never match mask */ static enum ice_status ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx, u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) { struct ice_prof_tcam_entry; enum ice_status status; status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); if (!status) { hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx); hw->blk[blk].prof.t[idx].prof_id = prof_id; } return status; } /** * ice_vsig_get_ref - returns number of VSIs belong to a VSIG * @hw: pointer to the hardware structure * @blk: HW block * @vsig: VSIG to query * @refs: pointer to variable to receive the reference count */ static enum ice_status ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *ptr; *refs = 0; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) return ICE_ERR_DOES_NOT_EXIST; ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; while (ptr) { (*refs)++; ptr = ptr->next_vsi; } return ICE_SUCCESS; } /** * ice_has_prof_vsig - check to see if VSIG has a specific profile * @hw: pointer to the hardware structure * @blk: HW block * @vsig: VSIG to check against * @hdl: profile handle */ static bool ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *ent; 
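	/* scan this VSIG's characteristic list for the profile handle */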
LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) if (ent->profile_cookie == hdl) return true; ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n", vsig); return false; } /** * ice_prof_bld_es - build profile ID extraction sequence changes * @hw: pointer to the HW struct * @blk: hardware block * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ static enum ice_status ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) { u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word); struct ice_chs_chg *tmp; LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) { u16 off = tmp->prof_id * hw->blk[blk].es.fvw; struct ice_pkg_es *p; u32 id; id = ice_sect_id(blk, ICE_VEC_TBL); p = (struct ice_pkg_es *) ice_pkg_buf_alloc_section(bld, id, ice_struct_size(p, es, 1) + vec_size - sizeof(p->es[0])); if (!p) return ICE_ERR_MAX_LIMIT; p->count = CPU_TO_LE16(1); p->offset = CPU_TO_LE16(tmp->prof_id); ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size, ICE_NONDMA_TO_NONDMA); } return ICE_SUCCESS; } /** * ice_prof_bld_tcam - build profile ID TCAM changes * @hw: pointer to the HW struct * @blk: hardware block * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ static enum ice_status ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) { struct ice_chs_chg *tmp; LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) { struct ice_prof_id_section *p; u32 id; id = ice_sect_id(blk, ICE_PROF_TCAM); p = (struct ice_prof_id_section *) ice_pkg_buf_alloc_section(bld, id, ice_struct_size(p, entry, 1)); if (!p) return ICE_ERR_MAX_LIMIT; p->count = CPU_TO_LE16(1); p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx); p->entry[0].prof_id = tmp->prof_id; ice_memcpy(p->entry[0].key, &hw->blk[blk].prof.t[tmp->tcam_idx].key, sizeof(hw->blk[blk].prof.t->key), ICE_NONDMA_TO_NONDMA); } return ICE_SUCCESS; } /** * ice_prof_bld_xlt1 - build XLT1 changes * @blk: hardware block * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ static enum ice_status ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) { struct ice_chs_chg *tmp; LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) { struct ice_xlt1_section *p; u32 id; id = ice_sect_id(blk, ICE_XLT1); p = (struct ice_xlt1_section *) ice_pkg_buf_alloc_section(bld, id, ice_struct_size(p, value, 1)); if (!p) return ICE_ERR_MAX_LIMIT; p->count = CPU_TO_LE16(1); p->offset = CPU_TO_LE16(tmp->ptype); p->value[0] = tmp->ptg; } return ICE_SUCCESS; } /** * ice_prof_bld_xlt2 - build XLT2 changes * @blk: hardware block * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ static enum ice_status ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) { struct ice_chs_chg *tmp; LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { struct ice_xlt2_section *p; u32 id; switch (tmp->type) { case ICE_VSIG_ADD: case ICE_VSI_MOVE: case ICE_VSIG_REM: id = ice_sect_id(blk, ICE_XLT2); p = (struct ice_xlt2_section *) ice_pkg_buf_alloc_section(bld, id, ice_struct_size(p, value, 1)); if (!p) return 
ICE_ERR_MAX_LIMIT; p->count = CPU_TO_LE16(1); p->offset = CPU_TO_LE16(tmp->vsi); p->value[0] = CPU_TO_LE16(tmp->vsig); break; default: break; } } return ICE_SUCCESS; } /** * ice_upd_prof_hw - update hardware using the change list * @hw: pointer to the HW struct * @blk: hardware block * @chgs: the list of changes to make in hardware */ static enum ice_status ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, struct LIST_HEAD_TYPE *chgs) { struct ice_buf_build *b; struct ice_chs_chg *tmp; enum ice_status status; u16 pkg_sects; u16 xlt1 = 0; u16 xlt2 = 0; u16 tcam = 0; u16 es = 0; u16 sects; /* count number of sections we need */ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { switch (tmp->type) { case ICE_PTG_ES_ADD: if (tmp->add_ptg) xlt1++; if (tmp->add_prof) es++; break; case ICE_TCAM_ADD: tcam++; break; case ICE_VSIG_ADD: case ICE_VSI_MOVE: case ICE_VSIG_REM: xlt2++; break; default: break; } } sects = xlt1 + xlt2 + tcam + es; if (!sects) return ICE_SUCCESS; /* Build update package buffer */ b = ice_pkg_buf_alloc(hw); if (!b) return ICE_ERR_NO_MEMORY; status = ice_pkg_buf_reserve_section(b, sects); if (status) goto error_tmp; /* Preserve order of table update: ES, TCAM, PTG, VSIG */ if (es) { status = ice_prof_bld_es(hw, blk, b, chgs); if (status) goto error_tmp; } if (tcam) { status = ice_prof_bld_tcam(hw, blk, b, chgs); if (status) goto error_tmp; } if (xlt1) { status = ice_prof_bld_xlt1(blk, b, chgs); if (status) goto error_tmp; } if (xlt2) { status = ice_prof_bld_xlt2(blk, b, chgs); if (status) goto error_tmp; } /* After package buffer build check if the section count in buffer is * non-zero and matches the number of sections detected for package * update. */ pkg_sects = ice_pkg_buf_get_active_sections(b); if (!pkg_sects || pkg_sects != sects) { status = ICE_ERR_INVAL_SIZE; goto error_tmp; } /* update package */ status = ice_update_pkg(hw, ice_pkg_buf(b), 1); if (status == ICE_ERR_AQ_ERROR) ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); error_tmp: ice_pkg_buf_free(hw, b); return status; } /** * ice_add_prof - add profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) * @es: extraction sequence (length of array is determined by the block) * * This function registers a profile, which matches a set of PTGs with a * particular extraction sequence. While the hardware profile is allocated * it will not be written until the first call to ice_add_flow that specifies * the ID value used here. 
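 *
 * Caller sketch (hypothetical names; es must be sized to the block's
 * field vector width, e.g. 24 words for the RSS block per blk_sizes):
 *
 *	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)];
 *	struct ice_fv_word es[24];
 *
 *	status = ice_add_prof(hw, ICE_BLK_RSS, my_prof_id, ptypes, es);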
*/ enum ice_status ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], struct ice_fv_word *es) { u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); struct ice_prof_map *prof; enum ice_status status; u8 byte = 0; u8 prof_id; ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); /* search for existing profile */ status = ice_find_prof_id(hw, blk, es, &prof_id); if (status) { /* allocate profile ID */ status = ice_alloc_prof_id(hw, blk, &prof_id); if (status) goto err_ice_add_prof; /* and write new es */ ice_write_es(hw, blk, prof_id, es); } ice_prof_inc_ref(hw, blk, prof_id); /* add profile info */ prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof)); if (!prof) goto err_ice_add_prof; prof->profile_cookie = id; prof->prof_id = prof_id; prof->ptg_cnt = 0; prof->context = 0; /* build list of ptgs */ while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { u8 bit; if (!ptypes[byte]) { bytes--; byte++; continue; } /* Examine 8 bits per byte */ ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte], BITS_PER_BYTE) { u16 ptype; u8 ptg; ptype = byte * BITS_PER_BYTE + bit; /* The package should place all ptypes in a non-zero * PTG, so the following call should never fail. */ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) continue; /* If PTG is already added, skip and continue */ if (ice_is_bit_set(ptgs_used, ptg)) continue; ice_set_bit(ptg, ptgs_used); prof->ptg[prof->ptg_cnt] = ptg; if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) break; } bytes--; byte++; } LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map); status = ICE_SUCCESS; err_ice_add_prof: ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_search_prof_id - Search for a profile tracking ID * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * * This will search for a profile tracking ID which was previously added. * The profile map lock should be held before calling this function. 
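 *
 * Locking sketch (the same pattern the callers below use):
 *
 *	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
 *	entry = ice_search_prof_id(hw, blk, id);
 *	ice_release_lock(&hw->blk[blk].es.prof_map_lock);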
*/ struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *entry = NULL; struct ice_prof_map *map; LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list) if (map->profile_cookie == id) { entry = map; break; } return entry; } /** * ice_set_prof_context - Set context for a given profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * @cntxt: context */ enum ice_status ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_prof_map *entry; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); entry = ice_search_prof_id(hw, blk, id); if (entry) { entry->context = cntxt; status = ICE_SUCCESS; } ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_get_prof_context - Get context for a given profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * @cntxt: pointer to variable to receive the context */ enum ice_status ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_prof_map *entry; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); entry = ice_search_prof_id(hw, blk, id); if (entry) { *cntxt = entry->context; status = ICE_SUCCESS; } ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_vsig_prof_id_count - count profiles in a VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsig: VSIG to remove the profile from */ static u16 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) { u16 idx = vsig & ICE_VSIG_IDX_M, count = 0; struct ice_vsig_prof *p; LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) count++; return count; } /** * ice_rel_tcam_idx - release a TCAM index * @hw: pointer to the HW struct * @blk: hardware block * @idx: the index to release */ static enum ice_status ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) { /* Masks to invoke a never match entry */ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; enum ice_status status; /* write the TCAM entry */ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, dc_msk, nm_msk); if (status) return status; /* release the TCAM entry */ status = ice_free_tcam_ent(hw, blk, idx); return status; } /** * ice_rem_prof_id - remove one profile from a VSIG * @hw: pointer to the HW struct * @blk: hardware block * @prof: pointer to profile structure to remove */ static enum ice_status ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, struct ice_vsig_prof *prof) { enum ice_status status; u16 i; for (i = 0; i < prof->tcam_count; i++) if (prof->tcam[i].in_use) { prof->tcam[i].in_use = false; status = ice_rel_tcam_idx(hw, blk, prof->tcam[i].tcam_idx); if (status) return ICE_ERR_HW_TABLE; } return ICE_SUCCESS; } /** * ice_rem_vsig - remove VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsig: the VSIG to remove * @chg: the change list */ static enum ice_status ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct LIST_HEAD_TYPE *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *vsi_cur; struct ice_vsig_prof *d, *t; enum ice_status status; /* remove TCAM entries */ LIST_FOR_EACH_ENTRY_SAFE(d, t, 
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) { status = ice_rem_prof_id(hw, blk, d); if (status) return status; LIST_DEL(&d->list); ice_free(hw, d); } /* Move all VSIS associated with this VSIG to the default VSIG */ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; /* If the VSIG has at least 1 VSI then iterate through the list * and remove the VSIs before deleting the group. */ if (vsi_cur) do { struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; struct ice_chs_chg *p; p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) return ICE_ERR_NO_MEMORY; p->type = ICE_VSIG_REM; p->orig_vsig = vsig; p->vsig = ICE_DEFAULT_VSIG; p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis; LIST_ADD(&p->list_entry, chg); vsi_cur = tmp; } while (vsi_cur); return ice_vsig_free(hw, blk, vsig); } /** * ice_rem_prof_id_vsig - remove a specific profile from a VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsig: VSIG to remove the profile from * @hdl: profile handle indicating which profile to remove * @chg: list to receive a record of changes */ static enum ice_status ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, struct LIST_HEAD_TYPE *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *p, *t; enum ice_status status; LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) if (p->profile_cookie == hdl) { if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) /* this is the last profile, remove the VSIG */ return ice_rem_vsig(hw, blk, vsig, chg); status = ice_rem_prof_id(hw, blk, p); if (!status) { LIST_DEL(&p->list); ice_free(hw, p); } return status; } return ICE_ERR_DOES_NOT_EXIST; } /** * ice_rem_flow_all - remove all flows with a particular profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID */ static enum ice_status ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_chs_chg *del, *tmp; enum ice_status status; struct LIST_HEAD_TYPE chg; u16 i; INIT_LIST_HEAD(&chg); for (i = 1; i < ICE_MAX_VSIGS; i++) if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) { if (ice_has_prof_vsig(hw, blk, i, id)) { status = ice_rem_prof_id_vsig(hw, blk, i, id, &chg); if (status) goto err_ice_rem_flow_all; } } status = ice_upd_prof_hw(hw, blk, &chg); err_ice_rem_flow_all: LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { LIST_DEL(&del->list_entry); ice_free(hw, del); } return status; } /** * ice_rem_prof - remove profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * * This will remove the profile specified by the ID parameter, which was * previously created through ice_add_prof. If any existing entries * are associated with this profile, they will be removed as well. 
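 *
 * Pairing sketch: remove by the same tracking ID that was passed to
 * ice_add_prof() (my_prof_id is a hypothetical caller-chosen cookie):
 *
 *	ice_add_prof(hw, ICE_BLK_RSS, my_prof_id, ptypes, es);
 *	ice_rem_prof(hw, ICE_BLK_RSS, my_prof_id);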
*/ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *pmap; enum ice_status status; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_rem_prof; } /* remove all flows with this profile */ status = ice_rem_flow_all(hw, blk, pmap->profile_cookie); if (status) goto err_ice_rem_prof; /* dereference profile, and possibly remove */ ice_prof_dec_ref(hw, blk, pmap->prof_id); LIST_DEL(&pmap->list); ice_free(hw, pmap); err_ice_rem_prof: ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_get_prof - get profile * @hw: pointer to the HW struct * @blk: hardware block * @hdl: profile handle * @chg: change list */ static enum ice_status ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct LIST_HEAD_TYPE *chg) { enum ice_status status = ICE_SUCCESS; struct ice_prof_map *map; struct ice_chs_chg *p; u16 i; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_get_prof; } for (i = 0; i < map->ptg_cnt; i++) if (!hw->blk[blk].es.written[map->prof_id]) { /* add ES to change list */ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) { status = ICE_ERR_NO_MEMORY; goto err_ice_get_prof; } p->type = ICE_PTG_ES_ADD; p->ptype = 0; p->ptg = map->ptg[i]; p->add_ptg = 0; p->add_prof = 1; p->prof_id = map->prof_id; hw->blk[blk].es.written[map->prof_id] = true; LIST_ADD(&p->list_entry, chg); } err_ice_get_prof: ice_release_lock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ return status; } /** * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsig: VSIG from which to copy the list * @lst: output list * * This routine makes a copy of the list of profiles in the specified VSIG. 
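* The copies are allocated with ice_memdup(), so the caller owns them and * must free each entry; on an allocation failure the partially built list is * released and ICE_ERR_NO_MEMORY is returned.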
*/ static enum ice_status ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct LIST_HEAD_TYPE *lst) { struct ice_vsig_prof *ent1, *ent2; u16 idx = vsig & ICE_VSIG_IDX_M; LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) { struct ice_vsig_prof *p; /* copy to the input list */ p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p), ICE_NONDMA_TO_NONDMA); if (!p) goto err_ice_get_profs_vsig; LIST_ADD_TAIL(&p->list, lst); } return ICE_SUCCESS; err_ice_get_profs_vsig: LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) { LIST_DEL(&ent1->list); ice_free(hw, ent1); } return ICE_ERR_NO_MEMORY; } /** * ice_add_prof_to_lst - add profile entry to a list * @hw: pointer to the HW struct * @blk: hardware block * @lst: the list to be added to * @hdl: profile handle of entry to add */ static enum ice_status ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct LIST_HEAD_TYPE *lst, u64 hdl) { enum ice_status status = ICE_SUCCESS; struct ice_prof_map *map; struct ice_vsig_prof *p; u16 i; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); if (!map) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_add_prof_to_lst; } p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p)); if (!p) { status = ICE_ERR_NO_MEMORY; goto err_ice_add_prof_to_lst; } p->profile_cookie = map->profile_cookie; p->prof_id = map->prof_id; p->tcam_count = map->ptg_cnt; for (i = 0; i < map->ptg_cnt; i++) { p->tcam[i].prof_id = map->prof_id; p->tcam[i].tcam_idx = ICE_INVALID_TCAM; p->tcam[i].ptg = map->ptg[i]; } LIST_ADD(&p->list, lst); err_ice_add_prof_to_lst: ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_move_vsi - move VSI to another VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsi: the VSI to move * @vsig: the VSIG to move the VSI to * @chg: the change list */ static enum ice_status ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, struct LIST_HEAD_TYPE *chg) { enum ice_status status; struct ice_chs_chg *p; u16 orig_vsig; p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) return ICE_ERR_NO_MEMORY; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (!status) status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); if (status) { ice_free(hw, p); return status; } p->type = ICE_VSI_MOVE; p->vsi = vsi; p->orig_vsig = orig_vsig; p->vsig = vsig; LIST_ADD(&p->list_entry, chg); return ICE_SUCCESS; } /** * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list * @hw: pointer to the HW struct * @idx: the index of the TCAM entry to remove * @chg: the list of change structures to search */ static void ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg) { struct ice_chs_chg *pos, *tmp; LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) { LIST_DEL(&tmp->list_entry); ice_free(hw, tmp); } } /** * ice_prof_tcam_ena_dis - add enable or disable TCAM change * @hw: pointer to the HW struct * @blk: hardware block * @enable: true to enable, false to disable * @vsig: the VSIG of the TCAM entry * @tcam: pointer to the TCAM info structure of the TCAM to disable * @chg: the change list * * This function appends an enable or disable TCAM entry to the change log */ static enum ice_status ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, u16 vsig, struct ice_tcam_inf *tcam, struct LIST_HEAD_TYPE *chg) { enum ice_status status; struct
ice_chs_chg *p; u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; /* if disabling, free the TCAM */ if (!enable) { status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx); /* if we have already created a change for this TCAM entry, then * we need to remove that entry, in order to prevent writing to * a TCAM entry we no longer will have ownership of. */ ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg); tcam->tcam_idx = 0; tcam->in_use = 0; return status; } /* for re-enabling, reallocate a TCAM */ status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx); if (status) return status; /* add TCAM to change list */ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) return ICE_ERR_NO_MEMORY; status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, tcam->ptg, vsig, 0, 0, vl_msk, dc_msk, nm_msk); if (status) goto err_ice_prof_tcam_ena_dis; tcam->in_use = 1; p->type = ICE_TCAM_ADD; p->add_tcam_idx = true; p->prof_id = tcam->prof_id; p->ptg = tcam->ptg; p->vsig = 0; p->tcam_idx = tcam->tcam_idx; /* log change */ LIST_ADD(&p->list_entry, chg); return ICE_SUCCESS; err_ice_prof_tcam_ena_dis: ice_free(hw, p); return status; } /** * ice_adj_prof_priorities - adjust profile based on priorities * @hw: pointer to the HW struct * @blk: hardware block * @vsig: the VSIG for which to adjust profile priorities * @chg: the change list */ static enum ice_status ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct LIST_HEAD_TYPE *chg) { ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); enum ice_status status = ICE_SUCCESS; struct ice_vsig_prof *t; u16 idx; ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); idx = vsig & ICE_VSIG_IDX_M; /* Priority is based on the order in which the profiles are added. The * newest added profile has highest priority and the oldest added * profile has the lowest priority. Since the profile property list for * a VSIG is sorted from newest to oldest, this code traverses the list * in order and enables the first of each PTG that it finds (that is not * already enabled); it also disables any duplicate PTGs that it finds * in the older profiles (that are currently enabled). */ LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) { u16 i; for (i = 0; i < t->tcam_count; i++) { bool used; /* Scan the priorities from newest to oldest. * Make sure that the newest profiles take priority. 
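* For example, if the newest and an older profile both use PTG 5, only * the newest profile's TCAM entry for PTG 5 remains enabled; the older * duplicate is released below.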
*/ used = ice_is_bit_set(ptgs_used, t->tcam[i].ptg); if (used && t->tcam[i].in_use) { /* need to mark this PTG as never match, as it * was already in use and therefore duplicate * (and lower priority) */ status = ice_prof_tcam_ena_dis(hw, blk, false, vsig, &t->tcam[i], chg); if (status) return status; } else if (!used && !t->tcam[i].in_use) { /* need to enable this PTG, as it is not in use * and not enabled (highest priority) */ status = ice_prof_tcam_ena_dis(hw, blk, true, vsig, &t->tcam[i], chg); if (status) return status; } /* keep track of used PTGs */ ice_set_bit(t->tcam[i].ptg, ptgs_used); } } return status; } /** * ice_add_prof_id_vsig - add profile to VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsig: the VSIG to which this profile is to be added * @hdl: the profile handle indicating the profile to add * @rev: true to add entries to the end of the list * @chg: the change list */ static enum ice_status ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, bool rev, struct LIST_HEAD_TYPE *chg) { /* Masks that ignore flags */ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; enum ice_status status = ICE_SUCCESS; struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; u16 vsig_idx, i; /* Error, if this VSIG already has this profile */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) return ICE_ERR_ALREADY_EXISTS; /* new VSIG profile structure */ t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t)); if (!t) return ICE_ERR_NO_MEMORY; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_add_prof_id_vsig; } t->profile_cookie = map->profile_cookie; t->prof_id = map->prof_id; t->tcam_count = map->ptg_cnt; /* create TCAM entries */ for (i = 0; i < map->ptg_cnt; i++) { u16 tcam_idx; /* add TCAM to change list */ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) { status = ICE_ERR_NO_MEMORY; goto err_ice_add_prof_id_vsig; } /* allocate the TCAM entry index */ status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx); if (status) { ice_free(hw, p); goto err_ice_add_prof_id_vsig; } t->tcam[i].ptg = map->ptg[i]; t->tcam[i].prof_id = map->prof_id; t->tcam[i].tcam_idx = tcam_idx; t->tcam[i].in_use = true; p->type = ICE_TCAM_ADD; p->add_tcam_idx = true; p->prof_id = t->tcam[i].prof_id; p->ptg = t->tcam[i].ptg; p->vsig = vsig; p->tcam_idx = t->tcam[i].tcam_idx; /* write the TCAM entry */ status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx, t->tcam[i].prof_id, t->tcam[i].ptg, vsig, 0, 0, vl_msk, dc_msk, nm_msk); if (status) { ice_free(hw, p); goto err_ice_add_prof_id_vsig; } /* log change */ LIST_ADD(&p->list_entry, chg); } /* add profile to VSIG */ vsig_idx = vsig & ICE_VSIG_IDX_M; if (rev) LIST_ADD_TAIL(&t->list, &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); else LIST_ADD(&t->list, &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; err_ice_add_prof_id_vsig: ice_release_lock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ ice_free(hw, t); return status; } /** * ice_create_prof_id_vsig - add a new VSIG with a single profile * @hw: pointer to the HW struct * @blk: hardware block * @vsi: the initial VSI that will be in VSIG * @hdl: the
profile handle of the profile that will be added to the VSIG * @chg: the change list */ static enum ice_status ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl, struct LIST_HEAD_TYPE *chg) { enum ice_status status; struct ice_chs_chg *p; u16 new_vsig; p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) return ICE_ERR_NO_MEMORY; new_vsig = ice_vsig_alloc(hw, blk); if (!new_vsig) { status = ICE_ERR_HW_TABLE; goto err_ice_create_prof_id_vsig; } status = ice_move_vsi(hw, blk, vsi, new_vsig, chg); if (status) goto err_ice_create_prof_id_vsig; status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg); if (status) goto err_ice_create_prof_id_vsig; p->type = ICE_VSIG_ADD; p->vsi = vsi; p->orig_vsig = ICE_DEFAULT_VSIG; p->vsig = new_vsig; LIST_ADD(&p->list_entry, chg); return ICE_SUCCESS; err_ice_create_prof_id_vsig: /* let caller clean up the change list */ ice_free(hw, p); return status; } /** * ice_create_vsig_from_lst - create a new VSIG with a list of profiles * @hw: pointer to the HW struct * @blk: hardware block * @vsi: the initial VSI that will be in VSIG * @lst: the list of profiles that will be added to the VSIG * @new_vsig: return of new VSIG * @chg: the change list */ static enum ice_status ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi, struct LIST_HEAD_TYPE *lst, u16 *new_vsig, struct LIST_HEAD_TYPE *chg) { struct ice_vsig_prof *t; enum ice_status status; u16 vsig; vsig = ice_vsig_alloc(hw, blk); if (!vsig) return ICE_ERR_HW_TABLE; status = ice_move_vsi(hw, blk, vsi, vsig, chg); if (status) return status; LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) { /* Reverse the order here since we are copying the list */ status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie, true, chg); if (status) return status; } *new_vsig = vsig; return ICE_SUCCESS; } /** * ice_find_prof_vsig - find a VSIG with a specific profile handle * @hw: pointer to the HW struct * @blk: hardware block * @hdl: the profile handle of the profile to search for * @vsig: returns the VSIG with the matching profile */ static bool ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig) { struct ice_vsig_prof *t; enum ice_status status; struct LIST_HEAD_TYPE lst; INIT_LIST_HEAD(&lst); t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t)); if (!t) return false; t->profile_cookie = hdl; LIST_ADD(&t->list, &lst); status = ice_find_dup_props_vsig(hw, blk, &lst, vsig); LIST_DEL(&t->list); ice_free(hw, t); return status == ICE_SUCCESS; } /** * ice_add_vsi_flow - add VSI flow * @hw: pointer to the HW struct * @blk: hardware block * @vsi: input VSI * @vsig: target VSIG to include the input VSI * * Calling this function will add the VSI to a given VSIG and * update the HW tables accordingly. This call can be used to * add multiple VSIs to a VSIG if we know beforehand that those * VSIs have the same characteristics as the VSIG. This avoids * the overhead of generating a new VSIG and TCAMs and then rolling * them back once a matching VSIG is found.
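* Note that the target VSIG must already exist; moving a VSI into the * default VSIG (index 0) this way is rejected with ICE_ERR_PARAM.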
*/ enum ice_status ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_chs_chg *tmp, *del; struct LIST_HEAD_TYPE chg; enum ice_status status; /* if target VSIG is default the move is invalid */ if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG) return ICE_ERR_PARAM; INIT_LIST_HEAD(&chg); /* move VSI to the VSIG that matches */ status = ice_move_vsi(hw, blk, vsi, vsig, &chg); /* update hardware if success */ if (!status) status = ice_upd_prof_hw(hw, blk, &chg); LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { LIST_DEL(&del->list_entry); ice_free(hw, del); } return status; } /** * ice_add_prof_id_flow - add profile flow * @hw: pointer to the HW struct * @blk: hardware block * @vsi: the VSI to enable with the profile specified by ID * @hdl: profile handle * * Calling this function will update the hardware tables to enable the * profile indicated by the ID parameter for the VSI specified. Once * successfully called, the flow will be enabled. */ enum ice_status ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct LIST_HEAD_TYPE union_lst; enum ice_status status; struct LIST_HEAD_TYPE chg; u16 vsig; INIT_LIST_HEAD(&union_lst); INIT_LIST_HEAD(&chg); /* Get profile */ status = ice_get_prof(hw, blk, hdl, &chg); if (status) return status; /* determine if VSI is already part of a VSIG */ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); if (!status && vsig) { bool only_vsi; u16 or_vsig; u16 ref; /* found in VSIG */ or_vsig = vsig; /* make sure that there is no overlap/conflict between the new * characteristics and the existing ones; we don't support that * scenario */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) { status = ICE_ERR_ALREADY_EXISTS; goto err_ice_add_prof_id_flow; } /* last VSI in the VSIG? */ status = ice_vsig_get_ref(hw, blk, vsig, &ref); if (status) goto err_ice_add_prof_id_flow; only_vsi = (ref == 1); /* create a union of the current profiles and the one being * added */ status = ice_get_profs_vsig(hw, blk, vsig, &union_lst); if (status) goto err_ice_add_prof_id_flow; status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl); if (status) goto err_ice_add_prof_id_flow; /* search for an existing VSIG with an exact characteristic * match */ status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig); if (!status) { /* move VSI to the VSIG that matches */ status = ice_move_vsi(hw, blk, vsi, vsig, &chg); if (status) goto err_ice_add_prof_id_flow; /* VSI has been moved out of or_vsig. If the or_vsig had * only that VSI it is now empty and can be removed. */ if (only_vsi) { status = ice_rem_vsig(hw, blk, or_vsig, &chg); if (status) goto err_ice_add_prof_id_flow; } } else if (only_vsi) { /* If the original VSIG only contains one VSI, then it * will be the requesting VSI. In this case the VSI is * not sharing entries and we can simply add the new * profile to the VSIG.
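* No new VSIG or TCAM rollback is needed in this case, since no * other VSI references the entries being modified.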
*/ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false, &chg); if (status) goto err_ice_add_prof_id_flow; /* Adjust priorities */ status = ice_adj_prof_priorities(hw, blk, vsig, &chg); if (status) goto err_ice_add_prof_id_flow; } else { /* No match, so we need a new VSIG */ status = ice_create_vsig_from_lst(hw, blk, vsi, &union_lst, &vsig, &chg); if (status) goto err_ice_add_prof_id_flow; /* Adjust priorities */ status = ice_adj_prof_priorities(hw, blk, vsig, &chg); if (status) goto err_ice_add_prof_id_flow; } } else { /* need to find or add a VSIG */ /* search for an existing VSIG with an exact characteristic * match */ if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) { /* found an exact match */ /* add or move VSI to the VSIG that matches */ status = ice_move_vsi(hw, blk, vsi, vsig, &chg); if (status) goto err_ice_add_prof_id_flow; } else { /* we did not find an exact match */ /* we need to add a VSIG */ status = ice_create_prof_id_vsig(hw, blk, vsi, hdl, &chg); if (status) goto err_ice_add_prof_id_flow; } } /* update hardware */ if (!status) status = ice_upd_prof_hw(hw, blk, &chg); err_ice_add_prof_id_flow: LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { LIST_DEL(&del->list_entry); ice_free(hw, del); } LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) { LIST_DEL(&del1->list); ice_free(hw, del1); } return status; } /** * ice_add_flow - add flow * @hw: pointer to the HW struct * @blk: hardware block * @vsi: array of VSIs to enable with the profile specified by ID * @count: number of elements in the VSI array * @id: profile tracking ID * * Calling this function will update the hardware tables to enable the * profile indicated by the ID parameter for the VSIs specified in the VSI * array. Once successfully called, the flow will be enabled. */ enum ice_status ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id) { enum ice_status status; u16 i; for (i = 0; i < count; i++) { status = ice_add_prof_id_flow(hw, blk, vsi[i], id); if (status) return status; } return ICE_SUCCESS; } /** * ice_rem_prof_from_list - remove a profile from list * @hw: pointer to the HW struct * @lst: list to remove the profile from * @hdl: the profile handle indicating the profile to remove */ static enum ice_status ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl) { struct ice_vsig_prof *ent, *tmp; LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) if (ent->profile_cookie == hdl) { LIST_DEL(&ent->list); ice_free(hw, ent); return ICE_SUCCESS; } return ICE_ERR_DOES_NOT_EXIST; } /** * ice_rem_prof_id_flow - remove flow * @hw: pointer to the HW struct * @blk: hardware block * @vsi: the VSI from which to remove the profile specified by ID * @hdl: profile tracking handle * * Calling this function will update the hardware tables to remove the * profile indicated by the ID parameter for the VSI specified. Once * successfully called, the flow will be disabled.
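* Depending on what remains in the VSIG, the VSI is left in a pruned VSIG, * moved to the default VSIG, moved to an existing VSIG whose profile list * matches, or moved to a newly created VSIG.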
*/ enum ice_status ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl) { struct ice_vsig_prof *tmp1, *del1; struct ice_chs_chg *tmp, *del; struct LIST_HEAD_TYPE chg, copy; enum ice_status status; u16 vsig; INIT_LIST_HEAD(&copy); INIT_LIST_HEAD(&chg); /* determine if VSI is already part of a VSIG */ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig); if (!status && vsig) { bool last_profile; bool only_vsi; u16 ref; /* found in VSIG */ last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1; status = ice_vsig_get_ref(hw, blk, vsig, &ref); if (status) goto err_ice_rem_prof_id_flow; only_vsi = (ref == 1); if (only_vsi) { /* If the original VSIG only contains one reference, * which will be the requesting VSI, then the VSI is not * sharing entries and we can simply remove the specific * characteristics from the VSIG. */ if (last_profile) { /* If there are no profiles left for this VSIG, * then simply remove the VSIG. */ status = ice_rem_vsig(hw, blk, vsig, &chg); if (status) goto err_ice_rem_prof_id_flow; } else { status = ice_rem_prof_id_vsig(hw, blk, vsig, hdl, &chg); if (status) goto err_ice_rem_prof_id_flow; /* Adjust priorities */ status = ice_adj_prof_priorities(hw, blk, vsig, &chg); if (status) goto err_ice_rem_prof_id_flow; } } else { /* Make a copy of the VSIG's list of Profiles */ status = ice_get_profs_vsig(hw, blk, vsig, &copy); if (status) goto err_ice_rem_prof_id_flow; /* Remove specified profile entry from the list */ status = ice_rem_prof_from_list(hw, &copy, hdl); if (status) goto err_ice_rem_prof_id_flow; if (LIST_EMPTY(&copy)) { status = ice_move_vsi(hw, blk, vsi, ICE_DEFAULT_VSIG, &chg); if (status) goto err_ice_rem_prof_id_flow; } else if (!ice_find_dup_props_vsig(hw, blk, &copy, &vsig)) { /* Found an existing VSIG with a matching profile * list; move the VSI to that VSIG. */ status = ice_move_vsi(hw, blk, vsi, vsig, &chg); if (status) goto err_ice_rem_prof_id_flow; } else { /* since no existing VSIG supports this * characteristic pattern, we need to create a * new VSIG and TCAM entries */ status = ice_create_vsig_from_lst(hw, blk, vsi, &copy, &vsig, &chg); if (status) goto err_ice_rem_prof_id_flow; /* Adjust priorities */ status = ice_adj_prof_priorities(hw, blk, vsig, &chg); if (status) goto err_ice_rem_prof_id_flow; } } } else { status = ICE_ERR_DOES_NOT_EXIST; } /* update hardware tables */ if (!status) status = ice_upd_prof_hw(hw, blk, &chg); err_ice_rem_prof_id_flow: LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { LIST_DEL(&del->list_entry); ice_free(hw, del); } LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) { LIST_DEL(&del1->list); ice_free(hw, del1); } return status; } /** * ice_rem_flow - remove flow * @hw: pointer to the HW struct * @blk: hardware block * @vsi: array of VSIs from which to remove the profile specified by ID * @count: number of elements in the VSI array * @id: profile tracking ID * * The function will remove flows from the specified VSIs that were enabled * using ice_add_flow. The ID value indicates which profile will be * removed. Once successfully called, the flow will be disabled.
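* Processing stops at the first VSI that fails; flows on VSIs earlier in * the array will already have been removed at that point.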
*/ enum ice_status ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id) { enum ice_status status; u16 i; for (i = 0; i < count; i++) { status = ice_rem_prof_id_flow(hw, blk, vsi[i], id); if (status) return status; } return ICE_SUCCESS; } diff --git a/sys/dev/ice/ice_flex_pipe.h b/sys/dev/ice/ice_flex_pipe.h index 7ec919e3bc61..5c1dd7157537 100644 --- a/sys/dev/ice/ice_flex_pipe.h +++ b/sys/dev/ice/ice_flex_pipe.h @@ -1,124 +1,128 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _ICE_FLEX_PIPE_H_ #define _ICE_FLEX_PIPE_H_ #include "ice_type.h" /* Package minimal version supported */ #define ICE_PKG_SUPP_VER_MAJ 1 #define ICE_PKG_SUPP_VER_MNR 3 /* Package format version */ #define ICE_PKG_FMT_VER_MAJ 1 #define ICE_PKG_FMT_VER_MNR 0 #define ICE_PKG_FMT_VER_UPD 0 #define ICE_PKG_FMT_VER_DFT 0 #define ICE_PKG_CNT 4 enum ice_status ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); enum ice_status ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off); enum ice_status ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, u16 *value); void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, ice_bitmap_t *bm); void ice_init_prof_result_bm(struct ice_hw *hw); enum ice_status ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list); enum ice_status -ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, struct ice_sq_cd *cd); - -enum ice_status ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); +enum ice_status +ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd); bool ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, u16 *port); enum ice_status ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port); enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all); bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index); bool ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type); enum ice_status ice_replay_tunnels(struct ice_hw *hw); /* XLT1/PType group functions */ enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk); void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg); /* XLT2/VSI group functions */ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk); enum ice_status ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig); enum ice_status ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], struct ice_fv_word *es); struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); enum ice_status ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig); enum ice_status ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); enum ice_status ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); enum ice_status ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt); enum ice_status ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt); enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); enum ice_status ice_init_hw_tbls(struct ice_hw *hw); void ice_free_seg(struct ice_hw *hw); void ice_fill_blk_tbls(struct ice_hw *hw); void ice_clear_hw_tbls(struct ice_hw *hw); void ice_free_hw_tbls(struct ice_hw *hw); enum ice_status ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id); enum ice_status ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id); enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); +struct ice_buf_build * 
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, + void **section); +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld); +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld); #endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/sys/dev/ice/ice_flex_type.h b/sys/dev/ice/ice_flex_type.h index e96d5f8028a8..31a7135f7e22 100644 --- a/sys/dev/ice/ice_flex_type.h +++ b/sys/dev/ice/ice_flex_type.h @@ -1,733 +1,759 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _ICE_FLEX_TYPE_H_ #define _ICE_FLEX_TYPE_H_ #define ICE_FV_OFFSET_INVAL 0x1FF #pragma pack(1) /* Extraction Sequence (Field Vector) Table */ struct ice_fv_word { u8 prot_id; u16 off; /* Offset within the protocol header */ u8 resvrd; }; #pragma pack() #define ICE_MAX_NUM_PROFILES 256 #define ICE_MAX_FV_WORDS 48 struct ice_fv { struct ice_fv_word ew[ICE_MAX_FV_WORDS]; }; /* Package and segment headers and tables */ struct ice_pkg_hdr { struct ice_pkg_ver pkg_format_ver; __le32 seg_count; __le32 seg_offset[STRUCT_HACK_VAR_LEN]; }; /* generic segment */ struct ice_generic_seg_hdr { #define SEGMENT_TYPE_METADATA 0x00000001 #define SEGMENT_TYPE_ICE 0x00000010 __le32 seg_type; struct ice_pkg_ver seg_format_ver; __le32 seg_size; char seg_id[ICE_PKG_NAME_SIZE]; }; /* ice specific segment */ union ice_device_id { struct { __le16 device_id; __le16 vendor_id; } dev_vend_id; __le32 id; }; struct ice_device_id_entry { union ice_device_id device; union ice_device_id sub_device; }; struct ice_seg { struct ice_generic_seg_hdr hdr; __le32 device_table_count; struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN]; }; struct ice_nvm_table { __le32 table_count; __le32 vers[STRUCT_HACK_VAR_LEN]; }; struct ice_buf { #define ICE_PKG_BUF_SIZE 4096 u8 buf[ICE_PKG_BUF_SIZE]; }; struct ice_buf_table { __le32 buf_count; struct ice_buf buf_array[STRUCT_HACK_VAR_LEN]; }; /* global metadata specific segment */ struct ice_global_metadata_seg { struct ice_generic_seg_hdr hdr; struct ice_pkg_ver pkg_ver; __le32 rsvd; char pkg_name[ICE_PKG_NAME_SIZE]; }; #define ICE_MIN_S_OFF 12 #define ICE_MAX_S_OFF 4095 #define ICE_MIN_S_SZ 1 #define ICE_MAX_S_SZ 4084 /* section information */ struct ice_section_entry { __le32 type; __le16 offset; __le16 size; }; #define ICE_MIN_S_COUNT 1 #define ICE_MAX_S_COUNT 511 #define ICE_MIN_S_DATA_END 12 #define ICE_MAX_S_DATA_END 4096 #define ICE_METADATA_BUF 0x80000000 struct ice_buf_hdr { __le16 section_count; __le16 data_end; struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN]; }; #define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ (ent_sz)) /* ice package section IDs */ #define ICE_SID_METADATA 1 #define ICE_SID_XLT0_SW 10 #define ICE_SID_XLT_KEY_BUILDER_SW 11 #define ICE_SID_XLT1_SW 12 #define ICE_SID_XLT2_SW 13 #define ICE_SID_PROFID_TCAM_SW 14 #define ICE_SID_PROFID_REDIR_SW 15 #define ICE_SID_FLD_VEC_SW 16 #define ICE_SID_CDID_KEY_BUILDER_SW 17 #define ICE_SID_CDID_REDIR_SW 18 #define ICE_SID_XLT0_ACL 20 #define ICE_SID_XLT_KEY_BUILDER_ACL 21 #define ICE_SID_XLT1_ACL 22 #define ICE_SID_XLT2_ACL 23 #define ICE_SID_PROFID_TCAM_ACL 24 #define ICE_SID_PROFID_REDIR_ACL 25 #define ICE_SID_FLD_VEC_ACL 26 #define ICE_SID_CDID_KEY_BUILDER_ACL 27 #define ICE_SID_CDID_REDIR_ACL 28 #define ICE_SID_XLT0_FD 30 #define ICE_SID_XLT_KEY_BUILDER_FD 31 #define ICE_SID_XLT1_FD 32 #define ICE_SID_XLT2_FD 33 #define ICE_SID_PROFID_TCAM_FD 34 #define ICE_SID_PROFID_REDIR_FD 35 #define ICE_SID_FLD_VEC_FD 36 #define ICE_SID_CDID_KEY_BUILDER_FD 37 #define ICE_SID_CDID_REDIR_FD 38 #define ICE_SID_XLT0_RSS 40 #define ICE_SID_XLT_KEY_BUILDER_RSS 41 #define ICE_SID_XLT1_RSS 42 #define ICE_SID_XLT2_RSS 43 #define ICE_SID_PROFID_TCAM_RSS 44 #define ICE_SID_PROFID_REDIR_RSS 45 #define ICE_SID_FLD_VEC_RSS 46 #define ICE_SID_CDID_KEY_BUILDER_RSS 47 #define ICE_SID_CDID_REDIR_RSS 48 #define ICE_SID_RXPARSER_CAM 50 #define ICE_SID_RXPARSER_NOMATCH_CAM 51 #define ICE_SID_RXPARSER_IMEM 52 #define 
ICE_SID_RXPARSER_XLT0_BUILDER 53 #define ICE_SID_RXPARSER_NODE_PTYPE 54 #define ICE_SID_RXPARSER_MARKER_PTYPE 55 #define ICE_SID_RXPARSER_BOOST_TCAM 56 #define ICE_SID_RXPARSER_PROTO_GRP 57 #define ICE_SID_RXPARSER_METADATA_INIT 58 #define ICE_SID_RXPARSER_XLT0 59 #define ICE_SID_TXPARSER_CAM 60 #define ICE_SID_TXPARSER_NOMATCH_CAM 61 #define ICE_SID_TXPARSER_IMEM 62 #define ICE_SID_TXPARSER_XLT0_BUILDER 63 #define ICE_SID_TXPARSER_NODE_PTYPE 64 #define ICE_SID_TXPARSER_MARKER_PTYPE 65 #define ICE_SID_TXPARSER_BOOST_TCAM 66 #define ICE_SID_TXPARSER_PROTO_GRP 67 #define ICE_SID_TXPARSER_METADATA_INIT 68 #define ICE_SID_TXPARSER_XLT0 69 #define ICE_SID_RXPARSER_INIT_REDIR 70 #define ICE_SID_TXPARSER_INIT_REDIR 71 #define ICE_SID_RXPARSER_MARKER_GRP 72 #define ICE_SID_TXPARSER_MARKER_GRP 73 #define ICE_SID_RXPARSER_LAST_PROTO 74 #define ICE_SID_TXPARSER_LAST_PROTO 75 #define ICE_SID_RXPARSER_PG_SPILL 76 #define ICE_SID_TXPARSER_PG_SPILL 77 #define ICE_SID_RXPARSER_NOMATCH_SPILL 78 #define ICE_SID_TXPARSER_NOMATCH_SPILL 79 #define ICE_SID_XLT0_PE 80 #define ICE_SID_XLT_KEY_BUILDER_PE 81 #define ICE_SID_XLT1_PE 82 #define ICE_SID_XLT2_PE 83 #define ICE_SID_PROFID_TCAM_PE 84 #define ICE_SID_PROFID_REDIR_PE 85 #define ICE_SID_FLD_VEC_PE 86 #define ICE_SID_CDID_KEY_BUILDER_PE 87 #define ICE_SID_CDID_REDIR_PE 88 /* Label Metadata section IDs */ #define ICE_SID_LBL_FIRST 0x80000010 #define ICE_SID_LBL_RXPARSER_IMEM 0x80000010 #define ICE_SID_LBL_TXPARSER_IMEM 0x80000011 #define ICE_SID_LBL_RESERVED_12 0x80000012 #define ICE_SID_LBL_RESERVED_13 0x80000013 #define ICE_SID_LBL_RXPARSER_MARKER 0x80000014 #define ICE_SID_LBL_TXPARSER_MARKER 0x80000015 #define ICE_SID_LBL_PTYPE 0x80000016 #define ICE_SID_LBL_PROTOCOL_ID 0x80000017 #define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 #define ICE_SID_LBL_TXPARSER_TMEM 0x80000019 #define ICE_SID_LBL_RXPARSER_PG 0x8000001A #define ICE_SID_LBL_TXPARSER_PG 0x8000001B #define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C #define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D #define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E #define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F #define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020 #define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021 #define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022 #define ICE_SID_LBL_FLAG 0x80000023 #define ICE_SID_LBL_REG 0x80000024 #define ICE_SID_LBL_SW_PTG 0x80000025 #define ICE_SID_LBL_ACL_PTG 0x80000026 #define ICE_SID_LBL_PE_PTG 0x80000027 #define ICE_SID_LBL_RSS_PTG 0x80000028 #define ICE_SID_LBL_FD_PTG 0x80000029 #define ICE_SID_LBL_SW_VSIG 0x8000002A #define ICE_SID_LBL_ACL_VSIG 0x8000002B #define ICE_SID_LBL_PE_VSIG 0x8000002C #define ICE_SID_LBL_RSS_VSIG 0x8000002D #define ICE_SID_LBL_FD_VSIG 0x8000002E #define ICE_SID_LBL_PTYPE_META 0x8000002F #define ICE_SID_LBL_SW_PROFID 0x80000030 #define ICE_SID_LBL_ACL_PROFID 0x80000031 #define ICE_SID_LBL_PE_PROFID 0x80000032 #define ICE_SID_LBL_RSS_PROFID 0x80000033 #define ICE_SID_LBL_FD_PROFID 0x80000034 #define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035 #define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036 #define ICE_SID_LBL_RXPARSER_PROTO 0x80000037 #define ICE_SID_LBL_TXPARSER_PROTO 0x80000038 /* The following define MUST be updated to reflect the last label section ID */ #define ICE_SID_LBL_LAST 0x80000038 enum ice_block { ICE_BLK_SW = 0, ICE_BLK_ACL, ICE_BLK_FD, ICE_BLK_RSS, ICE_BLK_PE, ICE_BLK_COUNT }; enum ice_sect { ICE_XLT0 = 0, ICE_XLT_KB, ICE_XLT1, ICE_XLT2, ICE_PROF_TCAM, ICE_PROF_REDIR, ICE_VEC_TBL, ICE_CDID_KB, ICE_CDID_REDIR, ICE_SECT_COUNT }; /* Packet Type (PTYPE) values */ 
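/* Each value below is the parser's packet type (PTYPE) ID for one packet * layout; it is the index used as input to the XLT1 stage. For example, * ICE_PTYPE_IPV4_TCP_PAY (26) identifies an IPv4 packet carrying TCP. */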
#define ICE_PTYPE_MAC_PAY 1 #define ICE_PTYPE_IPV4FRAG_PAY 22 #define ICE_PTYPE_IPV4_PAY 23 #define ICE_PTYPE_IPV4_UDP_PAY 24 #define ICE_PTYPE_IPV4_TCP_PAY 26 #define ICE_PTYPE_IPV4_SCTP_PAY 27 #define ICE_PTYPE_IPV4_ICMP_PAY 28 #define ICE_PTYPE_IPV6FRAG_PAY 88 #define ICE_PTYPE_IPV6_PAY 89 #define ICE_PTYPE_IPV6_UDP_PAY 90 #define ICE_PTYPE_IPV6_TCP_PAY 92 #define ICE_PTYPE_IPV6_SCTP_PAY 93 #define ICE_PTYPE_IPV6_ICMP_PAY 94 struct ice_meta_sect { struct ice_pkg_ver ver; #define ICE_META_SECT_NAME_SIZE 28 char name[ICE_META_SECT_NAME_SIZE]; __le32 track_id; }; /* Packet Type Groups (PTG) - Inner Most fields (IM) */ #define ICE_PTG_IM_IPV4_TCP 16 #define ICE_PTG_IM_IPV4_UDP 17 #define ICE_PTG_IM_IPV4_SCTP 18 #define ICE_PTG_IM_IPV4_PAY 20 #define ICE_PTG_IM_IPV4_OTHER 21 #define ICE_PTG_IM_IPV6_TCP 32 #define ICE_PTG_IM_IPV6_UDP 33 #define ICE_PTG_IM_IPV6_SCTP 34 #define ICE_PTG_IM_IPV6_OTHER 37 #define ICE_PTG_IM_L2_OTHER 67 struct ice_flex_fields { union { struct { u8 src_ip; u8 dst_ip; u8 flow_label; /* valid for IPv6 only */ } ip_fields; struct { u8 src_prt; u8 dst_prt; } tcp_udp_fields; struct { u8 src_ip; u8 dst_ip; u8 src_prt; u8 dst_prt; } ip_tcp_udp_fields; struct { u8 src_prt; u8 dst_prt; u8 flow_label; /* valid for IPv6 only */ u8 spi; } ip_esp_fields; struct { u32 offset; u32 length; } off_len; } fields; }; #define ICE_XLT1_DFLT_GRP 0 #define ICE_XLT1_TABLE_SIZE 1024 /* package labels */ struct ice_label { __le16 value; #define ICE_PKG_LABEL_SIZE 64 char name[ICE_PKG_LABEL_SIZE]; }; struct ice_label_section { __le16 count; struct ice_label label[STRUCT_HACK_VAR_LEN]; }; #define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ ice_struct_size((struct ice_label_section *)0, label, 1) - \ sizeof(struct ice_label), sizeof(struct ice_label)) struct ice_sw_fv_section { __le16 count; __le16 base_offset; struct ice_fv fv[STRUCT_HACK_VAR_LEN]; }; struct ice_sw_fv_list_entry { struct LIST_ENTRY_TYPE list_entry; u32 profile_id; struct ice_fv *fv_ptr; }; #pragma pack(1) /* The BOOST TCAM stores the match packet header in reverse order, meaning * the fields are reversed; in addition, this means that the normally big endian * fields of the packet are now little endian. */ struct ice_boost_key_value { #define ICE_BOOST_REMAINING_HV_KEY 15 u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; __le16 hv_dst_port_key; __le16 hv_src_port_key; u8 tcam_search_key; }; #pragma pack() struct ice_boost_key { struct ice_boost_key_value key; struct ice_boost_key_value key2; }; /* package Boost TCAM entry */ struct ice_boost_tcam_entry { __le16 addr; __le16 reserved; /* break up the 40 bytes of key into different fields */ struct ice_boost_key key; u8 boost_hit_index_group; /* The following contains bitfields which are not on byte boundaries. * These fields are currently unused by driver software. 
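* They are retained only so that the entry layout matches the boost TCAM * section format carried in the package.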
*/ #define ICE_BOOST_BIT_FIELDS 43 u8 bit_fields[ICE_BOOST_BIT_FIELDS]; }; struct ice_boost_tcam_section { __le16 count; __le16 reserved; struct ice_boost_tcam_entry tcam[STRUCT_HACK_VAR_LEN]; }; #define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ ice_struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \ sizeof(struct ice_boost_tcam_entry), \ sizeof(struct ice_boost_tcam_entry)) struct ice_xlt1_section { __le16 count; __le16 offset; u8 value[STRUCT_HACK_VAR_LEN]; }; struct ice_xlt2_section { __le16 count; __le16 offset; __le16 value[STRUCT_HACK_VAR_LEN]; }; struct ice_prof_redir_section { __le16 count; __le16 offset; u8 redir_value[STRUCT_HACK_VAR_LEN]; }; /* package buffer building */ struct ice_buf_build { struct ice_buf buf; u16 reserved_section_table_entries; }; struct ice_pkg_enum { struct ice_buf_table *buf_table; u32 buf_idx; u32 type; struct ice_buf_hdr *buf; u32 sect_idx; void *sect; u32 sect_type; u32 entry_idx; void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); }; /* Tunnel enabling */ enum ice_tunnel_type { TNL_VXLAN = 0, TNL_GENEVE, TNL_GTP, TNL_LAST = 0xFF, TNL_ALL = 0xFF, }; struct ice_tunnel_type_scan { enum ice_tunnel_type type; const char *label_prefix; }; struct ice_tunnel_entry { enum ice_tunnel_type type; u16 boost_addr; u16 port; u16 ref; struct ice_boost_tcam_entry *boost_entry; u8 valid; u8 in_use; u8 marked; }; #define ICE_TUNNEL_MAX_ENTRIES 16 struct ice_tunnel_table { struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES]; u16 count; }; struct ice_pkg_es { __le16 count; __le16 offset; struct ice_fv_word es[STRUCT_HACK_VAR_LEN]; }; struct ice_es { u32 sid; u16 count; u16 fvw; u16 *ref_count; struct LIST_HEAD_TYPE prof_map; struct ice_fv_word *t; struct ice_lock prof_map_lock; /* protect access to profiles list */ u8 *written; u8 reverse; /* set to true to reverse FV order */ }; /* PTYPE Group management */ /* Note: the XLT1 table takes a 13-bit value as input and produces an 8-bit * packet type group (PTG) ID as output. * * Note: PTG 0 is the default packet type group and it is assumed that all * PTYPEs are a part of this group until moved to a new PTG.
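* For example, in a freshly initialized XLT1 table every PTYPE resolves to * PTG 0 (ICE_DEFAULT_PTG); profiles later move the PTYPEs they match into * their own groups.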
*/ #define ICE_DEFAULT_PTG 0 struct ice_ptg_entry { struct ice_ptg_ptype *first_ptype; u8 in_use; }; struct ice_ptg_ptype { struct ice_ptg_ptype *next_ptype; u8 ptg; }; #define ICE_MAX_TCAM_PER_PROFILE 32 #define ICE_MAX_PTG_PER_PROFILE 32 struct ice_prof_map { struct LIST_ENTRY_TYPE list; u64 profile_cookie; u64 context; u8 prof_id; u8 ptg_cnt; u8 ptg[ICE_MAX_PTG_PER_PROFILE]; }; #define ICE_INVALID_TCAM 0xFFFF struct ice_tcam_inf { u16 tcam_idx; u8 ptg; u8 prof_id; u8 in_use; }; struct ice_vsig_prof { struct LIST_ENTRY_TYPE list; u64 profile_cookie; u8 prof_id; u8 tcam_count; struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE]; }; struct ice_vsig_entry { struct LIST_HEAD_TYPE prop_lst; struct ice_vsig_vsi *first_vsi; u8 in_use; }; struct ice_vsig_vsi { struct ice_vsig_vsi *next_vsi; u32 prop_mask; u16 changed; u16 vsig; }; #define ICE_XLT1_CNT 1024 #define ICE_MAX_PTGS 256 /* XLT1 Table */ struct ice_xlt1 { struct ice_ptg_entry *ptg_tbl; struct ice_ptg_ptype *ptypes; u8 *t; u32 sid; u16 count; }; #define ICE_XLT2_CNT 768 #define ICE_MAX_VSIGS 768 /* VSIG bit layout: * [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS * [13:15]: PF number of device */ #define ICE_VSIG_IDX_M (0x1FFF) #define ICE_PF_NUM_S 13 #define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S) #define ICE_VSIG_VALUE(vsig, pf_id) \ - (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ - (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M)) + ((u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ + (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))) #define ICE_DEFAULT_VSIG 0 /* XLT2 Table */ struct ice_xlt2 { struct ice_vsig_entry *vsig_tbl; struct ice_vsig_vsi *vsis; u16 *t; u32 sid; u16 count; }; /* Extraction sequence - list of match fields: * protocol ID, offset, profile length */ union ice_match_fld { struct { u8 prot_id; u8 offset; u8 length; u8 reserved; /* must be zero */ } fld; u32 val; }; #define ICE_MATCH_LIST_SZ 20 #pragma pack(1) struct ice_match { u8 count; union ice_match_fld list[ICE_MATCH_LIST_SZ]; }; /* Profile ID Management */ struct ice_prof_id_key { __le16 flags; u8 xlt1; __le16 xlt2_cdid; }; /* Keys are made up of two values, each one-half the size of the key. * For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values) */ #define ICE_TCAM_KEY_VAL_SZ 5 #define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ) struct ice_prof_tcam_entry { __le16 addr; u8 key[ICE_TCAM_KEY_SZ]; u8 prof_id; }; #pragma pack() struct ice_prof_id_section { __le16 count; struct ice_prof_tcam_entry entry[STRUCT_HACK_VAR_LEN]; }; struct ice_prof_tcam { u32 sid; u16 count; u16 max_prof_id; struct ice_prof_tcam_entry *t; u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */ }; struct ice_prof_redir { u8 *t; u32 sid; u16 count; }; /* Tables per block */ struct ice_blk_info { struct ice_xlt1 xlt1; struct ice_xlt2 xlt2; struct ice_prof_tcam prof; struct ice_prof_redir prof_redir; struct ice_es es; u8 overwrite; /* set to true to allow overwrite of table entries */ u8 is_list_init; }; enum ice_chg_type { ICE_TCAM_NONE = 0, ICE_PTG_ES_ADD, ICE_TCAM_ADD, ICE_VSIG_ADD, ICE_VSIG_REM, ICE_VSI_MOVE, }; struct ice_chs_chg { struct LIST_ENTRY_TYPE list_entry; enum ice_chg_type type; u8 add_ptg; u8 add_vsig; u8 add_tcam_idx; u8 add_prof; u16 ptype; u8 ptg; u8 prof_id; u16 vsi; u16 vsig; u16 orig_vsig; u16 tcam_idx; }; #define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT enum ice_prof_type { ICE_PROF_NON_TUN = 0x1, ICE_PROF_TUN_UDP = 0x2, ICE_PROF_TUN_GRE = 0x4, ICE_PROF_TUN_ALL = 0x6, ICE_PROF_ALL = 0xFF, }; + +/* Number of bits/bytes contained in meta init entry. 
Note, this should be a + * multiple of 32 bits. + */ +#define ICE_META_INIT_BITS 192 +#define ICE_META_INIT_DW_CNT (ICE_META_INIT_BITS / (sizeof(__le32) * \ + BITS_PER_BYTE)) + +/* The meta init Flag field starts at this bit */ +#define ICE_META_FLAGS_ST 123 + +/* The entry and bit to check for Double VLAN Mode (DVM) support */ +#define ICE_META_VLAN_MODE_ENTRY 0 +#define ICE_META_FLAG_VLAN_MODE 60 +#define ICE_META_VLAN_MODE_BIT (ICE_META_FLAGS_ST + \ + ICE_META_FLAG_VLAN_MODE) + +struct ice_meta_init_entry { + __le32 bm[ICE_META_INIT_DW_CNT]; +}; + +struct ice_meta_init_section { + __le16 count; + __le16 offset; + struct ice_meta_init_entry entry[1]; +}; #endif /* _ICE_FLEX_TYPE_H_ */ diff --git a/sys/dev/ice/ice_flow.c b/sys/dev/ice/ice_flow.c index f89c38915951..7d792e749e3d 100644 --- a/sys/dev/ice/ice_flow.c +++ b/sys/dev/ice/ice_flow.c @@ -1,1936 +1,1936 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include "ice_common.h" #include "ice_flow.h" /* Size of known protocol header fields */ #define ICE_FLOW_FLD_SZ_ETH_TYPE 2 #define ICE_FLOW_FLD_SZ_VLAN 2 #define ICE_FLOW_FLD_SZ_IPV4_ADDR 4 #define ICE_FLOW_FLD_SZ_IPV6_ADDR 16 #define ICE_FLOW_FLD_SZ_IP_DSCP 1 #define ICE_FLOW_FLD_SZ_IP_TTL 1 #define ICE_FLOW_FLD_SZ_IP_PROT 1 #define ICE_FLOW_FLD_SZ_PORT 2 #define ICE_FLOW_FLD_SZ_TCP_FLAGS 1 #define ICE_FLOW_FLD_SZ_ICMP_TYPE 1 #define ICE_FLOW_FLD_SZ_ICMP_CODE 1 #define ICE_FLOW_FLD_SZ_ARP_OPER 2 #define ICE_FLOW_FLD_SZ_GRE_KEYID 4 /* Describe properties of a protocol header field */ struct ice_flow_field_info { enum ice_flow_seg_hdr hdr; s16 off; /* Offset from start of a protocol header, in bits */ u16 size; /* Size of field in bits */ }; #define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \ .hdr = _hdr, \ .off = (_offset_bytes) * BITS_PER_BYTE, \ .size = (_size_bytes) * BITS_PER_BYTE, \ } /* Table containing properties of supported protocol header fields */ static const struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = { /* Ether */ /* ICE_FLOW_FIELD_IDX_ETH_DA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN), /* ICE_FLOW_FIELD_IDX_ETH_SA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN), /* ICE_FLOW_FIELD_IDX_S_VLAN */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN), /* ICE_FLOW_FIELD_IDX_C_VLAN */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN), /* ICE_FLOW_FIELD_IDX_ETH_TYPE */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE), /* IPv4 / IPv6 */ /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 1, ICE_FLOW_FLD_SZ_IP_DSCP), /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP), /* ICE_FLOW_FIELD_IDX_IPV4_TTL */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 8, ICE_FLOW_FLD_SZ_IP_TTL), /* ICE_FLOW_FIELD_IDX_IPV4_PROT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 9, ICE_FLOW_FLD_SZ_IP_PROT), /* ICE_FLOW_FIELD_IDX_IPV6_TTL */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 7, ICE_FLOW_FLD_SZ_IP_TTL), /* ICE_FLOW_FIELD_IDX_IPV6_PROT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 6, ICE_FLOW_FLD_SZ_IP_PROT), /* ICE_FLOW_FIELD_IDX_IPV4_SA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR), /* ICE_FLOW_FIELD_IDX_IPV4_DA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR), /* ICE_FLOW_FIELD_IDX_IPV6_SA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR), /* ICE_FLOW_FIELD_IDX_IPV6_DA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR), /* Transport */ /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT), /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT), /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT), /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT), /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT), /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT), /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS), /* ARP */ /* ICE_FLOW_FIELD_IDX_ARP_SIP */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR), /* ICE_FLOW_FIELD_IDX_ARP_DIP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR), /* ICE_FLOW_FIELD_IDX_ARP_SHA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN), /* ICE_FLOW_FIELD_IDX_ARP_DHA */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN), /* ICE_FLOW_FIELD_IDX_ARP_OP */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER), /* ICMP */ /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE), /* ICE_FLOW_FIELD_IDX_ICMP_CODE */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE), /* GRE */ /* ICE_FLOW_FIELD_IDX_GRE_KEYID */ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID), }; /* Bitmaps indicating relevant packet types for a particular protocol header * * Packet types for packets with an Outer/First/Single MAC header */ static const u32 ice_ptypes_mac_ofos[] = { 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB, 0x0000077E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last MAC VLAN header */ static const u32 ice_ptypes_macvlan_il[] = { 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000, 0x0000077E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outer/First/Single IPv4 header, does NOT * include IPV4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos[] = { 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outer/First/Single IPv4 header, includes * IPV4 other PTYPEs */ static const u32 ice_ptypes_ipv4_ofos_all[] = { 0x1DC00000, 0x04000800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last IPv4 header */ static const u32 ice_ptypes_ipv4_il[] = { 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B, 0x0000000E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outer/First/Single IPv6 header, does NOT * include IPV6 other PTYPEs */ static const u32 ice_ptypes_ipv6_ofos[] = { 0x00000000, 0x00000000, 0x77000000, 0x10002000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outer/First/Single IPv6 header, includes * IPV6 other PTYPEs */ static const u32 ice_ptypes_ipv6_ofos_all[] = { 0x00000000, 0x00000000, 0x77000000, 0x10002000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last IPv6 header */ static const u32 ice_ptypes_ipv6_il[] = { 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 0x00000770, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */ static const u32 ice_ptypes_ipv4_ofos_no_l4[] = { 0x10C00000, 0x04000800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */ static const u32 ice_ptypes_ipv4_il_no_l4[] = { 0x60000000, 0x18043008, 0x80000002, 0x6010c021, 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */ static const u32 ice_ptypes_ipv6_ofos_no_l4[] = { 0x00000000, 0x00000000, 0x43000000, 0x10002000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */ static const u32 ice_ptypes_ipv6_il_no_l4[] = { 0x00000000, 0x02180430, 0x0000010c, 0x086010c0, 0x00000430, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outermost/First ARP header */ static const u32 ice_ptypes_arp_of[] = { 0x00000800, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* UDP Packet types for non-tunneled packets or tunneled * packets with inner UDP. */ static const u32 ice_ptypes_udp_il[] = { 0x81000000, 0x20204040, 0x04000010, 0x80810102, 0x00000040, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last TCP header */ static const u32 ice_ptypes_tcp_il[] = { 0x04000000, 0x80810102, 0x10000040, 0x02040408, 0x00000102, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last SCTP header */ static const u32 ice_ptypes_sctp_il[] = { 0x08000000, 0x01020204, 0x20000081, 0x04080810, 0x00000204, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outermost/First ICMP header */ static const u32 ice_ptypes_icmp_of[] = { 0x10000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last ICMP header */ static const u32 ice_ptypes_icmp_il[] = { 0x00000000, 0x02040408, 0x40000102, 0x08101020, 0x00000408, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outermost/First GRE header */ static const u32 ice_ptypes_gre_of[] = { 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, 0x0000017E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last MAC header */ static const u32 ice_ptypes_mac_il[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
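/*
 * Worked example, illustrative only: for a single-segment profile with
 * ETH + IPV4 + UDP headers, ice_flow_proc_seg_hdrs() starts from an
 * all-ones bitmap and ANDs in the outer tables; in word 0 this gives
 *
 *	0xFDC00846 & 0x1DC00000 & 0x81000000 = 0x01000000
 *	(mac_ofos)   (ipv4_ofos)  (udp_il)
 *
 * leaving only bit 24, the plain (non-fragmented, non-tunneled) IPv4 UDP
 * payload packet type.
 */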
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Manage parameters and info. used during the creation of a flow profile */ struct ice_flow_prof_params { enum ice_block blk; u16 entry_length; /* # of bytes formatted entry will require */ u8 es_cnt; struct ice_flow_prof *prof; /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0 * This will give us the direction flags. */ struct ice_fv_word es[ICE_MAX_FV_WORDS]; ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); }; #define ICE_FLOW_SEG_HDRS_L3_MASK \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \ ICE_FLOW_SEG_HDR_ARP) #define ICE_FLOW_SEG_HDRS_L4_MASK \ (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ ICE_FLOW_SEG_HDR_SCTP) /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */ #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) /** * ice_flow_val_hdrs - validates packet segments for valid protocol headers * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided */ static enum ice_status ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) { u8 i; for (i = 0; i < segs_cnt; i++) { /* Multiple L3 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) return ICE_ERR_PARAM; /* Multiple L4 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) return ICE_ERR_PARAM; } return ICE_SUCCESS; } /** * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments * @params: information about the flow to be processed * * This function identifies the packet types associated with the protocol * headers being present in packet segments of the specified flow profile. */ static enum ice_status ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) { struct ice_flow_prof *prof; u8 i; ice_memset(params->ptypes, 0xff, sizeof(params->ptypes), ICE_NONDMA_MEM); prof = params->prof; for (i = 0; i < params->prof->segs_cnt; i++) { const ice_bitmap_t *src; u32 hdrs; hdrs = prof->segs[i].hdrs; if (hdrs & ICE_FLOW_SEG_HDR_ETH) { src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos : (const ice_bitmap_t *)ice_ptypes_mac_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) { src = (const ice_bitmap_t *)ice_ptypes_macvlan_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) { ice_and_bitmap(params->ptypes, params->ptypes, (const ice_bitmap_t *)ice_ptypes_arp_of, ICE_FLOW_PTYPE_MAX); } if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) && (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) { src = i ? (const ice_bitmap_t *)ice_ptypes_ipv4_il : (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) && (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) { src = i ? (const ice_bitmap_t *)ice_ptypes_ipv6_il : (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) && !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) { src = !i ? 
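/* !i: outermost/first segment uses the _ofos table; inner segments use _il */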
(const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 : (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) { src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos : (const ice_bitmap_t *)ice_ptypes_ipv4_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) && !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) { src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 : (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) { src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos : (const ice_bitmap_t *)ice_ptypes_ipv6_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } if (hdrs & ICE_FLOW_SEG_HDR_UDP) { src = (const ice_bitmap_t *)ice_ptypes_udp_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) { ice_and_bitmap(params->ptypes, params->ptypes, (const ice_bitmap_t *)ice_ptypes_tcp_il, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) { src = (const ice_bitmap_t *)ice_ptypes_sctp_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } if (hdrs & ICE_FLOW_SEG_HDR_ICMP) { src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of : (const ice_bitmap_t *)ice_ptypes_icmp_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) { if (!i) { src = (const ice_bitmap_t *)ice_ptypes_gre_of; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } } } return ICE_SUCCESS; } /** * ice_flow_xtract_fld - Create an extraction sequence entry for the given field * @hw: pointer to the HW struct * @params: information about the flow to be processed * @seg: packet segment index of the field to be extracted * @fld: ID of field to be extracted * * This function determines the protocol ID, offset, and size of the given * field. It then allocates one or more extraction sequence entries for the * given field, and fills the entries with protocol ID and offset information. */ static enum ice_status ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg, enum ice_flow_field fld) { enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX; enum ice_prot_id prot_id = ICE_PROT_ID_INVAL; u8 fv_words = hw->blk[params->blk].es.fvw; struct ice_flow_fld_info *flds; u16 cnt, ese_bits, i; u16 off; flds = params->prof->segs[seg].fields; switch (fld) { case ICE_FLOW_FIELD_IDX_ETH_DA: case ICE_FLOW_FIELD_IDX_ETH_SA: case ICE_FLOW_FIELD_IDX_S_VLAN: case ICE_FLOW_FIELD_IDX_C_VLAN: prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL; break; case ICE_FLOW_FIELD_IDX_ETH_TYPE: prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL; break; case ICE_FLOW_FIELD_IDX_IPV4_DSCP: prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; break; case ICE_FLOW_FIELD_IDX_IPV6_DSCP: prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; break; case ICE_FLOW_FIELD_IDX_IPV4_TTL: case ICE_FLOW_FIELD_IDX_IPV4_PROT: prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; /* TTL and PROT share the same extraction seq. entry. * Each is considered a sibling to the other in terms of sharing * the same extraction sequence entry.
*/ if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL) sib = ICE_FLOW_FIELD_IDX_IPV4_PROT; else sib = ICE_FLOW_FIELD_IDX_IPV4_TTL; break; case ICE_FLOW_FIELD_IDX_IPV6_TTL: case ICE_FLOW_FIELD_IDX_IPV6_PROT: prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; /* TTL and PROT share the same extraction seq. entry. * Each is considered a sibling to the other in terms of sharing * the same extraction sequence entry. */ if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL) sib = ICE_FLOW_FIELD_IDX_IPV6_PROT; else sib = ICE_FLOW_FIELD_IDX_IPV6_TTL; break; case ICE_FLOW_FIELD_IDX_IPV4_SA: case ICE_FLOW_FIELD_IDX_IPV4_DA: prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; break; case ICE_FLOW_FIELD_IDX_IPV6_SA: case ICE_FLOW_FIELD_IDX_IPV6_DA: prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; break; case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT: case ICE_FLOW_FIELD_IDX_TCP_DST_PORT: case ICE_FLOW_FIELD_IDX_TCP_FLAGS: prot_id = ICE_PROT_TCP_IL; break; case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT: case ICE_FLOW_FIELD_IDX_UDP_DST_PORT: prot_id = ICE_PROT_UDP_IL_OR_S; break; case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT: case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT: prot_id = ICE_PROT_SCTP_IL; break; case ICE_FLOW_FIELD_IDX_ARP_SIP: case ICE_FLOW_FIELD_IDX_ARP_DIP: case ICE_FLOW_FIELD_IDX_ARP_SHA: case ICE_FLOW_FIELD_IDX_ARP_DHA: case ICE_FLOW_FIELD_IDX_ARP_OP: prot_id = ICE_PROT_ARP_OF; break; case ICE_FLOW_FIELD_IDX_ICMP_TYPE: case ICE_FLOW_FIELD_IDX_ICMP_CODE: /* ICMP type and code share the same extraction seq. entry */ prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ? ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL; sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ? ICE_FLOW_FIELD_IDX_ICMP_CODE : ICE_FLOW_FIELD_IDX_ICMP_TYPE; break; case ICE_FLOW_FIELD_IDX_GRE_KEYID: prot_id = ICE_PROT_GRE_OF; break; default: return ICE_ERR_NOT_IMPL; } /* Each extraction sequence entry is a word in size, and extracts a * word-aligned offset from a protocol header. */ ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE; flds[fld].xtrct.prot_id = prot_id; flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) * ICE_FLOW_FV_EXTRACT_SZ; flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits); flds[fld].xtrct.idx = params->es_cnt; /* Adjust the next field-entry index after accommodating the number of * entries this field consumes */ cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size, ese_bits); /* Fill in the extraction sequence entries needed for this field */ off = flds[fld].xtrct.off; for (i = 0; i < cnt; i++) { /* Only consume an extraction sequence entry if there is no * sibling field associated with this field or the sibling entry * already extracts the word shared with this field. 
*/ if (sib == ICE_FLOW_FIELD_IDX_MAX || flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL || flds[sib].xtrct.off != off) { u8 idx; /* Make sure the number of extraction sequence entries required * does not exceed the block's capability */ if (params->es_cnt >= fv_words) return ICE_ERR_MAX_LIMIT; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) idx = fv_words - params->es_cnt - 1; else idx = params->es_cnt; params->es[idx].prot_id = prot_id; params->es[idx].off = off; params->es_cnt++; } off += ICE_FLOW_FV_EXTRACT_SZ; } return ICE_SUCCESS; } /** * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments * @hw: pointer to the HW struct * @params: information about the flow to be processed * * This function iterates through all matched fields in the given segments, and * creates an extraction sequence for the fields. */ static enum ice_status ice_flow_create_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof_params *params) { enum ice_status status = ICE_SUCCESS; u8 i; for (i = 0; i < params->prof->segs_cnt; i++) { u64 match = params->prof->segs[i].match; enum ice_flow_field j; ice_for_each_set_bit(j, (ice_bitmap_t *)&match, ICE_FLOW_FIELD_IDX_MAX) { status = ice_flow_xtract_fld(hw, params, i, j); if (status) return status; ice_clear_bit(j, (ice_bitmap_t *)&match); } } return status; } /** * ice_flow_proc_segs - process all packet segments associated with a profile * @hw: pointer to the HW struct * @params: information about the flow to be processed */ static enum ice_status ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) { enum ice_status status; status = ice_flow_proc_seg_hdrs(params); if (status) return status; status = ice_flow_create_xtrct_seq(hw, params); if (status) return status; switch (params->blk) { case ICE_BLK_RSS: status = ICE_SUCCESS; break; default: return ICE_ERR_NOT_IMPL; } return status; } #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004 /** * ice_flow_find_prof_conds - Find a profile matching headers and conditions * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI) * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*) */ static struct ice_flow_prof * ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt, u16 vsi_handle, u32 conds) { struct ice_flow_prof *p, *prof = NULL; ice_acquire_lock(&hw->fl_profs_locks[blk]); LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) && segs_cnt && segs_cnt == p->segs_cnt) { u8 i; /* Check for profile-VSI association if specified */ if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) && ice_is_vsi_valid(hw, vsi_handle) && !ice_is_bit_set(p->vsis, vsi_handle)) continue; /* Protocol headers must be checked. Matched fields are * checked if specified.
*/ for (i = 0; i < segs_cnt; i++) if (segs[i].hdrs != p->segs[i].hdrs || ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) && segs[i].match != p->segs[i].match)) break; /* A match is found if all segments are matched */ if (i == segs_cnt) { prof = p; break; } } ice_release_lock(&hw->fl_profs_locks[blk]); return prof; } /** * ice_flow_find_prof - Look up a profile matching headers and matched fields * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided */ u64 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt) { struct ice_flow_prof *p; p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt, ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS); return p ? p->id : ICE_FLOW_PROF_ID_INVAL; } /** * ice_flow_find_prof_id - Look up a profile with given profile ID * @hw: pointer to the HW struct * @blk: classification stage * @prof_id: unique ID to identify this flow profile */ static struct ice_flow_prof * ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id) { struct ice_flow_prof *p; LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) if (p->id == prof_id) return p; return NULL; } /** * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle * @hw: pointer to the HW struct * @blk: classification stage * @prof_id: the profile ID handle * @hw_prof_id: pointer to variable to receive the HW profile ID */ enum ice_status ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u8 *hw_prof_id) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_prof_map *map; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, prof_id); if (map) { *hw_prof_id = map->prof_id; status = ICE_SUCCESS; } ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided * @acts: array of default actions * @acts_cnt: number of default actions * @prof: stores the returned flow profile added * * Assumption: the caller has acquired the lock to the profile list */ static enum ice_status ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_action *acts, u8 acts_cnt, struct ice_flow_prof **prof) { struct ice_flow_prof_params *params; enum ice_status status; u8 i; if (!prof || (acts_cnt && !acts)) return ICE_ERR_BAD_PTR; params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params)); if (!params) return ICE_ERR_NO_MEMORY; params->prof = (struct ice_flow_prof *) ice_malloc(hw, sizeof(*params->prof)); if (!params->prof) { status = ICE_ERR_NO_MEMORY; goto free_params; } /* initialize extraction sequence to all invalid (0xff) */ for (i = 0; i < ICE_MAX_FV_WORDS; i++) { params->es[i].prot_id = ICE_PROT_INVALID; params->es[i].off = ICE_FV_OFFSET_INVAL; } params->blk = blk; params->prof->id = prof_id; params->prof->dir = dir; params->prof->segs_cnt = segs_cnt; /* Make a copy of the segments that need to be persistent in the flow * profile instance */ for (i = 0; i < segs_cnt; i++) 
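/* sizeof(*segs) is the size of one ice_flow_seg_info, so each iteration copies exactly one segment into the profile's own array */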
ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs), ICE_NONDMA_TO_NONDMA); status = ice_flow_proc_segs(hw, params); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n"); goto out; } /* Add a HW profile for this flow profile */ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, params->es); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; } *prof = params->prof; out: if (status) { ice_free(hw, params->prof); } free_params: ice_free(hw, params); return status; } /** * ice_flow_rem_prof_sync - remove a flow profile * @hw: pointer to the hardware structure * @blk: classification stage * @prof: pointer to flow profile to remove * * Assumption: the caller has acquired the lock to the profile list */ static enum ice_status ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof) { enum ice_status status; /* Remove all hardware profiles associated with this flow profile */ status = ice_rem_prof(hw, blk, prof->id); if (!status) { LIST_DEL(&prof->l_entry); ice_free(hw, prof); } return status; } /** * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG * @hw: pointer to the hardware structure * @blk: classification stage * @vsi_handle: software VSI handle * @vsig: target VSI group * * Assumption: the caller has already verified that the VSI to * be added has the same characteristics as the VSIG and will * thereby have access to all resources added to that VSIG. */ enum ice_status ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, u16 vsig) { enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT) return ICE_ERR_PARAM; ice_acquire_lock(&hw->fl_profs_locks[blk]); status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle), vsig); ice_release_lock(&hw->fl_profs_locks[blk]); return status; } /** * ice_flow_assoc_prof - associate a VSI with a flow profile * @hw: pointer to the hardware structure * @blk: classification stage * @prof: pointer to flow profile * @vsi_handle: software VSI handle * * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ static enum ice_status ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { enum ice_status status = ICE_SUCCESS; if (!ice_is_bit_set(prof->vsis, vsi_handle)) { status = ice_add_prof_id_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle), prof->id); if (!status) ice_set_bit(vsi_handle, prof->vsis); else ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n", status); } return status; } /** * ice_flow_disassoc_prof - disassociate a VSI from a flow profile * @hw: pointer to the hardware structure * @blk: classification stage * @prof: pointer to flow profile * @vsi_handle: software VSI handle * * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ static enum ice_status ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { enum ice_status status = ICE_SUCCESS; if (ice_is_bit_set(prof->vsis, vsi_handle)) { status = ice_rem_prof_id_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle), prof->id); if (!status) ice_clear_bit(vsi_handle, prof->vsis); else ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n", status); } return status; } /** * ice_flow_add_prof - Add a flow profile for packet segments and matched fields * @hw: pointer to the HW
struct * @blk: classification stage * @dir: flow direction * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided * @acts: array of default actions * @acts_cnt: number of default actions * @prof: stores the returned flow profile added */ static enum ice_status ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_action *acts, u8 acts_cnt, struct ice_flow_prof **prof) { enum ice_status status; if (segs_cnt > ICE_FLOW_SEG_MAX) return ICE_ERR_MAX_LIMIT; if (!segs_cnt) return ICE_ERR_PARAM; if (!segs) return ICE_ERR_BAD_PTR; status = ice_flow_val_hdrs(segs, segs_cnt); if (status) return status; ice_acquire_lock(&hw->fl_profs_locks[blk]); status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt, acts, acts_cnt, prof); if (!status) LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]); ice_release_lock(&hw->fl_profs_locks[blk]); return status; } /** * ice_flow_rem_prof - Remove a flow profile and all entries associated with it * @hw: pointer to the HW struct * @blk: the block for which the flow profile is to be removed * @prof_id: unique ID of the flow profile to be removed */ static enum ice_status ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) { struct ice_flow_prof *prof; enum ice_status status; ice_acquire_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { status = ICE_ERR_DOES_NOT_EXIST; goto out; } /* prof becomes invalid after the call */ status = ice_flow_rem_prof_sync(hw, blk, prof); out: ice_release_lock(&hw->fl_profs_locks[blk]); return status; } /** * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer * @seg: packet segment the field being set belongs to * @fld: field to be set * @field_type: type of the field * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from * entry's input buffer * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's * input buffer * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from * entry's input buffer * * This helper function stores information of a field being matched, including * the type of the field and the locations of the value to match, the mask, and * the upper-bound value in the start of the input buffer for a flow entry. * This function should only be used for fixed-size data structures. * * This function also opportunistically determines the protocol headers to be * present based on the fields being set. Some fields cannot be used alone to * determine the protocol headers present. Sometimes, fields for particular * protocol headers are not matched. In those cases, the protocol headers * must be explicitly set. 
*/ static void ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld, enum ice_flow_fld_match_type field_type, u16 val_loc, u16 mask_loc, u16 last_loc) { u64 bit = BIT_ULL(fld); seg->match |= bit; if (field_type == ICE_FLOW_FLD_TYPE_RANGE) seg->range |= bit; seg->fields[fld].type = field_type; seg->fields[fld].src.val = val_loc; seg->fields[fld].src.mask = mask_loc; seg->fields[fld].src.last = last_loc; ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr); } /** * ice_flow_set_fld - specifies locations of field from entry's input buffer * @seg: packet segment the field being set belongs to * @fld: field to be set * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from * entry's input buffer * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's * input buffer * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from * entry's input buffer * @range: indicate if field being matched is to be in a range * * This function specifies the locations, in the form of byte offsets from the * start of the input buffer for a flow entry, from where the value to match, * the mask value, and upper value can be extracted. These locations are then * stored in the flow profile. When adding a flow entry associated with the * flow profile, these locations will be used to quickly extract the values and * create the content of a match entry. This function should only be used for * fixed-size data structures. */ static void ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, u16 val_loc, u16 mask_loc, u16 last_loc, bool range) { enum ice_flow_fld_match_type t = range ? ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG; ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc); } /** * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf * @seg: packet segment the field being set belongs to * @fld: field to be set * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from * entry's input buffer * @pref_loc: location of prefix value from entry's input buffer * @pref_sz: size of the location holding the prefix value * * This function specifies the locations, in the form of byte offsets from the * start of the input buffer for a flow entry, from where the value to match * and the IPv4 prefix value can be extracted. These locations are then stored * in the flow profile. When adding flow entries to the associated flow profile, * these locations can be used to quickly extract the values to create the * content of a match entry. This function should only be used for fixed-size * data structures. */ void ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld, u16 val_loc, u16 pref_loc, u8 pref_sz) { /* For this type of field, the "mask" location is for the prefix value's * location and the "last" location is for the size of the location of * the prefix value. 
*/ ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc, pref_loc, (u16)pref_sz); } #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \ (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ ICE_FLOW_RSS_SEG_HDR_L4_MASKS) /** * ice_flow_set_rss_seg_info - setup packet segments for RSS * @segs: pointer to the flow field segment(s) * @seg_cnt: segment count * @cfg: configure parameters * * Helper function to extract fields from hash bitmap and use flow * header value to set flow field segment for further use in flow * profile entry or removal. */ static enum ice_status ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt, const struct ice_rss_hash_cfg *cfg) { struct ice_flow_seg_info *seg; u64 val; u8 i; /* set inner most segment */ seg = &segs[seg_cnt - 1]; ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds, ICE_FLOW_FIELD_IDX_MAX) ice_flow_set_fld(seg, (enum ice_flow_field)i, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs); /* set outer most header */ if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4) segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER; else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6) segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER; if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS) return ICE_ERR_PARAM; val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); if (val && !ice_is_pow2(val)) return ICE_ERR_CFG; val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); if (val && !ice_is_pow2(val)) return ICE_ERR_CFG; return ICE_SUCCESS; } /** * ice_rem_vsi_rss_list - remove VSI from RSS list * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * * Remove the VSI from all RSS configurations in the list. */ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle) { struct ice_rss_cfg *r, *tmp; if (LIST_EMPTY(&hw->rss_list_head)) return; ice_acquire_lock(&hw->rss_locks); LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head, ice_rss_cfg, l_entry) if (ice_test_and_clear_bit(vsi_handle, r->vsis)) if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) { LIST_DEL(&r->l_entry); ice_free(hw, r); } ice_release_lock(&hw->rss_locks); } /** * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * * This function will iterate through all flow profiles and disassociate * the VSI from that profile. If the flow profile has no VSIs it will * be removed. 
*/ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *p, *t; enum ice_status status = ICE_SUCCESS; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; if (LIST_EMPTY(&hw->fl_profs[blk])) return ICE_SUCCESS; ice_acquire_lock(&hw->rss_locks); LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof, l_entry) if (ice_is_bit_set(p->vsis, vsi_handle)) { status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle); if (status) break; if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) { status = ice_flow_rem_prof(hw, blk, p->id); if (status) break; } } ice_release_lock(&hw->rss_locks); return status; } /** * ice_get_rss_hdr_type - get a RSS profile's header type * @prof: RSS flow profile */ static enum ice_rss_cfg_hdr_type ice_get_rss_hdr_type(struct ice_flow_prof *prof) { enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS; if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) { hdr_type = ICE_RSS_OUTER_HEADERS; } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) { if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE) hdr_type = ICE_RSS_INNER_HEADERS; if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4) hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4; if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6) hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6; } return hdr_type; } /** * ice_rem_rss_list - remove RSS configuration from list * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @prof: pointer to flow profile * * Assumption: lock has already been acquired for RSS list */ static void ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *tmp; /* Search for RSS hash fields associated to the VSI that match the * hash configurations associated to the flow profile. If found * remove from the RSS entry list of the VSI context and delete entry. 
*/ hdr_type = ice_get_rss_hdr_type(prof); LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head, ice_rss_cfg, l_entry) if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match && r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs && r->hash.hdr_type == hdr_type) { ice_clear_bit(vsi_handle, r->vsis); if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) { LIST_DEL(&r->l_entry); ice_free(hw, r); } return; } } /** * ice_add_rss_list - add RSS configuration to list * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @prof: pointer to flow profile * * Assumption: lock has already been acquired for RSS list */ static enum ice_status ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *rss_cfg; hdr_type = ice_get_rss_hdr_type(prof); LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, ice_rss_cfg, l_entry) if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match && r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs && r->hash.hdr_type == hdr_type) { ice_set_bit(vsi_handle, r->vsis); return ICE_SUCCESS; } rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg)); if (!rss_cfg) return ICE_ERR_NO_MEMORY; rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match; rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs; rss_cfg->hash.hdr_type = hdr_type; rss_cfg->hash.symm = prof->cfg.symm; ice_set_bit(vsi_handle, rss_cfg->vsis); LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head); return ICE_SUCCESS; } #define ICE_FLOW_PROF_HASH_S 0 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S) #define ICE_FLOW_PROF_HDR_S 32 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S) #define ICE_FLOW_PROF_ENCAP_S 62 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S) /* Flow profile ID format: * [0:31] - Packet match fields * [32:61] - Protocol header * [62:63] - Encapsulation flag: * 0 if non-tunneled * 1 if tunneled * 2 for tunneled with outer ipv4 * 3 for tunneled with outer ipv6 */ #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \ - (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ - (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ - (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M)) + ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ + (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ + (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))) /** * ice_add_rss_cfg_sync - add an RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static enum ice_status ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *prof = NULL; struct ice_flow_seg_info *segs; enum ice_status status; u8 segs_cnt; if (cfg->symm) return ICE_ERR_PARAM; segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt, sizeof(*segs)); if (!segs) return ICE_ERR_NO_MEMORY; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto exit; /* Search for a flow profile that has matching headers, hash fields * and has the input VSI associated to it. If found, no further * operations required and exit. 
*/ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS | ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) goto exit; /* Check if a flow profile exists with the same protocol headers and * associated with the input VSI. If so disassociate the VSI from * this profile. The VSI will be added to a new profile created with * the protocol header and new hash field configuration. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) { status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); if (!status) ice_rem_rss_list(hw, vsi_handle, prof); else goto exit; /* Remove profile if it has no VSIs associated */ if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) { status = ice_flow_rem_prof(hw, blk, prof->id); if (status) goto exit; } } /* Search for a profile that has same match fields only. If this * exists then associate the VSI to this profile. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (prof) { status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); if (!status) status = ice_add_rss_list(hw, vsi_handle, prof); goto exit; } /* Create a new flow profile with generated profile and packet * segment information. */ status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX, ICE_FLOW_GEN_PROFID(cfg->hash_flds, segs[segs_cnt - 1].hdrs, cfg->hdr_type), segs, segs_cnt, NULL, 0, &prof); if (status) goto exit; status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); /* If association to a new flow profile failed then this profile can * be removed. */ if (status) { ice_flow_rem_prof(hw, blk, prof->id); goto exit; } status = ice_add_rss_list(hw, vsi_handle, prof); prof->cfg.symm = cfg->symm; exit: ice_free(hw, segs); return status; } /** * ice_add_rss_cfg - add an RSS configuration with specified hashed fields * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @cfg: configure parameters * * This function will generate a flow profile based on fields associated with * the input fields to hash on, the flow type and use the VSI number to add * a flow entry to the profile. 
*/ enum ice_status ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { struct ice_rss_hash_cfg local_cfg; enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || cfg->hash_flds == ICE_HASH_INVALID) return ICE_ERR_PARAM; local_cfg = *cfg; if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { ice_acquire_lock(&hw->rss_locks); status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); ice_release_lock(&hw->rss_locks); } else { ice_acquire_lock(&hw->rss_locks); local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); if (!status) { local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); } ice_release_lock(&hw->rss_locks); } return status; } /** * ice_rem_rss_cfg_sync - remove an existing RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static enum ice_status ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_seg_info *segs; struct ice_flow_prof *prof; enum ice_status status; u8 segs_cnt; segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt, sizeof(*segs)); if (!segs) return ICE_ERR_NO_MEMORY; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto out; prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (!prof) { status = ICE_ERR_DOES_NOT_EXIST; goto out; } status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); if (status) goto out; /* Remove RSS configuration from VSI context before deleting * the flow profile. */ ice_rem_rss_list(hw, vsi_handle, prof); if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) status = ice_flow_rem_prof(hw, blk, prof->id); out: ice_free(hw, segs); return status; } /** * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @cfg: configure parameters * * This function will look up the flow profile based on the input * hash field bitmap, iterate through the profile entry list of * that profile, and find the entry associated with the input VSI to be * removed. Calls are made to underlying flow APIs which will in * turn build or update buffers for the RSS XLT1 section. */ enum ice_status ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { struct ice_rss_hash_cfg local_cfg; enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || cfg->hash_flds == ICE_HASH_INVALID) return ICE_ERR_PARAM; ice_acquire_lock(&hw->rss_locks); local_cfg = *cfg; if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); } else { local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); if (!status) { local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); } } ice_release_lock(&hw->rss_locks); return status; } /* Mapping of AVF hash bit fields to an L3-L4 hash combination.
* As the ice_flow_avf_hdr_field values represent individual bit shifts in a hash, * convert them to their appropriate flow L3 and L4 values. */ #define ICE_FLOW_AVF_RSS_IPV4_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4)) #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP)) #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP)) #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \ (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) #define ICE_FLOW_AVF_RSS_IPV6_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6)) #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP)) #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP)) #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \ (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) /** * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure * * This function will take the hash bitmap provided by the AVF driver via a * message, convert it to ICE-compatible values, and configure RSS flow * profiles. */ enum ice_status ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) { enum ice_status status = ICE_SUCCESS; struct ice_rss_hash_cfg hcfg; u64 hash_flds; if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; /* Make sure no unsupported bits are specified */ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS | ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)) return ICE_ERR_CFG; hash_flds = avf_hash; /* Always create an L3 RSS configuration for any L4 RSS configuration */ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS; if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS; /* Create the corresponding RSS configuration for each valid hash bit */ while (hash_flds) { u64 rss_hash = ICE_HASH_INVALID; if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) { if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) { rss_hash = ICE_FLOW_HASH_IPV4; hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS; } else if (hash_flds & ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) { rss_hash = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT; hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS; } else if (hash_flds & ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) { rss_hash = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT; hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS; } else if (hash_flds & BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) { rss_hash = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT; hash_flds &= ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP); } } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) { if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) { rss_hash = ICE_FLOW_HASH_IPV6; hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS; } else if (hash_flds & ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) { rss_hash = ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT; hash_flds &=
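/*
 * Illustrative walk-through, not driver code: for an AVF request of only
 * BIT(ICE_AVF_FLOW_FIELD_IPV4_TCP), the L3 fix-up above first ORs in the
 * IPv4 group, so the loop runs twice: pass 1 emits ICE_FLOW_HASH_IPV4 and
 * clears the IPv4 group, pass 2 emits ICE_FLOW_HASH_IPV4 |
 * ICE_FLOW_HASH_TCP_PORT and clears the TCP group, leaving hash_flds == 0.
 */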
~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS; } else if (hash_flds & ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) { rss_hash = ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT; hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS; } else if (hash_flds & BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) { rss_hash = ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT; hash_flds &= ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP); } } if (rss_hash == ICE_HASH_INVALID) return ICE_ERR_OUT_OF_RANGE; hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; hcfg.hash_flds = rss_hash; hcfg.symm = false; hcfg.hdr_type = ICE_RSS_ANY_HEADERS; status = ice_add_rss_cfg(hw, vsi_handle, &hcfg); if (status) break; } return status; } /** * ice_replay_rss_cfg - replay RSS configurations associated with VSI * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle */ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { enum ice_status status = ICE_SUCCESS; struct ice_rss_cfg *r; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; ice_acquire_lock(&hw->rss_locks); LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, ice_rss_cfg, l_entry) { if (ice_is_bit_set(r->vsis, vsi_handle)) { status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash); if (status) break; } } ice_release_lock(&hw->rss_locks); return status; } /** * ice_get_rss_cfg - returns hashed fields for the given header types * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @hdrs: protocol header type * * This function will return the match fields of the first instance of flow * profile having the given header types and containing input VSI */ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) { u64 rss_hash = ICE_HASH_INVALID; struct ice_rss_cfg *r; /* verify if the protocol header is non zero and VSI is valid */ if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle)) return ICE_HASH_INVALID; ice_acquire_lock(&hw->rss_locks); LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, ice_rss_cfg, l_entry) if (ice_is_bit_set(r->vsis, vsi_handle) && r->hash.addl_hdrs == hdrs) { rss_hash = r->hash.hash_flds; break; } ice_release_lock(&hw->rss_locks); return rss_hash; } diff --git a/sys/dev/ice/ice_iflib.h b/sys/dev/ice/ice_iflib.h index 09e32b486cbc..7864a7a1ebfa 100644 --- a/sys/dev/ice/ice_iflib.h +++ b/sys/dev/ice/ice_iflib.h @@ -1,294 +1,297 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_iflib.h * @brief main header for the iflib driver implementation * * Contains the definitions for various structures used by the iflib driver * implementation, including the Tx and Rx queue structures and the ice_softc * structure. */ #ifndef _ICE_IFLIB_H_ #define _ICE_IFLIB_H_ /* include kernel options first */ #include "ice_opts.h" #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #include "ice_lib.h" #include "ice_osdep.h" #include "ice_resmgr.h" #include "ice_type.h" #include "ice_features.h" /** * ASSERT_CTX_LOCKED - Assert that the iflib context lock is held * @sc: ice softc pointer * * Macro to trigger an assertion if the iflib context lock is not * currently held. */ #define ASSERT_CTX_LOCKED(sc) sx_assert((sc)->iflib_ctx_lock, SA_XLOCKED) /** * IFLIB_CTX_LOCK - lock the iflib context lock * @sc: ice softc pointer * * Macro used to lock the iflib context lock. */ #define IFLIB_CTX_LOCK(sc) sx_xlock((sc)->iflib_ctx_lock) /** * IFLIB_CTX_UNLOCK - unlock the iflib context lock * @sc: ice softc pointer * * Macro used to unlock the iflib context lock. */ #define IFLIB_CTX_UNLOCK(sc) sx_xunlock((sc)->iflib_ctx_lock) /** * ASSERT_CFG_LOCKED - Assert that a configuration lock is held * @sc: ice softc pointer * * Macro used by ice_lib.c to verify that certain functions are called while * holding a configuration lock. For the iflib implementation, this will be * the iflib context lock. */ #define ASSERT_CFG_LOCKED(sc) ASSERT_CTX_LOCKED(sc) /** * ICE_IFLIB_MAX_DESC_COUNT - Maximum ring size for iflib * * The iflib stack currently requires that the ring size, or number of * descriptors, be a power of 2. The ice hardware is limited to a maximum of * 8160 descriptors, which is not quite 2^13. Limit the maximum ring size for * iflib to just 2^12 (4096). */ #define ICE_IFLIB_MAX_DESC_COUNT 4096 /** * @struct ice_irq_vector * @brief Driver irq vector structure * * ice_lib.c requires the following parameters * @me: the vector number * * Other parameters may be iflib driver specific * * The iflib driver uses a single hardware interrupt per Rx queue, and uses * software interrupts for the Tx queues.
*/ struct ice_irq_vector { u32 me; struct if_irq irq; }; /** * @struct ice_tx_queue * @brief Driver Tx queue structure * * ice_lib.c requires the following parameters: * @vsi: backpointer to the VSI structure * @me: this queue's index into the queue array * @irqv: always NULL for iflib * @desc_count: the number of descriptors * @tx_paddr: the physical address for this queue * @q_teid: the Tx queue TEID returned from firmware * @stats: queue statistics * * Other parameters may be iflib driver specific */ struct ice_tx_queue { struct ice_vsi *vsi; struct ice_tx_desc *tx_base; bus_addr_t tx_paddr; struct tx_stats stats; u64 tso; u16 desc_count; u32 tail; struct ice_irq_vector *irqv; u32 q_teid; u32 me; /* descriptor writeback status */ qidx_t *tx_rsq; qidx_t tx_rs_cidx; qidx_t tx_rs_pidx; qidx_t tx_cidx_processed; }; /** * @struct ice_rx_queue * @brief Driver Rx queue structure * * ice_lib.c requires the following parameters: * @vsi: backpointer to the VSI structure * @me: this queue's index into the queue array * @irqv: pointer to vector structure associated with this queue * @desc_count: the number of descriptors * @rx_paddr: the physical address for this queue * @tail: the tail register address for this queue * @stats: queue statistics * * Other parameters may be iflib driver specific */ struct ice_rx_queue { struct ice_vsi *vsi; union ice_32b_rx_flex_desc *rx_base; bus_addr_t rx_paddr; struct rx_stats stats; u16 desc_count; u32 tail; struct ice_irq_vector *irqv; u32 me; struct if_irq que_irq; }; /** * @struct ice_softc * @brief main structure representing one device * * ice_lib.c requires the following parameters * @all_vsi: the array of all allocated VSIs * @debug_sysctls: sysctl node for debug sysctls * @dev: device_t pointer * @feat_en: bitmap of enabled driver features * @hw: embedded ice_hw structure * @ifp: pointer to the ifnet structure * @link_up: boolean indicating if link is up * @num_available_vsi: size of the VSI array * @pf_vsi: embedded VSI structure for the main PF VSI * @rx_qmgr: queue manager for Rx queues * @soft_stats: software statistics for this device * @state: driver state flags * @stats: hardware statistics for this device * @tx_qmgr: queue manager for Tx queues * @vsi_sysctls: sysctl node for all VSI sysctls * @enable_tx_fc_filter: boolean indicating if the Tx FC filter is enabled * @enable_tx_lldp_filter: boolean indicating if the Tx LLDP filter is enabled * @rebuild_ticks: indicates when a post-reset rebuild started * @imgr: resource manager for interrupt allocations * @pf_imap: interrupt mapping for PF LAN interrupts * @lan_vectors: # of vectors used by LAN driver (length of pf_imap) * @ldo_tlv: LAN Default Override settings from NVM * * ice_iov.c requires the following parameters (when PCI_IOV is defined): * @vfs: array of VF context structures * @num_vfs: number of VFs to use for SR-IOV * * The main representation for a single OS device, used to represent a single * physical function.
*/ struct ice_softc { struct ice_hw hw; struct ice_vsi pf_vsi; /* Main PF VSI */ char admin_mtx_name[16]; /* name of the admin mutex */ struct mtx admin_mtx; /* mutex to protect the admin timer */ struct callout admin_timer; /* timer to trigger admin task */ struct ice_vsi **all_vsi; /* Array of VSI pointers */ u16 num_available_vsi; /* Size of VSI array */ struct sysctl_oid *vsi_sysctls; /* Sysctl node for VSI sysctls */ struct sysctl_oid *debug_sysctls; /* Sysctl node for debug sysctls */ device_t dev; if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; struct ifmedia *media; struct ifnet *ifp; /* device statistics */ struct ice_pf_hw_stats stats; struct ice_pf_sw_stats soft_stats; /* Tx/Rx queue managers */ struct ice_resmgr tx_qmgr; struct ice_resmgr rx_qmgr; /* Interrupt allocation manager */ struct ice_resmgr imgr; u16 *pf_imap; int lan_vectors; /* iflib Tx/Rx queue count sysctl values */ int ifc_sysctl_ntxqs; int ifc_sysctl_nrxqs; /* IRQ Vector data */ struct resource *msix_table; int num_irq_vectors; struct ice_irq_vector *irqvs; /* BAR info */ struct ice_bar_info bar0; /* link status */ bool link_up; /* Ethertype filters enabled */ bool enable_tx_fc_filter; bool enable_tx_lldp_filter; + + /* Other tunable flags */ + bool enable_health_events; int rebuild_ticks; /* driver state flags, only access using atomic functions */ u32 state; /* NVM link override settings */ struct ice_link_default_override_tlv ldo_tlv; struct sx *iflib_ctx_lock; /* Tri-state feature flags (capable/enabled) */ ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT); ice_declare_bitmap(feat_en, ICE_FEATURE_COUNT); }; #endif /* _ICE_IFLIB_H_ */ diff --git a/sys/dev/ice/ice_lan_tx_rx.h b/sys/dev/ice/ice_lan_tx_rx.h index f5443b920b49..6e3051719a24 100644 --- a/sys/dev/ice/ice_lan_tx_rx.h +++ b/sys/dev/ice/ice_lan_tx_rx.h @@ -1,2357 +1,2366 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _ICE_LAN_TX_RX_H_ #define _ICE_LAN_TX_RX_H_ #include "ice_osdep.h" /* Rx Descriptors */ union ice_16byte_rx_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ } read; struct { struct { struct { __le16 mirroring_status; __le16 l2tag1; } lo_dword; union { __le32 rss; /* RSS Hash */ __le32 fd_id; /* Flow Director filter ID */ } hi_dword; } qword0; struct { /* ext status/error/PTYPE/length */ __le64 status_error_len; } qword1; } wb; /* writeback */ }; union ice_32byte_rx_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ /* bit 0 of hdr_addr is DD bit */ __le64 rsvd1; __le64 rsvd2; } read; struct { struct { struct { __le16 mirroring_status; __le16 l2tag1; } lo_dword; union { __le32 rss; /* RSS Hash */ __le32 fd_id; /* Flow Director filter ID */ } hi_dword; } qword0; struct { /* status/error/PTYPE/length */ __le64 status_error_len; } qword1; struct { __le16 ext_status; /* extended status */ __le16 rsvd; __le16 l2tag2_1; __le16 l2tag2_2; } qword2; struct { __le32 reserved; __le32 fd_id; } qword3; } wb; /* writeback */ }; struct ice_fltr_desc { __le64 qidx_compq_space_stat; __le64 dtype_cmd_vsi_fdid; }; #define ICE_FXD_FLTR_QW0_QINDEX_S 0 #define ICE_FXD_FLTR_QW0_QINDEX_M (0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S) #define ICE_FXD_FLTR_QW0_COMP_Q_S 11 #define ICE_FXD_FLTR_QW0_COMP_Q_M BIT_ULL(ICE_FXD_FLTR_QW0_COMP_Q_S) #define ICE_FXD_FLTR_QW0_COMP_Q_ZERO 0x0ULL #define ICE_FXD_FLTR_QW0_COMP_Q_QINDX 0x1ULL #define ICE_FXD_FLTR_QW0_COMP_REPORT_S 12 #define ICE_FXD_FLTR_QW0_COMP_REPORT_M \ (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S) #define ICE_FXD_FLTR_QW0_COMP_REPORT_NONE 0x0ULL #define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL #define ICE_FXD_FLTR_QW0_COMP_REPORT_SW 0x2ULL #define ICE_FXD_FLTR_QW0_FD_SPACE_S 14 #define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S) #define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR 0x0ULL #define ICE_FXD_FLTR_QW0_FD_SPACE_BEST_EFFORT 0x1ULL #define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST 0x2ULL #define ICE_FXD_FLTR_QW0_FD_SPACE_BEST_GUAR 0x3ULL #define ICE_FXD_FLTR_QW0_STAT_CNT_S 16 #define ICE_FXD_FLTR_QW0_STAT_CNT_M \ (0x1FFFULL << ICE_FXD_FLTR_QW0_STAT_CNT_S) #define ICE_FXD_FLTR_QW0_STAT_ENA_S 29 #define ICE_FXD_FLTR_QW0_STAT_ENA_M (0x3ULL << ICE_FXD_FLTR_QW0_STAT_ENA_S) #define ICE_FXD_FLTR_QW0_STAT_ENA_NONE 0x0ULL #define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS 0x1ULL #define ICE_FXD_FLTR_QW0_STAT_ENA_BYTES 0x2ULL #define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS_BYTES 0x3ULL #define ICE_FXD_FLTR_QW0_EVICT_ENA_S 31 #define ICE_FXD_FLTR_QW0_EVICT_ENA_M BIT_ULL(ICE_FXD_FLTR_QW0_EVICT_ENA_S) #define ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE 0x0ULL #define ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE 0x1ULL #define ICE_FXD_FLTR_QW0_TO_Q_S 32 #define ICE_FXD_FLTR_QW0_TO_Q_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_S) #define ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX 0x0ULL #define ICE_FXD_FLTR_QW0_TO_Q_PRI_S 35 #define ICE_FXD_FLTR_QW0_TO_Q_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_PRI_S) #define ICE_FXD_FLTR_QW0_TO_Q_PRIO1 0x1ULL #define ICE_FXD_FLTR_QW0_DPU_RECIPE_S 38 #define ICE_FXD_FLTR_QW0_DPU_RECIPE_M \ (0x3ULL << ICE_FXD_FLTR_QW0_DPU_RECIPE_S) #define ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT 0x0ULL #define ICE_FXD_FLTR_QW0_DROP_S 40 #define ICE_FXD_FLTR_QW0_DROP_M BIT_ULL(ICE_FXD_FLTR_QW0_DROP_S) #define ICE_FXD_FLTR_QW0_DROP_NO 0x0ULL #define ICE_FXD_FLTR_QW0_DROP_YES 0x1ULL #define ICE_FXD_FLTR_QW0_FLEX_PRI_S 41 #define ICE_FXD_FLTR_QW0_FLEX_PRI_M (0x7ULL << 
ICE_FXD_FLTR_QW0_FLEX_PRI_S) #define ICE_FXD_FLTR_QW0_FLEX_PRI_NONE 0x0ULL #define ICE_FXD_FLTR_QW0_FLEX_MDID_S 44 #define ICE_FXD_FLTR_QW0_FLEX_MDID_M (0xFULL << ICE_FXD_FLTR_QW0_FLEX_MDID_S) #define ICE_FXD_FLTR_QW0_FLEX_MDID0 0x0ULL #define ICE_FXD_FLTR_QW0_FLEX_VAL_S 48 #define ICE_FXD_FLTR_QW0_FLEX_VAL_M \ (0xFFFFULL << ICE_FXD_FLTR_QW0_FLEX_VAL_S) #define ICE_FXD_FLTR_QW0_FLEX_VAL0 0x0ULL #define ICE_FXD_FLTR_QW1_DTYPE_S 0 #define ICE_FXD_FLTR_QW1_DTYPE_M (0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S) #define ICE_FXD_FLTR_QW1_PCMD_S 4 #define ICE_FXD_FLTR_QW1_PCMD_M BIT_ULL(ICE_FXD_FLTR_QW1_PCMD_S) #define ICE_FXD_FLTR_QW1_PCMD_ADD 0x0ULL #define ICE_FXD_FLTR_QW1_PCMD_REMOVE 0x1ULL #define ICE_FXD_FLTR_QW1_PROF_PRI_S 5 #define ICE_FXD_FLTR_QW1_PROF_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_PROF_PRI_S) #define ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO 0x0ULL #define ICE_FXD_FLTR_QW1_PROF_S 8 #define ICE_FXD_FLTR_QW1_PROF_M (0x3FULL << ICE_FXD_FLTR_QW1_PROF_S) #define ICE_FXD_FLTR_QW1_PROF_ZERO 0x0ULL #define ICE_FXD_FLTR_QW1_FD_VSI_S 14 #define ICE_FXD_FLTR_QW1_FD_VSI_M (0x3FFULL << ICE_FXD_FLTR_QW1_FD_VSI_S) #define ICE_FXD_FLTR_QW1_SWAP_S 24 #define ICE_FXD_FLTR_QW1_SWAP_M BIT_ULL(ICE_FXD_FLTR_QW1_SWAP_S) #define ICE_FXD_FLTR_QW1_SWAP_NOT_SET 0x0ULL #define ICE_FXD_FLTR_QW1_SWAP_SET 0x1ULL #define ICE_FXD_FLTR_QW1_FDID_PRI_S 25 #define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S) #define ICE_FXD_FLTR_QW1_FDID_PRI_ZERO 0x0ULL #define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL #define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL #define ICE_FXD_FLTR_QW1_FDID_MDID_S 28 #define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S) #define ICE_FXD_FLTR_QW1_FDID_MDID_FD 0x05ULL #define ICE_FXD_FLTR_QW1_FDID_S 32 #define ICE_FXD_FLTR_QW1_FDID_M \ (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S) #define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL enum ice_rx_desc_status_bits { /* Note: These are predefined bit offsets */ ICE_RX_DESC_STATUS_DD_S = 0, ICE_RX_DESC_STATUS_EOF_S = 1, ICE_RX_DESC_STATUS_L2TAG1P_S = 2, ICE_RX_DESC_STATUS_L3L4P_S = 3, ICE_RX_DESC_STATUS_CRCP_S = 4, ICE_RX_DESC_STATUS_TSYNINDX_S = 5, ICE_RX_DESC_STATUS_TSYNVALID_S = 7, ICE_RX_DESC_STATUS_EXT_UDP_0_S = 8, ICE_RX_DESC_STATUS_UMBCAST_S = 9, ICE_RX_DESC_STATUS_FLM_S = 11, ICE_RX_DESC_STATUS_FLTSTAT_S = 12, ICE_RX_DESC_STATUS_LPBK_S = 14, ICE_RX_DESC_STATUS_IPV6EXADD_S = 15, ICE_RX_DESC_STATUS_RESERVED2_S = 16, ICE_RX_DESC_STATUS_INT_UDP_0_S = 18, ICE_RX_DESC_STATUS_LAST /* this entry must be last!!! */ }; #define ICE_RXD_QW1_STATUS_S 0 #define ICE_RXD_QW1_STATUS_M ((BIT(ICE_RX_DESC_STATUS_LAST) - 1) << \ ICE_RXD_QW1_STATUS_S) #define ICE_RXD_QW1_STATUS_TSYNINDX_S ICE_RX_DESC_STATUS_TSYNINDX_S #define ICE_RXD_QW1_STATUS_TSYNINDX_M (0x3UL << ICE_RXD_QW1_STATUS_TSYNINDX_S) #define ICE_RXD_QW1_STATUS_TSYNVALID_S ICE_RX_DESC_STATUS_TSYNVALID_S #define ICE_RXD_QW1_STATUS_TSYNVALID_M BIT_ULL(ICE_RXD_QW1_STATUS_TSYNVALID_S) enum ice_rx_desc_fltstat_values { ICE_RX_DESC_FLTSTAT_NO_DATA = 0, ICE_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ ICE_RX_DESC_FLTSTAT_RSV = 2, ICE_RX_DESC_FLTSTAT_RSS_HASH = 3, }; #define ICE_RXD_QW1_ERROR_S 19 #define ICE_RXD_QW1_ERROR_M (0xFFUL << ICE_RXD_QW1_ERROR_S) enum ice_rx_desc_error_bits { /* Note: These are predefined bit offsets */ ICE_RX_DESC_ERROR_RXE_S = 0, ICE_RX_DESC_ERROR_RECIPE_S = 1, ICE_RX_DESC_ERROR_HBO_S = 2, ICE_RX_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */ ICE_RX_DESC_ERROR_IPE_S = 3, ICE_RX_DESC_ERROR_L4E_S = 4, ICE_RX_DESC_ERROR_EIPE_S = 5, ICE_RX_DESC_ERROR_OVERSIZE_S = 6, ICE_RX_DESC_ERROR_PPRS_S = 7 }; enum ice_rx_desc_error_l3l4e_masks { ICE_RX_DESC_ERROR_L3L4E_NONE = 0, ICE_RX_DESC_ERROR_L3L4E_PROT = 1, }; #define ICE_RXD_QW1_PTYPE_S 30 #define ICE_RXD_QW1_PTYPE_M (0xFFULL << ICE_RXD_QW1_PTYPE_S) /* Packet type non-ip values */ enum ice_rx_l2_ptype { ICE_RX_PTYPE_L2_RESERVED = 0, ICE_RX_PTYPE_L2_MAC_PAY2 = 1, ICE_RX_PTYPE_L2_FIP_PAY2 = 3, ICE_RX_PTYPE_L2_OUI_PAY2 = 4, ICE_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, ICE_RX_PTYPE_L2_LLDP_PAY2 = 6, ICE_RX_PTYPE_L2_ECP_PAY2 = 7, ICE_RX_PTYPE_L2_EVB_PAY2 = 8, ICE_RX_PTYPE_L2_QCN_PAY2 = 9, ICE_RX_PTYPE_L2_EAPOL_PAY2 = 10, ICE_RX_PTYPE_L2_ARP = 11, }; struct ice_rx_ptype_decoded { u32 ptype:10; u32 known:1; u32 outer_ip:1; u32 outer_ip_ver:2; u32 outer_frag:1; u32 tunnel_type:3; u32 tunnel_end_prot:2; u32 tunnel_end_frag:1; u32 inner_prot:4; u32 payload_layer:3; }; enum ice_rx_ptype_outer_ip { ICE_RX_PTYPE_OUTER_L2 = 0, ICE_RX_PTYPE_OUTER_IP = 1, }; enum ice_rx_ptype_outer_ip_ver { ICE_RX_PTYPE_OUTER_NONE = 0, ICE_RX_PTYPE_OUTER_IPV4 = 1, ICE_RX_PTYPE_OUTER_IPV6 = 2, }; enum ice_rx_ptype_outer_fragmented { ICE_RX_PTYPE_NOT_FRAG = 0, ICE_RX_PTYPE_FRAG = 1, }; enum ice_rx_ptype_tunnel_type { ICE_RX_PTYPE_TUNNEL_NONE = 0, ICE_RX_PTYPE_TUNNEL_IP_IP = 1, ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2, ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, }; enum ice_rx_ptype_tunnel_end_prot { ICE_RX_PTYPE_TUNNEL_END_NONE = 0, ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1, ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2, }; enum ice_rx_ptype_inner_prot { ICE_RX_PTYPE_INNER_PROT_NONE = 0, ICE_RX_PTYPE_INNER_PROT_UDP = 1, ICE_RX_PTYPE_INNER_PROT_TCP = 2, ICE_RX_PTYPE_INNER_PROT_SCTP = 3, ICE_RX_PTYPE_INNER_PROT_ICMP = 4, }; enum ice_rx_ptype_payload_layer { ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, }; #define ICE_RXD_QW1_LEN_PBUF_S 38 #define ICE_RXD_QW1_LEN_PBUF_M (0x3FFFULL << ICE_RXD_QW1_LEN_PBUF_S) #define ICE_RXD_QW1_LEN_HBUF_S 52 #define ICE_RXD_QW1_LEN_HBUF_M (0x7FFULL << ICE_RXD_QW1_LEN_HBUF_S) #define ICE_RXD_QW1_LEN_SPH_S 63 #define ICE_RXD_QW1_LEN_SPH_M BIT_ULL(ICE_RXD_QW1_LEN_SPH_S) enum ice_rx_desc_ext_status_bits { /* Note: These are predefined bit offsets */ ICE_RX_DESC_EXT_STATUS_L2TAG2P_S = 0, ICE_RX_DESC_EXT_STATUS_L2TAG3P_S = 1, ICE_RX_DESC_EXT_STATUS_FLEXBL_S = 2, ICE_RX_DESC_EXT_STATUS_FLEXBH_S = 4, ICE_RX_DESC_EXT_STATUS_FDLONGB_S = 9, ICE_RX_DESC_EXT_STATUS_PELONGB_S = 11, }; enum ice_rx_desc_pe_status_bits { /* Note: These are predefined bit offsets */ ICE_RX_DESC_PE_STATUS_QPID_S = 0, /* 18 BITS */ ICE_RX_DESC_PE_STATUS_L4PORT_S = 0, /* 16 BITS */ ICE_RX_DESC_PE_STATUS_IPINDEX_S = 16, /* 8 BITS */ ICE_RX_DESC_PE_STATUS_QPIDHIT_S = 24, ICE_RX_DESC_PE_STATUS_APBVTHIT_S = 25, ICE_RX_DESC_PE_STATUS_PORTV_S = 26, ICE_RX_DESC_PE_STATUS_URG_S = 27, ICE_RX_DESC_PE_STATUS_IPFRAG_S = 28, ICE_RX_DESC_PE_STATUS_IPOPT_S = 29 }; #define ICE_RX_PROG_STATUS_DESC_LEN_S 38 #define ICE_RX_PROG_STATUS_DESC_LEN 0x2000000 #define 
ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S 2 #define ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M \ (0x7UL << ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S) #define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S 19 #define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_M \ (0x3FUL << ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S) enum ice_rx_prog_status_desc_status_bits { /* Note: These are predefined bit offsets */ ICE_RX_PROG_STATUS_DESC_DD_S = 0, ICE_RX_PROG_STATUS_DESC_PROG_ID_S = 2 /* 3 BITS */ }; enum ice_rx_prog_status_desc_prog_id_masks { ICE_RX_PROG_STATUS_DESC_FD_FLTR_STATUS = 1, }; enum ice_rx_prog_status_desc_error_bits { /* Note: These are predefined bit offsets */ ICE_RX_PROG_STATUS_DESC_FD_TBL_FULL_S = 0, ICE_RX_PROG_STATUS_DESC_NO_FD_ENTRY_S = 1, }; /* Rx Flex Descriptors * These descriptors are used instead of the legacy version descriptors when * ice_rlan_ctx.adv_desc is set */ union ice_32b_rx_flex_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ /* bit 0 of hdr_addr is DD bit */ __le64 rsvd1; __le64 rsvd2; } read; struct { /* Qword 0 */ u8 rxdid; /* descriptor builder profile ID */ u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ __le16 pkt_len; /* [15:14] are reserved */ __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */ /* sph=[11:11] */ /* ff1/ext=[15:12] */ /* Qword 1 */ __le16 status_error0; __le16 l2tag1; __le16 flex_meta0; __le16 flex_meta1; /* Qword 2 */ __le16 status_error1; u8 flex_flags2; u8 time_stamp_low; __le16 l2tag2_1st; __le16 l2tag2_2nd; /* Qword 3 */ __le16 flex_meta2; __le16 flex_meta3; union { struct { __le16 flex_meta4; __le16 flex_meta5; } flex; __le32 ts_high; } flex_ts; } wb; /* writeback */ }; /* Rx Flex Descriptor NIC Profile * RxDID Profile ID 2 * Flex-field 0: RSS hash lower 16-bits * Flex-field 1: RSS hash upper 16-bits * Flex-field 2: Flow ID lower 16-bits * Flex-field 3: Flow ID higher 16-bits * Flex-field 4: reserved, VLAN ID taken from L2Tag */ struct ice_32b_rx_flex_desc_nic { /* Qword 0 */ u8 rxdid; u8 mir_id_umb_cast; __le16 ptype_flexi_flags0; __le16 pkt_len; __le16 hdr_len_sph_flex_flags1; /* Qword 1 */ __le16 status_error0; __le16 l2tag1; __le32 rss_hash; /* Qword 2 */ __le16 status_error1; u8 flexi_flags2; u8 ts_low; __le16 l2tag2_1st; __le16 l2tag2_2nd; /* Qword 3 */ __le32 flow_id; union { struct { __le16 rsvd; __le16 flow_id_ipv6; } flex; __le32 ts_high; } flex_ts; }; /* Rx Flex Descriptor Switch Profile * RxDID Profile ID 3 * Flex-field 0: Source VSI */ struct ice_32b_rx_flex_desc_sw { /* Qword 0 */ u8 rxdid; u8 mir_id_umb_cast; __le16 ptype_flexi_flags0; __le16 pkt_len; __le16 hdr_len_sph_flex_flags1; /* Qword 1 */ __le16 status_error0; __le16 l2tag1; __le16 src_vsi; /* [10:15] are reserved */ __le16 flex_md1_rsvd; /* Qword 2 */ __le16 status_error1; u8 flex_flags2; u8 ts_low; __le16 l2tag2_1st; __le16 l2tag2_2nd; /* Qword 3 */ __le32 rsvd; /* flex words 2-3 are reserved */ __le32 ts_high; }; /* Rx Flex Descriptor NIC VEB Profile * RxDID Profile ID 4 * Flex-field 0: Destination VSI */ struct ice_32b_rx_flex_desc_nic_veb_dbg { /* Qword 0 */ u8 rxdid; u8 mir_id_umb_cast; __le16 ptype_flexi_flags0; __le16 pkt_len; __le16 hdr_len_sph_flex_flags1; /* Qword 1 */ __le16 status_error0; __le16 l2tag1; __le16 dst_vsi; /* [0:12]: destination VSI */ /* 13: VSI valid bit */ /* [14:15] are reserved */ __le16 flex_field_1; /* Qword 2 */ __le16 status_error1; u8 flex_flags2; u8 ts_low; __le16 l2tag2_1st; __le16 l2tag2_2nd; /* Qword 3 */ __le32 rsvd; /* flex words 2-3 are reserved */ 
__le32 ts_high; }; /* Rx Flex Descriptor NIC ACL Profile * RxDID Profile ID 5 * Flex-field 0: ACL Counter 0 * Flex-field 1: ACL Counter 1 * Flex-field 2: ACL Counter 2 */ struct ice_32b_rx_flex_desc_nic_acl_dbg { /* Qword 0 */ u8 rxdid; u8 mir_id_umb_cast; __le16 ptype_flexi_flags0; __le16 pkt_len; __le16 hdr_len_sph_flex_flags1; /* Qword 1 */ __le16 status_error0; __le16 l2tag1; __le16 acl_ctr0; __le16 acl_ctr1; /* Qword 2 */ __le16 status_error1; u8 flex_flags2; u8 ts_low; __le16 l2tag2_1st; __le16 l2tag2_2nd; /* Qword 3 */ __le16 acl_ctr2; __le16 rsvd; /* flex words 2-3 are reserved */ __le32 ts_high; }; /* Rx Flex Descriptor NIC Profile * RxDID Profile ID 6 * Flex-field 0: RSS hash lower 16-bits * Flex-field 1: RSS hash upper 16-bits * Flex-field 2: Flow ID lower 16-bits * Flex-field 3: Source VSI * Flex-field 4: reserved, VLAN ID taken from L2Tag */ struct ice_32b_rx_flex_desc_nic_2 { /* Qword 0 */ u8 rxdid; u8 mir_id_umb_cast; __le16 ptype_flexi_flags0; __le16 pkt_len; __le16 hdr_len_sph_flex_flags1; /* Qword 1 */ __le16 status_error0; __le16 l2tag1; __le32 rss_hash; /* Qword 2 */ __le16 status_error1; u8 flexi_flags2; u8 ts_low; __le16 l2tag2_1st; __le16 l2tag2_2nd; /* Qword 3 */ __le16 flow_id; __le16 src_vsi; union { struct { __le16 rsvd; __le16 flow_id_ipv6; } flex; __le32 ts_high; } flex_ts; }; /* Receive Flex Descriptor profile IDs: There are a total * of 64 profiles where profile IDs 0/1 are for legacy; and * profiles 2-63 are flex profiles that can be programmed * with specific metadata (profile 7 reserved for HW) */ enum ice_rxdid { ICE_RXDID_LEGACY_0 = 0, ICE_RXDID_LEGACY_1 = 1, ICE_RXDID_FLEX_NIC = 2, ICE_RXDID_FLEX_NIC_2 = 6, ICE_RXDID_HW = 7, ICE_RXDID_LAST = 63, }; /* Receive Flex Descriptor Dword Index */ enum ice_flex_word { ICE_RX_FLEX_DWORD_0 = 0, ICE_RX_FLEX_DWORD_1, ICE_RX_FLEX_DWORD_2, ICE_RX_FLEX_DWORD_3, ICE_RX_FLEX_DWORD_4, ICE_RX_FLEX_DWORD_5 }; /* Receive Flex Descriptor Rx opcode values */ enum ice_flex_opcode { ICE_RX_OPC_DEBUG = 0, ICE_RX_OPC_MDID, ICE_RX_OPC_EXTRACT, ICE_RX_OPC_PROTID }; /* Receive Descriptor MDID values that access packet flags */ enum ice_flex_mdid_pkt_flags { ICE_RX_MDID_PKT_FLAGS_15_0 = 20, ICE_RX_MDID_PKT_FLAGS_31_16, ICE_RX_MDID_PKT_FLAGS_47_32, ICE_RX_MDID_PKT_FLAGS_63_48, }; /* Generic descriptor MDID values */ enum ice_flex_mdid { ICE_MDID_GENERIC_WORD_0, ICE_MDID_GENERIC_WORD_1, ICE_MDID_GENERIC_WORD_2, ICE_MDID_GENERIC_WORD_3, ICE_MDID_GENERIC_WORD_4, ICE_MDID_FLOW_ID_LOWER, ICE_MDID_FLOW_ID_HIGH, ICE_MDID_RX_DESCR_PROF_IDX, ICE_MDID_RX_PKT_DROP, ICE_MDID_RX_DST_Q = 12, ICE_MDID_RX_DST_VSI, ICE_MDID_SRC_VSI = 19, ICE_MDID_ACL_NOP = 55, /* Entry 56 */ ICE_MDID_RX_HASH_LOW, ICE_MDID_ACL_CNTR_PKT = ICE_MDID_RX_HASH_LOW, /* Entry 57 */ ICE_MDID_RX_HASH_HIGH, ICE_MDID_ACL_CNTR_BYTES = ICE_MDID_RX_HASH_HIGH, ICE_MDID_ACL_CNTR_PKT_BYTES }; /* for ice_32byte_rx_flex_desc.mir_id_umb_cast member */ #define ICE_RX_FLEX_DESC_MIRROR_M (0x3F) /* 6-bits */ /* Rx/Tx Flag64 packet flag bits */ enum ice_flg64_bits { ICE_FLG_PKT_DSI = 0, /* If there is a 1 in this bit position then that means Rx packet */ ICE_FLG_PKT_DIR = 4, ICE_FLG_EVLAN_x8100 = 14, ICE_FLG_EVLAN_x9100, ICE_FLG_VLAN_x8100, ICE_FLG_TNL_MAC = 22, ICE_FLG_TNL_VLAN, ICE_FLG_PKT_FRG, ICE_FLG_FIN = 32, ICE_FLG_SYN, ICE_FLG_RST, ICE_FLG_TNL0 = 38, ICE_FLG_TNL1, ICE_FLG_TNL2, ICE_FLG_UDP_GRE, ICE_FLG_RSVD = 63 }; enum ice_rx_flex_desc_umb_cast_bits { /* field is 2 bits long */ ICE_RX_FLEX_DESC_UMB_CAST_S = 6, ICE_RX_FLEX_DESC_UMB_CAST_LAST /* this entry must be last!!! 
*/ }; enum ice_umbcast_dest_addr_types { ICE_DEST_UNICAST = 0, ICE_DEST_MULTICAST, ICE_DEST_BROADCAST, ICE_DEST_MIRRORED, }; /* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */ #define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */ enum ice_rx_flex_desc_flexi_flags0_bits { /* field is 6 bits long */ ICE_RX_FLEX_DESC_FLEXI_FLAGS0_S = 10, ICE_RX_FLEX_DESC_FLEXI_FLAGS0_LAST /* this entry must be last!!! */ }; /* for ice_32byte_rx_flex_desc.pkt_length member */ #define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */ /* for ice_32byte_rx_flex_desc.header_length_sph_flexi_flags1 member */ #define ICE_RX_FLEX_DESC_HEADER_LEN_M (0x7FF) /* 11-bits */ enum ice_rx_flex_desc_sph_bits { /* field is 1 bit long */ ICE_RX_FLEX_DESC_SPH_S = 11, ICE_RX_FLEX_DESC_SPH_LAST /* this entry must be last!!! */ }; enum ice_rx_flex_desc_flexi_flags1_bits { /* field is 4 bits long */ ICE_RX_FLEX_DESC_FLEXI_FLAGS1_S = 12, ICE_RX_FLEX_DESC_FLEXI_FLAGS1_LAST /* this entry must be last!!! */ }; enum ice_rx_flex_desc_ext_status_bits { /* field is 4 bits long */ ICE_RX_FLEX_DESC_EXT_STATUS_EXT_UDP_S = 12, ICE_RX_FLEX_DESC_EXT_STATUS_INT_UDP_S = 13, ICE_RX_FLEX_DESC_EXT_STATUS_RECIPE_S = 14, ICE_RX_FLEX_DESC_EXT_STATUS_OVERSIZE_S = 15, ICE_RX_FLEX_DESC_EXT_STATUS_LAST /* entry must be last!!! */ }; enum ice_rx_flex_desc_status_error_0_bits { /* Note: These are predefined bit offsets */ ICE_RX_FLEX_DESC_STATUS0_DD_S = 0, ICE_RX_FLEX_DESC_STATUS0_EOF_S, ICE_RX_FLEX_DESC_STATUS0_HBO_S, ICE_RX_FLEX_DESC_STATUS0_L3L4P_S, ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S, ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S, ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S, ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S, ICE_RX_FLEX_DESC_STATUS0_LPBK_S, ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S, ICE_RX_FLEX_DESC_STATUS0_RXE_S, ICE_RX_FLEX_DESC_STATUS0_CRCP_S, ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S, ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S, ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S, ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S, ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */ }; enum ice_rx_flex_desc_status_error_1_bits { /* Note: These are predefined bit offsets */ ICE_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */ ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4, ICE_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5, /* [10:6] reserved */ ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11, ICE_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12, ICE_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13, ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14, ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15, ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */ }; enum ice_rx_flex_desc_exstat_bits { /* Note: These are predefined bit offsets */ ICE_RX_FLEX_DESC_EXSTAT_EXTUDP_S = 0, ICE_RX_FLEX_DESC_EXSTAT_INTUDP_S = 1, ICE_RX_FLEX_DESC_EXSTAT_RECIPE_S = 2, ICE_RX_FLEX_DESC_EXSTAT_OVERSIZE_S = 3, }; #define ICE_RXQ_CTX_SIZE_DWORDS 8 #define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22 #define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5 #define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800)) /* RLAN Rx queue context data * * The sizes of the variables may be larger than needed due to crossing byte * boundaries. If we do not have the width of the variable set to the correct * size then we could end up shifting bits off the top of the variable when the * variable is at the top of a byte and crosses over into the next byte. 
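The status_error0 bit positions above are what an Rx path consults for checksum offload. The usual pattern, sketched below with a hypothetical helper name (not the driver's actual routine): trust the XSUM error bits only after the L3L4P bit confirms that hardware parsed the headers.

static bool
ice_flex_csum_ok_sketch(union ice_32b_rx_flex_desc *desc)
{
	u16 status0 = le16toh(desc->wb.status_error0);

	/* Without L3/L4 parsing, the checksum error bits are meaningless */
	if (!(status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
		return (false);

	/* Inner/outer IP and L4 checksum error bits must all be clear */
	return ((status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
	    BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |
	    BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |
	    BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S))) == 0);
}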
*/ struct ice_rlan_ctx { u16 head; u16 cpuid; /* bigger than needed, see above for reason */ #define ICE_RLAN_BASE_S 7 u64 base; u16 qlen; #define ICE_RLAN_CTX_DBUF_S 7 u16 dbuf; /* bigger than needed, see above for reason */ #define ICE_RLAN_CTX_HBUF_S 6 u16 hbuf; /* bigger than needed, see above for reason */ u8 dtype; u8 dsize; u8 crcstrip; u8 l2tsel; u8 hsplit_0; u8 hsplit_1; u8 showiv; u32 rxmax; /* bigger than needed, see above for reason */ u8 tphrdesc_ena; u8 tphwdesc_ena; u8 tphdata_ena; u8 tphhead_ena; u16 lrxqthresh; /* bigger than needed, see above for reason */ u8 prefena; /* NOTE: normally must be set to 1 at init */ }; struct ice_ctx_ele { u16 offset; u16 size_of; u16 width; u16 lsb; }; #define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ .offset = offsetof(struct _struct, _ele), \ .size_of = FIELD_SIZEOF(struct _struct, _ele), \ .width = _width, \ .lsb = _lsb, \ } /* for hsplit_0 field of Rx RLAN context */ enum ice_rlan_ctx_rx_hsplit_0 { ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0, ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1, ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2, ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8, }; /* for hsplit_1 field of Rx RLAN context */ enum ice_rlan_ctx_rx_hsplit_1 { ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0, ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1, ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2, }; /* Tx Descriptor */ struct ice_tx_desc { __le64 buf_addr; /* Address of descriptor's data buf */ __le64 cmd_type_offset_bsz; }; #define ICE_TXD_QW1_DTYPE_S 0 #define ICE_TXD_QW1_DTYPE_M (0xFUL << ICE_TXD_QW1_DTYPE_S) enum ice_tx_desc_dtype_value { ICE_TX_DESC_DTYPE_DATA = 0x0, ICE_TX_DESC_DTYPE_CTX = 0x1, ICE_TX_DESC_DTYPE_IPSEC = 0x3, ICE_TX_DESC_DTYPE_FLTR_PROG = 0x8, ICE_TX_DESC_DTYPE_HLP_META = 0x9, /* DESC_DONE - HW has completed write-back of descriptor */ ICE_TX_DESC_DTYPE_DESC_DONE = 0xF, }; #define ICE_TXD_QW1_CMD_S 4 #define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S) enum ice_tx_desc_cmd_bits { ICE_TX_DESC_CMD_EOP = 0x0001, ICE_TX_DESC_CMD_RS = 0x0002, ICE_TX_DESC_CMD_RSVD = 0x0004, ICE_TX_DESC_CMD_IL2TAG1 = 0x0008, ICE_TX_DESC_CMD_DUMMY = 0x0010, ICE_TX_DESC_CMD_IIPT_NONIP = 0x0000, ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, ICE_TX_DESC_CMD_RSVD2 = 0x0080, ICE_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, ICE_TX_DESC_CMD_RE = 0x0400, ICE_TX_DESC_CMD_RSVD3 = 0x0800, }; #define ICE_TXD_QW1_OFFSET_S 16 #define ICE_TXD_QW1_OFFSET_M (0x3FFFFULL << ICE_TXD_QW1_OFFSET_S) enum ice_tx_desc_len_fields { /* Note: These are predefined bit offsets */ ICE_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */ ICE_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */ ICE_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */ }; #define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S) #define ICE_TXD_QW1_IPLEN_M (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S) #define ICE_TXD_QW1_L4LEN_M (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S) /* Tx descriptor field limits in bytes */ #define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \ ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD) #define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \ ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD) #define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \ ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD) #define ICE_TXD_QW1_TX_BUF_SZ_S 34 #define ICE_TXD_QW1_TX_BUF_SZ_M (0x3FFFULL << ICE_TXD_QW1_TX_BUF_SZ_S) #define ICE_TXD_QW1_L2TAG1_S 48 #define ICE_TXD_QW1_L2TAG1_M (0xFFFFULL << 
ICE_TXD_QW1_L2TAG1_S) /* Context descriptors */ struct ice_tx_ctx_desc { __le32 tunneling_params; __le16 l2tag2; __le16 rsvd; __le64 qw1; }; #define ICE_TXD_CTX_QW1_DTYPE_S 0 #define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) #define ICE_TXD_CTX_QW1_CMD_S 4 #define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S) #define ICE_TXD_CTX_QW1_IPSEC_S 11 #define ICE_TXD_CTX_QW1_IPSEC_M (0x7FUL << ICE_TXD_CTX_QW1_IPSEC_S) #define ICE_TXD_CTX_QW1_TSO_LEN_S 30 #define ICE_TXD_CTX_QW1_TSO_LEN_M \ (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S) #define ICE_TXD_CTX_QW1_TSYN_S ICE_TXD_CTX_QW1_TSO_LEN_S #define ICE_TXD_CTX_QW1_TSYN_M ICE_TXD_CTX_QW1_TSO_LEN_M #define ICE_TXD_CTX_QW1_MSS_S 50 #define ICE_TXD_CTX_QW1_MSS_M (0x3FFFULL << ICE_TXD_CTX_QW1_MSS_S) #define ICE_TXD_CTX_MIN_MSS 64 #define ICE_TXD_CTX_MAX_MSS 9668 #define ICE_TXD_CTX_QW1_VSI_S 50 #define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S) enum ice_tx_ctx_desc_cmd_bits { ICE_TX_CTX_DESC_TSO = 0x01, ICE_TX_CTX_DESC_TSYN = 0x02, ICE_TX_CTX_DESC_IL2TAG2 = 0x04, ICE_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, ICE_TX_CTX_DESC_SWTCH_NOTAG = 0x00, ICE_TX_CTX_DESC_SWTCH_UPLINK = 0x10, ICE_TX_CTX_DESC_SWTCH_LOCAL = 0x20, ICE_TX_CTX_DESC_SWTCH_VSI = 0x30, ICE_TX_CTX_DESC_RESERVED = 0x40 }; enum ice_tx_ctx_desc_eipt_offload { ICE_TX_CTX_EIPT_NONE = 0x0, ICE_TX_CTX_EIPT_IPV6 = 0x1, ICE_TX_CTX_EIPT_IPV4_NO_CSUM = 0x2, ICE_TX_CTX_EIPT_IPV4 = 0x3 }; #define ICE_TXD_CTX_QW0_EIPT_S 0 #define ICE_TXD_CTX_QW0_EIPT_M (0x3ULL << ICE_TXD_CTX_QW0_EIPT_S) #define ICE_TXD_CTX_QW0_EIPLEN_S 2 #define ICE_TXD_CTX_QW0_EIPLEN_M (0x7FUL << ICE_TXD_CTX_QW0_EIPLEN_S) #define ICE_TXD_CTX_QW0_L4TUNT_S 9 #define ICE_TXD_CTX_QW0_L4TUNT_M (0x3ULL << ICE_TXD_CTX_QW0_L4TUNT_S) #define ICE_TXD_CTX_UDP_TUNNELING BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S) #define ICE_TXD_CTX_GRE_TUNNELING (0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S) #define ICE_TXD_CTX_QW0_EIP_NOINC_S 11 #define ICE_TXD_CTX_QW0_EIP_NOINC_M BIT_ULL(ICE_TXD_CTX_QW0_EIP_NOINC_S) #define ICE_TXD_CTX_EIP_NOINC_IPID_CONST ICE_TXD_CTX_QW0_EIP_NOINC_M #define ICE_TXD_CTX_QW0_NATLEN_S 12 #define ICE_TXD_CTX_QW0_NATLEN_M (0x7FULL << ICE_TXD_CTX_QW0_NATLEN_S) #define ICE_TXD_CTX_QW0_DECTTL_S 19 #define ICE_TXD_CTX_QW0_DECTTL_M (0xFULL << ICE_TXD_CTX_QW0_DECTTL_S) #define ICE_TXD_CTX_QW0_L4T_CS_S 23 #define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S) #define ICE_LAN_TXQ_MAX_QGRPS 127 #define ICE_LAN_TXQ_MAX_QDIS 1023 /* Tx queue context data * * The sizes of the variables may be larger than needed due to crossing byte * boundaries. If we do not have the width of the variable set to the correct * size then we could end up shifting bits off the top of the variable when the * variable is at the top of a byte and crosses over into the next byte. 
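The QW1 shift/mask macros above compose the second quadword of a Tx data descriptor. A sketch of that composition (the helper name and parameters are illustrative; td_offset packs the MACLEN/IPLEN/L4LEN fields of enum ice_tx_desc_len_fields, expressed in words and dwords respectively):

static inline __le64
ice_build_tx_qw1_sketch(u32 td_cmd, u32 td_offset, u32 size, u32 l2tag1)
{
	/* DTYPE sits at bit 0, so ICE_TX_DESC_DTYPE_DATA needs no shift */
	return (htole64(ICE_TX_DESC_DTYPE_DATA |
	    ((u64)td_cmd << ICE_TXD_QW1_CMD_S) |
	    ((u64)td_offset << ICE_TXD_QW1_OFFSET_S) |
	    ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
	    ((u64)l2tag1 << ICE_TXD_QW1_L2TAG1_S)));
}

The result is stored directly into ice_tx_desc.cmd_type_offset_bsz; hardware later overwrites the DTYPE field with DESC_DONE when it writes the descriptor back.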
*/ struct ice_tlan_ctx { #define ICE_TLAN_CTX_BASE_S 7 u64 base; /* base is defined in 128-byte units */ u8 port_num; u16 cgd_num; /* bigger than needed, see above for reason */ u8 pf_num; u16 vmvf_num; u8 vmvf_type; #define ICE_TLAN_CTX_VMVF_TYPE_VF 0 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1 #define ICE_TLAN_CTX_VMVF_TYPE_PF 2 u16 src_vsi; u8 tsyn_ena; u8 internal_usage_flag; u8 alt_vlan; u16 cpuid; /* bigger than needed, see above for reason */ u8 wb_mode; u8 tphrd_desc; u8 tphrd; u8 tphwr_desc; u16 cmpq_id; u16 qnum_in_func; u8 itr_notification_mode; u8 adjust_prof_id; u32 qlen; /* bigger than needed, see above for reason */ u8 quanta_prof_idx; u8 tso_ena; u16 tso_qnum; u8 legacy_int; u8 drop_ena; u8 cache_prof_idx; u8 pkt_shaper_prof_idx; u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */ }; /* LAN Tx Completion Queue data */ #pragma pack(1) struct ice_tx_cmpltnq { u16 txq_id; u8 generation; u16 tx_head; u8 cmpl_type; }; #pragma pack() /* FIXME: move to a .c file that references this variable */ /* LAN Tx Completion Queue data info */ static const struct ice_ctx_ele ice_tx_cmpltnq_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tx_cmpltnq, txq_id, 14, 0), ICE_CTX_STORE(ice_tx_cmpltnq, generation, 1, 15), ICE_CTX_STORE(ice_tx_cmpltnq, tx_head, 13, 16), ICE_CTX_STORE(ice_tx_cmpltnq, cmpl_type, 3, 29), { 0 } }; /* LAN Tx Completion Queue Context */ #pragma pack(1) struct ice_tx_cmpltnq_ctx { u64 base; +#define ICE_TX_CMPLTNQ_CTX_BASE_S 7 u32 q_len; #define ICE_TX_CMPLTNQ_CTX_Q_LEN_S 4 u8 generation; u32 wrt_ptr; u8 pf_num; u16 vmvf_num; u8 vmvf_type; +#define ICE_TX_CMPLTNQ_CTX_VMVF_TYPE_VF 0 +#define ICE_TX_CMPLTNQ_CTX_VMVF_TYPE_VMQ 1 +#define ICE_TX_CMPLTNQ_CTX_VMVF_TYPE_PF 2 u8 tph_desc_wr; u8 cpuid; u32 cmpltn_cache[16]; }; #pragma pack() /* LAN Tx Doorbell Descriptor Format */ struct ice_tx_drbell_fmt { u16 txq_id; u8 dd; u8 rs; u32 db; }; /* FIXME: move to a .c file that references this variable */ /* LAN Tx Doorbell Descriptor format info */ static const struct ice_ctx_ele ice_tx_drbell_fmt_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tx_drbell_fmt, txq_id, 14, 0), ICE_CTX_STORE(ice_tx_drbell_fmt, dd, 1, 14), ICE_CTX_STORE(ice_tx_drbell_fmt, rs, 1, 15), ICE_CTX_STORE(ice_tx_drbell_fmt, db, 32, 32), { 0 } }; /* LAN Tx Doorbell Queue Context */ #pragma pack(1) struct ice_tx_drbell_q_ctx { u64 base; +#define ICE_TX_DRBELL_Q_CTX_BASE_S 7 u16 ring_len; +#define ICE_TX_DRBELL_Q_CTX_RING_LEN_S 4 u8 pf_num; u16 vf_num; u8 vmvf_type; +#define ICE_TX_DRBELL_Q_CTX_VMVF_TYPE_VF 0 +#define ICE_TX_DRBELL_Q_CTX_VMVF_TYPE_VMQ 1 +#define ICE_TX_DRBELL_Q_CTX_VMVF_TYPE_PF 2 u8 cpuid; u8 tph_desc_rd; u8 tph_desc_wr; u8 db_q_en; u16 rd_head; u16 rd_tail; }; #pragma pack() /* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. * * Macros are used to shorten the table lines and make this table human * readable. * * We store the PTYPE in the top byte of the bit field - this is just so that * we can check that the table doesn't have a row missing, as the index into * the table should be the PTYPE. 
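Following the decode workflow spelled out in the comment that continues below, a consumer must validate the known bit before trusting any other decoded field. An illustrative predicate (the helper name is hypothetical; ice_decode_rx_desc_ptype() is defined after the table):

static inline bool
ice_ptype_is_inner_tcp_sketch(u16 ptype)
{
	struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

	/* Unused table rows (ICE_PTT_UNUSED_ENTRY) have known == 0 */
	if (!decoded.known)
		return (false);

	/* Pure L2 packets are classified via enum ice_rx_l2_ptype instead */
	if (decoded.outer_ip != ICE_RX_PTYPE_OUTER_IP)
		return (false);

	return (decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP);
}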
* * Typical work flow: * * IF NOT ice_ptype_lkup[ptype].known * THEN * Packet is unknown * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP * Use the rest of the fields to look at the tunnels, inner protocols, etc * ELSE * Use the enum ice_rx_l2_ptype to decode the packet type * ENDIF */ /* macro to make the table lines short */ #define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ { PTYPE, \ 1, \ ICE_RX_PTYPE_OUTER_##OUTER_IP, \ ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \ ICE_RX_PTYPE_##OUTER_FRAG, \ ICE_RX_PTYPE_TUNNEL_##T, \ ICE_RX_PTYPE_TUNNEL_END_##TE, \ ICE_RX_PTYPE_##TEF, \ ICE_RX_PTYPE_INNER_PROT_##I, \ ICE_RX_PTYPE_PAYLOAD_LAYER_##PL } #define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* shorter macros makes the table fit but are terse */ #define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG #define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG /* Lookup table mapping the HW PTYPE to the bit field for decoding */ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = { /* L2 Packet types */ ICE_PTT_UNUSED_ENTRY(0), ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(2), ICE_PTT_UNUSED_ENTRY(3), ICE_PTT_UNUSED_ENTRY(4), ICE_PTT_UNUSED_ENTRY(5), ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), ICE_PTT_UNUSED_ENTRY(8), ICE_PTT_UNUSED_ENTRY(9), ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), ICE_PTT_UNUSED_ENTRY(12), ICE_PTT_UNUSED_ENTRY(13), ICE_PTT_UNUSED_ENTRY(14), ICE_PTT_UNUSED_ENTRY(15), ICE_PTT_UNUSED_ENTRY(16), ICE_PTT_UNUSED_ENTRY(17), ICE_PTT_UNUSED_ENTRY(18), ICE_PTT_UNUSED_ENTRY(19), ICE_PTT_UNUSED_ENTRY(20), ICE_PTT_UNUSED_ENTRY(21), /* Non Tunneled IPv4 */ ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(25), ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv4 --> IPv4 */ ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(32), ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv4 --> IPv6 */ ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(39), ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT */ ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> IPv4 */ ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(47), ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> IPv6 */ 
ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(54), ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC */ ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(62), ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(69), ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv4 --> GRE/NAT --> MAC/VLAN */ ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(77), ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(84), ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* Non Tunneled IPv6 */ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(91), ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), /* IPv6 --> IPv4 */ ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(98), ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), /* IPv6 --> IPv6 */ ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), 
ICE_PTT_UNUSED_ENTRY(105), ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT */ ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> IPv4 */ ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(113), ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> IPv6 */ ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(120), ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC */ ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(128), ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(135), ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN */ ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(143), ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), ICE_PTT_UNUSED_ENTRY(150), ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), /* unused entries */ ICE_PTT_UNUSED_ENTRY(154), ICE_PTT_UNUSED_ENTRY(155), ICE_PTT_UNUSED_ENTRY(156), ICE_PTT_UNUSED_ENTRY(157), ICE_PTT_UNUSED_ENTRY(158), ICE_PTT_UNUSED_ENTRY(159), ICE_PTT_UNUSED_ENTRY(160), ICE_PTT_UNUSED_ENTRY(161), 
ICE_PTT_UNUSED_ENTRY(162), ICE_PTT_UNUSED_ENTRY(163), ICE_PTT_UNUSED_ENTRY(164), ICE_PTT_UNUSED_ENTRY(165), ICE_PTT_UNUSED_ENTRY(166), ICE_PTT_UNUSED_ENTRY(167), ICE_PTT_UNUSED_ENTRY(168), ICE_PTT_UNUSED_ENTRY(169), ICE_PTT_UNUSED_ENTRY(170), ICE_PTT_UNUSED_ENTRY(171), ICE_PTT_UNUSED_ENTRY(172), ICE_PTT_UNUSED_ENTRY(173), ICE_PTT_UNUSED_ENTRY(174), ICE_PTT_UNUSED_ENTRY(175), ICE_PTT_UNUSED_ENTRY(176), ICE_PTT_UNUSED_ENTRY(177), ICE_PTT_UNUSED_ENTRY(178), ICE_PTT_UNUSED_ENTRY(179), ICE_PTT_UNUSED_ENTRY(180), ICE_PTT_UNUSED_ENTRY(181), ICE_PTT_UNUSED_ENTRY(182), ICE_PTT_UNUSED_ENTRY(183), ICE_PTT_UNUSED_ENTRY(184), ICE_PTT_UNUSED_ENTRY(185), ICE_PTT_UNUSED_ENTRY(186), ICE_PTT_UNUSED_ENTRY(187), ICE_PTT_UNUSED_ENTRY(188), ICE_PTT_UNUSED_ENTRY(189), ICE_PTT_UNUSED_ENTRY(190), ICE_PTT_UNUSED_ENTRY(191), ICE_PTT_UNUSED_ENTRY(192), ICE_PTT_UNUSED_ENTRY(193), ICE_PTT_UNUSED_ENTRY(194), ICE_PTT_UNUSED_ENTRY(195), ICE_PTT_UNUSED_ENTRY(196), ICE_PTT_UNUSED_ENTRY(197), ICE_PTT_UNUSED_ENTRY(198), ICE_PTT_UNUSED_ENTRY(199), ICE_PTT_UNUSED_ENTRY(200), ICE_PTT_UNUSED_ENTRY(201), ICE_PTT_UNUSED_ENTRY(202), ICE_PTT_UNUSED_ENTRY(203), ICE_PTT_UNUSED_ENTRY(204), ICE_PTT_UNUSED_ENTRY(205), ICE_PTT_UNUSED_ENTRY(206), ICE_PTT_UNUSED_ENTRY(207), ICE_PTT_UNUSED_ENTRY(208), ICE_PTT_UNUSED_ENTRY(209), ICE_PTT_UNUSED_ENTRY(210), ICE_PTT_UNUSED_ENTRY(211), ICE_PTT_UNUSED_ENTRY(212), ICE_PTT_UNUSED_ENTRY(213), ICE_PTT_UNUSED_ENTRY(214), ICE_PTT_UNUSED_ENTRY(215), ICE_PTT_UNUSED_ENTRY(216), ICE_PTT_UNUSED_ENTRY(217), ICE_PTT_UNUSED_ENTRY(218), ICE_PTT_UNUSED_ENTRY(219), ICE_PTT_UNUSED_ENTRY(220), ICE_PTT_UNUSED_ENTRY(221), ICE_PTT_UNUSED_ENTRY(222), ICE_PTT_UNUSED_ENTRY(223), ICE_PTT_UNUSED_ENTRY(224), ICE_PTT_UNUSED_ENTRY(225), ICE_PTT_UNUSED_ENTRY(226), ICE_PTT_UNUSED_ENTRY(227), ICE_PTT_UNUSED_ENTRY(228), ICE_PTT_UNUSED_ENTRY(229), ICE_PTT_UNUSED_ENTRY(230), ICE_PTT_UNUSED_ENTRY(231), ICE_PTT_UNUSED_ENTRY(232), ICE_PTT_UNUSED_ENTRY(233), ICE_PTT_UNUSED_ENTRY(234), ICE_PTT_UNUSED_ENTRY(235), ICE_PTT_UNUSED_ENTRY(236), ICE_PTT_UNUSED_ENTRY(237), ICE_PTT_UNUSED_ENTRY(238), ICE_PTT_UNUSED_ENTRY(239), ICE_PTT_UNUSED_ENTRY(240), ICE_PTT_UNUSED_ENTRY(241), ICE_PTT_UNUSED_ENTRY(242), ICE_PTT_UNUSED_ENTRY(243), ICE_PTT_UNUSED_ENTRY(244), ICE_PTT_UNUSED_ENTRY(245), ICE_PTT_UNUSED_ENTRY(246), ICE_PTT_UNUSED_ENTRY(247), ICE_PTT_UNUSED_ENTRY(248), ICE_PTT_UNUSED_ENTRY(249), ICE_PTT_UNUSED_ENTRY(250), ICE_PTT_UNUSED_ENTRY(251), ICE_PTT_UNUSED_ENTRY(252), ICE_PTT_UNUSED_ENTRY(253), ICE_PTT_UNUSED_ENTRY(254), ICE_PTT_UNUSED_ENTRY(255), ICE_PTT_UNUSED_ENTRY(256), ICE_PTT_UNUSED_ENTRY(257), ICE_PTT_UNUSED_ENTRY(258), ICE_PTT_UNUSED_ENTRY(259), ICE_PTT_UNUSED_ENTRY(260), ICE_PTT_UNUSED_ENTRY(261), ICE_PTT_UNUSED_ENTRY(262), ICE_PTT_UNUSED_ENTRY(263), ICE_PTT_UNUSED_ENTRY(264), ICE_PTT_UNUSED_ENTRY(265), ICE_PTT_UNUSED_ENTRY(266), ICE_PTT_UNUSED_ENTRY(267), ICE_PTT_UNUSED_ENTRY(268), ICE_PTT_UNUSED_ENTRY(269), ICE_PTT_UNUSED_ENTRY(270), ICE_PTT_UNUSED_ENTRY(271), ICE_PTT_UNUSED_ENTRY(272), ICE_PTT_UNUSED_ENTRY(273), ICE_PTT_UNUSED_ENTRY(274), ICE_PTT_UNUSED_ENTRY(275), ICE_PTT_UNUSED_ENTRY(276), ICE_PTT_UNUSED_ENTRY(277), ICE_PTT_UNUSED_ENTRY(278), ICE_PTT_UNUSED_ENTRY(279), ICE_PTT_UNUSED_ENTRY(280), ICE_PTT_UNUSED_ENTRY(281), ICE_PTT_UNUSED_ENTRY(282), ICE_PTT_UNUSED_ENTRY(283), ICE_PTT_UNUSED_ENTRY(284), ICE_PTT_UNUSED_ENTRY(285), ICE_PTT_UNUSED_ENTRY(286), ICE_PTT_UNUSED_ENTRY(287), ICE_PTT_UNUSED_ENTRY(288), ICE_PTT_UNUSED_ENTRY(289), ICE_PTT_UNUSED_ENTRY(290), ICE_PTT_UNUSED_ENTRY(291), ICE_PTT_UNUSED_ENTRY(292), 
ICE_PTT_UNUSED_ENTRY(293), ICE_PTT_UNUSED_ENTRY(294), ICE_PTT_UNUSED_ENTRY(295), ICE_PTT_UNUSED_ENTRY(296), ICE_PTT_UNUSED_ENTRY(297), ICE_PTT_UNUSED_ENTRY(298), ICE_PTT_UNUSED_ENTRY(299), ICE_PTT_UNUSED_ENTRY(300), ICE_PTT_UNUSED_ENTRY(301), ICE_PTT_UNUSED_ENTRY(302), ICE_PTT_UNUSED_ENTRY(303), ICE_PTT_UNUSED_ENTRY(304), ICE_PTT_UNUSED_ENTRY(305), ICE_PTT_UNUSED_ENTRY(306), ICE_PTT_UNUSED_ENTRY(307), ICE_PTT_UNUSED_ENTRY(308), ICE_PTT_UNUSED_ENTRY(309), ICE_PTT_UNUSED_ENTRY(310), ICE_PTT_UNUSED_ENTRY(311), ICE_PTT_UNUSED_ENTRY(312), ICE_PTT_UNUSED_ENTRY(313), ICE_PTT_UNUSED_ENTRY(314), ICE_PTT_UNUSED_ENTRY(315), ICE_PTT_UNUSED_ENTRY(316), ICE_PTT_UNUSED_ENTRY(317), ICE_PTT_UNUSED_ENTRY(318), ICE_PTT_UNUSED_ENTRY(319), ICE_PTT_UNUSED_ENTRY(320), ICE_PTT_UNUSED_ENTRY(321), ICE_PTT_UNUSED_ENTRY(322), ICE_PTT_UNUSED_ENTRY(323), ICE_PTT_UNUSED_ENTRY(324), ICE_PTT_UNUSED_ENTRY(325), ICE_PTT_UNUSED_ENTRY(326), ICE_PTT_UNUSED_ENTRY(327), ICE_PTT_UNUSED_ENTRY(328), ICE_PTT_UNUSED_ENTRY(329), ICE_PTT_UNUSED_ENTRY(330), ICE_PTT_UNUSED_ENTRY(331), ICE_PTT_UNUSED_ENTRY(332), ICE_PTT_UNUSED_ENTRY(333), ICE_PTT_UNUSED_ENTRY(334), ICE_PTT_UNUSED_ENTRY(335), ICE_PTT_UNUSED_ENTRY(336), ICE_PTT_UNUSED_ENTRY(337), ICE_PTT_UNUSED_ENTRY(338), ICE_PTT_UNUSED_ENTRY(339), ICE_PTT_UNUSED_ENTRY(340), ICE_PTT_UNUSED_ENTRY(341), ICE_PTT_UNUSED_ENTRY(342), ICE_PTT_UNUSED_ENTRY(343), ICE_PTT_UNUSED_ENTRY(344), ICE_PTT_UNUSED_ENTRY(345), ICE_PTT_UNUSED_ENTRY(346), ICE_PTT_UNUSED_ENTRY(347), ICE_PTT_UNUSED_ENTRY(348), ICE_PTT_UNUSED_ENTRY(349), ICE_PTT_UNUSED_ENTRY(350), ICE_PTT_UNUSED_ENTRY(351), ICE_PTT_UNUSED_ENTRY(352), ICE_PTT_UNUSED_ENTRY(353), ICE_PTT_UNUSED_ENTRY(354), ICE_PTT_UNUSED_ENTRY(355), ICE_PTT_UNUSED_ENTRY(356), ICE_PTT_UNUSED_ENTRY(357), ICE_PTT_UNUSED_ENTRY(358), ICE_PTT_UNUSED_ENTRY(359), ICE_PTT_UNUSED_ENTRY(360), ICE_PTT_UNUSED_ENTRY(361), ICE_PTT_UNUSED_ENTRY(362), ICE_PTT_UNUSED_ENTRY(363), ICE_PTT_UNUSED_ENTRY(364), ICE_PTT_UNUSED_ENTRY(365), ICE_PTT_UNUSED_ENTRY(366), ICE_PTT_UNUSED_ENTRY(367), ICE_PTT_UNUSED_ENTRY(368), ICE_PTT_UNUSED_ENTRY(369), ICE_PTT_UNUSED_ENTRY(370), ICE_PTT_UNUSED_ENTRY(371), ICE_PTT_UNUSED_ENTRY(372), ICE_PTT_UNUSED_ENTRY(373), ICE_PTT_UNUSED_ENTRY(374), ICE_PTT_UNUSED_ENTRY(375), ICE_PTT_UNUSED_ENTRY(376), ICE_PTT_UNUSED_ENTRY(377), ICE_PTT_UNUSED_ENTRY(378), ICE_PTT_UNUSED_ENTRY(379), ICE_PTT_UNUSED_ENTRY(380), ICE_PTT_UNUSED_ENTRY(381), ICE_PTT_UNUSED_ENTRY(382), ICE_PTT_UNUSED_ENTRY(383), ICE_PTT_UNUSED_ENTRY(384), ICE_PTT_UNUSED_ENTRY(385), ICE_PTT_UNUSED_ENTRY(386), ICE_PTT_UNUSED_ENTRY(387), ICE_PTT_UNUSED_ENTRY(388), ICE_PTT_UNUSED_ENTRY(389), ICE_PTT_UNUSED_ENTRY(390), ICE_PTT_UNUSED_ENTRY(391), ICE_PTT_UNUSED_ENTRY(392), ICE_PTT_UNUSED_ENTRY(393), ICE_PTT_UNUSED_ENTRY(394), ICE_PTT_UNUSED_ENTRY(395), ICE_PTT_UNUSED_ENTRY(396), ICE_PTT_UNUSED_ENTRY(397), ICE_PTT_UNUSED_ENTRY(398), ICE_PTT_UNUSED_ENTRY(399), ICE_PTT_UNUSED_ENTRY(400), ICE_PTT_UNUSED_ENTRY(401), ICE_PTT_UNUSED_ENTRY(402), ICE_PTT_UNUSED_ENTRY(403), ICE_PTT_UNUSED_ENTRY(404), ICE_PTT_UNUSED_ENTRY(405), ICE_PTT_UNUSED_ENTRY(406), ICE_PTT_UNUSED_ENTRY(407), ICE_PTT_UNUSED_ENTRY(408), ICE_PTT_UNUSED_ENTRY(409), ICE_PTT_UNUSED_ENTRY(410), ICE_PTT_UNUSED_ENTRY(411), ICE_PTT_UNUSED_ENTRY(412), ICE_PTT_UNUSED_ENTRY(413), ICE_PTT_UNUSED_ENTRY(414), ICE_PTT_UNUSED_ENTRY(415), ICE_PTT_UNUSED_ENTRY(416), ICE_PTT_UNUSED_ENTRY(417), ICE_PTT_UNUSED_ENTRY(418), ICE_PTT_UNUSED_ENTRY(419), ICE_PTT_UNUSED_ENTRY(420), ICE_PTT_UNUSED_ENTRY(421), ICE_PTT_UNUSED_ENTRY(422), ICE_PTT_UNUSED_ENTRY(423), 
ICE_PTT_UNUSED_ENTRY(424), ICE_PTT_UNUSED_ENTRY(425), ICE_PTT_UNUSED_ENTRY(426), ICE_PTT_UNUSED_ENTRY(427), ICE_PTT_UNUSED_ENTRY(428), ICE_PTT_UNUSED_ENTRY(429), ICE_PTT_UNUSED_ENTRY(430), ICE_PTT_UNUSED_ENTRY(431), ICE_PTT_UNUSED_ENTRY(432), ICE_PTT_UNUSED_ENTRY(433), ICE_PTT_UNUSED_ENTRY(434), ICE_PTT_UNUSED_ENTRY(435), ICE_PTT_UNUSED_ENTRY(436), ICE_PTT_UNUSED_ENTRY(437), ICE_PTT_UNUSED_ENTRY(438), ICE_PTT_UNUSED_ENTRY(439), ICE_PTT_UNUSED_ENTRY(440), ICE_PTT_UNUSED_ENTRY(441), ICE_PTT_UNUSED_ENTRY(442), ICE_PTT_UNUSED_ENTRY(443), ICE_PTT_UNUSED_ENTRY(444), ICE_PTT_UNUSED_ENTRY(445), ICE_PTT_UNUSED_ENTRY(446), ICE_PTT_UNUSED_ENTRY(447), ICE_PTT_UNUSED_ENTRY(448), ICE_PTT_UNUSED_ENTRY(449), ICE_PTT_UNUSED_ENTRY(450), ICE_PTT_UNUSED_ENTRY(451), ICE_PTT_UNUSED_ENTRY(452), ICE_PTT_UNUSED_ENTRY(453), ICE_PTT_UNUSED_ENTRY(454), ICE_PTT_UNUSED_ENTRY(455), ICE_PTT_UNUSED_ENTRY(456), ICE_PTT_UNUSED_ENTRY(457), ICE_PTT_UNUSED_ENTRY(458), ICE_PTT_UNUSED_ENTRY(459), ICE_PTT_UNUSED_ENTRY(460), ICE_PTT_UNUSED_ENTRY(461), ICE_PTT_UNUSED_ENTRY(462), ICE_PTT_UNUSED_ENTRY(463), ICE_PTT_UNUSED_ENTRY(464), ICE_PTT_UNUSED_ENTRY(465), ICE_PTT_UNUSED_ENTRY(466), ICE_PTT_UNUSED_ENTRY(467), ICE_PTT_UNUSED_ENTRY(468), ICE_PTT_UNUSED_ENTRY(469), ICE_PTT_UNUSED_ENTRY(470), ICE_PTT_UNUSED_ENTRY(471), ICE_PTT_UNUSED_ENTRY(472), ICE_PTT_UNUSED_ENTRY(473), ICE_PTT_UNUSED_ENTRY(474), ICE_PTT_UNUSED_ENTRY(475), ICE_PTT_UNUSED_ENTRY(476), ICE_PTT_UNUSED_ENTRY(477), ICE_PTT_UNUSED_ENTRY(478), ICE_PTT_UNUSED_ENTRY(479), ICE_PTT_UNUSED_ENTRY(480), ICE_PTT_UNUSED_ENTRY(481), ICE_PTT_UNUSED_ENTRY(482), ICE_PTT_UNUSED_ENTRY(483), ICE_PTT_UNUSED_ENTRY(484), ICE_PTT_UNUSED_ENTRY(485), ICE_PTT_UNUSED_ENTRY(486), ICE_PTT_UNUSED_ENTRY(487), ICE_PTT_UNUSED_ENTRY(488), ICE_PTT_UNUSED_ENTRY(489), ICE_PTT_UNUSED_ENTRY(490), ICE_PTT_UNUSED_ENTRY(491), ICE_PTT_UNUSED_ENTRY(492), ICE_PTT_UNUSED_ENTRY(493), ICE_PTT_UNUSED_ENTRY(494), ICE_PTT_UNUSED_ENTRY(495), ICE_PTT_UNUSED_ENTRY(496), ICE_PTT_UNUSED_ENTRY(497), ICE_PTT_UNUSED_ENTRY(498), ICE_PTT_UNUSED_ENTRY(499), ICE_PTT_UNUSED_ENTRY(500), ICE_PTT_UNUSED_ENTRY(501), ICE_PTT_UNUSED_ENTRY(502), ICE_PTT_UNUSED_ENTRY(503), ICE_PTT_UNUSED_ENTRY(504), ICE_PTT_UNUSED_ENTRY(505), ICE_PTT_UNUSED_ENTRY(506), ICE_PTT_UNUSED_ENTRY(507), ICE_PTT_UNUSED_ENTRY(508), ICE_PTT_UNUSED_ENTRY(509), ICE_PTT_UNUSED_ENTRY(510), ICE_PTT_UNUSED_ENTRY(511), ICE_PTT_UNUSED_ENTRY(512), ICE_PTT_UNUSED_ENTRY(513), ICE_PTT_UNUSED_ENTRY(514), ICE_PTT_UNUSED_ENTRY(515), ICE_PTT_UNUSED_ENTRY(516), ICE_PTT_UNUSED_ENTRY(517), ICE_PTT_UNUSED_ENTRY(518), ICE_PTT_UNUSED_ENTRY(519), ICE_PTT_UNUSED_ENTRY(520), ICE_PTT_UNUSED_ENTRY(521), ICE_PTT_UNUSED_ENTRY(522), ICE_PTT_UNUSED_ENTRY(523), ICE_PTT_UNUSED_ENTRY(524), ICE_PTT_UNUSED_ENTRY(525), ICE_PTT_UNUSED_ENTRY(526), ICE_PTT_UNUSED_ENTRY(527), ICE_PTT_UNUSED_ENTRY(528), ICE_PTT_UNUSED_ENTRY(529), ICE_PTT_UNUSED_ENTRY(530), ICE_PTT_UNUSED_ENTRY(531), ICE_PTT_UNUSED_ENTRY(532), ICE_PTT_UNUSED_ENTRY(533), ICE_PTT_UNUSED_ENTRY(534), ICE_PTT_UNUSED_ENTRY(535), ICE_PTT_UNUSED_ENTRY(536), ICE_PTT_UNUSED_ENTRY(537), ICE_PTT_UNUSED_ENTRY(538), ICE_PTT_UNUSED_ENTRY(539), ICE_PTT_UNUSED_ENTRY(540), ICE_PTT_UNUSED_ENTRY(541), ICE_PTT_UNUSED_ENTRY(542), ICE_PTT_UNUSED_ENTRY(543), ICE_PTT_UNUSED_ENTRY(544), ICE_PTT_UNUSED_ENTRY(545), ICE_PTT_UNUSED_ENTRY(546), ICE_PTT_UNUSED_ENTRY(547), ICE_PTT_UNUSED_ENTRY(548), ICE_PTT_UNUSED_ENTRY(549), ICE_PTT_UNUSED_ENTRY(550), ICE_PTT_UNUSED_ENTRY(551), ICE_PTT_UNUSED_ENTRY(552), ICE_PTT_UNUSED_ENTRY(553), ICE_PTT_UNUSED_ENTRY(554), 
ICE_PTT_UNUSED_ENTRY(555), ICE_PTT_UNUSED_ENTRY(556), ICE_PTT_UNUSED_ENTRY(557), ICE_PTT_UNUSED_ENTRY(558), ICE_PTT_UNUSED_ENTRY(559), ICE_PTT_UNUSED_ENTRY(560), ICE_PTT_UNUSED_ENTRY(561), ICE_PTT_UNUSED_ENTRY(562), ICE_PTT_UNUSED_ENTRY(563), ICE_PTT_UNUSED_ENTRY(564), ICE_PTT_UNUSED_ENTRY(565), ICE_PTT_UNUSED_ENTRY(566), ICE_PTT_UNUSED_ENTRY(567), ICE_PTT_UNUSED_ENTRY(568), ICE_PTT_UNUSED_ENTRY(569), ICE_PTT_UNUSED_ENTRY(570), ICE_PTT_UNUSED_ENTRY(571), ICE_PTT_UNUSED_ENTRY(572), ICE_PTT_UNUSED_ENTRY(573), ICE_PTT_UNUSED_ENTRY(574), ICE_PTT_UNUSED_ENTRY(575), ICE_PTT_UNUSED_ENTRY(576), ICE_PTT_UNUSED_ENTRY(577), ICE_PTT_UNUSED_ENTRY(578), ICE_PTT_UNUSED_ENTRY(579), ICE_PTT_UNUSED_ENTRY(580), ICE_PTT_UNUSED_ENTRY(581), ICE_PTT_UNUSED_ENTRY(582), ICE_PTT_UNUSED_ENTRY(583), ICE_PTT_UNUSED_ENTRY(584), ICE_PTT_UNUSED_ENTRY(585), ICE_PTT_UNUSED_ENTRY(586), ICE_PTT_UNUSED_ENTRY(587), ICE_PTT_UNUSED_ENTRY(588), ICE_PTT_UNUSED_ENTRY(589), ICE_PTT_UNUSED_ENTRY(590), ICE_PTT_UNUSED_ENTRY(591), ICE_PTT_UNUSED_ENTRY(592), ICE_PTT_UNUSED_ENTRY(593), ICE_PTT_UNUSED_ENTRY(594), ICE_PTT_UNUSED_ENTRY(595), ICE_PTT_UNUSED_ENTRY(596), ICE_PTT_UNUSED_ENTRY(597), ICE_PTT_UNUSED_ENTRY(598), ICE_PTT_UNUSED_ENTRY(599), ICE_PTT_UNUSED_ENTRY(600), ICE_PTT_UNUSED_ENTRY(601), ICE_PTT_UNUSED_ENTRY(602), ICE_PTT_UNUSED_ENTRY(603), ICE_PTT_UNUSED_ENTRY(604), ICE_PTT_UNUSED_ENTRY(605), ICE_PTT_UNUSED_ENTRY(606), ICE_PTT_UNUSED_ENTRY(607), ICE_PTT_UNUSED_ENTRY(608), ICE_PTT_UNUSED_ENTRY(609), ICE_PTT_UNUSED_ENTRY(610), ICE_PTT_UNUSED_ENTRY(611), ICE_PTT_UNUSED_ENTRY(612), ICE_PTT_UNUSED_ENTRY(613), ICE_PTT_UNUSED_ENTRY(614), ICE_PTT_UNUSED_ENTRY(615), ICE_PTT_UNUSED_ENTRY(616), ICE_PTT_UNUSED_ENTRY(617), ICE_PTT_UNUSED_ENTRY(618), ICE_PTT_UNUSED_ENTRY(619), ICE_PTT_UNUSED_ENTRY(620), ICE_PTT_UNUSED_ENTRY(621), ICE_PTT_UNUSED_ENTRY(622), ICE_PTT_UNUSED_ENTRY(623), ICE_PTT_UNUSED_ENTRY(624), ICE_PTT_UNUSED_ENTRY(625), ICE_PTT_UNUSED_ENTRY(626), ICE_PTT_UNUSED_ENTRY(627), ICE_PTT_UNUSED_ENTRY(628), ICE_PTT_UNUSED_ENTRY(629), ICE_PTT_UNUSED_ENTRY(630), ICE_PTT_UNUSED_ENTRY(631), ICE_PTT_UNUSED_ENTRY(632), ICE_PTT_UNUSED_ENTRY(633), ICE_PTT_UNUSED_ENTRY(634), ICE_PTT_UNUSED_ENTRY(635), ICE_PTT_UNUSED_ENTRY(636), ICE_PTT_UNUSED_ENTRY(637), ICE_PTT_UNUSED_ENTRY(638), ICE_PTT_UNUSED_ENTRY(639), ICE_PTT_UNUSED_ENTRY(640), ICE_PTT_UNUSED_ENTRY(641), ICE_PTT_UNUSED_ENTRY(642), ICE_PTT_UNUSED_ENTRY(643), ICE_PTT_UNUSED_ENTRY(644), ICE_PTT_UNUSED_ENTRY(645), ICE_PTT_UNUSED_ENTRY(646), ICE_PTT_UNUSED_ENTRY(647), ICE_PTT_UNUSED_ENTRY(648), ICE_PTT_UNUSED_ENTRY(649), ICE_PTT_UNUSED_ENTRY(650), ICE_PTT_UNUSED_ENTRY(651), ICE_PTT_UNUSED_ENTRY(652), ICE_PTT_UNUSED_ENTRY(653), ICE_PTT_UNUSED_ENTRY(654), ICE_PTT_UNUSED_ENTRY(655), ICE_PTT_UNUSED_ENTRY(656), ICE_PTT_UNUSED_ENTRY(657), ICE_PTT_UNUSED_ENTRY(658), ICE_PTT_UNUSED_ENTRY(659), ICE_PTT_UNUSED_ENTRY(660), ICE_PTT_UNUSED_ENTRY(661), ICE_PTT_UNUSED_ENTRY(662), ICE_PTT_UNUSED_ENTRY(663), ICE_PTT_UNUSED_ENTRY(664), ICE_PTT_UNUSED_ENTRY(665), ICE_PTT_UNUSED_ENTRY(666), ICE_PTT_UNUSED_ENTRY(667), ICE_PTT_UNUSED_ENTRY(668), ICE_PTT_UNUSED_ENTRY(669), ICE_PTT_UNUSED_ENTRY(670), ICE_PTT_UNUSED_ENTRY(671), ICE_PTT_UNUSED_ENTRY(672), ICE_PTT_UNUSED_ENTRY(673), ICE_PTT_UNUSED_ENTRY(674), ICE_PTT_UNUSED_ENTRY(675), ICE_PTT_UNUSED_ENTRY(676), ICE_PTT_UNUSED_ENTRY(677), ICE_PTT_UNUSED_ENTRY(678), ICE_PTT_UNUSED_ENTRY(679), ICE_PTT_UNUSED_ENTRY(680), ICE_PTT_UNUSED_ENTRY(681), ICE_PTT_UNUSED_ENTRY(682), ICE_PTT_UNUSED_ENTRY(683), ICE_PTT_UNUSED_ENTRY(684), ICE_PTT_UNUSED_ENTRY(685), 
ICE_PTT_UNUSED_ENTRY(686), ICE_PTT_UNUSED_ENTRY(687), ICE_PTT_UNUSED_ENTRY(688), ICE_PTT_UNUSED_ENTRY(689), ICE_PTT_UNUSED_ENTRY(690), ICE_PTT_UNUSED_ENTRY(691), ICE_PTT_UNUSED_ENTRY(692), ICE_PTT_UNUSED_ENTRY(693), ICE_PTT_UNUSED_ENTRY(694), ICE_PTT_UNUSED_ENTRY(695), ICE_PTT_UNUSED_ENTRY(696), ICE_PTT_UNUSED_ENTRY(697), ICE_PTT_UNUSED_ENTRY(698), ICE_PTT_UNUSED_ENTRY(699), ICE_PTT_UNUSED_ENTRY(700), ICE_PTT_UNUSED_ENTRY(701), ICE_PTT_UNUSED_ENTRY(702), ICE_PTT_UNUSED_ENTRY(703), ICE_PTT_UNUSED_ENTRY(704), ICE_PTT_UNUSED_ENTRY(705), ICE_PTT_UNUSED_ENTRY(706), ICE_PTT_UNUSED_ENTRY(707), ICE_PTT_UNUSED_ENTRY(708), ICE_PTT_UNUSED_ENTRY(709), ICE_PTT_UNUSED_ENTRY(710), ICE_PTT_UNUSED_ENTRY(711), ICE_PTT_UNUSED_ENTRY(712), ICE_PTT_UNUSED_ENTRY(713), ICE_PTT_UNUSED_ENTRY(714), ICE_PTT_UNUSED_ENTRY(715), ICE_PTT_UNUSED_ENTRY(716), ICE_PTT_UNUSED_ENTRY(717), ICE_PTT_UNUSED_ENTRY(718), ICE_PTT_UNUSED_ENTRY(719), ICE_PTT_UNUSED_ENTRY(720), ICE_PTT_UNUSED_ENTRY(721), ICE_PTT_UNUSED_ENTRY(722), ICE_PTT_UNUSED_ENTRY(723), ICE_PTT_UNUSED_ENTRY(724), ICE_PTT_UNUSED_ENTRY(725), ICE_PTT_UNUSED_ENTRY(726), ICE_PTT_UNUSED_ENTRY(727), ICE_PTT_UNUSED_ENTRY(728), ICE_PTT_UNUSED_ENTRY(729), ICE_PTT_UNUSED_ENTRY(730), ICE_PTT_UNUSED_ENTRY(731), ICE_PTT_UNUSED_ENTRY(732), ICE_PTT_UNUSED_ENTRY(733), ICE_PTT_UNUSED_ENTRY(734), ICE_PTT_UNUSED_ENTRY(735), ICE_PTT_UNUSED_ENTRY(736), ICE_PTT_UNUSED_ENTRY(737), ICE_PTT_UNUSED_ENTRY(738), ICE_PTT_UNUSED_ENTRY(739), ICE_PTT_UNUSED_ENTRY(740), ICE_PTT_UNUSED_ENTRY(741), ICE_PTT_UNUSED_ENTRY(742), ICE_PTT_UNUSED_ENTRY(743), ICE_PTT_UNUSED_ENTRY(744), ICE_PTT_UNUSED_ENTRY(745), ICE_PTT_UNUSED_ENTRY(746), ICE_PTT_UNUSED_ENTRY(747), ICE_PTT_UNUSED_ENTRY(748), ICE_PTT_UNUSED_ENTRY(749), ICE_PTT_UNUSED_ENTRY(750), ICE_PTT_UNUSED_ENTRY(751), ICE_PTT_UNUSED_ENTRY(752), ICE_PTT_UNUSED_ENTRY(753), ICE_PTT_UNUSED_ENTRY(754), ICE_PTT_UNUSED_ENTRY(755), ICE_PTT_UNUSED_ENTRY(756), ICE_PTT_UNUSED_ENTRY(757), ICE_PTT_UNUSED_ENTRY(758), ICE_PTT_UNUSED_ENTRY(759), ICE_PTT_UNUSED_ENTRY(760), ICE_PTT_UNUSED_ENTRY(761), ICE_PTT_UNUSED_ENTRY(762), ICE_PTT_UNUSED_ENTRY(763), ICE_PTT_UNUSED_ENTRY(764), ICE_PTT_UNUSED_ENTRY(765), ICE_PTT_UNUSED_ENTRY(766), ICE_PTT_UNUSED_ENTRY(767), ICE_PTT_UNUSED_ENTRY(768), ICE_PTT_UNUSED_ENTRY(769), ICE_PTT_UNUSED_ENTRY(770), ICE_PTT_UNUSED_ENTRY(771), ICE_PTT_UNUSED_ENTRY(772), ICE_PTT_UNUSED_ENTRY(773), ICE_PTT_UNUSED_ENTRY(774), ICE_PTT_UNUSED_ENTRY(775), ICE_PTT_UNUSED_ENTRY(776), ICE_PTT_UNUSED_ENTRY(777), ICE_PTT_UNUSED_ENTRY(778), ICE_PTT_UNUSED_ENTRY(779), ICE_PTT_UNUSED_ENTRY(780), ICE_PTT_UNUSED_ENTRY(781), ICE_PTT_UNUSED_ENTRY(782), ICE_PTT_UNUSED_ENTRY(783), ICE_PTT_UNUSED_ENTRY(784), ICE_PTT_UNUSED_ENTRY(785), ICE_PTT_UNUSED_ENTRY(786), ICE_PTT_UNUSED_ENTRY(787), ICE_PTT_UNUSED_ENTRY(788), ICE_PTT_UNUSED_ENTRY(789), ICE_PTT_UNUSED_ENTRY(790), ICE_PTT_UNUSED_ENTRY(791), ICE_PTT_UNUSED_ENTRY(792), ICE_PTT_UNUSED_ENTRY(793), ICE_PTT_UNUSED_ENTRY(794), ICE_PTT_UNUSED_ENTRY(795), ICE_PTT_UNUSED_ENTRY(796), ICE_PTT_UNUSED_ENTRY(797), ICE_PTT_UNUSED_ENTRY(798), ICE_PTT_UNUSED_ENTRY(799), ICE_PTT_UNUSED_ENTRY(800), ICE_PTT_UNUSED_ENTRY(801), ICE_PTT_UNUSED_ENTRY(802), ICE_PTT_UNUSED_ENTRY(803), ICE_PTT_UNUSED_ENTRY(804), ICE_PTT_UNUSED_ENTRY(805), ICE_PTT_UNUSED_ENTRY(806), ICE_PTT_UNUSED_ENTRY(807), ICE_PTT_UNUSED_ENTRY(808), ICE_PTT_UNUSED_ENTRY(809), ICE_PTT_UNUSED_ENTRY(810), ICE_PTT_UNUSED_ENTRY(811), ICE_PTT_UNUSED_ENTRY(812), ICE_PTT_UNUSED_ENTRY(813), ICE_PTT_UNUSED_ENTRY(814), ICE_PTT_UNUSED_ENTRY(815), ICE_PTT_UNUSED_ENTRY(816), 
ICE_PTT_UNUSED_ENTRY(817), ICE_PTT_UNUSED_ENTRY(818), ICE_PTT_UNUSED_ENTRY(819), ICE_PTT_UNUSED_ENTRY(820), ICE_PTT_UNUSED_ENTRY(821), ICE_PTT_UNUSED_ENTRY(822), ICE_PTT_UNUSED_ENTRY(823), ICE_PTT_UNUSED_ENTRY(824), ICE_PTT_UNUSED_ENTRY(825), ICE_PTT_UNUSED_ENTRY(826), ICE_PTT_UNUSED_ENTRY(827), ICE_PTT_UNUSED_ENTRY(828), ICE_PTT_UNUSED_ENTRY(829), ICE_PTT_UNUSED_ENTRY(830), ICE_PTT_UNUSED_ENTRY(831), ICE_PTT_UNUSED_ENTRY(832), ICE_PTT_UNUSED_ENTRY(833), ICE_PTT_UNUSED_ENTRY(834), ICE_PTT_UNUSED_ENTRY(835), ICE_PTT_UNUSED_ENTRY(836), ICE_PTT_UNUSED_ENTRY(837), ICE_PTT_UNUSED_ENTRY(838), ICE_PTT_UNUSED_ENTRY(839), ICE_PTT_UNUSED_ENTRY(840), ICE_PTT_UNUSED_ENTRY(841), ICE_PTT_UNUSED_ENTRY(842), ICE_PTT_UNUSED_ENTRY(843), ICE_PTT_UNUSED_ENTRY(844), ICE_PTT_UNUSED_ENTRY(845), ICE_PTT_UNUSED_ENTRY(846), ICE_PTT_UNUSED_ENTRY(847), ICE_PTT_UNUSED_ENTRY(848), ICE_PTT_UNUSED_ENTRY(849), ICE_PTT_UNUSED_ENTRY(850), ICE_PTT_UNUSED_ENTRY(851), ICE_PTT_UNUSED_ENTRY(852), ICE_PTT_UNUSED_ENTRY(853), ICE_PTT_UNUSED_ENTRY(854), ICE_PTT_UNUSED_ENTRY(855), ICE_PTT_UNUSED_ENTRY(856), ICE_PTT_UNUSED_ENTRY(857), ICE_PTT_UNUSED_ENTRY(858), ICE_PTT_UNUSED_ENTRY(859), ICE_PTT_UNUSED_ENTRY(860), ICE_PTT_UNUSED_ENTRY(861), ICE_PTT_UNUSED_ENTRY(862), ICE_PTT_UNUSED_ENTRY(863), ICE_PTT_UNUSED_ENTRY(864), ICE_PTT_UNUSED_ENTRY(865), ICE_PTT_UNUSED_ENTRY(866), ICE_PTT_UNUSED_ENTRY(867), ICE_PTT_UNUSED_ENTRY(868), ICE_PTT_UNUSED_ENTRY(869), ICE_PTT_UNUSED_ENTRY(870), ICE_PTT_UNUSED_ENTRY(871), ICE_PTT_UNUSED_ENTRY(872), ICE_PTT_UNUSED_ENTRY(873), ICE_PTT_UNUSED_ENTRY(874), ICE_PTT_UNUSED_ENTRY(875), ICE_PTT_UNUSED_ENTRY(876), ICE_PTT_UNUSED_ENTRY(877), ICE_PTT_UNUSED_ENTRY(878), ICE_PTT_UNUSED_ENTRY(879), ICE_PTT_UNUSED_ENTRY(880), ICE_PTT_UNUSED_ENTRY(881), ICE_PTT_UNUSED_ENTRY(882), ICE_PTT_UNUSED_ENTRY(883), ICE_PTT_UNUSED_ENTRY(884), ICE_PTT_UNUSED_ENTRY(885), ICE_PTT_UNUSED_ENTRY(886), ICE_PTT_UNUSED_ENTRY(887), ICE_PTT_UNUSED_ENTRY(888), ICE_PTT_UNUSED_ENTRY(889), ICE_PTT_UNUSED_ENTRY(890), ICE_PTT_UNUSED_ENTRY(891), ICE_PTT_UNUSED_ENTRY(892), ICE_PTT_UNUSED_ENTRY(893), ICE_PTT_UNUSED_ENTRY(894), ICE_PTT_UNUSED_ENTRY(895), ICE_PTT_UNUSED_ENTRY(896), ICE_PTT_UNUSED_ENTRY(897), ICE_PTT_UNUSED_ENTRY(898), ICE_PTT_UNUSED_ENTRY(899), ICE_PTT_UNUSED_ENTRY(900), ICE_PTT_UNUSED_ENTRY(901), ICE_PTT_UNUSED_ENTRY(902), ICE_PTT_UNUSED_ENTRY(903), ICE_PTT_UNUSED_ENTRY(904), ICE_PTT_UNUSED_ENTRY(905), ICE_PTT_UNUSED_ENTRY(906), ICE_PTT_UNUSED_ENTRY(907), ICE_PTT_UNUSED_ENTRY(908), ICE_PTT_UNUSED_ENTRY(909), ICE_PTT_UNUSED_ENTRY(910), ICE_PTT_UNUSED_ENTRY(911), ICE_PTT_UNUSED_ENTRY(912), ICE_PTT_UNUSED_ENTRY(913), ICE_PTT_UNUSED_ENTRY(914), ICE_PTT_UNUSED_ENTRY(915), ICE_PTT_UNUSED_ENTRY(916), ICE_PTT_UNUSED_ENTRY(917), ICE_PTT_UNUSED_ENTRY(918), ICE_PTT_UNUSED_ENTRY(919), ICE_PTT_UNUSED_ENTRY(920), ICE_PTT_UNUSED_ENTRY(921), ICE_PTT_UNUSED_ENTRY(922), ICE_PTT_UNUSED_ENTRY(923), ICE_PTT_UNUSED_ENTRY(924), ICE_PTT_UNUSED_ENTRY(925), ICE_PTT_UNUSED_ENTRY(926), ICE_PTT_UNUSED_ENTRY(927), ICE_PTT_UNUSED_ENTRY(928), ICE_PTT_UNUSED_ENTRY(929), ICE_PTT_UNUSED_ENTRY(930), ICE_PTT_UNUSED_ENTRY(931), ICE_PTT_UNUSED_ENTRY(932), ICE_PTT_UNUSED_ENTRY(933), ICE_PTT_UNUSED_ENTRY(934), ICE_PTT_UNUSED_ENTRY(935), ICE_PTT_UNUSED_ENTRY(936), ICE_PTT_UNUSED_ENTRY(937), ICE_PTT_UNUSED_ENTRY(938), ICE_PTT_UNUSED_ENTRY(939), ICE_PTT_UNUSED_ENTRY(940), ICE_PTT_UNUSED_ENTRY(941), ICE_PTT_UNUSED_ENTRY(942), ICE_PTT_UNUSED_ENTRY(943), ICE_PTT_UNUSED_ENTRY(944), ICE_PTT_UNUSED_ENTRY(945), ICE_PTT_UNUSED_ENTRY(946), ICE_PTT_UNUSED_ENTRY(947), 
ICE_PTT_UNUSED_ENTRY(948), ICE_PTT_UNUSED_ENTRY(949), ICE_PTT_UNUSED_ENTRY(950), ICE_PTT_UNUSED_ENTRY(951), ICE_PTT_UNUSED_ENTRY(952), ICE_PTT_UNUSED_ENTRY(953), ICE_PTT_UNUSED_ENTRY(954), ICE_PTT_UNUSED_ENTRY(955), ICE_PTT_UNUSED_ENTRY(956), ICE_PTT_UNUSED_ENTRY(957), ICE_PTT_UNUSED_ENTRY(958), ICE_PTT_UNUSED_ENTRY(959), ICE_PTT_UNUSED_ENTRY(960), ICE_PTT_UNUSED_ENTRY(961), ICE_PTT_UNUSED_ENTRY(962), ICE_PTT_UNUSED_ENTRY(963), ICE_PTT_UNUSED_ENTRY(964), ICE_PTT_UNUSED_ENTRY(965), ICE_PTT_UNUSED_ENTRY(966), ICE_PTT_UNUSED_ENTRY(967), ICE_PTT_UNUSED_ENTRY(968), ICE_PTT_UNUSED_ENTRY(969), ICE_PTT_UNUSED_ENTRY(970), ICE_PTT_UNUSED_ENTRY(971), ICE_PTT_UNUSED_ENTRY(972), ICE_PTT_UNUSED_ENTRY(973), ICE_PTT_UNUSED_ENTRY(974), ICE_PTT_UNUSED_ENTRY(975), ICE_PTT_UNUSED_ENTRY(976), ICE_PTT_UNUSED_ENTRY(977), ICE_PTT_UNUSED_ENTRY(978), ICE_PTT_UNUSED_ENTRY(979), ICE_PTT_UNUSED_ENTRY(980), ICE_PTT_UNUSED_ENTRY(981), ICE_PTT_UNUSED_ENTRY(982), ICE_PTT_UNUSED_ENTRY(983), ICE_PTT_UNUSED_ENTRY(984), ICE_PTT_UNUSED_ENTRY(985), ICE_PTT_UNUSED_ENTRY(986), ICE_PTT_UNUSED_ENTRY(987), ICE_PTT_UNUSED_ENTRY(988), ICE_PTT_UNUSED_ENTRY(989), ICE_PTT_UNUSED_ENTRY(990), ICE_PTT_UNUSED_ENTRY(991), ICE_PTT_UNUSED_ENTRY(992), ICE_PTT_UNUSED_ENTRY(993), ICE_PTT_UNUSED_ENTRY(994), ICE_PTT_UNUSED_ENTRY(995), ICE_PTT_UNUSED_ENTRY(996), ICE_PTT_UNUSED_ENTRY(997), ICE_PTT_UNUSED_ENTRY(998), ICE_PTT_UNUSED_ENTRY(999), ICE_PTT_UNUSED_ENTRY(1000), ICE_PTT_UNUSED_ENTRY(1001), ICE_PTT_UNUSED_ENTRY(1002), ICE_PTT_UNUSED_ENTRY(1003), ICE_PTT_UNUSED_ENTRY(1004), ICE_PTT_UNUSED_ENTRY(1005), ICE_PTT_UNUSED_ENTRY(1006), ICE_PTT_UNUSED_ENTRY(1007), ICE_PTT_UNUSED_ENTRY(1008), ICE_PTT_UNUSED_ENTRY(1009), ICE_PTT_UNUSED_ENTRY(1010), ICE_PTT_UNUSED_ENTRY(1011), ICE_PTT_UNUSED_ENTRY(1012), ICE_PTT_UNUSED_ENTRY(1013), ICE_PTT_UNUSED_ENTRY(1014), ICE_PTT_UNUSED_ENTRY(1015), ICE_PTT_UNUSED_ENTRY(1016), ICE_PTT_UNUSED_ENTRY(1017), ICE_PTT_UNUSED_ENTRY(1018), ICE_PTT_UNUSED_ENTRY(1019), ICE_PTT_UNUSED_ENTRY(1020), ICE_PTT_UNUSED_ENTRY(1021), ICE_PTT_UNUSED_ENTRY(1022), ICE_PTT_UNUSED_ENTRY(1023), }; static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) { return ice_ptype_lkup[ptype]; } #define ICE_LINK_SPEED_UNKNOWN 0 #define ICE_LINK_SPEED_10MBPS 10 #define ICE_LINK_SPEED_100MBPS 100 #define ICE_LINK_SPEED_1000MBPS 1000 #define ICE_LINK_SPEED_2500MBPS 2500 #define ICE_LINK_SPEED_5000MBPS 5000 #define ICE_LINK_SPEED_10000MBPS 10000 #define ICE_LINK_SPEED_20000MBPS 20000 #define ICE_LINK_SPEED_25000MBPS 25000 #define ICE_LINK_SPEED_40000MBPS 40000 #define ICE_LINK_SPEED_50000MBPS 50000 #define ICE_LINK_SPEED_100000MBPS 100000 #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c index f60e3feaafef..ecea51a95ae4 100644 --- a/sys/dev/ice/ice_lib.c +++ b/sys/dev/ice/ice_lib.c @@ -1,8186 +1,8335 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. 
Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_lib.c * @brief Generic device setup and sysctl functions * * Library of generic device functions not specific to the networking stack. * * This includes hardware initialization functions, as well as handlers for * many of the device sysctls used to probe driver status or tune specific * behaviors. */ #include "ice_lib.h" #include "ice_iflib.h" #include #include #include #include #include #include +#include /** * @var M_ICE * @brief main ice driver allocation type * * malloc(9) allocation type used by the majority of memory allocations in the * ice driver. */ MALLOC_DEFINE(M_ICE, "ice", "Intel(R) 100Gb Network Driver lib allocations"); /* * Helper function prototypes */ static int ice_get_next_vsi(struct ice_vsi **all_vsi, int size); static void ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx); static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type); static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx); static int ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf_q); static int ice_setup_rx_ctx(struct ice_rx_queue *rxq); static int ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg); static void ice_free_fltr_list(struct ice_list_head *list); static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, const u8 *addr, enum ice_sw_fwd_act_type action); static void ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, struct ice_ctl_q_info *cq); static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_info *e); static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, struct ice_rq_event_info *event); static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf); static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf); static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info); static u_int ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, u_int errors); static void ice_add_debug_tunables(struct ice_softc *sc); static void ice_add_debug_sysctls(struct ice_softc *sc); static void ice_vsi_set_rss_params(struct ice_vsi *vsi); static void ice_get_default_rss_key(u8 *seed); static int ice_set_rss_key(struct ice_vsi *vsi); static int ice_set_rss_lut(struct ice_vsi *vsi); static void ice_set_rss_flow_flds(struct ice_vsi *vsi); static void ice_clean_vsi_rss_cfg(struct ice_vsi *vsi); static const char *ice_aq_speed_to_str(struct 
ice_port_info *pi); static const char *ice_requested_fec_mode(struct ice_port_info *pi); static const char *ice_negotiated_fec_mode(struct ice_port_info *pi); static const char *ice_autoneg_mode(struct ice_port_info *pi); static const char *ice_flowcontrol_mode(struct ice_port_info *pi); static void ice_print_bus_link_data(device_t dev, struct ice_hw *hw); static void ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status); static uint8_t ice_pcie_bandwidth_check(struct ice_softc *sc); static uint64_t ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed); static int ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width); static uint64_t ice_phy_types_to_max_rate(struct ice_port_info *pi); static void ice_add_sysctls_sw_stats(struct ice_vsi *vsi, struct sysctl_ctx_list *ctx, struct sysctl_oid *parent); static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi, enum ice_vsi_type type, int idx, bool dynamic); static void ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event); static void ice_handle_lan_overflow_event(struct ice_softc *sc, struct ice_rq_event_info *event); static int ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list, u16 ethertype, u16 direction, enum ice_sw_fwd_act_type action); static void ice_add_rx_lldp_filter(struct ice_softc *sc); static void ice_del_rx_lldp_filter(struct ice_softc *sc); -static u16 ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, - u64 phy_type_high); -static void -ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, +static u16 ice_aq_phy_types_to_link_speeds(u64 phy_type_low, + u64 phy_type_high); +struct ice_phy_data; +static int +ice_intersect_phy_types_and_speeds(struct ice_softc *sc, + struct ice_phy_data *phy_data); +static int +ice_apply_saved_phy_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg); -static void -ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, +static int +ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg); static void -ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, - struct ice_aqc_set_phy_cfg_data *cfg); -static void ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg); static void ice_print_ldo_tlv(struct ice_softc *sc, struct ice_link_default_override_tlv *tlv); static void ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, u64 *phy_type_high); -static int -ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds, - u64 *phy_type_low, u64 *phy_type_high); -static int -ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low, - u64 *phy_type_high); +static u16 ice_apply_supported_speed_filter(u16 report_speeds); +static void +ice_handle_health_status_event(struct ice_softc *sc, + struct ice_rq_event_info *event); static void -ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high); -static enum ice_status -ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high); +ice_print_health_status_string(device_t dev, + struct ice_aqc_health_status_elem *elem); static int ice_module_init(void); static int ice_module_exit(void); /* * package version comparison functions */ static bool pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name); static int pkg_ver_compatible(struct ice_pkg_ver *pkg_ver); /* * dynamic sysctl 
handlers */ static int ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS); static int ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS); static int ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS); static int ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS); static int ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS); static int ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS); static int ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS); static int ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS); static int __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high); static int ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS); static int ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS); static int ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode); static int ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS); static int ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS); static int ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); static int ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS); static int ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS); static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS); +static int ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS); /** * ice_map_bar - Map PCIe BAR memory * @dev: the PCIe device * @bar: the BAR info structure * @bar_num: PCIe BAR number * * Maps the specified PCIe BAR. Stores the mapping data in struct * ice_bar_info. */ int ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num) { if (bar->res != NULL) { device_printf(dev, "PCI BAR%d already mapped\n", bar_num); return (EDOOFUS); } bar->rid = PCIR_BAR(bar_num); bar->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar->rid, RF_ACTIVE); if (!bar->res) { device_printf(dev, "PCI BAR%d mapping failed\n", bar_num); return (ENXIO); } bar->tag = rman_get_bustag(bar->res); bar->handle = rman_get_bushandle(bar->res); bar->size = rman_get_size(bar->res); return (0); } /** * ice_free_bar - Free PCIe BAR memory * @dev: the PCIe device * @bar: the BAR info structure * * Frees the specified PCIe BAR, releasing its resources. */ void ice_free_bar(device_t dev, struct ice_bar_info *bar) { if (bar->res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, bar->rid, bar->res); bar->res = NULL; } /** * ice_set_ctrlq_len - Configure ctrlq lengths for a device * @hw: the device hardware structure * * Configures the control queues for the given device, setting up the * specified lengths, prior to initializing hardware. 
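 *
 * A minimal usage sketch (hypothetical attach ordering; assumes the
 * shared code's ice_init_hw() as the next step and elides error
 * handling):
 *
 *	ice_set_ctrlq_len(&sc->hw);
 *	status = ice_init_hw(&sc->hw);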
*/ void ice_set_ctrlq_len(struct ice_hw *hw) { hw->adminq.num_rq_entries = ICE_AQ_LEN; hw->adminq.num_sq_entries = ICE_AQ_LEN; hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN; hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN; hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN; hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; } /** * ice_get_next_vsi - Get the next available VSI slot * @all_vsi: the VSI list * @size: the size of the VSI list * * Returns the index to the first available VSI slot. Will return size (one * past the last index) if there are no slots available. */ static int ice_get_next_vsi(struct ice_vsi **all_vsi, int size) { int i; for (i = 0; i < size; i++) { if (all_vsi[i] == NULL) return i; } return size; } /** * ice_setup_vsi_common - Common VSI setup for both dynamic and static VSIs * @sc: the device private softc structure * @vsi: the VSI to setup * @type: the VSI type of the new VSI * @idx: the index in the all_vsi array to use * @dynamic: whether this VSI memory was dynamically allocated * * Perform setup for a VSI that is common to both dynamically allocated VSIs * and the static PF VSI which is embedded in the softc structure. */ static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi, enum ice_vsi_type type, int idx, bool dynamic) { /* Store important values in VSI struct */ vsi->type = type; vsi->sc = sc; vsi->idx = idx; sc->all_vsi[idx] = vsi; vsi->dynamic = dynamic; /* Setup the VSI tunables now */ ice_add_vsi_tunables(vsi, sc->vsi_sysctls); } /** * ice_alloc_vsi - Allocate a dynamic VSI * @sc: device softc structure * @type: VSI type * * Allocates a new dynamic VSI structure and inserts it into the VSI list. */ struct ice_vsi * ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type) { struct ice_vsi *vsi; int idx; /* Find an open index for a new VSI to be allocated. If the returned * index is >= the num_available_vsi then it means no slot is * available. */ idx = ice_get_next_vsi(sc->all_vsi, sc->num_available_vsi); if (idx >= sc->num_available_vsi) { device_printf(sc->dev, "No available VSI slots\n"); return NULL; } vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_WAITOK|M_ZERO); if (!vsi) { device_printf(sc->dev, "Unable to allocate VSI memory\n"); return NULL; } ice_setup_vsi_common(sc, vsi, type, idx, true); return vsi; } /** * ice_setup_pf_vsi - Setup the PF VSI * @sc: the device private softc * * Setup the PF VSI structure which is embedded as sc->pf_vsi in the device * private softc. Unlike other VSIs, the PF VSI memory is allocated as part of * the softc memory, instead of being dynamically allocated at creation. */ void ice_setup_pf_vsi(struct ice_softc *sc) { ice_setup_vsi_common(sc, &sc->pf_vsi, ICE_VSI_PF, 0, false); } /** * ice_alloc_vsi_qmap * @vsi: VSI structure * @max_tx_queues: Number of transmit queues to identify * @max_rx_queues: Number of receive queues to identify * * Allocates a max_[t|r]x_queues array of words for the VSI where each * word contains the index of the queue it represents. In here, all * words are initialized to an index of ICE_INVALID_RES_IDX, indicating * all queues for this VSI are not yet assigned an index and thus, * not ready for use. * * Returns an error code on failure. 
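 *
 * A hedged usage sketch (the queue counts below are purely
 * illustrative):
 *
 *	err = ice_alloc_vsi_qmap(vsi, 64, 64);
 *	if (err) {
 *		device_printf(sc->dev, "Unable to allocate VSI qmaps\n");
 *		return (err);
 *	}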
*/ int ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues, const int max_rx_queues) { struct ice_softc *sc = vsi->sc; int i; MPASS(max_tx_queues > 0); MPASS(max_rx_queues > 0); /* Allocate Tx queue mapping memory */ if (!(vsi->tx_qmap = (u16 *) malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK))) { device_printf(sc->dev, "Unable to allocate Tx qmap memory\n"); return (ENOMEM); } /* Allocate Rx queue mapping memory */ if (!(vsi->rx_qmap = (u16 *) malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK))) { device_printf(sc->dev, "Unable to allocate Rx qmap memory\n"); goto free_tx_qmap; } /* Mark every queue map as invalid to start with */ for (i = 0; i < max_tx_queues; i++) { vsi->tx_qmap[i] = ICE_INVALID_RES_IDX; } for (i = 0; i < max_rx_queues; i++) { vsi->rx_qmap[i] = ICE_INVALID_RES_IDX; } return 0; free_tx_qmap: free(vsi->tx_qmap, M_ICE); vsi->tx_qmap = NULL; return (ENOMEM); } /** * ice_free_vsi_qmaps - Free the PF qmaps associated with a VSI * @vsi: the VSI private structure * * Frees the PF qmaps associated with the given VSI. Generally this will be * called by ice_release_vsi, but may need to be called during attach cleanup, * depending on when the qmaps were allocated. */ void ice_free_vsi_qmaps(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; if (vsi->tx_qmap) { ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap, vsi->num_tx_queues); free(vsi->tx_qmap, M_ICE); vsi->tx_qmap = NULL; } if (vsi->rx_qmap) { ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap, vsi->num_rx_queues); free(vsi->rx_qmap, M_ICE); vsi->rx_qmap = NULL; } } /** * ice_set_default_vsi_ctx - Setup default VSI context parameters * @ctx: the VSI context to initialize * * Initialize and prepare a default VSI context for configuring a new VSI. */ static void ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx) { u32 table = 0; memset(&ctx->info, 0, sizeof(ctx->info)); /* VSI will be allocated from shared pool */ ctx->alloc_from_pool = true; /* Enable source pruning by default */ ctx->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE; /* Traffic from VSI can be sent to LAN */ ctx->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA; /* Allow all packets untagged/tagged */ - ctx->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL & - ICE_AQ_VSI_VLAN_MODE_M) >> - ICE_AQ_VSI_VLAN_MODE_S); + ctx->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL & + ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >> + ICE_AQ_VSI_INNER_VLAN_TX_MODE_S); /* Show VLAN/UP from packets in Rx descriptors */ - ctx->info.vlan_flags |= ((ICE_AQ_VSI_VLAN_EMOD_STR_BOTH & - ICE_AQ_VSI_VLAN_EMOD_M) >> - ICE_AQ_VSI_VLAN_EMOD_S); + ctx->info.inner_vlan_flags |= ((ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH & + ICE_AQ_VSI_INNER_VLAN_EMODE_M) >> + ICE_AQ_VSI_INNER_VLAN_EMODE_S); /* Have 1:1 UP mapping for both ingress/egress tables */ table |= ICE_UP_TABLE_TRANSLATE(0, 0); table |= ICE_UP_TABLE_TRANSLATE(1, 1); table |= ICE_UP_TABLE_TRANSLATE(2, 2); table |= ICE_UP_TABLE_TRANSLATE(3, 3); table |= ICE_UP_TABLE_TRANSLATE(4, 4); table |= ICE_UP_TABLE_TRANSLATE(5, 5); table |= ICE_UP_TABLE_TRANSLATE(6, 6); table |= ICE_UP_TABLE_TRANSLATE(7, 7); ctx->info.ingress_table = CPU_TO_LE32(table); ctx->info.egress_table = CPU_TO_LE32(table); /* Have 1:1 UP mapping for outer to inner UP table */ ctx->info.outer_up_table = CPU_TO_LE32(table); - /* No Outer tag support, so outer_tag_flags remains zero */ + /* No Outer tag support, so outer_vlan_flags remains zero */ } /** * ice_set_rss_vsi_ctx - Setup VSI context parameters for RSS * @ctx: the VSI context to configure * @type: the VSI type * 
* Configures the VSI context for RSS, based on the VSI type. */ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type) { u8 lut_type, hash_type; switch (type) { case ICE_VSI_PF: lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; case ICE_VSI_VF: lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ; break; default: /* Other VSI types do not support RSS */ return; } ctx->info.q_opt_rss = (((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) & ICE_AQ_VSI_Q_OPT_RSS_LUT_M) | ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) & ICE_AQ_VSI_Q_OPT_RSS_HASH_M)); } /** * ice_setup_vsi_qmap - Setup the queue mapping for a VSI * @vsi: the VSI to configure * @ctx: the VSI context to configure * * Configures the context for the given VSI, setting up how the firmware * should map the queues for this VSI. */ static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) { int pow = 0; u16 qmap; MPASS(vsi->rx_qmap != NULL); /* TODO: * Handle multiple Traffic Classes * Handle scattered queues (for VFs) */ if (vsi->qmap_type != ICE_RESMGR_ALLOC_CONTIGUOUS) return (EOPNOTSUPP); ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG); ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]); ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues); /* Calculate the next power-of-2 of number of queues */ if (vsi->num_rx_queues) pow = flsl(vsi->num_rx_queues - 1); /* Assign all the queues to traffic class zero */ qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M; ctx->info.tc_mapping[0] = CPU_TO_LE16(qmap); return 0; } /** * ice_initialize_vsi - Initialize a VSI for use * @vsi: the vsi to initialize * * Initialize a VSI over the adminq and prepare it for operation. */ int ice_initialize_vsi(struct ice_vsi *vsi) { struct ice_vsi_ctx ctx = { 0 }; struct ice_hw *hw = &vsi->sc->hw; u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; enum ice_status status; int err; /* For now, we only have code supporting PF VSIs */ switch (vsi->type) { case ICE_VSI_PF: ctx.flags = ICE_AQ_VSI_TYPE_PF; break; default: return (ENODEV); } ice_set_default_vsi_ctx(&ctx); ice_set_rss_vsi_ctx(&ctx, vsi->type); /* XXX: VSIs of other types may need different port info? */ ctx.info.sw_id = hw->port_info->sw_id; /* Set some RSS parameters based on the VSI type */ ice_vsi_set_rss_params(vsi); /* Initialize the Rx queue mapping for this VSI */ err = ice_setup_vsi_qmap(vsi, &ctx); if (err) { return err; } /* (Re-)add VSI to HW VSI handle list */ status = ice_add_vsi(hw, vsi->idx, &ctx, NULL); if (status != 0) { device_printf(vsi->sc->dev, "Add VSI AQ call failed, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } vsi->info = ctx.info; /* TODO: DCB traffic class support? */ max_txqs[0] = vsi->num_tx_queues; status = ice_cfg_vsi_lan(hw->port_info, vsi->idx, ICE_DFLT_TRAFFIC_CLASS, max_txqs); if (status) { device_printf(vsi->sc->dev, "Failed VSI lan queue config, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); ice_deinit_vsi(vsi); return (ENODEV); } /* Reset VSI stats */ ice_reset_vsi_stats(vsi); return 0; } /** * ice_deinit_vsi - Tell firmware to release resources for a VSI * @vsi: the VSI to release * * Helper function which requests the firmware to release the hardware * resources associated with a given VSI. 
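 *
 * Pairs with ice_initialize_vsi(); a hypothetical unwind sketch:
 *
 *	err = ice_initialize_vsi(vsi);
 *	if (err)
 *		return (err);
 *	...
 *	ice_deinit_vsi(vsi);	(on teardown or error unwind)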
*/ void ice_deinit_vsi(struct ice_vsi *vsi) { struct ice_vsi_ctx ctx = { 0 }; struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; enum ice_status status; /* Assert that the VSI pointer matches in the list */ MPASS(vsi == sc->all_vsi[vsi->idx]); ctx.info = vsi->info; status = ice_rm_vsi_lan_cfg(hw->port_info, vsi->idx); if (status) { /* * This should only fail if the VSI handle is invalid, or if * any of the nodes have leaf nodes which are still in use. */ device_printf(sc->dev, "Unable to remove scheduler nodes for VSI %d, err %s\n", vsi->idx, ice_status_str(status)); } /* Tell firmware to release the VSI resources */ status = ice_free_vsi(hw, vsi->idx, &ctx, false, NULL); if (status != 0) { device_printf(sc->dev, "Free VSI %u AQ call failed, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } } /** * ice_release_vsi - Release resources associated with a VSI * @vsi: the VSI to release * * Release software and firmware resources associated with a VSI. Release the * queue managers associated with this VSI. Also free the VSI structure memory * if the VSI was allocated dynamically using ice_alloc_vsi(). */ void ice_release_vsi(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; int idx = vsi->idx; /* Assert that the VSI pointer matches in the list */ MPASS(vsi == sc->all_vsi[idx]); /* Cleanup RSS configuration */ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) ice_clean_vsi_rss_cfg(vsi); ice_del_vsi_sysctl_ctx(vsi); ice_deinit_vsi(vsi); ice_free_vsi_qmaps(vsi); if (vsi->dynamic) { free(sc->all_vsi[idx], M_ICE); } sc->all_vsi[idx] = NULL; } /** * ice_aq_speed_to_rate - Convert AdminQ speed enum to baudrate * @pi: port info data * * Returns the baudrate value for the current link speed of a given port. */ uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi) { switch (pi->phy.link_info.link_speed) { case ICE_AQ_LINK_SPEED_100GB: return IF_Gbps(100); case ICE_AQ_LINK_SPEED_50GB: return IF_Gbps(50); case ICE_AQ_LINK_SPEED_40GB: return IF_Gbps(40); case ICE_AQ_LINK_SPEED_25GB: return IF_Gbps(25); case ICE_AQ_LINK_SPEED_10GB: return IF_Gbps(10); case ICE_AQ_LINK_SPEED_5GB: return IF_Gbps(5); case ICE_AQ_LINK_SPEED_2500MB: return IF_Mbps(2500); case ICE_AQ_LINK_SPEED_1000MB: return IF_Mbps(1000); case ICE_AQ_LINK_SPEED_100MB: return IF_Mbps(100); case ICE_AQ_LINK_SPEED_10MB: return IF_Mbps(10); case ICE_AQ_LINK_SPEED_UNKNOWN: default: /* return 0 if we don't know the link speed */ return 0; } } /** * ice_aq_speed_to_str - Convert AdminQ speed enum to string representation * @pi: port info data * * Returns the string representation of the current link speed for a given * port. 
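 *
 * A hypothetical logging call site, pairing this helper with
 * ice_aq_speed_to_rate() above:
 *
 *	device_printf(sc->dev, "link up, %s (%ju baud)\n",
 *	    ice_aq_speed_to_str(hw->port_info),
 *	    (uintmax_t)ice_aq_speed_to_rate(hw->port_info));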
*/ static const char * ice_aq_speed_to_str(struct ice_port_info *pi) { switch (pi->phy.link_info.link_speed) { case ICE_AQ_LINK_SPEED_100GB: return "100 Gbps"; case ICE_AQ_LINK_SPEED_50GB: return "50 Gbps"; case ICE_AQ_LINK_SPEED_40GB: return "40 Gbps"; case ICE_AQ_LINK_SPEED_25GB: return "25 Gbps"; case ICE_AQ_LINK_SPEED_20GB: return "20 Gbps"; case ICE_AQ_LINK_SPEED_10GB: return "10 Gbps"; case ICE_AQ_LINK_SPEED_5GB: return "5 Gbps"; case ICE_AQ_LINK_SPEED_2500MB: return "2.5 Gbps"; case ICE_AQ_LINK_SPEED_1000MB: return "1 Gbps"; case ICE_AQ_LINK_SPEED_100MB: return "100 Mbps"; case ICE_AQ_LINK_SPEED_10MB: return "10 Mbps"; case ICE_AQ_LINK_SPEED_UNKNOWN: default: return "Unknown speed"; } } /** * ice_get_phy_type_low - Get media associated with phy_type_low * @phy_type_low: the low 64bits of phy_type from the AdminQ * * Given the lower 64bits of the phy_type from the hardware, return the * ifm_active bit associated. Return IFM_UNKNOWN when phy_type_low is unknown. * Note that only one of ice_get_phy_type_low or ice_get_phy_type_high should * be called. If phy_type_low is zero, call ice_get_phy_type_high. */ int ice_get_phy_type_low(uint64_t phy_type_low) { switch (phy_type_low) { case ICE_PHY_TYPE_LOW_100BASE_TX: return IFM_100_TX; case ICE_PHY_TYPE_LOW_100M_SGMII: return IFM_100_SGMII; case ICE_PHY_TYPE_LOW_1000BASE_T: return IFM_1000_T; case ICE_PHY_TYPE_LOW_1000BASE_SX: return IFM_1000_SX; case ICE_PHY_TYPE_LOW_1000BASE_LX: return IFM_1000_LX; case ICE_PHY_TYPE_LOW_1000BASE_KX: return IFM_1000_KX; case ICE_PHY_TYPE_LOW_1G_SGMII: return IFM_1000_SGMII; case ICE_PHY_TYPE_LOW_2500BASE_T: return IFM_2500_T; case ICE_PHY_TYPE_LOW_2500BASE_X: return IFM_2500_X; case ICE_PHY_TYPE_LOW_2500BASE_KX: return IFM_2500_KX; case ICE_PHY_TYPE_LOW_5GBASE_T: return IFM_5000_T; case ICE_PHY_TYPE_LOW_5GBASE_KR: return IFM_5000_KR; case ICE_PHY_TYPE_LOW_10GBASE_T: return IFM_10G_T; case ICE_PHY_TYPE_LOW_10G_SFI_DA: return IFM_10G_TWINAX; case ICE_PHY_TYPE_LOW_10GBASE_SR: return IFM_10G_SR; case ICE_PHY_TYPE_LOW_10GBASE_LR: return IFM_10G_LR; case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: return IFM_10G_KR; case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: return IFM_10G_AOC; case ICE_PHY_TYPE_LOW_10G_SFI_C2C: return IFM_10G_SFI; case ICE_PHY_TYPE_LOW_25GBASE_T: return IFM_25G_T; case ICE_PHY_TYPE_LOW_25GBASE_CR: return IFM_25G_CR; case ICE_PHY_TYPE_LOW_25GBASE_CR_S: return IFM_25G_CR_S; case ICE_PHY_TYPE_LOW_25GBASE_CR1: return IFM_25G_CR1; case ICE_PHY_TYPE_LOW_25GBASE_SR: return IFM_25G_SR; case ICE_PHY_TYPE_LOW_25GBASE_LR: return IFM_25G_LR; case ICE_PHY_TYPE_LOW_25GBASE_KR: return IFM_25G_KR; case ICE_PHY_TYPE_LOW_25GBASE_KR_S: return IFM_25G_KR_S; case ICE_PHY_TYPE_LOW_25GBASE_KR1: return IFM_25G_KR1; case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: return IFM_25G_AOC; case ICE_PHY_TYPE_LOW_25G_AUI_C2C: return IFM_25G_AUI; case ICE_PHY_TYPE_LOW_40GBASE_CR4: return IFM_40G_CR4; case ICE_PHY_TYPE_LOW_40GBASE_SR4: return IFM_40G_SR4; case ICE_PHY_TYPE_LOW_40GBASE_LR4: return IFM_40G_LR4; case ICE_PHY_TYPE_LOW_40GBASE_KR4: return IFM_40G_KR4; case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: return IFM_40G_XLAUI_AC; case ICE_PHY_TYPE_LOW_40G_XLAUI: return IFM_40G_XLAUI; case ICE_PHY_TYPE_LOW_50GBASE_CR2: return IFM_50G_CR2; case ICE_PHY_TYPE_LOW_50GBASE_SR2: return IFM_50G_SR2; case ICE_PHY_TYPE_LOW_50GBASE_LR2: return IFM_50G_LR2; case ICE_PHY_TYPE_LOW_50GBASE_KR2: return IFM_50G_KR2; case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: return IFM_50G_LAUI2_AC; case ICE_PHY_TYPE_LOW_50G_LAUI2: return IFM_50G_LAUI2; case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
return IFM_50G_AUI2_AC; case ICE_PHY_TYPE_LOW_50G_AUI2: return IFM_50G_AUI2; case ICE_PHY_TYPE_LOW_50GBASE_CP: return IFM_50G_CP; case ICE_PHY_TYPE_LOW_50GBASE_SR: return IFM_50G_SR; case ICE_PHY_TYPE_LOW_50GBASE_FR: return IFM_50G_FR; case ICE_PHY_TYPE_LOW_50GBASE_LR: return IFM_50G_LR; case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: return IFM_50G_KR_PAM4; case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: return IFM_50G_AUI1_AC; case ICE_PHY_TYPE_LOW_50G_AUI1: return IFM_50G_AUI1; case ICE_PHY_TYPE_LOW_100GBASE_CR4: return IFM_100G_CR4; case ICE_PHY_TYPE_LOW_100GBASE_SR4: return IFM_100G_SR4; case ICE_PHY_TYPE_LOW_100GBASE_LR4: return IFM_100G_LR4; case ICE_PHY_TYPE_LOW_100GBASE_KR4: return IFM_100G_KR4; case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: return IFM_100G_CAUI4_AC; case ICE_PHY_TYPE_LOW_100G_CAUI4: return IFM_100G_CAUI4; case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: return IFM_100G_AUI4_AC; case ICE_PHY_TYPE_LOW_100G_AUI4: return IFM_100G_AUI4; case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: return IFM_100G_CR_PAM4; case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: return IFM_100G_KR_PAM4; case ICE_PHY_TYPE_LOW_100GBASE_CP2: return IFM_100G_CP2; case ICE_PHY_TYPE_LOW_100GBASE_SR2: return IFM_100G_SR2; case ICE_PHY_TYPE_LOW_100GBASE_DR: return IFM_100G_DR; default: return IFM_UNKNOWN; } } /** * ice_get_phy_type_high - Get media associated with phy_type_high * @phy_type_high: the upper 64bits of phy_type from the AdminQ * * Given the upper 64bits of the phy_type from the hardware, return the * ifm_active bit associated. Return IFM_UNKNOWN on an unknown value. Note * that only one of ice_get_phy_type_low or ice_get_phy_type_high should be * called. If phy_type_high is zero, call ice_get_phy_type_low. */ int ice_get_phy_type_high(uint64_t phy_type_high) { switch (phy_type_high) { case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: return IFM_100G_KR2_PAM4; case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: return IFM_100G_CAUI2_AC; case ICE_PHY_TYPE_HIGH_100G_CAUI2: return IFM_100G_CAUI2; case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: return IFM_100G_AUI2_AC; case ICE_PHY_TYPE_HIGH_100G_AUI2: return IFM_100G_AUI2; default: return IFM_UNKNOWN; } } /** * ice_phy_types_to_max_rate - Returns port's max supported baudrate * @pi: port info struct * - * ice_aq_get_phy_caps() w/ ICE_AQC_REPORT_TOPO_CAP parameter needs to have - * been called before this function for it to work. + * ice_aq_get_phy_caps() w/ ICE_AQC_REPORT_TOPO_CAP_MEDIA parameter needs + * to have been called before this function for it to work. 
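 *
 * A hedged sketch of the required ordering (caps buffer handling and
 * error checks elided; the copy into pi->phy is an assumption about the
 * caller's responsibility):
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *	    &pcaps, NULL);
 *	pi->phy.phy_type_low = le64toh(pcaps.phy_type_low);
 *	pi->phy.phy_type_high = le64toh(pcaps.phy_type_high);
 *	max_rate = ice_phy_types_to_max_rate(pi);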
*/ static uint64_t ice_phy_types_to_max_rate(struct ice_port_info *pi) { uint64_t phy_low = pi->phy.phy_type_low; uint64_t phy_high = pi->phy.phy_type_high; uint64_t max_rate = 0; int bit; /* * These are based on the indices used in the BIT() macros for * ICE_PHY_TYPE_LOW_* */ static const uint64_t phy_rates[] = { IF_Mbps(100), IF_Mbps(100), IF_Gbps(1ULL), IF_Gbps(1ULL), IF_Gbps(1ULL), IF_Gbps(1ULL), IF_Gbps(1ULL), IF_Mbps(2500ULL), IF_Mbps(2500ULL), IF_Mbps(2500ULL), IF_Gbps(5ULL), IF_Gbps(5ULL), IF_Gbps(10ULL), IF_Gbps(10ULL), IF_Gbps(10ULL), IF_Gbps(10ULL), IF_Gbps(10ULL), IF_Gbps(10ULL), IF_Gbps(10ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(25ULL), IF_Gbps(40ULL), IF_Gbps(40ULL), IF_Gbps(40ULL), IF_Gbps(40ULL), IF_Gbps(40ULL), IF_Gbps(40ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(50ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), /* These rates are for ICE_PHY_TYPE_HIGH_* */ IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL), IF_Gbps(100ULL) }; /* coverity[address_of] */ for_each_set_bit(bit, &phy_high, 64) if ((bit + 64) < (int)ARRAY_SIZE(phy_rates)) max_rate = uqmax(max_rate, phy_rates[(bit + 64)]); /* coverity[address_of] */ for_each_set_bit(bit, &phy_low, 64) max_rate = uqmax(max_rate, phy_rates[bit]); return (max_rate); } /* The if_media type is split over the original 5 bit media variant field, * along with extended types using up extra bits in the options section. * We want to convert this split number into a bitmap index, so we reverse the * calculation of IFM_X here. */ #define IFM_IDX(x) (((x) & IFM_TMASK) | \ (((x) & IFM_ETH_XTYPE) >> IFM_ETH_XSHIFT)) /** * ice_add_media_types - Add supported media types to the media structure * @sc: ice private softc structure * @media: ifmedia structure to setup * * Looks up the supported phy types, and initializes the various media types * available. * * @pre this function must be protected from being called while another thread * is accessing the ifmedia types. */ enum ice_status ice_add_media_types(struct ice_softc *sc, struct ifmedia *media) { + struct ice_aqc_get_phy_caps_data pcaps = { 0 }; + struct ice_port_info *pi = sc->hw.port_info; enum ice_status status; uint64_t phy_low, phy_high; int bit; ASSERT_CFG_LOCKED(sc); /* the maximum possible media type index is 511. We probably don't * need most of this space, but this ensures future compatibility when * additional media types are used. 
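 *
 * To illustrate the IFM_IDX() macro defined above, the bitmap index is
 * simply the two masked fields OR'd together (equivalent expansion, not
 * new logic):
 *
 *	idx = (ostype & IFM_TMASK) |
 *	    ((ostype & IFM_ETH_XTYPE) >> IFM_ETH_XSHIFT);
 *
 * which always lands inside the 0..511 range declared below.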
*/ ice_declare_bitmap(already_added, 511); /* Remove all previous media types */ ifmedia_removeall(media); - status = ice_get_phy_types(sc, &phy_low, &phy_high); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, + &pcaps, NULL); if (status != ICE_SUCCESS) { - /* Function already prints appropriate error - * message - */ + device_printf(sc->dev, + "%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n", + __func__, ice_status_str(status), + ice_aq_str(sc->hw.adminq.sq_last_status)); return (status); } + phy_low = le64toh(pcaps.phy_type_low); + phy_high = le64toh(pcaps.phy_type_high); /* make sure the added bitmap is zero'd */ memset(already_added, 0, sizeof(already_added)); /* coverity[address_of] */ for_each_set_bit(bit, &phy_low, 64) { uint64_t type = BIT_ULL(bit); int ostype; /* get the OS media type */ ostype = ice_get_phy_type_low(type); /* don't bother adding the unknown type */ if (ostype == IFM_UNKNOWN) continue; /* only add each media type to the list once */ if (ice_is_bit_set(already_added, IFM_IDX(ostype))) continue; ifmedia_add(media, IFM_ETHER | ostype, 0, NULL); ice_set_bit(IFM_IDX(ostype), already_added); } /* coverity[address_of] */ for_each_set_bit(bit, &phy_high, 64) { uint64_t type = BIT_ULL(bit); int ostype; /* get the OS media type */ ostype = ice_get_phy_type_high(type); /* don't bother adding the unknown type */ if (ostype == IFM_UNKNOWN) continue; /* only add each media type to the list once */ if (ice_is_bit_set(already_added, IFM_IDX(ostype))) continue; ifmedia_add(media, IFM_ETHER | ostype, 0, NULL); ice_set_bit(IFM_IDX(ostype), already_added); } /* Use autoselect media by default */ ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(media, IFM_ETHER | IFM_AUTO); return (ICE_SUCCESS); } /** * ice_configure_rxq_interrupts - Configure HW Rx queues for MSI-X interrupts * @vsi: the VSI to configure * * Called when setting up MSI-X interrupts to configure the Rx hardware queues. */ void ice_configure_rxq_interrupts(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->sc->hw; int i; for (i = 0; i < vsi->num_rx_queues; i++) { struct ice_rx_queue *rxq = &vsi->rx_queues[i]; u32 val; val = (QINT_RQCTL_CAUSE_ENA_M | (ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) | (rxq->irqv->me << QINT_RQCTL_MSIX_INDX_S)); wr32(hw, QINT_RQCTL(vsi->rx_qmap[rxq->me]), val); } ice_flush(hw); } /** * ice_configure_txq_interrupts - Configure HW Tx queues for MSI-X interrupts * @vsi: the VSI to configure * * Called when setting up MSI-X interrupts to configure the Tx hardware queues. */ void ice_configure_txq_interrupts(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->sc->hw; int i; for (i = 0; i < vsi->num_tx_queues; i++) { struct ice_tx_queue *txq = &vsi->tx_queues[i]; u32 val; val = (QINT_TQCTL_CAUSE_ENA_M | (ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) | (txq->irqv->me << QINT_TQCTL_MSIX_INDX_S)); wr32(hw, QINT_TQCTL(vsi->tx_qmap[txq->me]), val); } ice_flush(hw); } /** * ice_flush_rxq_interrupts - Unconfigure Hw Rx queues MSI-X interrupt cause * @vsi: the VSI to configure * * Unset the CAUSE_ENA flag of the RQCTL register for each queue, then trigger * a software interrupt on that cause. This is required as part of the Rx * queue disable logic to dissociate the Rx queue from the interrupt. * * Note: this function must be called prior to disabling Rx queues with * ice_control_rx_queues, otherwise the Rx queue may not be disabled properly.
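 *
 * A hypothetical Rx stop-path ordering sketch:
 *
 *	ice_flush_rxq_interrupts(vsi);
 *	err = ice_control_rx_queues(vsi, false);
 *	if (err)
 *		return (err);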
*/ void ice_flush_rxq_interrupts(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->sc->hw; int i; for (i = 0; i < vsi->num_rx_queues; i++) { struct ice_rx_queue *rxq = &vsi->rx_queues[i]; u32 reg, val; /* Clear the CAUSE_ENA flag */ reg = vsi->rx_qmap[rxq->me]; val = rd32(hw, QINT_RQCTL(reg)); val &= ~QINT_RQCTL_CAUSE_ENA_M; wr32(hw, QINT_RQCTL(reg), val); ice_flush(hw); /* Trigger a software interrupt to complete interrupt * dissociation. */ wr32(hw, GLINT_DYN_CTL(rxq->irqv->me), GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); } } /** * ice_flush_txq_interrupts - Unconfigure Hw Tx queues MSI-X interrupt cause * @vsi: the VSI to configure * * Unset the CAUSE_ENA flag of the TQCTL register for each queue, then trigger * a software interrupt on that cause. This is required as part of the Tx * queue disable logic to dissociate the Tx queue from the interrupt. * * Note: this function must be called prior to ice_vsi_disable_tx, otherwise * the Tx queue disable may not complete properly. */ void ice_flush_txq_interrupts(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->sc->hw; int i; for (i = 0; i < vsi->num_tx_queues; i++) { struct ice_tx_queue *txq = &vsi->tx_queues[i]; u32 reg, val; /* Clear the CAUSE_ENA flag */ reg = vsi->tx_qmap[txq->me]; val = rd32(hw, QINT_TQCTL(reg)); val &= ~QINT_TQCTL_CAUSE_ENA_M; wr32(hw, QINT_TQCTL(reg), val); ice_flush(hw); /* Trigger a software interrupt to complete interrupt * dissociation. */ wr32(hw, GLINT_DYN_CTL(txq->irqv->me), GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); } } /** * ice_configure_rx_itr - Configure the Rx ITR settings for this VSI * @vsi: the VSI to configure * * Program the hardware ITR registers with the settings for this VSI. */ void ice_configure_rx_itr(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->sc->hw; int i; /* TODO: Handle per-queue/per-vector ITR? */ for (i = 0; i < vsi->num_rx_queues; i++) { struct ice_rx_queue *rxq = &vsi->rx_queues[i]; wr32(hw, GLINT_ITR(ICE_RX_ITR, rxq->irqv->me), ice_itr_to_reg(hw, vsi->rx_itr)); } ice_flush(hw); } /** * ice_configure_tx_itr - Configure the Tx ITR settings for this VSI * @vsi: the VSI to configure * * Program the hardware ITR registers with the settings for this VSI. */ void ice_configure_tx_itr(struct ice_vsi *vsi) { struct ice_hw *hw = &vsi->sc->hw; int i; /* TODO: Handle per-queue/per-vector ITR? 
*/ for (i = 0; i < vsi->num_tx_queues; i++) { struct ice_tx_queue *txq = &vsi->tx_queues[i]; wr32(hw, GLINT_ITR(ICE_TX_ITR, txq->irqv->me), ice_itr_to_reg(hw, vsi->tx_itr)); } ice_flush(hw); } /** * ice_setup_tx_ctx - Setup an ice_tlan_ctx structure for a queue * @txq: the Tx queue to configure * @tlan_ctx: the Tx LAN queue context structure to initialize * @pf_q: real queue number */ static int ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) { struct ice_vsi *vsi = txq->vsi; struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; tlan_ctx->port_num = hw->port_info->lport; /* number of descriptors in the queue */ tlan_ctx->qlen = txq->desc_count; /* set the transmit queue base address, defined in 128 byte units */ tlan_ctx->base = txq->tx_paddr >> 7; tlan_ctx->pf_num = hw->pf_id; /* For now, we only have code supporting PF VSIs */ switch (vsi->type) { case ICE_VSI_PF: tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; break; default: return (ENODEV); } tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx); /* Enable TSO */ tlan_ctx->tso_ena = 1; tlan_ctx->internal_usage_flag = 1; tlan_ctx->tso_qnum = pf_q; /* * Stick with the older legacy Tx queue interface, instead of the new * advanced queue interface. */ tlan_ctx->legacy_int = 1; /* Descriptor WB mode */ tlan_ctx->wb_mode = 0; return (0); } /** * ice_cfg_vsi_for_tx - Configure the hardware for Tx * @vsi: the VSI to configure * * Configure the device Tx queues through firmware AdminQ commands. After * this, Tx queues will be ready for transmit. */ int ice_cfg_vsi_for_tx(struct ice_vsi *vsi) { struct ice_aqc_add_tx_qgrp *qg; struct ice_hw *hw = &vsi->sc->hw; device_t dev = vsi->sc->dev; enum ice_status status; int i; int err = 0; u16 qg_size, pf_q; qg_size = ice_struct_size(qg, txqs, 1); qg = (struct ice_aqc_add_tx_qgrp *)malloc(qg_size, M_ICE, M_NOWAIT|M_ZERO); if (!qg) return (ENOMEM); qg->num_txqs = 1; for (i = 0; i < vsi->num_tx_queues; i++) { struct ice_tlan_ctx tlan_ctx = { 0 }; struct ice_tx_queue *txq = &vsi->tx_queues[i]; pf_q = vsi->tx_qmap[txq->me]; qg->txqs[0].txq_id = htole16(pf_q); err = ice_setup_tx_ctx(txq, &tlan_ctx, pf_q); if (err) goto free_txqg; ice_set_ctx(hw, (u8 *)&tlan_ctx, qg->txqs[0].txq_ctx, ice_tlan_ctx_info); status = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, i, 1, qg, qg_size, NULL); if (status) { device_printf(dev, "Failed to set LAN Tx queue context, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = ENODEV; goto free_txqg; } /* Keep track of the Tx queue TEID */ if (pf_q == le16toh(qg->txqs[0].txq_id)) txq->q_teid = le32toh(qg->txqs[0].q_teid); } free_txqg: free(qg, M_ICE); return (err); } /** * ice_setup_rx_ctx - Setup an Rx context structure for a receive queue * @rxq: the receive queue to program * * Setup an Rx queue context structure and program it into the hardware * registers. This is a necessary step for enabling the Rx queue. 
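 *
 * For example, queue base addresses are programmed in 128 byte units,
 * which is why the context setup below shifts the physical address
 * right by 7 (the value here is purely illustrative):
 *
 *	rlan_ctx.base = rxq->rx_paddr >> 7;	(0x10000 becomes 0x200)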
* * @pre the VSI associated with this queue must have initialized mbuf_sz */ static int ice_setup_rx_ctx(struct ice_rx_queue *rxq) { struct ice_rlan_ctx rlan_ctx = {0}; struct ice_vsi *vsi = rxq->vsi; struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; enum ice_status status; u32 rxdid = ICE_RXDID_FLEX_NIC; u32 regval; u16 pf_q; pf_q = vsi->rx_qmap[rxq->me]; /* set the receive queue base address, defined in 128 byte units */ rlan_ctx.base = rxq->rx_paddr >> 7; rlan_ctx.qlen = rxq->desc_count; rlan_ctx.dbuf = vsi->mbuf_sz >> ICE_RLAN_CTX_DBUF_S; /* use 32 byte descriptors */ rlan_ctx.dsize = 1; /* Strip the Ethernet CRC bytes before the packet is posted to the * host memory. */ rlan_ctx.crcstrip = 1; rlan_ctx.l2tsel = 1; /* don't do header splitting */ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; /* strip VLAN from inner headers */ rlan_ctx.showiv = 1; rlan_ctx.rxmax = min(vsi->max_frame_size, ICE_MAX_RX_SEGS * vsi->mbuf_sz); rlan_ctx.lrxqthresh = 1; if (vsi->type != ICE_VSI_VF) { regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); regval &= ~QRXFLXP_CNTXT_RXDID_IDX_M; regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & QRXFLXP_CNTXT_RXDID_IDX_M; regval &= ~QRXFLXP_CNTXT_RXDID_PRIO_M; regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & QRXFLXP_CNTXT_RXDID_PRIO_M; wr32(hw, QRXFLXP_CNTXT(pf_q), regval); } status = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); if (status) { device_printf(sc->dev, "Failed to set LAN Rx queue context, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } wr32(hw, rxq->tail, 0); return 0; } /** * ice_cfg_vsi_for_rx - Configure the hardware for Rx * @vsi: the VSI to configure * * Prepare an Rx context descriptor and configure the device to receive * traffic. * * @pre the VSI must have initialized mbuf_sz */ int ice_cfg_vsi_for_rx(struct ice_vsi *vsi) { int i, err; for (i = 0; i < vsi->num_rx_queues; i++) { MPASS(vsi->mbuf_sz > 0); err = ice_setup_rx_ctx(&vsi->rx_queues[i]); if (err) return err; } return (0); } /** * ice_is_rxq_ready - Check if an Rx queue is ready * @hw: ice hw structure * @pf_q: absolute PF queue index to check * @reg: on successful return, contains qrx_ctrl contents * * Reads the QRX_CTRL register and verifies if the queue is in a consistent * state. That is, QENA_REQ matches QENA_STAT. Used to check before making * a request to change the queue, as well as to verify the request has * finished. The queue should change status within a few microseconds, so we * use a small delay while polling the register. * * Returns an error code if the queue does not update after a few retries. */ static int ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg) { u32 qrx_ctrl, qena_req, qena_stat; int i; for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { qrx_ctrl = rd32(hw, QRX_CTRL(pf_q)); qena_req = (qrx_ctrl >> QRX_CTRL_QENA_REQ_S) & 1; qena_stat = (qrx_ctrl >> QRX_CTRL_QENA_STAT_S) & 1; /* if the request and status bits equal, then the queue is * fully disabled or enabled. */ if (qena_req == qena_stat) { *reg = qrx_ctrl; return (0); } /* wait a few microseconds before we check again */ DELAY(10); } return (ETIMEDOUT); } /** * ice_control_rx_queues - Configure hardware to start or stop the Rx queues * @vsi: VSI to enable/disable queues * @enable: true to enable queues, false to disable * * Control the Rx queues through the QRX_CTRL register, enabling or disabling * them. 
Wait for the appropriate time to ensure that the queues have actually * reached the expected state. */ int ice_control_rx_queues(struct ice_vsi *vsi, bool enable) { struct ice_hw *hw = &vsi->sc->hw; device_t dev = vsi->sc->dev; u32 qrx_ctrl = 0; int i, err; /* TODO: amortize waits by changing all queues up front and then * checking their status afterwards. This will become more necessary * when we have a large number of queues. */ for (i = 0; i < vsi->num_rx_queues; i++) { struct ice_rx_queue *rxq = &vsi->rx_queues[i]; int pf_q = vsi->rx_qmap[rxq->me]; err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); if (err) { device_printf(dev, "Rx queue %d is not ready\n", pf_q); return err; } /* Skip if the queue is already in correct state */ if (enable == !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) continue; if (enable) qrx_ctrl |= QRX_CTRL_QENA_REQ_M; else qrx_ctrl &= ~QRX_CTRL_QENA_REQ_M; wr32(hw, QRX_CTRL(pf_q), qrx_ctrl); /* wait for the queue to finalize the request */ err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl); if (err) { device_printf(dev, "Rx queue %d %sable timeout\n", pf_q, (enable ? "en" : "dis")); return err; } /* this should never happen */ if (enable != !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) { device_printf(dev, "Rx queue %d invalid state\n", pf_q); return (EDOOFUS); } } return (0); } /** * ice_add_mac_to_list - Add MAC filter to a MAC filter list * @vsi: the VSI to forward to * @list: list which contains MAC filter entries * @addr: the MAC address to be added * @action: filter action to perform on match * * Adds a MAC address filter to the list which will be forwarded to firmware * to add a series of MAC address filters. * * Returns 0 on success, and an error code on failure. * */ static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list, const u8 *addr, enum ice_sw_fwd_act_type action) { struct ice_fltr_list_entry *entry; entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); if (!entry) return (ENOMEM); entry->fltr_info.flag = ICE_FLTR_TX; entry->fltr_info.src_id = ICE_SRC_ID_VSI; entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC; entry->fltr_info.fltr_act = action; entry->fltr_info.vsi_handle = vsi->idx; bcopy(addr, entry->fltr_info.l_data.mac.mac_addr, ETHER_ADDR_LEN); LIST_ADD(&entry->list_entry, list); return 0; } /** * ice_free_fltr_list - Free memory associated with a MAC address list * @list: the list to free * * Free the memory of each entry associated with the list. */ static void ice_free_fltr_list(struct ice_list_head *list) { struct ice_fltr_list_entry *e, *tmp; LIST_FOR_EACH_ENTRY_SAFE(e, tmp, list, ice_fltr_list_entry, list_entry) { LIST_DEL(&e->list_entry); free(e, M_ICE); } } /** * ice_add_vsi_mac_filter - Add a MAC address filter for a VSI * @vsi: the VSI to add the filter for * @addr: MAC address to add a filter for * * Add a MAC address filter for a given VSI. This is a wrapper around * ice_add_mac to simplify the interface. First, it only accepts a single * address, so we don't have to mess around with the list setup in other * functions. Second, it ignores the ICE_ERR_ALREADY_EXIST error, so that * callers don't need to worry about attempting to add the same filter twice. 
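 *
 * A hedged usage sketch (the address below is illustrative only):
 *
 *	static const u8 addr[ETHER_ADDR_LEN] =
 *	    { 0x00, 0xa0, 0xc9, 0x00, 0x00, 0x01 };
 *
 *	err = ice_add_vsi_mac_filter(vsi, addr);
 *	if (err)
 *		return (err);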
*/ int ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) { struct ice_list_head mac_addr_list; struct ice_hw *hw = &vsi->sc->hw; device_t dev = vsi->sc->dev; enum ice_status status; int err = 0; INIT_LIST_HEAD(&mac_addr_list); err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); if (err) goto free_mac_list; status = ice_add_mac(hw, &mac_addr_list); if (status == ICE_ERR_ALREADY_EXISTS) { ; /* Don't complain if we try to add a filter that already exists */ } else if (status) { device_printf(dev, "Failed to add a filter for MAC %6D, err %s aq_err %s\n", addr, ":", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = (EIO); } free_mac_list: ice_free_fltr_list(&mac_addr_list); return err; } /** * ice_cfg_pf_default_mac_filters - Setup default unicast and broadcast addrs * @sc: device softc structure * * Program the default unicast and broadcast filters for the PF VSI. */ int ice_cfg_pf_default_mac_filters(struct ice_softc *sc) { struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; int err; /* Add the LAN MAC address */ err = ice_add_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); if (err) return err; /* Add the broadcast address */ err = ice_add_vsi_mac_filter(vsi, broadcastaddr); if (err) return err; return (0); } /** * ice_remove_vsi_mac_filter - Remove a MAC address filter for a VSI * @vsi: the VSI to add the filter for * @addr: MAC address to remove a filter for * * Remove a MAC address filter from a given VSI. This is a wrapper around * ice_remove_mac to simplify the interface. First, it only accepts a single * address, so we don't have to mess around with the list setup in other * functions. Second, it ignores the ICE_ERR_DOES_NOT_EXIST error, so that * callers don't need to worry about attempting to remove filters which * haven't yet been added. */ int ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr) { struct ice_list_head mac_addr_list; struct ice_hw *hw = &vsi->sc->hw; device_t dev = vsi->sc->dev; enum ice_status status; int err = 0; INIT_LIST_HEAD(&mac_addr_list); err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI); if (err) goto free_mac_list; status = ice_remove_mac(hw, &mac_addr_list); if (status == ICE_ERR_DOES_NOT_EXIST) { ; /* Don't complain if we try to remove a filter that doesn't exist */ } else if (status) { device_printf(dev, "Failed to remove a filter for MAC %6D, err %s aq_err %s\n", addr, ":", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = (EIO); } free_mac_list: ice_free_fltr_list(&mac_addr_list); return err; } /** * ice_rm_pf_default_mac_filters - Remove default unicast and broadcast addrs * @sc: device softc structure * * Remove the default unicast and broadcast filters from the PF VSI. */ int ice_rm_pf_default_mac_filters(struct ice_softc *sc) { struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; int err; /* Remove the LAN MAC address */ err = ice_remove_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr); if (err) return err; /* Remove the broadcast address */ err = ice_remove_vsi_mac_filter(vsi, broadcastaddr); if (err) return (EIO); return (0); } /** * ice_check_ctrlq_errors - Check for and report controlq errors * @sc: device private structure * @qname: name of the controlq * @cq: the controlq to check * * Check and report controlq errors. Currently all we do is report them to the * kernel message log, but we might want to improve this in the future, such * as to keep track of statistics. 
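 *
 * Called from ice_process_ctrlq() before draining events, e.g. for the
 * admin queue:
 *
 *	ice_check_ctrlq_errors(sc, "Admin", &hw->adminq);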
*/ static void ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname, struct ice_ctl_q_info *cq) { struct ice_hw *hw = &sc->hw; u32 val; /* Check for error indications. Note that all the controlqs use the * same register layout, so we use the PF_FW_AxQLEN defines only. */ val = rd32(hw, cq->rq.len); if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | PF_FW_ARQLEN_ARQCRIT_M)) { if (val & PF_FW_ARQLEN_ARQVFE_M) device_printf(sc->dev, "%s Receive Queue VF Error detected\n", qname); if (val & PF_FW_ARQLEN_ARQOVFL_M) device_printf(sc->dev, "%s Receive Queue Overflow Error detected\n", qname); if (val & PF_FW_ARQLEN_ARQCRIT_M) device_printf(sc->dev, "%s Receive Queue Critical Error detected\n", qname); val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M | PF_FW_ARQLEN_ARQCRIT_M); wr32(hw, cq->rq.len, val); } val = rd32(hw, cq->sq.len); if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | PF_FW_ATQLEN_ATQCRIT_M)) { if (val & PF_FW_ATQLEN_ATQVFE_M) device_printf(sc->dev, "%s Send Queue VF Error detected\n", qname); if (val & PF_FW_ATQLEN_ATQOVFL_M) device_printf(sc->dev, "%s Send Queue Overflow Error detected\n", qname); if (val & PF_FW_ATQLEN_ATQCRIT_M) device_printf(sc->dev, "%s Send Queue Critical Error detected\n", qname); val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M | PF_FW_ATQLEN_ATQCRIT_M); wr32(hw, cq->sq.len, val); } } /** * ice_process_link_event - Process a link event indication from firmware * @sc: device softc structure * @e: the received event data * * Gets the current link status from hardware, and may print a message if an * unqualified module is detected. */ static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_info __invariant_only *e) { struct ice_port_info *pi = sc->hw.port_info; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; /* Sanity check that the data length matches */ MPASS(le16toh(e->desc.datalen) == sizeof(struct ice_aqc_get_link_status_data)); /* * Even though the adapter gets link status information inside the * event, it needs to send a Get Link Status AQ command in order * to re-enable link events. */ pi->phy.get_link_info = true; ice_get_link_status(pi, &sc->link_up); if (pi->phy.link_info.topo_media_conflict & (ICE_AQ_LINK_TOPO_CONFLICT | ICE_AQ_LINK_MEDIA_CONFLICT | ICE_AQ_LINK_TOPO_CORRUPT)) device_printf(dev, "Possible mis-configuration of the Ethernet port detected; please use the Intel (R) Ethernet Port Configuration Tool utility to address the issue.\n"); if ((pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) && - !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP) && - !(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE)) - device_printf(dev, - "Link is disabled on this device because an unsupported module type was detected! Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) { + if (!(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE)) + device_printf(dev, + "Link is disabled on this device because an unsupported module type was detected! Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + if (pi->phy.link_info.link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) + device_printf(dev, + "The module's power requirements exceed the device's power supply. 
Cannot start link.\n"); + if (pi->phy.link_info.link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) + device_printf(dev, + "The installed module is incompatible with the device's NVM image. Cannot start link.\n"); + } if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) { if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) { status = ice_aq_set_link_restart_an(pi, false, NULL); if (status != ICE_SUCCESS) device_printf(dev, "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } } /* ICE_STATE_NO_MEDIA is cleared when polling task detects media */ /* Indicate that link status must be reported again */ ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED); /* OS link info is updated elsewhere */ } /** * ice_process_ctrlq_event - Respond to a controlq event * @sc: device private structure * @qname: the name for this controlq * @event: the event to process * * Perform actions in response to various controlq event notifications. */ static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname, struct ice_rq_event_info *event) { u16 opcode; opcode = le16toh(event->desc.opcode); switch (opcode) { case ice_aqc_opc_get_link_status: ice_process_link_event(sc, event); break; case ice_mbx_opc_send_msg_to_pf: /* TODO: handle IOV event */ break; case ice_aqc_opc_lldp_set_mib_change: ice_handle_mib_change_event(sc, event); break; case ice_aqc_opc_event_lan_overflow: ice_handle_lan_overflow_event(sc, event); break; + case ice_aqc_opc_get_health_status: + ice_handle_health_status_event(sc, event); + break; default: device_printf(sc->dev, "%s Receive Queue unhandled event 0x%04x ignored\n", qname, opcode); } } /** * ice_process_ctrlq - helper function to process controlq rings * @sc: device private structure * @q_type: specific control queue type * @pending: return parameter to track remaining events * * Process controlq events for a given control queue type. Returns zero on * success, and an error code on failure. If successful, pending is the number * of remaining events left in the queue. */ int ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending) { struct ice_rq_event_info event = { { 0 } }; struct ice_hw *hw = &sc->hw; struct ice_ctl_q_info *cq; enum ice_status status; const char *qname; int loop = 0; switch (q_type) { case ICE_CTL_Q_ADMIN: cq = &hw->adminq; qname = "Admin"; break; case ICE_CTL_Q_MAILBOX: cq = &hw->mailboxq; qname = "Mailbox"; break; default: device_printf(sc->dev, "Unknown control queue type 0x%x\n", q_type); return 0; } ice_check_ctrlq_errors(sc, qname, cq); /* * Control queue processing happens during the admin task which may be * holding a non-sleepable lock, so we *must* use M_NOWAIT here. 
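 *
 * For contrast, a sleepable context could use something like
 *
 *	buf = malloc(size, M_ICE, M_ZERO | M_WAITOK);
 *
 * (buf and size being placeholders) since malloc(9) with M_WAITOK may
 * sleep until memory becomes available; doing that here could sleep
 * with that non-sleepable lock held.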
*/ event.buf_len = cq->rq_buf_size; event.msg_buf = (u8 *)malloc(event.buf_len, M_ICE, M_ZERO | M_NOWAIT); if (!event.msg_buf) { device_printf(sc->dev, "Unable to allocate memory for %s Receive Queue event\n", qname); return (ENOMEM); } do { status = ice_clean_rq_elem(hw, cq, &event, pending); if (status == ICE_ERR_AQ_NO_WORK) break; if (status) { if (q_type == ICE_CTL_Q_ADMIN) device_printf(sc->dev, - "%s Receive Queue event error %s aq_err %s\n", - qname, ice_status_str(status), - ice_aq_str(cq->rq_last_status)); + "%s Receive Queue event error %s\n", + qname, ice_status_str(status)); else device_printf(sc->dev, - "%s Receive Queue event error %s cq_err %d\n", - qname, ice_status_str(status), cq->rq_last_status); + "%s Receive Queue event error %s\n", + qname, ice_status_str(status)); free(event.msg_buf, M_ICE); return (EIO); } /* XXX should we separate this handler by controlq type? */ ice_process_ctrlq_event(sc, qname, &event); } while (*pending && (++loop < ICE_CTRLQ_WORK_LIMIT)); free(event.msg_buf, M_ICE); return 0; } /** * pkg_ver_empty - Check if a package version is empty * @pkg_ver: the package version to check * @pkg_name: the package name to check * * Checks if the package version structure is empty. We consider a package * version as empty if all of the version numbers are zero and the name * string is empty as well. * * This is used to check if the package version was initialized by the driver, * as we do not expect an actual DDP package file to have a zero'd version and * name. * * @returns true if the package version is empty, or false otherwise. */ static bool pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name) { return (pkg_name[0] == '\0' && pkg_ver->major == 0 && pkg_ver->minor == 0 && pkg_ver->update == 0 && pkg_ver->draft == 0); } /** * pkg_ver_compatible - Check if the package version is compatible * @pkg_ver: the package version to check * * Compares the package version number to the driver's expected major/minor * version. Returns an integer indicating whether the version is older, newer, * or compatible with the driver. * * @returns 0 if the package version is compatible, -1 if the package version * is older, and 1 if the package version is newer than the driver version. */ static int pkg_ver_compatible(struct ice_pkg_ver *pkg_ver) { if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ) return (1); /* newer */ else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) && (pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) return (1); /* newer */ else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) && (pkg_ver->minor == ICE_PKG_SUPP_VER_MNR)) return (0); /* compatible */ else return (-1); /* older */ } /** * ice_os_pkg_version_str - Format OS package version info into a sbuf * @hw: device hw structure * @buf: string buffer to store name/version string * * Formats the name and version of the OS DDP package as found in the ice_ddp * module into a string. * * @remark This will almost always be the same as the active package, but * could be different in some cases. Use ice_active_pkg_version_str to get the * version of the active DDP package. */ static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) { char name_buf[ICE_PKG_NAME_SIZE]; /* If the OS DDP package info is empty, use "None" */ if (pkg_ver_empty(&hw->pkg_ver, hw->pkg_name)) { sbuf_printf(buf, "None"); return; } /* * This should already be null-terminated, but since this is a raw * value from an external source, strlcpy() into a new buffer to * make sure. 
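 *
 * strlcpy(9) always NUL-terminates its destination, truncating when the
 * source is too long, e.g. (illustrative values):
 *
 *	char dst[4];
 *
 *	strlcpy(dst, "longname", sizeof(dst));	(dst now holds "lon")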
*/ bzero(name_buf, sizeof(name_buf)); strlcpy(name_buf, (char *)hw->pkg_name, ICE_PKG_NAME_SIZE); sbuf_printf(buf, "%s version %u.%u.%u.%u", name_buf, hw->pkg_ver.major, hw->pkg_ver.minor, hw->pkg_ver.update, hw->pkg_ver.draft); } /** * ice_active_pkg_version_str - Format active package version info into a sbuf * @hw: device hw structure * @buf: string buffer to store name/version string * * Formats the name and version of the active DDP package info into a string * buffer for use. */ static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf) { char name_buf[ICE_PKG_NAME_SIZE]; /* If the active DDP package info is empty, use "None" */ if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { sbuf_printf(buf, "None"); return; } /* * This should already be null-terminated, but since this is a raw * value from an external source, strlcpy() into a new buffer to * make sure. */ bzero(name_buf, sizeof(name_buf)); strlcpy(name_buf, (char *)hw->active_pkg_name, ICE_PKG_NAME_SIZE); sbuf_printf(buf, "%s version %u.%u.%u.%u", name_buf, hw->active_pkg_ver.major, hw->active_pkg_ver.minor, hw->active_pkg_ver.update, hw->active_pkg_ver.draft); if (hw->active_track_id != 0) sbuf_printf(buf, ", track id 0x%08x", hw->active_track_id); } /** * ice_nvm_version_str - Format the NVM version information into a sbuf * @hw: device hw structure * @buf: string buffer to store version string * * Formats the NVM information including firmware version, API version, NVM * version, the EETRACK id, and OEM specific version information into a string * buffer. */ static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf) { struct ice_nvm_info *nvm = &hw->flash.nvm; struct ice_orom_info *orom = &hw->flash.orom; struct ice_netlist_info *netlist = &hw->flash.netlist; /* Note that the netlist versions are stored in packed Binary Coded * Decimal format. The use of '%x' will correctly display these as * decimal numbers. This works because every 4 bits will be displayed * as a hexadecimal digit, and the BCD format will only use the values * 0-9. */ sbuf_printf(buf, "fw %u.%u.%u api %u.%u nvm %x.%02x etid %08x netlist %x.%x.%x-%x.%x.%x.%04x oem %u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch, hw->api_maj_ver, hw->api_min_ver, nvm->major, nvm->minor, nvm->eetrack, netlist->major, netlist->minor, netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, netlist->cust_ver, netlist->hash, orom->major, orom->build, orom->patch); } /** * ice_print_nvm_version - Print the NVM info to the kernel message log * @sc: the device softc structure * * Format and print an NVM version string using ice_nvm_version_str(). */ void ice_print_nvm_version(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; struct sbuf *sbuf; sbuf = sbuf_new_auto(); ice_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); device_printf(dev, "%s\n", sbuf_data(sbuf)); sbuf_delete(sbuf); } /** * ice_update_vsi_hw_stats - Update VSI-specific ethernet statistics counters * @vsi: the VSI to be updated * * Reads hardware stats and updates the ice_vsi_hw_stats tracking structure with * the updated values. 
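 *
 * The prev/cur pairs exist because the hardware counters are
 * free-running; roughly, the ice_stat_update helpers behave like this
 * sketch (not their exact implementation):
 *
 *	if (!offsets_loaded)
 *		prev = reg;		(first read latches a baseline)
 *	cur += (reg - prev);		(modulo the counter width)
 *	prev = reg;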
*/ void ice_update_vsi_hw_stats(struct ice_vsi *vsi) { struct ice_eth_stats *prev_es, *cur_es; struct ice_hw *hw = &vsi->sc->hw; u16 vsi_num; if (!ice_is_vsi_valid(hw, vsi->idx)) return; vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); /* HW absolute index of a VSI */ prev_es = &vsi->hw_stats.prev; cur_es = &vsi->hw_stats.cur; #define ICE_VSI_STAT40(name, location) \ ice_stat_update40(hw, name ## L(vsi_num), \ vsi->hw_stats.offsets_loaded, \ &prev_es->location, &cur_es->location) #define ICE_VSI_STAT32(name, location) \ ice_stat_update32(hw, name(vsi_num), \ vsi->hw_stats.offsets_loaded, \ &prev_es->location, &cur_es->location) ICE_VSI_STAT40(GLV_GORC, rx_bytes); ICE_VSI_STAT40(GLV_UPRC, rx_unicast); ICE_VSI_STAT40(GLV_MPRC, rx_multicast); ICE_VSI_STAT40(GLV_BPRC, rx_broadcast); ICE_VSI_STAT32(GLV_RDPC, rx_discards); ICE_VSI_STAT40(GLV_GOTC, tx_bytes); ICE_VSI_STAT40(GLV_UPTC, tx_unicast); ICE_VSI_STAT40(GLV_MPTC, tx_multicast); ICE_VSI_STAT40(GLV_BPTC, tx_broadcast); ICE_VSI_STAT32(GLV_TEPC, tx_errors); ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded, cur_es); #undef ICE_VSI_STAT40 #undef ICE_VSI_STAT32 vsi->hw_stats.offsets_loaded = true; } /** * ice_reset_vsi_stats - Reset VSI statistics counters * @vsi: VSI structure * * Resets the software tracking counters for the VSI statistics, and indicates * that the offsets haven't been loaded. This is intended to be called * post-reset so that VSI statistics count from zero again. */ void ice_reset_vsi_stats(struct ice_vsi *vsi) { /* Reset HW stats */ memset(&vsi->hw_stats.prev, 0, sizeof(vsi->hw_stats.prev)); memset(&vsi->hw_stats.cur, 0, sizeof(vsi->hw_stats.cur)); vsi->hw_stats.offsets_loaded = false; } /** * ice_update_pf_stats - Update port stats counters * @sc: device private softc structure * * Reads hardware statistics registers and updates the software tracking * structure with new values. 
*/ void ice_update_pf_stats(struct ice_softc *sc) { struct ice_hw_port_stats *prev_ps, *cur_ps; struct ice_hw *hw = &sc->hw; u8 lport; MPASS(hw->port_info); prev_ps = &sc->stats.prev; cur_ps = &sc->stats.cur; lport = hw->port_info->lport; #define ICE_PF_STAT40(name, location) \ ice_stat_update40(hw, name ## L(lport), \ sc->stats.offsets_loaded, \ &prev_ps->location, &cur_ps->location) #define ICE_PF_STAT32(name, location) \ ice_stat_update32(hw, name(lport), \ sc->stats.offsets_loaded, \ &prev_ps->location, &cur_ps->location) ICE_PF_STAT40(GLPRT_GORC, eth.rx_bytes); ICE_PF_STAT40(GLPRT_UPRC, eth.rx_unicast); ICE_PF_STAT40(GLPRT_MPRC, eth.rx_multicast); ICE_PF_STAT40(GLPRT_BPRC, eth.rx_broadcast); ICE_PF_STAT40(GLPRT_GOTC, eth.tx_bytes); ICE_PF_STAT40(GLPRT_UPTC, eth.tx_unicast); ICE_PF_STAT40(GLPRT_MPTC, eth.tx_multicast); ICE_PF_STAT40(GLPRT_BPTC, eth.tx_broadcast); /* This stat register doesn't have an lport */ ice_stat_update32(hw, PRTRPB_RDPC, sc->stats.offsets_loaded, &prev_ps->eth.rx_discards, &cur_ps->eth.rx_discards); ICE_PF_STAT32(GLPRT_TDOLD, tx_dropped_link_down); ICE_PF_STAT40(GLPRT_PRC64, rx_size_64); ICE_PF_STAT40(GLPRT_PRC127, rx_size_127); ICE_PF_STAT40(GLPRT_PRC255, rx_size_255); ICE_PF_STAT40(GLPRT_PRC511, rx_size_511); ICE_PF_STAT40(GLPRT_PRC1023, rx_size_1023); ICE_PF_STAT40(GLPRT_PRC1522, rx_size_1522); ICE_PF_STAT40(GLPRT_PRC9522, rx_size_big); ICE_PF_STAT40(GLPRT_PTC64, tx_size_64); ICE_PF_STAT40(GLPRT_PTC127, tx_size_127); ICE_PF_STAT40(GLPRT_PTC255, tx_size_255); ICE_PF_STAT40(GLPRT_PTC511, tx_size_511); ICE_PF_STAT40(GLPRT_PTC1023, tx_size_1023); ICE_PF_STAT40(GLPRT_PTC1522, tx_size_1522); ICE_PF_STAT40(GLPRT_PTC9522, tx_size_big); ICE_PF_STAT32(GLPRT_LXONRXC, link_xon_rx); ICE_PF_STAT32(GLPRT_LXOFFRXC, link_xoff_rx); ICE_PF_STAT32(GLPRT_LXONTXC, link_xon_tx); ICE_PF_STAT32(GLPRT_LXOFFTXC, link_xoff_tx); ICE_PF_STAT32(GLPRT_CRCERRS, crc_errors); ICE_PF_STAT32(GLPRT_ILLERRC, illegal_bytes); ICE_PF_STAT32(GLPRT_MLFC, mac_local_faults); ICE_PF_STAT32(GLPRT_MRFC, mac_remote_faults); ICE_PF_STAT32(GLPRT_RLEC, rx_len_errors); ICE_PF_STAT32(GLPRT_RUC, rx_undersize); ICE_PF_STAT32(GLPRT_RFC, rx_fragments); ICE_PF_STAT32(GLPRT_ROC, rx_oversize); ICE_PF_STAT32(GLPRT_RJC, rx_jabber); #undef ICE_PF_STAT40 #undef ICE_PF_STAT32 sc->stats.offsets_loaded = true; } /** * ice_reset_pf_stats - Reset port stats counters * @sc: Device private softc structure * * Reset software tracking values for statistics to zero, and indicate that * offsets haven't been loaded. Intended to be called after a device reset so * that statistics count from zero again. */ void ice_reset_pf_stats(struct ice_softc *sc) { memset(&sc->stats.prev, 0, sizeof(sc->stats.prev)); memset(&sc->stats.cur, 0, sizeof(sc->stats.cur)); sc->stats.offsets_loaded = false; } /** * ice_sysctl_show_fw - sysctl callback to show firmware information * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for the fw_version sysctl, to display the current firmware * information found at hardware init time. 
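 *
 * Example read (hypothetical unit number and values; the format comes
 * from ice_nvm_version_str()):
 *
 *	# sysctl dev.ice.0.fw_version
 *	dev.ice.0.fw_version: fw 5.4.2 api 1.7 nvm 2.40 etid 80001234 ...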
*/ static int ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct sbuf *sbuf; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); ice_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_pba_number - sysctl callback to show PBA number * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for the pba_number sysctl, used to read the Product Board Assembly * number for this device. */ static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; u8 pba_string[32] = ""; enum ice_status status; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); status = ice_read_pba_string(hw, pba_string, sizeof(pba_string)); if (status) { device_printf(dev, "%s: failed to read PBA string from NVM; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } return sysctl_handle_string(oidp, pba_string, sizeof(pba_string), req); } /** * ice_sysctl_pkg_version - sysctl to show the active package version info * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for the pkg_version sysctl, to display the active DDP package name * and version information. */ static int ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct sbuf *sbuf; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); ice_active_pkg_version_str(hw, sbuf); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_os_pkg_version - sysctl to show the OS package version info * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback to display the OS DDP package name and version info found in the * ice_ddp module. */ static int ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct sbuf *sbuf; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); ice_os_pkg_version_str(hw, sbuf); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_current_speed - sysctl callback to show current link speed * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for the current_speed sysctl, to display the string representing * the current link speed. 
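 *
 * Example read (hypothetical unit number and link state; the string is
 * produced by ice_aq_speed_to_str()):
 *
 *	# sysctl dev.ice.0.current_speed
 *	dev.ice.0.current_speed: 25 Gbps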
*/ static int ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct sbuf *sbuf; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); sbuf = sbuf_new_for_sysctl(NULL, NULL, 10, req); sbuf_printf(sbuf, "%s", ice_aq_speed_to_str(hw->port_info)); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * @var phy_link_speeds * @brief PHY link speed conversion array * * Array of link speeds to convert ICE_PHY_TYPE_LOW and ICE_PHY_TYPE_HIGH into * link speeds used by the link speed sysctls. * * @remark these are based on the indices used in the BIT() macros for the * ICE_PHY_TYPE_LOW_* and ICE_PHY_TYPE_HIGH_* definitions. */ static const uint16_t phy_link_speeds[] = { ICE_AQ_LINK_SPEED_100MB, ICE_AQ_LINK_SPEED_100MB, ICE_AQ_LINK_SPEED_1000MB, ICE_AQ_LINK_SPEED_1000MB, ICE_AQ_LINK_SPEED_1000MB, ICE_AQ_LINK_SPEED_1000MB, ICE_AQ_LINK_SPEED_1000MB, ICE_AQ_LINK_SPEED_2500MB, ICE_AQ_LINK_SPEED_2500MB, ICE_AQ_LINK_SPEED_2500MB, ICE_AQ_LINK_SPEED_5GB, ICE_AQ_LINK_SPEED_5GB, ICE_AQ_LINK_SPEED_10GB, ICE_AQ_LINK_SPEED_10GB, ICE_AQ_LINK_SPEED_10GB, ICE_AQ_LINK_SPEED_10GB, ICE_AQ_LINK_SPEED_10GB, ICE_AQ_LINK_SPEED_10GB, ICE_AQ_LINK_SPEED_10GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_25GB, ICE_AQ_LINK_SPEED_40GB, ICE_AQ_LINK_SPEED_40GB, ICE_AQ_LINK_SPEED_40GB, ICE_AQ_LINK_SPEED_40GB, ICE_AQ_LINK_SPEED_40GB, ICE_AQ_LINK_SPEED_40GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_50GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, /* These rates are for ICE_PHY_TYPE_HIGH_* */ ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB, ICE_AQ_LINK_SPEED_100GB }; #define ICE_SYSCTL_HELP_ADVERTISE_SPEED \ "\nControl advertised link speed." \ "\nFlags:" \ "\n\t 0x0 - Auto" \ "\n\t 0x1 - 10 Mb" \ "\n\t 0x2 - 100 Mb" \ "\n\t 0x4 - 1G" \ "\n\t 0x8 - 2.5G" \ "\n\t 0x10 - 5G" \ "\n\t 0x20 - 10G" \ "\n\t 0x40 - 20G" \ "\n\t 0x80 - 25G" \ "\n\t 0x100 - 40G" \ "\n\t 0x200 - 50G" \ "\n\t 0x400 - 100G" \ "\n\t0x8000 - Unknown" \ "\n\t" \ "\nUse \"sysctl -x\" to view flags properly." 
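/*
 * Example use of the flag set above (hypothetical unit number): limit
 * advertising to 25G and 10G by OR-ing their flags together:
 *
 *	# sysctl dev.ice.0.advertise_speed=0xa0
 *
 * where 0xa0 == 0x80 (25G) | 0x20 (10G); "sysctl -x" reads the value
 * back in hex.
 */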
#define ICE_PHYS_100MB \ (ICE_PHY_TYPE_LOW_100BASE_TX | \ ICE_PHY_TYPE_LOW_100M_SGMII) #define ICE_PHYS_1000MB \ (ICE_PHY_TYPE_LOW_1000BASE_T | \ ICE_PHY_TYPE_LOW_1000BASE_SX | \ ICE_PHY_TYPE_LOW_1000BASE_LX | \ ICE_PHY_TYPE_LOW_1000BASE_KX | \ ICE_PHY_TYPE_LOW_1G_SGMII) #define ICE_PHYS_2500MB \ (ICE_PHY_TYPE_LOW_2500BASE_T | \ ICE_PHY_TYPE_LOW_2500BASE_X | \ ICE_PHY_TYPE_LOW_2500BASE_KX) #define ICE_PHYS_5GB \ (ICE_PHY_TYPE_LOW_5GBASE_T | \ ICE_PHY_TYPE_LOW_5GBASE_KR) #define ICE_PHYS_10GB \ (ICE_PHY_TYPE_LOW_10GBASE_T | \ ICE_PHY_TYPE_LOW_10G_SFI_DA | \ ICE_PHY_TYPE_LOW_10GBASE_SR | \ ICE_PHY_TYPE_LOW_10GBASE_LR | \ ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \ ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \ ICE_PHY_TYPE_LOW_10G_SFI_C2C) #define ICE_PHYS_25GB \ (ICE_PHY_TYPE_LOW_25GBASE_T | \ ICE_PHY_TYPE_LOW_25GBASE_CR | \ ICE_PHY_TYPE_LOW_25GBASE_CR_S | \ ICE_PHY_TYPE_LOW_25GBASE_CR1 | \ ICE_PHY_TYPE_LOW_25GBASE_SR | \ ICE_PHY_TYPE_LOW_25GBASE_LR | \ ICE_PHY_TYPE_LOW_25GBASE_KR | \ ICE_PHY_TYPE_LOW_25GBASE_KR_S | \ ICE_PHY_TYPE_LOW_25GBASE_KR1 | \ ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | \ ICE_PHY_TYPE_LOW_25G_AUI_C2C) #define ICE_PHYS_40GB \ (ICE_PHY_TYPE_LOW_40GBASE_CR4 | \ ICE_PHY_TYPE_LOW_40GBASE_SR4 | \ ICE_PHY_TYPE_LOW_40GBASE_LR4 | \ ICE_PHY_TYPE_LOW_40GBASE_KR4 | \ ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \ ICE_PHY_TYPE_LOW_40G_XLAUI) #define ICE_PHYS_50GB \ (ICE_PHY_TYPE_LOW_50GBASE_CR2 | \ ICE_PHY_TYPE_LOW_50GBASE_SR2 | \ ICE_PHY_TYPE_LOW_50GBASE_LR2 | \ ICE_PHY_TYPE_LOW_50GBASE_KR2 | \ ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \ ICE_PHY_TYPE_LOW_50G_LAUI2 | \ ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \ ICE_PHY_TYPE_LOW_50G_AUI2 | \ ICE_PHY_TYPE_LOW_50GBASE_CP | \ ICE_PHY_TYPE_LOW_50GBASE_SR | \ ICE_PHY_TYPE_LOW_50GBASE_FR | \ ICE_PHY_TYPE_LOW_50GBASE_LR | \ ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 | \ ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \ ICE_PHY_TYPE_LOW_50G_AUI1) #define ICE_PHYS_100GB_LOW \ (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \ ICE_PHY_TYPE_LOW_100GBASE_SR4 | \ ICE_PHY_TYPE_LOW_100GBASE_LR4 | \ ICE_PHY_TYPE_LOW_100GBASE_KR4 | \ ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \ ICE_PHY_TYPE_LOW_100G_CAUI4 | \ ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \ ICE_PHY_TYPE_LOW_100G_AUI4 | \ ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \ ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \ ICE_PHY_TYPE_LOW_100GBASE_CP2 | \ ICE_PHY_TYPE_LOW_100GBASE_SR2 | \ ICE_PHY_TYPE_LOW_100GBASE_DR) #define ICE_PHYS_100GB_HIGH \ (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \ ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \ ICE_PHY_TYPE_HIGH_100G_CAUI2 | \ ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ ICE_PHY_TYPE_HIGH_100G_AUI2) /** - * ice_aq_phy_types_to_sysctl_speeds - Convert the PHY Types to speeds + * ice_aq_phy_types_to_link_speeds - Convert the PHY Types to speeds * @phy_type_low: lower 64-bit PHY Type bitmask * @phy_type_high: upper 64-bit PHY Type bitmask * * Convert the PHY Type fields from Get PHY Abilities and Set PHY Config into * link speed flags. If phy_type_high has an unknown PHY type, then the return * value will include the "ICE_AQ_LINK_SPEED_UNKNOWN" flag as well. 
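 *
 * For example (a sketch of the mapping): a phy_type_low with only the
 * ICE_PHY_TYPE_LOW_25GBASE_SR bit set indexes phy_link_speeds[] at that
 * bit position and yields ICE_AQ_LINK_SPEED_25GB.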
*/ static u16 -ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, u64 phy_type_high) +ice_aq_phy_types_to_link_speeds(u64 phy_type_low, u64 phy_type_high) { u16 sysctl_speeds = 0; int bit; /* coverity[address_of] */ for_each_set_bit(bit, &phy_type_low, 64) sysctl_speeds |= phy_link_speeds[bit]; /* coverity[address_of] */ for_each_set_bit(bit, &phy_type_high, 64) { if ((bit + 64) < (int)ARRAY_SIZE(phy_link_speeds)) sysctl_speeds |= phy_link_speeds[bit + 64]; else sysctl_speeds |= ICE_AQ_LINK_SPEED_UNKNOWN; } return (sysctl_speeds); } /** * ice_sysctl_speeds_to_aq_phy_types - Convert sysctl speed flags to AQ PHY flags * @sysctl_speeds: 16-bit sysctl speeds or AQ_LINK_SPEED flags * @phy_type_low: output parameter for lower AQ PHY flags * @phy_type_high: output parameter for higher AQ PHY flags * * Converts the given link speed flags into AQ PHY type flag sets appropriate * for use in a Set PHY Config command. */ static void ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low, u64 *phy_type_high) { *phy_type_low = 0, *phy_type_high = 0; if (sysctl_speeds & ICE_AQ_LINK_SPEED_100MB) *phy_type_low |= ICE_PHYS_100MB; if (sysctl_speeds & ICE_AQ_LINK_SPEED_1000MB) *phy_type_low |= ICE_PHYS_1000MB; if (sysctl_speeds & ICE_AQ_LINK_SPEED_2500MB) *phy_type_low |= ICE_PHYS_2500MB; if (sysctl_speeds & ICE_AQ_LINK_SPEED_5GB) *phy_type_low |= ICE_PHYS_5GB; if (sysctl_speeds & ICE_AQ_LINK_SPEED_10GB) *phy_type_low |= ICE_PHYS_10GB; if (sysctl_speeds & ICE_AQ_LINK_SPEED_25GB) *phy_type_low |= ICE_PHYS_25GB; if (sysctl_speeds & ICE_AQ_LINK_SPEED_40GB) *phy_type_low |= ICE_PHYS_40GB; if (sysctl_speeds & ICE_AQ_LINK_SPEED_50GB) *phy_type_low |= ICE_PHYS_50GB; if (sysctl_speeds & ICE_AQ_LINK_SPEED_100GB) { *phy_type_low |= ICE_PHYS_100GB_LOW; *phy_type_high |= ICE_PHYS_100GB_HIGH; } } /** - * ice_intersect_media_types_with_caps - Restrict input AQ PHY flags - * @sc: driver private structure - * @sysctl_speeds: current SW configuration of PHY types - * @phy_type_low: input/output flag set for low PHY types - * @phy_type_high: input/output flag set for high PHY types + * @struct ice_phy_data + * @brief PHY caps and link speeds * - * Intersects the input PHY flags with PHY flags retrieved from the adapter to - * ensure the flags are compatible. - * - * @returns 0 on success, EIO if an AQ command fails, or EINVAL if input PHY - * types have no intersection with TOPO_CAPS and the adapter is in non-lenient - * mode + * Buffer providing report mode and user speeds; + * returning intersection of PHY types and speeds. */ -static int -ice_intersect_media_types_with_caps(struct ice_softc *sc, u16 sysctl_speeds, - u64 *phy_type_low, u64 *phy_type_high) -{ - struct ice_aqc_get_phy_caps_data pcaps = { 0 }; - struct ice_port_info *pi = sc->hw.port_info; - device_t dev = sc->dev; - enum ice_status status; - u64 temp_phy_low, temp_phy_high; - u64 final_phy_low, final_phy_high; - u16 topo_speeds; - - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(sc->hw.adminq.sq_last_status)); - return (EIO); - } - - final_phy_low = le64toh(pcaps.phy_type_low); - final_phy_high = le64toh(pcaps.phy_type_high); - - topo_speeds = ice_aq_phy_types_to_sysctl_speeds(final_phy_low, - final_phy_high); - - /* - * If the user specifies a subset of speeds the media is already - * capable of supporting, then we're good to go. 
- */ - if ((sysctl_speeds & topo_speeds) == sysctl_speeds) - goto intersect_final; - - temp_phy_low = final_phy_low; - temp_phy_high = final_phy_high; - /* - * Otherwise, we'll have to use the superset if Lenient Mode is - * supported. - */ - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { - /* - * Start with masks that _don't_ include the PHY types - * discovered by the TOPO_CAP. - */ - ice_sysctl_speeds_to_aq_phy_types(topo_speeds, &final_phy_low, - &final_phy_high); - final_phy_low = ~final_phy_low; - final_phy_high = ~final_phy_high; - - /* Get the PHY types the NVM says we can support */ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (NVM_CAP) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(sc->hw.adminq.sq_last_status)); - return (status); - } - - /* - * Clear out the unsupported PHY types, including those - * from TOPO_CAP. - */ - final_phy_low &= le64toh(pcaps.phy_type_low); - final_phy_high &= le64toh(pcaps.phy_type_high); - /* - * Include PHY types from TOPO_CAP (which may be a subset - * of the types the NVM specifies). - */ - final_phy_low |= temp_phy_low; - final_phy_high |= temp_phy_high; - } - -intersect_final: - - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) - ice_apply_supported_speed_filter(&final_phy_low, &final_phy_high); - - ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &temp_phy_low, - &temp_phy_high); - - final_phy_low &= temp_phy_low; - final_phy_high &= temp_phy_high; - - if (final_phy_low == 0 && final_phy_high == 0) { - device_printf(dev, - "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); - return (EINVAL); - } - - /* Overwrite input phy_type values and return */ - *phy_type_low = final_phy_low; - *phy_type_high = final_phy_high; - - return (0); -} +struct ice_phy_data { + u64 phy_low_orig; /* PHY low quad from report */ + u64 phy_high_orig; /* PHY high quad from report */ + u64 phy_low_intr; /* PHY low quad intersection with user speeds */ + u64 phy_high_intr; /* PHY high quad intersection with user speeds */ + u16 user_speeds_orig; /* Input from caller - See ICE_AQ_LINK_SPEED_* */ + u16 user_speeds_intr; /* Intersect with report speeds */ + u8 report_mode; /* See ICE_AQC_REPORT_* */ +}; /** - * ice_get_auto_speeds - Get PHY type flags for "auto" speed - * @sc: driver private structure - * @phy_type_low: output low PHY type flags - * @phy_type_high: output high PHY type flags - * - * Retrieves a suitable set of PHY type flags to use for an "auto" speed - * setting by either using the NVM default overrides for speed, or retrieving - * a default from the adapter using Get PHY capabilities in TOPO_CAPS mode. 
+ * ice_intersect_phy_types_and_speeds - Return intersection of link speeds + * @sc: device private structure + * @phy_data: device PHY data * - * @returns 0 on success or EIO on AQ command failure + * Using the report mode and user-requested speeds in @phy_data as input, + * queries the adapter's PHY capabilities and stores the reported PHY types + * and speeds, plus their intersection with the user-requested speeds, back + * into @phy_data. + * + * @returns 0 on success, EINVAL on an invalid report mode or an unsupported + * user speed selection, or EIO if the AQ command fails */ static int -ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low, - u64 *phy_type_high) +ice_intersect_phy_types_and_speeds(struct ice_softc *sc, + struct ice_phy_data *phy_data) { struct ice_aqc_get_phy_caps_data pcaps = { 0 }; + const char *report_types[5] = { "w/o MEDIA", + "w/MEDIA", + "ACTIVE", + "EDOOFUS", /* Not used */ + "DFLT" }; struct ice_hw *hw = &sc->hw; struct ice_port_info *pi = hw->port_info; - device_t dev = sc->dev; enum ice_status status; + u16 report_speeds, temp_speeds; + u8 report_type; + bool apply_speed_filter = false; + + switch (phy_data->report_mode) { + case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA: + case ICE_AQC_REPORT_TOPO_CAP_MEDIA: + case ICE_AQC_REPORT_ACTIVE_CFG: + case ICE_AQC_REPORT_DFLT_CFG: + report_type = phy_data->report_mode >> 1; + break; + default: + device_printf(sc->dev, + "%s: phy_data.report_mode \"%u\" doesn't exist\n", + __func__, phy_data->report_mode); + return (EINVAL); + } - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE)) { - /* copy over speed settings from LDO TLV */ - *phy_type_low = CPU_TO_LE64(sc->ldo_tlv.phy_type_low); - *phy_type_high = CPU_TO_LE64(sc->ldo_tlv.phy_type_high); - } else { - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } + /* 0 is treated as "Auto"; the driver will handle selecting the + * correct speeds, including, in some cases, applying an override + * if one is provided. 
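+ * For instance (hypothetical input): with user_speeds_orig == 0 the field
+ * becomes USHRT_MAX, so every speed reported by the firmware survives the
+ * user_speeds_orig & report_speeds intersection computed below.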
+ */ + if (phy_data->user_speeds_orig == 0) + phy_data->user_speeds_orig = USHRT_MAX; + else if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) + apply_speed_filter = true; + + status = ice_aq_get_phy_caps(pi, false, phy_data->report_mode, &pcaps, NULL); + if (status != ICE_SUCCESS) { + device_printf(sc->dev, + "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", + __func__, report_types[report_type], + ice_status_str(status), + ice_aq_str(sc->hw.adminq.sq_last_status)); + return (EIO); + } - *phy_type_low = le64toh(pcaps.phy_type_low); - *phy_type_high = le64toh(pcaps.phy_type_high); + phy_data->phy_low_orig = le64toh(pcaps.phy_type_low); + phy_data->phy_high_orig = le64toh(pcaps.phy_type_high); + report_speeds = ice_aq_phy_types_to_link_speeds(phy_data->phy_low_orig, + phy_data->phy_high_orig); + if (apply_speed_filter) { + temp_speeds = ice_apply_supported_speed_filter(report_speeds); + if ((phy_data->user_speeds_orig & temp_speeds) == 0) { + device_printf(sc->dev, + "User-specified speeds (\"0x%04X\") not supported\n", + phy_data->user_speeds_orig); + return (EINVAL); + } + report_speeds = temp_speeds; } + ice_sysctl_speeds_to_aq_phy_types(phy_data->user_speeds_orig, + &phy_data->phy_low_intr, &phy_data->phy_high_intr); + phy_data->user_speeds_intr = phy_data->user_speeds_orig & report_speeds; + phy_data->phy_low_intr &= phy_data->phy_low_orig; + phy_data->phy_high_intr &= phy_data->phy_high_orig; return (0); -} + } /** * ice_sysctl_advertise_speed - Display/change link speeds supported by port * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the currently supported speeds * On write: Sets the device's supported speeds * Valid input flags: see ICE_SYSCTL_HELP_ADVERTISE_SPEED */ static int ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; - struct ice_aqc_get_phy_caps_data pcaps = { 0 }; - struct ice_aqc_set_phy_cfg_data cfg = { 0 }; - struct ice_hw *hw = &sc->hw; - struct ice_port_info *pi = hw->port_info; + struct ice_port_info *pi = sc->hw.port_info; + struct ice_phy_data phy_data = { 0 }; device_t dev = sc->dev; - enum ice_status status; - u64 phy_low, phy_high; - u16 sysctl_speeds = 0; - int error = 0; + u16 sysctl_speeds; + int ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Get the current speeds from the adapter's "active" configuration. 
*/ - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (SW_CFG) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); + phy_data.report_mode = ICE_AQC_REPORT_ACTIVE_CFG; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret) { + /* Error message already printed within function */ + return (ret); } - phy_low = le64toh(pcaps.phy_type_low); - phy_high = le64toh(pcaps.phy_type_high); - sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); + sysctl_speeds = phy_data.user_speeds_intr; - error = sysctl_handle_16(oidp, &sysctl_speeds, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_16(oidp, &sysctl_speeds, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (sysctl_speeds > 0x7FF) { device_printf(dev, "%s: \"%u\" is outside of the range of acceptable values.\n", __func__, sysctl_speeds); return (EINVAL); } - /* 0 is treated as "Auto"; the driver will handle selecting the correct speeds, - * or apply an override if one is specified in the NVM. - */ - if (sysctl_speeds == 0) { - error = ice_get_auto_speeds(sc, &phy_low, &phy_high); - if (error) - /* Function already prints appropriate error message */ - return (error); - } else { - error = ice_intersect_media_types_with_caps(sc, sysctl_speeds, - &phy_low, &phy_high); - if (error) - /* Function already prints appropriate error message */ - return (error); - } - sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); - - /* Cache new user setting for speeds */ pi->phy.curr_user_speed_req = sysctl_speeds; - /* Setup new PHY config with new input PHY types */ - ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); - - cfg.phy_type_low = phy_low; - cfg.phy_type_high = phy_high; - cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; - - status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); - if (status != ICE_SUCCESS) { - /* Don't indicate failure if there's no media in the port -- the sysctl - * handler has saved the value and will apply it when media is inserted. - */ - if (status == ICE_ERR_AQ_ERROR && - hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { - device_printf(dev, - "%s: Setting will be applied when media is inserted\n", __func__); - return (0); - } else { - device_printf(dev, - "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } - } - - return (0); + /* Apply settings requested by user */ + return ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS); } #define ICE_SYSCTL_HELP_FEC_CONFIG \ "\nDisplay or set the port's requested FEC mode." \ "\n\tauto - " ICE_FEC_STRING_AUTO \ "\n\tfc - " ICE_FEC_STRING_BASER \ "\n\trs - " ICE_FEC_STRING_RS \ "\n\tnone - " ICE_FEC_STRING_NONE \ "\nEither of the left or right strings above can be used to set the requested mode." /** * ice_sysctl_fec_config - Display/change the configured FEC mode * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the configured FEC mode * On write: Sets the device's FEC mode to the input string, if it's valid. 
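 *
 * Example (hypothetical unit number): request RS-FEC on the port:
 *
 *	# sysctl dev.ice.0.requested_fec=rs
 *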
* Valid input strings: see ICE_SYSCTL_HELP_FEC_CONFIG */ static int ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_port_info *pi = sc->hw.port_info; - struct ice_aqc_get_phy_caps_data pcaps = { 0 }; - struct ice_aqc_set_phy_cfg_data cfg = { 0 }; - struct ice_hw *hw = &sc->hw; enum ice_fec_mode new_mode; - enum ice_status status; device_t dev = sc->dev; char req_fec[32]; - int error = 0; + int ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); bzero(req_fec, sizeof(req_fec)); strlcpy(req_fec, ice_requested_fec_mode(pi), sizeof(req_fec)); - error = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (strcmp(req_fec, "auto") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) { new_mode = ICE_FEC_AUTO; } else if (strcmp(req_fec, "fc") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_BASER)) == 0) { new_mode = ICE_FEC_BASER; } else if (strcmp(req_fec, "rs") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_RS)) == 0) { new_mode = ICE_FEC_RS; } else if (strcmp(req_fec, "none") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_NONE)) == 0) { new_mode = ICE_FEC_NONE; } else { device_printf(dev, "%s: \"%s\" is not a valid FEC mode\n", __func__, req_fec); return (EINVAL); } /* Cache user FEC mode for later link ups */ pi->phy.curr_user_fec_req = new_mode; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps failed (SW_CFG); status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } - - ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg); - - /* Get link_fec_opt/AUTO_FEC mode from TOPO caps for base for new FEC mode */ - memset(&pcaps, 0, sizeof(pcaps)); - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, - &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps failed (TOPO_CAP); status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } - - /* Configure new FEC options using TOPO caps */ - cfg.link_fec_opt = pcaps.link_fec_options; - cfg.caps &= ~ICE_AQ_PHY_ENA_AUTO_FEC; - if (pcaps.caps & ICE_AQC_PHY_EN_AUTO_FEC) - cfg.caps |= ICE_AQ_PHY_ENA_AUTO_FEC; - - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE) && - new_mode == ICE_FEC_AUTO) { - /* copy over FEC settings from LDO TLV */ - cfg.link_fec_opt = sc->ldo_tlv.fec_options; - } else { - ice_cfg_phy_fec(pi, &cfg, new_mode); - - /* Check if the new mode is valid, and exit with an error if not */ - if (cfg.link_fec_opt && - !(cfg.link_fec_opt & pcaps.link_fec_options)) { - device_printf(dev, - "%s: The requested FEC mode, %s, is not supported by current media\n", - __func__, ice_fec_str(new_mode)); - return (ENOTSUP); - } - } - - cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); - if (status != ICE_SUCCESS) { - /* Don't indicate failure if there's no media in the port -- the sysctl - * handler has saved the value and will apply it when media is inserted. 
- */ - if (status == ICE_ERR_AQ_ERROR && - hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { - device_printf(dev, - "%s: Setting will be applied when media is inserted\n", __func__); - return (0); - } else { - device_printf(dev, - "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return (EIO); - } - } - - return (0); + /* Apply settings requested by user */ + return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FEC); } /** * ice_sysctl_negotiated_fec - Display the negotiated FEC mode on the link * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the negotiated FEC mode, in a string */ static int ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; char neg_fec[32]; - int error; + int ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Copy const string into a buffer to drop const qualifier */ bzero(neg_fec, sizeof(neg_fec)); strlcpy(neg_fec, ice_negotiated_fec_mode(hw->port_info), sizeof(neg_fec)); - error = sysctl_handle_string(oidp, neg_fec, 0, req); + ret = sysctl_handle_string(oidp, neg_fec, 0, req); if (req->newptr != NULL) return (EPERM); - return (error); + return (ret); } #define ICE_SYSCTL_HELP_FC_CONFIG \ "\nDisplay or set the port's advertised flow control mode.\n" \ "\t0 - " ICE_FC_STRING_NONE \ "\n\t1 - " ICE_FC_STRING_RX \ "\n\t2 - " ICE_FC_STRING_TX \ "\n\t3 - " ICE_FC_STRING_FULL \ "\nEither the numbers or the strings above can be used to set the advertised mode." /** * ice_sysctl_fc_config - Display/change the advertised flow control mode * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the configured flow control mode * On write: Sets the device's flow control mode to the input, if it's valid. 
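 *
 * Example (hypothetical unit number): advertise full flow control:
 *
 *	# sysctl dev.ice.0.fc=3
 *
 * The mode strings listed in the help text are accepted as well.
 *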
* Valid input strings: see ICE_SYSCTL_HELP_FC_CONFIG */ static int ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_port_info *pi = sc->hw.port_info; struct ice_aqc_get_phy_caps_data pcaps = { 0 }; enum ice_fc_mode old_mode, new_mode; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; - int error = 0, fc_num; + int ret, fc_num; bool mode_set = false; struct sbuf buf; char *fc_str_end; char fc_str[32]; - u8 aq_failures; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, &pcaps, NULL); if (status != ICE_SUCCESS) { device_printf(dev, "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } /* Convert HW response format to SW enum value */ if ((pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) && (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) old_mode = ICE_FC_FULL; else if (pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) old_mode = ICE_FC_TX_PAUSE; else if (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) old_mode = ICE_FC_RX_PAUSE; else old_mode = ICE_FC_NONE; /* Create "old" string for output */ bzero(fc_str, sizeof(fc_str)); sbuf_new_for_sysctl(&buf, fc_str, sizeof(fc_str), req); sbuf_printf(&buf, "%d<%s>", old_mode, ice_fc_str(old_mode)); sbuf_finish(&buf); sbuf_delete(&buf); - error = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req); + if ((ret) || (req->newptr == NULL)) + return (ret); /* Try to parse input as a string, first */ if (strcasecmp(ice_fc_str(ICE_FC_FULL), fc_str) == 0) { new_mode = ICE_FC_FULL; mode_set = true; } else if (strcasecmp(ice_fc_str(ICE_FC_TX_PAUSE), fc_str) == 0) { new_mode = ICE_FC_TX_PAUSE; mode_set = true; } else if (strcasecmp(ice_fc_str(ICE_FC_RX_PAUSE), fc_str) == 0) { new_mode = ICE_FC_RX_PAUSE; mode_set = true; } else if (strcasecmp(ice_fc_str(ICE_FC_NONE), fc_str) == 0) { new_mode = ICE_FC_NONE; mode_set = true; } /* * Then check if it's an integer, for compatibility with the method * used in older drivers. */ if (!mode_set) { fc_num = strtol(fc_str, &fc_str_end, 0); if (fc_str_end == fc_str) fc_num = -1; switch (fc_num) { case 3: new_mode = ICE_FC_FULL; break; case 2: new_mode = ICE_FC_TX_PAUSE; break; case 1: new_mode = ICE_FC_RX_PAUSE; break; case 0: new_mode = ICE_FC_NONE; break; default: device_printf(dev, "%s: \"%s\" is not a valid flow control mode\n", __func__, fc_str); return (EINVAL); } } - /* Finally, set the flow control mode in FW */ - hw->port_info->fc.req_mode = new_mode; - status = ice_set_fc(pi, &aq_failures, true); - if (status != ICE_SUCCESS) { - /* Don't indicate failure if there's no media in the port -- the sysctl - * handler has saved the value and will apply it when media is inserted. 
- */ - if (aq_failures == ICE_SET_FC_AQ_FAIL_SET && - hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { - device_printf(dev, - "%s: Setting will be applied when media is inserted\n", __func__); - return (0); - } else { - device_printf(dev, - "%s: ice_set_fc AQ failure = %d\n", __func__, aq_failures); - return (EIO); - } - } + /* Set the flow control mode in FW */ + pi->phy.curr_user_fc_req = new_mode; - return (0); + /* Apply settings requested by user */ + return ice_apply_saved_phy_cfg(sc, ICE_APPLY_FC); } /** * ice_sysctl_negotiated_fc - Display currently negotiated FC mode * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the currently negotiated flow control settings. * * If link is not established, this will report ICE_FC_NONE, as no flow * control is negotiated while link is down. */ static int ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_port_info *pi = sc->hw.port_info; const char *negotiated_fc; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); negotiated_fc = ice_flowcontrol_mode(pi); return sysctl_handle_string(oidp, __DECONST(char *, negotiated_fc), 0, req); } /** * __ice_sysctl_phy_type_handler - Display/change supported PHY types/speeds * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * @is_phy_type_high: if true, handle the high PHY type instead of the low PHY type * * Private handler for phy_type_high and phy_type_low sysctls. */ static int __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_aqc_get_phy_caps_data pcaps = { 0 }; struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; uint64_t types; - int error = 0; + int ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); - status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_SW_CFG, + status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_ACTIVE_CFG, &pcaps, NULL); if (status != ICE_SUCCESS) { device_printf(dev, "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } if (is_phy_type_high) types = pcaps.phy_type_high; else types = pcaps.phy_type_low; - error = sysctl_handle_64(oidp, &types, sizeof(types), req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_64(oidp, &types, sizeof(types), req); + if ((ret) || (req->newptr == NULL)) + return (ret); ice_copy_phy_caps_to_cfg(hw->port_info, &pcaps, &cfg); if (is_phy_type_high) cfg.phy_type_high = types & hw->port_info->phy.phy_type_high; else cfg.phy_type_low = types & hw->port_info->phy.phy_type_low; cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; status = ice_aq_set_phy_cfg(hw, hw->port_info, &cfg, NULL); if (status != ICE_SUCCESS) { device_printf(dev, "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } return (0); } /** * ice_sysctl_phy_type_low - Display/change supported lower PHY types/speeds * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the currently supported lower PHY types * On write: Sets the device's 
supported low PHY types */ static int ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS) { return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, false); } /** * ice_sysctl_phy_type_high - Display/change supported higher PHY types/speeds * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the currently supported higher PHY types * On write: Sets the device's supported high PHY types */ static int ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS) { return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, true); } /** * ice_sysctl_phy_caps - Display response from Get PHY abilities * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * @report_mode: the mode to report * * On read: Displays the response from Get PHY abilities with the given report * mode. */ static int ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_aqc_get_phy_caps_data pcaps = { 0 }; struct ice_hw *hw = &sc->hw; struct ice_port_info *pi = hw->port_info; device_t dev = sc->dev; enum ice_status status; - int error; + int ret; UNREFERENCED_PARAMETER(arg2); - error = priv_check(curthread, PRIV_DRIVER); - if (error) - return (error); + ret = priv_check(curthread, PRIV_DRIVER); + if (ret) + return (ret); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); status = ice_aq_get_phy_caps(pi, true, report_mode, &pcaps, NULL); if (status != ICE_SUCCESS) { device_printf(dev, "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } - error = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req); + ret = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req); if (req->newptr != NULL) return (EPERM); - return (error); + return (ret); } /** * ice_sysctl_phy_sw_caps - Display response from Get PHY abilities * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the response from Get PHY abilities reporting the last * software configuration. */ static int ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS) { return ice_sysctl_phy_caps(oidp, arg1, arg2, req, - ICE_AQC_REPORT_SW_CFG); + ICE_AQC_REPORT_ACTIVE_CFG); } /** * ice_sysctl_phy_nvm_caps - Display response from Get PHY abilities * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the response from Get PHY abilities reporting the NVM * configuration. */ static int ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS) { return ice_sysctl_phy_caps(oidp, arg1, arg2, req, - ICE_AQC_REPORT_NVM_CAP); + ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA); } /** * ice_sysctl_phy_topo_caps - Display response from Get PHY abilities * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the response from Get PHY abilities reporting the * topology configuration. 
*/ static int ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS) { return ice_sysctl_phy_caps(oidp, arg1, arg2, req, - ICE_AQC_REPORT_TOPO_CAP); + ICE_AQC_REPORT_TOPO_CAP_MEDIA); } /** * ice_sysctl_phy_link_status - Display response from Get Link Status * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Display the response from firmware for the Get Link Status * request. */ static int ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS) { struct ice_aqc_get_link_status_data link_data = { 0 }; struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct ice_port_info *pi = hw->port_info; struct ice_aqc_get_link_status *resp; struct ice_aq_desc desc; device_t dev = sc->dev; enum ice_status status; - int error; + int ret; UNREFERENCED_PARAMETER(arg2); /* * Ensure that only contexts with driver privilege are allowed to * access this information */ - error = priv_check(curthread, PRIV_DRIVER); - if (error) - return (error); + ret = priv_check(curthread, PRIV_DRIVER); + if (ret) + return (ret); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); resp = &desc.params.get_link_status; resp->lport_num = pi->lport; status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), NULL); if (status != ICE_SUCCESS) { device_printf(dev, "%s: ice_aq_send_cmd failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } - error = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req); + ret = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req); if (req->newptr != NULL) return (EPERM); - return (error); + return (ret); } /** * ice_sysctl_fw_cur_lldp_persist_status - Display current FW LLDP status * @oidp: sysctl oid structure * @arg1: pointer to private softc structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays current persistent LLDP status. */ static int ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; struct sbuf *sbuf; u32 lldp_state; UNREFERENCED_PARAMETER(arg2); UNREFERENCED_PARAMETER(oidp); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); status = ice_get_cur_lldp_persist_status(hw, &lldp_state); if (status) { device_printf(dev, "Could not acquire current LLDP persistence status, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_fw_dflt_lldp_persist_status - Display default FW LLDP status * @oidp: sysctl oid structure * @arg1: pointer to private softc structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays default persistent LLDP status. 
*/ static int ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; struct sbuf *sbuf; u32 lldp_state; UNREFERENCED_PARAMETER(arg2); UNREFERENCED_PARAMETER(oidp); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); status = ice_get_dflt_lldp_persist_status(hw, &lldp_state); if (status) { device_printf(dev, "Could not acquire default LLDP persistence status, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state)); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } #define ICE_SYSCTL_HELP_FW_LLDP_AGENT \ "\nDisplay or change FW LLDP agent state:" \ "\n\t0 - disabled" \ "\n\t1 - enabled" /** * ice_sysctl_fw_lldp_agent - Display or change the FW LLDP agent status * @oidp: sysctl oid structure * @arg1: pointer to private softc structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays whether the FW LLDP agent is running * On write: Persistently enables or disables the FW LLDP agent */ static int ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; - int error = 0; + int ret; u32 old_state; u8 fw_lldp_enabled; bool retried_start_lldp = false; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); status = ice_get_cur_lldp_persist_status(hw, &old_state); if (status) { device_printf(dev, "Could not acquire current LLDP persistence status, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } if (old_state > ICE_LLDP_ADMINSTATUS_ENA_RXTX) { status = ice_get_dflt_lldp_persist_status(hw, &old_state); if (status) { device_printf(dev, "Could not acquire default LLDP persistence status, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } } if (old_state == 0) fw_lldp_enabled = false; else fw_lldp_enabled = true; - error = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (old_state == 0 && fw_lldp_enabled == false) return (0); if (old_state != 0 && fw_lldp_enabled == true) return (0); if (fw_lldp_enabled == false) { status = ice_aq_stop_lldp(hw, true, true, NULL); /* EPERM is returned if the LLDP agent is already shutdown */ if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM) { device_printf(dev, "%s: ice_aq_stop_lldp failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } ice_aq_set_dcb_parameters(hw, true, NULL); hw->port_info->qos_cfg.is_sw_lldp = true; ice_add_rx_lldp_filter(sc); } else { ice_del_rx_lldp_filter(sc); retry_start_lldp: status = ice_aq_start_lldp(hw, true, NULL); if (status) { switch (hw->adminq.sq_last_status) { /* EEXIST is returned if the LLDP agent is already started */ case ICE_AQ_RC_EEXIST: break; case ICE_AQ_RC_EAGAIN: /* Retry command after a 2 second wait */ if (retried_start_lldp == false) { retried_start_lldp = true; pause("slldp", ICE_START_LLDP_RETRY_WAIT); goto retry_start_lldp; } /* Fallthrough */ default: device_printf(dev, "%s: ice_aq_start_lldp failed; status %s, aq_err %s\n", 
__func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } } hw->port_info->qos_cfg.is_sw_lldp = false; } - return (error); + return (ret); } /** * ice_add_device_sysctls - add device specific dynamic sysctls * @sc: device private structure * * Add per-device dynamic sysctls which show device configuration or enable * configuring device functionality. For tunable values which can be set prior * to load, see ice_add_device_tunables. * * This function depends on the sysctl layout setup by ice_add_device_tunables, * and likely should be called near the end of the attach process. */ void ice_add_device_sysctls(struct ice_softc *sc) { struct sysctl_oid *hw_node; device_t dev = sc->dev; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_show_fw, "A", "Firmware version"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "pba_number", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_pba_number, "A", "Product Board Assembly Number"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "ddp_version", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_pkg_version, "A", "Active DDP package name and version"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_current_speed, "A", "Current Port Link Speed"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "requested_fec", CTLTYPE_STRING | CTLFLAG_RW, sc, 0, ice_sysctl_fec_config, "A", ICE_SYSCTL_HELP_FEC_CONFIG); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "negotiated_fec", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_negotiated_fec, "A", "Current Negotiated FEC mode"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fc", CTLTYPE_STRING | CTLFLAG_RW, sc, 0, ice_sysctl_fc_config, "A", ICE_SYSCTL_HELP_FC_CONFIG); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "advertise_speed", CTLTYPE_U16 | CTLFLAG_RW, sc, 0, ice_sysctl_advertise_speed, "SU", ICE_SYSCTL_HELP_ADVERTISE_SPEED); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_lldp_agent", CTLTYPE_U8 | CTLFLAG_RWTUN, sc, 0, ice_sysctl_fw_lldp_agent, "CU", ICE_SYSCTL_HELP_FW_LLDP_AGENT); /* Differentiate software and hardware statistics, by keeping hw stats * in their own node. This isn't in ice_add_device_tunables, because * we won't have any CTLFLAG_TUN sysctls under this node. */ hw_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "hw", CTLFLAG_RD, NULL, "Port Hardware Statistics"); ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur); /* Add the main PF VSI stats now. Other VSIs will add their own stats * during creation */ ice_add_vsi_sysctls(&sc->pf_vsi); /* Add sysctls related to debugging the device driver. This includes * sysctls which display additional internal driver state for use in * understanding what is happening within the driver. */ ice_add_debug_sysctls(sc); } /** * @enum hmc_error_type * @brief enumeration of HMC errors * * Enumeration defining the possible HMC errors that might occur. 
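 *
 * These values correspond to the HMC_ERROR_TYPE field of the
 * PFHMC_ERRORINFO register; ice_log_hmc_error() below recovers the value
 * with:
 *
 *	errtype = (info & PFHMC_ERRORINFO_HMC_ERROR_TYPE_M) >>
 *	    PFHMC_ERRORINFO_HMC_ERROR_TYPE_S;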
*/ enum hmc_error_type { HMC_ERR_PMF_INVALID = 0, HMC_ERR_VF_IDX_INVALID = 1, HMC_ERR_VF_PARENT_PF_INVALID = 2, /* 3 is reserved */ HMC_ERR_INDEX_TOO_BIG = 4, HMC_ERR_ADDRESS_TOO_LARGE = 5, HMC_ERR_SEGMENT_DESC_INVALID = 6, HMC_ERR_SEGMENT_DESC_TOO_SMALL = 7, HMC_ERR_PAGE_DESC_INVALID = 8, HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION = 9, /* 10 is reserved */ HMC_ERR_INVALID_OBJECT_TYPE = 11, /* 12 is reserved */ }; /** * ice_log_hmc_error - Log an HMC error message * @hw: device hw structure * @dev: the device to pass to device_printf() * * Log a message when an HMC error interrupt is triggered. */ void ice_log_hmc_error(struct ice_hw *hw, device_t dev) { u32 info, data; u8 index, errtype, objtype; bool isvf; info = rd32(hw, PFHMC_ERRORINFO); data = rd32(hw, PFHMC_ERRORDATA); index = (u8)(info & PFHMC_ERRORINFO_PMF_INDEX_M); errtype = (u8)((info & PFHMC_ERRORINFO_HMC_ERROR_TYPE_M) >> PFHMC_ERRORINFO_HMC_ERROR_TYPE_S); objtype = (u8)((info & PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M) >> PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S); isvf = info & PFHMC_ERRORINFO_PMF_ISVF_M; device_printf(dev, "%s HMC Error detected on PMF index %d:\n", isvf ? "VF" : "PF", index); device_printf(dev, "error type %d, object type %d, data 0x%08x\n", errtype, objtype, data); switch (errtype) { case HMC_ERR_PMF_INVALID: device_printf(dev, "Private Memory Function is not valid\n"); break; case HMC_ERR_VF_IDX_INVALID: device_printf(dev, "Invalid Private Memory Function index for PE enabled VF\n"); break; case HMC_ERR_VF_PARENT_PF_INVALID: device_printf(dev, "Invalid parent PF for PE enabled VF\n"); break; case HMC_ERR_INDEX_TOO_BIG: device_printf(dev, "Object index too big\n"); break; case HMC_ERR_ADDRESS_TOO_LARGE: device_printf(dev, "Address extends beyond segment descriptor limit\n"); break; case HMC_ERR_SEGMENT_DESC_INVALID: device_printf(dev, "Segment descriptor is invalid\n"); break; case HMC_ERR_SEGMENT_DESC_TOO_SMALL: device_printf(dev, "Segment descriptor is too small\n"); break; case HMC_ERR_PAGE_DESC_INVALID: device_printf(dev, "Page descriptor is invalid\n"); break; case HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION: device_printf(dev, "Unsupported Request completion received from PCIe\n"); break; case HMC_ERR_INVALID_OBJECT_TYPE: device_printf(dev, "Invalid object type\n"); break; default: device_printf(dev, "Unknown HMC error\n"); } /* Clear the error indication */ wr32(hw, PFHMC_ERRORINFO, 0); } /** * @struct ice_sysctl_info * @brief sysctl information * * Structure used to simplify the process of defining the many similar * statistics sysctls. */ struct ice_sysctl_info { u64 *stat; const char *name; const char *description; }; /** * ice_add_sysctls_eth_stats - Add sysctls for ethernet statistics * @ctx: sysctl ctx to use * @parent: the parent node to add sysctls under * @stats: the ethernet stats structure to source values from * * Adds statistics sysctls for the ethernet statistics of the MAC or a VSI. * Will add them under the parent node specified. * * Note that tx_errors is only meaningful for VSIs and not the global MAC/PF * statistics, so it is not included here. Similarly, rx_discards has different * descriptions for VSIs and MAC/PF stats, so it is also not included here. 
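 *
 * A typical call, as made from ice_add_vsi_sysctls() below, attaches these
 * counters under a VSI's "hw" node:
 *
 *	ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur);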
*/ void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid *parent, struct ice_eth_stats *stats) { const struct ice_sysctl_info ctls[] = { /* Rx Stats */ { &stats->rx_bytes, "good_octets_rcvd", "Good Octets Received" }, { &stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received" }, { &stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received" }, { &stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received" }, /* Tx Stats */ { &stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted" }, { &stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted" }, { &stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted" }, { &stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted" }, /* End */ { 0, 0, 0 } }; struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); const struct ice_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_U64(ctx, parent_list, OID_AUTO, entry->name, CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, entry->description); entry++; } } /** * ice_sysctl_tx_cso_stat - Display Tx checksum offload statistic * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: Tx CSO stat to read * @req: sysctl request pointer * * On read: Sums the per-queue Tx CSO stat and displays it. */ static int ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS) { struct ice_vsi *vsi = (struct ice_vsi *)arg1; enum ice_tx_cso_stat type = (enum ice_tx_cso_stat)arg2; u64 stat = 0; int i; if (ice_driver_is_detaching(vsi->sc)) return (ESHUTDOWN); /* Check that the type is valid */ if (type >= ICE_CSO_STAT_TX_COUNT) return (EDOOFUS); /* Sum the stat for each of the Tx queues */ for (i = 0; i < vsi->num_tx_queues; i++) stat += vsi->tx_queues[i].stats.cso[type]; return sysctl_handle_64(oidp, NULL, stat, req); } /** * ice_sysctl_rx_cso_stat - Display Rx checksum offload statistic * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: Rx CSO stat to read * @req: sysctl request pointer * * On read: Sums the per-queue Rx CSO stat and displays it. */ static int ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS) { struct ice_vsi *vsi = (struct ice_vsi *)arg1; enum ice_rx_cso_stat type = (enum ice_rx_cso_stat)arg2; u64 stat = 0; int i; if (ice_driver_is_detaching(vsi->sc)) return (ESHUTDOWN); /* Check that the type is valid */ if (type >= ICE_CSO_STAT_RX_COUNT) return (EDOOFUS); /* Sum the stat for each of the Rx queues */ for (i = 0; i < vsi->num_rx_queues; i++) stat += vsi->rx_queues[i].stats.cso[type]; return sysctl_handle_64(oidp, NULL, stat, req); } +/** + * ice_sysctl_rx_errors_stat - Display aggregate of Rx errors + * @oidp: sysctl oid structure + * @arg1: pointer to private data structure + * @arg2: unused + * @req: sysctl request pointer + * + * On read: Sums current values of Rx error statistics and + * displays it. 
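+ *
+ * The aggregate is the sum of the port-level MAC counters (undersize,
+ * fragment, oversize, jabber, length, CRC, and illegal byte errors) plus
+ * the per-queue software-tracked Rx checksum error counters, so it can
+ * exceed the number of dropped packets when one frame trips several
+ * counters.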
+ */ +static int +ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS) +{ + struct ice_vsi *vsi = (struct ice_vsi *)arg1; + struct ice_hw_port_stats *hs = &vsi->sc->stats.cur; + u64 stat = 0; + int i, type; + + UNREFERENCED_PARAMETER(arg2); + + if (ice_driver_is_detaching(vsi->sc)) + return (ESHUTDOWN); + + stat += hs->rx_undersize; + stat += hs->rx_fragments; + stat += hs->rx_oversize; + stat += hs->rx_jabber; + stat += hs->rx_len_errors; + stat += hs->crc_errors; + stat += hs->illegal_bytes; + + /* Checksum error stats */ + for (i = 0; i < vsi->num_rx_queues; i++) + for (type = ICE_CSO_STAT_RX_IP4_ERR; + type < ICE_CSO_STAT_RX_COUNT; + type++) + stat += vsi->rx_queues[i].stats.cso[type]; + + return sysctl_handle_64(oidp, NULL, stat, req); +} + /** * @struct ice_rx_cso_stat_info * @brief sysctl information for an Rx checksum offload statistic * * Structure used to simplify the process of defining the checksum offload * statistics. */ struct ice_rx_cso_stat_info { enum ice_rx_cso_stat type; const char *name; const char *description; }; /** * @struct ice_tx_cso_stat_info * @brief sysctl information for a Tx checksum offload statistic * * Structure used to simplify the process of defining the checksum offload * statistics. */ struct ice_tx_cso_stat_info { enum ice_tx_cso_stat type; const char *name; const char *description; }; /** * ice_add_sysctls_sw_stats - Add sysctls for software statistics * @vsi: pointer to the VSI to add sysctls for * @ctx: sysctl ctx to use * @parent: the parent node to add sysctls under * * Add statistics sysctls for software tracked statistics of a VSI. * * Currently this only adds checksum offload statistics, but more counters may * be added in the future. */ static void ice_add_sysctls_sw_stats(struct ice_vsi *vsi, struct sysctl_ctx_list *ctx, struct sysctl_oid *parent) { struct sysctl_oid *cso_node; struct sysctl_oid_list *cso_list; /* Tx CSO Stats */ const struct ice_tx_cso_stat_info tx_ctls[] = { { ICE_CSO_STAT_TX_TCP, "tx_tcp", "Transmit TCP Packets marked for HW checksum" }, { ICE_CSO_STAT_TX_UDP, "tx_udp", "Transmit UDP Packets marked for HW checksum" }, { ICE_CSO_STAT_TX_SCTP, "tx_sctp", "Transmit SCTP Packets marked for HW checksum" }, { ICE_CSO_STAT_TX_IP4, "tx_ip4", "Transmit IPv4 Packets marked for HW checksum" }, { ICE_CSO_STAT_TX_IP6, "tx_ip6", "Transmit IPv6 Packets marked for HW checksum" }, { ICE_CSO_STAT_TX_L3_ERR, "tx_l3_err", "Transmit packets that driver failed to set L3 HW CSO bits for" }, { ICE_CSO_STAT_TX_L4_ERR, "tx_l4_err", "Transmit packets that driver failed to set L4 HW CSO bits for" }, /* End */ { ICE_CSO_STAT_TX_COUNT, 0, 0 } }; /* Rx CSO Stats */ const struct ice_rx_cso_stat_info rx_ctls[] = { { ICE_CSO_STAT_RX_IP4_ERR, "rx_ip4_err", "Received packets with invalid IPv4 checksum indicated by HW" }, { ICE_CSO_STAT_RX_IP6_ERR, "rx_ip6_err", "Received IPv6 packets with extension headers" }, { ICE_CSO_STAT_RX_L3_ERR, "rx_l3_err", "Received packets with an unexpected invalid L3 checksum indicated by HW" }, { ICE_CSO_STAT_RX_TCP_ERR, "rx_tcp_err", "Received packets with invalid TCP checksum indicated by HW" }, { ICE_CSO_STAT_RX_UDP_ERR, "rx_udp_err", "Received packets with invalid UDP checksum indicated by HW" }, { ICE_CSO_STAT_RX_SCTP_ERR, "rx_sctp_err", "Received packets with invalid SCTP checksum indicated by HW" }, { ICE_CSO_STAT_RX_L4_ERR, "rx_l4_err", "Received packets with an unexpected invalid L4 checksum indicated by HW" }, /* End */ { ICE_CSO_STAT_RX_COUNT, 0, 0 } }; struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); /* Add 
a node for statistics tracked by software. */ cso_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "cso", CTLFLAG_RD, NULL, "Checksum offload Statistics"); cso_list = SYSCTL_CHILDREN(cso_node); const struct ice_tx_cso_stat_info *tx_entry = tx_ctls; while (tx_entry->name && tx_entry->description) { SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, tx_entry->name, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, vsi, tx_entry->type, ice_sysctl_tx_cso_stat, "QU", tx_entry->description); tx_entry++; } const struct ice_rx_cso_stat_info *rx_entry = rx_ctls; while (rx_entry->name && rx_entry->description) { SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, rx_entry->name, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, vsi, rx_entry->type, ice_sysctl_rx_cso_stat, "QU", rx_entry->description); rx_entry++; } } /** * ice_add_vsi_sysctls - Add sysctls for a VSI * @vsi: pointer to VSI structure * * Add various sysctls for a given VSI. */ void ice_add_vsi_sysctls(struct ice_vsi *vsi) { struct sysctl_ctx_list *ctx = &vsi->ctx; struct sysctl_oid *hw_node, *sw_node; struct sysctl_oid_list *vsi_list, *hw_list, *sw_list; vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); /* Keep hw stats in their own node. */ hw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "hw", CTLFLAG_RD, NULL, "VSI Hardware Statistics"); hw_list = SYSCTL_CHILDREN(hw_node); /* Add the ethernet statistics for this VSI */ ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur); SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_discards", CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards, 0, "Discarded Rx Packets (see rx_errors or rx_no_desc)"); - SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_errors", - CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_errors, - 0, "Rx Packets Discarded Due To Error"); + SYSCTL_ADD_PROC(ctx, hw_list, OID_AUTO, "rx_errors", + CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS, + vsi, 0, ice_sysctl_rx_errors_stat, "QU", + "Aggregate of all Rx errors"); SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_no_desc", CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_no_desc, 0, "Rx Packets Discarded Due To Lack Of Descriptors"); SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "tx_errors", CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.tx_errors, 0, "Tx Packets Discarded Due To Error"); /* Add a node for statistics tracked by software. */ sw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "sw", CTLFLAG_RD, NULL, "VSI Software Statistics"); sw_list = SYSCTL_CHILDREN(sw_node); ice_add_sysctls_sw_stats(vsi, ctx, sw_node); } /** * ice_add_sysctls_mac_stats - Add sysctls for global MAC statistics * @ctx: the sysctl ctx to use * @parent: parent node to add the sysctls under * @stats: the hw ports stat structure to pull values from * * Add global MAC statistics sysctls. 
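 *
 * Called from ice_add_device_sysctls() with the port-wide statistics
 * structure:
 *
 *	ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur);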
*/ void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid *parent, struct ice_hw_port_stats *stats) { struct sysctl_oid *mac_node; struct sysctl_oid_list *parent_list, *mac_list; parent_list = SYSCTL_CHILDREN(parent); mac_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "mac", CTLFLAG_RD, NULL, "Mac Hardware Statistics"); mac_list = SYSCTL_CHILDREN(mac_node); /* add the common ethernet statistics */ ice_add_sysctls_eth_stats(ctx, mac_node, &stats->eth); const struct ice_sysctl_info ctls[] = { /* Packet Reception Stats */ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, {&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"}, {&stats->eth.rx_discards, "rx_discards", "Discarded Rx Packets by Port (shortage of storage space)"}, /* Packet Transmission Stats */ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, {&stats->tx_dropped_link_down, "tx_dropped", "Tx Dropped Due To Link Down"}, /* Flow control */ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, /* Other */ {&stats->crc_errors, "crc_errors", "CRC Errors"}, {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, /* End */ { 0, 0, 0 } }; const struct ice_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, entry->name, CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, entry->description); entry++; } } /** * ice_configure_misc_interrupts - enable 'other' interrupt causes * @sc: pointer to device private softc * * Enable various "other" interrupt causes, and associate them to interrupt 0, * which is our administrative interrupt. 
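 *
 * Once enabled, the admin interrupt handler distinguishes these causes by
 * reading the OICR register; a minimal sketch (the handler context is
 * illustrative):
 *
 *	u32 oicr = rd32(hw, PFINT_OICR);
 *
 *	if (oicr & PFINT_OICR_HMC_ERR_M)
 *		ice_log_hmc_error(hw, dev);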
*/ void ice_configure_misc_interrupts(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; u32 val; /* Read the OICR register to clear it */ rd32(hw, PFINT_OICR); /* Enable useful "other" interrupt causes */ val = (PFINT_OICR_ECC_ERR_M | PFINT_OICR_MAL_DETECT_M | PFINT_OICR_GRST_M | PFINT_OICR_PCI_EXCEPTION_M | PFINT_OICR_VFLR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_CRITERR_M); wr32(hw, PFINT_OICR_ENA, val); /* Note that since we're using MSI-X index 0, and ITR index 0, we do * not explicitly program them when writing to the PFINT_*_CTL * registers. Nevertheless, these writes are associating the * interrupts with the ITR 0 vector */ /* Associate the OICR interrupt with ITR 0, and enable it */ wr32(hw, PFINT_OICR_CTL, PFINT_OICR_CTL_CAUSE_ENA_M); /* Associate the Mailbox interrupt with ITR 0, and enable it */ wr32(hw, PFINT_MBX_CTL, PFINT_MBX_CTL_CAUSE_ENA_M); /* Associate the AdminQ interrupt with ITR 0, and enable it */ wr32(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M); } /** * ice_filter_is_mcast - Check if info is a multicast filter * @vsi: vsi structure addresses are targeted towards * @info: filter info * * @returns true if the provided info is a multicast filter, and false * otherwise. */ static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info) { const u8 *addr = info->l_data.mac.mac_addr; /* * Check if this info matches a multicast filter added by * ice_add_mac_to_list */ if ((info->flag == ICE_FLTR_TX) && (info->src_id == ICE_SRC_ID_VSI) && (info->lkup_type == ICE_SW_LKUP_MAC) && (info->vsi_handle == vsi->idx) && ETHER_IS_MULTICAST(addr) && !ETHER_IS_BROADCAST(addr)) return true; return false; } /** * @struct ice_mcast_sync_data * @brief data used by ice_sync_one_mcast_filter function * * Structure used to store data needed for processing by the * ice_sync_one_mcast_filter. This structure contains a linked list of filters * to be added, an error indication, and a pointer to the device softc. */ struct ice_mcast_sync_data { struct ice_list_head add_list; struct ice_softc *sc; int err; }; /** * ice_sync_one_mcast_filter - Check if we need to program the filter * @p: void pointer to algorithm data * @sdl: link level socket address * @count: unused count value * * Called by if_foreach_llmaddr to operate on each filter in the ifp filter * list. For the given address, search our internal list to see if we have * found the filter. If not, add it to our list of filters that need to be * programmed. * * @returns (1) if we've actually setup the filter to be added */ static u_int ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, u_int __unused count) { struct ice_mcast_sync_data *data = (struct ice_mcast_sync_data *)p; struct ice_softc *sc = data->sc; struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; const u8 *sdl_addr = (const u8 *)LLADDR(sdl); struct ice_fltr_mgmt_list_entry *itr; struct ice_list_head *rules; int err; rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; /* * If a previous filter already indicated an error, there is no need * for us to finish processing the rest of the filters. */ if (data->err) return (0); /* See if this filter has already been programmed */ LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { struct ice_fltr_info *info = &itr->fltr_info; const u8 *addr = info->l_data.mac.mac_addr; /* Only check multicast filters */ if (!ice_filter_is_mcast(&sc->pf_vsi, info)) continue; /* * If this filter matches, mark the internal filter as * "found", and exit. 
*/ if (bcmp(addr, sdl_addr, ETHER_ADDR_LEN) == 0) { itr->marker = ICE_FLTR_FOUND; return (1); } } /* * If we failed to locate the filter in our internal list, we need to * place it into our add list. */ err = ice_add_mac_to_list(&sc->pf_vsi, &data->add_list, sdl_addr, ICE_FWD_TO_VSI); if (err) { device_printf(sc->dev, "Failed to place MAC %6D onto add list, err %s\n", sdl_addr, ":", ice_err_str(err)); data->err = err; return (0); } return (1); } /** * ice_sync_multicast_filters - Synchronize OS and internal filter list * @sc: device private structure * * Called in response to SIOCDELMULTI to synchronize the operating system * multicast address list with the internal list of filters programmed to * firmware. * * Works in one phase to find added and deleted filters using a marker bit on * the internal list. * * First, a loop over the internal list clears the marker bit. Second, for * each filter in the ifp list is checked. If we find it in the internal list, * the marker bit is set. Otherwise, the filter is added to the add list. * Third, a loop over the internal list determines if any filters have not * been found. Each of these is added to the delete list. Finally, the add and * delete lists are programmed to firmware to update the filters. * * @returns zero on success or an integer error code on failure. */ int ice_sync_multicast_filters(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *itr; struct ice_mcast_sync_data data = {}; struct ice_list_head *rules, remove_list; enum ice_status status; int err = 0; INIT_LIST_HEAD(&data.add_list); INIT_LIST_HEAD(&remove_list); data.sc = sc; data.err = 0; rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; /* Acquire the lock for the entire duration */ ice_acquire_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); /* (1) Reset the marker state for all filters */ LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) itr->marker = ICE_FLTR_NOT_FOUND; /* (2) determine which filters need to be added and removed */ if_foreach_llmaddr(sc->ifp, ice_sync_one_mcast_filter, (void *)&data); if (data.err) { /* ice_sync_one_mcast_filter already prints an error */ err = data.err; ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); goto free_filter_lists; } LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) { struct ice_fltr_info *info = &itr->fltr_info; const u8 *addr = info->l_data.mac.mac_addr; /* Only check multicast filters */ if (!ice_filter_is_mcast(&sc->pf_vsi, info)) continue; /* * If the filter is not marked as found, then it must no * longer be in the ifp address list, so we need to remove it. 
*/ if (itr->marker == ICE_FLTR_NOT_FOUND) { err = ice_add_mac_to_list(&sc->pf_vsi, &remove_list, addr, ICE_FWD_TO_VSI); if (err) { device_printf(sc->dev, "Failed to place MAC %6D onto remove list, err %s\n", addr, ":", ice_err_str(err)); ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); goto free_filter_lists; } } } ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock); status = ice_add_mac(hw, &data.add_list); if (status) { device_printf(sc->dev, "Could not add new MAC filters, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = (EIO); goto free_filter_lists; } status = ice_remove_mac(hw, &remove_list); if (status) { device_printf(sc->dev, "Could not remove old MAC filters, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = (EIO); goto free_filter_lists; } free_filter_lists: ice_free_fltr_list(&data.add_list); ice_free_fltr_list(&remove_list); return (err); } /** * ice_add_vlan_hw_filter - Add a VLAN filter for a given VSI * @vsi: The VSI to add the filter for * @vid: VLAN to add * * Programs a HW filter so that the given VSI will receive the specified VLAN. */ enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) { struct ice_hw *hw = &vsi->sc->hw; struct ice_list_head vlan_list; struct ice_fltr_list_entry vlan_entry; INIT_LIST_HEAD(&vlan_list); memset(&vlan_entry, 0, sizeof(vlan_entry)); vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; vlan_entry.fltr_info.flag = ICE_FLTR_TX; vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; vlan_entry.fltr_info.vsi_handle = vsi->idx; vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; LIST_ADD(&vlan_entry.list_entry, &vlan_list); return ice_add_vlan(hw, &vlan_list); } /** * ice_remove_vlan_hw_filter - Remove a VLAN filter for a given VSI * @vsi: The VSI to add the filter for * @vid: VLAN to remove * * Removes a previously programmed HW filter for the specified VSI. */ enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid) { struct ice_hw *hw = &vsi->sc->hw; struct ice_list_head vlan_list; struct ice_fltr_list_entry vlan_entry; INIT_LIST_HEAD(&vlan_list); memset(&vlan_entry, 0, sizeof(vlan_entry)); vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN; vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; vlan_entry.fltr_info.flag = ICE_FLTR_TX; vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI; vlan_entry.fltr_info.vsi_handle = vsi->idx; vlan_entry.fltr_info.l_data.vlan.vlan_id = vid; LIST_ADD(&vlan_entry.list_entry, &vlan_list); return ice_remove_vlan(hw, &vlan_list); } #define ICE_SYSCTL_HELP_RX_ITR \ "\nControl Rx interrupt throttle rate." 
\ "\n\t0-8160 - sets interrupt rate in usecs" \ "\n\t -1 - reset the Rx itr to default" /** * ice_sysctl_rx_itr - Display or change the Rx ITR for a VSI * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the current Rx ITR value * on write: Sets the Rx ITR value, reconfiguring device if it is up */ static int ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS) { struct ice_vsi *vsi = (struct ice_vsi *)arg1; struct ice_softc *sc = vsi->sc; - int increment, error = 0; + int increment, ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); - error = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (vsi->rx_itr < 0) vsi->rx_itr = ICE_DFLT_RX_ITR; if (vsi->rx_itr > ICE_ITR_MAX) vsi->rx_itr = ICE_ITR_MAX; /* Assume 2usec increment if it hasn't been loaded yet */ increment = sc->hw.itr_gran ? : 2; /* We need to round the value to the hardware's ITR granularity */ vsi->rx_itr = (vsi->rx_itr / increment ) * increment; /* If the driver has finished initializing, then we need to reprogram * the ITR registers now. Otherwise, they will be programmed during * driver initialization. */ if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) ice_configure_rx_itr(vsi); return (0); } #define ICE_SYSCTL_HELP_TX_ITR \ "\nControl Tx interrupt throttle rate." \ "\n\t0-8160 - sets interrupt rate in usecs" \ "\n\t -1 - reset the Tx itr to default" /** * ice_sysctl_tx_itr - Display or change the Tx ITR for a VSI * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * On read: Displays the current Tx ITR value * on write: Sets the Tx ITR value, reconfiguring device if it is up */ static int ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS) { struct ice_vsi *vsi = (struct ice_vsi *)arg1; struct ice_softc *sc = vsi->sc; - int increment, error = 0; + int increment, ret; UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); - error = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); /* Allow configuring a negative value to reset to the default */ if (vsi->tx_itr < 0) vsi->tx_itr = ICE_DFLT_TX_ITR; if (vsi->tx_itr > ICE_ITR_MAX) vsi->tx_itr = ICE_ITR_MAX; /* Assume 2usec increment if it hasn't been loaded yet */ increment = sc->hw.itr_gran ? : 2; /* We need to round the value to the hardware's ITR granularity */ vsi->tx_itr = (vsi->tx_itr / increment ) * increment; /* If the driver has finished initializing, then we need to reprogram * the ITR registers now. Otherwise, they will be programmed during * driver initialization. */ if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) ice_configure_tx_itr(vsi); return (0); } /** * ice_add_vsi_tunables - Add tunables and nodes for a VSI * @vsi: pointer to VSI structure * @parent: parent node to add the tunables under * * Create a sysctl context for the VSI, so that sysctls for the VSI can be * dynamically removed upon VSI removal. * * Add various tunables and set up the basic node structure for the VSI. Must * be called *prior* to ice_add_vsi_sysctls. It should be called as soon as * possible after the VSI memory is initialized. 
* * VSI specific sysctls with CTLFLAG_TUN should be initialized here so that * their values can be read from loader.conf prior to their first use in the * driver. */ void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent) { struct sysctl_oid_list *vsi_list; char vsi_name[32], vsi_desc[32]; struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent); /* Initialize the sysctl context for this VSI */ sysctl_ctx_init(&vsi->ctx); /* Add a node to collect this VSI's statistics together */ snprintf(vsi_name, sizeof(vsi_name), "%u", vsi->idx); snprintf(vsi_desc, sizeof(vsi_desc), "VSI %u", vsi->idx); vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->ctx, parent_list, OID_AUTO, vsi_name, CTLFLAG_RD, NULL, vsi_desc); vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); vsi->rx_itr = ICE_DFLT_TX_ITR; SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "rx_itr", CTLTYPE_S16 | CTLFLAG_RWTUN, vsi, 0, ice_sysctl_rx_itr, "S", ICE_SYSCTL_HELP_RX_ITR); vsi->tx_itr = ICE_DFLT_TX_ITR; SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "tx_itr", CTLTYPE_S16 | CTLFLAG_RWTUN, vsi, 0, ice_sysctl_tx_itr, "S", ICE_SYSCTL_HELP_TX_ITR); } /** * ice_del_vsi_sysctl_ctx - Delete the sysctl context(s) of a VSI * @vsi: the VSI to remove contexts for * * Free the context for the VSI sysctls. This includes the main context, as * well as the per-queue sysctls. */ void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi) { device_t dev = vsi->sc->dev; int err; if (vsi->vsi_node) { err = sysctl_ctx_free(&vsi->ctx); if (err) device_printf(dev, "failed to free VSI %d sysctl context, err %s\n", vsi->idx, ice_err_str(err)); vsi->vsi_node = NULL; } } /** * ice_add_device_tunables - Add early tunable sysctls and sysctl nodes * @sc: device private structure * * Add per-device dynamic tunable sysctls, and setup the general sysctl trees * for re-use by ice_add_device_sysctls. * * In order for the sysctl fields to be initialized before use, this function * should be called as early as possible during attach activities. * * Any non-global sysctl marked as CTLFLAG_TUN should likely be initialized * here in this function, rather than later in ice_add_device_sysctls. * * To make things easier, this function is also expected to setup the various * sysctl nodes in addition to tunables so that other sysctls which can't be * initialized early can hook into the same nodes. */ void ice_add_device_tunables(struct ice_softc *sc) { device_t dev = sc->dev; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); + sc->enable_health_events = ice_enable_health_events; + + SYSCTL_ADD_BOOL(ctx, ctx_list, OID_AUTO, "enable_health_events", + CTLFLAG_RDTUN, &sc->enable_health_events, 0, + "Enable FW health event reporting for this PF"); + /* Add a node to track VSI sysctls. Keep track of the node in the * softc so that we can hook other sysctls into it later. This * includes both the VSI statistics, as well as potentially dynamic * VSIs in the future. */ sc->vsi_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "vsi", CTLFLAG_RD, NULL, "VSI Configuration and Statistics"); /* Add debug tunables */ ice_add_debug_tunables(sc); } /** * ice_sysctl_dump_mac_filters - Dump a list of all HW MAC Filters * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "mac_filters" sysctl to dump the programmed MAC filters. 
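 *
 * Note that the handler wires the request's output buffer before taking
 * the filter list lock: copying out to an unwired buffer may fault and
 * sleep, which is not permitted while holding the non-sleepable rule lock,
 * hence the pattern used below:
 *
 *	ret = sysctl_wire_old_buffer(req, 0);
 *	if (ret)
 *		return (ret);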
*/ static int ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_list_head *rule_head; struct ice_lock *rule_lock; struct ice_fltr_info *fi; struct sbuf *sbuf; int ret; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Wire the old buffer so we can take a non-sleepable lock */ ret = sysctl_wire_old_buffer(req, 0); if (ret) return (ret); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; sbuf_printf(sbuf, "MAC Filter List"); ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { fi = &fm_entry->fltr_info; sbuf_printf(sbuf, "\nmac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %d", fi->l_data.mac.mac_addr, ":", fi->vsi_handle, ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); /* if we have a vsi_list_info, print some information about that */ if (fm_entry->vsi_list_info) { sbuf_printf(sbuf, ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", fm_entry->vsi_count, fm_entry->vsi_list_info->vsi_list_id, fm_entry->vsi_list_info->ref_cnt); } } ice_release_lock(rule_lock); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_dump_vlan_filters - Dump a list of all HW VLAN Filters * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "vlan_filters" sysctl to dump the programmed VLAN filters. 
*/ static int ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_list_head *rule_head; struct ice_lock *rule_lock; struct ice_fltr_info *fi; struct sbuf *sbuf; int ret; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Wire the old buffer so we can take a non-sleepable lock */ ret = sysctl_wire_old_buffer(req, 0); if (ret) return (ret); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; sbuf_printf(sbuf, "VLAN Filter List"); ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { fi = &fm_entry->fltr_info; sbuf_printf(sbuf, "\nvlan_id = %4d, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", fi->l_data.vlan.vlan_id, fi->vsi_handle, ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); /* if we have a vsi_list_info, print some information about that */ if (fm_entry->vsi_list_info) { sbuf_printf(sbuf, ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", fm_entry->vsi_count, fm_entry->vsi_list_info->vsi_list_id, fm_entry->vsi_list_info->ref_cnt); } } ice_release_lock(rule_lock); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_dump_ethertype_filters - Dump a list of all HW Ethertype filters * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "ethertype_filters" sysctl to dump the programmed Ethertype * filters. 
*/ static int ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_list_head *rule_head; struct ice_lock *rule_lock; struct ice_fltr_info *fi; struct sbuf *sbuf; int ret; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Wire the old buffer so we can take a non-sleepable lock */ ret = sysctl_wire_old_buffer(req, 0); if (ret) return (ret); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rules; sbuf_printf(sbuf, "Ethertype Filter List"); ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { fi = &fm_entry->fltr_info; sbuf_printf(sbuf, "\nethertype = 0x%04x, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", fi->l_data.ethertype_mac.ethertype, fi->vsi_handle, ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); /* if we have a vsi_list_info, print some information about that */ if (fm_entry->vsi_list_info) { sbuf_printf(sbuf, ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", fm_entry->vsi_count, fm_entry->vsi_list_info->vsi_list_id, fm_entry->vsi_list_info->ref_cnt); } } ice_release_lock(rule_lock); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_dump_ethertype_mac_filters - Dump a list of all HW Ethertype/MAC filters * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "ethertype_mac_filters" sysctl to dump the programmed * Ethertype/MAC filters. 
*/ static int ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_list_head *rule_head; struct ice_lock *rule_lock; struct ice_fltr_info *fi; struct sbuf *sbuf; int ret; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Wire the old buffer so we can take a non-sleepable lock */ ret = sysctl_wire_old_buffer(req, 0); if (ret) return (ret); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rules; sbuf_printf(sbuf, "Ethertype/MAC Filter List"); ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { fi = &fm_entry->fltr_info; sbuf_printf(sbuf, "\nethertype = 0x%04x, mac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d", fi->l_data.ethertype_mac.ethertype, fi->l_data.ethertype_mac.mac_addr, ":", fi->vsi_handle, ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id); /* if we have a vsi_list_info, print some information about that */ if (fm_entry->vsi_list_info) { sbuf_printf(sbuf, ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d", fm_entry->vsi_count, fm_entry->vsi_list_info->vsi_list_id, fm_entry->vsi_list_info->ref_cnt); } } ice_release_lock(rule_lock); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_sysctl_dump_state_flags - Dump device driver state flags * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "state" sysctl to display currently set driver state flags. */ static int ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct sbuf *sbuf; u32 copied_state; unsigned int i; bool at_least_one = false; UNREFERENCED_PARAMETER(oidp); UNREFERENCED_PARAMETER(arg2); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Make a copy of the state to ensure we display coherent values */ copied_state = atomic_load_acq_32(&sc->state); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); /* Add the string for each set state to the sbuf */ for (i = 0; i < 32; i++) { if (copied_state & BIT(i)) { const char *str = ice_state_to_str((enum ice_state)i); at_least_one = true; if (str) sbuf_printf(sbuf, "\n%s", str); else sbuf_printf(sbuf, "\nBIT(%u)", i); } } if (!at_least_one) sbuf_printf(sbuf, "Nothing set"); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_add_debug_tunables - Add tunables helpful for debugging the device driver * @sc: device private structure * * Add sysctl tunable values related to debugging the device driver. For now, * this means a tunable to set the debug mask early during driver load. * * The debug node will be marked CTLFLAG_SKIP unless INVARIANTS is defined, so * that in normal kernel builds, these will all be hidden, but on a debug * kernel they will be more easily visible. 
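 *
 * For example, the debug_mask tunable added below may be primed from
 * loader.conf before the driver attaches (the unit number is illustrative):
 *
 *	dev.ice.0.debug.debug_mask="0x1"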
*/ static void ice_add_debug_tunables(struct ice_softc *sc) { struct sysctl_oid_list *debug_list; device_t dev = sc->dev; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL, "Debug Sysctls"); debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "debug_mask", CTLFLAG_RW | CTLFLAG_TUN, &sc->hw.debug_mask, 0, "Debug message enable/disable mask"); /* Load the default value from the global sysctl first */ sc->enable_tx_fc_filter = ice_enable_tx_fc_filter; SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_fc_filter", CTLFLAG_RDTUN, &sc->enable_tx_fc_filter, 0, "Drop Ethertype 0x8808 control frames originating from software on this PF"); /* Load the default value from the global sysctl first */ sc->enable_tx_lldp_filter = ice_enable_tx_lldp_filter; SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_lldp_filter", CTLFLAG_RDTUN, &sc->enable_tx_lldp_filter, 0, "Drop Ethertype 0x88cc LLDP frames originating from software on this PF"); } #define ICE_SYSCTL_HELP_REQUEST_RESET \ "\nRequest the driver to initiate a reset." \ "\n\tpfr - Initiate a PF reset" \ "\n\tcorer - Initiate a CORE reset" \ "\n\tglobr - Initiate a GLOBAL reset" /** * @var rl_sysctl_ticks * @brief timestamp for latest reset request sysctl call * * Helps rate-limit the call to the sysctl which resets the device */ int rl_sysctl_ticks = 0; /** * ice_sysctl_request_reset - Request that the driver initiate a reset * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Callback for "request_reset" sysctl to request that the driver initiate * a reset. Expects to be passed one of the following strings * * "pfr" - Initiate a PF reset * "corer" - Initiate a CORE reset * "globr" - Initiate a Global reset */ static int ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_hw *hw = &sc->hw; enum ice_status status; enum ice_reset_req reset_type = ICE_RESET_INVAL; const char *reset_message; - int error = 0; + int ret; /* Buffer to store the requested reset string. Must contain enough * space to store the largest expected reset string, which currently * means 6 bytes of space. */ char reset[6] = ""; UNREFERENCED_PARAMETER(arg2); - error = priv_check(curthread, PRIV_DRIVER); - if (error) - return (error); + ret = priv_check(curthread, PRIV_DRIVER); + if (ret) + return (ret); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); /* Read in the requested reset type. 
*/ - error = sysctl_handle_string(oidp, reset, sizeof(reset), req); - if ((error) || (req->newptr == NULL)) - return (error); + ret = sysctl_handle_string(oidp, reset, sizeof(reset), req); + if ((ret) || (req->newptr == NULL)) + return (ret); if (strcmp(reset, "pfr") == 0) { reset_message = "Requesting a PF reset"; reset_type = ICE_RESET_PFR; } else if (strcmp(reset, "corer") == 0) { reset_message = "Initiating a CORE reset"; reset_type = ICE_RESET_CORER; } else if (strcmp(reset, "globr") == 0) { reset_message = "Initiating a GLOBAL reset"; reset_type = ICE_RESET_GLOBR; } else if (strcmp(reset, "empr") == 0) { device_printf(sc->dev, "Triggering an EMP reset via software is not currently supported\n"); return (EOPNOTSUPP); } if (reset_type == ICE_RESET_INVAL) { device_printf(sc->dev, "%s is not a valid reset request\n", reset); return (EINVAL); } /* * Rate-limit the frequency at which this function is called. * Assuming this is called successfully once, typically, * everything should be handled within the allotted time frame. * However, in the odd setup situations, we've also put in * guards for when the reset has finished, but we're in the * process of rebuilding. And instead of queueing an intent, * simply error out and let the caller retry, if so desired. */ if (TICKS_2_MSEC(ticks - rl_sysctl_ticks) < 500) { device_printf(sc->dev, "Call frequency too high. Operation aborted.\n"); return (EBUSY); } rl_sysctl_ticks = ticks; if (TICKS_2_MSEC(ticks - sc->rebuild_ticks) < 100) { device_printf(sc->dev, "Device rebuilding. Operation aborted.\n"); return (EBUSY); } if (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) { device_printf(sc->dev, "Device in reset. Operation aborted.\n"); return (EBUSY); } device_printf(sc->dev, "%s\n", reset_message); /* Initiate the PF reset during the admin status task */ if (reset_type == ICE_RESET_PFR) { ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ); return (0); } /* * Other types of resets including CORE and GLOBAL resets trigger an * interrupt on all PFs. Initiate the reset now. Preparation and * rebuild logic will be handled by the admin status task. */ status = ice_reset(hw, reset_type); /* * Resets can take a long time and we still don't want another call * to this function before we settle down. */ rl_sysctl_ticks = ticks; if (status) { device_printf(sc->dev, "failed to initiate device reset, err %s\n", ice_status_str(status)); ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); return (EFAULT); } return (0); } /** * ice_add_debug_sysctls - Add sysctls helpful for debugging the device driver * @sc: device private structure * * Add sysctls related to debugging the device driver. Generally these should * simply be sysctls which dump internal driver state, to aid in understanding * what the driver is doing. 
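 *
 * For example, an administrator can request a PF reset through the
 * request_reset sysctl added below (the unit number is illustrative):
 *
 *	sysctl dev.ice.0.debug.request_reset=pfr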
*/ static void ice_add_debug_sysctls(struct ice_softc *sc) { struct sysctl_oid *sw_node; struct sysctl_oid_list *debug_list, *sw_list; device_t dev = sc->dev; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); debug_list = SYSCTL_CHILDREN(sc->debug_sysctls); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "request_reset", CTLTYPE_STRING | CTLFLAG_WR, sc, 0, ice_sysctl_request_reset, "A", ICE_SYSCTL_HELP_REQUEST_RESET); SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "pfr_count", CTLFLAG_RD, &sc->soft_stats.pfr_count, 0, "# of PF resets handled"); SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "corer_count", CTLFLAG_RD, &sc->soft_stats.corer_count, 0, "# of CORE resets handled"); SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "globr_count", CTLFLAG_RD, &sc->soft_stats.globr_count, 0, "# of Global resets handled"); SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "empr_count", CTLFLAG_RD, &sc->soft_stats.empr_count, 0, "# of EMP resets handled"); SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "tx_mdd_count", CTLFLAG_RD, &sc->soft_stats.tx_mdd_count, 0, "# of Tx MDD events detected"); SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "rx_mdd_count", CTLFLAG_RD, &sc->soft_stats.rx_mdd_count, 0, "# of Rx MDD events detected"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "state", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_dump_state_flags, "A", "Driver State Flags"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_type_low", CTLTYPE_U64 | CTLFLAG_RW, sc, 0, ice_sysctl_phy_type_low, "QU", "PHY type Low from Get PHY Caps/Set PHY Cfg"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_type_high", CTLTYPE_U64 | CTLFLAG_RW, sc, 0, ice_sysctl_phy_type_high, "QU", "PHY type High from Get PHY Caps/Set PHY Cfg"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_sw_caps", CTLTYPE_STRUCT | CTLFLAG_RD, sc, 0, ice_sysctl_phy_sw_caps, "", "Get PHY Capabilities (Software configuration)"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_nvm_caps", CTLTYPE_STRUCT | CTLFLAG_RD, sc, 0, ice_sysctl_phy_nvm_caps, "", "Get PHY Capabilities (NVM configuration)"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_topo_caps", CTLTYPE_STRUCT | CTLFLAG_RD, sc, 0, ice_sysctl_phy_topo_caps, "", "Get PHY Capabilities (Topology configuration)"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_link_status", CTLTYPE_STRUCT | CTLFLAG_RD, sc, 0, ice_sysctl_phy_link_status, "", "Get PHY Link Status"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "fw_build", CTLFLAG_RD, &sc->hw.fw_build, 0, "FW Build ID"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "os_ddp_version", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_os_pkg_version, "A", "DDP package name and version found in ice_ddp"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "cur_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_fw_cur_lldp_persist_status, "A", "Current LLDP persistent status"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "dflt_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_fw_dflt_lldp_persist_status, "A", "Default LLDP persistent status"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "negotiated_fc", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_negotiated_fc, "A", "Current Negotiated Flow Control mode"); sw_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "switch", CTLFLAG_RD, NULL, "Switch Configuration"); sw_list = SYSCTL_CHILDREN(sw_node); SYSCTL_ADD_PROC(ctx, sw_list, OID_AUTO, 
"mac_filters", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_dump_mac_filters, "A", "MAC Filters"); SYSCTL_ADD_PROC(ctx, sw_list, OID_AUTO, "vlan_filters", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_dump_vlan_filters, "A", "VLAN Filters"); SYSCTL_ADD_PROC(ctx, sw_list, OID_AUTO, "ethertype_filters", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_dump_ethertype_filters, "A", "Ethertype Filters"); SYSCTL_ADD_PROC(ctx, sw_list, OID_AUTO, "ethertype_mac_filters", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ice_sysctl_dump_ethertype_mac_filters, "A", "Ethertype/MAC Filters"); } /** * ice_vsi_disable_tx - Disable (unconfigure) Tx queues for a VSI * @vsi: the VSI to disable * * Disables the Tx queues associated with this VSI. Essentially the opposite * of ice_cfg_vsi_for_tx. */ int ice_vsi_disable_tx(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; enum ice_status status; u32 *q_teids; u16 *q_ids, *q_handles; int i, err = 0; if (vsi->num_tx_queues > 255) return (ENOSYS); q_teids = (u32 *)malloc(sizeof(*q_teids) * vsi->num_tx_queues, M_ICE, M_NOWAIT|M_ZERO); if (!q_teids) return (ENOMEM); q_ids = (u16 *)malloc(sizeof(*q_ids) * vsi->num_tx_queues, M_ICE, M_NOWAIT|M_ZERO); if (!q_ids) { err = (ENOMEM); goto free_q_teids; } q_handles = (u16 *)malloc(sizeof(*q_handles) * vsi->num_tx_queues, M_ICE, M_NOWAIT|M_ZERO); if (!q_handles) { err = (ENOMEM); goto free_q_ids; } for (i = 0; i < vsi->num_tx_queues; i++) { struct ice_tx_queue *txq = &vsi->tx_queues[i]; q_ids[i] = vsi->tx_qmap[i]; q_handles[i] = i; q_teids[i] = txq->q_teid; } status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, vsi->num_tx_queues, q_handles, q_ids, q_teids, ICE_NO_RESET, 0, NULL); if (status == ICE_ERR_DOES_NOT_EXIST) { ; /* Queues have already been disabled, no need to report this as an error */ } else if (status == ICE_ERR_RESET_ONGOING) { device_printf(sc->dev, "Reset in progress. LAN Tx queues already disabled\n"); } else if (status) { device_printf(sc->dev, "Failed to disable LAN Tx queues: err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = (ENODEV); } /* free_q_handles: */ free(q_handles, M_ICE); free_q_ids: free(q_ids, M_ICE); free_q_teids: free(q_teids, M_ICE); return err; } /** * ice_vsi_set_rss_params - Set the RSS parameters for the VSI * @vsi: the VSI to configure * * Sets the RSS table size and lookup table type for the VSI based on its * VSI type. */ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; struct ice_hw_common_caps *cap; cap = &sc->hw.func_caps.common_cap; switch (vsi->type) { case ICE_VSI_PF: /* The PF VSI inherits RSS instance of the PF */ vsi->rss_table_size = cap->rss_table_size; vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF; break; case ICE_VSI_VF: vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE; vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI; break; default: device_printf(sc->dev, "VSI %d: RSS not supported for VSI type %d\n", vsi->idx, vsi->type); break; } } /** * ice_vsi_add_txqs_ctx - Create a sysctl context and node to store txq sysctls * @vsi: The VSI to add the context for * * Creates a sysctl context for storing txq sysctls. Additionally creates * a node rooted at the given VSI's main sysctl node. This context will be * used to store per-txq sysctls which may need to be released during the * driver's lifetime. 
*/ void ice_vsi_add_txqs_ctx(struct ice_vsi *vsi) { struct sysctl_oid_list *vsi_list; sysctl_ctx_init(&vsi->txqs_ctx); vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); vsi->txqs_node = SYSCTL_ADD_NODE(&vsi->txqs_ctx, vsi_list, OID_AUTO, "txqs", CTLFLAG_RD, NULL, "Tx Queues"); } /** * ice_vsi_add_rxqs_ctx - Create a sysctl context and node to store rxq sysctls * @vsi: The VSI to add the context for * * Creates a sysctl context for storing rxq sysctls. Additionally creates * a node rooted at the given VSI's main sysctl node. This context will be * used to store per-rxq sysctls which may need to be released during the * driver's lifetime. */ void ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi) { struct sysctl_oid_list *vsi_list; sysctl_ctx_init(&vsi->rxqs_ctx); vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); vsi->rxqs_node = SYSCTL_ADD_NODE(&vsi->rxqs_ctx, vsi_list, OID_AUTO, "rxqs", CTLFLAG_RD, NULL, "Rx Queues"); } /** * ice_vsi_del_txqs_ctx - Delete the Tx queue sysctl context for this VSI * @vsi: The VSI to delete from * * Frees the txq sysctl context created for storing the per-queue Tx sysctls. * Must be called prior to freeing the Tx queue memory, in order to avoid * having sysctls point at stale memory. */ void ice_vsi_del_txqs_ctx(struct ice_vsi *vsi) { device_t dev = vsi->sc->dev; int err; if (vsi->txqs_node) { err = sysctl_ctx_free(&vsi->txqs_ctx); if (err) device_printf(dev, "failed to free VSI %d txqs_ctx, err %s\n", vsi->idx, ice_err_str(err)); vsi->txqs_node = NULL; } } /** * ice_vsi_del_rxqs_ctx - Delete the Rx queue sysctl context for this VSI * @vsi: The VSI to delete from * * Frees the rxq sysctl context created for storing the per-queue Rx sysctls. * Must be called prior to freeing the Rx queue memory, in order to avoid * having sysctls point at stale memory. */ void ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi) { device_t dev = vsi->sc->dev; int err; if (vsi->rxqs_node) { err = sysctl_ctx_free(&vsi->rxqs_ctx); if (err) device_printf(dev, "failed to free VSI %d rxqs_ctx, err %s\n", vsi->idx, ice_err_str(err)); vsi->rxqs_node = NULL; } } /** * ice_add_txq_sysctls - Add per-queue sysctls for a Tx queue * @txq: pointer to the Tx queue * * Add per-queue sysctls for a given Tx queue. Can't be called during * ice_add_vsi_sysctls, since the queue memory has not yet been set up. */ void ice_add_txq_sysctls(struct ice_tx_queue *txq) { struct ice_vsi *vsi = txq->vsi; struct sysctl_ctx_list *ctx = &vsi->txqs_ctx; struct sysctl_oid_list *txqs_list, *this_txq_list; struct sysctl_oid *txq_node; char txq_name[32], txq_desc[32]; const struct ice_sysctl_info ctls[] = { { &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" }, { &txq->stats.tx_bytes, "tx_bytes", "Queue Bytes Transmitted" }, { &txq->stats.mss_too_small, "mss_too_small", "TSO sends with an MSS less than 64" }, { 0, 0, 0 } }; const struct ice_sysctl_info *entry = ctls; txqs_list = SYSCTL_CHILDREN(vsi->txqs_node); snprintf(txq_name, sizeof(txq_name), "%u", txq->me); snprintf(txq_desc, sizeof(txq_desc), "Tx Queue %u", txq->me); txq_node = SYSCTL_ADD_NODE(ctx, txqs_list, OID_AUTO, txq_name, CTLFLAG_RD, NULL, txq_desc); this_txq_list = SYSCTL_CHILDREN(txq_node); /* Add the Tx queue statistics */ while (entry->stat != 0) { SYSCTL_ADD_U64(ctx, this_txq_list, OID_AUTO, entry->name, CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, entry->description); entry++; } } /** * ice_add_rxq_sysctls - Add per-queue sysctls for an Rx queue * @rxq: pointer to the Rx queue * * Add per-queue sysctls for a given Rx queue.
Can't be called during * ice_add_vsi_sysctls, since the queue memory has not yet been set up. */ void ice_add_rxq_sysctls(struct ice_rx_queue *rxq) { struct ice_vsi *vsi = rxq->vsi; struct sysctl_ctx_list *ctx = &vsi->rxqs_ctx; struct sysctl_oid_list *rxqs_list, *this_rxq_list; struct sysctl_oid *rxq_node; char rxq_name[32], rxq_desc[32]; const struct ice_sysctl_info ctls[] = { { &rxq->stats.rx_packets, "rx_packets", "Queue Packets Received" }, { &rxq->stats.rx_bytes, "rx_bytes", "Queue Bytes Received" }, { &rxq->stats.desc_errs, "rx_desc_errs", "Queue Rx Descriptor Errors" }, { 0, 0, 0 } }; const struct ice_sysctl_info *entry = ctls; rxqs_list = SYSCTL_CHILDREN(vsi->rxqs_node); snprintf(rxq_name, sizeof(rxq_name), "%u", rxq->me); snprintf(rxq_desc, sizeof(rxq_desc), "Rx Queue %u", rxq->me); rxq_node = SYSCTL_ADD_NODE(ctx, rxqs_list, OID_AUTO, rxq_name, CTLFLAG_RD, NULL, rxq_desc); this_rxq_list = SYSCTL_CHILDREN(rxq_node); /* Add the Rx queue statistics */ while (entry->stat != 0) { SYSCTL_ADD_U64(ctx, this_rxq_list, OID_AUTO, entry->name, CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0, entry->description); entry++; } } /** * ice_get_default_rss_key - Obtain a default RSS key * @seed: storage for the RSS key data * * Copies a pre-generated RSS key into the seed memory. The seed pointer must * point to a block of memory that is at least 40 bytes in size. * * The key isn't randomly generated each time this function is called because * that makes the RSS key change every time we reconfigure RSS. This does mean * that we're hard coding a possibly 'well known' key. We might want to * investigate randomly generating this key once during the first call. */ static void ice_get_default_rss_key(u8 *seed) { const u8 default_seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE] = { 0x39, 0xed, 0xff, 0x4d, 0x43, 0x58, 0x42, 0xc3, 0x5f, 0xb8, 0xa5, 0x32, 0x95, 0x65, 0x81, 0xcd, 0x36, 0x79, 0x71, 0x97, 0xde, 0xa4, 0x41, 0x40, 0x6f, 0x27, 0xe9, 0x81, 0x13, 0xa0, 0x95, 0x93, 0x5b, 0x1e, 0x9d, 0x27, 0x9d, 0x24, 0x84, 0xb5, }; bcopy(default_seed, seed, ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE); } /** * ice_set_rss_key - Configure a given VSI with the default RSS key * @vsi: the VSI to configure * * Program the hardware RSS key. We use rss_getkey to grab the kernel RSS key. * If the kernel RSS interface is not available, this will fall back to our * pre-generated hash seed from ice_get_default_rss_key(). */ static int ice_set_rss_key(struct ice_vsi *vsi) { struct ice_aqc_get_set_rss_keys keydata = { .standard_rss_key = {0} }; struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; enum ice_status status; /* * If the RSS kernel interface is disabled, this will return the * default RSS key above. */ rss_getkey(keydata.standard_rss_key); status = ice_aq_set_rss_key(hw, vsi->idx, &keydata); if (status) { device_printf(sc->dev, "ice_aq_set_rss_key status %s, error %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } return (0); } /** * ice_set_rss_flow_flds - Program the RSS hash flows after package init * @vsi: the VSI to configure * * If the package file is initialized, the default RSS flows are reset. We * need to reprogram the expected hash configuration. We'll use * rss_gethashconfig() to determine which flows to enable. If RSS kernel * support is not enabled, this macro will fall back to suitable defaults.
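* * As an illustrative example: if the kernel reports only RSS_HASHTYPE_RSS_TCP_IPV4 and RSS_HASHTYPE_RSS_TCP_IPV6, then only the TCP-over-IPv4 and TCP-over-IPv6 flow configurations below are programmed, and any requested bit outside ICE_DEFAULT_RSS_HASH_CONFIG triggers the warning at the end of this function.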
*/ static void ice_set_rss_flow_flds(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; struct ice_rss_hash_cfg rss_cfg = { 0, 0, ICE_RSS_ANY_HEADERS, false }; device_t dev = sc->dev; enum ice_status status; u_int rss_hash_config; rss_hash_config = rss_gethashconfig(); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) { rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4; rss_cfg.hash_flds = ICE_FLOW_HASH_IPV4; status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); if (status) device_printf(dev, "ice_add_rss_cfg on VSI %d failed for ipv4 flow, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) { rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP; rss_cfg.hash_flds = ICE_HASH_TCP_IPV4; status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); if (status) device_printf(dev, "ice_add_rss_cfg on VSI %d failed for tcp4 flow, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) { rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP; rss_cfg.hash_flds = ICE_HASH_UDP_IPV4; status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); if (status) device_printf(dev, "ice_add_rss_cfg on VSI %d failed for udp4 flow, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } if (rss_hash_config & (RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_IPV6_EX)) { rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6; rss_cfg.hash_flds = ICE_FLOW_HASH_IPV6; status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); if (status) device_printf(dev, "ice_add_rss_cfg on VSI %d failed for ipv6 flow, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) { rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP; rss_cfg.hash_flds = ICE_HASH_TCP_IPV6; status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); if (status) device_printf(dev, "ice_add_rss_cfg on VSI %d failed for tcp6 flow, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) { rss_cfg.addl_hdrs = ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP; rss_cfg.hash_flds = ICE_HASH_UDP_IPV6; status = ice_add_rss_cfg(hw, vsi->idx, &rss_cfg); if (status) device_printf(dev, "ice_add_rss_cfg on VSI %d failed for udp6 flow, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } /* Warn about RSS hash types which are not supported */ /* coverity[dead_error_condition] */ if (rss_hash_config & ~ICE_DEFAULT_RSS_HASH_CONFIG) { device_printf(dev, "ice_add_rss_cfg on VSI %d could not configure every requested hash type\n", vsi->idx); } } /** * ice_set_rss_lut - Program the RSS lookup table for a VSI * @vsi: the VSI to configure * * Programs the RSS lookup table for a given VSI. We use * rss_get_indirection_to_bucket which will use the indirection table provided * by the kernel RSS interface when available. If the kernel RSS interface is * not available, we will fall back to a simple round-robin fashion queue * assignment. 
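* * For example (illustrative numbers): with 4 Rx queues and the kernel RSS interface disabled, the indirection lookup effectively becomes the identity, so the table entries are assigned queues 0, 1, 2, 3, 0, 1, ... in a repeating round-robin pattern.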
*/ static int ice_set_rss_lut(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; struct ice_aq_get_set_rss_lut_params lut_params; enum ice_status status; int i, err = 0; u8 *lut; lut = (u8 *)malloc(vsi->rss_table_size, M_ICE, M_NOWAIT|M_ZERO); if (!lut) { device_printf(dev, "Failed to allocate RSS lut memory\n"); return (ENOMEM); } /* Populate the LUT with max no. of queues. If the RSS kernel * interface is disabled, this will assign the lookup table in * a simple round robin fashion */ for (i = 0; i < vsi->rss_table_size; i++) { /* XXX: this needs to be changed if num_rx_queues ever counts * more than just the RSS queues */ lut[i] = rss_get_indirection_to_bucket(i) % vsi->num_rx_queues; } lut_params.vsi_handle = vsi->idx; lut_params.lut_size = vsi->rss_table_size; lut_params.lut_type = vsi->rss_lut_type; lut_params.lut = lut; lut_params.global_lut_id = 0; status = ice_aq_set_rss_lut(hw, &lut_params); if (status) { device_printf(dev, "Cannot set RSS lut, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = (EIO); } free(lut, M_ICE); return err; } /** * ice_config_rss - Configure RSS for a VSI * @vsi: the VSI to configure * * If FEATURE_RSS is enabled, configures the RSS lookup table and hash key for * a given VSI. */ int ice_config_rss(struct ice_vsi *vsi) { int err; /* Nothing to do, if RSS is not enabled */ if (!ice_is_bit_set(vsi->sc->feat_en, ICE_FEATURE_RSS)) return 0; err = ice_set_rss_key(vsi); if (err) return err; ice_set_rss_flow_flds(vsi); return ice_set_rss_lut(vsi); } /** * ice_log_pkg_init - Log a message about status of DDP initialization * @sc: the device softc pointer * @pkg_status: the status result of ice_copy_and_init_pkg * * Called by ice_load_pkg after an attempt to download the DDP package * contents to the device. Determines whether the download was successful or * not and logs an appropriate message for the system administrator. * * @post if a DDP package was previously downloaded on another port and it * is not compatible with this driver, pkg_status will be updated to reflect * this, and the driver will transition to safe mode. */ void ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; struct sbuf *active_pkg, *os_pkg; active_pkg = sbuf_new_auto(); ice_active_pkg_version_str(hw, active_pkg); sbuf_finish(active_pkg); os_pkg = sbuf_new_auto(); ice_os_pkg_version_str(hw, os_pkg); sbuf_finish(os_pkg); switch (*pkg_status) { case ICE_SUCCESS: /* The package download AdminQ command returned success because * this download succeeded or ICE_ERR_AQ_NO_WORK since there is * already a package loaded on the device. */ if (hw->pkg_ver.major == hw->active_pkg_ver.major && hw->pkg_ver.minor == hw->active_pkg_ver.minor && hw->pkg_ver.update == hw->active_pkg_ver.update && hw->pkg_ver.draft == hw->active_pkg_ver.draft && !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { switch (hw->pkg_dwnld_status) { case ICE_AQ_RC_OK: device_printf(dev, "The DDP package was successfully loaded: %s.\n", sbuf_data(active_pkg)); break; case ICE_AQ_RC_EEXIST: device_printf(dev, "DDP package already present on device: %s.\n", sbuf_data(active_pkg)); break; default: /* We do not expect this to occur, but the * extra messaging is here in case something * changes in the ice_init_pkg flow. */ device_printf(dev, "DDP package already present on device: %s. 
An unexpected error occurred, pkg_dwnld_status %s.\n", sbuf_data(active_pkg), ice_aq_str(hw->pkg_dwnld_status)); break; } } else if (pkg_ver_compatible(&hw->active_pkg_ver) == 0) { device_printf(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n", sbuf_data(active_pkg), sbuf_data(os_pkg)); } else if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { device_printf(dev, "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); *pkg_status = ICE_ERR_NOT_SUPPORTED; } else { device_printf(dev, "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); *pkg_status = ICE_ERR_NOT_SUPPORTED; } break; case ICE_ERR_NOT_SUPPORTED: /* * This assumes that the active_pkg_ver will not be * initialized if the ice_ddp package version is not * supported. */ if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) { /* The ice_ddp version is not supported */ if (pkg_ver_compatible(&hw->pkg_ver) > 0) { device_printf(dev, "The DDP package in the ice_ddp module is higher than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated driver. Entering Safe Mode.\n", sbuf_data(os_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); } else if (pkg_ver_compatible(&hw->pkg_ver) < 0) { device_printf(dev, "The DDP package in the ice_ddp module is lower than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated ice_ddp module. Entering Safe Mode.\n", sbuf_data(os_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); } else { device_printf(dev, "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", ice_status_str(*pkg_status), ice_aq_str(hw->pkg_dwnld_status), sbuf_data(os_pkg), sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); } } else { if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { device_printf(dev, "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); } else if (pkg_ver_compatible(&hw->active_pkg_ver) < 0) { device_printf(dev, "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); } else { device_printf(dev, "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", ice_status_str(*pkg_status), ice_aq_str(hw->pkg_dwnld_status), sbuf_data(os_pkg), sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); } } break; case ICE_ERR_CFG: case ICE_ERR_BUF_TOO_SHORT: case ICE_ERR_PARAM: device_printf(dev, "The DDP package in the ice_ddp module is invalid. 
Entering Safe Mode\n"); break; case ICE_ERR_FW_DDP_MISMATCH: device_printf(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); break; case ICE_ERR_AQ_ERROR: switch (hw->pkg_dwnld_status) { case ICE_AQ_RC_ENOSEC: case ICE_AQ_RC_EBADSIG: device_printf(dev, "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n"); goto free_sbufs; case ICE_AQ_RC_ESVN: device_printf(dev, "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n"); goto free_sbufs; case ICE_AQ_RC_EBADMAN: case ICE_AQ_RC_EBADBUF: device_printf(dev, "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n"); goto free_sbufs; default: break; } /* fall-through */ default: device_printf(dev, "An unknown error (%s aq_err %s) occurred when loading the DDP package. Entering Safe Mode.\n", ice_status_str(*pkg_status), ice_aq_str(hw->pkg_dwnld_status)); break; } free_sbufs: sbuf_delete(active_pkg); sbuf_delete(os_pkg); } /** * ice_load_pkg_file - Load the DDP package file using firmware_get * @sc: device private softc * * Use firmware_get to load the DDP package memory and then request that * firmware download the package contents and program the relevant hardware * bits. * * This function makes a copy of the DDP package memory which is tracked in * the ice_hw structure. The copy will be managed and released by * ice_deinit_hw(). This allows the firmware reference to be immediately * released using firmware_put. */ void ice_load_pkg_file(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; const struct firmware *pkg; pkg = firmware_get("ice_ddp"); if (!pkg) { device_printf(dev, "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n"); if (cold) device_printf(dev, "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n"); ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); return; } /* Copy and download the pkg contents */ status = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize); /* Release the firmware reference */ firmware_put(pkg, FIRMWARE_UNLOAD); /* Check the active DDP package version and log a message */ ice_log_pkg_init(sc, &status); /* Place the driver into safe mode */ if (status != ICE_SUCCESS) { ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); } } /** * ice_get_ifnet_counter - Retrieve counter value for a given ifnet counter * @vsi: the vsi to retrieve the value for * @counter: the counter type to retrieve * * Returns the value for a given ifnet counter. To do so, we calculate the * value based on the matching hardware statistics. */ uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter) { struct ice_hw_port_stats *hs = &vsi->sc->stats.cur; struct ice_eth_stats *es = &vsi->hw_stats.cur; /* For some statistics, especially those related to error flows, we do * not have per-VSI counters. In this case, we just report the global * counters. 
*/ switch (counter) { case IFCOUNTER_IPACKETS: return (es->rx_unicast + es->rx_multicast + es->rx_broadcast); case IFCOUNTER_IERRORS: return (hs->crc_errors + hs->illegal_bytes + hs->mac_local_faults + hs->mac_remote_faults + hs->rx_len_errors + hs->rx_undersize + hs->rx_oversize + hs->rx_fragments + hs->rx_jabber); case IFCOUNTER_OPACKETS: return (es->tx_unicast + es->tx_multicast + es->tx_broadcast); case IFCOUNTER_OERRORS: return (es->tx_errors); case IFCOUNTER_COLLISIONS: return (0); case IFCOUNTER_IBYTES: return (es->rx_bytes); case IFCOUNTER_OBYTES: return (es->tx_bytes); case IFCOUNTER_IMCASTS: return (es->rx_multicast); case IFCOUNTER_OMCASTS: return (es->tx_multicast); case IFCOUNTER_IQDROPS: return (es->rx_discards); case IFCOUNTER_OQDROPS: return (hs->tx_dropped_link_down); case IFCOUNTER_NOPROTO: return (es->rx_unknown_protocol); default: return if_get_counter_default(vsi->sc->ifp, counter); } } /** * ice_save_pci_info - Save PCI configuration fields in HW struct * @hw: the ice_hw struct to save the PCI information in * @dev: the device to get the PCI information from * * This should only be called once, early in the device attach * process. */ void ice_save_pci_info(struct ice_hw *hw, device_t dev) { hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->subsystem_vendor_id = pci_get_subvendor(dev); hw->subsystem_device_id = pci_get_subdevice(dev); hw->revision_id = pci_get_revid(dev); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); } /** * ice_replay_all_vsi_cfg - Replay configuration for all VSIs after reset * @sc: the device softc * * Replay the configuration for each VSI, and then cleanup replay * information. Called after a hardware reset in order to reconfigure the * active VSIs. */ int ice_replay_all_vsi_cfg(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; enum ice_status status; int i; for (i = 0 ; i < sc->num_available_vsi; i++) { struct ice_vsi *vsi = sc->all_vsi[i]; if (!vsi) continue; status = ice_replay_vsi(hw, vsi->idx); if (status) { device_printf(sc->dev, "Failed to replay VSI %d, err %s aq_err %s\n", vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } } /* Cleanup replay filters after successful reconfiguration */ ice_replay_post(hw); return (0); } /** * ice_clean_vsi_rss_cfg - Cleanup RSS configuration for a given VSI * @vsi: pointer to the VSI structure * * Cleanup the advanced RSS configuration for a given VSI. This is necessary * during driver removal to ensure that all RSS resources are properly * released. * * @remark this function doesn't report an error as it is expected to be * called during driver reset and unload, and there isn't much the driver can * do if freeing RSS resources fails. */ static void ice_clean_vsi_rss_cfg(struct ice_vsi *vsi) { struct ice_softc *sc = vsi->sc; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; status = ice_rem_vsi_rss_cfg(hw, vsi->idx); if (status) device_printf(dev, "Failed to remove RSS configuration for VSI %d, err %s\n", vsi->idx, ice_status_str(status)); /* Remove this VSI from the RSS list */ ice_rem_vsi_rss_list(hw, vsi->idx); } /** * ice_clean_all_vsi_rss_cfg - Cleanup RSS configuration for all VSIs * @sc: the device softc pointer * * Cleanup the advanced RSS configuration for all VSIs on a given PF * interface. * * @remark This should be called while preparing for a reset, to cleanup stale * RSS configuration for all VSIs.
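* * The per-VSI teardown is delegated to ice_clean_vsi_rss_cfg() above; this function is simply a NULL-checked loop over sc->all_vsi.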
*/ void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc) { int i; /* No need to cleanup if RSS is not enabled */ if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS)) return; for (i = 0; i < sc->num_available_vsi; i++) { struct ice_vsi *vsi = sc->all_vsi[i]; if (vsi) ice_clean_vsi_rss_cfg(vsi); } } /** * ice_requested_fec_mode - Return the requested FEC mode as a string * @pi: The port info structure * * Return a string representing the requested FEC mode. */ static const char * ice_requested_fec_mode(struct ice_port_info *pi) { struct ice_aqc_get_phy_caps_data pcaps = { 0 }; enum ice_status status; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, &pcaps, NULL); if (status) /* Just report unknown if we can't get capabilities */ return "Unknown"; /* Check if RS-FEC has been requested first */ if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | ICE_AQC_PHY_FEC_25G_RS_544_REQ)) return ice_fec_str(ICE_FEC_RS); /* If RS FEC has not been requested, then check BASE-R */ if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | ICE_AQC_PHY_FEC_25G_KR_REQ)) return ice_fec_str(ICE_FEC_BASER); return ice_fec_str(ICE_FEC_NONE); } /** * ice_negotiated_fec_mode - Return the negotiated FEC mode as a string * @pi: The port info structure * * Return a string representing the current FEC mode. */ static const char * ice_negotiated_fec_mode(struct ice_port_info *pi) { /* First, check if RS FEC has been negotiated */ if (pi->phy.link_info.fec_info & (ICE_AQ_LINK_25G_RS_528_FEC_EN | ICE_AQ_LINK_25G_RS_544_FEC_EN)) return ice_fec_str(ICE_FEC_RS); /* If RS FEC has not been negotiated, then check BASE-R */ if (pi->phy.link_info.fec_info & ICE_AQ_LINK_25G_KR_FEC_EN) return ice_fec_str(ICE_FEC_BASER); return ice_fec_str(ICE_FEC_NONE); } /** * ice_autoneg_mode - Return string indicating whether autoneg completed * @pi: The port info structure * * Return "True" if autonegotiation is completed, "False" otherwise. */ static const char * ice_autoneg_mode(struct ice_port_info *pi) { if (pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) return "True"; else return "False"; } /** * ice_flowcontrol_mode - Return string indicating the Flow Control mode * @pi: The port info structure * * Returns the current Flow Control mode as a string. */ static const char * ice_flowcontrol_mode(struct ice_port_info *pi) { return ice_fc_str(pi->fc.current_mode); } /** * ice_link_up_msg - Log a link up message with associated info * @sc: the device private softc * * Log a link up message with LOG_NOTICE message level. Include information * about the duplex, FEC mode, autonegotiation and flow control. */ void ice_link_up_msg(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; struct ifnet *ifp = sc->ifp; const char *speed, *req_fec, *neg_fec, *autoneg, *flowcontrol; speed = ice_aq_speed_to_str(hw->port_info); req_fec = ice_requested_fec_mode(hw->port_info); neg_fec = ice_negotiated_fec_mode(hw->port_info); autoneg = ice_autoneg_mode(hw->port_info); flowcontrol = ice_flowcontrol_mode(hw->port_info); log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", ifp->if_xname, speed, req_fec, neg_fec, autoneg, flowcontrol); } /** * ice_update_laa_mac - Update MAC address if Locally Administered * @sc: the device softc * * Update the device MAC address when a Locally Administered Address is * assigned. * * This function does *not* update the MAC filter list itself.
Instead, it * should be called after ice_rm_pf_default_mac_filters, so that the previous * address filter will be removed, and before ice_cfg_pf_default_mac_filters, * so that the new address filter will be assigned. */ int ice_update_laa_mac(struct ice_softc *sc) { const u8 *lladdr = (const u8 *)IF_LLADDR(sc->ifp); struct ice_hw *hw = &sc->hw; enum ice_status status; /* If the address is the same, then there is nothing to update */ if (!memcmp(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN)) return (0); /* Reject Multicast addresses */ if (ETHER_IS_MULTICAST(lladdr)) return (EINVAL); status = ice_aq_manage_mac_write(hw, lladdr, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL); if (status) { device_printf(sc->dev, "Failed to write mac %6D to firmware, err %s aq_err %s\n", lladdr, ":", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EFAULT); } /* Copy the address into place of the LAN address. */ bcopy(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN); return (0); } /** * ice_get_and_print_bus_info - Save (PCI) bus info and print messages * @sc: device softc * * This will potentially print out a warning message if bus bandwidth * is insufficient for full-speed operation. * * This should only be called once, during the attach process, after * hw->port_info has been filled out with port link topology information * (from the Get PHY Capabilities Admin Queue command). */ void ice_get_and_print_bus_info(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; u16 pci_link_status; int offset; pci_find_cap(dev, PCIY_EXPRESS, &offset); pci_link_status = pci_read_config(dev, offset + PCIER_LINK_STA, 2); /* Fill out hw struct with PCIE link status info */ ice_set_pci_link_status_data(hw, pci_link_status); /* Use info to print out bandwidth messages */ ice_print_bus_link_data(dev, hw); if (ice_pcie_bandwidth_check(sc)) { device_printf(dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); device_printf(dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); } } /** * ice_pcie_bus_speed_to_rate - Convert driver bus speed enum value to * a 64-bit baudrate. * @speed: enum value to convert * * This only goes up to PCIE Gen 4. */ static uint64_t ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed) { /* If the PCI-E speed is Gen1 or Gen2, then report * only 80% of bus speed to account for encoding overhead. */ switch (speed) { case ice_pcie_speed_2_5GT: return IF_Gbps(2); case ice_pcie_speed_5_0GT: return IF_Gbps(4); case ice_pcie_speed_8_0GT: return IF_Gbps(8); case ice_pcie_speed_16_0GT: return IF_Gbps(16); case ice_pcie_speed_unknown: default: return 0; } } /** * ice_pcie_lnk_width_to_int - Convert driver pci-e width enum value to * a 32-bit number. * @width: enum value to convert */ static int ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width) { switch (width) { case ice_pcie_lnk_x1: return (1); case ice_pcie_lnk_x2: return (2); case ice_pcie_lnk_x4: return (4); case ice_pcie_lnk_x8: return (8); case ice_pcie_lnk_x12: return (12); case ice_pcie_lnk_x16: return (16); case ice_pcie_lnk_x32: return (32); case ice_pcie_lnk_width_resrv: case ice_pcie_lnk_width_unknown: default: return (0); } } /** * ice_pcie_bandwidth_check - Check if PCI-E bandwidth is sufficient for * full-speed device operation. * @sc: adapter softc * * Returns 0 if sufficient; 1 if not. 
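* * Illustrative arithmetic (values taken from the conversion helpers above): a single 100G port in a Gen3 x16 slot compares 1 * 100 Gbps of port bandwidth against 8 Gbps/lane * 16 lanes = 128 Gbps of PCIe bandwidth, which is sufficient. A dual-port 100G device would nominally need 200 Gbps, but since the second port is intended for failover, the port count is clamped to one before the comparison.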
*/ static uint8_t ice_pcie_bandwidth_check(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; int num_ports, pcie_width; u64 pcie_speed, port_speed; MPASS(hw->port_info); num_ports = bitcount32(hw->func_caps.common_cap.valid_functions); port_speed = ice_phy_types_to_max_rate(hw->port_info); pcie_speed = ice_pcie_bus_speed_to_rate(hw->bus.speed); pcie_width = ice_pcie_lnk_width_to_int(hw->bus.width); /* * If 2x100, clamp ports to 1 -- 2nd port is intended for * failover. */ if (port_speed == IF_Gbps(100)) num_ports = 1; return !!((num_ports * port_speed) > pcie_speed * pcie_width); } /** * ice_print_bus_link_data - Print PCI-E bandwidth information * @dev: device to print string for * @hw: hw struct with PCI-e link information */ static void ice_print_bus_link_data(device_t dev, struct ice_hw *hw) { device_printf(dev, "PCI Express Bus: Speed %s %s\n", ((hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" : (hw->bus.speed == ice_pcie_speed_8_0GT) ? "8.0GT/s" : (hw->bus.speed == ice_pcie_speed_5_0GT) ? "5.0GT/s" : (hw->bus.speed == ice_pcie_speed_2_5GT) ? "2.5GT/s" : "Unknown"), (hw->bus.width == ice_pcie_lnk_x32) ? "Width x32" : (hw->bus.width == ice_pcie_lnk_x16) ? "Width x16" : (hw->bus.width == ice_pcie_lnk_x12) ? "Width x12" : (hw->bus.width == ice_pcie_lnk_x8) ? "Width x8" : (hw->bus.width == ice_pcie_lnk_x4) ? "Width x4" : (hw->bus.width == ice_pcie_lnk_x2) ? "Width x2" : (hw->bus.width == ice_pcie_lnk_x1) ? "Width x1" : "Width Unknown"); } /** * ice_set_pci_link_status_data - store PCI bus info * @hw: pointer to hardware structure * @link_status: the link status word from PCI config space * * Stores the PCI bus info (speed, width, type) within the ice_hw structure **/ static void ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status) { u16 reg; hw->bus.type = ice_bus_pci_express; reg = (link_status & PCIEM_LINK_STA_WIDTH) >> 4; switch (reg) { case ice_pcie_lnk_x1: case ice_pcie_lnk_x2: case ice_pcie_lnk_x4: case ice_pcie_lnk_x8: case ice_pcie_lnk_x12: case ice_pcie_lnk_x16: case ice_pcie_lnk_x32: hw->bus.width = (enum ice_pcie_link_width)reg; break; default: hw->bus.width = ice_pcie_lnk_width_unknown; break; } reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x13; switch (reg) { case ice_pcie_speed_2_5GT: case ice_pcie_speed_5_0GT: case ice_pcie_speed_8_0GT: case ice_pcie_speed_16_0GT: hw->bus.speed = (enum ice_pcie_bus_speed)reg; break; default: hw->bus.speed = ice_pcie_speed_unknown; break; } } /** * ice_init_link_events - Initialize Link Status Events mask * @sc: the device softc * * Initialize the Link Status Events mask to disable notification of link * events we don't care about in software. Also request that link status * events be enabled. 
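* * Note on the mask semantics implied below: a set bit in the event mask suppresses the corresponding event, so writing ~wanted_events leaves only the link up/down, media-not-available, and module-qualification-failure events unmasked.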
*/ int ice_init_link_events(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; enum ice_status status; u16 wanted_events; /* Set the bits for the events that we want to be notified by */ wanted_events = (ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA | ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL); /* request that every event except the wanted events be masked */ status = ice_aq_set_event_mask(hw, hw->port_info->lport, ~wanted_events, NULL); if (status) { device_printf(sc->dev, "Failed to set link status event mask, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } /* Request link info with the LSE bit set to enable link status events */ status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL); if (status) { device_printf(sc->dev, "Failed to enable link status events, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } return (0); } /** * ice_handle_mdd_event - Handle possibly malicious events * @sc: the device softc * * Called by the admin task if an MDD detection interrupt is triggered. * Identifies possibly malicious events coming from VFs. It also triggers for * similar incorrect behavior from the PF. */ void ice_handle_mdd_event(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; bool mdd_detected = false, request_reinit = false; device_t dev = sc->dev; u32 reg; if (!ice_testandclear_state(&sc->state, ICE_STATE_MDD_PENDING)) return; reg = rd32(hw, GL_MDET_TX_TCLAN); if (reg & GL_MDET_TX_TCLAN_VALID_M) { u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S; u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> GL_MDET_TX_TCLAN_VF_NUM_S; u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> GL_MDET_TX_TCLAN_MAL_TYPE_S; u16 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S; device_printf(dev, "Malicious Driver Detection Tx Descriptor check event '%s' on Tx queue %u PF# %u VF# %u\n", ice_mdd_tx_tclan_str(event), queue, pf_num, vf_num); /* Only clear this event if it matches this PF, that way other * PFs can read the event and determine VF and queue number. */ if (pf_num == hw->pf_id) wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); mdd_detected = true; } /* Determine what triggered the MDD event */ reg = rd32(hw, GL_MDET_TX_PQM); if (reg & GL_MDET_TX_PQM_VALID_M) { u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> GL_MDET_TX_PQM_PF_NUM_S; u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> GL_MDET_TX_PQM_VF_NUM_S; u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> GL_MDET_TX_PQM_MAL_TYPE_S; u16 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> GL_MDET_TX_PQM_QNUM_S; device_printf(dev, "Malicious Driver Detection Tx Quanta check event '%s' on Tx queue %u PF# %u VF# %u\n", ice_mdd_tx_pqm_str(event), queue, pf_num, vf_num); /* Only clear this event if it matches this PF, that way other * PFs can read the event and determine VF and queue number.
*/ if (pf_num == hw->pf_id) wr32(hw, GL_MDET_TX_PQM, 0xffffffff); mdd_detected = true; } reg = rd32(hw, GL_MDET_RX); if (reg & GL_MDET_RX_VALID_M) { u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> GL_MDET_RX_PF_NUM_S; u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> GL_MDET_RX_VF_NUM_S; u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> GL_MDET_RX_MAL_TYPE_S; u16 queue = (reg & GL_MDET_RX_QNUM_M) >> GL_MDET_RX_QNUM_S; device_printf(dev, "Malicious Driver Detection Rx event '%s' on Rx queue %u PF# %u VF# %u\n", ice_mdd_rx_str(event), queue, pf_num, vf_num); /* Only clear this event if it matches this PF, that way other * PFs can read the event and determine VF and queue number. */ if (pf_num == hw->pf_id) wr32(hw, GL_MDET_RX, 0xffffffff); mdd_detected = true; } /* Now, confirm that this event actually affects this PF, by checking * the PF registers. */ if (mdd_detected) { reg = rd32(hw, PF_MDET_TX_TCLAN); if (reg & PF_MDET_TX_TCLAN_VALID_M) { wr32(hw, PF_MDET_TX_TCLAN, 0xffff); sc->soft_stats.tx_mdd_count++; request_reinit = true; } reg = rd32(hw, PF_MDET_TX_PQM); if (reg & PF_MDET_TX_PQM_VALID_M) { wr32(hw, PF_MDET_TX_PQM, 0xffff); sc->soft_stats.tx_mdd_count++; request_reinit = true; } reg = rd32(hw, PF_MDET_RX); if (reg & PF_MDET_RX_VALID_M) { wr32(hw, PF_MDET_RX, 0xffff); sc->soft_stats.rx_mdd_count++; request_reinit = true; } } /* TODO: Implement logic to detect and handle events caused by VFs. */ /* request that the upper stack re-initialize the Tx/Rx queues */ if (request_reinit) ice_request_stack_reinit(sc); ice_flush(hw); } /** * ice_init_dcb_setup - Initialize DCB settings for HW * @sc: the device softc * * This needs to be called after the fw_lldp_agent sysctl is added, since that * can update the device's LLDP agent status if a tunable value is set. * * Get and store the initial state of DCB settings on driver load. Print out * informational messages as well. */ void ice_init_dcb_setup(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; bool dcbx_agent_status; enum ice_status status; /* Don't do anything if DCB isn't supported */ if (!hw->func_caps.common_cap.dcb) { device_printf(dev, "%s: No DCB support\n", __func__); return; } hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw); if (hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_DONE && hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) { /* * Start DCBX agent, but not LLDP. The return value isn't * checked here because a more detailed dcbx agent status is * retrieved and checked in ice_init_dcb() and below. */ ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL); } /* This sets hw->port_info->qos_cfg.is_sw_lldp */ status = ice_init_dcb(hw, true); /* If there is an error, then FW LLDP is not in a usable state */ if (status != 0 && status != ICE_ERR_NOT_READY) { /* Don't print an error message if the return code from the AQ * cmd performed in ice_init_dcb() is EPERM; that means the * FW LLDP engine is disabled, and that is a valid state.
*/ if (!(status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EPERM)) { device_printf(dev, "DCB init failed, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } hw->port_info->qos_cfg.dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; } switch (hw->port_info->qos_cfg.dcbx_status) { case ICE_DCBX_STATUS_DIS: ice_debug(hw, ICE_DBG_DCB, "DCBX disabled\n"); break; case ICE_DCBX_STATUS_NOT_STARTED: ice_debug(hw, ICE_DBG_DCB, "DCBX not started\n"); break; case ICE_DCBX_STATUS_MULTIPLE_PEERS: ice_debug(hw, ICE_DBG_DCB, "DCBX detected multiple peers\n"); break; default: break; } /* LLDP disabled in FW */ if (hw->port_info->qos_cfg.is_sw_lldp) { ice_add_rx_lldp_filter(sc); device_printf(dev, "Firmware LLDP agent disabled\n"); } } /** * ice_handle_mib_change_event - helper function to log LLDP MIB change events * @sc: device softc * @event: event received on a control queue * * Prints out the type of an LLDP MIB change event in a DCB debug message. * * XXX: Should be extended to do more if the driver decides to notify other SW * of LLDP MIB changes, or needs to extract info from the MIB. */ static void ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event) { struct ice_aqc_lldp_get_mib *params = (struct ice_aqc_lldp_get_mib *)&event->desc.params.lldp_get_mib; u8 mib_type, bridge_type, tx_status; /* XXX: To get the contents of the MIB that caused the event, set the * ICE_DBG_AQ debug mask and read that output */ static const char* mib_type_strings[] = { "Local MIB", "Remote MIB", "Reserved", "Reserved" }; static const char* bridge_type_strings[] = { "Nearest Bridge", "Non-TPMR Bridge", "Reserved", "Reserved" }; static const char* tx_status_strings[] = { "Port's TX active", "Port's TX suspended and drained", "Reserved", "Port's TX suspended and drained; blocked TC pipe flushed" }; mib_type = (params->type & ICE_AQ_LLDP_MIB_TYPE_M) >> ICE_AQ_LLDP_MIB_TYPE_S; bridge_type = (params->type & ICE_AQ_LLDP_BRID_TYPE_M) >> ICE_AQ_LLDP_BRID_TYPE_S; tx_status = (params->type & ICE_AQ_LLDP_TX_M) >> ICE_AQ_LLDP_TX_S; ice_debug(&sc->hw, ICE_DBG_DCB, "LLDP MIB Change Event (%s, %s, %s)\n", mib_type_strings[mib_type], bridge_type_strings[bridge_type], tx_status_strings[tx_status]); } /** * ice_send_version - Send driver version to firmware * @sc: the device private softc * * Send the driver version to the firmware. This must be called as early as * possible after ice_init_hw(). */ int ice_send_version(struct ice_softc *sc) { struct ice_driver_ver driver_version = {0}; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; driver_version.major_ver = ice_major_version; driver_version.minor_ver = ice_minor_version; driver_version.build_ver = ice_patch_version; driver_version.subbuild_ver = ice_rc_version; strlcpy((char *)driver_version.driver_string, ice_driver_version, sizeof(driver_version.driver_string)); status = ice_aq_send_driver_ver(hw, &driver_version, NULL); if (status) { device_printf(dev, "Unable to send driver version to firmware, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } return (0); } /** * ice_handle_lan_overflow_event - helper function to log LAN overflow events * @sc: device softc * @event: event received on a control queue * * Prints out a message when a LAN overflow event is detected on a receive * queue.
*/ static void ice_handle_lan_overflow_event(struct ice_softc *sc, struct ice_rq_event_info *event) { struct ice_aqc_event_lan_overflow *params = (struct ice_aqc_event_lan_overflow *)&event->desc.params.lan_overflow; struct ice_hw *hw = &sc->hw; ice_debug(hw, ICE_DBG_DCB, "LAN overflow event detected, prtdcb_ruptq=0x%08x, qtx_ctl=0x%08x\n", LE32_TO_CPU(params->prtdcb_ruptq), LE32_TO_CPU(params->qtx_ctl)); } /** * ice_add_ethertype_to_list - Add an Ethertype filter to a filter list * @vsi: the VSI to target packets to * @list: the list to add the filter to * @ethertype: the Ethertype to filter on * @direction: The direction of the filter (Tx or Rx) * @action: the action to take * * Add an Ethertype filter to a filter list. Used to forward a series of * filters to the firmware for configuring the switch. * * Returns 0 on success, and an error code on failure. */ static int ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list, u16 ethertype, u16 direction, enum ice_sw_fwd_act_type action) { struct ice_fltr_list_entry *entry; MPASS((direction == ICE_FLTR_TX) || (direction == ICE_FLTR_RX)); entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO); if (!entry) return (ENOMEM); entry->fltr_info.flag = direction; entry->fltr_info.src_id = ICE_SRC_ID_VSI; entry->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; entry->fltr_info.fltr_act = action; entry->fltr_info.vsi_handle = vsi->idx; entry->fltr_info.l_data.ethertype_mac.ethertype = ethertype; LIST_ADD(&entry->list_entry, list); return 0; } #define ETHERTYPE_PAUSE_FRAMES 0x8808 #define ETHERTYPE_LLDP_FRAMES 0x88cc /** * ice_cfg_pf_ethertype_filters - Configure switch to drop ethertypes * @sc: the device private softc * * Configure the switch to drop PAUSE frames and LLDP frames transmitted from * the host. This prevents malicious VFs from sending these frames and being * able to control or configure the network. */ int ice_cfg_pf_ethertype_filters(struct ice_softc *sc) { struct ice_list_head ethertype_list; struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; int err = 0; INIT_LIST_HEAD(&ethertype_list); /* * Note that the switch filters will ignore the VSI index for the drop * action, so we only need to program drop filters once for the main * VSI. */ /* Configure switch to drop all Tx pause frames coming from any VSI. */ if (sc->enable_tx_fc_filter) { err = ice_add_ethertype_to_list(vsi, &ethertype_list, ETHERTYPE_PAUSE_FRAMES, ICE_FLTR_TX, ICE_DROP_PACKET); if (err) goto free_ethertype_list; } /* Configure switch to drop LLDP frames coming from any VSI */ if (sc->enable_tx_lldp_filter) { err = ice_add_ethertype_to_list(vsi, &ethertype_list, ETHERTYPE_LLDP_FRAMES, ICE_FLTR_TX, ICE_DROP_PACKET); if (err) goto free_ethertype_list; } status = ice_add_eth_mac(hw, &ethertype_list); if (status) { device_printf(dev, "Failed to add Tx Ethertype filters, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); err = (EIO); } free_ethertype_list: ice_free_fltr_list(&ethertype_list); return err; } /** * ice_add_rx_lldp_filter - add ethertype filter for Rx LLDP frames * @sc: the device private structure * * Add a switch ethertype filter which forwards the LLDP frames to the main PF * VSI. Called when the fw_lldp_agent is disabled, to allow the LLDP frames to * be forwarded to the stack.
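* * Two mechanisms are used: on firmware new enough to support LLDP filter control, a direct AQ command (ice_lldp_fltr_add_remove) installs the filter; otherwise a generic ethertype switch rule matching ETHERTYPE_LLDP_FRAMES (0x88cc) with an ICE_FWD_TO_VSI action is added instead.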
*/ static void ice_add_rx_lldp_filter(struct ice_softc *sc) { struct ice_list_head ethertype_list; struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; int err; u16 vsi_num; /* * If FW is new enough, use a direct AQ command to perform the filter * addition. */ if (ice_fw_supports_lldp_fltr_ctrl(hw)) { vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); status = ice_lldp_fltr_add_remove(hw, vsi_num, true); if (status) { device_printf(dev, "Failed to add Rx LLDP filter, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } else ice_set_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER); return; } INIT_LIST_HEAD(&ethertype_list); /* Forward Rx LLDP frames to the stack */ err = ice_add_ethertype_to_list(vsi, &ethertype_list, ETHERTYPE_LLDP_FRAMES, ICE_FLTR_RX, ICE_FWD_TO_VSI); if (err) { device_printf(dev, "Failed to add Rx LLDP filter, err %s\n", ice_err_str(err)); goto free_ethertype_list; } status = ice_add_eth_mac(hw, &ethertype_list); if (status && status != ICE_ERR_ALREADY_EXISTS) { device_printf(dev, "Failed to add Rx LLDP filter, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } else { /* * If status == ICE_ERR_ALREADY_EXISTS, we won't treat an * already existing filter as an error case. */ ice_set_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER); } free_ethertype_list: ice_free_fltr_list(&ethertype_list); } /** * ice_del_rx_lldp_filter - Remove ethertype filter for Rx LLDP frames * @sc: the device private structure * * Remove the switch filter forwarding LLDP frames to the main PF VSI, called * when the firmware LLDP agent is enabled, to stop routing LLDP frames to the * stack. */ static void ice_del_rx_lldp_filter(struct ice_softc *sc) { struct ice_list_head ethertype_list; struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; int err; u16 vsi_num; /* * Only in the scenario where the driver added the filter during * this session (while the driver was loaded) would we be able to * delete this filter. */ if (!ice_test_state(&sc->state, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER)) return; /* * If FW is new enough, use a direct AQ command to perform the filter * removal. */ if (ice_fw_supports_lldp_fltr_ctrl(hw)) { vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); status = ice_lldp_fltr_add_remove(hw, vsi_num, false); if (status) { device_printf(dev, "Failed to remove Rx LLDP filter, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } return; } INIT_LIST_HEAD(&ethertype_list); /* Remove filter forwarding Rx LLDP frames to the stack */ err = ice_add_ethertype_to_list(vsi, &ethertype_list, ETHERTYPE_LLDP_FRAMES, ICE_FLTR_RX, ICE_FWD_TO_VSI); if (err) { device_printf(dev, "Failed to remove Rx LLDP filter, err %s\n", ice_err_str(err)); goto free_ethertype_list; } status = ice_remove_eth_mac(hw, &ethertype_list); if (status == ICE_ERR_DOES_NOT_EXIST) { ; /* Don't complain if we try to remove a filter that doesn't exist */ } else if (status) { device_printf(dev, "Failed to remove Rx LLDP filter, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } free_ethertype_list: ice_free_fltr_list(&ethertype_list); } /** * ice_init_link_configuration -- Set up link in different ways depending * on whether media is available or not.
* @sc: device private structure * * Called at the end of the attach process to either set default link * parameters if there is media available, or force HW link down and * set a state bit if there is no media. */ void ice_init_link_configuration(struct ice_softc *sc) { struct ice_port_info *pi = sc->hw.port_info; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; pi->phy.get_link_info = true; status = ice_get_link_status(pi, &sc->link_up); if (status != ICE_SUCCESS) { device_printf(dev, "%s: ice_get_link_status failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return; } if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA); /* Apply default link settings */ - ice_apply_saved_phy_cfg(sc); + ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC); } else { /* Set link down, and poll for media available in timer. This prevents the * driver from receiving spurious link-related events. */ ice_set_state(&sc->state, ICE_STATE_NO_MEDIA); status = ice_aq_set_link_restart_an(pi, false, NULL); if (status != ICE_SUCCESS) device_printf(dev, "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } } /** * ice_apply_saved_phy_req_to_cfg -- Write saved user PHY settings to cfg data - * @pi: port info struct - * @pcaps: TOPO_CAPS capability data to use for defaults + * @sc: device private structure * @cfg: new PHY config data to be modified * * Applies user settings for advertised speeds to the PHY type fields in the * supplied PHY config struct. It uses the data from pcaps to check if the * saved settings are invalid and uses the pcaps data instead if they are * invalid. 
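* * Note that in the reworked version below, the capability data is obtained internally through ice_intersect_phy_types_and_speeds(), with the report mode selected based on the enabled feature bits, rather than from a caller-supplied pcaps argument.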
*/ -static void -ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, +static int +ice_apply_saved_phy_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg) { + struct ice_phy_data phy_data = { 0 }; + struct ice_port_info *pi = sc->hw.port_info; u64 phy_low = 0, phy_high = 0; + u16 link_speeds; + int ret; + + link_speeds = pi->phy.curr_user_speed_req; - ice_update_phy_type(&phy_low, &phy_high, pi->phy.curr_user_speed_req); - cfg->phy_type_low = pcaps->phy_type_low & htole64(phy_low); - cfg->phy_type_high = pcaps->phy_type_high & htole64(phy_high); + if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2)) { + memset(&phy_data, 0, sizeof(phy_data)); + phy_data.report_mode = ICE_AQC_REPORT_DFLT_CFG; + phy_data.user_speeds_orig = link_speeds; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret != 0) { + /* Error message already printed within function */ + return (ret); + } + phy_low = phy_data.phy_low_intr; + phy_high = phy_data.phy_high_intr; + + if (link_speeds == 0 || phy_data.user_speeds_intr) + goto finalize_link_speed; + if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { + memset(&phy_data, 0, sizeof(phy_data)); + phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA; + phy_data.user_speeds_orig = link_speeds; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret != 0) { + /* Error message already printed within function */ + return (ret); + } + phy_low = phy_data.phy_low_intr; + phy_high = phy_data.phy_high_intr; - /* Can't use saved user speed request; use NVM default PHY capabilities */ - if (!cfg->phy_type_low && !cfg->phy_type_high) { - cfg->phy_type_low = pcaps->phy_type_low; - cfg->phy_type_high = pcaps->phy_type_high; + if (!phy_data.user_speeds_intr) { + phy_low = phy_data.phy_low_orig; + phy_high = phy_data.phy_high_orig; + } + goto finalize_link_speed; + } + /* If we're here, then it means the benefits of Version 2 + * link management aren't utilized. We fall through to + * handling Strict Link Mode the same as Version 1 link + * management. + */ + } + + memset(&phy_data, 0, sizeof(phy_data)); + if ((link_speeds == 0) && + (sc->ldo_tlv.phy_type_low || sc->ldo_tlv.phy_type_high)) + phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA; + else + phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA; + phy_data.user_speeds_orig = link_speeds; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret != 0) { + /* Error message already printed within function */ + return (ret); + } + phy_low = phy_data.phy_low_intr; + phy_high = phy_data.phy_high_intr; + + if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) { + if (phy_low == 0 && phy_high == 0) { + device_printf(sc->dev, + "The selected speed is not supported by the current media. 
Please select a link speed that is supported by the current media.\n"); + return (EINVAL); + } + } else { + if (link_speeds == 0) { + if (sc->ldo_tlv.phy_type_low & phy_low || + sc->ldo_tlv.phy_type_high & phy_high) { + phy_low &= sc->ldo_tlv.phy_type_low; + phy_high &= sc->ldo_tlv.phy_type_high; + } + } else if (phy_low == 0 && phy_high == 0) { + memset(&phy_data, 0, sizeof(phy_data)); + phy_data.report_mode = ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA; + phy_data.user_speeds_orig = link_speeds; + ret = ice_intersect_phy_types_and_speeds(sc, &phy_data); + if (ret != 0) { + /* Error message already printed within function */ + return (ret); + } + phy_low = phy_data.phy_low_intr; + phy_high = phy_data.phy_high_intr; + + if (!phy_data.user_speeds_intr) { + phy_low = phy_data.phy_low_orig; + phy_high = phy_data.phy_high_orig; + } + } } + +finalize_link_speed: + + /* Cache new user settings for speeds */ + pi->phy.curr_user_speed_req = phy_data.user_speeds_intr; + cfg->phy_type_low = htole64(phy_low); + cfg->phy_type_high = htole64(phy_high); + + return (ret); } /** * ice_apply_saved_fec_req_to_cfg -- Write saved user FEC mode to cfg data - * @pi: port info struct - * @pcaps: TOPO_CAPS capability data to use for defaults + * @sc: device private structure * @cfg: new PHY config data to be modified * * Applies user setting for FEC mode to PHY config struct. It uses the data * from pcaps to check if the saved settings are invalid and uses the pcaps * data instead if they are invalid. */ -static void -ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, +static int +ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc, struct ice_aqc_set_phy_cfg_data *cfg) { - ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); + struct ice_port_info *pi = sc->hw.port_info; + enum ice_status status; - /* Can't use saved user FEC mode; use NVM default PHY capabilities */ - if (cfg->link_fec_opt && - !(cfg->link_fec_opt & pcaps->link_fec_options)) { - cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; - cfg->link_fec_opt = pcaps->link_fec_options; - } + cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC; + status = ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); + if (status) + return (EIO); + + return (0); } /** * ice_apply_saved_fc_req_to_cfg -- Write saved user flow control mode to cfg data * @pi: port info struct * @cfg: new PHY config data to be modified * * Applies user setting for flow control mode to PHY config struct. There are * no invalid flow control mode settings; if there are, then this function * treats them like "ICE_FC_NONE". 
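* * Concretely, ICE_FC_FULL sets both the Tx and Rx pause ability bits in cfg->caps, the single-direction modes set one bit each, and any unrecognized mode leaves both bits cleared, which is equivalent to ICE_FC_NONE.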
*/ static void ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg) { cfg->caps &= ~(ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY); switch (pi->phy.curr_user_fc_req) { case ICE_FC_FULL: cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY | - ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; + ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; break; case ICE_FC_RX_PAUSE: cfg->caps |= ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY; break; case ICE_FC_TX_PAUSE: cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY; break; default: /* ICE_FC_NONE */ break; } } -/** - * ice_apply_saved_user_req_to_cfg -- Apply all saved user settings to AQ cfg data - * @pi: port info struct - * @pcaps: TOPO_CAPS capability data to use for defaults - * @cfg: new PHY config data to be modified - * - * Applies user settings for advertised speeds, FEC mode, and flow control - * mode to the supplied PHY config struct; it uses the data from pcaps to check - * if the saved settings are invalid and uses the pcaps data instead if they - * are invalid. - */ -static void -ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi, - struct ice_aqc_get_phy_caps_data *pcaps, - struct ice_aqc_set_phy_cfg_data *cfg) -{ - ice_apply_saved_phy_req_to_cfg(pi, pcaps, cfg); - ice_apply_saved_fec_req_to_cfg(pi, pcaps, cfg); - ice_apply_saved_fc_req_to_cfg(pi, cfg); -} - /** * ice_apply_saved_phy_cfg -- Re-apply user PHY config settings * @sc: device private structure + * @settings: which settings to apply * - * Takes the saved user PHY config settings, overwrites the NVM - * default with them if they're valid, and uses the Set PHY Config AQ command - * to apply them. - * - * Intended for use when media is inserted. + * Applies user settings for advertised speeds, FEC mode, and flow + * control mode to a PHY config struct; it uses the data from pcaps + * to check if the saved settings are invalid and uses the pcaps + * data instead if they are invalid. * - * @pre Port has media available + * For things like sysctls where only one setting needs to be + * updated, the bitmap allows the caller to specify which setting + * to update. 
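+ *
+ * A minimal caller sketch (hypothetical call site; real callers may
+ * differ): after a sysctl handler stores a new mode in
+ * pi->phy.curr_user_fec_req, it could re-apply just that one setting:
+ *
+ *	error = ice_apply_saved_phy_cfg(sc, ICE_APPLY_FEC);
+ *
+ * A full re-apply of all three settings would pass ICE_APPLY_LS_FEC_FC
+ * instead.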
 */
-void
-ice_apply_saved_phy_cfg(struct ice_softc *sc)
+int
+ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings)
 {
 	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 	struct ice_port_info *pi = sc->hw.port_info;
 	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 	struct ice_hw *hw = &sc->hw;
 	device_t dev = sc->dev;
+	u64 phy_low, phy_high;
 	enum ice_status status;
+	enum ice_fec_mode dflt_fec_mode;
+	enum ice_fc_mode dflt_fc_mode;
+	u16 dflt_user_speed;
+
+	if (!settings || settings > ICE_APPLY_LS_FEC_FC) {
+		ice_debug(hw, ICE_DBG_LINK, "Settings out-of-bounds: %u\n",
+		    settings);
+		return (EINVAL);
+	}

-	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 				     &pcaps, NULL);
 	if (status != ICE_SUCCESS) {
 		device_printf(dev,
-		    "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n",
+		    "%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n",
 		    __func__, ice_status_str(status),
 		    ice_aq_str(hw->adminq.sq_last_status));
-		return;
+		return (EIO);
 	}

+	phy_low = le64toh(pcaps.phy_type_low);
+	phy_high = le64toh(pcaps.phy_type_high);
+
+	/* Save off initial config parameters */
+	dflt_user_speed = ice_aq_phy_types_to_link_speeds(phy_low, phy_high);
+	dflt_fec_mode = ice_caps_to_fec_mode(pcaps.caps, pcaps.link_fec_options);
+	dflt_fc_mode = ice_caps_to_fc_mode(pcaps.caps);
+
 	/* Setup new PHY config */
 	ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);

-	/* Apply settings requested by user */
-	ice_apply_saved_user_req_to_cfg(pi, &pcaps, &cfg);
+	/* On error, restore active configuration values */
+	if ((settings & ICE_APPLY_LS) &&
+	    ice_apply_saved_phy_req_to_cfg(sc, &cfg)) {
+		pi->phy.curr_user_speed_req = dflt_user_speed;
+		cfg.phy_type_low = pcaps.phy_type_low;
+		cfg.phy_type_high = pcaps.phy_type_high;
+	}
+	if ((settings & ICE_APPLY_FEC) &&
+	    ice_apply_saved_fec_req_to_cfg(sc, &cfg)) {
+		pi->phy.curr_user_fec_req = dflt_fec_mode;
+	}
+	if (settings & ICE_APPLY_FC) {
+		/* No real error indicators for this process,
+		 * so we'll just have to assume it works. */
+		ice_apply_saved_fc_req_to_cfg(pi, &cfg);
+	}

 	/* Enable link and re-negotiate it */
 	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;

 	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
 	if (status != ICE_SUCCESS) {
+		/* Don't indicate failure if there's no media in the port.
+		 * The settings have been saved and will apply when media
+		 * is inserted.
+		 */
 		if ((status == ICE_ERR_AQ_ERROR) &&
-		    (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY))
+		    (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)) {
 			device_printf(dev,
-			    "%s: User PHY cfg not applied; no media in port\n",
+			    "%s: Setting will be applied when media is inserted\n",
 			    __func__);
-		else
+			return (0);
+		} else {
 			device_printf(dev,
 			    "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n",
 			    __func__, ice_status_str(status),
 			    ice_aq_str(hw->adminq.sq_last_status));
+			return (EIO);
+		}
 	}
+
+	return (0);
 }

 /**
  * ice_print_ldo_tlv - Print out LDO TLV information
  * @sc: device private structure
  * @tlv: LDO TLV information from the adapter NVM
  *
  * Dump out the information in tlv to the kernel message buffer; intended for
  * debugging purposes.
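 *
 * Example output (unit number and values are illustrative only):
 *
 *	ice0: TLV: -options 0x01
 *	ice0:      -phy_config 0x02
 *	ice0:      -fec_options 0x04
 *	ice0:      -phy_high 0x0000000000000000
 *	ice0:      -phy_low 0x00000000000007ff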
*/ static void ice_print_ldo_tlv(struct ice_softc *sc, struct ice_link_default_override_tlv *tlv) { device_t dev = sc->dev; device_printf(dev, "TLV: -options 0x%02x\n", tlv->options); device_printf(dev, " -phy_config 0x%02x\n", tlv->phy_config); device_printf(dev, " -fec_options 0x%02x\n", tlv->fec_options); device_printf(dev, " -phy_high 0x%016llx\n", (unsigned long long)tlv->phy_type_high); device_printf(dev, " -phy_low 0x%016llx\n", (unsigned long long)tlv->phy_type_low); } /** * ice_set_link_management_mode -- Strict or lenient link management * @sc: device private structure * * Some NVMs give the adapter the option to advertise a superset of link * configurations. This checks to see if that option is enabled. * Further, the NVM could also provide a specific set of configurations * to try; these are cached in the driver's private structure if they * are available. */ void ice_set_link_management_mode(struct ice_softc *sc) { struct ice_port_info *pi = sc->hw.port_info; device_t dev = sc->dev; struct ice_link_default_override_tlv tlv = { 0 }; enum ice_status status; /* Port must be in strict mode if FW version is below a certain * version. (i.e. Don't set lenient mode features) */ if (!(ice_fw_supports_link_override(&sc->hw))) return; status = ice_get_link_default_override(&tlv, pi); if (status != ICE_SUCCESS) { device_printf(dev, "%s: ice_get_link_default_override failed; status %s, aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(sc->hw.adminq.sq_last_status)); return; } if (sc->hw.debug_mask & ICE_DBG_LINK) ice_print_ldo_tlv(sc, &tlv); /* Set lenient link mode */ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LENIENT_LINK_MODE) && (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE))) ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_en); + /* FW supports reporting a default configuration */ + if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LINK_MGMT_VER_2) && + ice_fw_supports_report_dflt_cfg(&sc->hw)) { + ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_en); + /* Knowing we're at a high enough firmware revision to + * support this link management configuration, we don't + * need to check/support earlier versions. + */ + return; + } + /* Default overrides only work if in lenient link mode */ - if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DEFAULT_OVERRIDE) && + if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LINK_MGMT_VER_1) && ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE) && (tlv.options & ICE_LINK_OVERRIDE_EN)) - ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_en); + ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_en); - /* Cache the LDO TLV structure in the driver, since it won't change - * during the driver's lifetime. + /* Cache the LDO TLV structure in the driver, since it + * won't change during the driver's lifetime. */ sc->ldo_tlv = tlv; } /** * ice_init_saved_phy_cfg -- Set cached user PHY cfg settings with NVM defaults * @sc: device private structure * * This should be called before the tunables for these link settings * (e.g. advertise_speed) are added -- so that these defaults don't overwrite * the cached values that the sysctl handlers will write. * * This also needs to be called before ice_init_link_configuration, to ensure * that there are sane values that can be written if there is media available * in the port. 
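 *
 * One attach-time ordering consistent with these constraints (a sketch
 * only; the real attach path interleaves other steps, and which routine
 * registers the advertise_speed tunable is an assumption here):
 *
 *	ice_set_link_management_mode(sc);  <- may enable LINK_MGMT_VER_2
 *	ice_init_saved_phy_cfg(sc);        <- cache default PHY settings
 *	ice_add_device_tunables(sc);       <- link-related tunables
 *	ice_init_link_configuration(sc);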
*/ void ice_init_saved_phy_cfg(struct ice_softc *sc) { struct ice_port_info *pi = sc->hw.port_info; struct ice_aqc_get_phy_caps_data pcaps = { 0 }; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; u64 phy_low, phy_high; + u8 report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, - &pcaps, NULL); + if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2)) + report_mode = ICE_AQC_REPORT_DFLT_CFG; + status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL); if (status != ICE_SUCCESS) { device_printf(dev, - "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n", - __func__, ice_status_str(status), + "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", + __func__, + report_mode == ICE_AQC_REPORT_DFLT_CFG ? "DFLT" : "w/MEDIA", + ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return; } phy_low = le64toh(pcaps.phy_type_low); phy_high = le64toh(pcaps.phy_type_high); /* Save off initial config parameters */ pi->phy.curr_user_speed_req = - ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high); + ice_aq_phy_types_to_link_speeds(phy_low, phy_high); pi->phy.curr_user_fec_req = ice_caps_to_fec_mode(pcaps.caps, pcaps.link_fec_options); pi->phy.curr_user_fc_req = ice_caps_to_fc_mode(pcaps.caps); } /** * ice_module_init - Driver callback to handle module load * * Callback for handling module load events. This function should initialize * any data structures that are used for the life of the device driver. */ static int ice_module_init(void) { return (0); } /** * ice_module_exit - Driver callback to handle module exit * * Callback for handling module unload events. This function should release * any resources initialized during ice_module_init. * * If this function returns non-zero, the module will not be unloaded. It * should only return such a value if the module cannot be unloaded at all, * such as due to outstanding memory references that cannot be revoked. */ static int ice_module_exit(void) { return (0); } /** * ice_module_event_handler - Callback for module events * @mod: unused module_t parameter * @what: the event requested * @arg: unused event argument * * Callback used to handle module events from the stack. Used to allow the * driver to define custom behavior that should happen at module load and * unload. */ int ice_module_event_handler(module_t __unused mod, int what, void __unused *arg) { switch (what) { case MOD_LOAD: return ice_module_init(); case MOD_UNLOAD: return ice_module_exit(); default: /* TODO: do we need to handle MOD_QUIESCE and MOD_SHUTDOWN? */ return (EOPNOTSUPP); } } /** * ice_handle_nvm_access_ioctl - Handle an NVM access ioctl request * @sc: the device private softc * @ifd: ifdrv ioctl request pointer */ int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd) { union ice_nvm_access_data *data; struct ice_nvm_access_cmd *cmd; size_t ifd_len = ifd->ifd_len, malloc_len; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; u8 *nvm_buffer; int err; /* * ifioctl forwards SIOCxDRVSPEC to iflib without performing * a privilege check. In turn, iflib forwards the ioctl to the driver * without performing a privilege check. Perform one here to ensure * that non-privileged threads cannot access this interface. */ err = priv_check(curthread, PRIV_DRIVER); if (err) return (err); if (ifd_len < sizeof(struct ice_nvm_access_cmd)) { device_printf(dev, "%s: ifdrv length is too small. 
Got %zu, but expected %zu\n",
 			      __func__, ifd_len,
 			      sizeof(struct ice_nvm_access_cmd));
 		return (EINVAL);
 	}

 	if (ifd->ifd_data == NULL) {
 		device_printf(dev, "%s: ifd data buffer not present.\n",
 			      __func__);
 		return (EINVAL);
 	}

 	/*
 	 * If everything works correctly, ice_handle_nvm_access should not
 	 * modify data past the size of the ioctl length. However, it could
 	 * lead to memory corruption if it did. Make sure to allocate at least
 	 * enough space for the command and data regardless. This
 	 * ensures that any access to the data union will not access invalid
 	 * memory.
 	 */
 	malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd));
 	nvm_buffer = (u8 *)malloc(malloc_len, M_ICE, M_ZERO | M_WAITOK);
 	if (!nvm_buffer)
 		return (ENOMEM);

 	/* Copy the NVM access command and data in from user space */
 	/* coverity[tainted_data_argument] */
 	err = copyin(ifd->ifd_data, nvm_buffer, ifd_len);
 	if (err) {
 		device_printf(dev, "%s: Copying request from user space failed, err %s\n",
 			      __func__, ice_err_str(err));
 		goto cleanup_free_nvm_buffer;
 	}

 	/*
 	 * The NVM command structure is immediately followed by data which
 	 * varies in size based on the command.
 	 */
 	cmd = (struct ice_nvm_access_cmd *)nvm_buffer;
 	data = (union ice_nvm_access_data *)(nvm_buffer + sizeof(struct ice_nvm_access_cmd));

 	/* Handle the NVM access request */
 	status = ice_handle_nvm_access(hw, cmd, data);
 	if (status)
 		ice_debug(hw, ICE_DBG_NVM, "NVM access request failed, err %s\n",
 			  ice_status_str(status));

 	/* Copy the possibly modified contents of the handled request out */
 	err = copyout(nvm_buffer, ifd->ifd_data, ifd_len);
 	if (err) {
 		device_printf(dev, "%s: Copying response back to user space failed, err %s\n",
 			      __func__, ice_err_str(err));
 		goto cleanup_free_nvm_buffer;
 	}

 	/* Convert private status to an error code for proper ioctl response */
 	switch (status) {
 	case ICE_SUCCESS:
 		err = (0);
 		break;
 	case ICE_ERR_NO_MEMORY:
 		err = (ENOMEM);
 		break;
 	case ICE_ERR_OUT_OF_RANGE:
 		err = (ENOTTY);
 		break;
 	case ICE_ERR_PARAM:
 	default:
 		err = (EINVAL);
 		break;
 	}

 cleanup_free_nvm_buffer:
 	free(nvm_buffer, M_ICE);
 	return err;
 }

 /**
  * ice_read_sff_eeprom - Read data from SFF eeprom
  * @sc: device softc
  * @dev_addr: I2C device address (typically 0xA0 or 0xA2)
  * @offset: offset into the eeprom
  * @data: pointer to data buffer to store read data in
  * @length: length to read; max length is 16
  *
  * Read from the SFF eeprom in the module for this PF's port. For more details
  * on the contents of an SFF eeprom, refer to SFF-8472 (SFP), SFF-8636 (QSFP),
  * and SFF-8024 (both).
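 *
 * For example, reading one byte from dev_addr 0xA0 at offset 0 returns the
 * SFF-8024 identifier byte (0x3 for SFP, 0xD for QSFP+, 0x11 for QSFP28),
 * which is how ice_sysctl_read_i2c_diag_data() below decides which
 * diagnostic offsets to use:
 *
 *	u8 id;
 *	int err = ice_read_sff_eeprom(sc, 0xA0, 0, &id, 1);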
*/ int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length) { struct ice_hw *hw = &sc->hw; - int error = 0, retries = 0; + int ret = 0, retries = 0; enum ice_status status; if (length > 16) return (EINVAL); if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (ENOSYS); if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) return (ENXIO); do { status = ice_aq_sff_eeprom(hw, 0, dev_addr, offset, 0, 0, data, length, false, NULL); if (!status) { - error = 0; + ret = 0; break; } if (status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) { - error = EBUSY; + ret = EBUSY; continue; } if (status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EACCES) { /* FW says I2C access isn't supported */ - error = EACCES; + ret = EACCES; break; } if (status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EPERM) { device_printf(sc->dev, "%s: Module pointer location specified in command does not permit the required operation.\n", __func__); - error = EPERM; + ret = EPERM; break; } else { device_printf(sc->dev, "%s: Error reading I2C data: err %s aq_err %s\n", __func__, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); - error = EIO; + ret = EIO; break; } } while (retries++ < ICE_I2C_MAX_RETRIES); - if (error == EBUSY) + if (ret == EBUSY) device_printf(sc->dev, "%s: Error reading I2C data after %d retries\n", __func__, ICE_I2C_MAX_RETRIES); - return (error); + return (ret); } /** * ice_handle_i2c_req - Driver independent I2C request handler * @sc: device softc * @req: The I2C parameters to use * * Read from the port's I2C eeprom using the parameters from the ioctl. */ int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req) { return ice_read_sff_eeprom(sc, req->dev_addr, req->offset, req->data, req->len); } /** * ice_sysctl_read_i2c_diag_data - Read some module diagnostic data via i2c * @oidp: sysctl oid structure * @arg1: pointer to private data structure * @arg2: unused * @req: sysctl request pointer * * Read 8 bytes of diagnostic data from the SFF eeprom in the (Q)SFP module * inserted into the port. 
* * | SFP A2 | QSFP Lower Page * ------------|---------|---------------- * Temperature | 96-97 | 22-23 * Vcc | 98-99 | 26-27 * TX power | 102-103 | 34-35..40-41 * RX power | 104-105 | 50-51..56-57 */ static int ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; device_t dev = sc->dev; struct sbuf *sbuf; - int error = 0; + int ret; u8 data[16]; UNREFERENCED_PARAMETER(arg2); UNREFERENCED_PARAMETER(oidp); if (ice_driver_is_detaching(sc)) return (ESHUTDOWN); if (req->oldptr == NULL) { - error = SYSCTL_OUT(req, 0, 128); - return (error); + ret = SYSCTL_OUT(req, 0, 128); + return (ret); } - error = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1); - if (error) - return (error); + ret = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1); + if (ret) + return (ret); /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */ if (data[0] == 0x3) { /* * Check for: * - Internally calibrated data * - Diagnostic monitoring is implemented */ ice_read_sff_eeprom(sc, 0xA0, 92, data, 1); if (!(data[0] & 0x60)) { device_printf(dev, "Module doesn't support diagnostics: 0xA0[92] = %02X\n", data[0]); return (ENODEV); } sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); ice_read_sff_eeprom(sc, 0xA2, 96, data, 4); for (int i = 0; i < 4; i++) sbuf_printf(sbuf, "%02X ", data[i]); ice_read_sff_eeprom(sc, 0xA2, 102, data, 4); for (int i = 0; i < 4; i++) sbuf_printf(sbuf, "%02X ", data[i]); } else if (data[0] == 0xD || data[0] == 0x11) { /* * QSFP+ modules are always internally calibrated, and must indicate * what types of diagnostic monitoring are implemented */ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); ice_read_sff_eeprom(sc, 0xA0, 22, data, 2); for (int i = 0; i < 2; i++) sbuf_printf(sbuf, "%02X ", data[i]); ice_read_sff_eeprom(sc, 0xA0, 26, data, 2); for (int i = 0; i < 2; i++) sbuf_printf(sbuf, "%02X ", data[i]); ice_read_sff_eeprom(sc, 0xA0, 34, data, 2); for (int i = 0; i < 2; i++) sbuf_printf(sbuf, "%02X ", data[i]); ice_read_sff_eeprom(sc, 0xA0, 50, data, 2); for (int i = 0; i < 2; i++) sbuf_printf(sbuf, "%02X ", data[i]); } else { device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", data[0]); return (ENODEV); } sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } /** * ice_alloc_intr_tracking - Setup interrupt tracking structures * @sc: device softc structure * * Sets up the resource manager for keeping track of interrupt allocations, * and initializes the tracking maps for the PF's interrupt allocations. * * Unlike the scheme for queues, this is done in one step since both the * manager and the maps both have the same lifetime. * * @returns 0 on success, or an error code on failure. 
*/ int ice_alloc_intr_tracking(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; int err; /* Initialize the interrupt allocation manager */ err = ice_resmgr_init_contig_only(&sc->imgr, hw->func_caps.common_cap.num_msix_vectors); if (err) { device_printf(dev, "Unable to initialize PF interrupt manager: %s\n", ice_err_str(err)); return (err); } /* Allocate PF interrupt mapping storage */ if (!(sc->pf_imap = (u16 *)malloc(sizeof(u16) * hw->func_caps.common_cap.num_msix_vectors, M_ICE, M_NOWAIT))) { device_printf(dev, "Unable to allocate PF imap memory\n"); err = ENOMEM; goto free_imgr; } for (u32 i = 0; i < hw->func_caps.common_cap.num_msix_vectors; i++) { sc->pf_imap[i] = ICE_INVALID_RES_IDX; } return (0); free_imgr: ice_resmgr_destroy(&sc->imgr); return (err); } /** * ice_free_intr_tracking - Free PF interrupt tracking structures * @sc: device softc structure * * Frees the interrupt resource allocation manager and the PF's owned maps. * * VF maps are released when the owning VF's are destroyed, which should always * happen before this function is called. */ void ice_free_intr_tracking(struct ice_softc *sc) { if (sc->pf_imap) { ice_resmgr_release_map(&sc->imgr, sc->pf_imap, sc->lan_vectors); free(sc->pf_imap, M_ICE); sc->pf_imap = NULL; } ice_resmgr_destroy(&sc->imgr); } /** * ice_apply_supported_speed_filter - Mask off unsupported speeds - * @phy_type_low: bit-field for the low quad word of PHY types - * @phy_type_high: bit-field for the high quad word of PHY types + * @report_speeds: bit-field for the desired link speeds * - * Given the two quad words containing the supported PHY types, + * Given a bitmap of the desired lenient mode link speeds, * this function will mask off the speeds that are not currently * supported by the device. */ -static void -ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high) +static u16 +ice_apply_supported_speed_filter(u16 report_speeds) { - u64 phylow_mask; + u16 speed_mask; /* We won't offer anything lower than 1G for any part, * but we also won't offer anything under 25G for 100G - * parts. + * parts or under 10G for 50G parts. */ - phylow_mask = ~(ICE_PHY_TYPE_LOW_1000BASE_T - 1); - if (*phy_type_high || - *phy_type_low & ~(ICE_PHY_TYPE_LOW_100GBASE_CR4 - 1)) - phylow_mask = ~(ICE_PHY_TYPE_LOW_25GBASE_T - 1); - *phy_type_low &= phylow_mask; + speed_mask = ~((u16)ICE_AQ_LINK_SPEED_1000MB - 1); + if (report_speeds & ICE_AQ_LINK_SPEED_50GB) + speed_mask = ~((u16)ICE_AQ_LINK_SPEED_10GB - 1); + if (report_speeds & ICE_AQ_LINK_SPEED_100GB) + speed_mask = ~((u16)ICE_AQ_LINK_SPEED_25GB - 1); + return (report_speeds & speed_mask); } /** - * ice_get_phy_types - Report appropriate PHY types - * @sc: device softc structure - * @phy_type_low: bit-field for the low quad word of PHY types - * @phy_type_high: bit-field for the high quad word of PHY types - * - * Populate the two quad words with bits representing the PHY types - * supported by the device. This is really just a wrapper around - * the ice_aq_get_phy_caps() that chooses the appropriate report - * mode (lenient or strict) and reports back only the relevant PHY - * types. In lenient mode the capabilities are retrieved with the - * NVM_CAP report mode, otherwise they're retrieved using the - * TOPO_CAP report mode (NVM intersected with current media). + * ice_init_health_events - Enable FW health event reporting + * @sc: device softc * - * @returns 0 on success, or an error code on failure. 
+ * Will try to enable firmware health event reporting, but shouldn't + * cause any grief (to the caller) if this fails. */ -static enum ice_status -ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high) +void +ice_init_health_events(struct ice_softc *sc) { - struct ice_aqc_get_phy_caps_data pcaps = { 0 }; - struct ice_port_info *pi = sc->hw.port_info; - device_t dev = sc->dev; enum ice_status status; - u8 report_mode; + u8 health_mask; - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE)) - report_mode = ICE_AQC_REPORT_NVM_CAP; - else - report_mode = ICE_AQC_REPORT_TOPO_CAP; - status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL); - if (status != ICE_SUCCESS) { - device_printf(dev, - "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n", - __func__, (report_mode) ? "TOPO_CAP" : "NVM_CAP", + if ((!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HEALTH_STATUS)) || + (!sc->enable_health_events)) + return; + + health_mask = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK | + ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK; + + status = ice_aq_set_health_status_config(&sc->hw, health_mask, NULL); + if (status) + device_printf(sc->dev, + "Failed to enable firmware health events, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(sc->hw.adminq.sq_last_status)); - return (status); + else + ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_en); +} + +/** + * ice_print_health_status_string - Print message for given FW health event + * @dev: the PCIe device + * @elem: health status element containing status code + * + * A rather large list of possible health status codes and their associated + * messages. + */ +static void +ice_print_health_status_string(device_t dev, + struct ice_aqc_health_status_elem *elem) +{ + u16 status_code = le16toh(elem->health_status_code); + + switch (status_code) { + case ICE_AQC_HEALTH_STATUS_INFO_RECOVERY: + device_printf(dev, "The device is in firmware recovery mode.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS: + device_printf(dev, "The flash chip cannot be accessed.\n"); + device_printf(dev, "Possible Solution: If issue persists, call customer support.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH: + device_printf(dev, "NVM authentication failed.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH: + device_printf(dev, "Option ROM authentication failed.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH: + device_printf(dev, "DDP package failed.\n"); + device_printf(dev, "Possible Solution: Update to latest base driver and DDP package.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT: + device_printf(dev, "NVM image is incompatible.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT: + device_printf(dev, "Option ROM is incompatible.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB: + device_printf(dev, "Supplied MIB file is invalid. 
DCB reverted to default configuration.\n"); + device_printf(dev, "Possible Solution: Disable FW-LLDP and check DCBx system configuration.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT: + device_printf(dev, "An unsupported module was detected.\n"); + device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); + device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE: + device_printf(dev, "Module type is not supported.\n"); + device_printf(dev, "Possible Solution: Change or replace the module or cable.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL: + device_printf(dev, "Module is not qualified.\n"); + device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); + device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); + device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM: + device_printf(dev, "Device cannot communicate with the module.\n"); + device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); + device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); + device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT: + device_printf(dev, "Unresolved module conflict.\n"); + device_printf(dev, "Possible Solution 1: Manually set speed/duplex or use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + device_printf(dev, "Possible Solution 2: If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT: + device_printf(dev, "Module is not present.\n"); + device_printf(dev, "Possible Solution 1: Check that the module is inserted correctly.\n"); + device_printf(dev, "Possible Solution 2: If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.\n"); + break; + case ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED: + device_printf(dev, "Underutilized module.\n"); + device_printf(dev, "Possible Solution 1: Change or replace the module or cable.\n"); + device_printf(dev, "Possible Solution 2: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT: + device_printf(dev, "An unsupported module was detected.\n"); + device_printf(dev, "Possible Solution 1: Check your cable connection.\n"); + device_printf(dev, "Possible Solution 2: Change or replace the module or cable.\n"); + device_printf(dev, "Possible Solution 3: Manually set speed and duplex.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG: + device_printf(dev, "Invalid link configuration.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS: + device_printf(dev, "Port hardware access error.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE: + device_printf(dev, "A port is unreachable.\n"); + device_printf(dev, "Possible Solution 1: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + device_printf(dev, "Possible Solution 2: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED: + device_printf(dev, "Port speed is limited due to 
module.\n"); + device_printf(dev, "Possible Solution: Change the module or use Intel(R) Ethernet Port Configuration Tool to configure the port option to match the current module speed.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT: + device_printf(dev, "A parallel fault was detected.\n"); + device_printf(dev, "Possible Solution: Check link partner connection and configuration.\n"); + break; + case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED: + device_printf(dev, "Port speed is limited by PHY capabilities.\n"); + device_printf(dev, "Possible Solution 1: Change the module to align to port option.\n"); + device_printf(dev, "Possible Solution 2: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO: + device_printf(dev, "LOM topology netlist is corrupted.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_NETLIST: + device_printf(dev, "Unrecoverable netlist error.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT: + device_printf(dev, "Port topology conflict.\n"); + device_printf(dev, "Possible Solution 1: Use Intel(R) Ethernet Port Configuration Tool to change the port option.\n"); + device_printf(dev, "Possible Solution 2: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS: + device_printf(dev, "Unrecoverable hardware access error.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME: + device_printf(dev, "Unrecoverable runtime error.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + case ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT: + device_printf(dev, "Link management engine failed to initialize.\n"); + device_printf(dev, "Possible Solution: Update to the latest NVM image.\n"); + break; + default: + break; } +} + +/** + * ice_handle_health_status_event - helper function to output health status + * @sc: device softc structure + * @event: event received on a control queue + * + * Prints out the appropriate string based on the given Health Status Event + * code. + */ +static void +ice_handle_health_status_event(struct ice_softc *sc, + struct ice_rq_event_info *event) +{ + struct ice_aqc_health_status_elem *health_info; + u16 status_count; + int i; - *phy_type_low = le64toh(pcaps.phy_type_low); - *phy_type_high = le64toh(pcaps.phy_type_high); + if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_HEALTH_STATUS)) + return; - return (ICE_SUCCESS); + health_info = (struct ice_aqc_health_status_elem *)event->msg_buf; + status_count = le16toh(event->desc.params.get_health_status.health_status_count); + + if (status_count > (event->buf_len / sizeof(*health_info))) { + device_printf(sc->dev, "Received a health status event with invalid event count\n"); + return; + } + + for (i = 0; i < status_count; i++) { + ice_print_health_status_string(sc->dev, health_info); + health_info++; + } } /** * ice_set_default_local_lldp_mib - Set Local LLDP MIB to default settings * @sc: device softc structure * * This function needs to be called after link up; it makes sure the FW * has certain PFC/DCB settings. This is intended to workaround a FW behavior * where these settings seem to be cleared on link up. 
*/ void ice_set_default_local_lldp_mib(struct ice_softc *sc) { struct ice_dcbx_cfg *dcbcfg; struct ice_hw *hw = &sc->hw; struct ice_port_info *pi; device_t dev = sc->dev; enum ice_status status; pi = hw->port_info; dcbcfg = &pi->qos_cfg.local_dcbx_cfg; /* This value is only 3 bits; 8 TCs maps to 0 */ u8 maxtcs = hw->func_caps.common_cap.maxtc & ICE_IEEE_ETS_MAXTC_M; /** * Setup the default settings used by the driver for the Set Local * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no * PFC). */ memset(dcbcfg, 0, sizeof(*dcbcfg)); dcbcfg->etscfg.willing = 1; dcbcfg->etscfg.tcbwtable[0] = 100; dcbcfg->etscfg.maxtcs = maxtcs; dcbcfg->etsrec.willing = 1; dcbcfg->etsrec.tcbwtable[0] = 100; dcbcfg->etsrec.maxtcs = maxtcs; dcbcfg->pfc.willing = 1; dcbcfg->pfc.pfccap = maxtcs; status = ice_set_dcb_cfg(pi); if (status) device_printf(dev, "Error setting Local LLDP MIB: %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h index 336da8905316..d8abe1ea9783 100644 --- a/sys/dev/ice/ice_lib.h +++ b/sys/dev/ice/ice_lib.h @@ -1,813 +1,830 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_lib.h * @brief header for generic device and sysctl functions * * Contains definitions and function declarations for the ice_lib.c file. It * does not depend on the iflib networking stack. 
*/ #ifndef _ICE_LIB_H_ #define _ICE_LIB_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ice_dcb.h" #include "ice_type.h" #include "ice_common.h" #include "ice_flow.h" #include "ice_sched.h" #include "ice_resmgr.h" #include "ice_rss.h" /* Hide debug sysctls unless INVARIANTS is enabled */ #ifdef INVARIANTS #define ICE_CTLFLAG_DEBUG 0 #else #define ICE_CTLFLAG_DEBUG CTLFLAG_SKIP #endif /** * for_each_set_bit - For loop over each set bit in a bit string * @bit: storage for the bit index * @data: address of data block to loop over * @nbits: maximum number of bits to loop over * * macro to create a for loop over a bit string, which runs the body once for * each bit that is set in the string. The bit variable will be set to the * index of each set bit in the string, with zero representing the first bit. */ #define for_each_set_bit(bit, data, nbits) \ for (bit_ffs((bitstr_t *)(data), (nbits), &(bit)); \ (bit) != -1; \ bit_ffs_at((bitstr_t *)(data), (bit) + 1, (nbits), &(bit))) /** * @var broadcastaddr * @brief broadcast MAC address * * constant defining the broadcast MAC address, used for programming the * broadcast address as a MAC filter for the PF VSI. */ static const u8 broadcastaddr[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; MALLOC_DECLARE(M_ICE); extern const char ice_driver_version[]; extern const uint8_t ice_major_version; extern const uint8_t ice_minor_version; extern const uint8_t ice_patch_version; extern const uint8_t ice_rc_version; /* global sysctl indicating whether the Tx FC filter should be enabled */ extern bool ice_enable_tx_fc_filter; /* global sysctl indicating whether the Tx LLDP filter should be enabled */ extern bool ice_enable_tx_lldp_filter; +/* global sysctl indicating whether FW health status events should be enabled */ +extern bool ice_enable_health_events; + /** * @struct ice_bar_info * @brief PCI BAR mapping information * * Contains data about a PCI BAR that the driver has mapped for use. 
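 *
 * Filled in by ice_map_bar() and released via ice_free_bar(), both
 * declared near the end of this header.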
 */
struct ice_bar_info {
	struct resource *res;
	bus_space_tag_t tag;
	bus_space_handle_t handle;
	bus_size_t size;
	int rid;
};

/* Alignment for queues */
#define DBA_ALIGN 128

/* Maximum TSO size is (256K)-1 */
#define ICE_TSO_SIZE ((256*1024) - 1)

/* Minimum size for TSO MSS */
#define ICE_MIN_TSO_MSS 64

#define ICE_MAX_TX_SEGS 8
#define ICE_MAX_TSO_SEGS 128

#define ICE_MAX_DMA_SEG_SIZE ((16*1024) - 1)

#define ICE_MAX_RX_SEGS 5

#define ICE_MAX_TSO_HDR_SEGS 3

#define ICE_MSIX_BAR 3

#define ICE_DEFAULT_DESC_COUNT 1024
#define ICE_MAX_DESC_COUNT 8160
#define ICE_MIN_DESC_COUNT 64
#define ICE_DESC_COUNT_INCR 32

/* List of hardware offloads we support */
#define ICE_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP_SCTP | \
			  CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP | \
			  CSUM_IP_TSO | CSUM_IP6_TSO)

/* Macros to decide what kind of hardware offload to enable */
#define ICE_CSUM_TCP (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
#define ICE_CSUM_UDP (CSUM_IP_UDP|CSUM_IP6_UDP)
#define ICE_CSUM_SCTP (CSUM_IP_SCTP|CSUM_IP6_SCTP)
#define ICE_CSUM_IP (CSUM_IP|CSUM_IP_TSO)

/* List of known RX CSUM offload flags */
#define ICE_RX_CSUM_FLAGS (CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | \
			   CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \
			   CSUM_COALESCED)

/* List of interface capabilities supported by ice hardware */
#define ICE_FULL_CAPS \
	(IFCAP_TSO4 | IFCAP_TSO6 | \
	 IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \
	 IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \
	 IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \
	 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \
	 IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO)

/* Safe mode disables support for hardware checksums and TSO */
#define ICE_SAFE_CAPS \
	(ICE_FULL_CAPS & ~(IFCAP_HWCSUM | IFCAP_TSO | \
			   IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM))

#define ICE_CAPS(sc) \
	(ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE) ? ICE_SAFE_CAPS : ICE_FULL_CAPS)

/**
 * ICE_NVM_ACCESS
 * @brief Private ioctl command number for NVM access ioctls
 *
 * The ioctl command number used by NVM update for accessing the driver for
 * NVM access commands.
 */
#define ICE_NVM_ACCESS \
	(((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)

#define ICE_AQ_LEN 512
#define ICE_MBXQ_LEN 512
#define ICE_SBQ_LEN 512
#define ICE_CTRLQ_WORK_LIMIT 256

#define ICE_DFLT_TRAFFIC_CLASS BIT(0)

/* wait up to 50 microseconds for queue state change */
#define ICE_Q_WAIT_RETRY_LIMIT 5

#define ICE_UP_TABLE_TRANSLATE(val, i) \
	(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
	 ICE_AQ_VSI_UP_TABLE_UP##i##_M)

/*
 * For now, set this to the hardware maximum. Each function gets a smaller
 * number assigned to it in hw->func_caps.guar_num_vsi, though there
 * appears to be no guarantee that is the maximum number that a function
 * can use.
 */
#define ICE_MAX_VSI_AVAILABLE 768

/* Maximum size of a single frame (for Tx and Rx) */
#define ICE_MAX_FRAME_SIZE ICE_AQ_SET_MAC_FRAME_SIZE_MAX

/* Maximum MTU size */
#define ICE_MAX_MTU (ICE_MAX_FRAME_SIZE - \
		     ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)

/*
 * Hardware requires that TSO packets have a segment size of at least 64
 * bytes. To avoid sending bad frames to the hardware, the driver forces the
 * MSS for all TSO packets to have a segment size of at least 64 bytes.
 *
 * However, if the MTU is reduced below a certain size, then the resulting
 * larger MSS can result in transmitting segmented frames with a packet size
 * larger than the MTU.
 *
 * Avoid this by preventing the MTU from being lowered below this limit.
 * Alternative solutions require changing the TCP stack to disable offloading
 * the segmentation when the requested segment size goes below 64 bytes.
 */
#define ICE_MIN_MTU 112

#define ICE_DEFAULT_VF_QUEUES 4

+/*
+ * There are three settings that can be updated independently or
+ * all together: Link speed, FEC, and Flow Control. These macros allow
+ * the caller to specify which setting(s) to update.
+ */
+#define ICE_APPLY_LS BIT(0)
+#define ICE_APPLY_FEC BIT(1)
+#define ICE_APPLY_FC BIT(2)
+#define ICE_APPLY_LS_FEC (ICE_APPLY_LS | ICE_APPLY_FEC)
+#define ICE_APPLY_LS_FC (ICE_APPLY_LS | ICE_APPLY_FC)
+#define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC)
+#define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC)
+
/**
 * @enum ice_dyn_idx_t
 * @brief Dynamic Control ITR indexes
 *
 * This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * mentioning ITR_INDX; ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum ice_dyn_idx_t {
	ICE_IDX_ITR0 = 0,
	ICE_IDX_ITR1 = 1,
	ICE_IDX_ITR2 = 2,
	ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};

/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1

#define ICE_ITR_MAX 8160

/* Define the default Tx and Rx ITR as 50us (translates to ~20k int/sec max) */
#define ICE_DFLT_TX_ITR 50
#define ICE_DFLT_RX_ITR 50

/**
 * ice_itr_to_reg - Convert an ITR setting into its register equivalent
 * @hw: The device HW structure
 * @itr_setting: the ITR setting to convert
 *
 * Based on the hardware ITR granularity, convert an ITR setting into the
 * correct value to prepare programming to the HW.
 */
static inline u16
ice_itr_to_reg(struct ice_hw *hw, u16 itr_setting)
{
	return itr_setting / hw->itr_gran;
}

/**
 * @enum ice_rx_dtype
 * @brief DTYPE header split options
 *
 * This enum matches the Rx context bits to define whether header split is
 * enabled or not.
 */
enum ice_rx_dtype {
	ICE_RX_DTYPE_NO_SPLIT = 0,
	ICE_RX_DTYPE_HEADER_SPLIT = 1,
	ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
};

/* Strings used for displaying FEC mode
 *
 * Use ice_fec_str() to get these unless these need to be embedded in a
 * string constant.
 */
#define ICE_FEC_STRING_AUTO "Auto"
#define ICE_FEC_STRING_RS "RS-FEC"
#define ICE_FEC_STRING_BASER "FC-FEC/BASE-R"
#define ICE_FEC_STRING_NONE "None"

/* Strings used for displaying Flow Control mode
 *
 * Use ice_fc_str() to get these unless these need to be embedded in a
 * string constant.
 */
#define ICE_FC_STRING_FULL "Full"
#define ICE_FC_STRING_TX "Tx"
#define ICE_FC_STRING_RX "Rx"
#define ICE_FC_STRING_NONE "None"

/*
 * The number of times the ice_handle_i2c_req function will retry reading
 * I2C data via the Admin Queue before returning EBUSY.
 */
#define ICE_I2C_MAX_RETRIES 10

/*
 * The Start LLDP Agent AQ command will fail if it's sent too soon after
 * the LLDP agent is stopped. The period between the stop and start
 * commands must currently be at least 2 seconds.
 */
#define ICE_START_LLDP_RETRY_WAIT (2 * hz)

/*
 * The ice_(set|clear)_vsi_promisc() functions expect a mask of promiscuous
 * modes to operate on. This mask is the default one for the driver, where
 * promiscuous is enabled/disabled for all types of non-VLAN-tagged/VLAN 0
 * traffic.
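 *
 * A hypothetical caller sketch (assuming the ice_set_vsi_promisc()
 * prototype of this driver generation, taking hw, a VSI handle, the
 * mode mask, and a VLAN ID):
 *
 *	status = ice_set_vsi_promisc(&sc->hw, vsi->idx,
 *				     ICE_VSI_PROMISC_MASK, 0);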
*/ #define ICE_VSI_PROMISC_MASK (ICE_PROMISC_UCAST_TX | \ ICE_PROMISC_UCAST_RX | \ ICE_PROMISC_MCAST_TX | \ ICE_PROMISC_MCAST_RX) struct ice_softc; /** * @enum ice_rx_cso_stat * @brief software checksum offload statistics * * Enumeration of possible checksum offload statistics captured by software * during the Rx path. */ enum ice_rx_cso_stat { ICE_CSO_STAT_RX_IP4_ERR, ICE_CSO_STAT_RX_IP6_ERR, ICE_CSO_STAT_RX_L3_ERR, ICE_CSO_STAT_RX_TCP_ERR, ICE_CSO_STAT_RX_UDP_ERR, ICE_CSO_STAT_RX_SCTP_ERR, ICE_CSO_STAT_RX_L4_ERR, ICE_CSO_STAT_RX_COUNT }; /** * @enum ice_tx_cso_stat * @brief software checksum offload statistics * * Enumeration of possible checksum offload statistics captured by software * during the Tx path. */ enum ice_tx_cso_stat { ICE_CSO_STAT_TX_TCP, ICE_CSO_STAT_TX_UDP, ICE_CSO_STAT_TX_SCTP, ICE_CSO_STAT_TX_IP4, ICE_CSO_STAT_TX_IP6, ICE_CSO_STAT_TX_L3_ERR, ICE_CSO_STAT_TX_L4_ERR, ICE_CSO_STAT_TX_COUNT }; /** * @struct tx_stats * @brief software Tx statistics * * Contains software counted Tx statistics for a single queue */ struct tx_stats { /* Soft Stats */ u64 tx_bytes; u64 tx_packets; u64 mss_too_small; u64 cso[ICE_CSO_STAT_TX_COUNT]; }; /** * @struct rx_stats * @brief software Rx statistics * * Contains software counted Rx statistics for a single queue */ struct rx_stats { /* Soft Stats */ u64 rx_packets; u64 rx_bytes; u64 desc_errs; u64 cso[ICE_CSO_STAT_RX_COUNT]; }; /** * @struct ice_vsi_hw_stats * @brief hardware statistics for a VSI * * Stores statistics that are generated by hardware for a VSI. */ struct ice_vsi_hw_stats { struct ice_eth_stats prev; struct ice_eth_stats cur; bool offsets_loaded; }; /** * @struct ice_pf_hw_stats * @brief hardware statistics for a PF * * Stores statistics that are generated by hardware for each PF. */ struct ice_pf_hw_stats { struct ice_hw_port_stats prev; struct ice_hw_port_stats cur; bool offsets_loaded; }; /** * @struct ice_pf_sw_stats * @brief software statistics for a PF * * Contains software generated statistics relevant to a PF. 
*/ struct ice_pf_sw_stats { /* # of reset events handled, by type */ u32 corer_count; u32 globr_count; u32 empr_count; u32 pfr_count; /* # of detected MDD events for Tx and Rx */ u32 tx_mdd_count; u32 rx_mdd_count; }; /** * @struct ice_vsi * @brief VSI structure * * Contains data relevant to a single VSI */ struct ice_vsi { /* back pointer to the softc */ struct ice_softc *sc; bool dynamic; /* if true, dynamically allocated */ enum ice_vsi_type type; /* type of this VSI */ u16 idx; /* software index to sc->all_vsi[] */ u16 *tx_qmap; /* Tx VSI to PF queue mapping */ u16 *rx_qmap; /* Rx VSI to PF queue mapping */ bitstr_t *vmap; /* Vector(s) assigned to VSI */ enum ice_resmgr_alloc_type qmap_type; struct ice_tx_queue *tx_queues; /* Tx queue array */ struct ice_rx_queue *rx_queues; /* Rx queue array */ int num_tx_queues; int num_rx_queues; int num_vectors; int16_t rx_itr; int16_t tx_itr; /* RSS configuration */ u16 rss_table_size; /* HW RSS table size */ u8 rss_lut_type; /* Used to configure Get/Set RSS LUT AQ call */ int max_frame_size; u16 mbuf_sz; struct ice_aqc_vsi_props info; /* context for per-VSI sysctls */ struct sysctl_ctx_list ctx; struct sysctl_oid *vsi_node; /* context for per-txq sysctls */ struct sysctl_ctx_list txqs_ctx; struct sysctl_oid *txqs_node; /* context for per-rxq sysctls */ struct sysctl_ctx_list rxqs_ctx; struct sysctl_oid *rxqs_node; /* VSI-level stats */ struct ice_vsi_hw_stats hw_stats; }; /** * @enum ice_state * @brief Driver state flags * * Used to indicate the status of various driver events. Intended to be * modified only using atomic operations, so that we can use it even in places * which aren't locked. */ enum ice_state { ICE_STATE_CONTROLQ_EVENT_PENDING, ICE_STATE_VFLR_PENDING, ICE_STATE_MDD_PENDING, ICE_STATE_RESET_OICR_RECV, ICE_STATE_RESET_PFR_REQ, ICE_STATE_PREPARED_FOR_RESET, ICE_STATE_RESET_FAILED, ICE_STATE_DRIVER_INITIALIZED, ICE_STATE_NO_MEDIA, ICE_STATE_RECOVERY_MODE, ICE_STATE_ROLLBACK_MODE, ICE_STATE_LINK_STATUS_REPORTED, ICE_STATE_DETACHING, ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER, /* This entry must be last */ ICE_STATE_LAST, }; /* Functions for setting and checking driver state. Note the functions take * bit positions, not bitmasks. The atomic_testandset_32 and * atomic_testandclear_32 operations require bit positions, while the * atomic_set_32 and atomic_clear_32 require bitmasks. This can easily lead to * programming error, so we provide wrapper functions to avoid this. */ /** * ice_set_state - Set the specified state * @s: the state bitmap * @bit: the state to set * * Atomically update the state bitmap with the specified bit set. */ static inline void ice_set_state(volatile u32 *s, enum ice_state bit) { /* atomic_set_32 expects a bitmask */ atomic_set_32(s, BIT(bit)); } /** * ice_clear_state - Clear the specified state * @s: the state bitmap * @bit: the state to clear * * Atomically update the state bitmap with the specified bit cleared. */ static inline void ice_clear_state(volatile u32 *s, enum ice_state bit) { /* atomic_clear_32 expects a bitmask */ atomic_clear_32(s, BIT(bit)); } /** * ice_testandset_state - Test and set the specified state * @s: the state bitmap * @bit: the bit to test * * Atomically update the state bitmap, setting the specified bit. Returns the * previous value of the bit. 
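 *
 * A typical use is to claim work exactly once across contexts, e.g. a
 * caller that only schedules MDD handling when it was not already
 * pending (ice_schedule_mdd_task() is a hypothetical helper):
 *
 *	if (!ice_testandset_state(&sc->state, ICE_STATE_MDD_PENDING))
 *		ice_schedule_mdd_task(sc);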
*/ static inline u32 ice_testandset_state(volatile u32 *s, enum ice_state bit) { /* atomic_testandset_32 expects a bit position */ return atomic_testandset_32(s, bit); } /** * ice_testandclear_state - Test and clear the specified state * @s: the state bitmap * @bit: the bit to test * * Atomically update the state bitmap, clearing the specified bit. Returns the * previous value of the bit. */ static inline u32 ice_testandclear_state(volatile u32 *s, enum ice_state bit) { /* atomic_testandclear_32 expects a bit position */ return atomic_testandclear_32(s, bit); } /** * ice_test_state - Test the specified state * @s: the state bitmap * @bit: the bit to test * * Return true if the state is set, false otherwise. Use this only if the flow * does not need to update the state. If you must update the state as well, * prefer ice_testandset_state or ice_testandclear_state. */ static inline u32 ice_test_state(volatile u32 *s, enum ice_state bit) { return (*s & BIT(bit)) ? true : false; } /** * @struct ice_str_buf * @brief static length buffer for string returning * * Structure containing a fixed size string buffer, used to implement * numeric->string conversion functions that may want to return non-constant * strings. * * This allows returning a fixed size string that is generated by a conversion * function, and then copied to the used location without needing to use an * explicit local variable passed by reference. */ struct ice_str_buf { char str[ICE_STR_BUF_LEN]; }; struct ice_str_buf _ice_aq_str(enum ice_aq_err aq_err); struct ice_str_buf _ice_status_str(enum ice_status status); struct ice_str_buf _ice_err_str(int err); struct ice_str_buf _ice_fltr_flag_str(u16 flag); struct ice_str_buf _ice_mdd_tx_tclan_str(u8 event); struct ice_str_buf _ice_mdd_tx_pqm_str(u8 event); struct ice_str_buf _ice_mdd_rx_str(u8 event); struct ice_str_buf _ice_fw_lldp_status(u32 lldp_status); #define ice_aq_str(err) _ice_aq_str(err).str #define ice_status_str(err) _ice_status_str(err).str #define ice_err_str(err) _ice_err_str(err).str #define ice_fltr_flag_str(flag) _ice_fltr_flag_str(flag).str #define ice_mdd_tx_tclan_str(event) _ice_mdd_tx_tclan_str(event).str #define ice_mdd_tx_pqm_str(event) _ice_mdd_tx_pqm_str(event).str #define ice_mdd_rx_str(event) _ice_mdd_rx_str(event).str #define ice_fw_lldp_status(lldp_status) _ice_fw_lldp_status(lldp_status).str /** * ice_enable_intr - Enable interrupts for given vector * @hw: the device private HW structure * @vector: the interrupt index in PF space * * In MSI or Legacy interrupt mode, interrupt 0 is the only valid index. */ static inline void ice_enable_intr(struct ice_hw *hw, int vector) { u32 dyn_ctl; /* Use ITR_NONE so that ITR configuration is not changed. */ dyn_ctl = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl); } /** * ice_disable_intr - Disable interrupts for given vector * @hw: the device private HW structure * @vector: the interrupt index in PF space * * In MSI or Legacy interrupt mode, interrupt 0 is the only valid index. */ static inline void ice_disable_intr(struct ice_hw *hw, int vector) { u32 dyn_ctl; /* Use ITR_NONE so that ITR configuration is not changed. */ dyn_ctl = ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S; wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl); } /** * ice_is_tx_desc_done - determine if a Tx descriptor is done * @txd: the Tx descriptor to check * * Returns true if hardware is done with a Tx descriptor and software is * capable of re-using it. 
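 *
 * Hardware writeback overwrites the DTYPE field within cmd_type_offset_bsz
 * with ICE_TX_DESC_DTYPE_DESC_DONE, so this helper only needs to mask out
 * and compare that field.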
*/ static inline bool ice_is_tx_desc_done(struct ice_tx_desc *txd) { return (((txd->cmd_type_offset_bsz & ICE_TXD_QW1_DTYPE_M) >> ICE_TXD_QW1_DTYPE_S) == ICE_TX_DESC_DTYPE_DESC_DONE); } /** * ice_get_pf_id - Get the PF id from the hardware registers * @hw: the ice hardware structure * * Reads the PF_FUNC_RID register and extracts the function number from it. * Intended to be used in cases where hw->pf_id hasn't yet been assigned by * ice_init_hw. * * @pre this function should be called only after PCI register access has been * setup, and prior to ice_init_hw. After hardware has been initialized, the * cached hw->pf_id value can be used. */ static inline u8 ice_get_pf_id(struct ice_hw *hw) { return (u8)((rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNCTION_NUMBER_M) >> PF_FUNC_RID_FUNCTION_NUMBER_S); } /* Details of how to re-initialize depend on the networking stack */ void ice_request_stack_reinit(struct ice_softc *sc); /* Details of how to check if the network stack is detaching us */ bool ice_driver_is_detaching(struct ice_softc *sc); int ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending); int ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num); void ice_free_bar(device_t dev, struct ice_bar_info *bar); void ice_set_ctrlq_len(struct ice_hw *hw); void ice_release_vsi(struct ice_vsi *vsi); struct ice_vsi *ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type); int ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues, const int max_rx_queues); void ice_free_vsi_qmaps(struct ice_vsi *vsi); int ice_initialize_vsi(struct ice_vsi *vsi); void ice_deinit_vsi(struct ice_vsi *vsi); uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi); int ice_get_phy_type_low(uint64_t phy_type_low); int ice_get_phy_type_high(uint64_t phy_type_high); enum ice_status ice_add_media_types(struct ice_softc *sc, struct ifmedia *media); void ice_configure_rxq_interrupts(struct ice_vsi *vsi); void ice_configure_txq_interrupts(struct ice_vsi *vsi); void ice_flush_rxq_interrupts(struct ice_vsi *vsi); void ice_flush_txq_interrupts(struct ice_vsi *vsi); int ice_cfg_vsi_for_tx(struct ice_vsi *vsi); int ice_cfg_vsi_for_rx(struct ice_vsi *vsi); int ice_control_rx_queues(struct ice_vsi *vsi, bool enable); int ice_cfg_pf_default_mac_filters(struct ice_softc *sc); int ice_rm_pf_default_mac_filters(struct ice_softc *sc); void ice_print_nvm_version(struct ice_softc *sc); void ice_update_vsi_hw_stats(struct ice_vsi *vsi); void ice_reset_vsi_stats(struct ice_vsi *vsi); void ice_update_pf_stats(struct ice_softc *sc); void ice_reset_pf_stats(struct ice_softc *sc); void ice_add_device_sysctls(struct ice_softc *sc); void ice_log_hmc_error(struct ice_hw *hw, device_t dev); void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid *parent, struct ice_eth_stats *stats); void ice_add_vsi_sysctls(struct ice_vsi *vsi); void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid *parent, struct ice_hw_port_stats *stats); void ice_configure_misc_interrupts(struct ice_softc *sc); -int ice_sync_multicast_filters(struct ice_softc *sc); +int ice_sync_multicast_filters(struct ice_softc *sc); enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid); enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid); void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent); void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi); void ice_add_device_tunables(struct ice_softc *sc); int ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 
*addr); int ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr); int ice_vsi_disable_tx(struct ice_vsi *vsi); void ice_vsi_add_txqs_ctx(struct ice_vsi *vsi); void ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi); void ice_vsi_del_txqs_ctx(struct ice_vsi *vsi); void ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi); void ice_add_txq_sysctls(struct ice_tx_queue *txq); void ice_add_rxq_sysctls(struct ice_rx_queue *rxq); int ice_config_rss(struct ice_vsi *vsi); void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc); void ice_load_pkg_file(struct ice_softc *sc); void ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status); uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter); void ice_save_pci_info(struct ice_hw *hw, device_t dev); int ice_replay_all_vsi_cfg(struct ice_softc *sc); void ice_link_up_msg(struct ice_softc *sc); int ice_update_laa_mac(struct ice_softc *sc); void ice_get_and_print_bus_info(struct ice_softc *sc); const char *ice_fec_str(enum ice_fec_mode mode); const char *ice_fc_str(enum ice_fc_mode mode); const char *ice_fwd_act_str(enum ice_sw_fwd_act_type action); -const char * ice_state_to_str(enum ice_state state); +const char *ice_state_to_str(enum ice_state state); int ice_init_link_events(struct ice_softc *sc); void ice_configure_rx_itr(struct ice_vsi *vsi); void ice_configure_tx_itr(struct ice_vsi *vsi); void ice_setup_pf_vsi(struct ice_softc *sc); void ice_handle_mdd_event(struct ice_softc *sc); void ice_init_dcb_setup(struct ice_softc *sc); int ice_send_version(struct ice_softc *sc); -int ice_cfg_pf_ethertype_filters(struct ice_softc *sc); +int ice_cfg_pf_ethertype_filters(struct ice_softc *sc); void ice_init_link_configuration(struct ice_softc *sc); void ice_init_saved_phy_cfg(struct ice_softc *sc); -void ice_apply_saved_phy_cfg(struct ice_softc *sc); +int ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings); void ice_set_link_management_mode(struct ice_softc *sc); -int ice_module_event_handler(module_t mod, int what, void *arg); -int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd); -int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req); -int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length); -int ice_alloc_intr_tracking(struct ice_softc *sc); +int ice_module_event_handler(module_t mod, int what, void *arg); +int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd); +int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req); +int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length); +int ice_alloc_intr_tracking(struct ice_softc *sc); void ice_free_intr_tracking(struct ice_softc *sc); void ice_set_default_local_lldp_mib(struct ice_softc *sc); +void ice_init_health_events(struct ice_softc *sc); #endif /* _ICE_LIB_H_ */ diff --git a/sys/dev/ice/ice_nvm.c b/sys/dev/ice/ice_nvm.c index f0b2b9f77e55..66ee4188c8c0 100644 --- a/sys/dev/ice/ice_nvm.c +++ b/sys/dev/ice/ice_nvm.c @@ -1,2053 +1,2054 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "ice_common.h" /** * ice_aq_read_nvm * @hw: pointer to the HW struct * @module_typeid: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be read (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @read_shadow_ram: tell if this is a shadow RAM read * @cd: pointer to command details structure or NULL * * Read the NVM using the admin queue commands (0x0701) */ enum ice_status ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, bool read_shadow_ram, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.nvm; if (offset > ICE_AQC_NVM_MAX_OFFSET) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read); if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT) cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY; /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; cmd->module_typeid = CPU_TO_LE16(module_typeid); cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF); cmd->offset_high = (offset >> 16) & 0xFF; cmd->length = CPU_TO_LE16(length); return ice_aq_send_cmd(hw, &desc, data, length, cd); } /** * ice_read_flat_nvm - Read portion of NVM by flat offset * @hw: pointer to the HW struct * @offset: offset from beginning of NVM * @length: (in) number of bytes to read; (out) number of bytes actually read * @data: buffer to return data in (sized to fit the specified length) * @read_shadow_ram: if true, read from shadow RAM instead of NVM * * Reads a portion of the NVM, as a flat memory space. This function correctly * breaks read requests across Shadow RAM sectors and ensures that no single * read request exceeds the maximum 4KB read for a single AdminQ command. * * Returns a status code on failure. Note that the data pointer may be * partially updated if some reads succeed before a failure. 
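 *
 * Illustrative caller sketch (the offset and buffer size are
 * hypothetical, and the NVM resource is assumed to already be held via
 * ice_acquire_nvm()):
 *
 *	enum ice_status status;
 *	u8 buf[64];
 *	u32 len = sizeof(buf);
 *
 *	status = ice_read_flat_nvm(hw, 0, &len, buf, false);
 *	if (!status)
 *		ice_debug(hw, ICE_DBG_NVM, "read %u bytes\n", len);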
*/ enum ice_status ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram) { enum ice_status status; u32 inlen = *length; u32 bytes_read = 0; bool last_cmd; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); *length = 0; /* Verify the length of the read if this is for the Shadow RAM */ if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) { ice_debug(hw, ICE_DBG_NVM, "NVM error: requested data is beyond Shadow RAM limit\n"); return ICE_ERR_PARAM; } do { u32 read_size, sector_offset; /* ice_aq_read_nvm cannot read more than 4KB at a time. * Additionally, a read from the Shadow RAM may not cross over * a sector boundary. Conveniently, the sector size is also * 4KB. */ sector_offset = offset % ICE_AQ_MAX_BUF_LEN; read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset, inlen - bytes_read); last_cmd = !(bytes_read + read_size < inlen); /* ice_aq_read_nvm takes the length as a u16. Our read_size is * calculated using a u32, but the ICE_AQ_MAX_BUF_LEN maximum * size guarantees that it will fit within the 2 bytes. */ status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, offset, (u16)read_size, data + bytes_read, last_cmd, read_shadow_ram, NULL); if (status) break; bytes_read += read_size; offset += read_size; } while (!last_cmd); *length = bytes_read; return status; } /** * ice_aq_update_nvm * @hw: pointer to the HW struct * @module_typeid: module pointer location in words from the NVM beginning * @offset: byte offset from the module beginning * @length: length of the section to be written (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series * @command_flags: command parameters * @cd: pointer to command details structure or NULL * * Update the NVM using the admin queue commands (0x0703) */ enum ice_status ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, u8 command_flags, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.nvm; /* In offset the highest byte must be zeroed. */ if (offset & 0xFF000000) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); cmd->cmd_flags |= command_flags; /* If this is the last command in a series, set the proper flag. 
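 * Firmware uses ICE_AQC_NVM_LAST_CMD to recognize the end of a
 * multi-part transfer, since a single AQ buffer carries at most
 * ICE_AQ_MAX_BUF_LEN (4KB) of data.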
*/ if (last_command) cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; cmd->module_typeid = CPU_TO_LE16(module_typeid); cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF); cmd->offset_high = (offset >> 16) & 0xFF; cmd->length = CPU_TO_LE16(length); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); return ice_aq_send_cmd(hw, &desc, data, length, cd); } /** * ice_aq_erase_nvm * @hw: pointer to the HW struct * @module_typeid: module pointer location in words from the NVM beginning * @cd: pointer to command details structure or NULL * * Erase the NVM sector using the admin queue commands (0x0702) */ enum ice_status ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; enum ice_status status; __le16 len; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* read a length value from SR, so module_typeid is equal to 0 */ /* calculate offset where module size is placed from bytes to words */ /* set last command and read from SR values to true */ status = ice_aq_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true, true, NULL); if (status) return status; cmd = &desc.params.nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase); cmd->module_typeid = CPU_TO_LE16(module_typeid); cmd->length = len; cmd->offset_low = 0; cmd->offset_high = 0; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_read_nvm_cfg - read an NVM config block * @hw: pointer to the HW struct * @cmd_flags: NVM access admin command bits * @field_id: field or feature ID * @data: buffer for result * @buf_size: buffer size * @elem_count: pointer to count of elements read by FW * @cd: pointer to command details structure or NULL * * Reads single or multiple feature/field ID and data (0x0704) */ enum ice_status ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data, u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd) { struct ice_aqc_nvm_cfg *cmd; struct ice_aq_desc desc; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.nvm_cfg; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_read); cmd->cmd_flags = cmd_flags; cmd->id = CPU_TO_LE16(field_id); status = ice_aq_send_cmd(hw, &desc, data, buf_size, cd); if (!status && elem_count) *elem_count = LE16_TO_CPU(cmd->count); return status; } /** * ice_aq_write_nvm_cfg - write an NVM config block * @hw: pointer to the HW struct * @cmd_flags: NVM access admin command bits * @data: buffer for result * @buf_size: buffer size * @elem_count: count of elements to be written * @cd: pointer to command details structure or NULL * * Writes single or multiple feature/field ID and data (0x0705) */ enum ice_status ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size, u16 elem_count, struct ice_sq_cd *cd) { struct ice_aqc_nvm_cfg *cmd; struct ice_aq_desc desc; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.nvm_cfg; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_write); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->count = CPU_TO_LE16(elem_count); cmd->cmd_flags = cmd_flags; return ice_aq_send_cmd(hw, &desc, data, buf_size, cd); } /** * ice_check_sr_access_params - verify params for Shadow RAM R/W operations. 
* @hw: pointer to the HW structure * @offset: offset in words from module start * @words: number of words to access */ static enum ice_status ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words) { if ((offset + words) > hw->flash.sr_words) { ice_debug(hw, ICE_DBG_NVM, "NVM error: offset beyond SR lmt.\n"); return ICE_ERR_PARAM; } if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) { /* We can access only up to 4KB (one sector), in one AQ write */ ice_debug(hw, ICE_DBG_NVM, "NVM error: tried to access %d words, limit is %d.\n", words, ICE_SR_SECTOR_SIZE_IN_WORDS); return ICE_ERR_PARAM; } if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) != (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) { /* A single access cannot spread over two sectors */ ice_debug(hw, ICE_DBG_NVM, "NVM error: cannot spread over two sectors.\n"); return ICE_ERR_PARAM; } return ICE_SUCCESS; } /** * ice_read_sr_word_aq - Reads Shadow RAM via AQ * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm. */ enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) { u32 bytes = sizeof(u16); enum ice_status status; __le16 data_local; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Note that ice_read_flat_nvm checks if the read is past the Shadow * RAM size, and ensures we don't read across a Shadow RAM sector * boundary */ status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes, (_FORCE_ u8 *)&data_local, true); if (status) return status; *data = LE16_TO_CPU(data_local); return ICE_SUCCESS; } /** * ice_write_sr_aq - Writes Shadow RAM. * @hw: pointer to the HW structure * @offset: offset in words from module start * @words: number of words to write * @data: buffer with words to write to the Shadow RAM * @last_command: tells the AdminQ that this is the last command * * Writes a 16 bit words buffer to the Shadow RAM using the admin command. */ static enum ice_status ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data, bool last_command) { enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_check_sr_access_params(hw, offset, words); if (!status) status = ice_aq_update_nvm(hw, 0, 2 * offset, 2 * words, data, last_command, 0, NULL); return status; } /** * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is * taken before reading the buffer and later released. */ static enum ice_status ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) { u32 bytes = *words * 2, i; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* ice_read_flat_nvm takes into account the 4KB AdminQ and Shadow RAM * sector restrictions necessary when reading from the NVM. 
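 * For example: one 4KB sector holds 2048 words, so a 16-word read
 * starting at word offset 2040 is issued as an 8-word read that
 * finishes the first sector, followed by an 8-word read that begins
 * the next one.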
*/ status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true); /* Report the number of words successfully read */ *words = bytes / 2; /* Byte swap the words up to the amount we actually read */ for (i = 0; i < *words; i++) data[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]); return status; } /** * ice_acquire_nvm - Generic request for acquiring the NVM ownership * @hw: pointer to the HW structure * @access: NVM access type (read or write) * * This function will request NVM ownership. */ enum ice_status ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (hw->flash.blank_nvm_mode) return ICE_SUCCESS; return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT); } /** * ice_release_nvm - Generic request for releasing the NVM ownership * @hw: pointer to the HW structure * * This function will release NVM ownership. */ void ice_release_nvm(struct ice_hw *hw) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (hw->flash.blank_nvm_mode) return; ice_release_res(hw, ICE_NVM_RES_ID); } /** * ice_get_flash_bank_offset - Get offset into requested flash bank * @hw: pointer to the HW structure * @bank: whether to read from the active or inactive flash bank * @module: the module to read from * * Based on the module, lookup the module offset from the beginning of the * flash. * * Returns the flash offset. Note that a value of zero is invalid and must be * treated as an error. */ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select bank, u16 module) { struct ice_bank_info *banks = &hw->flash.banks; enum ice_flash_bank active_bank; bool second_bank_active; u32 offset, size; switch (module) { case ICE_SR_1ST_NVM_BANK_PTR: offset = banks->nvm_ptr; size = banks->nvm_size; active_bank = banks->nvm_bank; break; case ICE_SR_1ST_OROM_BANK_PTR: offset = banks->orom_ptr; size = banks->orom_size; active_bank = banks->orom_bank; break; case ICE_SR_NETLIST_BANK_PTR: offset = banks->netlist_ptr; size = banks->netlist_size; active_bank = banks->netlist_bank; break; default: ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash module: 0x%04x\n", module); return 0; } switch (active_bank) { case ICE_1ST_FLASH_BANK: second_bank_active = false; break; case ICE_2ND_FLASH_BANK: second_bank_active = true; break; default: ice_debug(hw, ICE_DBG_NVM, "Unexpected value for active flash bank: %u\n", active_bank); return 0; } /* The second flash bank is stored immediately following the first * bank. Based on whether the 1st or 2nd bank is active, and whether * we want the active or inactive bank, calculate the desired offset. */ switch (bank) { case ICE_ACTIVE_FLASH_BANK: return offset + (second_bank_active ? size : 0); case ICE_INACTIVE_FLASH_BANK: return offset + (second_bank_active ? 0 : size); } ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash bank selection: %u\n", bank); return 0; } /** * ice_read_flash_module - Read a word from one of the main NVM modules * @hw: pointer to the HW structure * @bank: which bank of the module to read * @module: the module to read * @offset: the offset into the module in bytes * @data: storage for the word read from the flash * @length: bytes of data to read * * Read data from the specified flash module. The bank parameter indicates * whether or not to read from the active bank or the inactive bank of that * module. * * The word will be read using flat NVM access, and relies on the * hw->flash.banks data being setup by ice_determine_active_flash_banks() * during initialization. 
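 *
 * Note that the NVM resource is acquired and released internally
 * around the flat read, so callers of this helper do not hold the NVM
 * semaphore across the call.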
*/ static enum ice_status ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module, u32 offset, u8 *data, u32 length) { enum ice_status status; u32 start; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); start = ice_get_flash_bank_offset(hw, bank, module); if (!start) { ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n", module); return ICE_ERR_PARAM; } status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; status = ice_read_flat_nvm(hw, start + offset, &length, data, false); ice_release_nvm(hw); return status; } /** * ice_read_nvm_module - Read from the active main NVM module * @hw: pointer to the HW structure * @bank: whether to read from active or inactive NVM module * @offset: offset into the NVM module to read, in words * @data: storage for returned word value * * Read the specified word from the active NVM module. This includes the CSS * header at the start of the NVM module. */ static enum ice_status ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { enum ice_status status; __le16 data_local; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16), (_FORCE_ u8 *)&data_local, sizeof(u16)); if (!status) *data = LE16_TO_CPU(data_local); return status; } /** * ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank * @hw: pointer to the HW structure * @bank: whether to read from the active or inactive NVM module * @offset: offset into the Shadow RAM copy to read, in words * @data: storage for returned word value * * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. */ static enum ice_status ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); } /** * ice_read_orom_module - Read from the active Option ROM module * @hw: pointer to the HW structure * @bank: whether to read from active or inactive OROM module * @offset: offset into the OROM module to read, in words * @data: storage for returned word value * * Read the specified word from the active Option ROM module of the flash. * Note that unlike the NVM module, the CSS data is stored at the end of the * module instead of at the beginning. */ static enum ice_status ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { enum ice_status status; __le16 data_local; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset * sizeof(u16), (_FORCE_ u8 *)&data_local, sizeof(u16)); if (!status) *data = LE16_TO_CPU(data_local); return status; } /** * ice_read_netlist_module - Read data from the netlist module area * @hw: pointer to the HW structure * @bank: whether to read from the active or inactive module * @offset: offset into the netlist to read from * @data: storage for returned word value * * Read a word from the specified netlist bank. 
*/ static enum ice_status ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { enum ice_status status; __le16 data_local; status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16), (_FORCE_ u8 *)&data_local, sizeof(u16)); if (!status) *data = LE16_TO_CPU(data_local); return status; } /** * ice_read_sr_word - Reads a Shadow RAM word and acquires NVM if necessary * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * * Reads one 16 bit word from the Shadow RAM using ice_read_sr_word_aq. */ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { enum ice_status status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { status = ice_read_sr_word_aq(hw, offset, data); ice_release_nvm(hw); } return status; } /** * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA * @hw: pointer to hardware structure * @module_tlv: pointer to module TLV to return * @module_tlv_len: pointer to module TLV length to return * @module_type: module type requested * * Finds the requested sub module TLV type from the Preserved Field * Area (PFA) and returns the TLV pointer and length. The caller can * use these to read the variable length TLV value. */ enum ice_status ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { enum ice_status status; u16 pfa_len, pfa_ptr; u16 next_tlv; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_INIT, "Failed to read Preserved Field Array pointer.\n"); return status; } status = ice_read_sr_word(hw, pfa_ptr, &pfa_len); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); return status; } /* Starting with first TLV after PFA length, iterate through the list * of TLVs to find the requested one. */ next_tlv = pfa_ptr + 1; while (next_tlv < pfa_ptr + pfa_len) { u16 tlv_sub_module_type; u16 tlv_len; /* Read TLV type */ status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n"); break; } /* Read TLV length */ status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n"); break; } if (tlv_sub_module_type == module_type) { if (tlv_len) { *module_tlv = next_tlv; *module_tlv_len = tlv_len; return ICE_SUCCESS; } return ICE_ERR_INVAL_SIZE; } /* Check next TLV, i.e. current TLV pointer + length + 2 words * (for current TLV's type and length) */ next_tlv = next_tlv + tlv_len + 2; } /* Module does not exist */ return ICE_ERR_DOES_NOT_EXIST; } /** * ice_read_pba_string - Reads part number string from NVM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the NVM * @pba_num_size: part number string buffer length * * Reads the part number string from the NVM.
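 *
 * Illustrative caller sketch (the buffer length and the device handle
 * dev are hypothetical):
 *
 *	u8 pba[32];
 *
 *	if (ice_read_pba_string(hw, pba, sizeof(pba)) == ICE_SUCCESS)
 *		device_printf(dev, "Part number: %s\n", pba);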
*/ enum ice_status ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size) { u16 pba_tlv, pba_tlv_len; enum ice_status status; u16 pba_word, pba_size; u16 i; status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len, ICE_SR_PBA_BLOCK_PTR); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n"); return status; } /* pba_size is the next word */ status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n"); return status; } if (pba_tlv_len < pba_size) { ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n"); return ICE_ERR_INVAL_SIZE; } /* Subtract one to get PBA word count (PBA Size word is included in * total size) */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n"); return ICE_ERR_PARAM; } for (i = 0; i < pba_size; i++) { status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i); return status; } pba_num[(i * 2)] = (pba_word >> 8) & 0xFF; pba_num[(i * 2) + 1] = pba_word & 0xFF; } pba_num[(pba_size * 2)] = '\0'; return status; } /** * ice_get_nvm_srev - Read the security revision from the NVM CSS header * @hw: pointer to the HW struct * @bank: whether to read from the active or inactive flash bank * @srev: storage for security revision * * Read the security revision out of the CSS header of the active NVM module * bank. */ static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev) { enum ice_status status; u16 srev_l, srev_h; status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_L, &srev_l); if (status) return status; status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_H, &srev_h); if (status) return status; *srev = srev_h << 16 | srev_l; return ICE_SUCCESS; } /** * ice_get_nvm_ver_info - Read NVM version information * @hw: pointer to the HW struct * @bank: whether to read from the active or inactive flash bank * @nvm: pointer to NVM info structure * * Read the NVM EETRACK ID and map version of the main NVM image bank, filling * in the nvm info structure. */ static enum ice_status ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm) { u16 eetrack_lo, eetrack_hi, ver; enum ice_status status; status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read DEV starter version.\n"); return status; } nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT; nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT; status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK lo.\n"); return status; } status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_HI, &eetrack_hi); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK hi.\n"); return status; } nvm->eetrack = (eetrack_hi << 16) | eetrack_lo; status = ice_get_nvm_srev(hw, bank, &nvm->srev); if (status) ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM security revision.\n"); return ICE_SUCCESS; } /** * ice_get_inactive_nvm_ver - Read Option ROM version from the inactive bank * @hw: pointer to the HW structure * @nvm: storage for Option ROM version information * * Reads the NVM EETRACK ID, Map version, and security revision of the * inactive NVM bank. 
Used to access version data for a pending update that * has not yet been activated. */ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm) { return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm); } /** * ice_get_orom_srev - Read the security revision from the OROM CSS header * @hw: pointer to the HW struct * @bank: whether to read from active or inactive flash module * @srev: storage for security revision * * Read the security revision out of the CSS header of the active OROM module * bank. */ static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev) { enum ice_status status; u16 srev_l, srev_h; u32 css_start; if (hw->flash.banks.orom_size < ICE_NVM_OROM_TRAILER_LENGTH) { ice_debug(hw, ICE_DBG_NVM, "Unexpected Option ROM Size of %u\n", hw->flash.banks.orom_size); return ICE_ERR_CFG; } /* calculate how far into the Option ROM the CSS header starts. Note * that ice_read_orom_module takes a word offset so we need to * divide by 2 here. */ css_start = (hw->flash.banks.orom_size - ICE_NVM_OROM_TRAILER_LENGTH) / 2; status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_L, &srev_l); if (status) return status; status = ice_read_orom_module(hw, bank, css_start + ICE_NVM_CSS_SREV_H, &srev_h); if (status) return status; *srev = srev_h << 16 | srev_l; return ICE_SUCCESS; } /** * ice_get_orom_civd_data - Get the combo version information from Option ROM * @hw: pointer to the HW struct * @bank: whether to read from the active or inactive flash module * @civd: storage for the Option ROM CIVD data. * * Searches through the Option ROM flash contents to locate the CIVD data for * the image. */ static enum ice_status ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_civd_info *civd) { struct ice_orom_civd_info tmp; enum ice_status status; u32 offset; /* The CIVD section is located in the Option ROM aligned to 512 bytes. * The first 4 bytes must contain the ASCII characters "$CIV". * A simple modulo 256 sum of all of the bytes of the structure must * equal 0. */ for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) { u8 sum = 0, i; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset, (u8 *)&tmp, sizeof(tmp)); if (status) { ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n"); return status; } /* Skip forward until we find a matching signature */ if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0) continue; /* Verify that the simple checksum is zero */ for (i = 0; i < sizeof(tmp); i++) + /* cppcheck-suppress objectIndex */ sum += ((u8 *)&tmp)[i]; if (sum) { ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n", sum); return ICE_ERR_NVM; } *civd = tmp; return ICE_SUCCESS; } return ICE_ERR_NVM; } /** * ice_get_orom_ver_info - Read Option ROM version information * @hw: pointer to the HW struct * @bank: whether to read from the active or inactive flash module * @orom: pointer to Option ROM info structure * * Read Option ROM version and security revision from the Option ROM flash * section. 
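 *
 * As a worked example, assuming the customary field layout (major
 * version in the top byte, 16-bit build number, patch level in the low
 * byte), a combo_ver of 0x01234567 would decode to major 0x01, build
 * 0x2345, and patch 0x67.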
*/ static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom) { struct ice_orom_civd_info civd; enum ice_status status; u32 combo_ver; status = ice_get_orom_civd_data(hw, bank, &civd); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to locate valid Option ROM CIVD data\n"); return status; } combo_ver = LE32_TO_CPU(civd.combo_ver); orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT); orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK); orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT); status = ice_get_orom_srev(hw, bank, &orom->srev); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read Option ROM security revision.\n"); return status; } return ICE_SUCCESS; } /** * ice_get_inactive_orom_ver - Read Option ROM version from the inactive bank * @hw: pointer to the HW structure * @orom: storage for Option ROM version information * * Reads the Option ROM version and security revision data for the inactive * section of flash. Used to access version data for a pending update that has * not yet been activated. */ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom) { return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom); } /** * ice_get_netlist_info * @hw: pointer to the HW struct * @bank: whether to read from the active or inactive flash bank * @netlist: pointer to netlist version info structure * * Get the netlist version information from the requested bank. Reads the Link * Topology section to find the Netlist ID block and extract the relevant * information into the netlist version structure. */ static enum ice_status ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_netlist_info *netlist) { u16 module_id, length, node_count, i; enum ice_status status; u16 *id_blk; status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id); if (status) return status; if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) { ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n", ICE_NETLIST_LINK_TOPO_MOD_ID, module_id); return ICE_ERR_NVM; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length); if (status) return status; /* sanity check that we have at least enough words to store the netlist ID block */ if (length < ICE_NETLIST_ID_BLK_SIZE) { ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n", ICE_NETLIST_ID_BLK_SIZE, length); return ICE_ERR_NVM; } status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count); if (status) return status; node_count &= ICE_LINK_TOPO_NODE_COUNT_M; id_blk = (u16 *)ice_calloc(hw, ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk)); if (!id_blk) return ICE_ERR_NO_MEMORY; /* Read out the entire Netlist ID Block at once. 
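 * A single buffered read is used because each call to
 * ice_read_flash_module() acquires and releases the NVM resource;
 * reading the block word-by-word would repeat that handshake
 * ICE_NETLIST_ID_BLK_SIZE times.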
*/ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, ICE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16), (u8 *)id_blk, ICE_NETLIST_ID_BLK_SIZE * sizeof(u16)); if (status) goto exit_error; for (i = 0; i < ICE_NETLIST_ID_BLK_SIZE; i++) id_blk[i] = LE16_TO_CPU(((_FORCE_ __le16 *)id_blk)[i]); netlist->major = id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 | id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_LOW]; netlist->minor = id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 | id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_LOW]; netlist->type = id_blk[ICE_NETLIST_ID_BLK_TYPE_HIGH] << 16 | id_blk[ICE_NETLIST_ID_BLK_TYPE_LOW]; netlist->rev = id_blk[ICE_NETLIST_ID_BLK_REV_HIGH] << 16 | id_blk[ICE_NETLIST_ID_BLK_REV_LOW]; netlist->cust_ver = id_blk[ICE_NETLIST_ID_BLK_CUST_VER]; /* Read the left most 4 bytes of SHA */ netlist->hash = id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 | id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(14)]; exit_error: ice_free(hw, id_blk); return status; } /** * ice_get_netlist_ver_info * @hw: pointer to the HW struct * @netlist: pointer to netlist version info structure * * Get the netlist version information */ enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist) { return ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, netlist); } /** * ice_get_inactive_netlist_ver * @hw: pointer to the HW struct * @netlist: pointer to netlist version info structure * * Read the netlist version data from the inactive netlist bank. Used to * extract version data of a pending flash update in order to display the * version data. */ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist) { return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist); } /** * ice_discover_flash_size - Discover the available flash size. * @hw: pointer to the HW struct * * The device flash could be up to 16MB in size. However, it is possible that * the actual size is smaller. Use bisection to determine the accessible size * of flash memory. */ static enum ice_status ice_discover_flash_size(struct ice_hw *hw) { u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; while ((max_size - min_size) > 1) { u32 offset = (max_size + min_size) / 2; u32 len = 1; u8 data; status = ice_read_flat_nvm(hw, offset, &len, &data, false); if (status == ICE_ERR_AQ_ERROR && hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) { ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n", __func__, offset); status = ICE_SUCCESS; max_size = offset; } else if (!status) { ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n", __func__, offset); min_size = offset; } else { /* an unexpected error occurred */ goto err_read_flat_nvm; } } ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size); hw->flash.flash_size = max_size; err_read_flat_nvm: ice_release_nvm(hw); return status; } /** * ice_read_sr_pointer - Read the value of a Shadow RAM pointer word * @hw: pointer to the HW structure * @offset: the word offset of the Shadow RAM word to read * @pointer: pointer value read from Shadow RAM * * Read the given Shadow RAM word, and convert it to a pointer value specified * in bytes. This function assumes the specified offset is a valid pointer * word. * * Each pointer word specifies whether it is stored in word size or 4KB * sector size by using the highest bit. 
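 * For example, a pointer word of 0x8002 has the high bit set and thus
 * denotes 2 sectors, i.e. byte offset 2 * 4096 = 8192, while 0x0100
 * denotes word offset 0x100, i.e. byte offset 512.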
The reported pointer value will be in * bytes, intended for flat NVM reads. */ static enum ice_status ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer) { enum ice_status status; u16 value; status = ice_read_sr_word(hw, offset, &value); if (status) return status; /* Determine if the pointer is in 4KB or word units */ if (value & ICE_SR_NVM_PTR_4KB_UNITS) *pointer = (value & ~ICE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024; else *pointer = value * 2; return ICE_SUCCESS; } /** * ice_read_sr_area_size - Read an area size from a Shadow RAM word * @hw: pointer to the HW structure * @offset: the word offset of the Shadow RAM to read * @size: size value read from the Shadow RAM * * Read the given Shadow RAM word, and convert it to an area size value * specified in bytes. This function assumes the specified offset is a valid * area size word. * * Each area size word is specified in 4KB sector units. This function reports * the size in bytes, intended for flat NVM reads. */ static enum ice_status ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size) { enum ice_status status; u16 value; status = ice_read_sr_word(hw, offset, &value); if (status) return status; /* Area sizes are always specified in 4KB units */ *size = value * 4 * 1024; return ICE_SUCCESS; } /** * ice_determine_active_flash_banks - Discover active bank for each module * @hw: pointer to the HW struct * * Read the Shadow RAM control word and determine which banks are active for * the NVM, OROM, and Netlist modules. Also read and calculate the associated * pointer and size. These values are then cached into the ice_flash_info * structure for later use in order to calculate the correct offset to read * from the active module. */ static enum ice_status ice_determine_active_flash_banks(struct ice_hw *hw) { struct ice_bank_info *banks = &hw->flash.banks; enum ice_status status; u16 ctrl_word; status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read the Shadow RAM control word\n"); return status; } /* Check that the control word indicates validity */ if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) { ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n"); return ICE_ERR_CFG; } if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK)) banks->nvm_bank = ICE_1ST_FLASH_BANK; else banks->nvm_bank = ICE_2ND_FLASH_BANK; if (!(ctrl_word & ICE_SR_CTRL_WORD_OROM_BANK)) banks->orom_bank = ICE_1ST_FLASH_BANK; else banks->orom_bank = ICE_2ND_FLASH_BANK; if (!(ctrl_word & ICE_SR_CTRL_WORD_NETLIST_BANK)) banks->netlist_bank = ICE_1ST_FLASH_BANK; else banks->netlist_bank = ICE_2ND_FLASH_BANK; status = ice_read_sr_pointer(hw, ICE_SR_1ST_NVM_BANK_PTR, &banks->nvm_ptr); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank pointer\n"); return status; } status = ice_read_sr_area_size(hw, ICE_SR_NVM_BANK_SIZE, &banks->nvm_size); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank area size\n"); return status; } status = ice_read_sr_pointer(hw, ICE_SR_1ST_OROM_BANK_PTR, &banks->orom_ptr); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank pointer\n"); return status; } status = ice_read_sr_area_size(hw, ICE_SR_OROM_BANK_SIZE, &banks->orom_size); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank area size\n"); return status; } status = ice_read_sr_pointer(hw, ICE_SR_NETLIST_BANK_PTR, &banks->netlist_ptr); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank pointer\n"); 
return status; } status = ice_read_sr_area_size(hw, ICE_SR_NETLIST_BANK_SIZE, &banks->netlist_size); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank area size\n"); return status; } return ICE_SUCCESS; } /** * ice_init_nvm - initializes NVM settings * @hw: pointer to the HW struct * * This function reads and populates NVM settings such as Shadow RAM size, * max_timeout, and blank_nvm_mode */ enum ice_status ice_init_nvm(struct ice_hw *hw) { struct ice_flash_info *flash = &hw->flash; enum ice_status status; u32 fla, gens_stat; u8 sr_size; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* The SR size is stored regardless of the NVM programming mode * as the blank mode may be used in the factory line. */ gens_stat = rd32(hw, GLNVM_GENS); sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S; /* Switching to words (sr_size contains power of 2) */ flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB; /* Check if we are in the normal or blank NVM programming mode */ fla = rd32(hw, GLNVM_FLA); if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */ flash->blank_nvm_mode = false; } else { /* Blank programming mode */ flash->blank_nvm_mode = true; ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n"); return ICE_ERR_NVM_BLANK_MODE; } status = ice_discover_flash_size(hw); if (status) { ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n"); return status; } status = ice_determine_active_flash_banks(hw); if (status) { ice_debug(hw, ICE_DBG_NVM, "Failed to determine active flash banks.\n"); return status; } status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n"); return status; } status = ice_get_orom_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->orom); if (status) ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n"); /* read the netlist version information */ status = ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->netlist); if (status) ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n"); return ICE_SUCCESS; } /** * ice_read_sr_buf - Reads Shadow RAM buf and acquires lock if necessary * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @words: (in) number of words to read; (out) number of words actually read * @data: words read from the Shadow RAM * * Reads 16 bit words (data buf) from the SR using the ice_read_sr_buf_aq * method. The buffer read is preceded by taking NVM ownership * and followed by its release. */ enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) { enum ice_status status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { status = ice_read_sr_buf_aq(hw, offset, words, data); ice_release_nvm(hw); } return status; } /** * __ice_write_sr_word - Writes Shadow RAM word * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to write * @data: word to write to the Shadow RAM * * Writes a 16 bit word to the SR using the ice_write_sr_aq method. * NVM ownership has to be acquired and released (on ARQ completion event * reception) by the caller. To commit the SR to the NVM, the checksum update * function should be called.
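 *
 * Illustrative calling sequence (error handling and the ARQ completion
 * handshake are elided; offset and the value written are hypothetical):
 *
 *	u16 val = 0x1234;
 *
 *	if (!ice_acquire_nvm(hw, ICE_RES_WRITE)) {
 *		__ice_write_sr_word(hw, offset, &val);
 *		ice_update_sr_checksum(hw);
 *		ice_release_nvm(hw);
 *	}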
*/ enum ice_status __ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data) { __le16 data_local = CPU_TO_LE16(*data); ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Value 0x00 below means that we treat SR as a flat mem */ return ice_write_sr_aq(hw, offset, 1, &data_local, false); } /** * __ice_write_sr_buf - Writes Shadow RAM buf * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM buffer to write * @words: number of words to write * @data: words to write to the Shadow RAM * * Writes a 16 bit words buffer to the Shadow RAM using the admin command. * NVM ownership must be acquired before calling this function and released * on ARQ completion event reception by caller. To commit SR to NVM update * checksum function should be called. */ enum ice_status __ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data) { enum ice_status status; __le16 *data_local; void *vmem; u32 i; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); vmem = ice_calloc(hw, words, sizeof(u16)); if (!vmem) return ICE_ERR_NO_MEMORY; data_local = (_FORCE_ __le16 *)vmem; for (i = 0; i < words; i++) data_local[i] = CPU_TO_LE16(data[i]); /* Here we will only write one buffer as the size of the modules * mirrored in the Shadow RAM is always less than 4K. */ status = ice_write_sr_aq(hw, offset, words, data_local, false); ice_free(hw, vmem); return status; } /** * ice_calc_sr_checksum - Calculates and returns Shadow RAM SW checksum * @hw: pointer to hardware structure * @checksum: pointer to the checksum * * This function calculates SW Checksum that covers the whole 64kB shadow RAM * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD * is customer specific and unknown. Therefore, this function skips all maximum * possible size of VPD (1kB). 
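 *
 * The stored checksum is ICE_SR_SW_CHECKSUM_BASE minus the 16-bit sum
 * of the covered words, so adding the checksum word back to that sum
 * yields ICE_SR_SW_CHECKSUM_BASE modulo 2^16.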
*/ static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum) { enum ice_status status = ICE_SUCCESS; u16 pcie_alt_module = 0; u16 checksum_local = 0; u16 vpd_module; void *vmem; u16 *data; u16 i; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); vmem = ice_calloc(hw, ICE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16)); if (!vmem) return ICE_ERR_NO_MEMORY; data = (u16 *)vmem; /* read pointer to VPD area */ status = ice_read_sr_word_aq(hw, ICE_SR_VPD_PTR, &vpd_module); if (status) goto ice_calc_sr_checksum_exit; /* read pointer to PCIe Alt Auto-load module */ status = ice_read_sr_word_aq(hw, ICE_SR_PCIE_ALT_AUTO_LOAD_PTR, &pcie_alt_module); if (status) goto ice_calc_sr_checksum_exit; /* Calculate SW checksum that covers the whole 64kB shadow RAM * except the VPD and PCIe ALT Auto-load modules */ for (i = 0; i < hw->flash.sr_words; i++) { /* Read SR page */ if ((i % ICE_SR_SECTOR_SIZE_IN_WORDS) == 0) { u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS; status = ice_read_sr_buf_aq(hw, i, &words, data); if (status != ICE_SUCCESS) goto ice_calc_sr_checksum_exit; } /* Skip Checksum word */ if (i == ICE_SR_SW_CHECKSUM_WORD) continue; /* Skip VPD module (convert byte size to word count) */ if (i >= (u32)vpd_module && i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS)) continue; /* Skip PCIe ALT module (convert byte size to word count) */ if (i >= (u32)pcie_alt_module && i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS)) continue; checksum_local += data[i % ICE_SR_SECTOR_SIZE_IN_WORDS]; } *checksum = (u16)ICE_SR_SW_CHECKSUM_BASE - checksum_local; ice_calc_sr_checksum_exit: ice_free(hw, vmem); return status; } /** * ice_update_sr_checksum - Updates the Shadow RAM SW checksum * @hw: pointer to hardware structure * * NVM ownership must be acquired before calling this function and released * on ARQ completion event reception by caller. * This function will commit SR to NVM. */ enum ice_status ice_update_sr_checksum(struct ice_hw *hw) { enum ice_status status; __le16 le_sum; u16 checksum; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_calc_sr_checksum(hw, &checksum); if (!status) { le_sum = CPU_TO_LE16(checksum); status = ice_write_sr_aq(hw, ICE_SR_SW_CHECKSUM_WORD, 1, &le_sum, true); } return status; } /** * ice_validate_sr_checksum - Validate Shadow RAM SW checksum * @hw: pointer to hardware structure * @checksum: calculated checksum * * Performs checksum calculation and validates the Shadow RAM SW checksum. * If the caller does not need checksum, the value can be NULL. 
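 *
 * Illustrative caller sketch (the device handle dev is hypothetical):
 *
 *	u16 sum;
 *
 *	if (ice_validate_sr_checksum(hw, &sum) == ICE_ERR_NVM_CHECKSUM)
 *		device_printf(dev, "bad SR checksum, calculated 0x%04x\n",
 *		    sum);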
*/ enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum) { enum ice_status status; u16 checksum_local; u16 checksum_sr; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_acquire_nvm(hw, ICE_RES_READ); if (!status) { status = ice_calc_sr_checksum(hw, &checksum_local); ice_release_nvm(hw); if (status) return status; } else { return status; } ice_read_sr_word(hw, ICE_SR_SW_CHECKSUM_WORD, &checksum_sr); /* Verify read checksum from EEPROM is the same as * calculated checksum */ if (checksum_local != checksum_sr) status = ICE_ERR_NVM_CHECKSUM; /* If the user cares, return the calculated checksum */ if (checksum) *checksum = checksum_local; return status; } /** * ice_nvm_validate_checksum * @hw: pointer to the HW struct * * Verify NVM PFA checksum validity (0x0706) */ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) { struct ice_aqc_nvm_checksum *cmd; struct ice_aq_desc desc; enum ice_status status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; cmd = &desc.params.nvm_checksum; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum); cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY; status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); ice_release_nvm(hw); if (!status) if (LE16_TO_CPU(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT) status = ICE_ERR_NVM_CHECKSUM; return status; } /** * ice_nvm_recalculate_checksum * @hw: pointer to the HW struct * * Recalculate NVM PFA checksum (0x0706) */ enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw) { struct ice_aqc_nvm_checksum *cmd; struct ice_aq_desc desc; enum ice_status status; status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; cmd = &desc.params.nvm_checksum; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum); cmd->flags = ICE_AQC_NVM_CHECKSUM_RECALC; status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); ice_release_nvm(hw); return status; } /** * ice_nvm_write_activate * @hw: pointer to the HW struct * @cmd_flags: NVM activate admin command bits (banks to be validated) * * Update the control word with the required banks' validity bits * and dumps the Shadow RAM to flash (0x0707) */ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) { struct ice_aqc_nvm *cmd; struct ice_aq_desc desc; cmd = &desc.params.nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate); cmd->cmd_flags = cmd_flags; return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /** * ice_get_nvm_minsrevs - Get the Minimum Security Revision values from flash * @hw: pointer to the HW struct * @minsrevs: structure to store NVM and OROM minsrev values * * Read the Minimum Security Revision TLV and extract the revision values from * the flash image into a readable structure for processing. 
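 *
 * Illustrative caller sketch (the device handle dev is hypothetical):
 *
 *	struct ice_minsrev_info msr = { 0 };
 *
 *	if (!ice_get_nvm_minsrevs(hw, &msr) && msr.nvm_valid)
 *		device_printf(dev, "NVM minimum srev: %u\n", msr.nvm);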
*/ enum ice_status ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs) { struct ice_aqc_nvm_minsrev data; enum ice_status status; u16 valid; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_acquire_nvm(hw, ICE_RES_READ); if (status) return status; status = ice_aq_read_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data, true, false, NULL); ice_release_nvm(hw); if (status) return status; valid = LE16_TO_CPU(data.validity); /* Extract NVM minimum security revision */ if (valid & ICE_AQC_NVM_MINSREV_NVM_VALID) { u16 minsrev_l, minsrev_h; minsrev_l = LE16_TO_CPU(data.nvm_minsrev_l); minsrev_h = LE16_TO_CPU(data.nvm_minsrev_h); minsrevs->nvm = minsrev_h << 16 | minsrev_l; minsrevs->nvm_valid = true; } /* Extract the OROM minimum security revision */ if (valid & ICE_AQC_NVM_MINSREV_OROM_VALID) { u16 minsrev_l, minsrev_h; minsrev_l = LE16_TO_CPU(data.orom_minsrev_l); minsrev_h = LE16_TO_CPU(data.orom_minsrev_h); minsrevs->orom = minsrev_h << 16 | minsrev_l; minsrevs->orom_valid = true; } return ICE_SUCCESS; } /** * ice_update_nvm_minsrevs - Update minimum security revision TLV data in flash * @hw: pointer to the HW struct * @minsrevs: minimum security revision information * * Update the NVM or Option ROM minimum security revision fields in the PFA * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid * fields to determine what update is being requested. If the valid bit is not * set for that module, then the associated minsrev will be left as is. */ enum ice_status ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs) { struct ice_aqc_nvm_minsrev data; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (!minsrevs->nvm_valid && !minsrevs->orom_valid) { ice_debug(hw, ICE_DBG_NVM, "At least one of NVM and OROM MinSrev must be valid"); return ICE_ERR_PARAM; } status = ice_acquire_nvm(hw, ICE_RES_WRITE); if (status) return status; /* Get current data */ status = ice_aq_read_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data, true, false, NULL); if (status) goto exit_release_res; if (minsrevs->nvm_valid) { data.nvm_minsrev_l = CPU_TO_LE16(minsrevs->nvm & 0xFFFF); data.nvm_minsrev_h = CPU_TO_LE16(minsrevs->nvm >> 16); data.validity |= CPU_TO_LE16(ICE_AQC_NVM_MINSREV_NVM_VALID); } if (minsrevs->orom_valid) { data.orom_minsrev_l = CPU_TO_LE16(minsrevs->orom & 0xFFFF); data.orom_minsrev_h = CPU_TO_LE16(minsrevs->orom >> 16); data.validity |= CPU_TO_LE16(ICE_AQC_NVM_MINSREV_OROM_VALID); } /* Update flash data */ status = ice_aq_update_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data, true, ICE_AQC_NVM_SPECIAL_UPDATE, NULL); if (status) goto exit_release_res; /* Dump the Shadow RAM to the flash */ status = ice_nvm_write_activate(hw, 0); exit_release_res: ice_release_nvm(hw); return status; } /** * ice_nvm_access_get_features - Return the NVM access features structure * @cmd: NVM access command to process * @data: storage for the driver NVM features * * Fill in the data section of the NVM access request with a copy of the NVM * features structure. */ enum ice_status ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data) { /* The provided data_size must be at least as large as our NVM * features structure. A larger size should not be treated as an * error, to allow future extensions to the features structure to * work on older drivers. 
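 * The check below is therefore a strict less-than: any data_size
 * greater than or equal to sizeof(struct ice_nvm_features) is
 * accepted.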
*/ if (cmd->data_size < sizeof(struct ice_nvm_features)) return ICE_ERR_NO_MEMORY; /* Initialize the data buffer to zeros */ ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM); /* Fill in the features data */ data->drv_features.major = ICE_NVM_ACCESS_MAJOR_VER; data->drv_features.minor = ICE_NVM_ACCESS_MINOR_VER; data->drv_features.size = sizeof(struct ice_nvm_features); data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS; return ICE_SUCCESS; } /** * ice_nvm_access_get_module - Helper function to read module value * @cmd: NVM access command structure * * Reads the module value out of the NVM access config field. */ u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd) { return ((cmd->config & ICE_NVM_CFG_MODULE_M) >> ICE_NVM_CFG_MODULE_S); } /** * ice_nvm_access_get_flags - Helper function to read flags value * @cmd: NVM access command structure * * Reads the flags value out of the NVM access config field. */ u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd) { return ((cmd->config & ICE_NVM_CFG_FLAGS_M) >> ICE_NVM_CFG_FLAGS_S); } /** * ice_nvm_access_get_adapter - Helper function to read adapter info * @cmd: NVM access command structure * * Read the adapter info value out of the NVM access config field. */ u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd) { return ((cmd->config & ICE_NVM_CFG_ADAPTER_INFO_M) >> ICE_NVM_CFG_ADAPTER_INFO_S); } /** * ice_validate_nvm_rw_reg - Check that an NVM access request is valid * @cmd: NVM access command structure * * Validates that an NVM access structure is a request to read or write a valid * register offset. First validates that the module and flags are correct, and * then ensures that the register offset is one of the accepted registers. */ static enum ice_status ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd) { u32 module, flags, offset; u16 i; module = ice_nvm_access_get_module(cmd); flags = ice_nvm_access_get_flags(cmd); offset = cmd->offset; /* Make sure the module and flags indicate a read/write request */ if (module != ICE_NVM_REG_RW_MODULE || flags != ICE_NVM_REG_RW_FLAGS || cmd->data_size != FIELD_SIZEOF(union ice_nvm_access_data, regval)) return ICE_ERR_PARAM; switch (offset) { case GL_HICR: case GL_HICR_EN: /* Note, this register is read only */ case GL_FWSTS: case GL_MNG_FWSM: case GLGEN_CSR_DEBUG_C: case GLGEN_RSTAT: case GLPCI_LBARCTRL: case GLNVM_GENS: case GLNVM_FLA: case PF_FUNC_RID: return ICE_SUCCESS; default: break; } for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++) if (offset == (u32)GL_HIDA(i)) return ICE_SUCCESS; for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++) if (offset == (u32)GL_HIBA(i)) return ICE_SUCCESS; /* All other register offsets are not valid */ return ICE_ERR_OUT_OF_RANGE; } /** * ice_nvm_access_read - Handle an NVM read request * @hw: pointer to the HW struct * @cmd: NVM access command to process * @data: storage for the register value read * * Process an NVM access request to read a register.
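 *
 * Illustrative request setup; in practice an ioctl handler fills the
 * command in from a user buffer, and my_encode_config() is a
 * hypothetical helper that packs the module, flags, and adapter-info
 * fields using the ICE_NVM_CFG_* shifts and masks:
 *
 *	struct ice_nvm_access_cmd cmd = { 0 };
 *	union ice_nvm_access_data data;
 *	enum ice_status status;
 *
 *	cmd.command = ICE_NVM_CMD_READ;
 *	cmd.offset = GLNVM_GENS;
 *	cmd.data_size = sizeof(data.regval);
 *	cmd.config = my_encode_config(ICE_NVM_REG_RW_MODULE,
 *				      ICE_NVM_REG_RW_FLAGS,
 *				      hw->device_id);
 *	status = ice_handle_nvm_access(hw, &cmd, &data);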
*/ enum ice_status ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data) { enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Always initialize the output data, even on failure */ ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM); /* Make sure this is a valid read/write access request */ status = ice_validate_nvm_rw_reg(cmd); if (status) return status; ice_debug(hw, ICE_DBG_NVM, "NVM access: reading register %08x\n", cmd->offset); /* Read the register and store the contents in the data field */ data->regval = rd32(hw, cmd->offset); return ICE_SUCCESS; } /** * ice_nvm_access_write - Handle an NVM write request * @hw: pointer to the HW struct * @cmd: NVM access command to process * @data: NVM access data to write * * Process an NVM access request to write a register. */ enum ice_status ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data) { enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Make sure this is a valid read/write access request */ status = ice_validate_nvm_rw_reg(cmd); if (status) return status; /* Reject requests to write to read-only registers */ switch (cmd->offset) { case GL_HICR_EN: case GLGEN_RSTAT: return ICE_ERR_OUT_OF_RANGE; default: break; } ice_debug(hw, ICE_DBG_NVM, "NVM access: writing register %08x with value %08x\n", cmd->offset, data->regval); /* Write the data field to the specified register */ wr32(hw, cmd->offset, data->regval); return ICE_SUCCESS; } /** * ice_handle_nvm_access - Handle an NVM access request * @hw: pointer to the HW struct * @cmd: NVM access command info * @data: pointer to read or return data * * Process an NVM access request. Read the command structure information and * determine if it is valid. If not, report an error indicating the command * was invalid. * * For valid commands, perform the necessary function, copying the data into * the provided data buffer. */ enum ice_status ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd, union ice_nvm_access_data *data) { u32 module, flags, adapter_info; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Extended flags are currently reserved and must be zero */ if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0) return ICE_ERR_PARAM; /* Adapter info must match the HW device ID */ adapter_info = ice_nvm_access_get_adapter(cmd); if (adapter_info != hw->device_id) return ICE_ERR_PARAM; switch (cmd->command) { case ICE_NVM_CMD_READ: module = ice_nvm_access_get_module(cmd); flags = ice_nvm_access_get_flags(cmd); /* Getting the driver's NVM features structure shares the same * command type as reading a register. Read the config field * to determine if this is a request to get features. */ if (module == ICE_NVM_GET_FEATURES_MODULE && flags == ICE_NVM_GET_FEATURES_FLAGS && cmd->offset == 0) return ice_nvm_access_get_features(cmd, data); else return ice_nvm_access_read(hw, cmd, data); case ICE_NVM_CMD_WRITE: return ice_nvm_access_write(hw, cmd, data); default: return ICE_ERR_PARAM; } } diff --git a/sys/dev/ice/ice_protocol_type.h b/sys/dev/ice/ice_protocol_type.h index b6967a5b7fb7..94e9aad5a5c1 100644 --- a/sys/dev/ice/ice_protocol_type.h +++ b/sys/dev/ice/ice_protocol_type.h @@ -1,336 +1,340 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_PROTOCOL_TYPE_H_ #define _ICE_PROTOCOL_TYPE_H_ #include "ice_flex_type.h" #define ICE_IPV6_ADDR_LENGTH 16 /* Each recipe can match up to 5 different fields. Fields to match can be meta- * data, values extracted from packet headers, or results from other recipes. * One of the 5 fields is reserved for matching the switch ID. So, up to 4 * recipes can provide intermediate results to another one through chaining, * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4. */ #define ICE_NUM_WORDS_RECIPE 4 /* Max recipes that can be chained */ #define ICE_MAX_CHAIN_RECIPE 5 /* 1 word reserved for switch ID from allowed 5 words. * So a recipe can have max 4 words. And you can chain 5 such recipes * together. So maximum words that can be programmed for look up is 5 * 4. 
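* That is, ICE_MAX_CHAIN_WORDS = ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE = 4 * 5 = 20 words available to a chained lookup.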
*/ #define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE) /* Field vector index corresponding to chaining */ #define ICE_CHAIN_FV_INDEX_START 47 enum ice_protocol_type { ICE_MAC_OFOS = 0, ICE_MAC_IL, ICE_ETYPE_OL, ICE_VLAN_OFOS, ICE_IPV4_OFOS, ICE_IPV4_IL, ICE_IPV6_OFOS, ICE_IPV6_IL, ICE_TCP_IL, ICE_UDP_OF, ICE_UDP_ILOS, ICE_SCTP_IL, ICE_VXLAN, ICE_GENEVE, ICE_VXLAN_GPE, ICE_NVGRE, ICE_GTP, ICE_PROTOCOL_LAST }; enum ice_sw_tunnel_type { ICE_NON_TUN = 0, ICE_SW_TUN_AND_NON_TUN, ICE_SW_TUN_VXLAN_GPE, ICE_SW_TUN_GENEVE, /* GENEVE matches only non-VLAN pkts */ ICE_SW_TUN_GENEVE_VLAN, /* GENEVE matches both VLAN and non-VLAN pkts */ ICE_SW_TUN_VXLAN, /* VXLAN matches only non-VLAN pkts */ ICE_SW_TUN_VXLAN_VLAN, /* VXLAN matches both VLAN and non-VLAN pkts */ ICE_SW_TUN_NVGRE, ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN * and GENEVE */ ICE_SW_TUN_IPV4_GTP_IPV4_TCP, ICE_SW_TUN_IPV4_GTP_IPV4_UDP, ICE_SW_TUN_IPV4_GTP_IPV6_TCP, ICE_SW_TUN_IPV4_GTP_IPV6_UDP, ICE_SW_TUN_IPV6_GTP_IPV4_TCP, ICE_SW_TUN_IPV6_GTP_IPV4_UDP, ICE_SW_TUN_IPV6_GTP_IPV6_TCP, ICE_SW_TUN_IPV6_GTP_IPV6_UDP, + ICE_SW_TUN_IPV4_GTPU_IPV4, + ICE_SW_TUN_IPV4_GTPU_IPV6, + ICE_SW_TUN_IPV6_GTPU_IPV4, + ICE_SW_TUN_IPV6_GTPU_IPV6, ICE_ALL_TUNNELS /* All tunnel types including NVGRE */ }; /* Decoders for ice_prot_id: * - F: First * - I: Inner * - L: Last * - O: Outer * - S: Single */ enum ice_prot_id { ICE_PROT_ID_INVAL = 0, ICE_PROT_MAC_OF_OR_S = 1, ICE_PROT_MAC_O2 = 2, ICE_PROT_MAC_IL = 4, ICE_PROT_MAC_IN_MAC = 7, ICE_PROT_ETYPE_OL = 9, ICE_PROT_ETYPE_IL = 10, ICE_PROT_PAY = 15, ICE_PROT_EVLAN_O = 16, ICE_PROT_VLAN_O = 17, ICE_PROT_VLAN_IF = 18, ICE_PROT_MPLS_OL_MINUS_1 = 27, ICE_PROT_MPLS_OL_OR_OS = 28, ICE_PROT_MPLS_IL = 29, ICE_PROT_IPV4_OF_OR_S = 32, ICE_PROT_IPV4_IL = 33, ICE_PROT_IPV6_OF_OR_S = 40, ICE_PROT_IPV6_IL = 41, ICE_PROT_IPV6_FRAG = 47, ICE_PROT_TCP_IL = 49, ICE_PROT_UDP_OF = 52, ICE_PROT_UDP_IL_OR_S = 53, ICE_PROT_GRE_OF = 64, ICE_PROT_NSH_F = 84, ICE_PROT_ESP_F = 88, ICE_PROT_ESP_2 = 89, ICE_PROT_SCTP_IL = 96, ICE_PROT_ICMP_IL = 98, ICE_PROT_ICMPV6_IL = 100, ICE_PROT_VRRP_F = 101, ICE_PROT_OSPF = 102, ICE_PROT_ATAOE_OF = 114, ICE_PROT_CTRL_OF = 116, ICE_PROT_LLDP_OF = 117, ICE_PROT_ARP_OF = 118, ICE_PROT_EAPOL_OF = 120, ICE_PROT_META_ID = 255, /* when offset == metadata */ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */ }; #define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */ #define ICE_MAC_OFOS_HW 1 #define ICE_MAC_IL_HW 4 #define ICE_ETYPE_OL_HW 9 #define ICE_VLAN_OF_HW 16 #define ICE_VLAN_OL_HW 17 #define ICE_IPV4_OFOS_HW 32 #define ICE_IPV4_IL_HW 33 #define ICE_IPV6_OFOS_HW 40 #define ICE_IPV6_IL_HW 41 #define ICE_TCP_IL_HW 49 #define ICE_UDP_ILOS_HW 53 #define ICE_SCTP_IL_HW 96 /* ICE_UDP_OF is used to identify all 3 tunnel types * VXLAN, GENEVE and VXLAN_GPE.
To differentiate further, * flags from the field vector need to be used */ #define ICE_UDP_OF_HW 52 /* UDP Tunnels */ #define ICE_GRE_OF_HW 64 /* NVGRE */ #define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */ #define ICE_MDID_SIZE 2 #define ICE_TUN_FLAG_MDID 21 #define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID) #define ICE_TUN_FLAG_MASK 0xFF #define ICE_TUN_FLAG_VLAN_MASK 0x01 #define ICE_TUN_FLAG_FV_IND 2 #define ICE_PROTOCOL_MAX_ENTRIES 16 /* Mapping of software defined protocol ID to hardware defined protocol ID */ struct ice_protocol_entry { enum ice_protocol_type type; u8 protocol_id; }; struct ice_ether_hdr { u8 dst_addr[ETH_ALEN]; u8 src_addr[ETH_ALEN]; }; struct ice_ethtype_hdr { __be16 ethtype_id; }; struct ice_ether_vlan_hdr { u8 dst_addr[ETH_ALEN]; u8 src_addr[ETH_ALEN]; __be32 vlan_id; }; struct ice_vlan_hdr { __be16 vlan; __be16 type; }; struct ice_ipv4_hdr { u8 version; u8 tos; __be16 total_length; __be16 id; __be16 frag_off; u8 time_to_live; u8 protocol; __be16 check; __be32 src_addr; __be32 dst_addr; }; struct ice_le_ver_tc_flow { union { struct { u32 flow_label : 20; u32 tc : 8; u32 version : 4; } fld; u32 val; } u; }; struct ice_ipv6_hdr { __be32 be_ver_tc_flow; __be16 payload_len; u8 next_hdr; u8 hop_limit; u8 src_addr[ICE_IPV6_ADDR_LENGTH]; u8 dst_addr[ICE_IPV6_ADDR_LENGTH]; }; struct ice_sctp_hdr { __be16 src_port; __be16 dst_port; __be32 verification_tag; __be32 check; }; struct ice_l4_hdr { __be16 src_port; __be16 dst_port; __be16 len; __be16 check; }; struct ice_udp_tnl_hdr { __be16 field; __be16 proto_type; __be32 vni; /* only use lower 24-bits */ }; struct ice_udp_gtp_hdr { u8 flags; u8 msg_type; __be16 rsrvd_len; __be32 teid; __be16 rsrvd_seq_nbr; u8 rsrvd_n_pdu_nbr; u8 rsrvd_next_ext; u8 rsvrd_ext_len; u8 pdu_type; u8 qfi; u8 rsvrd; }; struct ice_nvgre { __be16 flags; __be16 protocol; __be32 tni_flow; }; union ice_prot_hdr { struct ice_ether_hdr eth_hdr; struct ice_ethtype_hdr ethertype; struct ice_vlan_hdr vlan_hdr; struct ice_ipv4_hdr ipv4_hdr; struct ice_ipv6_hdr ipv6_hdr; struct ice_l4_hdr l4_hdr; struct ice_sctp_hdr sctp_hdr; struct ice_udp_tnl_hdr tnl_hdr; struct ice_nvgre nvgre_hdr; struct ice_udp_gtp_hdr gtp_hdr; }; /* This is a mapping table entry that maps every word within a given protocol * structure to the real byte offset as per the specification of that * protocol header. * For example, the
dst address is 3 words in the Ethernet header, at byte offsets * 0, 2 and 4 in the actual packet header, and the src address is at 6, 8 and 10 */ struct ice_prot_ext_tbl_entry { enum ice_protocol_type prot_type; /* Byte offset into header of given protocol type */ u8 offs[sizeof(union ice_prot_hdr)]; }; /* Extractions to be looked up for a given recipe */ struct ice_prot_lkup_ext { u16 prot_type; u8 n_val_words; /* create a buffer to hold max words per recipe */ u16 field_off[ICE_MAX_CHAIN_WORDS]; u16 field_mask[ICE_MAX_CHAIN_WORDS]; struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS]; /* Indicate field offsets that have field vector indices assigned */ ice_declare_bitmap(done, ICE_MAX_CHAIN_WORDS); }; struct ice_pref_recipe_group { u8 n_val_pairs; /* Number of valid pairs */ struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE]; u16 mask[ICE_NUM_WORDS_RECIPE]; }; struct ice_recp_grp_entry { struct LIST_ENTRY_TYPE l_entry; #define ICE_INVAL_CHAIN_IND 0xFF u16 rid; u8 chain_idx; u16 fv_idx[ICE_NUM_WORDS_RECIPE]; u16 fv_mask[ICE_NUM_WORDS_RECIPE]; struct ice_pref_recipe_group r_group; }; #endif /* _ICE_PROTOCOL_TYPE_H_ */ diff --git a/sys/dev/ice/ice_sched.c b/sys/dev/ice/ice_sched.c index ed99f51b0080..ab9b888ceac0 100644 --- a/sys/dev/ice/ice_sched.c +++ b/sys/dev/ice/ice_sched.c @@ -1,5746 +1,5771 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "ice_sched.h" /** * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB * @pi: port information structure * @info: Scheduler element information from firmware * * This function inserts the root node of the scheduling tree topology * to the SW DB.
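* Note that the root sits at layer 0 of the tree, so its children array below is sized from hw->max_children[0], which is expected to have been filled by ice_sched_query_res_alloc() from the firmware's per-layer max sibling group sizes.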
*/ static enum ice_status ice_sched_add_root_node(struct ice_port_info *pi, struct ice_aqc_txsched_elem_data *info) { struct ice_sched_node *root; struct ice_hw *hw; if (!pi) return ICE_ERR_PARAM; hw = pi->hw; root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root)); if (!root) return ICE_ERR_NO_MEMORY; /* coverity[suspicious_sizeof] */ root->children = (struct ice_sched_node **) ice_calloc(hw, hw->max_children[0], sizeof(*root)); if (!root->children) { ice_free(hw, root); return ICE_ERR_NO_MEMORY; } ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA); pi->root = root; return ICE_SUCCESS; } /** * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB * @start_node: pointer to the starting ice_sched_node struct in a sub-tree * @teid: node TEID to search * * This function searches for a node matching the TEID in the scheduling tree * from the SW DB. The search is recursive and is restricted by the number of * layers it has searched through; stopping at the max supported layer. * * This function needs to be called when holding the port_info->sched_lock */ struct ice_sched_node * ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) { u16 i; /* The TEID is same as that of the start_node */ if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid) return start_node; /* The node has no children or is at the max layer */ if (!start_node->num_children || start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM || start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) return NULL; /* Check if TEID matches to any of the children nodes */ for (i = 0; i < start_node->num_children; i++) if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid) return start_node->children[i]; /* Search within each child's sub-tree */ for (i = 0; i < start_node->num_children; i++) { struct ice_sched_node *tmp; tmp = ice_sched_find_node_by_teid(start_node->children[i], teid); if (tmp) return tmp; } return NULL; } /** * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd * @hw: pointer to the HW struct * @cmd_opc: cmd opcode * @elems_req: number of elements to request * @buf: pointer to buffer * @buf_size: buffer size in bytes * @elems_resp: returns total number of elements response * @cd: pointer to command details structure or NULL * * This function sends a scheduling elements cmd (cmd_opc) */ static enum ice_status ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, u16 elems_req, void *buf, u16 buf_size, u16 *elems_resp, struct ice_sq_cd *cd) { struct ice_aqc_sched_elem_cmd *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.sched_elem_cmd; ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); cmd->num_elem_req = CPU_TO_LE16(elems_req); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status && elems_resp) *elems_resp = LE16_TO_CPU(cmd->num_elem_resp); return status; } /** * ice_aq_query_sched_elems - query scheduler elements * @hw: pointer to the HW struct * @elems_req: number of elements to query * @buf: pointer to buffer * @buf_size: buffer size in bytes * @elems_ret: returns total number of elements returned * @cd: pointer to command details structure or NULL * * Query scheduling elements (0x0404) */ enum ice_status ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems, elems_req, (void *)buf, buf_size, 
elems_ret, cd); } /** * ice_sched_add_node - Insert the Tx scheduler node in SW DB * @pi: port information structure * @layer: Scheduler layer of the node * @info: Scheduler element information from firmware * * This function inserts a scheduler node into the SW DB. */ enum ice_status ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data elem; struct ice_sched_node *parent; struct ice_sched_node *node; enum ice_status status; struct ice_hw *hw; if (!pi) return ICE_ERR_PARAM; hw = pi->hw; /* A valid parent node should be there */ parent = ice_sched_find_node_by_teid(pi->root, LE32_TO_CPU(info->parent_teid)); if (!parent) { ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n", LE32_TO_CPU(info->parent_teid)); return ICE_ERR_PARAM; } /* query the current node information from FW before adding it * to the SW DB */ status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem); if (status) return status; node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node)); if (!node) return ICE_ERR_NO_MEMORY; if (hw->max_children[layer]) { /* coverity[suspicious_sizeof] */ node->children = (struct ice_sched_node **) ice_calloc(hw, hw->max_children[layer], sizeof(*node)); if (!node->children) { ice_free(hw, node); return ICE_ERR_NO_MEMORY; } } node->in_use = true; node->parent = parent; node->tx_sched_layer = layer; parent->children[parent->num_children++] = node; node->info = elem; return ICE_SUCCESS; } /** * ice_aq_delete_sched_elems - delete scheduler elements * @hw: pointer to the HW struct * @grps_req: number of groups to delete * @buf: pointer to buffer * @buf_size: buffer size in bytes * @grps_del: returns total number of groups deleted * @cd: pointer to command details structure or NULL * * Delete scheduling elements (0x040F) */ static enum ice_status ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_delete_elem *buf, u16 buf_size, u16 *grps_del, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems, grps_req, (void *)buf, buf_size, grps_del, cd); } /** * ice_sched_remove_elems - remove nodes from HW * @hw: pointer to the HW struct * @parent: pointer to the parent node * @num_nodes: number of nodes * @node_teids: array of node teids to be deleted * * This function removes nodes from HW */ static enum ice_status ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, u16 num_nodes, u32 *node_teids) { struct ice_aqc_delete_elem *buf; u16 i, num_groups_removed = 0; enum ice_status status; u16 buf_size; buf_size = ice_struct_size(buf, teid, num_nodes); buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size); if (!buf) return ICE_ERR_NO_MEMORY; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = CPU_TO_LE16(num_nodes); for (i = 0; i < num_nodes; i++) buf->teid[i] = CPU_TO_LE32(node_teids[i]); status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, &num_groups_removed, NULL); if (status != ICE_SUCCESS || num_groups_removed != 1) ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n", hw->adminq.sq_last_status); ice_free(hw, buf); return status; } /** * ice_sched_get_first_node - get the first node of the given layer * @pi: port information structure * @parent: pointer to the base node of the subtree * @layer: layer number * * This function retrieves the first node of the given layer from the subtree */ static struct ice_sched_node * ice_sched_get_first_node(struct ice_port_info *pi, struct
ice_sched_node *parent, u8 layer) { return pi->sib_head[parent->tc_num][layer]; } /** * ice_sched_get_tc_node - get pointer to TC node * @pi: port information structure * @tc: TC number * * This function returns the TC node pointer */ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc) { u8 i; if (!pi || !pi->root) return NULL; for (i = 0; i < pi->root->num_children; i++) if (pi->root->children[i]->tc_num == tc) return pi->root->children[i]; return NULL; } /** * ice_free_sched_node - Free a Tx scheduler node from SW DB * @pi: port information structure * @node: pointer to the ice_sched_node struct * * This function frees up a node from SW DB as well as from HW * * This function needs to be called with the port_info->sched_lock held */ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) { struct ice_sched_node *parent; struct ice_hw *hw = pi->hw; u8 i, j; /* Free the children before freeing up the parent node * The parent array is updated below and that shifts the nodes * in the array. So always pick the first child if num children > 0 */ while (node->num_children) ice_free_sched_node(pi, node->children[0]); /* Leaf, TC and root nodes can't be deleted by SW */ if (node->tx_sched_layer >= hw->sw_entry_point_layer && node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT && node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = LE32_TO_CPU(node->info.node_teid); ice_sched_remove_elems(hw, node->parent, 1, &teid); } parent = node->parent; /* root has no parent */ if (parent) { struct ice_sched_node *p; /* update the parent */ for (i = 0; i < parent->num_children; i++) if (parent->children[i] == node) { for (j = i + 1; j < parent->num_children; j++) parent->children[j - 1] = parent->children[j]; parent->num_children--; break; } p = ice_sched_get_first_node(pi, node, node->tx_sched_layer); while (p) { if (p->sibling == node) { p->sibling = node->sibling; break; } p = p->sibling; } /* update the sibling head if head is getting removed */ if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node) pi->sib_head[node->tc_num][node->tx_sched_layer] = node->sibling; } /* leaf nodes have no children */ if (node->children) ice_free(hw, node->children); ice_free(hw, node); } /** * ice_aq_get_dflt_topo - gets default scheduler topology * @hw: pointer to the HW struct * @lport: logical port number * @buf: pointer to buffer * @buf_size: buffer size in bytes * @num_branches: returns total number of queue to port branches * @cd: pointer to command details structure or NULL * * Get default scheduler topology (0x400) */ static enum ice_status ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport, struct ice_aqc_get_topo_elem *buf, u16 buf_size, u8 *num_branches, struct ice_sq_cd *cd) { struct ice_aqc_get_topo *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.get_topo; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo); cmd->port_num = lport; status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status && num_branches) *num_branches = cmd->num_branches; return status; } /** * ice_aq_add_sched_elems - adds scheduling element * @hw: pointer to the HW struct * @grps_req: the number of groups that are requested to be added * @buf: pointer to buffer * @buf_size: buffer size in bytes * @grps_added: returns total number of groups added * @cd: pointer to command details structure or NULL * * Add scheduling elements (0x0401) */ static enum ice_status 
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_add_elem *buf, u16 buf_size, u16 *grps_added, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems, grps_req, (void *)buf, buf_size, grps_added, cd); } /** * ice_aq_cfg_sched_elems - configures scheduler elements * @hw: pointer to the HW struct * @elems_req: number of elements to configure * @buf: pointer to buffer * @buf_size: buffer size in bytes * @elems_cfgd: returns total number of elements configured * @cd: pointer to command details structure or NULL * * Configure scheduling elements (0x0403) */ static enum ice_status ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_cfgd, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems, elems_req, (void *)buf, buf_size, elems_cfgd, cd); } /** * ice_aq_move_sched_elems - move scheduler elements * @hw: pointer to the HW struct * @grps_req: number of groups to move * @buf: pointer to buffer * @buf_size: buffer size in bytes * @grps_movd: returns total number of groups moved * @cd: pointer to command details structure or NULL * * Move scheduling elements (0x0408) */ static enum ice_status ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_move_elem *buf, u16 buf_size, u16 *grps_movd, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems, grps_req, (void *)buf, buf_size, grps_movd, cd); } /** * ice_aq_suspend_sched_elems - suspend scheduler elements * @hw: pointer to the HW struct * @elems_req: number of elements to suspend * @buf: pointer to buffer * @buf_size: buffer size in bytes * @elems_ret: returns total number of elements suspended * @cd: pointer to command details structure or NULL * * Suspend scheduling elements (0x0409) */ static enum ice_status ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems, elems_req, (void *)buf, buf_size, elems_ret, cd); } /** * ice_aq_resume_sched_elems - resume scheduler elements * @hw: pointer to the HW struct * @elems_req: number of elements to resume * @buf: pointer to buffer * @buf_size: buffer size in bytes * @elems_ret: returns total number of elements resumed * @cd: pointer to command details structure or NULL * * resume scheduling elements (0x040A) */ static enum ice_status ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems, elems_req, (void *)buf, buf_size, elems_ret, cd); } /** * ice_aq_query_sched_res - query scheduler resource * @hw: pointer to the HW struct * @buf_size: buffer size in bytes * @buf: pointer to buffer * @cd: pointer to command details structure or NULL * * Query scheduler resource allocation (0x0412) */ static enum ice_status ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size, struct ice_aqc_query_txsched_res_resp *buf, struct ice_sq_cd *cd) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res); return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); } /** * ice_sched_suspend_resume_elems - suspend or resume HW nodes * @hw: pointer to the HW struct * @num_nodes: number of nodes * @node_teids: array of node teids to be suspended or resumed * @suspend: true means suspend / false means 
resume * * This function suspends or resumes HW nodes */ static enum ice_status ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, bool suspend) { u16 i, buf_size, num_elem_ret = 0; enum ice_status status; __le32 *buf; buf_size = sizeof(*buf) * num_nodes; buf = (__le32 *)ice_malloc(hw, buf_size); if (!buf) return ICE_ERR_NO_MEMORY; for (i = 0; i < num_nodes; i++) buf[i] = CPU_TO_LE32(node_teids[i]); if (suspend) status = ice_aq_suspend_sched_elems(hw, num_nodes, buf, buf_size, &num_elem_ret, NULL); else status = ice_aq_resume_sched_elems(hw, num_nodes, buf, buf_size, &num_elem_ret, NULL); if (status != ICE_SUCCESS || num_elem_ret != num_nodes) ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n"); ice_free(hw, buf); return status; } /** * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC * @hw: pointer to the HW struct * @vsi_handle: VSI handle * @tc: TC number * @new_numqs: number of queues */ static enum ice_status ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) { struct ice_vsi_ctx *vsi_ctx; struct ice_q_ctx *q_ctx; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) return ICE_ERR_PARAM; /* allocate LAN queue contexts */ if (!vsi_ctx->lan_q_ctx[tc]) { vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *) ice_calloc(hw, new_numqs, sizeof(*q_ctx)); if (!vsi_ctx->lan_q_ctx[tc]) return ICE_ERR_NO_MEMORY; vsi_ctx->num_lan_q_entries[tc] = new_numqs; return ICE_SUCCESS; } /* the number of queues has increased; grow the queue contexts */ if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) { u16 prev_num = vsi_ctx->num_lan_q_entries[tc]; q_ctx = (struct ice_q_ctx *) ice_calloc(hw, new_numqs, sizeof(*q_ctx)); if (!q_ctx) return ICE_ERR_NO_MEMORY; ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA); ice_free(hw, vsi_ctx->lan_q_ctx[tc]); vsi_ctx->lan_q_ctx[tc] = q_ctx; vsi_ctx->num_lan_q_entries[tc] = new_numqs; } return ICE_SUCCESS; } /** * ice_aq_rl_profile - performs a rate limiting task * @hw: pointer to the HW struct * @opcode: opcode for add, query, or remove profile(s) * @num_profiles: the number of profiles * @buf: pointer to buffer * @buf_size: buffer size in bytes * @num_processed: number of processed add or remove profile(s) to return * @cd: pointer to command details structure * * RL profile function to add, query, or remove profile(s) */ static enum ice_status ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) { struct ice_aqc_rl_profile *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.rl_profile; ice_fill_dflt_direct_cmd_desc(&desc, opcode); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->num_profiles = CPU_TO_LE16(num_profiles); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status && num_processed) *num_processed = LE16_TO_CPU(cmd->num_processed); return status; } /** * ice_aq_add_rl_profile - adds rate limiting profile(s) * @hw: pointer to the HW struct * @num_profiles: the number of profile(s) to be added * @buf: pointer to buffer * @buf_size: buffer size in bytes * @num_profiles_added: total number of profiles added to return * @cd: pointer to command details structure * * Add RL profile (0x0410) */ static enum ice_status ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_added, struct ice_sq_cd *cd) { return ice_aq_rl_profile(hw,
ice_aqc_opc_add_rl_profiles, num_profiles, buf, buf_size, num_profiles_added, cd); } /** * ice_aq_query_rl_profile - query rate limiting profile(s) * @hw: pointer to the HW struct * @num_profiles: the number of profile(s) to query * @buf: pointer to buffer * @buf_size: buffer size in bytes * @cd: pointer to command details structure * * Query RL profile (0x0411) */ enum ice_status ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles, num_profiles, buf, buf_size, NULL, cd); } /** * ice_aq_remove_rl_profile - removes RL profile(s) * @hw: pointer to the HW struct * @num_profiles: the number of profile(s) to remove * @buf: pointer to buffer * @buf_size: buffer size in bytes * @num_profiles_removed: total number of profiles removed to return * @cd: pointer to command details structure or NULL * * Remove RL profile (0x0415) */ static enum ice_status ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_profiles_removed, struct ice_sq_cd *cd) { return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles, num_profiles, buf, buf_size, num_profiles_removed, cd); } /** * ice_sched_del_rl_profile - remove RL profile * @hw: pointer to the HW struct * @rl_info: rate limit profile information * * If the profile ID is not referenced anymore, it removes the profile ID with * its associated parameters from the HW DB, and locally. The caller needs to * hold the scheduler lock. */ static enum ice_status ice_sched_del_rl_profile(struct ice_hw *hw, struct ice_aqc_rl_profile_info *rl_info) { struct ice_aqc_rl_profile_elem *buf; u16 num_profiles_removed; enum ice_status status; u16 num_profiles = 1; if (rl_info->prof_id_ref != 0) return ICE_ERR_IN_USE; /* Safe to remove profile ID */ buf = &rl_info->profile; status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), &num_profiles_removed, NULL); if (status || num_profiles_removed != num_profiles) return ICE_ERR_CFG; /* Delete stale entry now */ LIST_DEL(&rl_info->list_entry); ice_free(hw, rl_info); return status; } /** * ice_sched_clear_rl_prof - clears RL prof entries * @pi: port information structure * * This function removes all RL profiles from HW as well as from the SW DB. */ static void ice_sched_clear_rl_prof(struct ice_port_info *pi) { u16 ln; struct ice_hw *hw = pi->hw; for (ln = 0; ln < hw->num_tx_sched_layers; ln++) { struct ice_aqc_rl_profile_info *rl_prof_elem; struct ice_aqc_rl_profile_info *rl_prof_tmp; LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp, &hw->rl_prof_list[ln], ice_aqc_rl_profile_info, list_entry) { enum ice_status status; rl_prof_elem->prof_id_ref = 0; status = ice_sched_del_rl_profile(hw, rl_prof_elem); if (status) { ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); /* On error, still free the entry */ LIST_DEL(&rl_prof_elem->list_entry); ice_free(hw, rl_prof_elem); } } } } /** * ice_sched_clear_agg - clears the aggregator related information * @hw: pointer to the hardware structure * * This function removes the aggregator list and frees up the aggregator-related * memory previously allocated.
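* The cleanup below mirrors the two-level list layout: for each ice_sched_agg_info entry on hw->agg_list, every attached ice_sched_agg_vsi_info is unlinked and freed first, and only then is the aggregator entry itself removed.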
*/ void ice_sched_clear_agg(struct ice_hw *hw) { struct ice_sched_agg_info *agg_info; struct ice_sched_agg_info *atmp; LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list, ice_sched_agg_info, list_entry) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *vtmp; LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp, &agg_info->agg_vsi_list, ice_sched_agg_vsi_info, list_entry) { LIST_DEL(&agg_vsi_info->list_entry); ice_free(hw, agg_vsi_info); } LIST_DEL(&agg_info->list_entry); ice_free(hw, agg_info); } } /** * ice_sched_clear_tx_topo - clears the scheduler tree nodes * @pi: port information structure * * This function removes all the nodes from HW as well as from SW DB. */ static void ice_sched_clear_tx_topo(struct ice_port_info *pi) { if (!pi) return; /* remove RL profiles related lists */ ice_sched_clear_rl_prof(pi); if (pi->root) { ice_free_sched_node(pi, pi->root); pi->root = NULL; } } /** * ice_sched_clear_port - clear the scheduler elements from SW DB for a port * @pi: port information structure * * Cleanup scheduling elements from SW DB */ void ice_sched_clear_port(struct ice_port_info *pi) { if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return; pi->port_state = ICE_SCHED_PORT_STATE_INIT; ice_acquire_lock(&pi->sched_lock); ice_sched_clear_tx_topo(pi); ice_release_lock(&pi->sched_lock); ice_destroy_lock(&pi->sched_lock); } /** * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports * @hw: pointer to the HW struct * * Cleanup scheduling elements from SW DB for all the ports */ void ice_sched_cleanup_all(struct ice_hw *hw) { if (!hw) return; if (hw->layer_info) { ice_free(hw, hw->layer_info); hw->layer_info = NULL; } ice_sched_clear_port(hw->port_info); hw->num_tx_sched_layers = 0; hw->num_tx_sched_phys_layers = 0; hw->flattened_layers = 0; hw->max_cgds = 0; } /** * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping * @hw: pointer to the HW struct * @num_l2_nodes: the number of L2 nodes whose CGDs to configure * @buf: pointer to buffer * @buf_size: buffer size in bytes * @cd: pointer to command details structure or NULL * * Configure L2 Node CGD (0x0414) */ enum ice_status ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes, struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_cfg_l2_node_cgd *cmd; struct ice_aq_desc desc; cmd = &desc.params.cfg_l2_node_cgd; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes); return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); } /** * ice_sched_add_elems - add nodes to HW and SW DB * @pi: port information structure * @tc_node: pointer to the branch node * @parent: pointer to the parent node * @layer: layer number to add nodes * @num_nodes: number of nodes * @num_nodes_added: pointer to num nodes added * @first_node_teid: if new nodes are added then return the TEID of first node * * This function add nodes to HW as well as to SW DB for a given layer */ static enum ice_status ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, struct ice_sched_node *parent, u8 layer, u16 num_nodes, u16 *num_nodes_added, u32 *first_node_teid) { struct ice_sched_node *prev, *new_node; struct ice_aqc_add_elem *buf; u16 i, num_groups_added = 0; enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; u16 buf_size; u32 teid; buf_size = ice_struct_size(buf, generic, num_nodes); buf = (struct ice_aqc_add_elem *)ice_malloc(hw, 
buf_size); if (!buf) return ICE_ERR_NO_MEMORY; buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = CPU_TO_LE16(num_nodes); for (i = 0; i < num_nodes; i++) { buf->generic[i].parent_teid = parent->info.node_teid; buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC; buf->generic[i].data.valid_sections = ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR; buf->generic[i].data.generic = 0; buf->generic[i].data.cir_bw.bw_profile_idx = CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); buf->generic[i].data.cir_bw.bw_alloc = CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); buf->generic[i].data.eir_bw.bw_profile_idx = CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); buf->generic[i].data.eir_bw.bw_alloc = CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); } status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, &num_groups_added, NULL); if (status != ICE_SUCCESS || num_groups_added != 1) { ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", hw->adminq.sq_last_status); ice_free(hw, buf); return ICE_ERR_CFG; } *num_nodes_added = num_nodes; /* add nodes to the SW DB */ for (i = 0; i < num_nodes; i++) { status = ice_sched_add_node(pi, layer, &buf->generic[i]); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n", status); break; } teid = LE32_TO_CPU(buf->generic[i].node_teid); new_node = ice_sched_find_node_by_teid(parent, teid); if (!new_node) { ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid); break; } new_node->sibling = NULL; new_node->tc_num = tc_node->tc_num; /* add it to previous node sibling pointer */ /* Note: siblings are not linked across branches */ prev = ice_sched_get_first_node(pi, tc_node, layer); if (prev && prev != new_node) { while (prev->sibling) prev = prev->sibling; prev->sibling = new_node; } /* initialize the sibling head */ if (!pi->sib_head[tc_node->tc_num][layer]) pi->sib_head[tc_node->tc_num][layer] = new_node; if (i == 0) *first_node_teid = teid; } ice_free(hw, buf); return status; } /** - * ice_sched_add_nodes_to_layer - Add nodes to a given layer + * ice_sched_add_nodes_to_hw_layer - Add nodes to hw layer * @pi: port information structure * @tc_node: pointer to TC node * @parent: pointer to parent node * @layer: layer number to add nodes * @num_nodes: number of nodes to be added * @first_node_teid: pointer to the first node TEID * @num_nodes_added: pointer to number of nodes added * - * This function add nodes to a given layer. + * Add nodes into specific hw layer. 
*/ static enum ice_status -ice_sched_add_nodes_to_layer(struct ice_port_info *pi, - struct ice_sched_node *tc_node, - struct ice_sched_node *parent, u8 layer, - u16 num_nodes, u32 *first_node_teid, - u16 *num_nodes_added) +ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi, + struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, + u16 num_nodes, u32 *first_node_teid, + u16 *num_nodes_added) { - u32 *first_teid_ptr = first_node_teid; - u16 new_num_nodes, max_child_nodes; - enum ice_status status = ICE_SUCCESS; - struct ice_hw *hw = pi->hw; - u16 num_added = 0; - u32 temp; + u16 max_child_nodes; *num_nodes_added = 0; if (!num_nodes) - return status; + return ICE_SUCCESS; - if (!parent || layer < hw->sw_entry_point_layer) + if (!parent || layer < pi->hw->sw_entry_point_layer) return ICE_ERR_PARAM; /* max children per node per layer */ - max_child_nodes = hw->max_children[parent->tx_sched_layer]; + max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; - /* current number of children + required nodes exceed max children ? */ + /* current number of children + required nodes exceed max children */ if ((parent->num_children + num_nodes) > max_child_nodes) { /* Fail if the parent is a TC node */ if (parent == tc_node) return ICE_ERR_CFG; + return ICE_ERR_MAX_LIMIT; + } + + return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, + num_nodes_added, first_node_teid); +} + +/** + * ice_sched_add_nodes_to_layer - Add nodes to a given layer + * @pi: port information structure + * @tc_node: pointer to TC node + * @parent: pointer to parent node + * @layer: layer number to add nodes + * @num_nodes: number of nodes to be added + * @first_node_teid: pointer to the first node TEID + * @num_nodes_added: pointer to number of nodes added + * + * This function add nodes to a given layer. + */ +static enum ice_status +ice_sched_add_nodes_to_layer(struct ice_port_info *pi, + struct ice_sched_node *tc_node, + struct ice_sched_node *parent, u8 layer, + u16 num_nodes, u32 *first_node_teid, + u16 *num_nodes_added) +{ + u32 *first_teid_ptr = first_node_teid; + u16 new_num_nodes = num_nodes; + enum ice_status status = ICE_SUCCESS; + *num_nodes_added = 0; + while (*num_nodes_added < num_nodes) { + u16 max_child_nodes, num_added = 0; + /* cppcheck-suppress unusedVariable */ + u32 temp; + + status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent, + layer, new_num_nodes, + first_teid_ptr, + &num_added); + if (status == ICE_SUCCESS) + *num_nodes_added += num_added; + /* added more nodes than requested ? */ + if (*num_nodes_added > num_nodes) { + ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes, + *num_nodes_added); + status = ICE_ERR_CFG; + break; + } + /* break if all the nodes are added successfully */ + if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes)) + break; + /* break if the error is not max limit */ + if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT) + break; + /* Exceeded the max children */ + max_child_nodes = pi->hw->max_children[parent->tx_sched_layer]; /* utilize all the spaces if the parent is not full */ if (parent->num_children < max_child_nodes) { new_num_nodes = max_child_nodes - parent->num_children; - /* this recursion is intentional, and wouldn't - * go more than 2 calls + } else { + /* This parent is full, try the next sibling */ + parent = parent->sibling; + /* Don't modify the first node TEID memory if the + * first node was added already in the above call. 
+ * Instead send some temp memory for all other + * recursive calls. */ - status = ice_sched_add_nodes_to_layer(pi, tc_node, - parent, layer, - new_num_nodes, - first_node_teid, - &num_added); - if (status != ICE_SUCCESS) - return status; + if (num_added) + first_teid_ptr = &temp; - *num_nodes_added += num_added; + new_num_nodes = num_nodes - *num_nodes_added; } - /* Don't modify the first node TEID memory if the first node was - * added already in the above call. Instead send some temp - * memory for all other recursive calls. - */ - if (num_added) - first_teid_ptr = &temp; - - new_num_nodes = num_nodes - num_added; - - /* This parent is full, try the next sibling */ - parent = parent->sibling; - - /* this recursion is intentional, for 1024 queues - * per VSI, it goes max of 16 iterations. - * 1024 / 8 = 128 layer 8 nodes - * 128 /8 = 16 (add 8 nodes per iteration) - */ - status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, - layer, new_num_nodes, - first_teid_ptr, - &num_added); - *num_nodes_added += num_added; - return status; } - - status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes, - num_nodes_added, first_node_teid); return status; } /** * ice_sched_get_qgrp_layer - get the current queue group layer number * @hw: pointer to the HW struct * * This function returns the current queue group layer number */ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw) { /* It's always total layers - 1, the array is 0 relative so -2 */ return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; } /** * ice_sched_get_vsi_layer - get the current VSI layer number * @hw: pointer to the HW struct * * This function returns the current VSI layer number */ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) { /* Num Layers VSI layer * 9 6 * 7 4 * 5 or less sw_entry_point_layer */ /* calculate the VSI layer based on number of layers. */ if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; if (layer > hw->sw_entry_point_layer) return layer; } return hw->sw_entry_point_layer; } /** * ice_sched_get_agg_layer - get the current aggregator layer number * @hw: pointer to the HW struct * * This function returns the current aggregator layer number */ static u8 ice_sched_get_agg_layer(struct ice_hw *hw) { /* Num Layers aggregator layer * 9 4 * 7 or less sw_entry_point_layer */ /* calculate the aggregator layer based on number of layers. 
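* E.g. with nine total layers this yields 9 - ICE_AGG_LAYER_OFFSET = 4, matching the table above; with seven or fewer layers the computed value would not exceed the SW entry point, so sw_entry_point_layer is returned instead.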
*/ if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) { u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; if (layer > hw->sw_entry_point_layer) return layer; } return hw->sw_entry_point_layer; } /** * ice_rm_dflt_leaf_node - remove the default leaf node in the tree * @pi: port information structure * * This function removes the leaf node that was created by the FW * during initialization */ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) { struct ice_sched_node *node; node = pi->root; while (node) { if (!node->num_children) break; node = node->children[0]; } if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) { u32 teid = LE32_TO_CPU(node->info.node_teid); enum ice_status status; /* remove the default leaf node */ status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid); if (!status) ice_free_sched_node(pi, node); } } /** * ice_sched_rm_dflt_nodes - free the default nodes in the tree * @pi: port information structure * * This function frees all the nodes except root and TC that were created by * the FW during initialization */ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) { struct ice_sched_node *node; ice_rm_dflt_leaf_node(pi); /* remove the default nodes except TC and root nodes */ node = pi->root; while (node) { if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer && node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC && node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) { ice_free_sched_node(pi, node); break; } if (!node->num_children) break; node = node->children[0]; } } /** * ice_sched_init_port - Initialize scheduler by querying information from FW * @pi: port info structure for the tree to cleanup * * This function is the initial call to find the total number of Tx scheduler * resources, default topology created by firmware and storing the information * in SW DB. */ enum ice_status ice_sched_init_port(struct ice_port_info *pi) { struct ice_aqc_get_topo_elem *buf; enum ice_status status; struct ice_hw *hw; u8 num_branches; u16 num_elems; u8 i, j; if (!pi) return ICE_ERR_PARAM; hw = pi->hw; /* Query the Default Topology from FW */ buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); if (!buf) return ICE_ERR_NO_MEMORY; /* Query default scheduling tree topology */ status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, &num_branches, NULL); if (status) goto err_init_port; /* num_branches should be between 1-8 */ if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) { ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n", num_branches); status = ICE_ERR_PARAM; goto err_init_port; } /* get the number of elements on the default/first branch */ num_elems = LE16_TO_CPU(buf[0].hdr.num_elems); /* num_elems should always be between 1-9 */ if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) { ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n", num_elems); status = ICE_ERR_PARAM; goto err_init_port; } /* If the last node is a leaf node then the index of the queue group * layer is two less than the number of elements. 
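* E.g. with nine elements whose last element is a leaf, pi->last_node_teid below is taken from generic[7] (index num_elems - 2) rather than from the leaf itself.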
*/ if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) pi->last_node_teid = LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid); else pi->last_node_teid = LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid); /* Insert the Tx Sched root node */ status = ice_sched_add_root_node(pi, &buf[0].generic[0]); if (status) goto err_init_port; /* Parse the default tree and cache the information */ for (i = 0; i < num_branches; i++) { num_elems = LE16_TO_CPU(buf[i].hdr.num_elems); /* Skip root element as already inserted */ for (j = 1; j < num_elems; j++) { /* update the sw entry point */ if (buf[0].generic[j].data.elem_type == ICE_AQC_ELEM_TYPE_ENTRY_POINT) hw->sw_entry_point_layer = j; status = ice_sched_add_node(pi, j, &buf[i].generic[j]); if (status) goto err_init_port; } } /* Remove the default nodes. */ if (pi->root) ice_sched_rm_dflt_nodes(pi); /* initialize the port for handling the scheduler tree */ pi->port_state = ICE_SCHED_PORT_STATE_READY; ice_init_lock(&pi->sched_lock); for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) INIT_LIST_HEAD(&hw->rl_prof_list[i]); err_init_port: if (status && pi->root) { ice_free_sched_node(pi, pi->root); pi->root = NULL; } ice_free(hw, buf); return status; } /** * ice_sched_get_node - Get the struct ice_sched_node for given TEID * @pi: port information structure * @teid: Scheduler node TEID * * This function retrieves the ice_sched_node struct for given TEID from * the SW DB and returns it to the caller. */ struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid) { struct ice_sched_node *node; if (!pi) return NULL; /* Find the node starting from root */ ice_acquire_lock(&pi->sched_lock); node = ice_sched_find_node_by_teid(pi->root, teid); ice_release_lock(&pi->sched_lock); if (!node) ice_debug(pi->hw, ICE_DBG_SCHED, "Node not found for teid=0x%x\n", teid); return node; } /** * ice_sched_query_res_alloc - query the FW for num of logical sched layers * @hw: pointer to the HW struct * * query FW for allocated scheduler resources and store in HW struct */ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) { struct ice_aqc_query_txsched_res_resp *buf; enum ice_status status = ICE_SUCCESS; __le16 max_sibl; u8 i; if (hw->layer_info) return status; buf = (struct ice_aqc_query_txsched_res_resp *) ice_malloc(hw, sizeof(*buf)); if (!buf) return ICE_ERR_NO_MEMORY; status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL); if (status) goto sched_query_out; hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels); hw->num_tx_sched_phys_layers = LE16_TO_CPU(buf->sched_props.phys_levels); hw->flattened_layers = buf->sched_props.flattening_bitmap; hw->max_cgds = buf->sched_props.max_pf_cgds; /* max sibling group size of current layer refers to the max children * of the below layer node. * layer 1 node max children will be layer 2 max sibling group size * layer 2 node max children will be layer 3 max sibling group size * and so on. This array will be populated from root (index 0) to * qgroup layer 7. Leaf node has no children. 
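* E.g. hw->max_children[0] for the root is taken from layer_props[1].max_sibl_grp_sz in the loop below, and so on down to the queue group layer.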
*/ for (i = 0; i < hw->num_tx_sched_layers - 1; i++) { max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz; hw->max_children[i] = LE16_TO_CPU(max_sibl); } hw->layer_info = (struct ice_aqc_layer_props *) ice_memdup(hw, buf->layer_props, (hw->num_tx_sched_layers * sizeof(*hw->layer_info)), - ICE_DMA_TO_DMA); + ICE_NONDMA_TO_NONDMA); if (!hw->layer_info) { status = ICE_ERR_NO_MEMORY; goto sched_query_out; } sched_query_out: ice_free(hw, buf); return status; } /** * ice_sched_get_psm_clk_freq - determine the PSM clock frequency * @hw: pointer to the HW struct * * Determine the PSM clock frequency and store in HW struct */ void ice_sched_get_psm_clk_freq(struct ice_hw *hw) { u32 val, clk_src; val = rd32(hw, GLGEN_CLKSTAT_SRC); clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >> GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S; #define PSM_CLK_SRC_367_MHZ 0x0 #define PSM_CLK_SRC_416_MHZ 0x1 #define PSM_CLK_SRC_446_MHZ 0x2 #define PSM_CLK_SRC_390_MHZ 0x3 switch (clk_src) { case PSM_CLK_SRC_367_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ; break; case PSM_CLK_SRC_416_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ; break; case PSM_CLK_SRC_446_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; break; case PSM_CLK_SRC_390_MHZ: hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ; break; default: ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n", clk_src); /* fall back to a safe default */ hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ; } } /** * ice_sched_find_node_in_subtree - Find node in part of base node subtree * @hw: pointer to the HW struct * @base: pointer to the base node * @node: pointer to the node to search * * This function checks whether a given node is part of the base node * subtree or not */ bool ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, struct ice_sched_node *node) { u8 i; for (i = 0; i < base->num_children; i++) { struct ice_sched_node *child = base->children[i]; if (node == child) return true; if (child->tx_sched_layer > node->tx_sched_layer) return false; /* this recursion is intentional, and wouldn't * go more than 8 calls */ if (ice_sched_find_node_in_subtree(hw, child, node)) return true; } return false; } /** * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node * @pi: port information structure * @vsi_node: pointer to the VSI node * @qgrp_node: first queue group node identified for scanning * @owner: LAN or RDMA * * This function retrieves a free LAN or RDMA queue group node by scanning * qgrp_node and its siblings for the queue group with the fewest number * of queues currently assigned. */ static struct ice_sched_node * ice_sched_get_free_qgrp(struct ice_port_info *pi, struct ice_sched_node *vsi_node, struct ice_sched_node *qgrp_node, u8 owner) { struct ice_sched_node *min_qgrp; u8 min_children; if (!qgrp_node) return qgrp_node; min_children = qgrp_node->num_children; if (!min_children) return qgrp_node; min_qgrp = qgrp_node; /* scan all queue group siblings, tracking the node with the fewest * children seen so far. This way all queue group nodes get an equal * share of queues and stay active, and the bandwidth will be equally * distributed across all queues.
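* Note the scan below can stop early: once a queue group with zero children is found, no better candidate can exist.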
*/ while (qgrp_node) { /* make sure the qgroup node is part of the VSI subtree */ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) if (qgrp_node->num_children < min_children && qgrp_node->owner == owner) { /* replace the new min queue group node */ min_qgrp = qgrp_node; min_children = min_qgrp->num_children; /* break if it has no children, */ if (!min_children) break; } qgrp_node = qgrp_node->sibling; } return min_qgrp; } /** * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node * @pi: port information structure * @vsi_handle: software VSI handle * @tc: branch number * @owner: LAN or RDMA * * This function retrieves a free LAN or RDMA queue group node */ struct ice_sched_node * ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner) { struct ice_sched_node *vsi_node, *qgrp_node; struct ice_vsi_ctx *vsi_ctx; u16 max_children; u8 qgrp_layer; qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); max_children = pi->hw->max_children[qgrp_layer]; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) return NULL; vsi_node = vsi_ctx->sched.vsi_node[tc]; /* validate invalid VSI ID */ if (!vsi_node) return NULL; /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { /* make sure the qgroup node is part of the VSI subtree */ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) if (qgrp_node->num_children < max_children && qgrp_node->owner == owner) break; qgrp_node = qgrp_node->sibling; } /* Select the best queue group */ return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner); } /** * ice_sched_get_vsi_node - Get a VSI node based on VSI ID * @pi: pointer to the port information structure * @tc_node: pointer to the TC node * @vsi_handle: software VSI handle * * This function retrieves a VSI node for a given VSI ID from a given * TC branch */ struct ice_sched_node * ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, u16 vsi_handle) { struct ice_sched_node *node; u8 vsi_layer; vsi_layer = ice_sched_get_vsi_layer(pi->hw); node = ice_sched_get_first_node(pi, tc_node, vsi_layer); /* Check whether it already exists */ while (node) { if (node->vsi_handle == vsi_handle) return node; node = node->sibling; } return node; } /** * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID * @pi: pointer to the port information structure * @tc_node: pointer to the TC node * @agg_id: aggregator ID * * This function retrieves an aggregator node for a given aggregator ID from * a given TC branch */ static struct ice_sched_node * ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node, u32 agg_id) { struct ice_sched_node *node; struct ice_hw *hw = pi->hw; u8 agg_layer; if (!hw) return NULL; agg_layer = ice_sched_get_agg_layer(hw); node = ice_sched_get_first_node(pi, tc_node, agg_layer); /* Check whether it already exists */ while (node) { if (node->agg_id == agg_id) return node; node = node->sibling; } return node; } /** * ice_sched_check_node - Compare node parameters between SW DB and HW DB * @hw: pointer to the HW struct * @node: pointer to the ice_sched_node struct * * This function queries and compares the HW element with SW DB node parameters */ static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node) { struct ice_aqc_txsched_elem_data buf; enum ice_status status; u32 node_teid; node_teid = LE32_TO_CPU(node->info.node_teid); status = ice_sched_query_elem(hw, 
node_teid, &buf); if (status != ICE_SUCCESS) return false; if (memcmp(&buf, &node->info, sizeof(buf))) { ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n", node_teid); return false; } return true; } /** * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes * @hw: pointer to the HW struct * @num_qs: number of queues * @num_nodes: num nodes array * * This function calculates the number of VSI child nodes based on the * number of queues. */ static void ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) { u16 num = num_qs; u8 i, qgl, vsil; qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); /* calculate num nodes from queue group to VSI layer */ for (i = qgl; i > vsil; i--) { /* round to the next integer if there is a remainder */ num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]); /* need at least one node */ num_nodes[i] = num ? num : 1; } } /** * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree * @pi: port information structure * @vsi_handle: software VSI handle * @tc_node: pointer to the TC node * @num_nodes: pointer to the num nodes that needs to be added per layer * @owner: node owner (LAN or RDMA) * * This function adds the VSI child nodes to tree. It gets called for * LAN and RDMA separately. */ static enum ice_status ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes, u8 owner) { struct ice_sched_node *parent, *node; struct ice_hw *hw = pi->hw; enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, qgl, vsil; qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { if (!parent) return ICE_ERR_CFG; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_added); if (status != ICE_SUCCESS || num_nodes[i] != num_added) return ICE_ERR_CFG; /* The newly added node can be a new parent for the next * layer nodes */ if (num_added) { parent = ice_sched_find_node_by_teid(tc_node, first_node_teid); node = parent; while (node) { node->owner = owner; node = node->sibling; } } else { parent = parent->children[0]; } } return ICE_SUCCESS; } /** * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes * @pi: pointer to the port info structure * @tc_node: pointer to TC node * @num_nodes: pointer to num nodes array * * This function calculates the number of supported nodes needed to add this * VSI into Tx tree including the VSI, parent and intermediate nodes in below * layers */ static void ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *node; u8 vsil; int i; vsil = ice_sched_get_vsi_layer(pi->hw); for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--) /* Add intermediate nodes if TC has no children and * need at least one node for VSI */ if (!tc_node->num_children || i == vsil) { num_nodes[i]++; } else { /* If intermediate nodes are reached max children * then add a new one. */ node = ice_sched_get_first_node(pi, tc_node, (u8)i); /* scan all the siblings */ while (node) { if (node->num_children < pi->hw->max_children[i]) break; node = node->sibling; } /* tree has one intermediate node to add this new VSI. * So no need to calculate supported nodes for below * layers. 
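 * For example, on a hypothetical tree whose sw_entry_point_layer is 1
 * and whose VSI layer is 3: the first VSI under an empty TC needs one
 * new node at each of layers 1, 2 and 3, while a later VSI whose
 * layer-2 parent still has room needs only the single layer-3 VSI
 * node, because the loop breaks before counting anything closer to
 * the root.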
*/ if (node) break; /* all the nodes are full, allocate a new one */ num_nodes[i]++; } } /** * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree * @pi: port information structure * @vsi_handle: software VSI handle * @tc_node: pointer to TC node * @num_nodes: pointer to num nodes array * * This function adds the VSI supported nodes into Tx tree including the * VSI, its parent and intermediate nodes in below layers */ static enum ice_status ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *parent = tc_node; enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, vsil; if (!pi) return ICE_ERR_PARAM; vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_added); if (status != ICE_SUCCESS || num_nodes[i] != num_added) return ICE_ERR_CFG; /* The newly added node can be a new parent for the next * layer nodes */ if (num_added) parent = ice_sched_find_node_by_teid(tc_node, first_node_teid); else parent = parent->children[0]; if (!parent) return ICE_ERR_CFG; if (i == vsil) parent->vsi_handle = vsi_handle; } return ICE_SUCCESS; } /** * ice_sched_add_vsi_to_topo - add a new VSI into tree * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number * * This function adds a new VSI into scheduler tree */ static enum ice_status ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) { u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; struct ice_sched_node *tc_node; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_PARAM; /* calculate number of supported nodes needed for this VSI */ ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes); /* add VSI supported nodes to TC subtree */ return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, num_nodes); } /** * ice_sched_update_vsi_child_nodes - update VSI child nodes * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number * @new_numqs: new number of max queues * @owner: owner of this subtree * * This function updates the VSI child nodes based on the number of queues */ static enum ice_status ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 new_numqs, u8 owner) { u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; struct ice_sched_node *vsi_node; struct ice_sched_node *tc_node; struct ice_vsi_ctx *vsi_ctx; enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; u16 prev_numqs; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_CFG; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) return ICE_ERR_CFG; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) return ICE_ERR_PARAM; prev_numqs = vsi_ctx->sched.max_lanq[tc]; /* num queues are not changed or less than the previous number */ if (new_numqs <= prev_numqs) return status; status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); if (status) return status; if (new_numqs) ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); /* Keep the max number of queue configuration all the time. Update the * tree only if number of queues > previous number of queues. This may * leave some extra nodes in the tree if number of queues < previous * number but that wouldn't harm anything. 
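 * (For example, growing a VSI from 60 to 130 queues with, say, eight
 * children allowed per node needs DIVIDE_AND_ROUND_UP(130, 8) = 17
 * queue group nodes and then DIVIDE_AND_ROUND_UP(17, 8) = 3 nodes one
 * layer up; shrinking back to 60 queues later leaves those extra
 * nodes in place.)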
Removing those extra nodes * may complicate the code if those nodes are part of SRL or * individually rate limited. */ status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, new_num_nodes, owner); if (status) return status; vsi_ctx->sched.max_lanq[tc] = new_numqs; return ICE_SUCCESS; } /** * ice_sched_cfg_vsi - configure the new/existing VSI * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number * @maxqs: max number of queues * @owner: LAN or RDMA * @enable: TC enabled or disabled * * This function adds/updates VSI nodes based on the number of queues. If TC is * enabled and VSI is in suspended state then resume the VSI back. If TC is * disabled then suspend the VSI if it is not already. */ enum ice_status ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable) { struct ice_sched_node *vsi_node, *tc_node; struct ice_vsi_ctx *vsi_ctx; enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_PARAM; vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi_ctx) return ICE_ERR_PARAM; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); /* suspend the VSI if TC is not enabled */ if (!enable) { if (vsi_node && vsi_node->in_use) { u32 teid = LE32_TO_CPU(vsi_node->info.node_teid); status = ice_sched_suspend_resume_elems(hw, 1, &teid, true); if (!status) vsi_node->in_use = false; } return status; } /* TC is enabled, if it is a new VSI then add it to the tree */ if (!vsi_node) { status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc); if (status) return status; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) return ICE_ERR_CFG; vsi_ctx->sched.vsi_node[tc] = vsi_node; vsi_node->in_use = true; /* invalidate the max queues whenever VSI gets added first time * into the scheduler tree (boot or after reset). We need to * recreate the child nodes all the time in these cases. */ vsi_ctx->sched.max_lanq[tc] = 0; } /* update the VSI child nodes */ status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs, owner); if (status) return status; /* TC is enabled, resume the VSI if it is in the suspend state */ if (!vsi_node->in_use) { u32 teid = LE32_TO_CPU(vsi_node->info.node_teid); status = ice_sched_suspend_resume_elems(hw, 1, &teid, false); if (!status) vsi_node->in_use = true; } return status; } /** * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry * @pi: port information structure * @vsi_handle: software VSI handle * * This function removes single aggregator VSI info entry from * aggregator list. */ static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle) { struct ice_sched_agg_info *agg_info; struct ice_sched_agg_info *atmp; LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list, ice_sched_agg_info, list_entry) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *vtmp; LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp, &agg_info->agg_vsi_list, ice_sched_agg_vsi_info, list_entry) if (agg_vsi_info->vsi_handle == vsi_handle) { LIST_DEL(&agg_vsi_info->list_entry); ice_free(pi->hw, agg_vsi_info); return; } } } /** * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree * @node: pointer to the sub-tree node * * This function checks for a leaf node presence in a given sub-tree node. 
*/ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) { u8 i; for (i = 0; i < node->num_children; i++) if (ice_sched_is_leaf_node_present(node->children[i])) return true; /* check for a leaf node */ return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); } /** * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes * @pi: port information structure * @vsi_handle: software VSI handle * @owner: LAN or RDMA * * This function removes the VSI and its LAN or RDMA children nodes from the * scheduler tree. */ static enum ice_status ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) { enum ice_status status = ICE_ERR_PARAM; struct ice_vsi_ctx *vsi_ctx; u8 i; ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return status; ice_acquire_lock(&pi->sched_lock); vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) goto exit_sched_rm_vsi_cfg; ice_for_each_traffic_class(i) { struct ice_sched_node *vsi_node, *tc_node; u8 j = 0; tc_node = ice_sched_get_tc_node(pi, i); if (!tc_node) continue; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) continue; if (ice_sched_is_leaf_node_present(vsi_node)) { ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i); status = ICE_ERR_IN_USE; goto exit_sched_rm_vsi_cfg; } while (j < vsi_node->num_children) { if (vsi_node->children[j]->owner == owner) { ice_free_sched_node(pi, vsi_node->children[j]); /* reset the counter again since the num * children will be updated after node removal */ j = 0; } else { j++; } } /* remove the VSI if it has no children */ if (!vsi_node->num_children) { ice_free_sched_node(pi, vsi_node); vsi_ctx->sched.vsi_node[i] = NULL; /* clean up aggregator related VSI info if any */ ice_sched_rm_agg_vsi_info(pi, vsi_handle); } if (owner == ICE_SCHED_NODE_OWNER_LAN) vsi_ctx->sched.max_lanq[i] = 0; } status = ICE_SUCCESS; exit_sched_rm_vsi_cfg: ice_release_lock(&pi->sched_lock); return status; } /** * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes * @pi: port information structure * @vsi_handle: software VSI handle * * This function clears the VSI and its LAN children nodes from scheduler tree * for all TCs. */ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); } /** * ice_sched_is_tree_balanced - Check tree nodes are identical or not * @hw: pointer to the HW struct * @node: pointer to the ice_sched_node struct * * This function compares all the nodes for a given tree against HW DB nodes * This function needs to be called with the port_info->sched_lock held */ bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node) { u8 i; /* start from the leaf node */ for (i = 0; i < node->num_children; i++) /* Fail if node doesn't match with the SW DB * this recursion is intentional, and wouldn't * go more than 9 calls */ if (!ice_sched_is_tree_balanced(hw, node->children[i])) return false; return ice_sched_check_node(hw, node); } /** * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID * @hw: pointer to the HW struct * @node_teid: node TEID * @buf: pointer to buffer * @buf_size: buffer size in bytes * @cd: pointer to command details structure or NULL * * This function retrieves the tree topology from the firmware for a given * node TEID to the root node. 
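 * A minimal sketch of a caller (hypothetical; the element count and
 * the error handling shown are assumptions, not part of this API's
 * contract):
 *
 *	struct ice_aqc_txsched_elem_data elems[ICE_AQC_TOPO_MAX_LEVEL_NUM];
 *
 *	if (!ice_aq_query_node_to_root(hw, node_teid, elems,
 *				       sizeof(elems), NULL))
 *		;	// elems[] now holds the branch up to the root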
*/ enum ice_status ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_query_node_to_root *cmd; struct ice_aq_desc desc; cmd = &desc.params.query_node_to_root; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root); cmd->teid = CPU_TO_LE32(node_teid); return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); } /** * ice_get_agg_info - get the aggregator ID * @hw: pointer to the hardware structure * @agg_id: aggregator ID * * This function validates aggregator ID. The function returns info if * aggregator ID is present in list otherwise it returns null. */ static struct ice_sched_agg_info * ice_get_agg_info(struct ice_hw *hw, u32 agg_id) { struct ice_sched_agg_info *agg_info; LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, list_entry) if (agg_info->agg_id == agg_id) return agg_info; return NULL; } /** * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree * @hw: pointer to the HW struct * @node: pointer to a child node * @num_nodes: num nodes count array * * This function walks through the aggregator subtree to find a free parent * node */ static struct ice_sched_node * ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node, u16 *num_nodes) { u8 l = node->tx_sched_layer; u8 vsil, i; vsil = ice_sched_get_vsi_layer(hw); /* Is it VSI parent layer ? */ if (l == vsil - 1) return (node->num_children < hw->max_children[l]) ? node : NULL; /* We have intermediate nodes. Let's walk through the subtree. If the * intermediate node has space to add a new node then clear the count */ if (node->num_children < hw->max_children[l]) num_nodes[l] = 0; /* The below recursive call is intentional and wouldn't go more than * 2 or 3 iterations. */ for (i = 0; i < node->num_children; i++) { struct ice_sched_node *parent; parent = ice_sched_get_free_vsi_parent(hw, node->children[i], num_nodes); if (parent) return parent; } return NULL; } /** * ice_sched_update_parent - update the new parent in SW DB * @new_parent: pointer to a new parent node * @node: pointer to a child node * * This function removes the child from the old parent and adds it to a new * parent */ static void ice_sched_update_parent(struct ice_sched_node *new_parent, struct ice_sched_node *node) { struct ice_sched_node *old_parent; u8 i, j; old_parent = node->parent; /* update the old parent children */ for (i = 0; i < old_parent->num_children; i++) if (old_parent->children[i] == node) { for (j = i + 1; j < old_parent->num_children; j++) old_parent->children[j - 1] = old_parent->children[j]; old_parent->num_children--; break; } /* now move the node to a new parent */ new_parent->children[new_parent->num_children++] = node; node->parent = new_parent; node->info.parent_teid = new_parent->info.node_teid; } /** * ice_sched_move_nodes - move child nodes to a given parent * @pi: port information structure * @parent: pointer to parent node * @num_items: number of child nodes to be moved * @list: pointer to child node teids * * This function move the child nodes to a given parent. 
*/ static enum ice_status ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent, u16 num_items, u32 *list) { struct ice_aqc_move_elem *buf; struct ice_sched_node *node; enum ice_status status = ICE_SUCCESS; u16 i, grps_movd = 0; struct ice_hw *hw; u16 buf_len; hw = pi->hw; if (!parent || !num_items) return ICE_ERR_PARAM; /* Does parent have enough space */ if (parent->num_children + num_items > hw->max_children[parent->tx_sched_layer]) return ICE_ERR_AQ_FULL; buf_len = ice_struct_size(buf, teid, 1); buf = (struct ice_aqc_move_elem *)ice_malloc(hw, buf_len); if (!buf) return ICE_ERR_NO_MEMORY; for (i = 0; i < num_items; i++) { node = ice_sched_find_node_by_teid(pi->root, list[i]); if (!node) { status = ICE_ERR_PARAM; goto move_err_exit; } buf->hdr.src_parent_teid = node->info.parent_teid; buf->hdr.dest_parent_teid = parent->info.node_teid; buf->teid[0] = node->info.node_teid; buf->hdr.num_elems = CPU_TO_LE16(1); status = ice_aq_move_sched_elems(hw, 1, buf, buf_len, &grps_movd, NULL); if (status && grps_movd != 1) { status = ICE_ERR_CFG; goto move_err_exit; } /* update the SW DB */ ice_sched_update_parent(parent, node); } move_err_exit: ice_free(hw, buf); return status; } /** * ice_sched_move_vsi_to_agg - move VSI to aggregator node * @pi: port information structure * @vsi_handle: software VSI handle * @agg_id: aggregator ID * @tc: TC number * * This function moves a VSI to an aggregator node or its subtree. * Intermediate nodes may be created if required. */ static enum ice_status ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id, u8 tc) { struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; u32 first_node_teid, vsi_teid; enum ice_status status; u16 num_nodes_added; u8 aggl, vsil, i; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_CFG; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) return ICE_ERR_DOES_NOT_EXIST; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) return ICE_ERR_DOES_NOT_EXIST; /* Is this VSI already part of given aggregator? 
*/ if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node)) return ICE_SUCCESS; aggl = ice_sched_get_agg_layer(pi->hw); vsil = ice_sched_get_vsi_layer(pi->hw); /* set intermediate node count to 1 between aggregator and VSI layers */ for (i = aggl + 1; i < vsil; i++) num_nodes[i] = 1; /* Check if the aggregator subtree has any free node to add the VSI */ for (i = 0; i < agg_node->num_children; i++) { parent = ice_sched_get_free_vsi_parent(pi->hw, agg_node->children[i], num_nodes); if (parent) goto move_nodes; } /* add new nodes */ parent = agg_node; for (i = aggl + 1; i < vsil; i++) { status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_nodes_added); if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added) return ICE_ERR_CFG; /* The newly added node can be a new parent for the next * layer nodes */ if (num_nodes_added) parent = ice_sched_find_node_by_teid(tc_node, first_node_teid); else parent = parent->children[0]; if (!parent) return ICE_ERR_CFG; } move_nodes: vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid); return ice_sched_move_nodes(pi, parent, 1, &vsi_teid); } /** * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator * @pi: port information structure * @agg_info: aggregator info * @tc: traffic class number * @rm_vsi_info: true or false * * This function move all the VSI(s) to the default aggregator and delete * aggregator VSI info based on passed in boolean parameter rm_vsi_info. The * caller holds the scheduler lock. */ static enum ice_status ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *tmp; enum ice_status status = ICE_SUCCESS; LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list, ice_sched_agg_vsi_info, list_entry) { u16 vsi_handle = agg_vsi_info->vsi_handle; /* Move VSI to default aggregator */ if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc)) continue; status = ice_sched_move_vsi_to_agg(pi, vsi_handle, ICE_DFLT_AGG_ID, tc); if (status) break; ice_clear_bit(tc, agg_vsi_info->tc_bitmap); if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) { LIST_DEL(&agg_vsi_info->list_entry); ice_free(pi->hw, agg_vsi_info); } } return status; } /** * ice_sched_is_agg_inuse - check whether the aggregator is in use or not * @pi: port information structure * @node: node pointer * * This function checks whether the aggregator is attached with any VSI or not. */ static bool ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node) { u8 vsil, i; vsil = ice_sched_get_vsi_layer(pi->hw); if (node->tx_sched_layer < vsil - 1) { for (i = 0; i < node->num_children; i++) if (ice_sched_is_agg_inuse(pi, node->children[i])) return true; return false; } else { return node->num_children ? 
true : false; } } /** * ice_sched_rm_agg_cfg - remove the aggregator node * @pi: port information structure * @agg_id: aggregator ID * @tc: TC number * * This function removes the aggregator node and intermediate nodes if any * from the given TC */ static enum ice_status ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *tc_node, *agg_node; struct ice_hw *hw = pi->hw; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_CFG; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) return ICE_ERR_DOES_NOT_EXIST; /* Can't remove the aggregator node if it has children */ if (ice_sched_is_agg_inuse(pi, agg_node)) return ICE_ERR_IN_USE; /* need to remove the whole subtree if aggregator node is the * only child. */ while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) { struct ice_sched_node *parent = agg_node->parent; if (!parent) return ICE_ERR_CFG; if (parent->num_children > 1) break; agg_node = parent; } ice_free_sched_node(pi, agg_node); return ICE_SUCCESS; } /** * ice_rm_agg_cfg_tc - remove aggregator configuration for TC * @pi: port information structure * @agg_info: aggregator ID * @tc: TC number * @rm_vsi_info: bool value true or false * * This function removes aggregator reference to VSI of given TC. It removes * the aggregator configuration completely for requested TC. The caller needs * to hold the scheduler lock. */ static enum ice_status ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info, u8 tc, bool rm_vsi_info) { enum ice_status status = ICE_SUCCESS; /* If nothing to remove - return success */ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) goto exit_rm_agg_cfg_tc; status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info); if (status) goto exit_rm_agg_cfg_tc; /* Delete aggregator node(s) */ status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc); if (status) goto exit_rm_agg_cfg_tc; ice_clear_bit(tc, agg_info->tc_bitmap); exit_rm_agg_cfg_tc: return status; } /** * ice_save_agg_tc_bitmap - save aggregator TC bitmap * @pi: port information structure * @agg_id: aggregator ID * @tc_bitmap: 8 bits TC bitmap * * Save aggregator TC bitmap. This function needs to be called with scheduler * lock held. */ static enum ice_status ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id, ice_bitmap_t *tc_bitmap) { struct ice_sched_agg_info *agg_info; agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) return ICE_ERR_PARAM; ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return ICE_SUCCESS; } /** * ice_sched_add_agg_cfg - create an aggregator node * @pi: port information structure * @agg_id: aggregator ID * @tc: TC number * * This function creates an aggregator node and intermediate nodes if required * for the given TC */ static enum ice_status ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc) { struct ice_sched_node *parent, *agg_node, *tc_node; u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; u32 first_node_teid; u16 num_nodes_added; u8 i, aggl; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_CFG; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); /* Does Agg node already exist ? */ if (agg_node) return status; aggl = ice_sched_get_agg_layer(hw); /* need one node in Agg layer */ num_nodes[aggl] = 1; /* Check whether the intermediate nodes have space to add the * new aggregator. 
If they are full, then SW needs to allocate a new * intermediate node on those layers */ for (i = hw->sw_entry_point_layer; i < aggl; i++) { parent = ice_sched_get_first_node(pi, tc_node, i); /* scan all the siblings */ while (parent) { if (parent->num_children < hw->max_children[i]) break; parent = parent->sibling; } /* all the nodes are full, reserve one for this layer */ if (!parent) num_nodes[i]++; } /* add the aggregator node */ parent = tc_node; for (i = hw->sw_entry_point_layer; i <= aggl; i++) { if (!parent) return ICE_ERR_CFG; status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, &num_nodes_added); if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added) return ICE_ERR_CFG; /* The newly added node can be a new parent for the next * layer nodes */ if (num_nodes_added) { parent = ice_sched_find_node_by_teid(tc_node, first_node_teid); /* register aggregator ID with the aggregator node */ if (parent && i == aggl) parent->agg_id = agg_id; } else { parent = parent->children[0]; } } return ICE_SUCCESS; }
/** * ice_sched_cfg_agg - configure aggregator node * @pi: port information structure * @agg_id: aggregator ID * @agg_type: aggregator type queue, VSI, or aggregator group * @tc_bitmap: 8 bits TC bitmap * * It registers a unique aggregator node into scheduler services. It * allows a user to register with a unique ID to track its resources. * The aggregator type determines if this is a queue group, VSI group * or aggregator group. It then creates the aggregator node(s) for requested * TC(s) or removes an existing aggregator node including its configuration * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator * resources and remove aggregator ID. * This function needs to be called with the scheduler lock held. */ static enum ice_status ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap) { struct ice_sched_agg_info *agg_info; enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; u8 tc; agg_info = ice_get_agg_info(hw, agg_id); if (!agg_info) { /* Create new entry for new aggregator ID */ agg_info = (struct ice_sched_agg_info *) ice_malloc(hw, sizeof(*agg_info)); if (!agg_info) return ICE_ERR_NO_MEMORY; agg_info->agg_id = agg_id; agg_info->agg_type = agg_type; agg_info->tc_bitmap[0] = 0; /* Initialize the aggregator VSI list head */ INIT_LIST_HEAD(&agg_info->agg_vsi_list); /* Add new entry in aggregator list */ LIST_ADD(&agg_info->list_entry, &hw->agg_list); } /* Create aggregator node(s) for requested TC(s) */ ice_for_each_traffic_class(tc) { if (!ice_is_tc_ena(*tc_bitmap, tc)) { /* Delete aggregator cfg TC if it exists previously */ status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false); if (status) break; continue; } /* Check if aggregator node for TC already exists */ if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) continue; /* Create new aggregator node for TC */ status = ice_sched_add_agg_cfg(pi, agg_id, tc); if (status) break; /* Save aggregator node's TC information */ ice_set_bit(tc, agg_info->tc_bitmap); } return status; }
/** * ice_cfg_agg - config aggregator node * @pi: port information structure * @agg_id: aggregator ID * @agg_type: aggregator type queue, VSI, or aggregator group * @tc_bitmap: 8 bits TC bitmap * * This function configures aggregator node(s).
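 * For example, a hypothetical caller creating aggregator 100 as an
 * aggregator group on TC 0 only (bit 0 of the bitmap) might do:
 *
 *	enum ice_status status;
 *
 *	status = ice_cfg_agg(pi, 100, ICE_AGG_TYPE_AGG, 0x1);
 *	if (status)
 *		;	// handle error
 *
 * Clearing a previously set bit on a later call tears that TC's
 * aggregator configuration down again.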
*/ enum ice_status ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type, u8 tc_bitmap) { ice_bitmap_t bitmap = tc_bitmap; enum ice_status status; ice_acquire_lock(&pi->sched_lock); status = ice_sched_cfg_agg(pi, agg_id, agg_type, (ice_bitmap_t *)&bitmap); if (!status) status = ice_save_agg_tc_bitmap(pi, agg_id, (ice_bitmap_t *)&bitmap); ice_release_lock(&pi->sched_lock); return status; }
/** * ice_get_agg_vsi_info - get aggregator VSI info * @agg_info: aggregator info * @vsi_handle: software VSI handle * * The function returns the aggregator VSI info based on the VSI handle. This * function needs to be called with the scheduler lock held. */ static struct ice_sched_agg_vsi_info * ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle) { struct ice_sched_agg_vsi_info *agg_vsi_info; LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, ice_sched_agg_vsi_info, list_entry) if (agg_vsi_info->vsi_handle == vsi_handle) return agg_vsi_info; return NULL; }
/** * ice_get_vsi_agg_info - get the aggregator info of VSI * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * * The function returns the aggregator info of the VSI represented via * vsi_handle. In this case the VSI has an aggregator other than the default * one. This function needs to be called with the scheduler lock held. */ static struct ice_sched_agg_info * ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle) { struct ice_sched_agg_info *agg_info; LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, list_entry) { struct ice_sched_agg_vsi_info *agg_vsi_info; agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); if (agg_vsi_info) return agg_info; } return NULL; }
/** * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap * @pi: port information structure * @agg_id: aggregator ID * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap of enabled TC(s) * * Save VSI to aggregator TC bitmap. This function needs to be called with the * scheduler lock held. */ static enum ice_status ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, ice_bitmap_t *tc_bitmap) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_info *agg_info; agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) return ICE_ERR_PARAM; /* check if the entry already exists */ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); if (!agg_vsi_info) return ICE_ERR_PARAM; ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap, ICE_MAX_TRAFFIC_CLASS); return ICE_SUCCESS; }
/** * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator * @pi: port information structure * @agg_id: aggregator ID * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap of enabled TC(s) * * This function moves a VSI to a new or default aggregator node. If the VSI * is already associated with the aggregator node then no operation is * performed on the tree. This function needs to be called with the scheduler * lock held.
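 * The bitmap selects which TC subtrees move: for example, a tc_bitmap
 * of 0x5 (bits 0 and 2) moves only the VSI nodes for TC 0 and TC 2.
 * Callers normally reach this through the public wrapper further
 * below, e.g. (hypothetical handles):
 *
 *	status = ice_move_vsi_to_agg(pi, 100, vsi_handle, 0x5);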
*/ static enum ice_status ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, ice_bitmap_t *tc_bitmap) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_info *agg_info; enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; agg_info = ice_get_agg_info(hw, agg_id); if (!agg_info) return ICE_ERR_PARAM; /* check if entry already exist */ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); if (!agg_vsi_info) { /* Create new entry for VSI under aggregator list */ agg_vsi_info = (struct ice_sched_agg_vsi_info *) ice_malloc(hw, sizeof(*agg_vsi_info)); if (!agg_vsi_info) return ICE_ERR_PARAM; /* add VSI ID into the aggregator list */ agg_vsi_info->vsi_handle = vsi_handle; LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list); } /* Move VSI node to new aggregator node for requested TC(s) */ ice_for_each_traffic_class(tc) { if (!ice_is_tc_ena(*tc_bitmap, tc)) continue; /* Move VSI to new aggregator */ status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc); if (status) break; ice_set_bit(tc, agg_vsi_info->tc_bitmap); } return status; } /** * ice_sched_rm_unused_rl_prof - remove unused RL profile * @hw: pointer to the hardware structure * * This function removes unused rate limit profiles from the HW and * SW DB. The caller needs to hold scheduler lock. */ static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw) { u16 ln; for (ln = 0; ln < hw->num_tx_sched_layers; ln++) { struct ice_aqc_rl_profile_info *rl_prof_elem; struct ice_aqc_rl_profile_info *rl_prof_tmp; LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp, &hw->rl_prof_list[ln], ice_aqc_rl_profile_info, list_entry) { if (!ice_sched_del_rl_profile(hw, rl_prof_elem)) ice_debug(hw, ICE_DBG_SCHED, "Removed rl profile\n"); } } } /** * ice_sched_update_elem - update element * @hw: pointer to the HW struct * @node: pointer to node * @info: node info to update * * Update the HW DB, and local SW DB of node. Update the scheduling * parameters of node from argument info data buffer (Info->data buf) and * returns success or error on config sched element failure. The caller * needs to hold scheduler lock. */ static enum ice_status ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, struct ice_aqc_txsched_elem_data *info) { struct ice_aqc_txsched_elem_data buf; enum ice_status status; u16 elem_cfgd = 0; u16 num_elems = 1; buf = *info; /* Parent TEID is reserved field in this aq call */ buf.parent_teid = 0; /* Element type is reserved field in this aq call */ buf.data.elem_type = 0; /* Flags is reserved field in this aq call */ buf.data.flags = 0; /* Update HW DB */ /* Configure element node */ status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf), &elem_cfgd, NULL); if (status || elem_cfgd != num_elems) { ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); return ICE_ERR_CFG; } /* Config success case */ /* Now update local SW DB */ /* Only copy the data portion of info buffer */ node->info.data = info->data; return status; } /** * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params * @hw: pointer to the HW struct * @node: sched node to configure * @rl_type: rate limit type CIR, EIR, or shared * @bw_alloc: BW weight/allocation * * This function configures node element's BW allocation. 
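 * The bw_alloc value acts as a relative weight among sibling nodes.
 * A hedged sketch (hypothetical nodes, not a fixed rule): giving
 *
 *	ice_sched_cfg_node_bw_alloc(hw, node_a, ICE_MIN_BW, 75);
 *	ice_sched_cfg_node_bw_alloc(hw, node_b, ICE_MIN_BW, 25);
 *
 * splits the parent's committed bandwidth roughly 3:1 between the two
 * siblings; equal weights share it evenly.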
*/ static enum ice_status ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 bw_alloc) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; enum ice_status status; buf = node->info; data = &buf.data; if (rl_type == ICE_MIN_BW) { data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); } else if (rl_type == ICE_MAX_BW) { data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc); } else { return ICE_ERR_PARAM; } /* Configure element */ status = ice_sched_update_elem(hw, node, &buf); return status; } /** * ice_move_vsi_to_agg - moves VSI to new or default aggregator * @pi: port information structure * @agg_id: aggregator ID * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap of enabled TC(s) * * Move or associate VSI to a new or default aggregator node. */ enum ice_status ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle, u8 tc_bitmap) { ice_bitmap_t bitmap = tc_bitmap; enum ice_status status; ice_acquire_lock(&pi->sched_lock); status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle, (ice_bitmap_t *)&bitmap); if (!status) status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle, (ice_bitmap_t *)&bitmap); ice_release_lock(&pi->sched_lock); return status; } /** * ice_rm_agg_cfg - remove aggregator configuration * @pi: port information structure * @agg_id: aggregator ID * * This function removes aggregator reference to VSI and delete aggregator ID * info. It removes the aggregator configuration completely. */ enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id) { struct ice_sched_agg_info *agg_info; enum ice_status status = ICE_SUCCESS; u8 tc; ice_acquire_lock(&pi->sched_lock); agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) { status = ICE_ERR_DOES_NOT_EXIST; goto exit_ice_rm_agg_cfg; } ice_for_each_traffic_class(tc) { status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true); if (status) goto exit_ice_rm_agg_cfg; } if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) { status = ICE_ERR_IN_USE; goto exit_ice_rm_agg_cfg; } /* Safe to delete entry now */ LIST_DEL(&agg_info->list_entry); ice_free(pi->hw, agg_info); /* Remove unused RL profile IDs from HW and SW DB */ ice_sched_rm_unused_rl_prof(pi->hw); exit_ice_rm_agg_cfg: ice_release_lock(&pi->sched_lock); return status; } /** * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information * @bw_t_info: bandwidth type information structure * @bw_alloc: Bandwidth allocation information * * Save or clear CIR BW alloc information (bw_alloc) in the passed param * bw_t_info. */ static void ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) { bw_t_info->cir_bw.bw_alloc = bw_alloc; if (bw_t_info->cir_bw.bw_alloc) ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); else ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap); } /** * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information * @bw_t_info: bandwidth type information structure * @bw_alloc: Bandwidth allocation information * * Save or clear EIR BW alloc information (bw_alloc) in the passed param * bw_t_info. 
*/ static void ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc) { bw_t_info->eir_bw.bw_alloc = bw_alloc; if (bw_t_info->eir_bw.bw_alloc) ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); else ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap); } /** * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information * @pi: port information structure * @vsi_handle: sw VSI handle * @tc: traffic class * @rl_type: rate limit type min or max * @bw_alloc: Bandwidth allocation information * * Save BW alloc information of VSI type node for post replay use. */ static enum ice_status ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u16 bw_alloc) { struct ice_vsi_ctx *vsi_ctx; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) return ICE_ERR_PARAM; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc], bw_alloc); break; case ICE_MAX_BW: ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc], bw_alloc); break; default: return ICE_ERR_PARAM; } return ICE_SUCCESS; } /** * ice_set_clear_cir_bw - set or clear CIR BW * @bw_t_info: bandwidth type information structure * @bw: bandwidth in Kbps - Kilo bits per sec * * Save or clear CIR bandwidth (BW) in the passed param bw_t_info. */ static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) { if (bw == ICE_SCHED_DFLT_BW) { ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); bw_t_info->cir_bw.bw = 0; } else { /* Save type of BW information */ ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); bw_t_info->cir_bw.bw = bw; } } /** * ice_set_clear_eir_bw - set or clear EIR BW * @bw_t_info: bandwidth type information structure * @bw: bandwidth in Kbps - Kilo bits per sec * * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. */ static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) { if (bw == ICE_SCHED_DFLT_BW) { ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); bw_t_info->eir_bw.bw = 0; } else { /* save EIR BW information */ ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); bw_t_info->eir_bw.bw = bw; } } /** * ice_set_clear_shared_bw - set or clear shared BW * @bw_t_info: bandwidth type information structure * @bw: bandwidth in Kbps - Kilo bits per sec * * Save or clear shared bandwidth (BW) in the passed param bw_t_info. */ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) { if (bw == ICE_SCHED_DFLT_BW) { ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); bw_t_info->shared_bw = 0; } else { /* save shared BW information */ ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); bw_t_info->shared_bw = bw; } } /** * ice_sched_save_vsi_bw - save VSI node's BW information * @pi: port information structure * @vsi_handle: sw VSI handle * @tc: traffic class * @rl_type: rate limit type min, max, or shared * @bw: bandwidth in Kbps - Kilo bits per sec * * Save BW information of VSI type node for post replay use. 
*/ static enum ice_status ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw) { struct ice_vsi_ctx *vsi_ctx; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) return ICE_ERR_PARAM; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); break; case ICE_MAX_BW: ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw); break; case ICE_SHARED_BW: ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw); break; default: return ICE_ERR_PARAM; } return ICE_SUCCESS; } /** * ice_set_clear_prio - set or clear priority information * @bw_t_info: bandwidth type information structure * @prio: priority to save * * Save or clear priority (prio) in the passed param bw_t_info. */ static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio) { bw_t_info->generic = prio; if (bw_t_info->generic) ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap); else ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap); } /** * ice_sched_save_vsi_prio - save VSI node's priority information * @pi: port information structure * @vsi_handle: Software VSI handle * @tc: traffic class * @prio: priority to save * * Save priority information of VSI type node for post replay use. */ static enum ice_status ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 prio) { struct ice_vsi_ctx *vsi_ctx; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) return ICE_ERR_PARAM; if (tc >= ICE_MAX_TRAFFIC_CLASS) return ICE_ERR_PARAM; ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio); return ICE_SUCCESS; } /** * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information * @pi: port information structure * @agg_id: node aggregator ID * @tc: traffic class * @rl_type: rate limit type min or max * @bw_alloc: bandwidth alloc information * * Save BW alloc information of AGG type node for post replay use. */ static enum ice_status ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u16 bw_alloc) { struct ice_sched_agg_info *agg_info; agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) return ICE_ERR_PARAM; if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) return ICE_ERR_PARAM; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); break; case ICE_MAX_BW: ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc); break; default: return ICE_ERR_PARAM; } return ICE_SUCCESS; } /** * ice_sched_save_agg_bw - save aggregator node's BW information * @pi: port information structure * @agg_id: node aggregator ID * @tc: traffic class * @rl_type: rate limit type min, max, or shared * @bw: bandwidth in Kbps - Kilo bits per sec * * Save BW information of AGG type node for post replay use. 
*/ static enum ice_status ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u32 bw) { struct ice_sched_agg_info *agg_info; agg_info = ice_get_agg_info(pi->hw, agg_id); if (!agg_info) return ICE_ERR_PARAM; if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc)) return ICE_ERR_PARAM; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw); break; case ICE_MAX_BW: ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw); break; case ICE_SHARED_BW: ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw); break; default: return ICE_ERR_PARAM; } return ICE_SUCCESS; }
/** * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC * @pi: port information structure * @vsi_handle: software VSI handle * @tc: traffic class * @rl_type: min or max * @bw: bandwidth in Kbps * * This function configures BW limit of VSI scheduling node based on TC * information. */ enum ice_status ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type, u32 bw) { enum ice_status status; status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, ICE_AGG_TYPE_VSI, tc, rl_type, bw); if (!status) { ice_acquire_lock(&pi->sched_lock); status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); ice_release_lock(&pi->sched_lock); } return status; }
/** * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC * @pi: port information structure * @vsi_handle: software VSI handle * @tc: traffic class * @rl_type: min or max * * This function configures default BW limit of VSI scheduling node based on TC * information. */ enum ice_status ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_rl_type rl_type) { enum ice_status status; status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle, ICE_AGG_TYPE_VSI, tc, rl_type, ICE_SCHED_DFLT_BW); if (!status) { ice_acquire_lock(&pi->sched_lock); status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, ICE_SCHED_DFLT_BW); ice_release_lock(&pi->sched_lock); } return status; }
/** * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC * @pi: port information structure * @agg_id: aggregator ID * @tc: traffic class * @rl_type: min or max * @bw: bandwidth in Kbps * * This function applies BW limit to aggregator scheduling node based on TC * information. */ enum ice_status ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type, u32 bw) { enum ice_status status; status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, tc, rl_type, bw); if (!status) { ice_acquire_lock(&pi->sched_lock); status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw); ice_release_lock(&pi->sched_lock); } return status; }
/** * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC * @pi: port information structure * @agg_id: aggregator ID * @tc: traffic class * @rl_type: min or max * * This function applies default BW limit to aggregator scheduling node based * on TC information.
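 * Taken together with ice_cfg_agg_bw_lmt_per_tc() above, a
 * hypothetical caller could cap aggregator 100 on TC 0 at 1 Gbps (the
 * API takes Kbps) and later lift the cap:
 *
 *	status = ice_cfg_agg_bw_lmt_per_tc(pi, 100, 0, ICE_MAX_BW, 1000000);
 *	...
 *	status = ice_cfg_agg_bw_dflt_lmt_per_tc(pi, 100, 0, ICE_MAX_BW);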
*/ enum ice_status ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, enum ice_rl_type rl_type) { enum ice_status status; status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG, tc, rl_type, ICE_SCHED_DFLT_BW); if (!status) { ice_acquire_lock(&pi->sched_lock); status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, ICE_SCHED_DFLT_BW); ice_release_lock(&pi->sched_lock); } return status; }
/** * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit * @pi: port information structure * @vsi_handle: software VSI handle * @min_bw: minimum bandwidth in Kbps * @max_bw: maximum bandwidth in Kbps * @shared_bw: shared bandwidth in Kbps * * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic * classes for VSI matching handle. */ enum ice_status ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, u32 max_bw, u32 shared_bw) { return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, min_bw, max_bw, shared_bw); }
/** * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter * @pi: port information structure * @vsi_handle: software VSI handle * * This function removes the shared rate limiter(SRL) of all VSI type nodes * across all traffic classes for VSI matching handle. */ enum ice_status ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle) { return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, ICE_SCHED_DFLT_BW, ICE_SCHED_DFLT_BW, ICE_SCHED_DFLT_BW); }
/** * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit * @pi: port information structure * @agg_id: aggregator ID * @min_bw: minimum bandwidth in Kbps * @max_bw: maximum bandwidth in Kbps * @shared_bw: shared bandwidth in Kbps * * This function configures the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. */ enum ice_status ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, u32 max_bw, u32 shared_bw) { return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, min_bw, max_bw, shared_bw); }
/** * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter * @pi: port information structure * @agg_id: aggregator ID * * This function removes the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. */ enum ice_status ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id) { return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW, ICE_SCHED_DFLT_BW, ICE_SCHED_DFLT_BW); }
/** * ice_cfg_agg_bw_shared_lmt_per_tc - configure aggregator BW shared limit per TC * @pi: port information structure * @agg_id: aggregator ID * @tc: traffic class * @min_bw: minimum bandwidth in Kbps * @max_bw: maximum bandwidth in Kbps * @shared_bw: shared bandwidth in Kbps * * This function configures the shared rate limiter(SRL) of all aggregator type * nodes for the given traffic class for aggregator matching agg_id. */ enum ice_status ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, min_bw, max_bw, shared_bw); }
/** * ice_cfg_agg_bw_no_shared_lmt_per_tc - remove aggregator BW shared limit per TC * @pi: port information structure * @agg_id: aggregator ID * @tc: traffic class * * This function removes the shared rate limiter(SRL) of all aggregator type * nodes for the given traffic class for aggregator matching agg_id.
*/ enum ice_status ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc) { return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc, ICE_SCHED_DFLT_BW, ICE_SCHED_DFLT_BW, ICE_SCHED_DFLT_BW); }
/** * ice_cfg_vsi_q_priority - config VSI queue priority of node * @pi: port information structure * @num_qs: number of VSI queues * @q_ids: queue IDs array * @q_prio: queue priority array * * This function configures the queue node priority (Sibling Priority) of the * passed in VSI's queue(s) for a given traffic class (TC). */ enum ice_status ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids, u8 *q_prio) { enum ice_status status = ICE_ERR_PARAM; u16 i; ice_acquire_lock(&pi->sched_lock); for (i = 0; i < num_qs; i++) { struct ice_sched_node *node; node = ice_sched_find_node_by_teid(pi->root, q_ids[i]); if (!node || node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { status = ICE_ERR_PARAM; break; } /* Configure Priority */ status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]); if (status) break; } ice_release_lock(&pi->sched_lock); return status; }
/** * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC * @pi: port information structure * @agg_id: Aggregator ID * @num_vsis: number of VSI(s) * @vsi_handle_arr: array of software VSI handles * @node_prio: pointer to node priority * @tc: traffic class * * This function configures the node priority (Sibling Priority) of the * passed in VSI(s) for a given traffic class (TC) of an Aggregator ID. */ enum ice_status ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id, u16 num_vsis, u16 *vsi_handle_arr, u8 *node_prio, u8 tc) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_node *tc_node, *agg_node; enum ice_status status = ICE_ERR_PARAM; struct ice_sched_agg_info *agg_info; bool agg_id_present = false; struct ice_hw *hw = pi->hw; u16 i; ice_acquire_lock(&pi->sched_lock); LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, list_entry) if (agg_info->agg_id == agg_id) { agg_id_present = true; break; } if (!agg_id_present) goto exit_agg_priority_per_tc; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) goto exit_agg_priority_per_tc; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) goto exit_agg_priority_per_tc; if (num_vsis > hw->max_children[agg_node->tx_sched_layer]) goto exit_agg_priority_per_tc; for (i = 0; i < num_vsis; i++) { struct ice_sched_node *vsi_node; bool vsi_handle_valid = false; u16 vsi_handle; status = ICE_ERR_PARAM; vsi_handle = vsi_handle_arr[i]; if (!ice_is_vsi_valid(hw, vsi_handle)) goto exit_agg_priority_per_tc; /* Verify child nodes before applying settings */ LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, ice_sched_agg_vsi_info, list_entry) if (agg_vsi_info->vsi_handle == vsi_handle) { /* cppcheck-suppress unreadVariable */ vsi_handle_valid = true; break; } if (!vsi_handle_valid) goto exit_agg_priority_per_tc; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) goto exit_agg_priority_per_tc; if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) { /* Configure Priority */ status = ice_sched_cfg_sibl_node_prio(pi, vsi_node, node_prio[i]); if (status) break; status = ice_sched_save_vsi_prio(pi, vsi_handle, tc, node_prio[i]); if (status) break; } } exit_agg_priority_per_tc: ice_release_lock(&pi->sched_lock); return status; }
/** * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC * @pi: port information structure * @vsi_handle: software VSI
handle * @ena_tcmap: enabled TC map * @rl_type: Rate limit type CIR/EIR * @bw_alloc: Array of BW alloc * * This function configures the BW allocation of the passed in VSI's * node(s) for enabled traffic class. */ enum ice_status ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap, enum ice_rl_type rl_type, u8 *bw_alloc) { enum ice_status status = ICE_SUCCESS; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); /* Return success if no nodes are present across TC */ ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node, *vsi_node; if (!ice_is_tc_ena(ena_tcmap, tc)) continue; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) continue; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) continue; status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type, bw_alloc[tc]); if (status) break; status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc, rl_type, bw_alloc[tc]); if (status) break; } ice_release_lock(&pi->sched_lock); return status; } /** * ice_cfg_agg_bw_alloc - config aggregator BW alloc * @pi: port information structure * @agg_id: aggregator ID * @ena_tcmap: enabled TC map * @rl_type: rate limit type CIR/EIR * @bw_alloc: array of BW alloc * * This function configures the BW allocation of passed in aggregator for * enabled traffic class(s). */ enum ice_status ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap, enum ice_rl_type rl_type, u8 *bw_alloc) { struct ice_sched_agg_info *agg_info; bool agg_id_present = false; enum ice_status status = ICE_SUCCESS; struct ice_hw *hw = pi->hw; u8 tc; ice_acquire_lock(&pi->sched_lock); LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, list_entry) if (agg_info->agg_id == agg_id) { agg_id_present = true; break; } if (!agg_id_present) { status = ICE_ERR_PARAM; goto exit_cfg_agg_bw_alloc; } /* Return success if no nodes are present across TC */ ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node, *agg_node; if (!ice_is_tc_ena(ena_tcmap, tc)) continue; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) continue; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) continue; status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type, bw_alloc[tc]); if (status) break; status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type, bw_alloc[tc]); if (status) break; } exit_cfg_agg_bw_alloc: ice_release_lock(&pi->sched_lock); return status; } /** * ice_sched_calc_wakeup - calculate RL profile wakeup parameter * @hw: pointer to the HW struct * @bw: bandwidth in Kbps * * This function calculates the wakeup parameter of RL profile. 
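 * As a worked example (assuming the usual profile constants
 * ICE_RL_PROF_MULTIPLIER = 10000 and ICE_RL_PROF_FRACTION = 512, and a
 * 446 MHz PSM clock): for bw = 1,000,000 Kbps, bytes_per_sec is
 * 125,000,000 and wakeup_int = 446,000,000 / 125,000,000 = 3. Since
 * 3 <= 63 the fractional path runs: wakeup_a = 35680, wakeup_b = 30000,
 * wakeup_f = 5680, rounded up to 5681, and wakeup_f_int =
 * 5681 * 512 / 10000 = 290. The result is (3 << 9) | 290 = 0x722.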
*/ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw) { s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f; s32 wakeup_f_int; u16 wakeup = 0; /* Get the wakeup integer value */ bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE); wakeup_int = DIV_64BIT(hw->psm_clk_freq, bytes_per_sec); if (wakeup_int > 63) { wakeup = (u16)((1 << 15) | wakeup_int); } else { /* Calculate fraction value up to 4 decimals * Convert Integer value to a constant multiplier */ wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; wakeup_a = DIV_64BIT((s64)ICE_RL_PROF_MULTIPLIER * hw->psm_clk_freq, bytes_per_sec); /* Get Fraction value */ wakeup_f = wakeup_a - wakeup_b; /* Round up the Fractional value via Ceil(Fractional value) */ if (wakeup_f > DIV_64BIT(ICE_RL_PROF_MULTIPLIER, 2)) wakeup_f += 1; wakeup_f_int = (s32)DIV_64BIT(wakeup_f * ICE_RL_PROF_FRACTION, ICE_RL_PROF_MULTIPLIER); wakeup |= (u16)(wakeup_int << 9); wakeup |= (u16)(0x1ff & wakeup_f_int); } return wakeup; } /** * ice_sched_bw_to_rl_profile - convert BW to profile parameters * @hw: pointer to the HW struct * @bw: bandwidth in Kbps * @profile: profile parameters to return * * This function converts the BW to profile structure format. */ static enum ice_status ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw, struct ice_aqc_rl_profile_elem *profile) { enum ice_status status = ICE_ERR_PARAM; s64 bytes_per_sec, ts_rate, mv_tmp; bool found = false; s32 encode = 0; s64 mv = 0; s32 i; /* BW settings range is from 0.5 Mb/sec to 100 Gb/sec */ if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW) return status; /* Bytes per second from Kbps */ bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE); /* encode is 6 bits wide, but only 5 bits are really useful */ for (i = 0; i < 64; i++) { u64 pow_result = BIT_ULL(i); ts_rate = DIV_64BIT((s64)hw->psm_clk_freq, pow_result * ICE_RL_PROF_TS_MULTIPLIER); if (ts_rate <= 0) continue; /* Multiplier value */ mv_tmp = DIV_64BIT(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, ts_rate); /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER); /* First multiplier value greater than the given * accuracy bytes */ if (mv > ICE_RL_PROF_ACCURACY_BYTES) { encode = i; found = true; break; } } if (found) { u16 wm; wm = ice_sched_calc_wakeup(hw, bw); profile->rl_multiply = CPU_TO_LE16(mv); profile->wake_up_calc = CPU_TO_LE16(wm); profile->rl_encode = CPU_TO_LE16(encode); status = ICE_SUCCESS; } else { status = ICE_ERR_DOES_NOT_EXIST; } return status; }
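/* Editor's illustrative sketch, not part of the driver: the wakeup packing
 * above as a pure function, with a worked example. Assumptions for
 * exposition: ICE_RL_PROF_MULTIPLIER is 10000 (four decimal places of
 * fraction) and ICE_RL_PROF_FRACTION is 512 (nine fraction bits); the
 * 446428571 Hz clock below is only a plausible sample PSM clock value.
 *
 * Worked example: clk = 446428571 Hz, bw = 100000 Kbps
 *   bytes_per_sec  = 100000 * 1000 / 8    = 12500000
 *   integer part   = 446428571 / 12500000 = 35 (<= 63, fractional path)
 *   fraction x1e4  = 357142 - 350000      = 7142 -> 7143 after rounding
 *   9-bit fraction = 7143 * 512 / 10000   = 365
 *   wakeup = (35 << 9) | 365 = 0x476D
 */
static u16
example_calc_wakeup(s64 clk_hz, s64 bw_kbps)
{
	s64 bytes_per_sec = (bw_kbps * 1000) / 8;
	s64 wint = clk_hz / bytes_per_sec;
	s64 wfrac;

	if (wint > 63)	/* large rates use a flag bit plus the raw integer */
		return (u16)((1 << 15) | wint);
	/* fractional part of clk/bytes_per_sec, scaled by 10^4 */
	wfrac = (10000 * clk_hz) / bytes_per_sec - 10000 * wint;
	if (wfrac > 5000)	/* same rounding step as the driver */
		wfrac += 1;
	return (u16)((wint << 9) | (((wfrac * 512) / 10000) & 0x1ff));
}

/** * ice_sched_add_rl_profile - add RL profile * @hw: pointer to the hardware structure * @rl_type: type of rate limit BW - min, max, or shared * @bw: bandwidth in Kbps - Kilo bits per sec * @layer_num: specifies in which layer to create profile * * This function first checks the existing list for corresponding BW * parameter. If it exists, it returns the associated profile otherwise * it creates a new rate limit profile for requested BW, and adds it to * the HW DB and local list. It returns the new profile or NULL on error. * The caller needs to hold the scheduler lock.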
*/ static struct ice_aqc_rl_profile_info * ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type, u32 bw, u8 layer_num) { struct ice_aqc_rl_profile_info *rl_prof_elem; u16 profiles_added = 0, num_profiles = 1; struct ice_aqc_rl_profile_elem *buf; enum ice_status status; u8 profile_type; if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) return NULL; switch (rl_type) { case ICE_MIN_BW: profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; break; case ICE_MAX_BW: profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; break; case ICE_SHARED_BW: profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; break; default: return NULL; } if (!hw) return NULL; LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num], ice_aqc_rl_profile_info, list_entry) if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == profile_type && rl_prof_elem->bw == bw) /* Return existing profile ID info */ return rl_prof_elem; /* Create new profile ID */ rl_prof_elem = (struct ice_aqc_rl_profile_info *) ice_malloc(hw, sizeof(*rl_prof_elem)); if (!rl_prof_elem) return NULL; status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile); if (status != ICE_SUCCESS) goto exit_add_rl_prof; rl_prof_elem->bw = bw; /* layer_num is zero relative, and fw expects level from 1 to 9 */ rl_prof_elem->profile.level = layer_num + 1; rl_prof_elem->profile.flags = profile_type; rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size); /* Create new entry in HW DB */ buf = &rl_prof_elem->profile; status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf), &profiles_added, NULL); if (status || profiles_added != num_profiles) goto exit_add_rl_prof; /* Good entry - add in the list */ rl_prof_elem->prof_id_ref = 0; LIST_ADD(&rl_prof_elem->list_entry, &hw->rl_prof_list[layer_num]); return rl_prof_elem; exit_add_rl_prof: ice_free(hw, rl_prof_elem); return NULL; } /** * ice_sched_cfg_node_bw_lmt - configure node sched params * @hw: pointer to the HW struct * @node: sched node to configure * @rl_type: rate limit type CIR, EIR, or shared * @rl_prof_id: rate limit profile ID * * This function configures node element's BW limit. */ static enum ice_status ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, enum ice_rl_type rl_type, u16 rl_prof_id) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; buf = node->info; data = &buf.data; switch (rl_type) { case ICE_MIN_BW: data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id); break; case ICE_MAX_BW: data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id); break; case ICE_SHARED_BW: data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED; data->srl_id = CPU_TO_LE16(rl_prof_id); break; default: /* Unknown rate limit type */ return ICE_ERR_PARAM; } /* Configure element */ return ice_sched_update_elem(hw, node, &buf); } /** * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID * @node: sched node * @rl_type: rate limit type * * If existing profile matches, it returns the corresponding rate * limit profile ID, otherwise it returns an invalid ID as error. 
*/ static u16 ice_sched_get_node_rl_prof_id(struct ice_sched_node *node, enum ice_rl_type rl_type) { u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID; struct ice_aqc_txsched_elem *data; data = &node->info.data; switch (rl_type) { case ICE_MIN_BW: if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx); break; case ICE_MAX_BW: if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx); break; case ICE_SHARED_BW: if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) rl_prof_id = LE16_TO_CPU(data->srl_id); break; default: break; } return rl_prof_id; } /** * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer * @pi: port information structure * @rl_type: type of rate limit BW - min, max, or shared * @layer_index: layer index * * This function returns the requested profile creation layer. */ static u8 ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type, u8 layer_index) { struct ice_hw *hw = pi->hw; if (layer_index >= hw->num_tx_sched_layers) return ICE_SCHED_INVAL_LAYER_NUM; switch (rl_type) { case ICE_MIN_BW: if (hw->layer_info[layer_index].max_cir_rl_profiles) return layer_index; break; case ICE_MAX_BW: if (hw->layer_info[layer_index].max_eir_rl_profiles) return layer_index; break; case ICE_SHARED_BW: /* if current layer doesn't support SRL profile creation * then try a layer up or down. */ if (hw->layer_info[layer_index].max_srl_profiles) return layer_index; else if (layer_index < hw->num_tx_sched_layers - 1 && hw->layer_info[layer_index + 1].max_srl_profiles) return layer_index + 1; else if (layer_index > 0 && hw->layer_info[layer_index - 1].max_srl_profiles) return layer_index - 1; break; default: break; } return ICE_SCHED_INVAL_LAYER_NUM; } /** * ice_sched_get_srl_node - get shared rate limit node * @node: tree node * @srl_layer: shared rate limit layer * * This function returns the SRL node to be used for shared rate limit purposes. * The caller needs to hold scheduler lock. */ static struct ice_sched_node * ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) { if (srl_layer > node->tx_sched_layer) return node->children[0]; else if (srl_layer < node->tx_sched_layer) /* Node can't be created without a parent. It will always * have a valid parent except the root node. */ return node->parent; else return node; }
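/* Editor's illustrative sketch, not part of the driver: the SRL layer
 * fallback above in isolation. has_srl[] is a hypothetical per-layer
 * capability array standing in for layer_info[i].max_srl_profiles, and
 * 0xFF stands in for ICE_SCHED_INVAL_LAYER_NUM.
 */
static u8
example_pick_srl_layer(const bool *has_srl, u8 num_layers, u8 layer)
{
	if (has_srl[layer])			/* prefer the node's layer */
		return layer;
	if (layer < num_layers - 1 && has_srl[layer + 1])
		return layer + 1;		/* then the next layer down */
	if (layer > 0 && has_srl[layer - 1])
		return layer - 1;		/* then the next layer up */
	return 0xFF;				/* no SRL-capable layer */
}

/** * ice_sched_rm_rl_profile - remove RL profile ID * @hw: pointer to the hardware structure * @layer_num: layer number where profiles are saved * @profile_type: profile type like EIR, CIR, or SRL * @profile_id: profile ID to remove * * This function removes rate limit profile from layer 'layer_num' of type * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold * scheduler lock.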
*/ static enum ice_status ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type, u16 profile_id) { struct ice_aqc_rl_profile_info *rl_prof_elem; enum ice_status status = ICE_SUCCESS; if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) return ICE_ERR_PARAM; /* Check the existing list for RL profile */ LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num], ice_aqc_rl_profile_info, list_entry) if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == profile_type && LE16_TO_CPU(rl_prof_elem->profile.profile_id) == profile_id) { if (rl_prof_elem->prof_id_ref) rl_prof_elem->prof_id_ref--; /* Remove old profile ID from database */ status = ice_sched_del_rl_profile(hw, rl_prof_elem); if (status && status != ICE_ERR_IN_USE) ice_debug(hw, ICE_DBG_SCHED, "Remove rl profile failed\n"); break; } if (status == ICE_ERR_IN_USE) status = ICE_SUCCESS; return status; } /** * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default * @pi: port information structure * @node: pointer to node structure * @rl_type: rate limit type min, max, or shared * @layer_num: layer number where RL profiles are saved * * This function configures node element's BW rate limit profile ID of * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. */ static enum ice_status ice_sched_set_node_bw_dflt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u8 layer_num) { enum ice_status status; struct ice_hw *hw; u8 profile_type; u16 rl_prof_id; u16 old_id; hw = pi->hw; switch (rl_type) { case ICE_MIN_BW: profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; break; case ICE_MAX_BW: profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; break; case ICE_SHARED_BW: profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; /* No SRL is configured for default case */ rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; break; default: return ICE_ERR_PARAM; } /* Save existing RL prof ID for later clean up */ old_id = ice_sched_get_node_rl_prof_id(node, rl_type); /* Configure BW scheduling parameters */ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); if (status) return status; /* Remove stale RL profile ID */ if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || old_id == ICE_SCHED_INVAL_PROF_ID) return ICE_SUCCESS; return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id); } /** * ice_sched_set_node_bw - set node's bandwidth * @pi: port information structure * @node: tree node * @rl_type: rate limit type min, max, or shared * @bw: bandwidth in Kbps - Kilo bits per sec * @layer_num: layer number * * This function adds new profile corresponding to requested BW, configures * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile * ID from local database. The caller needs to hold scheduler lock. 
*/ static enum ice_status ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw, u8 layer_num) { struct ice_aqc_rl_profile_info *rl_prof_info; enum ice_status status = ICE_ERR_PARAM; struct ice_hw *hw = pi->hw; u16 old_id, rl_prof_id; rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num); if (!rl_prof_info) return status; rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id); /* Save existing RL prof ID for later clean up */ old_id = ice_sched_get_node_rl_prof_id(node, rl_type); /* Configure BW scheduling parameters */ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); if (status) return status; /* New changes have been applied */ /* Increment the profile ID reference count */ rl_prof_info->prof_id_ref++; /* Check for old ID removal */ if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) || old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id) return ICE_SUCCESS; return ice_sched_rm_rl_profile(hw, layer_num, rl_prof_info->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M, old_id); } /** * ice_sched_set_node_bw_lmt - set node's BW limit * @pi: port information structure * @node: tree node * @rl_type: rate limit type min, max, or shared * @bw: bandwidth in Kbps - Kilo bits per sec * * It updates node's BW limit parameters like BW RL profile ID of type CIR, * EIR, or SRL. The caller needs to hold scheduler lock. * * NOTE: Caller provides the correct SRL node in case of shared profile * settings. */ static enum ice_status ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type, u32 bw) { struct ice_hw *hw; u8 layer_num; if (!pi) return ICE_ERR_PARAM; hw = pi->hw; /* Remove unused RL profile IDs from HW and SW DB */ ice_sched_rm_unused_rl_prof(hw); layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, - node->tx_sched_layer); + node->tx_sched_layer); if (layer_num >= hw->num_tx_sched_layers) return ICE_ERR_PARAM; if (bw == ICE_SCHED_DFLT_BW) return ice_sched_set_node_bw_dflt(pi, node, rl_type, layer_num); return ice_sched_set_node_bw(pi, node, rl_type, bw, layer_num); } /** * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default * @pi: port information structure * @node: pointer to node structure * @rl_type: rate limit type min, max, or shared * * This function configures node element's BW rate limit profile ID of * type CIR, EIR, or SRL to default. This function needs to be called * with the scheduler lock held. */ static enum ice_status ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, struct ice_sched_node *node, enum ice_rl_type rl_type) { return ice_sched_set_node_bw_lmt(pi, node, rl_type, ICE_SCHED_DFLT_BW); }
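/* Editor's illustrative sketch, not part of the driver: the profile swap
 * ordering used by ice_sched_set_node_bw() above, reduced to its essence
 * with hypothetical types. The node is pointed at the new profile ID
 * before the old profile's reference is dropped, so the node never
 * references a profile that removal could already have freed.
 */
struct example_rl_prof {
	u16 id;
	u16 ref;
};

static void
example_swap_rl_prof(u16 *node_prof_id, struct example_rl_prof *new_prof,
		     struct example_rl_prof *old_prof)
{
	*node_prof_id = new_prof->id;	/* 1) re-point the node first */
	new_prof->ref++;		/* 2) take the new reference */
	if (old_prof && old_prof->id != new_prof->id && old_prof->ref)
		old_prof->ref--;	/* 3) only now drop the old one */
}

/** * ice_sched_validate_srl_node - Check node for SRL applicability * @node: sched node to configure * @sel_layer: selected SRL layer * * This function checks if the SRL can be applied to a selected layer node on * behalf of the requested node (first argument). This function needs to be * called with scheduler lock held. */ static enum ice_status ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) { /* SRL profiles are not available on all layers. Check if the * SRL profile can be applied to a node above or below the * requested node. SRL configuration is possible only if the * selected layer's node has a single child.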
*/ if (sel_layer == node->tx_sched_layer || ((sel_layer == node->tx_sched_layer + 1) && node->num_children == 1) || ((sel_layer == node->tx_sched_layer - 1) && (node->parent && node->parent->num_children == 1))) return ICE_SUCCESS; return ICE_ERR_CFG; } /** * ice_sched_save_q_bw - save queue node's BW information * @q_ctx: queue context structure * @rl_type: rate limit type min, max, or shared * @bw: bandwidth in Kbps - Kilo bits per sec * * Save BW information of queue type node for post replay use. */ static enum ice_status ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) { switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw); break; case ICE_MAX_BW: ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw); break; case ICE_SHARED_BW: ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); break; default: return ICE_ERR_PARAM; } return ICE_SUCCESS; } /** * ice_sched_set_q_bw_lmt - sets queue BW limit * @pi: port information structure * @vsi_handle: sw VSI handle * @tc: traffic class * @q_handle: software queue handle * @rl_type: min, max, or shared * @bw: bandwidth in Kbps * * This function sets BW limit of queue scheduling node. */ static enum ice_status ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; struct ice_q_ctx *q_ctx; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); if (!q_ctx) goto exit_q_bw_lmt; node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); if (!node) { ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n"); goto exit_q_bw_lmt; } /* Return error if it is not a leaf node */ if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) goto exit_q_bw_lmt; /* SRL bandwidth layer selection */ if (rl_type == ICE_SHARED_BW) { u8 sel_layer; /* selected layer */ sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, node->tx_sched_layer); if (sel_layer >= pi->hw->num_tx_sched_layers) { status = ICE_ERR_PARAM; goto exit_q_bw_lmt; } status = ice_sched_validate_srl_node(node, sel_layer); if (status) goto exit_q_bw_lmt; } if (bw == ICE_SCHED_DFLT_BW) status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); else status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); if (!status) status = ice_sched_save_q_bw(q_ctx, rl_type, bw); exit_q_bw_lmt: ice_release_lock(&pi->sched_lock); return status; } /** * ice_cfg_q_bw_lmt - configure queue BW limit * @pi: port information structure * @vsi_handle: sw VSI handle * @tc: traffic class * @q_handle: software queue handle * @rl_type: min, max, or shared * @bw: bandwidth in Kbps * * This function configures BW limit of queue scheduling node. */ enum ice_status ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type, u32 bw) { return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, bw); } /** * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit * @pi: port information structure * @vsi_handle: sw VSI handle * @tc: traffic class * @q_handle: software queue handle * @rl_type: min, max, or shared * * This function configures BW default limit of queue scheduling node. 
*/ enum ice_status ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, enum ice_rl_type rl_type) { return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, ICE_SCHED_DFLT_BW); } /** * ice_sched_save_tc_node_bw - save TC node BW limit * @pi: port information structure * @tc: TC number * @rl_type: min or max * @bw: bandwidth in Kbps * * This function saves the modified values of bandwidth settings for later * replay purposes (restore) after reset. */ static enum ice_status ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw) { if (tc >= ICE_MAX_TRAFFIC_CLASS) return ICE_ERR_PARAM; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw); break; case ICE_MAX_BW: ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw); break; case ICE_SHARED_BW: ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw); break; default: return ICE_ERR_PARAM; } return ICE_SUCCESS; } /** * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit * @pi: port information structure * @tc: TC number * @rl_type: min or max * @bw: bandwidth in Kbps * * This function configures bandwidth limit of TC node. */ static enum ice_status ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw) { enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *tc_node; if (tc >= ICE_MAX_TRAFFIC_CLASS) return status; ice_acquire_lock(&pi->sched_lock); tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) goto exit_set_tc_node_bw; if (bw == ICE_SCHED_DFLT_BW) status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type); else status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw); if (!status) status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw); exit_set_tc_node_bw: ice_release_lock(&pi->sched_lock); return status; } /** * ice_cfg_tc_node_bw_lmt - configure TC node BW limit * @pi: port information structure * @tc: TC number * @rl_type: min or max * @bw: bandwidth in Kbps * * This function configures BW limit of TC node. * Note: The minimum guaranteed reservation is done via DCBX. */ enum ice_status ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u32 bw) { return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw); } /** * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit * @pi: port information structure * @tc: TC number * @rl_type: min or max * * This function configures BW default limit of TC node. */ enum ice_status ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type) { return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW); }
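/* Editor's illustrative sketch, not part of the driver: a hypothetical
 * caller capping TC2 at 500 Mbps with ice_cfg_tc_node_bw_lmt() above and
 * later restoring the default (no limit) with ice_cfg_tc_node_bw_dflt_lmt().
 * Bandwidth arguments are in Kbps throughout this file.
 */
static enum ice_status
ice_example_cap_tc2(struct ice_port_info *pi, bool restore)
{
	if (restore)
		return ice_cfg_tc_node_bw_dflt_lmt(pi, 2, ICE_MAX_BW);
	return ice_cfg_tc_node_bw_lmt(pi, 2, ICE_MAX_BW, 500000);
}

/** * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information * @pi: port information structure * @tc: traffic class * @rl_type: rate limit type min or max * @bw_alloc: Bandwidth allocation information * * Save BW alloc information of TC type node for post replay use.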
*/ static enum ice_status ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u16 bw_alloc) { if (tc >= ICE_MAX_TRAFFIC_CLASS) return ICE_ERR_PARAM; switch (rl_type) { case ICE_MIN_BW: ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc], bw_alloc); break; case ICE_MAX_BW: ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc], bw_alloc); break; default: return ICE_ERR_PARAM; } return ICE_SUCCESS; } /** * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc * @pi: port information structure * @tc: TC number * @rl_type: min or max * @bw_alloc: bandwidth alloc * * This function configures bandwidth alloc of TC node, also saves the * changed settings for replay purposes, and returns success if it succeeds * in modifying the bandwidth alloc setting. */ static enum ice_status ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u8 bw_alloc) { enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *tc_node; if (tc >= ICE_MAX_TRAFFIC_CLASS) return status; ice_acquire_lock(&pi->sched_lock); tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) goto exit_set_tc_node_bw_alloc; status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type, bw_alloc); if (status) goto exit_set_tc_node_bw_alloc; status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc); exit_set_tc_node_bw_alloc: ice_release_lock(&pi->sched_lock); return status; } /** * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc * @pi: port information structure * @tc: TC number * @rl_type: min or max * @bw_alloc: bandwidth alloc * * This function configures BW allocation of TC node. * Note: The minimum guaranteed reservation is done via DCBX. */ enum ice_status ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc, enum ice_rl_type rl_type, u8 bw_alloc) { return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc); } /** * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default * @pi: port information structure * @vsi_handle: software VSI handle * * This function retrieves the aggregator ID based on VSI ID and TC, * and sets node's BW limit to default. This function needs to be * called with the scheduler lock held. */ enum ice_status ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle) { struct ice_vsi_ctx *vsi_ctx; enum ice_status status = ICE_SUCCESS; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) return ICE_ERR_PARAM; ice_for_each_traffic_class(tc) { struct ice_sched_node *node; node = vsi_ctx->sched.ag_node[tc]; if (!node) continue; /* Set min profile to default */ status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW); if (status) break; /* Set max profile to default */ status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW); if (status) break; /* Remove shared profile, if there is one */ status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_SHARED_BW); if (status) break; } return status; } /** * ice_sched_get_node_by_id_type - get node from ID type * @pi: port information structure * @id: identifier * @agg_type: type of aggregator * @tc: traffic class * * This function returns the node identified by ID of type aggregator, based * on traffic class (TC). This function needs to be called with * the scheduler lock held.
*/ static struct ice_sched_node * ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc) { struct ice_sched_node *node = NULL; struct ice_sched_node *child_node; switch (agg_type) { case ICE_AGG_TYPE_VSI: { struct ice_vsi_ctx *vsi_ctx; u16 vsi_handle = (u16)id; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) break; /* Get sched_vsi_info */ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) break; node = vsi_ctx->sched.vsi_node[tc]; break; } case ICE_AGG_TYPE_AGG: { struct ice_sched_node *tc_node; tc_node = ice_sched_get_tc_node(pi, tc); if (tc_node) node = ice_sched_get_agg_node(pi, tc_node, id); break; } case ICE_AGG_TYPE_Q: /* The current implementation allows a single queue to modify */ node = ice_sched_get_node(pi, id); break; case ICE_AGG_TYPE_QG: /* The current implementation allows a single qg to modify */ child_node = ice_sched_get_node(pi, id); if (!child_node) break; node = child_node->parent; break; default: break; } return node; } /** * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC * @pi: port information structure * @id: ID (software VSI handle or AGG ID) * @agg_type: aggregator type (VSI or AGG type node) * @tc: traffic class * @rl_type: min or max * @bw: bandwidth in Kbps * * This function sets BW limit of VSI or Aggregator scheduling node * based on TC information from passed in argument BW. */ enum ice_status ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id, enum ice_agg_type agg_type, u8 tc, enum ice_rl_type rl_type, u32 bw) { enum ice_status status = ICE_ERR_PARAM; struct ice_sched_node *node; if (!pi) return status; if (rl_type == ICE_UNKNOWN_BW) return status; ice_acquire_lock(&pi->sched_lock); node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc); if (!node) { ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n"); goto exit_set_node_bw_lmt_per_tc; } if (bw == ICE_SCHED_DFLT_BW) status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); else status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); exit_set_node_bw_lmt_per_tc: ice_release_lock(&pi->sched_lock); return status; }
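/* Editor's illustrative sketch, not part of the driver: a hypothetical
 * caller using ice_sched_set_node_bw_lmt_per_tc() above to cap an
 * aggregator's EIR (max) bandwidth on one TC. The aggregator ID is a
 * placeholder; 1000000 Kbps is a 1 Gbps cap.
 */
static enum ice_status
ice_example_cap_agg_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
	return ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
						tc, ICE_MAX_BW, 1000000);
}

/** * ice_sched_validate_vsi_srl_node - validate VSI SRL node * @pi: port information structure * @vsi_handle: software VSI handle * * This function validates SRL node of the VSI node if available SRL layer is * different from the VSI node layer on all TC(s). This function needs to be * called with scheduler lock held.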
*/ static enum ice_status ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle) { u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; u8 tc; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; /* Return success if no nodes are present across TC */ ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node, *vsi_node; enum ice_rl_type rl_type = ICE_SHARED_BW; enum ice_status status; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) continue; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) continue; /* SRL bandwidth layer selection */ if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) { u8 node_layer = vsi_node->tx_sched_layer; u8 layer_num; layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, node_layer); if (layer_num >= pi->hw->num_tx_sched_layers) return ICE_ERR_PARAM; sel_layer = layer_num; } status = ice_sched_validate_srl_node(vsi_node, sel_layer); if (status) return status; } return ICE_SUCCESS; } /** * ice_sched_set_save_vsi_srl_node_bw - set VSI shared limit values * @pi: port information structure * @vsi_handle: software VSI handle * @tc: traffic class * @srl_node: sched node to configure * @rl_type: rate limit type minimum, maximum, or shared * @bw: minimum, maximum, or shared bandwidth in Kbps * * Configure shared rate limiter(SRL) of VSI type nodes across given traffic * class, and saves those values for later use for replaying purposes. The * caller holds the scheduler lock. */ static enum ice_status ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc, struct ice_sched_node *srl_node, enum ice_rl_type rl_type, u32 bw) { enum ice_status status; if (bw == ICE_SCHED_DFLT_BW) { status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); } else { status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw); if (status) return status; status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw); } return status; } /** * ice_sched_set_vsi_node_srl_per_tc - set VSI node BW shared limit for tc * @pi: port information structure * @vsi_handle: software VSI handle * @tc: traffic class * @min_bw: minimum bandwidth in Kbps * @max_bw: maximum bandwidth in Kbps * @shared_bw: shared bandwidth in Kbps * * Configure shared rate limiter(SRL) of VSI type nodes across requested * traffic class for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW * is passed, it removes the corresponding bw from the node. The caller * holds scheduler lock.
*/ static enum ice_status ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { struct ice_sched_node *tc_node, *vsi_node, *cfg_node; enum ice_status status; u8 layer_num; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_CFG; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) return ICE_ERR_CFG; layer_num = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW, vsi_node->tx_sched_layer); if (layer_num >= pi->hw->num_tx_sched_layers) return ICE_ERR_PARAM; /* SRL node may be different */ cfg_node = ice_sched_get_srl_node(vsi_node, layer_num); if (!cfg_node) return ICE_ERR_CFG; status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node, ICE_MIN_BW, min_bw); if (status) return status; status = ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node, ICE_MAX_BW, max_bw); if (status) return status; return ice_sched_set_save_vsi_srl_node_bw(pi, vsi_handle, tc, cfg_node, ICE_SHARED_BW, shared_bw); } /** * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit * @pi: port information structure * @vsi_handle: software VSI handle * @min_bw: minimum bandwidth in Kbps * @max_bw: maximum bandwidth in Kbps * @shared_bw: shared bandwidth in Kbps * * Configure shared rate limiter(SRL) of all VSI type nodes across all traffic * classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is * passed, it removes those value(s) from the node. */ enum ice_status ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw, u32 max_bw, u32 shared_bw) { enum ice_status status = ICE_SUCCESS; u8 tc; if (!pi) return ICE_ERR_PARAM; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); status = ice_sched_validate_vsi_srl_node(pi, vsi_handle); if (status) goto exit_set_vsi_bw_shared_lmt; /* Return success if no nodes are present across TC */ ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node, *vsi_node; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) continue; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) continue; status = ice_sched_set_vsi_node_srl_per_tc(pi, vsi_handle, tc, min_bw, max_bw, shared_bw); if (status) break; } exit_set_vsi_bw_shared_lmt: ice_release_lock(&pi->sched_lock); return status; } /** * ice_sched_validate_agg_srl_node - validate AGG SRL node * @pi: port information structure * @agg_id: aggregator ID * * This function validates SRL node of the AGG node if available SRL layer is * different from the AGG node layer on all TC(s). This function needs to be * called with scheduler lock held.
*/ static enum ice_status ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id) { u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM; struct ice_sched_agg_info *agg_info; bool agg_id_present = false; enum ice_status status = ICE_SUCCESS; u8 tc; LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info, list_entry) if (agg_info->agg_id == agg_id) { agg_id_present = true; break; } if (!agg_id_present) return ICE_ERR_PARAM; /* Return success if no nodes are present across TC */ ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node, *agg_node; enum ice_rl_type rl_type = ICE_SHARED_BW; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) continue; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) continue; /* SRL bandwidth layer selection */ if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) { u8 node_layer = agg_node->tx_sched_layer; u8 layer_num; layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, node_layer); if (layer_num >= pi->hw->num_tx_sched_layers) return ICE_ERR_PARAM; sel_layer = layer_num; } status = ice_sched_validate_srl_node(agg_node, sel_layer); if (status) break; } return status; } /** * ice_sched_validate_agg_id - Validate aggregator ID * @pi: port information structure * @agg_id: aggregator ID * * This function validates aggregator ID. Caller holds the scheduler lock. */ static enum ice_status ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id) { struct ice_sched_agg_info *agg_info; struct ice_sched_agg_info *tmp; bool agg_id_present = false; enum ice_status status; status = ice_sched_validate_agg_srl_node(pi, agg_id); if (status) return status; LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list, ice_sched_agg_info, list_entry) if (agg_info->agg_id == agg_id) { agg_id_present = true; break; } if (!agg_id_present) return ICE_ERR_PARAM; return ICE_SUCCESS; } /** * ice_sched_set_save_agg_srl_node_bw - set aggregator shared limit values * @pi: port information structure * @agg_id: aggregator ID * @tc: traffic class * @srl_node: sched node to configure * @rl_type: rate limit type minimum, maximum, or shared * @bw: minimum, maximum, or shared bandwidth in Kbps * * Configure shared rate limiter(SRL) of aggregator type nodes across * requested traffic class, and saves those values for later use for * replaying purposes. The caller holds the scheduler lock. */ static enum ice_status ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc, struct ice_sched_node *srl_node, enum ice_rl_type rl_type, u32 bw) { enum ice_status status; if (bw == ICE_SCHED_DFLT_BW) { status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type); } else { status = ice_sched_set_node_bw_lmt(pi, srl_node, rl_type, bw); if (status) return status; status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw); } return status; }
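/* Editor's illustrative sketch, not part of the driver: a hypothetical
 * caller applying and then clearing an aggregator shared rate limit via
 * ice_sched_set_agg_bw_shared_lmt() (defined below). Passing
 * ICE_SCHED_DFLT_BW for a value removes that limit, so the clear path
 * strips all three limits at once.
 */
static enum ice_status
ice_example_agg_srl(struct ice_port_info *pi, u32 agg_id, bool clear)
{
	if (clear)
		return ice_sched_set_agg_bw_shared_lmt(pi, agg_id,
						       ICE_SCHED_DFLT_BW,
						       ICE_SCHED_DFLT_BW,
						       ICE_SCHED_DFLT_BW);
	/* min 100 Mbps, max 2 Gbps, 1 Gbps shared across the aggregator */
	return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, 100000, 2000000,
					       1000000);
}

/** * ice_sched_set_agg_node_srl_per_tc - set aggregator SRL per tc * @pi: port information structure * @agg_id: aggregator ID * @tc: traffic class * @min_bw: minimum bandwidth in Kbps * @max_bw: maximum bandwidth in Kbps * @shared_bw: shared bandwidth in Kbps * * This function configures the shared rate limiter(SRL) of aggregator type * node for a given traffic class for aggregator matching agg_id. When BW * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller * holds the scheduler lock.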
*/ static enum ice_status ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { struct ice_sched_node *tc_node, *agg_node, *cfg_node; enum ice_rl_type rl_type = ICE_SHARED_BW; enum ice_status status = ICE_ERR_CFG; u8 layer_num; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) return ICE_ERR_CFG; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) return ICE_ERR_CFG; layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, agg_node->tx_sched_layer); if (layer_num >= pi->hw->num_tx_sched_layers) return ICE_ERR_PARAM; /* SRL node may be different */ cfg_node = ice_sched_get_srl_node(agg_node, layer_num); if (!cfg_node) return ICE_ERR_CFG; status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, ICE_MIN_BW, min_bw); if (status) return status; status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, ICE_MAX_BW, max_bw); if (status) return status; status = ice_sched_set_save_agg_srl_node_bw(pi, agg_id, tc, cfg_node, ICE_SHARED_BW, shared_bw); return status; } /** * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit * @pi: port information structure * @agg_id: aggregator ID * @min_bw: minimum bandwidth in Kbps * @max_bw: maximum bandwidth in Kbps * @shared_bw: shared bandwidth in Kbps * * This function configures the shared rate limiter(SRL) of all aggregator type * nodes across all traffic classes for aggregator matching agg_id. When * BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the * node(s). */ enum ice_status ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw, u32 max_bw, u32 shared_bw) { enum ice_status status; u8 tc; if (!pi) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); status = ice_sched_validate_agg_id(pi, agg_id); if (status) goto exit_agg_bw_shared_lmt; /* Return success if no nodes are present across TC */ ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node, *agg_node; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) continue; agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id); if (!agg_node) continue; status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, min_bw, max_bw, shared_bw); if (status) break; } exit_agg_bw_shared_lmt: ice_release_lock(&pi->sched_lock); return status; } /** * ice_sched_set_agg_bw_shared_lmt_per_tc - set aggregator BW shared lmt per tc * @pi: port information structure * @agg_id: aggregator ID * @tc: traffic class * @min_bw: minimum bandwidth in Kbps * @max_bw: maximum bandwidth in Kbps * @shared_bw: shared bandwidth in Kbps * * This function configures the shared rate limiter(SRL) of aggregator type * node for a given traffic class for aggregator matching agg_id. When BW * value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. 
*/ enum ice_status ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw) { enum ice_status status; if (!pi) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); status = ice_sched_validate_agg_id(pi, agg_id); if (status) goto exit_agg_bw_shared_lmt_per_tc; status = ice_sched_set_agg_node_srl_per_tc(pi, agg_id, tc, min_bw, max_bw, shared_bw); exit_agg_bw_shared_lmt_per_tc: ice_release_lock(&pi->sched_lock); return status; } /** * ice_sched_cfg_sibl_node_prio - configure node sibling priority * @pi: port information structure * @node: sched node to configure * @priority: sibling priority * * This function configures node element's sibling priority only. This * function needs to be called with scheduler lock held. */ enum ice_status ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi, struct ice_sched_node *node, u8 priority) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; struct ice_hw *hw = pi->hw; enum ice_status status; if (!hw) return ICE_ERR_PARAM; buf = node->info; data = &buf.data; data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) & ICE_AQC_ELEM_GENERIC_PRIO_M; data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M; data->generic |= priority; /* Configure element */ status = ice_sched_update_elem(hw, node, &buf); return status; } /** * ice_cfg_rl_burst_size - Set burst size value * @hw: pointer to the HW struct * @bytes: burst size in bytes * * This function configures/sets the burst size to the requested new value. The new * burst size value is used for future rate limit calls. It doesn't change the * existing or previously created RL profiles. */ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) { u16 burst_size_to_prog; if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || bytes > ICE_MAX_BURST_SIZE_ALLOWED) return ICE_ERR_PARAM; if (ice_round_to_num(bytes, 64) <= ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { /* 64 byte granularity case */ /* Disable MSB granularity bit */ burst_size_to_prog = ICE_64_BYTE_GRANULARITY; /* round number to nearest 64 byte granularity */ bytes = ice_round_to_num(bytes, 64); /* The value is in 64 byte chunks */ burst_size_to_prog |= (u16)(bytes / 64); } else { /* k bytes granularity case */ /* Enable MSB granularity bit */ burst_size_to_prog = ICE_KBYTE_GRANULARITY; /* round number to nearest 1024 granularity */ bytes = ice_round_to_num(bytes, 1024); /* check rounding doesn't go beyond allowed */ if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; /* The value is in k bytes */ burst_size_to_prog |= (u16)(bytes / 1024); } hw->max_burst_size = burst_size_to_prog; return ICE_SUCCESS; }
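/* Editor's illustrative sketch, not part of the driver: the burst size
 * encoding above as a pure function. The KB-granularity flag is modeled
 * as bit 15 of the programmed word and the 64-byte-range cutoff is passed
 * in, since the actual threshold constants are defined elsewhere; the
 * round-to-nearest here only approximates ice_round_to_num(). Example:
 * 9600 bytes -> 9600/64 = 150 with the MSB clear; 1000000 bytes ->
 * rounds to 977 KB -> (1 << 15) | 977 with the MSB set.
 */
static u16
example_encode_burst(u32 bytes, u32 cutoff_64b)
{
	if (((bytes + 32) / 64) * 64 <= cutoff_64b) {
		bytes = ((bytes + 32) / 64) * 64;	/* nearest 64B unit */
		return (u16)(bytes / 64);	/* MSB clear: 64B granularity */
	}
	bytes = ((bytes + 512) / 1024) * 1024;		/* nearest 1KB unit */
	return (u16)((1 << 15) | (bytes / 1024));	/* MSB set: KB gran */
}

/** * ice_sched_replay_node_prio - re-configure node priority * @hw: pointer to the HW struct * @node: sched node to configure * @priority: priority value * * This function configures node element's priority value. It * needs to be called with scheduler lock held.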
*/ static enum ice_status ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, u8 priority) { struct ice_aqc_txsched_elem_data buf; struct ice_aqc_txsched_elem *data; enum ice_status status; buf = node->info; data = &buf.data; data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; data->generic = priority; /* Configure element */ status = ice_sched_update_elem(hw, node, &buf); return status; } /** * ice_sched_replay_node_bw - replay node(s) BW * @hw: pointer to the HW struct * @node: sched node to configure * @bw_t_info: BW type information * * This function restores node's BW from bw_t_info. The caller needs * to hold the scheduler lock. */ static enum ice_status ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, struct ice_bw_type_info *bw_t_info) { struct ice_port_info *pi = hw->port_info; enum ice_status status = ICE_ERR_PARAM; u16 bw_alloc; if (!node) return status; if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) return ICE_SUCCESS; if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) { status = ice_sched_replay_node_prio(hw, node, bw_t_info->generic); if (status) return status; } if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) { status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, bw_t_info->cir_bw.bw); if (status) return status; } if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) { bw_alloc = bw_t_info->cir_bw.bw_alloc; status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW, bw_alloc); if (status) return status; } if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) { status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, bw_t_info->eir_bw.bw); if (status) return status; } if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) { bw_alloc = bw_t_info->eir_bw.bw_alloc; status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW, bw_alloc); if (status) return status; } if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED)) status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, bw_t_info->shared_bw); return status; } /** * ice_sched_replay_agg_bw - replay aggregator node(s) BW * @hw: pointer to the HW struct * @agg_info: aggregator data structure * * This function replays the BW of aggregator type nodes. The caller needs to hold * the scheduler lock. */ static enum ice_status ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info) { struct ice_sched_node *tc_node, *agg_node; enum ice_status status = ICE_SUCCESS; u8 tc; if (!agg_info) return ICE_ERR_PARAM; ice_for_each_traffic_class(tc) { if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap, ICE_BW_TYPE_CNT)) continue; tc_node = ice_sched_get_tc_node(hw->port_info, tc); if (!tc_node) { status = ICE_ERR_PARAM; break; } agg_node = ice_sched_get_agg_node(hw->port_info, tc_node, agg_info->agg_id); if (!agg_node) { status = ICE_ERR_PARAM; break; } status = ice_sched_replay_node_bw(hw, agg_node, &agg_info->bw_t_info[tc]); if (status) break; } return status; } /** * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap * @pi: port info struct * @tc_bitmap: 8 bits TC bitmap to check * @ena_tc_bitmap: 8 bits enabled TC bitmap to return * * This function returns the enabled TC bitmap in variable ena_tc_bitmap. Some TCs * may be missing, in which case only the enabled TCs are returned. This function * needs to be called with scheduler lock held.
*/ static void ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap, ice_bitmap_t *ena_tc_bitmap) { u8 tc; /* Some TC(s) may be missing after reset, adjust for replay */ ice_for_each_traffic_class(tc) if (ice_is_tc_ena(*tc_bitmap, tc) && (ice_sched_get_tc_node(pi, tc))) ice_set_bit(tc, ena_tc_bitmap); } /** * ice_sched_replay_agg - recreate aggregator node(s) * @hw: pointer to the HW struct * * This function recreates aggregator type nodes which were not replayed earlier. * It also replays aggregator BW information. These aggregator nodes are not * associated with VSI type nodes yet. */ void ice_sched_replay_agg(struct ice_hw *hw) { struct ice_port_info *pi = hw->port_info; struct ice_sched_agg_info *agg_info; ice_acquire_lock(&pi->sched_lock); LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, list_entry) /* replay aggregator (re-create aggregator node) */ if (!ice_cmp_bitmap(agg_info->tc_bitmap, agg_info->replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) { ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); enum ice_status status; ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, replay_bitmap); status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id, ICE_AGG_TYPE_AGG, replay_bitmap); if (status) { ice_info(hw, "Replay agg id[%d] failed\n", agg_info->agg_id); /* Move on to next one */ continue; } /* Replay aggregator node BW (restore aggregator BW) */ status = ice_sched_replay_agg_bw(hw, agg_info); if (status) ice_info(hw, "Replay agg bw [id=%d] failed\n", agg_info->agg_id); } ice_release_lock(&pi->sched_lock); } /** * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization * @hw: pointer to the HW struct * * This function initializes aggregator(s) TC bitmap to zero, a required * preinit step for replaying aggregators. */ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw) { struct ice_port_info *pi = hw->port_info; struct ice_sched_agg_info *agg_info; ice_acquire_lock(&pi->sched_lock); LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info, list_entry) { struct ice_sched_agg_vsi_info *agg_vsi_info; agg_info->tc_bitmap[0] = 0; LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list, ice_sched_agg_vsi_info, list_entry) agg_vsi_info->tc_bitmap[0] = 0; } ice_release_lock(&pi->sched_lock); } /** * ice_sched_replay_root_node_bw - replay root node BW * @pi: port information structure * * Replay root node BW settings. */ enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi) { enum ice_status status = ICE_SUCCESS; if (!pi->hw) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); status = ice_sched_replay_node_bw(pi->hw, pi->root, &pi->root_node_bw_t_info); ice_release_lock(&pi->sched_lock); return status; } /** * ice_sched_replay_tc_node_bw - replay TC node(s) BW * @pi: port information structure * * This function replays TC node(s) BW settings.
*/ enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi) { enum ice_status status = ICE_SUCCESS; u8 tc; if (!pi->hw) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); ice_for_each_traffic_class(tc) { struct ice_sched_node *tc_node; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) continue; /* TC not present */ status = ice_sched_replay_node_bw(pi->hw, tc_node, &pi->tc_node_bw_t_info[tc]); if (status) break; } ice_release_lock(&pi->sched_lock); return status; } /** * ice_sched_replay_vsi_bw - replay VSI type node(s) BW * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * @tc_bitmap: 8 bits TC bitmap * * This function replays VSI type nodes bandwidth. This function needs to be * called with scheduler lock held. */ static enum ice_status ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle, ice_bitmap_t *tc_bitmap) { struct ice_sched_node *vsi_node, *tc_node; struct ice_port_info *pi = hw->port_info; struct ice_bw_type_info *bw_t_info; struct ice_vsi_ctx *vsi_ctx; enum ice_status status = ICE_SUCCESS; u8 tc; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); if (!vsi_ctx) return ICE_ERR_PARAM; ice_for_each_traffic_class(tc) { if (!ice_is_tc_ena(*tc_bitmap, tc)) continue; tc_node = ice_sched_get_tc_node(pi, tc); if (!tc_node) continue; vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); if (!vsi_node) continue; bw_t_info = &vsi_ctx->sched.bw_t_info[tc]; status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info); if (status) break; } return status; } /** * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s) * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * * This function replays aggregator node, VSI to aggregator type nodes, and * their node bandwidth information. This function needs to be called with * scheduler lock held. */ static enum ice_status ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_port_info *pi = hw->port_info; struct ice_sched_agg_info *agg_info; enum ice_status status; ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; agg_info = ice_get_vsi_agg_info(hw, vsi_handle); if (!agg_info) return ICE_SUCCESS; /* Not present in list - default Agg case */ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle); if (!agg_vsi_info) return ICE_SUCCESS; /* Not present in list - default Agg case */ ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap, replay_bitmap); /* Replay aggregator node associated to vsi_handle */ status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id, ICE_AGG_TYPE_AGG, replay_bitmap); if (status) return status; /* Replay aggregator node BW (restore aggregator BW) */ status = ice_sched_replay_agg_bw(hw, agg_info); if (status) return status; ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS); ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap, replay_bitmap); /* Move this VSI (vsi_handle) to above aggregator */ status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle, replay_bitmap); if (status) return status; /* Replay VSI BW (restore VSI BW) */ return ice_sched_replay_vsi_bw(hw, vsi_handle, agg_vsi_info->tc_bitmap); } /** * ice_replay_vsi_agg - replay VSI to aggregator node * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * * This function replays association of VSI to aggregator type nodes, and * node bandwidth information. 
*/ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle) { struct ice_port_info *pi = hw->port_info; enum ice_status status; ice_acquire_lock(&pi->sched_lock); status = ice_sched_replay_vsi_agg(hw, vsi_handle); ice_release_lock(&pi->sched_lock); return status; } /** * ice_sched_replay_q_bw - replay queue type node BW * @pi: port information structure * @q_ctx: queue context structure * * This function replays queue type node bandwidth. This function needs to be * called with scheduler lock held. */ enum ice_status ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) { struct ice_sched_node *q_node; /* Following also checks the presence of node in tree */ q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); if (!q_node) return ICE_ERR_PARAM; return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); } diff --git a/sys/dev/ice/ice_switch.c b/sys/dev/ice/ice_switch.c index 2abdb8096ee4..6fe271734e38 100644 --- a/sys/dev/ice/ice_switch.c +++ b/sys/dev/ice/ice_switch.c @@ -1,4239 +1,4248 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "ice_switch.h" #include "ice_flex_type.h" #include "ice_flow.h" #define ICE_ETH_DA_OFFSET 0 #define ICE_ETH_ETHTYPE_OFFSET 12 #define ICE_ETH_VLAN_TCI_OFFSET 14 #define ICE_MAX_VLAN_ID 0xFFF +#define ICE_ETH_P_8021Q 0x8100 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem * struct to configure any switch filter rules. 
* {DA (6 bytes), SA(6 bytes), * Ether type (2 bytes for header without VLAN tag) OR * VLAN tag (4 bytes for header with VLAN tag) } * * A word on the hardcoded values: * byte 0 = 0x2: to identify it as locally administered DA MAC * byte 6 = 0x2: to identify it as locally administered SA MAC * byte 12 = 0x81 & byte 13 = 0x00: * In case of VLAN filter the first two bytes define the ether type (0x8100) * and remaining two bytes are placeholder for programming a given VLAN ID * In case of Ether type filter it is treated as header without VLAN tag * and bytes 12 and 13 are used to program a given Ether type instead */ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, 0x2, 0, 0, 0, 0, 0, 0x81, 0, 0, 0}; /** * ice_init_def_sw_recp - initialize the recipe book keeping tables * @hw: pointer to the HW struct * @recp_list: pointer to sw recipe list * * Allocate memory for the entire recipe table and initialize the structures/ * entries corresponding to basic recipes. */ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list) { struct ice_sw_recipe *recps; u8 i; recps = (struct ice_sw_recipe *) ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps)); if (!recps) return ICE_ERR_NO_MEMORY; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { recps[i].root_rid = i; INIT_LIST_HEAD(&recps[i].filt_rules); INIT_LIST_HEAD(&recps[i].filt_replay_rules); INIT_LIST_HEAD(&recps[i].rg_list); ice_init_lock(&recps[i].filt_rule_lock); } *recp_list = recps; return ICE_SUCCESS; } /** * ice_aq_get_sw_cfg - get switch configuration * @hw: pointer to the hardware structure * @buf: pointer to the result buffer * @buf_size: length of the buffer available for response * @req_desc: pointer to requested descriptor * @num_elems: pointer to number of elements * @cd: pointer to command details structure or NULL * * Get switch configuration (0x0200) to be placed in buf. * This admin command returns information such as initial VSI/port number * and switch ID it belongs to. * * NOTE: *req_desc is both an input/output parameter. * The caller of this function first calls this function with *req_desc set * to 0. If the response from f/w has *req_desc set to 0, all the switch * configuration information has been returned; if non-zero (meaning not all * the information was returned), the caller should call this function again * with *req_desc set to the previous value returned by f/w to get the * next block of switch configuration information. * * *num_elems is an output-only parameter. It reflects the number of elements * in the response buffer. The caller of this function should use *num_elems while * parsing the response buffer.
*/ static enum ice_status ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, u16 buf_size, u16 *req_desc, u16 *num_elems, struct ice_sq_cd *cd) { struct ice_aqc_get_sw_cfg *cmd; struct ice_aq_desc desc; enum ice_status status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg); cmd = &desc.params.get_sw_conf; cmd->element = CPU_TO_LE16(*req_desc); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status) { *req_desc = LE16_TO_CPU(cmd->element); *num_elems = LE16_TO_CPU(cmd->num_elems); } return status; } /** * ice_alloc_rss_global_lut - allocate a RSS global LUT * @hw: pointer to the HW struct * @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource * @global_lut_id: output parameter for the RSS global LUT's ID */ enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id) { struct ice_aqc_alloc_free_res_elem *sw_buf; enum ice_status status; u16 buf_len; buf_len = ice_struct_size(sw_buf, elem, 1); sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!sw_buf) return ICE_ERR_NO_MEMORY; sw_buf->num_elems = CPU_TO_LE16(1); sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH | (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED : ICE_AQC_RES_TYPE_FLAG_DEDICATED)); status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL); if (status) { ice_debug(hw, ICE_DBG_RES, "Failed to allocate %s RSS global LUT, status %d\n", shared_res ? "shared" : "dedicated", status); goto ice_alloc_global_lut_exit; } *global_lut_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp); ice_alloc_global_lut_exit: ice_free(hw, sw_buf); return status; } /** * ice_free_global_lut - free a RSS global LUT * @hw: pointer to the HW struct * @global_lut_id: ID of the RSS global LUT to free */ enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id) { struct ice_aqc_alloc_free_res_elem *sw_buf; u16 buf_len, num_elems = 1; enum ice_status status; buf_len = ice_struct_size(sw_buf, elem, num_elems); sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!sw_buf) return ICE_ERR_NO_MEMORY; sw_buf->num_elems = CPU_TO_LE16(num_elems); sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH); sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(global_lut_id); status = ice_aq_alloc_free_res(hw, num_elems, sw_buf, buf_len, ice_aqc_opc_free_res, NULL); if (status) ice_debug(hw, ICE_DBG_RES, "Failed to free RSS global LUT %d, status %d\n", global_lut_id, status); ice_free(hw, sw_buf); return status; } /** * ice_alloc_sw - allocate resources specific to switch * @hw: pointer to the HW struct * @ena_stats: true to turn on VEB stats * @shared_res: true for shared resource, false for dedicated resource * @sw_id: switch ID returned * @counter_id: VEB counter ID returned * * allocates switch resources (SWID and VEB counter) (0x0208) */ enum ice_status ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id, u16 *counter_id) { struct ice_aqc_alloc_free_res_elem *sw_buf; struct ice_aqc_res_elem *sw_ele; enum ice_status status; u16 buf_len; buf_len = ice_struct_size(sw_buf, elem, 1); sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!sw_buf) return ICE_ERR_NO_MEMORY; /* Prepare buffer for switch ID. * The number of resource entries in buffer is passed as 1 since only a * single switch/VEB instance is allocated, and hence a single sw_id * is requested. 
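 * (buf_len was computed above with ice_struct_size(sw_buf, elem, 1),
 * i.e. the element-array header plus exactly one ice_aqc_res_elem,
 * which is all this single-SWID request needs.)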
*/ sw_buf->num_elems = CPU_TO_LE16(1); sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID | (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED : ICE_AQC_RES_TYPE_FLAG_DEDICATED)); status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL); if (status) goto ice_alloc_sw_exit; sw_ele = &sw_buf->elem[0]; *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp); if (ena_stats) { /* Prepare buffer for VEB Counter */ enum ice_adminq_opc opc = ice_aqc_opc_alloc_res; struct ice_aqc_alloc_free_res_elem *counter_buf; struct ice_aqc_res_elem *counter_ele; counter_buf = (struct ice_aqc_alloc_free_res_elem *) ice_malloc(hw, buf_len); if (!counter_buf) { status = ICE_ERR_NO_MEMORY; goto ice_alloc_sw_exit; } /* The number of resource entries in buffer is passed as 1 since * only a single switch/VEB instance is allocated, and hence a * single VEB counter is requested. */ counter_buf->num_elems = CPU_TO_LE16(1); counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER | ICE_AQC_RES_TYPE_FLAG_DEDICATED); status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len, opc, NULL); if (status) { ice_free(hw, counter_buf); goto ice_alloc_sw_exit; } counter_ele = &counter_buf->elem[0]; *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp); ice_free(hw, counter_buf); } ice_alloc_sw_exit: ice_free(hw, sw_buf); return status; } /** * ice_free_sw - free resources specific to switch * @hw: pointer to the HW struct * @sw_id: switch ID returned * @counter_id: VEB counter ID returned * * free switch resources (SWID and VEB counter) (0x0209) * * NOTE: This function frees multiple resources. It continues * releasing other resources even after it encounters error. * The error code returned is the last error it encountered. */ enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id) { struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf; enum ice_status status, ret_status; u16 buf_len; buf_len = ice_struct_size(sw_buf, elem, 1); sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!sw_buf) return ICE_ERR_NO_MEMORY; /* Prepare buffer to free for switch ID res. * The number of resource entries in buffer is passed as 1 since only a * single switch/VEB instance is freed, and hence a single sw_id * is released. 
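 * Note that even if this SWID free fails, the VEB counter below is
 * still released; per the NOTE in the function comment, the last error
 * seen is what gets returned.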
*/ sw_buf->num_elems = CPU_TO_LE16(1); sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID); sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id); ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_free_res, NULL); if (ret_status) ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); /* Prepare buffer to free for VEB Counter resource */ counter_buf = (struct ice_aqc_alloc_free_res_elem *) ice_malloc(hw, buf_len); if (!counter_buf) { ice_free(hw, sw_buf); return ICE_ERR_NO_MEMORY; } /* The number of resource entries in buffer is passed as 1 since only a * single switch/VEB instance is freed, and hence a single VEB counter * is released */ counter_buf->num_elems = CPU_TO_LE16(1); counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER); counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id); status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len, ice_aqc_opc_free_res, NULL); if (status) { ice_debug(hw, ICE_DBG_SW, "VEB counter resource could not be freed\n"); ret_status = status; } ice_free(hw, counter_buf); ice_free(hw, sw_buf); return ret_status; } /** * ice_aq_add_vsi * @hw: pointer to the HW struct * @vsi_ctx: pointer to a VSI context struct * @cd: pointer to command details structure or NULL * * Add a VSI context to the hardware (0x0210) */ enum ice_status ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *res; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.vsi_cmd; res = &desc.params.add_update_free_vsi_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi); if (!vsi_ctx->alloc_from_pool) cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); cmd->vf_id = vsi_ctx->vf_num; cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cd); if (!status) { vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M; vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free); } return status; } /** * ice_aq_free_vsi * @hw: pointer to the HW struct * @vsi_ctx: pointer to a VSI context struct * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources * @cd: pointer to command details structure or NULL * * Free VSI context info from hardware (0x0213) */ enum ice_status ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); if (keep_vsi_alloc) cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC); status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (!status) { vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); } return status; } /** * ice_aq_update_vsi * @hw: pointer to the HW struct * @vsi_ctx: pointer to a VSI context struct * @cd: pointer to command details structure or NULL * * Update VSI context in the hardware (0x0211) */ enum ice_status ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *resp; struct 
ice_aqc_add_get_update_free_vsi *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.vsi_cmd; resp = &desc.params.add_update_free_vsi_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi); cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cd); if (!status) { vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); } return status; } /** * ice_is_vsi_valid - check whether the VSI is valid or not * @hw: pointer to the HW struct * @vsi_handle: VSI handle * * check whether the VSI is valid or not */ bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle) { return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle]; } /** * ice_get_hw_vsi_num - return the HW VSI number * @hw: pointer to the HW struct * @vsi_handle: VSI handle * * return the HW VSI number * Caution: call this function only if VSI is valid (ice_is_vsi_valid) */ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle) { return hw->vsi_ctx[vsi_handle]->vsi_num; } /** * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle * @hw: pointer to the HW struct * @vsi_handle: VSI handle * * return the VSI context entry for a given VSI handle */ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) { return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle]; } /** * ice_save_vsi_ctx - save the VSI context for a given VSI handle * @hw: pointer to the HW struct * @vsi_handle: VSI handle * @vsi: VSI context pointer * * save the VSI context entry for a given VSI handle */ static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi) { hw->vsi_ctx[vsi_handle] = vsi; } /** * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs * @hw: pointer to the HW struct * @vsi_handle: VSI handle */ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle) { struct ice_vsi_ctx *vsi; u8 i; vsi = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi) return; ice_for_each_traffic_class(i) { if (vsi->lan_q_ctx[i]) { ice_free(hw, vsi->lan_q_ctx[i]); vsi->lan_q_ctx[i] = NULL; } } } /** * ice_clear_vsi_ctx - clear the VSI context entry * @hw: pointer to the HW struct * @vsi_handle: VSI handle * * clear the VSI context entry */ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) { struct ice_vsi_ctx *vsi; vsi = ice_get_vsi_ctx(hw, vsi_handle); if (vsi) { ice_clear_vsi_q_ctx(hw, vsi_handle); ice_free(hw, vsi); hw->vsi_ctx[vsi_handle] = NULL; } } /** * ice_clear_all_vsi_ctx - clear all the VSI context entries * @hw: pointer to the HW struct */ void ice_clear_all_vsi_ctx(struct ice_hw *hw) { u16 i; for (i = 0; i < ICE_MAX_VSI; i++) ice_clear_vsi_ctx(hw, i); } /** * ice_add_vsi - add VSI context to the hardware and VSI handle list * @hw: pointer to the HW struct * @vsi_handle: unique VSI handle provided by drivers * @vsi_ctx: pointer to a VSI context struct * @cd: pointer to command details structure or NULL * * Add a VSI context to the hardware also add it into the VSI handle list. * If this function gets called after reset for existing VSIs then update * with the new HW VSI number in the corresponding VSI handle list entry. 
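 * Illustrative usage, assuming the caller fills ctx.info beforehand and
 * lets firmware pick the VSI number from the pool:
 *
 *	struct ice_vsi_ctx ctx = { 0 };
 *
 *	ctx.alloc_from_pool = true;
 *	if (!ice_add_vsi(hw, vsi_handle, &ctx, NULL))
 *		ice_debug(hw, ICE_DBG_SW, "VSI %u maps to HW VSI %u\n",
 *			  vsi_handle, ctx.vsi_num);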
*/ enum ice_status ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_vsi_ctx *tmp_vsi_ctx; enum ice_status status; if (vsi_handle >= ICE_MAX_VSI) return ICE_ERR_PARAM; status = ice_aq_add_vsi(hw, vsi_ctx, cd); if (status) return status; tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); if (!tmp_vsi_ctx) { /* Create a new VSI context */ tmp_vsi_ctx = (struct ice_vsi_ctx *) ice_malloc(hw, sizeof(*tmp_vsi_ctx)); if (!tmp_vsi_ctx) { ice_aq_free_vsi(hw, vsi_ctx, false, cd); return ICE_ERR_NO_MEMORY; } *tmp_vsi_ctx = *vsi_ctx; ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); } else { /* update with new HW VSI num */ tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; } return ICE_SUCCESS; } /** * ice_free_vsi- free VSI context from hardware and VSI handle list * @hw: pointer to the HW struct * @vsi_handle: unique VSI handle * @vsi_ctx: pointer to a VSI context struct * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources * @cd: pointer to command details structure or NULL * * Free VSI context info from hardware as well as from VSI handle list */ enum ice_status ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd) { enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); if (!status) ice_clear_vsi_ctx(hw, vsi_handle); return status; } /** * ice_update_vsi * @hw: pointer to the HW struct * @vsi_handle: unique VSI handle * @vsi_ctx: pointer to a VSI context struct * @cd: pointer to command details structure or NULL * * Update VSI context in the hardware */ enum ice_status ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); return ice_aq_update_vsi(hw, vsi_ctx, cd); } /** * ice_aq_get_vsi_params * @hw: pointer to the HW struct * @vsi_ctx: pointer to a VSI context struct * @cd: pointer to command details structure or NULL * * Get VSI context info from hardware (0x0212) */ enum ice_status ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_get_update_free_vsi *cmd; struct ice_aqc_get_vsi_resp *resp; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.vsi_cmd; resp = &desc.params.get_vsi_resp; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params); cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cd); if (!status) { vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) & ICE_AQ_VSI_NUM_M; vsi_ctx->vf_num = resp->vf_id; vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used); vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free); } return status; } /** * ice_aq_add_update_mir_rule - add/update a mirror rule * @hw: pointer to the HW struct * @rule_type: Rule Type * @dest_vsi: VSI number to which packets will be mirrored * @count: length of the list * @mr_buf: buffer for list of mirrored VSI numbers * @cd: pointer to command details structure or NULL * @rule_id: Rule ID * * Add/Update Mirror Rule (0x260). 
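 * For illustration, mirroring the ingress traffic of two VSIs to
 * dest_vsi (the VSI indices here are arbitrary examples). rule_id must
 * be primed with ICE_INVAL_MIRROR_RULE_ID to create a new rule; on
 * success it holds the FW-assigned rule ID:
 *
 *	struct ice_mir_rule_buf mr[2] = {
 *		{ .vsi_idx = 5, .add = true },
 *		{ .vsi_idx = 7, .add = true },
 *	};
 *	u16 rule_id = ICE_INVAL_MIRROR_RULE_ID;
 *
 *	ice_aq_add_update_mir_rule(hw, ICE_AQC_RULE_TYPE_VPORT_INGRESS,
 *				   dest_vsi, 2, mr, NULL, &rule_id);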
*/ enum ice_status ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, u16 count, struct ice_mir_rule_buf *mr_buf, struct ice_sq_cd *cd, u16 *rule_id) { struct ice_aqc_add_update_mir_rule *cmd; struct ice_aq_desc desc; enum ice_status status; __le16 *mr_list = NULL; u16 buf_size = 0; switch (rule_type) { case ICE_AQC_RULE_TYPE_VPORT_INGRESS: case ICE_AQC_RULE_TYPE_VPORT_EGRESS: /* Make sure count and mr_buf are set for these rule_types */ if (!(count && mr_buf)) return ICE_ERR_PARAM; buf_size = count * sizeof(__le16); mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size); if (!mr_list) return ICE_ERR_NO_MEMORY; break; case ICE_AQC_RULE_TYPE_PPORT_INGRESS: case ICE_AQC_RULE_TYPE_PPORT_EGRESS: /* Make sure count and mr_buf are not set for these * rule_types */ if (count || mr_buf) return ICE_ERR_PARAM; break; default: ice_debug(hw, ICE_DBG_SW, "Error due to unsupported rule_type %u\n", rule_type); return ICE_ERR_OUT_OF_RANGE; } ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule); /* Pre-process 'mr_buf' items for add/update of virtual port * ingress/egress mirroring (but not physical port ingress/egress * mirroring) */ if (mr_buf) { int i; for (i = 0; i < count; i++) { u16 id; id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M; /* Validate specified VSI number, make sure it is less * than ICE_MAX_VSI, if not return with error. */ if (id >= ICE_MAX_VSI) { ice_debug(hw, ICE_DBG_SW, "Error VSI index (%u) out-of-range\n", id); ice_free(hw, mr_list); return ICE_ERR_OUT_OF_RANGE; } /* add VSI to mirror rule */ if (mr_buf[i].add) mr_list[i] = CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M); else /* remove VSI from mirror rule */ mr_list[i] = CPU_TO_LE16(id); } } cmd = &desc.params.add_update_rule; if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID) cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) | ICE_AQC_RULE_ID_VALID_M); cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M); cmd->num_entries = CPU_TO_LE16(count); cmd->dest = CPU_TO_LE16(dest_vsi); status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd); if (!status) *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M; ice_free(hw, mr_list); return status; } /** * ice_aq_delete_mir_rule - delete a mirror rule * @hw: pointer to the HW struct * @rule_id: Mirror rule ID (to be deleted) * @keep_allocd: if set, the VSI stays part of the PF allocated res, * otherwise it is returned to the shared pool * @cd: pointer to command details structure or NULL * * Delete Mirror Rule (0x261). 
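 * For example, to tear down such a rule and return its resource to the
 * shared pool (illustrative):
 *
 *	ice_aq_delete_mir_rule(hw, rule_id, false, NULL);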
*/ enum ice_status ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd, struct ice_sq_cd *cd) { struct ice_aqc_delete_mir_rule *cmd; struct ice_aq_desc desc; /* rule_id should be in the range 0...63 */ if (rule_id >= ICE_MAX_NUM_MIRROR_RULES) return ICE_ERR_OUT_OF_RANGE; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule); cmd = &desc.params.del_rule; rule_id |= ICE_AQC_RULE_ID_VALID_M; cmd->rule_id = CPU_TO_LE16(rule_id); if (keep_allocd) cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_alloc_free_vsi_list * @hw: pointer to the HW struct * @vsi_list_id: VSI list ID returned or used for lookup * @lkup_type: switch rule filter lookup type * @opc: switch rules population command type - pass in the command opcode * * allocates or free a VSI list resource */ static enum ice_status ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type, enum ice_adminq_opc opc) { struct ice_aqc_alloc_free_res_elem *sw_buf; struct ice_aqc_res_elem *vsi_ele; enum ice_status status; u16 buf_len; buf_len = ice_struct_size(sw_buf, elem, 1); sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!sw_buf) return ICE_ERR_NO_MEMORY; sw_buf->num_elems = CPU_TO_LE16(1); if (lkup_type == ICE_SW_LKUP_MAC || lkup_type == ICE_SW_LKUP_MAC_VLAN || lkup_type == ICE_SW_LKUP_ETHERTYPE || lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || lkup_type == ICE_SW_LKUP_LAST) { sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP); } else if (lkup_type == ICE_SW_LKUP_VLAN) { sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE); } else { status = ICE_ERR_PARAM; goto ice_aq_alloc_free_vsi_list_exit; } if (opc == ice_aqc_opc_free_res) sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id); status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL); if (status) goto ice_aq_alloc_free_vsi_list_exit; if (opc == ice_aqc_opc_alloc_res) { vsi_ele = &sw_buf->elem[0]; *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp); } ice_aq_alloc_free_vsi_list_exit: ice_free(hw, sw_buf); return status; } /** * ice_aq_set_storm_ctrl - Sets storm control configuration * @hw: pointer to the HW struct * @bcast_thresh: represents the upper threshold for broadcast storm control * @mcast_thresh: represents the upper threshold for multicast storm control * @ctl_bitmask: storm control knobs * * Sets the storm control configuration (0x0280) */ enum ice_status ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh, u32 ctl_bitmask) { struct ice_aqc_storm_cfg *cmd; struct ice_aq_desc desc; cmd = &desc.params.storm_conf; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg); cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M); cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M); cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /** * ice_aq_get_storm_ctrl - gets storm control configuration * @hw: pointer to the HW struct * @bcast_thresh: represents the upper threshold for broadcast storm control * @mcast_thresh: represents the upper threshold for multicast storm control * @ctl_bitmask: storm control knobs * * Gets the storm control configuration (0x0281) */ enum ice_status ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh, u32 *ctl_bitmask) { enum ice_status status; struct 
ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg); status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); if (!status) { struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf; if (bcast_thresh) *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) & ICE_AQ_THRESHOLD_M; if (mcast_thresh) *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) & ICE_AQ_THRESHOLD_M; if (ctl_bitmask) *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl); } return status; } /** * ice_aq_sw_rules - add/update/remove switch rules * @hw: pointer to the HW struct * @rule_list: pointer to switch rule population list * @rule_list_sz: total size of the rule list in bytes * @num_rules: number of switch rules in the rule_list * @opc: switch rules population command type - pass in the command opcode * @cd: pointer to command details structure or NULL * * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware */ static enum ice_status ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aq_desc desc; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (opc != ice_aqc_opc_add_sw_rules && opc != ice_aqc_opc_update_sw_rules && opc != ice_aqc_opc_remove_sw_rules) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, opc); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); desc.params.sw_rules.num_rules_fltr_entry_index = CPU_TO_LE16(num_rules); status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd); if (opc != ice_aqc_opc_add_sw_rules && hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) status = ICE_ERR_DOES_NOT_EXIST; return status; } /* ice_init_port_info - Initialize port_info with switch configuration data * @pi: pointer to port_info * @vsi_port_num: VSI number or port number * @type: Type of switch element (port or VSI) * @swid: switch ID of the switch the element is attached to * @pf_vf_num: PF or VF number * @is_vf: true if the element is a VF, false otherwise */ static void ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, u16 swid, u16 pf_vf_num, bool is_vf) { switch (type) { case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT: pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK); pi->sw_id = swid; pi->pf_vf_num = pf_vf_num; pi->is_vf = is_vf; pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; break; default: ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n"); break; } } /* ice_get_initial_sw_cfg - Get initial port and default VSI data * @hw: pointer to the hardware structure */ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) { struct ice_aqc_get_sw_cfg_resp_elem *rbuf; enum ice_status status; u8 num_total_ports; u16 req_desc = 0; u16 num_elems; u8 j = 0; u16 i; num_total_ports = 1; rbuf = (struct ice_aqc_get_sw_cfg_resp_elem *) ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN); if (!rbuf) return ICE_ERR_NO_MEMORY; /* Multiple calls to ice_aq_get_sw_cfg may be required * to get all the switch configuration information. 
The need * for additional calls is indicated by ice_aq_get_sw_cfg * writing a non-zero value in req_desc */ do { struct ice_aqc_get_sw_cfg_resp_elem *ele; status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN, &req_desc, &num_elems, NULL); if (status) break; for (i = 0, ele = rbuf; i < num_elems; i++, ele++) { u16 pf_vf_num, swid, vsi_port_num; bool is_vf = false; u8 res_type; vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) & ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M; pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) & ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M; swid = LE16_TO_CPU(ele->swid); if (LE16_TO_CPU(ele->pf_vf_num) & ICE_AQC_GET_SW_CONF_RESP_IS_VF) is_vf = true; res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >> ICE_AQC_GET_SW_CONF_RESP_TYPE_S); switch (res_type) { case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT: case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT: if (j == num_total_ports) { ice_debug(hw, ICE_DBG_SW, "more ports than expected\n"); status = ICE_ERR_CFG; goto out; } ice_init_port_info(hw->port_info, vsi_port_num, res_type, swid, pf_vf_num, is_vf); j++; break; default: break; } } } while (req_desc && !status); out: ice_free(hw, rbuf); return status; } /** * ice_fill_sw_info - Helper function to populate lb_en and lan_en * @hw: pointer to the hardware structure * @fi: filter info structure to fill/update * * This helper function populates the lb_en and lan_en elements of the provided * ice_fltr_info struct using the switch's type and characteristics of the * switch rule being configured. */ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) { fi->lb_en = false; fi->lan_en = false; if ((fi->flag & ICE_FLTR_TX) && (fi->fltr_act == ICE_FWD_TO_VSI || fi->fltr_act == ICE_FWD_TO_VSI_LIST || fi->fltr_act == ICE_FWD_TO_Q || fi->fltr_act == ICE_FWD_TO_QGRP)) { /* Setting LB for prune actions will result in replicated * packets to the internal switch that will be dropped. */ if (fi->lkup_type != ICE_SW_LKUP_VLAN) fi->lb_en = true; /* Set lan_en to TRUE if * 1. The switch is a VEB AND * 2 * 2.1 The lookup is a directional lookup like ethertype, * promiscuous, ethertype-MAC, promiscuous-VLAN * and default-port OR * 2.2 The lookup is VLAN, OR * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC. * * OR * * The switch is a VEPA. * * In all other cases, the LAN enable has to be set to false. 
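 * For example: under a VEB, a Tx MAC forward filter with a unicast DA
 * keeps lan_en false (traffic stays internal to the switch), while the
 * same filter with a broadcast or multicast DA sets lan_en true so the
 * frame is also sent out on the wire.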
*/ if (hw->evb_veb) { if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE || fi->lkup_type == ICE_SW_LKUP_PROMISC || fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN || fi->lkup_type == ICE_SW_LKUP_DFLT || fi->lkup_type == ICE_SW_LKUP_VLAN || (fi->lkup_type == ICE_SW_LKUP_MAC && !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) || (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN && !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr))) fi->lan_en = true; } else { fi->lan_en = true; } } } /** * ice_fill_sw_rule - Helper function to fill switch rule structure * @hw: pointer to the hardware structure * @f_info: entry containing packet forwarding information * @s_rule: switch rule structure to be filled in based on mac_entry * @opc: switch rules population command type - pass in the command opcode */ static void ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc) { u16 vlan_id = ICE_MAX_VLAN_ID + 1; + u16 vlan_tpid = ICE_ETH_P_8021Q; void *daddr = NULL; u16 eth_hdr_sz; u8 *eth_hdr; u32 act = 0; __be16 *off; u8 q_rgn; if (opc == ice_aqc_opc_remove_sw_rules) { s_rule->pdata.lkup_tx_rx.act = 0; s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id); s_rule->pdata.lkup_tx_rx.hdr_len = 0; return; } eth_hdr_sz = sizeof(dummy_eth_header); eth_hdr = s_rule->pdata.lkup_tx_rx.hdr; /* initialize the ether header with a dummy header */ ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA); ice_fill_sw_info(hw, f_info); switch (f_info->fltr_act) { case ICE_FWD_TO_VSI: act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M; if (f_info->lkup_type != ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_VSI_LIST: act |= ICE_SINGLE_ACT_VSI_LIST; act |= (f_info->fwd_id.vsi_list_id << ICE_SINGLE_ACT_VSI_LIST_ID_S) & ICE_SINGLE_ACT_VSI_LIST_ID_M; if (f_info->lkup_type != ICE_SW_LKUP_VLAN) act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_Q: act |= ICE_SINGLE_ACT_TO_Q; act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & ICE_SINGLE_ACT_Q_INDEX_M; break; case ICE_DROP_PACKET: act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP | ICE_SINGLE_ACT_VALID_BIT; break; case ICE_FWD_TO_QGRP: q_rgn = f_info->qgrp_size > 0 ? 
(u8)ice_ilog2(f_info->qgrp_size) : 0; act |= ICE_SINGLE_ACT_TO_Q; act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) & ICE_SINGLE_ACT_Q_INDEX_M; act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) & ICE_SINGLE_ACT_Q_REGION_M; break; default: return; } if (f_info->lb_en) act |= ICE_SINGLE_ACT_LB_ENABLE; if (f_info->lan_en) act |= ICE_SINGLE_ACT_LAN_ENABLE; switch (f_info->lkup_type) { case ICE_SW_LKUP_MAC: daddr = f_info->l_data.mac.mac_addr; break; case ICE_SW_LKUP_VLAN: vlan_id = f_info->l_data.vlan.vlan_id; + if (f_info->l_data.vlan.tpid_valid) + vlan_tpid = f_info->l_data.vlan.tpid; if (f_info->fltr_act == ICE_FWD_TO_VSI || f_info->fltr_act == ICE_FWD_TO_VSI_LIST) { act |= ICE_SINGLE_ACT_PRUNE; act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS; } break; case ICE_SW_LKUP_ETHERTYPE_MAC: daddr = f_info->l_data.ethertype_mac.mac_addr; /* fall-through */ case ICE_SW_LKUP_ETHERTYPE: off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype); break; case ICE_SW_LKUP_MAC_VLAN: daddr = f_info->l_data.mac_vlan.mac_addr; vlan_id = f_info->l_data.mac_vlan.vlan_id; break; case ICE_SW_LKUP_PROMISC_VLAN: vlan_id = f_info->l_data.mac_vlan.vlan_id; /* fall-through */ case ICE_SW_LKUP_PROMISC: daddr = f_info->l_data.mac_vlan.mac_addr; break; default: break; } s_rule->type = (f_info->flag & ICE_FLTR_RX) ? CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) : CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX); /* Recipe set depending on lookup type */ s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type); s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src); s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act); if (daddr) ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); if (!(vlan_id > ICE_MAX_VLAN_ID)) { off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); *off = CPU_TO_BE16(vlan_id); + off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); + *off = CPU_TO_BE16(vlan_tpid); } /* Create the switch rule with the final dummy Ethernet header */ if (opc != ice_aqc_opc_update_sw_rules) s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz); } /** * ice_add_marker_act * @hw: pointer to the hardware structure * @m_ent: the management entry for which sw marker needs to be added * @sw_marker: sw marker to tag the Rx descriptor with * @l_id: large action resource ID * * Create a large action to hold software marker and update the switch rule * entry pointed by m_ent with newly created large action */ static enum ice_status ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, u16 sw_marker, u16 l_id) { struct ice_aqc_sw_rules_elem *lg_act, *rx_tx; /* For software marker we need 3 large actions * 1. FWD action: FWD TO VSI or VSI LIST * 2. GENERIC VALUE action to hold the profile ID * 3. GENERIC VALUE action to hold the software marker ID */ const u16 num_lg_acts = 3; enum ice_status status; u16 lg_act_size; u16 rules_size; u32 act; u16 id; if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; /* Create two back-to-back switch rules and submit them to the HW using * one memory buffer: * 1. Large Action * 2. Look up Tx Rx */ lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts); rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size); if (!lg_act) return ICE_ERR_NO_MEMORY; rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); /* Fill in the first switch rule i.e. 
large action */ lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT); lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id); lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts); /* First action VSI forwarding or VSI list forwarding depending on how * many VSIs */ id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id : m_ent->fltr_info.fwd_id.hw_vsi_id; act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; if (m_ent->vsi_count > 1) act |= ICE_LG_ACT_VSI_LIST; lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act); /* Second action descriptor type */ act = ICE_LG_ACT_GENERIC; act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act); act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M; /* Third action Marker value */ act |= ICE_LG_ACT_GENERIC; act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M; lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act); /* call the fill switch rule to fill the lookup Tx Rx structure */ ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, ice_aqc_opc_update_sw_rules); /* Update the action to point to the large action ID */ rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(ICE_SINGLE_ACT_PTR | ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M)); /* Use the filter rule ID of the previously created rule with single * act. Once the update happens, hardware will treat this as large * action */ rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id); status = ice_aq_sw_rules(hw, lg_act, rules_size, 2, ice_aqc_opc_update_sw_rules, NULL); if (!status) { m_ent->lg_act_idx = l_id; m_ent->sw_marker_id = sw_marker; } ice_free(hw, lg_act); return status; } /** * ice_add_counter_act - add/update filter rule with counter action * @hw: pointer to the hardware structure * @m_ent: the management entry for which counter needs to be added * @counter_id: VLAN counter ID returned as part of allocate resource * @l_id: large action resource ID */ static enum ice_status ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, u16 counter_id, u16 l_id) { struct ice_aqc_sw_rules_elem *lg_act; struct ice_aqc_sw_rules_elem *rx_tx; enum ice_status status; /* 2 actions will be added while adding a large action counter */ const int num_acts = 2; u16 lg_act_size; u16 rules_size; u16 f_rule_id; u32 act; u16 id; if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; /* Create two back-to-back switch rules and submit them to the HW using * one memory buffer: * 1. Large Action * 2. Look up Tx Rx */ lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts); rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size); if (!lg_act) return ICE_ERR_NO_MEMORY; rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size); /* Fill in the first switch rule i.e. large action */ lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT); lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id); lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts); /* First action VSI forwarding or VSI list forwarding depending on how * many VSIs */ id = (m_ent->vsi_count > 1) ? 
m_ent->fltr_info.fwd_id.vsi_list_id : m_ent->fltr_info.fwd_id.hw_vsi_id; act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; if (m_ent->vsi_count > 1) act |= ICE_LG_ACT_VSI_LIST; lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act); /* Second action counter ID */ act = ICE_LG_ACT_STAT_COUNT; act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) & ICE_LG_ACT_STAT_COUNT_M; lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act); /* call the fill switch rule to fill the lookup Tx Rx structure */ ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, ice_aqc_opc_update_sw_rules); act = ICE_SINGLE_ACT_PTR; act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M; rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act); /* Use the filter rule ID of the previously created rule with single * act. Once the update happens, hardware will treat this as large * action */ f_rule_id = m_ent->fltr_info.fltr_rule_id; rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id); status = ice_aq_sw_rules(hw, lg_act, rules_size, 2, ice_aqc_opc_update_sw_rules, NULL); if (!status) { m_ent->lg_act_idx = l_id; m_ent->counter_index = counter_id; } ice_free(hw, lg_act); return status; } /** * ice_create_vsi_list_map * @hw: pointer to the hardware structure * @vsi_handle_arr: array of VSI handles to set in the VSI mapping * @num_vsi: number of VSI handles in the array * @vsi_list_id: VSI list ID generated as part of allocate resource * * Helper function to create a new entry of VSI list ID to VSI mapping * using the given VSI list ID */ static struct ice_vsi_list_map_info * ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id) { struct ice_switch_info *sw = hw->switch_info; struct ice_vsi_list_map_info *v_map; int i; v_map = (struct ice_vsi_list_map_info *)ice_malloc(hw, sizeof(*v_map)); if (!v_map) return NULL; v_map->vsi_list_id = vsi_list_id; v_map->ref_cnt = 1; for (i = 0; i < num_vsi; i++) ice_set_bit(vsi_handle_arr[i], v_map->vsi_map); LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head); return v_map; } /** * ice_update_vsi_list_rule * @hw: pointer to the hardware structure * @vsi_handle_arr: array of VSI handles to form a VSI list * @num_vsi: number of VSI handles in the array * @vsi_list_id: VSI list ID generated as part of allocate resource * @remove: Boolean value to indicate if this is a remove action * @opc: switch rules population command type - pass in the command opcode * @lkup_type: lookup type of the filter * * Call AQ command to add a new switch rule or update existing switch rule * using the given VSI list ID */ static enum ice_status ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 vsi_list_id, bool remove, enum ice_adminq_opc opc, enum ice_sw_lkup_type lkup_type) { struct ice_aqc_sw_rules_elem *s_rule; enum ice_status status; u16 s_rule_size; u16 rule_type; int i; if (!num_vsi) return ICE_ERR_PARAM; if (lkup_type == ICE_SW_LKUP_MAC || lkup_type == ICE_SW_LKUP_MAC_VLAN || lkup_type == ICE_SW_LKUP_ETHERTYPE || lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || lkup_type == ICE_SW_LKUP_LAST) rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : ICE_AQC_SW_RULES_T_VSI_LIST_SET; else if (lkup_type == ICE_SW_LKUP_VLAN) rule_type = remove ? 
ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR : ICE_AQC_SW_RULES_T_PRUNE_LIST_SET; else return ICE_ERR_PARAM; s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi); s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size); if (!s_rule) return ICE_ERR_NO_MEMORY; for (i = 0; i < num_vsi; i++) { if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) { status = ICE_ERR_PARAM; goto exit; } /* AQ call requires hw_vsi_id(s) */ s_rule->pdata.vsi_list.vsi[i] = CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i])); } s_rule->type = CPU_TO_LE16(rule_type); s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi); s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id); status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL); exit: ice_free(hw, s_rule); return status; } /** * ice_create_vsi_list_rule - Creates and populates a VSI list rule * @hw: pointer to the HW struct * @vsi_handle_arr: array of VSI handles to form a VSI list * @num_vsi: number of VSI handles in the array * @vsi_list_id: stores the ID of the VSI list to be created * @lkup_type: switch rule filter's lookup type */ static enum ice_status ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type) { enum ice_status status; status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type, ice_aqc_opc_alloc_res); if (status) return status; /* Update the newly created VSI list to include the specified VSIs */ return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi, *vsi_list_id, false, ice_aqc_opc_add_sw_rules, lkup_type); } /** * ice_create_pkt_fwd_rule * @hw: pointer to the hardware structure * @recp_list: corresponding filter management list * @f_entry: entry containing packet forwarding information * * Create switch rule with given filter information and add an entry * to the corresponding filter management list to track this switch rule * and VSI mapping */ static enum ice_status ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list, struct ice_fltr_list_entry *f_entry) { struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_aqc_sw_rules_elem *s_rule; enum ice_status status; s_rule = (struct ice_aqc_sw_rules_elem *) ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE); if (!s_rule) return ICE_ERR_NO_MEMORY; fm_entry = (struct ice_fltr_mgmt_list_entry *) ice_malloc(hw, sizeof(*fm_entry)); if (!fm_entry) { status = ICE_ERR_NO_MEMORY; goto ice_create_pkt_fwd_rule_exit; } fm_entry->fltr_info = f_entry->fltr_info; /* Initialize all the fields for the management entry */ fm_entry->vsi_count = 1; fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX; fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID; fm_entry->counter_index = ICE_INVAL_COUNTER_ID; ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule, ice_aqc_opc_add_sw_rules); status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, ice_aqc_opc_add_sw_rules, NULL); if (status) { ice_free(hw, fm_entry); goto ice_create_pkt_fwd_rule_exit; } f_entry->fltr_info.fltr_rule_id = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index); fm_entry->fltr_info.fltr_rule_id = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index); /* The book keeping entries will get removed when base driver * calls remove filter AQ command */ LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules); ice_create_pkt_fwd_rule_exit: ice_free(hw, s_rule); return status; } /** * ice_update_pkt_fwd_rule * @hw: pointer to the hardware structure * @f_info: filter information for switch rule * * Call AQ command to update a previously created switch rule with a * 
VSI list ID */ static enum ice_status ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_aqc_sw_rules_elem *s_rule; enum ice_status status; s_rule = (struct ice_aqc_sw_rules_elem *) ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE); if (!s_rule) return ICE_ERR_NO_MEMORY; ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id); /* Update switch rule with new rule set to forward VSI list */ status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, ice_aqc_opc_update_sw_rules, NULL); ice_free(hw, s_rule); return status; } /** * ice_update_sw_rule_bridge_mode * @hw: pointer to the HW struct * * Updates unicast switch filter rules based on VEB/VEPA mode */ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) { struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; enum ice_status status = ICE_SUCCESS; struct LIST_HEAD_TYPE *rule_head; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) { struct ice_fltr_info *fi = &fm_entry->fltr_info; u8 *addr = fi->l_data.mac.mac_addr; /* Update unicast Tx rules to reflect the selected * VEB/VEPA mode */ if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) && (fi->fltr_act == ICE_FWD_TO_VSI || fi->fltr_act == ICE_FWD_TO_VSI_LIST || fi->fltr_act == ICE_FWD_TO_Q || fi->fltr_act == ICE_FWD_TO_QGRP)) { status = ice_update_pkt_fwd_rule(hw, fi); if (status) break; } } ice_release_lock(rule_lock); return status; } /** * ice_add_update_vsi_list * @hw: pointer to the hardware structure * @m_entry: pointer to current filter management list entry * @cur_fltr: filter information from the book keeping entry * @new_fltr: filter information with the new VSI to be added * * Call AQ command to add or update previously created VSI list with new VSI. * * Helper function to do book keeping associated with adding filter information * The algorithm to do the book keeping is described below : * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.) * if only one VSI has been added till now * Allocate a new VSI list and add two VSIs * to this list using switch rule command * Update the previously created switch rule with the * newly created VSI list ID * if a VSI list was previously created * Add the new VSI to the previously created VSI list set * using the update switch rule command */ static enum ice_status ice_add_update_vsi_list(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_entry, struct ice_fltr_info *cur_fltr, struct ice_fltr_info *new_fltr) { enum ice_status status = ICE_SUCCESS; u16 vsi_list_id = 0; if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) return ICE_ERR_NOT_IMPL; if ((new_fltr->fltr_act == ICE_FWD_TO_Q || new_fltr->fltr_act == ICE_FWD_TO_QGRP) && (cur_fltr->fltr_act == ICE_FWD_TO_VSI || cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST)) return ICE_ERR_NOT_IMPL; if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) { /* Only one entry existed in the mapping and it was not already * a part of a VSI list. So, create a VSI list with the old and * new VSIs. 
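 * The promotion below is: allocate a VSI list, add both VSIs to it,
 * then rewrite the existing rule's action from FWD_TO_VSI to
 * FWD_TO_VSI_LIST while reusing the original filter rule ID.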
*/ struct ice_fltr_info tmp_fltr; u16 vsi_handle_arr[2]; /* A rule already exists with the new VSI being added */ if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) return ICE_ERR_ALREADY_EXISTS; vsi_handle_arr[0] = cur_fltr->vsi_handle; vsi_handle_arr[1] = new_fltr->vsi_handle; status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, &vsi_list_id, new_fltr->lkup_type); if (status) return status; tmp_fltr = *new_fltr; tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; /* Update the previous switch rule of "MAC forward to VSI" to * "MAC fwd to VSI list" */ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); if (status) return status; cur_fltr->fwd_id.vsi_list_id = vsi_list_id; cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; m_entry->vsi_list_info = ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, vsi_list_id); + if (!m_entry->vsi_list_info) + return ICE_ERR_NO_MEMORY; + /* If this entry was large action then the large action needs * to be updated to point to FWD to VSI list */ if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) status = ice_add_marker_act(hw, m_entry, m_entry->sw_marker_id, m_entry->lg_act_idx); } else { u16 vsi_handle = new_fltr->vsi_handle; enum ice_adminq_opc opcode; if (!m_entry->vsi_list_info) return ICE_ERR_CFG; /* A rule already exists with the new VSI being added */ if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle)) return ICE_SUCCESS; /* Update the previously created VSI list set with * the new VSI ID passed in */ vsi_list_id = cur_fltr->fwd_id.vsi_list_id; opcode = ice_aqc_opc_update_sw_rules; status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, false, opcode, new_fltr->lkup_type); /* update VSI list mapping info with new VSI ID */ if (!status) ice_set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map); } if (!status) m_entry->vsi_count++; return status; } /** * ice_find_rule_entry - Search a rule entry * @list_head: head of rule list * @f_info: rule information * * Helper function to search for a given rule entry * Returns pointer to entry storing the rule if found */ static struct ice_fltr_mgmt_list_entry * ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head, struct ice_fltr_info *f_info) { struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL; LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry, list_entry) { if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, sizeof(f_info->l_data)) && f_info->flag == list_itr->fltr_info.flag) { ret = list_itr; break; } } return ret; } /** * ice_find_vsi_list_entry - Search VSI list map with VSI count 1 * @recp_list: VSI lists needs to be searched * @vsi_handle: VSI handle to be found in VSI list * @vsi_list_id: VSI list ID found containing vsi_handle * * Helper function to search a VSI list with single entry containing given VSI * handle element. This can be extended further to search VSI list with more * than 1 vsi_count. Returns pointer to VSI list entry if found. 
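 * Note that *vsi_list_id is written only when a matching list is found;
 * on a miss the function returns NULL and leaves it untouched.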
*/ static struct ice_vsi_list_map_info * ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle, u16 *vsi_list_id) { struct ice_vsi_list_map_info *map_info = NULL; struct LIST_HEAD_TYPE *list_head; list_head = &recp_list->filt_rules; if (recp_list->adv_rule) { struct ice_adv_fltr_mgmt_list_entry *list_itr; LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry, list_entry) { if (list_itr->vsi_list_info) { map_info = list_itr->vsi_list_info; if (ice_is_bit_set(map_info->vsi_map, vsi_handle)) { *vsi_list_id = map_info->vsi_list_id; return map_info; } } } } else { struct ice_fltr_mgmt_list_entry *list_itr; LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry, list_entry) { if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) { map_info = list_itr->vsi_list_info; if (ice_is_bit_set(map_info->vsi_map, vsi_handle)) { *vsi_list_id = map_info->vsi_list_id; return map_info; } } } } return NULL; } /** * ice_add_rule_internal - add rule for a given lookup type * @hw: pointer to the hardware structure * @recp_list: recipe list for which rule has to be added * @lport: logic port number on which function add rule * @f_entry: structure containing MAC forwarding information * * Adds or updates the rule lists for a given recipe */ static enum ice_status ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, u8 lport, struct ice_fltr_list_entry *f_entry) { struct ice_fltr_info *new_fltr, *cur_fltr; struct ice_fltr_mgmt_list_entry *m_entry; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ enum ice_status status = ICE_SUCCESS; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) return ICE_ERR_PARAM; /* Load the hw_vsi_id only if the fwd action is fwd to VSI */ if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI) f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); rule_lock = &recp_list->filt_rule_lock; ice_acquire_lock(rule_lock); new_fltr = &f_entry->fltr_info; if (new_fltr->flag & ICE_FLTR_RX) new_fltr->src = lport; else if (new_fltr->flag & ICE_FLTR_TX) new_fltr->src = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr); if (!m_entry) { status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry); goto exit_add_rule_internal; } cur_fltr = &m_entry->fltr_info; status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr); exit_add_rule_internal: ice_release_lock(rule_lock); return status; } /** * ice_remove_vsi_list_rule * @hw: pointer to the hardware structure * @vsi_list_id: VSI list ID generated as part of allocate resource * @lkup_type: switch rule filter lookup type * * The VSI list should be emptied before this function is called to remove the * VSI list. */ static enum ice_status ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, enum ice_sw_lkup_type lkup_type) { /* Free the vsi_list resource that we allocated. It is assumed that the * list is empty at this point. 
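 * (See ice_rem_update_vsi_list() below, which drains the list before
 * issuing this free.)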
*/ return ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, ice_aqc_opc_free_res); } /** * ice_rem_update_vsi_list * @hw: pointer to the hardware structure * @vsi_handle: VSI handle of the VSI to remove * @fm_list: filter management entry for which the VSI list management needs to * be done */ static enum ice_status ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle, struct ice_fltr_mgmt_list_entry *fm_list) { enum ice_sw_lkup_type lkup_type; enum ice_status status = ICE_SUCCESS; u16 vsi_list_id; if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || fm_list->vsi_count == 0) return ICE_ERR_PARAM; /* A rule with the VSI being removed does not exist */ if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle)) return ICE_ERR_DOES_NOT_EXIST; lkup_type = fm_list->fltr_info.lkup_type; vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true, ice_aqc_opc_update_sw_rules, lkup_type); if (status) return status; fm_list->vsi_count--; ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map); if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) { struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info; struct ice_vsi_list_map_info *vsi_list_info = fm_list->vsi_list_info; u16 rem_vsi_handle; rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map, ICE_MAX_VSI); if (!ice_is_vsi_valid(hw, rem_vsi_handle)) return ICE_ERR_OUT_OF_RANGE; /* Make sure VSI list is empty before removing it below */ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1, vsi_list_id, true, ice_aqc_opc_update_sw_rules, lkup_type); if (status) return status; tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI; tmp_fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, rem_vsi_handle); tmp_fltr_info.vsi_handle = rem_vsi_handle; status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info); if (status) { ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n", tmp_fltr_info.fwd_id.hw_vsi_id, status); return status; } fm_list->fltr_info = tmp_fltr_info; } if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) || (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) { struct ice_vsi_list_map_info *vsi_list_info = fm_list->vsi_list_info; /* Remove the VSI list since it is no longer used */ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); if (status) { ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n", vsi_list_id, status); return status; } LIST_DEL(&vsi_list_info->list_entry); ice_free(hw, vsi_list_info); fm_list->vsi_list_info = NULL; } return status; } /** * ice_remove_rule_internal - Remove a filter rule of a given type * * @hw: pointer to the hardware structure * @recp_list: recipe list for which the rule needs to removed * @f_entry: rule entry containing filter information */ static enum ice_status ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, struct ice_fltr_list_entry *f_entry) { struct ice_fltr_mgmt_list_entry *list_elem; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ enum ice_status status = ICE_SUCCESS; bool remove_rule = false; u16 vsi_handle; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) return ICE_ERR_PARAM; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); rule_lock = &recp_list->filt_rule_lock; ice_acquire_lock(rule_lock); list_elem = ice_find_rule_entry(&recp_list->filt_rules, &f_entry->fltr_info); if (!list_elem) { status = 
ICE_ERR_DOES_NOT_EXIST; goto exit; } if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { remove_rule = true; } else if (!list_elem->vsi_list_info) { status = ICE_ERR_DOES_NOT_EXIST; goto exit; } else if (list_elem->vsi_list_info->ref_cnt > 1) { /* a ref_cnt > 1 indicates that the vsi_list is being * shared by multiple rules. Decrement the ref_cnt and * remove this rule, but do not modify the list, as it * is in-use by other rules. */ list_elem->vsi_list_info->ref_cnt--; remove_rule = true; } else { /* a ref_cnt of 1 indicates the vsi_list is only used * by one rule. However, the original removal request is only * for a single VSI. Update the vsi_list first, and only * remove the rule if there are no further VSIs in this list. */ vsi_handle = f_entry->fltr_info.vsi_handle; status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem); if (status) goto exit; /* if VSI count goes to zero after updating the VSI list */ if (list_elem->vsi_count == 0) remove_rule = true; } if (remove_rule) { /* Remove the lookup rule */ struct ice_aqc_sw_rules_elem *s_rule; s_rule = (struct ice_aqc_sw_rules_elem *) ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE); if (!s_rule) { status = ICE_ERR_NO_MEMORY; goto exit; } ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule, ice_aqc_opc_remove_sw_rules); status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, ice_aqc_opc_remove_sw_rules, NULL); /* Remove a book keeping from the list */ ice_free(hw, s_rule); if (status) goto exit; LIST_DEL(&list_elem->list_entry); ice_free(hw, list_elem); } exit: ice_release_lock(rule_lock); return status; } /** * ice_aq_get_res_alloc - get allocated resources * @hw: pointer to the HW struct * @num_entries: pointer to u16 to store the number of resource entries returned * @buf: pointer to buffer * @buf_size: size of buf * @cd: pointer to command details structure or NULL * * The caller-supplied buffer must be large enough to store the resource * information for all resource types. Each resource type is an * ice_aqc_get_res_resp_elem structure. 
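 * Illustrative call with a stack buffer (the length macro is checked
 * just below; a real caller may prefer a heap allocation):
 *
 *	u8 buf[ICE_AQ_GET_RES_ALLOC_BUF_LEN];
 *	u16 num_entries;
 *
 *	if (!ice_aq_get_res_alloc(hw, &num_entries,
 *				  (struct ice_aqc_get_res_resp_elem *)buf,
 *				  sizeof(buf), NULL))
 *		ice_debug(hw, ICE_DBG_SW, "%u resource types reported\n",
 *			  num_entries);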
*/ enum ice_status ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, struct ice_aqc_get_res_resp_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_get_res_alloc *resp; enum ice_status status; struct ice_aq_desc desc; if (!buf) return ICE_ERR_BAD_PTR; if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN) return ICE_ERR_INVAL_SIZE; resp = &desc.params.get_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status && num_entries) *num_entries = LE16_TO_CPU(resp->resp_elem_num); return status; } /** * ice_aq_get_res_descs - get allocated resource descriptors * @hw: pointer to the hardware structure * @num_entries: number of resource entries in buffer * @buf: structure to hold response data buffer * @buf_size: size of buffer * @res_type: resource type * @res_shared: is resource shared * @desc_id: input - first desc ID to start; output - next desc ID * @cd: pointer to command details structure or NULL */ enum ice_status ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries, struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id, struct ice_sq_cd *cd) { struct ice_aqc_get_allocd_res_desc *cmd; struct ice_aq_desc desc; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.get_res_desc; if (!buf) return ICE_ERR_PARAM; if (buf_size != (num_entries * sizeof(*buf))) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc); cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) & ICE_AQC_RES_TYPE_M) | (res_shared ? ICE_AQC_RES_TYPE_FLAG_SHARED : 0)); cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status) *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc); return status; } /** * ice_add_mac_rule - Add a MAC address based filter rule * @hw: pointer to the hardware structure * @m_list: list of MAC addresses and forwarding information * @sw: pointer to switch info struct for which the function adds the rule * @lport: logical port number on which the function adds the rule * * IMPORTANT: When the ucast_shared flag is set to false and m_list has * multiple unicast addresses, the function assumes that all the * addresses are unique in a given add_mac call. It doesn't * check for duplicates in this case; removing duplicates from a given * list should be taken care of by the caller of this function.
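* * As a usage sketch (field values illustrative, not mandated beyond the checks below): each m_list entry is an ice_fltr_list_entry whose fltr_info sets lkup_type = ICE_SW_LKUP_MAC, src_id = ICE_SRC_ID_VSI, fltr_act = ICE_FWD_TO_VSI, a valid vsi_handle, and a non-zero l_data.mac.mac_addr; entries that do not match these expectations are rejected with ICE_ERR_PARAM.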
*/ static enum ice_status ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list, struct ice_switch_info *sw, u8 lport) { struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC]; struct ice_aqc_sw_rules_elem *s_rule, *r_iter; struct ice_fltr_list_entry *m_list_itr; struct LIST_HEAD_TYPE *rule_head; u16 total_elem_left, s_rule_size; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ enum ice_status status = ICE_SUCCESS; u16 num_unicast = 0; u8 elem_sent; s_rule = NULL; rule_lock = &recp_list->filt_rule_lock; rule_head = &recp_list->filt_rules; LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry, list_entry) { u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; u16 vsi_handle; u16 hw_vsi_id; m_list_itr->fltr_info.flag = ICE_FLTR_TX; vsi_handle = m_list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; /* update the src in case it is VSI num */ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) return ICE_ERR_PARAM; m_list_itr->fltr_info.src = hw_vsi_id; if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || IS_ZERO_ETHER_ADDR(add)) return ICE_ERR_PARAM; if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) { /* Don't overwrite the unicast address */ ice_acquire_lock(rule_lock); if (ice_find_rule_entry(rule_head, &m_list_itr->fltr_info)) { ice_release_lock(rule_lock); return ICE_ERR_ALREADY_EXISTS; } ice_release_lock(rule_lock); num_unicast++; } else if (IS_MULTICAST_ETHER_ADDR(add) || (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) { m_list_itr->status = ice_add_rule_internal(hw, recp_list, lport, m_list_itr); if (m_list_itr->status) return m_list_itr->status; } } ice_acquire_lock(rule_lock); /* Exit if no suitable entries were found for adding bulk switch rule */ if (!num_unicast) { status = ICE_SUCCESS; goto ice_add_mac_exit; } /* Allocate switch rule buffer for the bulk update for unicast */ s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; s_rule = (struct ice_aqc_sw_rules_elem *) ice_calloc(hw, num_unicast, s_rule_size); if (!s_rule) { status = ICE_ERR_NO_MEMORY; goto ice_add_mac_exit; } r_iter = s_rule; LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry, list_entry) { struct ice_fltr_info *f_info = &m_list_itr->fltr_info; u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; if (IS_UNICAST_ETHER_ADDR(mac_addr)) { ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter, ice_aqc_opc_add_sw_rules); r_iter = (struct ice_aqc_sw_rules_elem *) ((u8 *)r_iter + s_rule_size); } } /* Call AQ bulk switch rule update for all unicast addresses */ r_iter = s_rule; /* Call AQ switch rule in AQ_MAX chunk */ for (total_elem_left = num_unicast; total_elem_left > 0; total_elem_left -= elem_sent) { struct ice_aqc_sw_rules_elem *entry = r_iter; elem_sent = MIN_T(u8, total_elem_left, (ICE_AQ_MAX_BUF_LEN / s_rule_size)); status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size, elem_sent, ice_aqc_opc_add_sw_rules, NULL); if (status) goto ice_add_mac_exit; r_iter = (struct ice_aqc_sw_rules_elem *) ((u8 *)r_iter + (elem_sent * s_rule_size)); } /* Fill up rule ID based on the value returned from FW */ r_iter = s_rule; LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry, list_entry) { struct ice_fltr_info *f_info = &m_list_itr->fltr_info; u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; struct ice_fltr_mgmt_list_entry *fm_entry; if (IS_UNICAST_ETHER_ADDR(mac_addr)) { f_info->fltr_rule_id = 
LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index); f_info->fltr_act = ICE_FWD_TO_VSI; /* Create an entry to track this MAC address */ fm_entry = (struct ice_fltr_mgmt_list_entry *) ice_malloc(hw, sizeof(*fm_entry)); if (!fm_entry) { status = ICE_ERR_NO_MEMORY; goto ice_add_mac_exit; } fm_entry->fltr_info = *f_info; fm_entry->vsi_count = 1; /* The bookkeeping entries will get removed when * the base driver calls the remove filter AQ command */ LIST_ADD(&fm_entry->list_entry, rule_head); r_iter = (struct ice_aqc_sw_rules_elem *) ((u8 *)r_iter + s_rule_size); } } ice_add_mac_exit: ice_release_lock(rule_lock); if (s_rule) ice_free(hw, s_rule); return status; } /** * ice_add_mac - Add a MAC address based filter rule * @hw: pointer to the hardware structure * @m_list: list of MAC addresses and forwarding information * * Adds MAC rules for the logical port from the HW struct */ enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list) { if (!m_list || !hw) return ICE_ERR_PARAM; return ice_add_mac_rule(hw, m_list, hw->switch_info, hw->port_info->lport); } /** * ice_add_vlan_internal - Add one VLAN based filter rule * @hw: pointer to the hardware structure * @recp_list: recipe list for which rule has to be added * @f_entry: filter entry containing one VLAN information */ static enum ice_status ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list, struct ice_fltr_list_entry *f_entry) { struct ice_fltr_mgmt_list_entry *v_list_itr; struct ice_fltr_info *new_fltr, *cur_fltr; enum ice_sw_lkup_type lkup_type; u16 vsi_list_id = 0, vsi_handle; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ enum ice_status status = ICE_SUCCESS; if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle)) return ICE_ERR_PARAM; f_entry->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle); new_fltr = &f_entry->fltr_info; /* VLAN ID should only be 12 bits */ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) return ICE_ERR_PARAM; if (new_fltr->src_id != ICE_SRC_ID_VSI) return ICE_ERR_PARAM; new_fltr->src = new_fltr->fwd_id.hw_vsi_id; lkup_type = new_fltr->lkup_type; vsi_handle = new_fltr->vsi_handle; rule_lock = &recp_list->filt_rule_lock; ice_acquire_lock(rule_lock); v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr); if (!v_list_itr) { struct ice_vsi_list_map_info *map_info = NULL; if (new_fltr->fltr_act == ICE_FWD_TO_VSI) { /* All VLAN pruning rules use a VSI list. Check if * there is already a VSI list containing VSI that we * want to add. If found, use the same vsi_list_id for * this new VLAN rule or else create a new list. */ map_info = ice_find_vsi_list_entry(recp_list, vsi_handle, &vsi_list_id); if (!map_info) { status = ice_create_vsi_list_rule(hw, &vsi_handle, 1, &vsi_list_id, lkup_type); if (status) goto exit; } /* Convert the action to forwarding to a VSI list. */ new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; new_fltr->fwd_id.vsi_list_id = vsi_list_id; } status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry); if (!status) { v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr); if (!v_list_itr) { status = ICE_ERR_DOES_NOT_EXIST; goto exit; } /* reuse VSI list for new rule and increment ref_cnt */ if (map_info) { v_list_itr->vsi_list_info = map_info; map_info->ref_cnt++; } else { v_list_itr->vsi_list_info = ice_create_vsi_list_map(hw, &vsi_handle, 1, vsi_list_id); } } } else if (v_list_itr->vsi_list_info->ref_cnt == 1) { /* Update existing VSI list to add new VSI ID only if it is used * by one VLAN rule.
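* (ref_cnt counts the VLAN rules sharing this VSI list: a sole owner can grow the list in place via ice_add_update_vsi_list(), while a shared list must be left untouched, which is why the else branch below clones it into a new two-VSI list.)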
*/ cur_fltr = &v_list_itr->fltr_info; status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, new_fltr); } else { /* If the VLAN rule exists and the VSI list used by this rule is * referenced by more than one VLAN rule, create a new VSI * list containing both the previous VSI and the new VSI, and * update the existing VLAN rule to point to the new VSI list ID */ struct ice_fltr_info tmp_fltr; u16 vsi_handle_arr[2]; u16 cur_handle; /* Current implementation only supports reusing VSI list with * one VSI count. We should never hit the condition below */ if (v_list_itr->vsi_count > 1 && v_list_itr->vsi_list_info->ref_cnt > 1) { ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n"); status = ICE_ERR_CFG; goto exit; } cur_handle = ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map, ICE_MAX_VSI); /* A rule already exists with the new VSI being added */ if (cur_handle == vsi_handle) { status = ICE_ERR_ALREADY_EXISTS; goto exit; } vsi_handle_arr[0] = cur_handle; vsi_handle_arr[1] = vsi_handle; status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2, &vsi_list_id, lkup_type); if (status) goto exit; tmp_fltr = v_list_itr->fltr_info; tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id; tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; /* Update the previous switch rule to a new VSI list which * includes current VSI that is requested */ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); if (status) goto exit; /* before overriding the VSI list map info, decrement the ref_cnt of * the previous VSI list */ v_list_itr->vsi_list_info->ref_cnt--; /* now update to newly created list */ v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id; v_list_itr->vsi_list_info = ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2, vsi_list_id); v_list_itr->vsi_count++; } exit: ice_release_lock(rule_lock); return status; } /** * ice_add_vlan_rule - Add VLAN based filter rule * @hw: pointer to the hardware structure * @v_list: list of VLAN entries and forwarding information * @sw: pointer to switch info struct for which the function adds the rule */ static enum ice_status ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list, struct ice_switch_info *sw) { struct ice_fltr_list_entry *v_list_itr; struct ice_sw_recipe *recp_list; recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN]; LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry, list_entry) { if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) return ICE_ERR_PARAM; v_list_itr->fltr_info.flag = ICE_FLTR_TX; v_list_itr->status = ice_add_vlan_internal(hw, recp_list, v_list_itr); if (v_list_itr->status) return v_list_itr->status; } return ICE_SUCCESS; } /** * ice_add_vlan - Add a VLAN based filter rule * @hw: pointer to the hardware structure * @v_list: list of VLAN and forwarding information * * Adds VLAN rules for the logical port from the HW struct */ enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list) { if (!v_list || !hw) return ICE_ERR_PARAM; return ice_add_vlan_rule(hw, v_list, hw->switch_info); } /** * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule * @hw: pointer to the hardware structure * @em_list: list of ethertype MAC filters; the MAC is optional * @sw: pointer to switch info struct for which the function adds the rule * @lport: logical port number on which the function adds the rule * * This function requires the caller to populate the entries in * the filter list with the necessary fields (including flags to * indicate Tx or Rx rules).
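* * A minimal entry sketch with illustrative values (0x88CC is just an example ethertype): fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE; fltr_info.flag = ICE_FLTR_RX; fltr_info.l_data.ethertype_mac.ethertype = 0x88CC; fltr_info.fltr_act = ICE_FWD_TO_VSI; use ICE_SW_LKUP_ETHERTYPE_MAC instead when the optional mac_addr is also populated.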
*/ static enum ice_status ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list, struct ice_switch_info *sw, u8 lport) { struct ice_fltr_list_entry *em_list_itr; LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry, list_entry) { struct ice_sw_recipe *recp_list; enum ice_sw_lkup_type l_type; l_type = em_list_itr->fltr_info.lkup_type; recp_list = &sw->recp_list[l_type]; if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) return ICE_ERR_PARAM; em_list_itr->status = ice_add_rule_internal(hw, recp_list, lport, em_list_itr); if (em_list_itr->status) return em_list_itr->status; } return ICE_SUCCESS; } /** * ice_add_eth_mac - Add an ethertype based filter rule * @hw: pointer to the hardware structure * @em_list: list of ethertype and forwarding information * * Adds ethertype rules for the logical port from the HW struct */ enum ice_status ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list) { if (!em_list || !hw) return ICE_ERR_PARAM; return ice_add_eth_mac_rule(hw, em_list, hw->switch_info, hw->port_info->lport); } /** * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule * @hw: pointer to the hardware structure * @em_list: list of ethertype or ethertype MAC entries * @sw: pointer to switch info struct for which the function removes the rule */ static enum ice_status ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list, struct ice_switch_info *sw) { struct ice_fltr_list_entry *em_list_itr, *tmp; LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry, list_entry) { struct ice_sw_recipe *recp_list; enum ice_sw_lkup_type l_type; l_type = em_list_itr->fltr_info.lkup_type; if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC && l_type != ICE_SW_LKUP_ETHERTYPE) return ICE_ERR_PARAM; recp_list = &sw->recp_list[l_type]; em_list_itr->status = ice_remove_rule_internal(hw, recp_list, em_list_itr); if (em_list_itr->status) return em_list_itr->status; } return ICE_SUCCESS; } /** * ice_remove_eth_mac - remove an ethertype based filter rule * @hw: pointer to the hardware structure * @em_list: list of ethertype and forwarding information * */ enum ice_status ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list) { if (!em_list || !hw) return ICE_ERR_PARAM; return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info); } /** * ice_rem_sw_rule_info * @hw: pointer to the hardware structure * @rule_head: pointer to the switch list structure that we want to delete */ static void ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head) { if (!LIST_EMPTY(rule_head)) { struct ice_fltr_mgmt_list_entry *entry; struct ice_fltr_mgmt_list_entry *tmp; LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head, ice_fltr_mgmt_list_entry, list_entry) { LIST_DEL(&entry->list_entry); ice_free(hw, entry); } } } /** * ice_rem_all_sw_rules_info * @hw: pointer to the hardware structure */ void ice_rem_all_sw_rules_info(struct ice_hw *hw) { struct ice_switch_info *sw = hw->switch_info; u8 i; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { struct LIST_HEAD_TYPE *rule_head; rule_head = &sw->recp_list[i].filt_rules; if (!sw->recp_list[i].adv_rule) ice_rem_sw_rule_info(hw, rule_head); } } /** * ice_cfg_dflt_vsi - change state of VSI to set/clear default * @pi: pointer to the port_info structure * @vsi_handle: VSI handle to set as default * @set: true to add the above mentioned switch rule, false to remove it * @direction: ICE_FLTR_RX or ICE_FLTR_TX * * add filter rule to set/unset given VSI as default VSI for the switch *
(represented by swid) */ enum ice_status ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, u8 direction) { struct ice_aqc_sw_rules_elem *s_rule; struct ice_fltr_info f_info; struct ice_hw *hw = pi->hw; enum ice_adminq_opc opcode; enum ice_status status; u16 s_rule_size; u16 hw_vsi_id; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : ICE_SW_RULE_RX_TX_NO_HDR_SIZE; s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size); if (!s_rule) return ICE_ERR_NO_MEMORY; ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM); f_info.lkup_type = ICE_SW_LKUP_DFLT; f_info.flag = direction; f_info.fltr_act = ICE_FWD_TO_VSI; f_info.fwd_id.hw_vsi_id = hw_vsi_id; if (f_info.flag & ICE_FLTR_RX) { f_info.src = pi->lport; f_info.src_id = ICE_SRC_ID_LPORT; if (!set) f_info.fltr_rule_id = pi->dflt_rx_vsi_rule_id; } else if (f_info.flag & ICE_FLTR_TX) { f_info.src_id = ICE_SRC_ID_VSI; f_info.src = hw_vsi_id; if (!set) f_info.fltr_rule_id = pi->dflt_tx_vsi_rule_id; } if (set) opcode = ice_aqc_opc_add_sw_rules; else opcode = ice_aqc_opc_remove_sw_rules; ice_fill_sw_rule(hw, &f_info, s_rule, opcode); status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL); if (status || !(f_info.flag & ICE_FLTR_TX_RX)) goto out; if (set) { u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index); if (f_info.flag & ICE_FLTR_TX) { pi->dflt_tx_vsi_num = hw_vsi_id; pi->dflt_tx_vsi_rule_id = index; } else if (f_info.flag & ICE_FLTR_RX) { pi->dflt_rx_vsi_num = hw_vsi_id; pi->dflt_rx_vsi_rule_id = index; } } else { if (f_info.flag & ICE_FLTR_TX) { pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT; } else if (f_info.flag & ICE_FLTR_RX) { pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT; } } out: ice_free(hw, s_rule); return status; } /** * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry * @list_head: head of rule list * @f_info: rule information * * Helper function to search for a unicast rule entry - this is to be used * to remove unicast MAC filter that is not shared with other VSIs on the * PF switch. * * Returns pointer to entry storing the rule if found */ static struct ice_fltr_mgmt_list_entry * ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head, struct ice_fltr_info *f_info) { struct ice_fltr_mgmt_list_entry *list_itr; LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry, list_entry) { if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, sizeof(f_info->l_data)) && f_info->fwd_id.hw_vsi_id == list_itr->fltr_info.fwd_id.hw_vsi_id && f_info->flag == list_itr->fltr_info.flag) return list_itr; } return NULL; } /** * ice_remove_mac_rule - remove a MAC based filter rule * @hw: pointer to the hardware structure * @m_list: list of MAC addresses and forwarding information * @recp_list: list from which function remove MAC address * * This function removes either a MAC filter rule or a specific VSI from a * VSI list for a multicast MAC address. * * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by * ice_add_mac. Caller should be aware that this call will only work if all * the entries passed into m_list were added previously. It will not attempt to * do a partial remove of entries that were found. 
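* * As a hedged usage sketch, a caller typically hands back the very list it populated for ice_add_mac: status = ice_remove_mac(hw, &m_list); the per-entry status member is then set below as each rule is removed.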
*/ static enum ice_status ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list, struct ice_sw_recipe *recp_list) { struct ice_fltr_list_entry *list_itr, *tmp; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ if (!m_list) return ICE_ERR_PARAM; rule_lock = &recp_list->filt_rule_lock; LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry, list_entry) { enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0]; u16 vsi_handle; if (l_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; vsi_handle = list_itr->fltr_info.vsi_handle; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; list_itr->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) { /* Don't remove the unicast address that belongs to * another VSI on the switch, since it is not being * shared... */ ice_acquire_lock(rule_lock); if (!ice_find_ucast_rule_entry(&recp_list->filt_rules, &list_itr->fltr_info)) { ice_release_lock(rule_lock); return ICE_ERR_DOES_NOT_EXIST; } ice_release_lock(rule_lock); } list_itr->status = ice_remove_rule_internal(hw, recp_list, list_itr); if (list_itr->status) return list_itr->status; } return ICE_SUCCESS; } /** * ice_remove_mac - remove a MAC address based filter rule * @hw: pointer to the hardware structure * @m_list: list of MAC addresses and forwarding information * */ enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list) { struct ice_sw_recipe *recp_list; recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC]; return ice_remove_mac_rule(hw, m_list, recp_list); } /** * ice_remove_vlan_rule - Remove VLAN based filter rule * @hw: pointer to the hardware structure * @v_list: list of VLAN entries and forwarding information * @recp_list: list from which function remove VLAN */ static enum ice_status ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list, struct ice_sw_recipe *recp_list) { struct ice_fltr_list_entry *v_list_itr, *tmp; LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry, list_entry) { enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; if (l_type != ICE_SW_LKUP_VLAN) return ICE_ERR_PARAM; v_list_itr->status = ice_remove_rule_internal(hw, recp_list, v_list_itr); if (v_list_itr->status) return v_list_itr->status; } return ICE_SUCCESS; } /** * ice_remove_vlan - remove a VLAN address based filter rule * @hw: pointer to the hardware structure * @v_list: list of VLAN and forwarding information * */ enum ice_status ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list) { struct ice_sw_recipe *recp_list; if (!v_list || !hw) return ICE_ERR_PARAM; recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN]; return ice_remove_vlan_rule(hw, v_list, recp_list); } /** * ice_vsi_uses_fltr - Determine if given VSI uses specified filter * @fm_entry: filter entry to inspect * @vsi_handle: VSI handle to compare with filter info */ static bool ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle) { return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && fm_entry->fltr_info.vsi_handle == vsi_handle) || (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && + fm_entry->vsi_list_info && (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map, vsi_handle)))); } /** * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to remove filters from * 
@vsi_list_head: pointer to the list to add entry to * @fi: pointer to fltr_info of filter entry to copy & add * * Helper function, used when creating a list of filters to remove from * a specific VSI. The entry added to vsi_list_head is a COPY of the * original filter entry, with the exception of fltr_info.fltr_act and * fltr_info.fwd_id fields. These are set such that later logic can * extract which VSI to remove the fltr from, and pass on that information. */ static enum ice_status ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct LIST_HEAD_TYPE *vsi_list_head, struct ice_fltr_info *fi) { struct ice_fltr_list_entry *tmp; /* this memory is freed up in the caller function * once filters for this VSI are removed */ tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp)); if (!tmp) return ICE_ERR_NO_MEMORY; tmp->fltr_info = *fi; /* Overwrite these fields to indicate which VSI to remove filter from, * so find and remove logic can extract the information from the * list entries. Note that original entries will still have proper * values. */ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; tmp->fltr_info.vsi_handle = vsi_handle; tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); LIST_ADD(&tmp->list_entry, vsi_list_head); return ICE_SUCCESS; } /** * ice_add_to_vsi_fltr_list - Add VSI filters to the list * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to remove filters from * @lkup_list_head: pointer to the list that has certain lookup type filters * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle * * Locates all filters in lkup_list_head that are used by the given VSI, * and adds COPIES of those entries to vsi_list_head (intended to be used * to remove the listed filters). * Note that this means all entries in vsi_list_head must be explicitly * deallocated by the caller when done with the list. */ static enum ice_status ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle, struct LIST_HEAD_TYPE *lkup_list_head, struct LIST_HEAD_TYPE *vsi_list_head) { struct ice_fltr_mgmt_list_entry *fm_entry; enum ice_status status = ICE_SUCCESS; /* check to make sure VSI ID is valid and within boundary */ if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head, ice_fltr_mgmt_list_entry, list_entry) { - struct ice_fltr_info *fi; - - fi = &fm_entry->fltr_info; - if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle)) + if (!ice_vsi_uses_fltr(fm_entry, vsi_handle)) continue; status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, - vsi_list_head, fi); + vsi_list_head, + &fm_entry->fltr_info); if (status) return status; } return status; } /** * ice_determine_promisc_mask * @fi: filter info to parse * * Helper function to determine which ICE_PROMISC_ mask corresponds * to the given filter info. */ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi) { u16 vid = fi->l_data.mac_vlan.vlan_id; u8 *macaddr = fi->l_data.mac.mac_addr; bool is_tx_fltr = false; u8 promisc_mask = 0; if (fi->flag == ICE_FLTR_TX) is_tx_fltr = true; if (IS_BROADCAST_ETHER_ADDR(macaddr)) promisc_mask |= is_tx_fltr ? ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX; else if (IS_MULTICAST_ETHER_ADDR(macaddr)) promisc_mask |= is_tx_fltr ? ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX; else if (IS_UNICAST_ETHER_ADDR(macaddr)) promisc_mask |= is_tx_fltr ? ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX; if (vid) promisc_mask |= is_tx_fltr ?
ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX; return promisc_mask; } /** * _ice_get_vsi_promisc - get promiscuous mode of given VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to retrieve info from * @promisc_mask: pointer to mask to be filled in * @vid: VLAN ID of promisc VLAN VSI * @sw: pointer to switch info struct for which the function retrieves the promiscuous mode */ static enum ice_status _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid, struct ice_switch_info *sw) { struct ice_fltr_mgmt_list_entry *itr; struct LIST_HEAD_TYPE *rule_head; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; *vid = 0; *promisc_mask = 0; rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules; rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock; ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry, list_entry) { /* Continue if this filter doesn't apply to this VSI or the * VSI ID is not in the VSI map for this filter */ if (!ice_vsi_uses_fltr(itr, vsi_handle)) continue; *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info); } ice_release_lock(rule_lock); return ICE_SUCCESS; } /** * ice_get_vsi_promisc - get promiscuous mode of given VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to retrieve info from * @promisc_mask: pointer to mask to be filled in * @vid: VLAN ID of promisc VLAN VSI */ enum ice_status ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid) { return _ice_get_vsi_promisc(hw, vsi_handle, promisc_mask, vid, hw->switch_info); } /** * _ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to retrieve info from * @promisc_mask: pointer to mask to be filled in * @vid: VLAN ID of promisc VLAN VSI * @sw: pointer to switch info struct for which the function retrieves the promiscuous mode */ static enum ice_status _ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid, struct ice_switch_info *sw) { struct ice_fltr_mgmt_list_entry *itr; struct LIST_HEAD_TYPE *rule_head; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; *vid = 0; *promisc_mask = 0; rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules; rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock; ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry, list_entry) { /* Continue if this filter doesn't apply to this VSI or the * VSI ID is not in the VSI map for this filter */ if (!ice_vsi_uses_fltr(itr, vsi_handle)) continue; *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info); } ice_release_lock(rule_lock); return ICE_SUCCESS; } /** * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to retrieve info from * @promisc_mask: pointer to mask to be filled in * @vid: VLAN ID of promisc VLAN VSI */ enum ice_status ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid) { return _ice_get_vsi_vlan_promisc(hw, vsi_handle, promisc_mask, vid, hw->switch_info); } /** * ice_remove_promisc - Remove promisc based filter rules * @hw: pointer to the hardware structure * @recp_id: recipe ID for which the rule needs to be removed * @v_list: list of promisc entries */ static enum ice_status ice_remove_promisc(struct
ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *v_list) { struct ice_fltr_list_entry *v_list_itr, *tmp; struct ice_sw_recipe *recp_list; recp_list = &hw->switch_info->recp_list[recp_id]; LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry, list_entry) { v_list_itr->status = ice_remove_rule_internal(hw, recp_list, v_list_itr); if (v_list_itr->status) return v_list_itr->status; } return ICE_SUCCESS; } /** * _ice_clear_vsi_promisc - clear specified promiscuous mode(s) * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to clear mode * @promisc_mask: mask of promiscuous config bits to clear * @vid: VLAN ID to clear VLAN promiscuous * @sw: pointer to switch info struct for which function add rule */ static enum ice_status _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid, struct ice_switch_info *sw) { struct ice_fltr_list_entry *fm_entry, *tmp; struct LIST_HEAD_TYPE remove_list_head; struct ice_fltr_mgmt_list_entry *itr; struct LIST_HEAD_TYPE *rule_head; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ enum ice_status status = ICE_SUCCESS; u8 recipe_id; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) recipe_id = ICE_SW_LKUP_PROMISC_VLAN; else recipe_id = ICE_SW_LKUP_PROMISC; rule_head = &sw->recp_list[recipe_id].filt_rules; rule_lock = &sw->recp_list[recipe_id].filt_rule_lock; INIT_LIST_HEAD(&remove_list_head); ice_acquire_lock(rule_lock); LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry, list_entry) { struct ice_fltr_info *fltr_info; u8 fltr_promisc_mask = 0; if (!ice_vsi_uses_fltr(itr, vsi_handle)) continue; fltr_info = &itr->fltr_info; if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN && vid != fltr_info->l_data.mac_vlan.vlan_id) continue; fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info); /* Skip if filter is not completely specified by given mask */ if (fltr_promisc_mask & ~promisc_mask) continue; status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle, &remove_list_head, fltr_info); if (status) { ice_release_lock(rule_lock); goto free_fltr_list; } } ice_release_lock(rule_lock); status = ice_remove_promisc(hw, recipe_id, &remove_list_head); free_fltr_list: LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head, ice_fltr_list_entry, list_entry) { LIST_DEL(&fm_entry->list_entry); ice_free(hw, fm_entry); } return status; } /** * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to clear mode * @promisc_mask: mask of promiscuous config bits to clear * @vid: VLAN ID to clear VLAN promiscuous */ enum ice_status ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { return _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid, hw->switch_info); } /** * _ice_set_vsi_promisc - set given VSI to given promiscuous mode(s) * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to configure * @promisc_mask: mask of promiscuous config bits * @vid: VLAN ID to set VLAN promiscuous * @lport: logical port number to configure promisc mode * @sw: pointer to switch info struct for which function add rule */ static enum ice_status _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid, u8 lport, struct ice_switch_info *sw) { enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR }; struct ice_fltr_list_entry f_list_entry; struct ice_fltr_info new_fltr; enum ice_status status = ICE_SUCCESS; 
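/* Illustrative example of the decomposition performed below: a promisc_mask of (ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX) yields two loop iterations, adding one UCAST_FLTR rule and one MCAST_FLTR rule, both in the Rx direction. */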
bool is_tx_fltr; u16 hw_vsi_id; int pkt_type; u8 recipe_id; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM); if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) { new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN; new_fltr.l_data.mac_vlan.vlan_id = vid; recipe_id = ICE_SW_LKUP_PROMISC_VLAN; } else { new_fltr.lkup_type = ICE_SW_LKUP_PROMISC; recipe_id = ICE_SW_LKUP_PROMISC; } /* Separate filters must be set for each direction/packet type * combination, so we will loop over the mask value, store the * individual type, and clear it out in the input mask as it * is found. */ while (promisc_mask) { struct ice_sw_recipe *recp_list; u8 *mac_addr; pkt_type = 0; is_tx_fltr = false; if (promisc_mask & ICE_PROMISC_UCAST_RX) { promisc_mask &= ~ICE_PROMISC_UCAST_RX; pkt_type = UCAST_FLTR; } else if (promisc_mask & ICE_PROMISC_UCAST_TX) { promisc_mask &= ~ICE_PROMISC_UCAST_TX; pkt_type = UCAST_FLTR; is_tx_fltr = true; } else if (promisc_mask & ICE_PROMISC_MCAST_RX) { promisc_mask &= ~ICE_PROMISC_MCAST_RX; pkt_type = MCAST_FLTR; } else if (promisc_mask & ICE_PROMISC_MCAST_TX) { promisc_mask &= ~ICE_PROMISC_MCAST_TX; pkt_type = MCAST_FLTR; is_tx_fltr = true; } else if (promisc_mask & ICE_PROMISC_BCAST_RX) { promisc_mask &= ~ICE_PROMISC_BCAST_RX; pkt_type = BCAST_FLTR; } else if (promisc_mask & ICE_PROMISC_BCAST_TX) { promisc_mask &= ~ICE_PROMISC_BCAST_TX; pkt_type = BCAST_FLTR; is_tx_fltr = true; } /* Check for VLAN promiscuous flag */ if (promisc_mask & ICE_PROMISC_VLAN_RX) { promisc_mask &= ~ICE_PROMISC_VLAN_RX; } else if (promisc_mask & ICE_PROMISC_VLAN_TX) { promisc_mask &= ~ICE_PROMISC_VLAN_TX; is_tx_fltr = true; } /* Set filter DA based on packet type */ mac_addr = new_fltr.l_data.mac.mac_addr; if (pkt_type == BCAST_FLTR) { ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM); } else if (pkt_type == MCAST_FLTR || pkt_type == UCAST_FLTR) { /* Use the dummy ether header DA */ ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN, ICE_NONDMA_TO_NONDMA); if (pkt_type == MCAST_FLTR) mac_addr[0] |= 0x1; /* Set multicast bit */ } /* Need to reset this to zero for all iterations */ new_fltr.flag = 0; if (is_tx_fltr) { new_fltr.flag |= ICE_FLTR_TX; new_fltr.src = hw_vsi_id; } else { new_fltr.flag |= ICE_FLTR_RX; new_fltr.src = lport; } new_fltr.fltr_act = ICE_FWD_TO_VSI; new_fltr.vsi_handle = vsi_handle; new_fltr.fwd_id.hw_vsi_id = hw_vsi_id; f_list_entry.fltr_info = new_fltr; recp_list = &sw->recp_list[recipe_id]; status = ice_add_rule_internal(hw, recp_list, lport, &f_list_entry); if (status != ICE_SUCCESS) goto set_promisc_exit; } set_promisc_exit: return status; } /** * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s) * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to configure * @promisc_mask: mask of promiscuous config bits * @vid: VLAN ID to set VLAN promiscuous */ enum ice_status ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid) { return _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid, hw->port_info->lport, hw->switch_info); } /** * _ice_set_vlan_vsi_promisc * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to configure * @promisc_mask: mask of promiscuous config bits * @rm_vlan_promisc: Clear VLANs VSI promisc mode * @lport: logical port number to configure promisc mode * @sw: pointer to switch info struct for which function add rule * * 
Configure VSI with all associated VLANs to given promiscuous mode(s) */ static enum ice_status _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc, u8 lport, struct ice_switch_info *sw) { struct ice_fltr_list_entry *list_itr, *tmp; struct LIST_HEAD_TYPE vsi_list_head; struct LIST_HEAD_TYPE *vlan_head; struct ice_lock *vlan_lock; /* Lock to protect filter rule list */ enum ice_status status; u16 vlan_id; INIT_LIST_HEAD(&vsi_list_head); vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules; ice_acquire_lock(vlan_lock); status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head, &vsi_list_head); ice_release_lock(vlan_lock); if (status) goto free_fltr_list; LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry, list_entry) { vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id; if (rm_vlan_promisc) status = _ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vlan_id, sw); else status = _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vlan_id, lport, sw); if (status) break; } free_fltr_list: LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head, ice_fltr_list_entry, list_entry) { LIST_DEL(&list_itr->list_entry); ice_free(hw, list_itr); } return status; } /** * ice_set_vlan_vsi_promisc * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to configure * @promisc_mask: mask of promiscuous config bits * @rm_vlan_promisc: Clear VLANs VSI promisc mode * * Configure VSI with all associated VLANs to given promiscuous mode(s) */ enum ice_status ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc) { return _ice_set_vlan_vsi_promisc(hw, vsi_handle, promisc_mask, rm_vlan_promisc, hw->port_info->lport, hw->switch_info); } /** * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to remove filters from * @recp_list: recipe list from which function remove fltr * @lkup: switch rule filter lookup type */ static void ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle, struct ice_sw_recipe *recp_list, enum ice_sw_lkup_type lkup) { struct ice_fltr_list_entry *fm_entry; struct LIST_HEAD_TYPE remove_list_head; struct LIST_HEAD_TYPE *rule_head; struct ice_fltr_list_entry *tmp; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ enum ice_status status; INIT_LIST_HEAD(&remove_list_head); rule_lock = &recp_list[lkup].filt_rule_lock; rule_head = &recp_list[lkup].filt_rules; ice_acquire_lock(rule_lock); status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head, &remove_list_head); ice_release_lock(rule_lock); if (status) - return; + goto free_fltr_list; switch (lkup) { case ICE_SW_LKUP_MAC: ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]); break; case ICE_SW_LKUP_VLAN: ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]); break; case ICE_SW_LKUP_PROMISC: case ICE_SW_LKUP_PROMISC_VLAN: ice_remove_promisc(hw, lkup, &remove_list_head); break; case ICE_SW_LKUP_MAC_VLAN: ice_debug(hw, ICE_DBG_SW, "MAC VLAN look up is not supported yet\n"); break; case ICE_SW_LKUP_ETHERTYPE: case ICE_SW_LKUP_ETHERTYPE_MAC: ice_remove_eth_mac(hw, &remove_list_head); break; case ICE_SW_LKUP_DFLT: ice_debug(hw, ICE_DBG_SW, "Remove filters for this lookup type hasn't been implemented yet\n"); break; case ICE_SW_LKUP_LAST: ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n"); break; } +free_fltr_list: LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, 
&remove_list_head, ice_fltr_list_entry, list_entry) { LIST_DEL(&fm_entry->list_entry); ice_free(hw, fm_entry); } } /** * ice_remove_vsi_fltr_rule - Remove all filters for a VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to remove filters from * @sw: pointer to switch info struct */ static void ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle, struct ice_switch_info *sw) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_remove_vsi_lkup_fltr(hw, vsi_handle, sw->recp_list, ICE_SW_LKUP_MAC); ice_remove_vsi_lkup_fltr(hw, vsi_handle, sw->recp_list, ICE_SW_LKUP_MAC_VLAN); ice_remove_vsi_lkup_fltr(hw, vsi_handle, sw->recp_list, ICE_SW_LKUP_PROMISC); ice_remove_vsi_lkup_fltr(hw, vsi_handle, sw->recp_list, ICE_SW_LKUP_VLAN); ice_remove_vsi_lkup_fltr(hw, vsi_handle, sw->recp_list, ICE_SW_LKUP_DFLT); ice_remove_vsi_lkup_fltr(hw, vsi_handle, sw->recp_list, ICE_SW_LKUP_ETHERTYPE); ice_remove_vsi_lkup_fltr(hw, vsi_handle, sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC); ice_remove_vsi_lkup_fltr(hw, vsi_handle, sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN); } /** * ice_remove_vsi_fltr - Remove all filters for a VSI * @hw: pointer to the hardware structure * @vsi_handle: VSI handle to remove filters from */ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle) { ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info); } /** * ice_alloc_res_cntr - allocating resource counter * @hw: pointer to the hardware structure * @type: type of resource * @alloc_shared: if set it is shared else dedicated * @num_items: number of entries requested for FD resource type * @counter_id: counter index returned by AQ call */ static enum ice_status ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 *counter_id) { struct ice_aqc_alloc_free_res_elem *buf; enum ice_status status; u16 buf_len; /* Allocate resource */ buf_len = ice_struct_size(buf, elem, 1); buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!buf) return ICE_ERR_NO_MEMORY; buf->num_elems = CPU_TO_LE16(num_items); buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) & ICE_AQC_RES_TYPE_M) | alloc_shared); status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, ice_aqc_opc_alloc_res, NULL); if (status) goto exit; *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp); exit: ice_free(hw, buf); return status; } /** * ice_free_res_cntr - free resource counter * @hw: pointer to the hardware structure * @type: type of resource * @alloc_shared: if set it is shared else dedicated * @num_items: number of entries to be freed for FD resource type * @counter_id: counter ID resource which needs to be freed */ static enum ice_status ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 counter_id) { struct ice_aqc_alloc_free_res_elem *buf; enum ice_status status; u16 buf_len; /* Free resource */ buf_len = ice_struct_size(buf, elem, 1); buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!buf) return ICE_ERR_NO_MEMORY; buf->num_elems = CPU_TO_LE16(num_items); buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) & ICE_AQC_RES_TYPE_M) | alloc_shared); buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id); status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, ice_aqc_opc_free_res, NULL); if (status) ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n"); ice_free(hw, buf); return status; } /** * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type * @hw: pointer to the hardware structure * @counter_id: returns counter index */ enum 
ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id) { return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, counter_id); } /** * ice_free_vlan_res_counter - Free counter resource for VLAN type * @hw: pointer to the hardware structure * @counter_id: counter index to be freed */ enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id) { return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER, ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, counter_id); } /** * ice_alloc_res_lg_act - add large action resource * @hw: pointer to the hardware structure * @l_id: large action ID to be filled in * @num_acts: number of actions to hold with a large action entry */ static enum ice_status ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts) { struct ice_aqc_alloc_free_res_elem *sw_buf; enum ice_status status; u16 buf_len; if (num_acts > ICE_MAX_LG_ACT || num_acts == 0) return ICE_ERR_PARAM; /* Allocate resource for large action */ buf_len = ice_struct_size(sw_buf, elem, 1); sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!sw_buf) return ICE_ERR_NO_MEMORY; sw_buf->num_elems = CPU_TO_LE16(1); /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1. * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2. * If num_acts is greater than 2, then use * ICE_AQC_RES_TYPE_WIDE_TABLE_4. * The num_acts cannot exceed 4. This was ensured at the * beginning of the function. */ if (num_acts == 1) sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1); else if (num_acts == 2) sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2); else sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4); status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, ice_aqc_opc_alloc_res, NULL); if (!status) *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp); ice_free(hw, sw_buf); return status; } /** * ice_add_mac_with_sw_marker - add filter with sw marker * @hw: pointer to the hardware structure * @f_info: filter info structure containing the MAC filter information * @sw_marker: sw marker to tag the Rx descriptor with */ enum ice_status ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info, u16 sw_marker) { struct ice_fltr_mgmt_list_entry *m_entry; struct ice_fltr_list_entry fl_info; struct ice_sw_recipe *recp_list; struct LIST_HEAD_TYPE l_head; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ enum ice_status ret; bool entry_exists; u16 lg_act_id; if (f_info->fltr_act != ICE_FWD_TO_VSI) return ICE_ERR_PARAM; if (f_info->lkup_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; if (sw_marker == ICE_INVAL_SW_MARKER_ID) return ICE_ERR_PARAM; if (!ice_is_vsi_valid(hw, f_info->vsi_handle)) return ICE_ERR_PARAM; f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle); /* Add the filter if it doesn't exist so that adding the large * action always results in an update */ INIT_LIST_HEAD(&l_head); fl_info.fltr_info = *f_info; LIST_ADD(&fl_info.list_entry, &l_head); entry_exists = false; ret = ice_add_mac_rule(hw, &l_head, hw->switch_info, hw->port_info->lport); if (ret == ICE_ERR_ALREADY_EXISTS) entry_exists = true; else if (ret) return ret; recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC]; rule_lock = &recp_list->filt_rule_lock; ice_acquire_lock(rule_lock); /* Get the bookkeeping entry for the filter */ m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info); if (!m_entry) goto exit_error; /* If counter action was enabled for this rule then don't enable * sw marker
large action */ if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) { ret = ICE_ERR_PARAM; goto exit_error; } /* if same marker was added before */ if (m_entry->sw_marker_id == sw_marker) { ret = ICE_ERR_ALREADY_EXISTS; goto exit_error; } /* Allocate a hardware table entry to hold large act. Three actions * for marker based large action */ ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3); if (ret) goto exit_error; if (lg_act_id == ICE_INVAL_LG_ACT_INDEX) goto exit_error; /* Update the switch rule to add the marker action */ ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id); if (!ret) { ice_release_lock(rule_lock); return ret; } exit_error: ice_release_lock(rule_lock); /* only remove entry if it did not exist previously */ if (!entry_exists) ret = ice_remove_mac(hw, &l_head); return ret; } /** * ice_add_mac_with_counter - add filter with counter enabled * @hw: pointer to the hardware structure * @f_info: pointer to filter info structure containing the MAC filter * information */ enum ice_status ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_fltr_mgmt_list_entry *m_entry; struct ice_fltr_list_entry fl_info; struct ice_sw_recipe *recp_list; struct LIST_HEAD_TYPE l_head; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ enum ice_status ret; bool entry_exist; u16 counter_id; u16 lg_act_id; if (f_info->fltr_act != ICE_FWD_TO_VSI) return ICE_ERR_PARAM; if (f_info->lkup_type != ICE_SW_LKUP_MAC) return ICE_ERR_PARAM; if (!ice_is_vsi_valid(hw, f_info->vsi_handle)) return ICE_ERR_PARAM; f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle); recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC]; entry_exist = false; rule_lock = &recp_list->filt_rule_lock; /* Add filter if it doesn't exist so then the adding of large * action always results in update */ INIT_LIST_HEAD(&l_head); fl_info.fltr_info = *f_info; LIST_ADD(&fl_info.list_entry, &l_head); ret = ice_add_mac_rule(hw, &l_head, hw->switch_info, hw->port_info->lport); if (ret == ICE_ERR_ALREADY_EXISTS) entry_exist = true; else if (ret) return ret; ice_acquire_lock(rule_lock); m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info); if (!m_entry) { ret = ICE_ERR_BAD_PTR; goto exit_error; } /* Don't enable counter for a filter for which sw marker was enabled */ if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) { ret = ICE_ERR_PARAM; goto exit_error; } /* If a counter was already enabled then don't need to add again */ if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) { ret = ICE_ERR_ALREADY_EXISTS; goto exit_error; } /* Allocate a hardware table entry to VLAN counter */ ret = ice_alloc_vlan_res_counter(hw, &counter_id); if (ret) goto exit_error; /* Allocate a hardware table entry to hold large act. 
Two actions for * counter based large action */ ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2); if (ret) goto exit_error; if (lg_act_id == ICE_INVAL_LG_ACT_INDEX) goto exit_error; /* Update the switch rule to add the counter action */ ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id); if (!ret) { ice_release_lock(rule_lock); return ret; } exit_error: ice_release_lock(rule_lock); /* only remove entry if it did not exist previously */ if (!entry_exist) ret = ice_remove_mac(hw, &l_head); return ret; } /** * ice_replay_fltr - Replay all the filters stored by a specific list head * @hw: pointer to the hardware structure * @list_head: list for which filters need to be replayed * @recp_id: Recipe ID for which rules need to be replayed */ static enum ice_status ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head) { struct ice_fltr_mgmt_list_entry *itr; enum ice_status status = ICE_SUCCESS; struct ice_sw_recipe *recp_list; u8 lport = hw->port_info->lport; struct LIST_HEAD_TYPE l_head; if (LIST_EMPTY(list_head)) return status; recp_list = &hw->switch_info->recp_list[recp_id]; /* Move entries from the given list_head to a temporary l_head so that * they can be replayed. Otherwise, when trying to re-add the same * filter, the function would return 'already exists' */ LIST_REPLACE_INIT(list_head, &l_head); /* The given list_head was reinitialized above, so the filters * can be added back to it by the replay logic below */ LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry, list_entry) { struct ice_fltr_list_entry f_entry; u16 vsi_handle; f_entry.fltr_info = itr->fltr_info; if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) { status = ice_add_rule_internal(hw, recp_list, lport, &f_entry); if (status != ICE_SUCCESS) goto end; continue; } /* Add a filter per VSI separately */ ice_for_each_set_bit(vsi_handle, itr->vsi_list_info->vsi_map, ICE_MAX_VSI) { if (!ice_is_vsi_valid(hw, vsi_handle)) break; ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); f_entry.fltr_info.vsi_handle = vsi_handle; f_entry.fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; if (recp_id == ICE_SW_LKUP_VLAN) status = ice_add_vlan_internal(hw, recp_list, &f_entry); else status = ice_add_rule_internal(hw, recp_list, lport, &f_entry); if (status != ICE_SUCCESS) goto end; } } end: /* Clear the filter management list */ ice_rem_sw_rule_info(hw, &l_head); return status; } /** * ice_replay_all_fltr - replay all filters stored in bookkeeping lists * @hw: pointer to the hardware structure * * NOTE: This function does not clean up partially added filters on error. * It is up to the caller of the function to issue a reset or fail early. */ enum ice_status ice_replay_all_fltr(struct ice_hw *hw) { struct ice_switch_info *sw = hw->switch_info; enum ice_status status = ICE_SUCCESS; u8 i; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules; status = ice_replay_fltr(hw, i, head); if (status != ICE_SUCCESS) return status; } return status; } /** * ice_replay_vsi_fltr - Replay filters for requested VSI * @hw: pointer to the hardware structure * @pi: pointer to port information structure * @sw: pointer to switch info struct for which the function replays filters * @vsi_handle: driver VSI handle * @recp_id: Recipe ID for which rules need to be replayed * @list_head: list for which filters need to be replayed * * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
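* For entries forwarding to a VSI list, the VSI's bit in vsi_map is cleared first so the add logic below can re-add this VSI to the list without reporting a duplicate.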
* A valid VSI handle must be passed. */ static enum ice_status ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi, struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id, struct LIST_HEAD_TYPE *list_head) { struct ice_fltr_mgmt_list_entry *itr; enum ice_status status = ICE_SUCCESS; struct ice_sw_recipe *recp_list; u16 hw_vsi_id; if (LIST_EMPTY(list_head)) return status; recp_list = &sw->recp_list[recp_id]; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry, list_entry) { struct ice_fltr_list_entry f_entry; f_entry.fltr_info = itr->fltr_info; if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN && itr->fltr_info.vsi_handle == vsi_handle) { /* update the src in case it is VSI num */ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) f_entry.fltr_info.src = hw_vsi_id; status = ice_add_rule_internal(hw, recp_list, pi->lport, &f_entry); if (status != ICE_SUCCESS) goto end; continue; } if (!itr->vsi_list_info || !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle)) continue; /* Clearing it so that the logic can add it back */ ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); f_entry.fltr_info.vsi_handle = vsi_handle; f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; /* update the src in case it is VSI num */ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI) f_entry.fltr_info.src = hw_vsi_id; if (recp_id == ICE_SW_LKUP_VLAN) status = ice_add_vlan_internal(hw, recp_list, &f_entry); else status = ice_add_rule_internal(hw, recp_list, pi->lport, &f_entry); if (status != ICE_SUCCESS) goto end; } end: return status; } /** * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists * @hw: pointer to the hardware structure * @pi: pointer to port information structure * @vsi_handle: driver VSI handle * * Replays filters for requested VSI via vsi_handle. */ enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi, u16 vsi_handle) { struct ice_switch_info *sw = hw->switch_info; enum ice_status status = ICE_SUCCESS; u8 i; /* Update the recipes that were created */ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { struct LIST_HEAD_TYPE *head; head = &sw->recp_list[i].filt_replay_rules; if (!sw->recp_list[i].adv_rule) status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i, head); if (status != ICE_SUCCESS) return status; } return ICE_SUCCESS; } /** * ice_rm_sw_replay_rule_info - helper function to delete filter replay rules * @hw: pointer to the HW struct * @sw: pointer to switch info struct for which the function removes filters * * Deletes the filter replay rules for the given switch */ void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw) { u8 i; if (!sw) return; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) { struct LIST_HEAD_TYPE *l_head; l_head = &sw->recp_list[i].filt_replay_rules; if (!sw->recp_list[i].adv_rule) ice_rem_sw_rule_info(hw, l_head); } } } /** * ice_rm_all_sw_replay_rule_info - deletes filter replay rules * @hw: pointer to the HW struct * * Deletes the filter replay rules. */ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw) { ice_rm_sw_replay_rule_info(hw, hw->switch_info); } diff --git a/sys/dev/ice/ice_switch.h b/sys/dev/ice/ice_switch.h index 2eff07d4d0d3..a121ee4e42a8 100644 --- a/sys/dev/ice/ice_switch.h +++ b/sys/dev/ice/ice_switch.h @@ -1,485 +1,485 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_SWITCH_H_ #define _ICE_SWITCH_H_ #include "ice_common.h" #include "ice_protocol_type.h" #define ICE_SW_CFG_MAX_BUF_LEN 2048 #define ICE_MAX_SW 256 #define ICE_DFLT_VSI_INVAL 0xff #define ICE_FLTR_RX BIT(0) #define ICE_FLTR_TX BIT(1) #define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX) #define DUMMY_ETH_HDR_LEN 16 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \ (DUMMY_ETH_HDR_LEN * \ sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0]))) #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr)) #define ICE_SW_RULE_LG_ACT_SIZE(n) \ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \ ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0]))) #define ICE_SW_RULE_VSI_LIST_SIZE(n) \ (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0]))) /* Worst case buffer length for ice_aqc_opc_get_res_alloc */ #define ICE_MAX_RES_TYPES 0x80 #define ICE_AQ_GET_RES_ALLOC_BUF_LEN \ (ICE_MAX_RES_TYPES * sizeof(struct ice_aqc_get_res_resp_elem)) #define ICE_VSI_INVAL_ID 0xFFFF #define ICE_INVAL_Q_HANDLE 0xFFFF /* VSI context structure for add/get/update/free operations */ struct ice_vsi_ctx { u16 vsi_num; u16 vsis_allocd; u16 vsis_unallocated; u16 flags; struct ice_aqc_vsi_props info; struct ice_sched_vsi_info sched; u8 alloc_from_pool; u8 vf_num; u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS]; struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS]; }; /* This is to be used by add/update mirror rule Admin Queue command */ struct ice_mir_rule_buf { u16 vsi_idx; /* VSI index */ /* For each VSI, user can specify whether corresponding VSI * should be added/removed to/from mirror rule * * add mirror rule: this should always be TRUE. 
* update mirror rule: add (true) or remove (false) VSI to/from * mirror rule */ u8 add; }; /* Switch recipe ID enum values are specific to hardware */ enum ice_sw_lkup_type { ICE_SW_LKUP_ETHERTYPE = 0, ICE_SW_LKUP_MAC = 1, ICE_SW_LKUP_MAC_VLAN = 2, ICE_SW_LKUP_PROMISC = 3, ICE_SW_LKUP_VLAN = 4, ICE_SW_LKUP_DFLT = 5, ICE_SW_LKUP_ETHERTYPE_MAC = 8, ICE_SW_LKUP_PROMISC_VLAN = 9, ICE_SW_LKUP_LAST }; /* type of filter src ID */ enum ice_src_id { ICE_SRC_ID_UNKNOWN = 0, ICE_SRC_ID_VSI, ICE_SRC_ID_QUEUE, ICE_SRC_ID_LPORT, }; struct ice_fltr_info { /* Look up information: how to look up the packet */ enum ice_sw_lkup_type lkup_type; /* Forward action: filter action to do after lookup */ enum ice_sw_fwd_act_type fltr_act; /* rule ID returned by firmware once filter rule is created */ u16 fltr_rule_id; u16 flag; /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ u16 src; enum ice_src_id src_id; union { struct { u8 mac_addr[ETH_ALEN]; } mac; struct { u8 mac_addr[ETH_ALEN]; u16 vlan_id; } mac_vlan; struct { u16 vlan_id; + u16 tpid; + u8 tpid_valid; } vlan; /* Set lkup_type as ICE_SW_LKUP_ETHERTYPE * if just using ethertype as filter. Set lkup_type as * ICE_SW_LKUP_ETHERTYPE_MAC if MAC also needs to be * passed in as filter. */ struct { u16 ethertype; u8 mac_addr[ETH_ALEN]; /* optional */ } ethertype_mac; } l_data; /* Make sure to zero out the memory of l_data before using * it, or set only the data associated with the lookup match; * everything else should be zero. */ /* Depending on filter action */ union { /* queue ID in case of ICE_FWD_TO_Q and starting * queue ID in case of ICE_FWD_TO_QGRP. */ u16 q_id:11; u16 hw_vsi_id:10; - u16 vsi_id:10; u16 vsi_list_id:10; } fwd_id; /* Sw VSI handle */ u16 vsi_handle; /* Set to num_queues if action is ICE_FWD_TO_QGRP. This field * determines the range of queues the packet needs to be forwarded to. * Note that qgrp_size must be set to a power of 2. */ u8 qgrp_size; /* Rule creations populate these indicators based on the switch type */ u8 lb_en; /* Indicate if packet can be looped back */ u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ }; struct ice_adv_lkup_elem { enum ice_protocol_type type; union ice_prot_hdr h_u; /* Header values */ union ice_prot_hdr m_u; /* Mask of header values to match */ }; struct ice_sw_act_ctrl { /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */ u16 src; u16 flag; enum ice_sw_fwd_act_type fltr_act; /* Depending on filter action */ union { /* This is a queue ID in case of ICE_FWD_TO_Q and starting * queue ID in case of ICE_FWD_TO_QGRP.
*/ u16 q_id:11; u16 vsi_id:10; u16 hw_vsi_id:10; u16 vsi_list_id:10; } fwd_id; /* software VSI handle */ u16 vsi_handle; u8 qgrp_size; }; struct ice_rule_query_data { /* Recipe ID for which the requested rule was added */ u16 rid; /* Rule ID that was added or is supposed to be removed */ u16 rule_id; /* vsi_handle for which Rule was added or is supposed to be removed */ u16 vsi_handle; }; struct ice_adv_rule_info { enum ice_sw_tunnel_type tun_type; struct ice_sw_act_ctrl sw_act; u32 priority; u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */ u16 fltr_rule_id; }; /* A collection of one or more four-word recipes */ struct ice_sw_recipe { /* For a chained recipe the root recipe is what should be used for * programming rules */ u8 is_root; u8 root_rid; u8 recp_created; /* Number of extraction words */ u8 n_ext_words; /* Protocol ID and Offset pair (extraction word) to describe the * recipe */ struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS]; u16 word_masks[ICE_MAX_CHAIN_WORDS]; /* if this recipe is a collection of other recipes */ u8 big_recp; /* if this recipe is part of another bigger recipe, then the chain * index corresponding to this recipe */ u8 chain_idx; /* if this recipe is a collection of other recipes, then the count of * those recipes and their recipe IDs */ u8 n_grp_count; /* Bit map specifying the IDs associated with this group of recipes */ ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES); enum ice_sw_tunnel_type tun_type; /* List of type ice_fltr_mgmt_list_entry or adv_rule */ u8 adv_rule; struct LIST_HEAD_TYPE filt_rules; struct LIST_HEAD_TYPE filt_replay_rules; struct ice_lock filt_rule_lock; /* protect filter rule structure */ /* Profiles this recipe should be associated with */ struct LIST_HEAD_TYPE fv_list; /* Profiles this recipe is associated with */ u8 num_profs, *prof_ids; /* Bit map for possible result indexes */ ice_declare_bitmap(res_idxs, ICE_MAX_FV_WORDS); /* This allows the user to specify the recipe priority. * For now, this becomes 'fwd_priority' when the recipe * is created; usually recipes can have 'fwd' and 'join' * priority. */ u8 priority; struct LIST_HEAD_TYPE rg_list; /* AQ buffer associated with this recipe */ struct ice_aqc_recipe_data_elem *root_buf; /* This struct saves the fv_words for a given lookup */ struct ice_prot_lkup_ext lkup_exts; }; /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */ struct ice_vsi_list_map_info { struct LIST_ENTRY_TYPE list_entry; ice_declare_bitmap(vsi_map, ICE_MAX_VSI); u16 vsi_list_id; /* counter to track how many rules are reusing this VSI list */ u16 ref_cnt; }; struct ice_fltr_list_entry { struct LIST_ENTRY_TYPE list_entry; enum ice_status status; struct ice_fltr_info fltr_info; }; /** * enum ice_fltr_marker - Marker for syncing OS and driver filter lists * @ICE_FLTR_NOT_FOUND: initial state, indicates filter has not been found * @ICE_FLTR_FOUND: set when a filter has been found in both lists * * This enumeration is used to help sync an operating system provided filter * list with the filters previously added. * * This is required for FreeBSD because the operating system does not provide * individual indications of whether a filter has been added or deleted, but * instead just notifies the driver with the entire new list. * * To use this marker state, the driver shall initially reset all filters to * the ICE_FLTR_NOT_FOUND state. Then, for each filter in the OS list, it * shall search the driver list for the filter. If found, the filter state * will be set to ICE_FLTR_FOUND.
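/* A condensed sketch of the three passes this comment describes, using the
 * driver's list macros and the ice_fltr_mgmt_list_entry type declared
 * below; the OS-provided MAC array (os_macs/os_count) is a hypothetical
 * stand-in for the list handed over by the stack.
 */
static void
ice_example_sync_mac_fltrs(struct LIST_HEAD_TYPE *drv_list,
			   const u8 (*os_macs)[ETH_ALEN], u16 os_count)
{
	struct ice_fltr_mgmt_list_entry *itr;
	u16 i;

	/* pass 1: reset every driver filter to the not-found state */
	LIST_FOR_EACH_ENTRY(itr, drv_list, ice_fltr_mgmt_list_entry,
			    list_entry)
		itr->marker = ICE_FLTR_NOT_FOUND;

	/* pass 2: mark driver filters also present in the OS list; a MAC
	 * found only in the OS list would be added here instead
	 */
	for (i = 0; i < os_count; i++) {
		LIST_FOR_EACH_ENTRY(itr, drv_list, ice_fltr_mgmt_list_entry,
				    list_entry)
			if (IS_ETHER_ADDR_EQUAL(itr->fltr_info.l_data.mac.mac_addr,
						os_macs[i]))
				itr->marker = ICE_FLTR_FOUND;
	}

	/* pass 3: anything still marked ICE_FLTR_NOT_FOUND was deleted by
	 * the OS and would now be removed via ice_remove_mac()
	 */
}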
If not found, that filter will be added. * Finally, the driver shall search the internal filter list for all filters * still marked as ICE_FLTR_NOT_FOUND and remove them. */ enum ice_fltr_marker { ICE_FLTR_NOT_FOUND, ICE_FLTR_FOUND, }; /* This defines an entry in the list that maintains MAC or VLAN membership * to HW list mapping, since multiple VSIs can subscribe to the same MAC or * VLAN. As an optimization the VSI list should be created only when a * second VSI becomes a subscriber to the same MAC address. VSI lists are always * used for VLAN membership. */ struct ice_fltr_mgmt_list_entry { /* back pointer to VSI list ID to VSI list mapping */ struct ice_vsi_list_map_info *vsi_list_info; u16 vsi_count; #define ICE_INVAL_LG_ACT_INDEX 0xffff u16 lg_act_idx; #define ICE_INVAL_SW_MARKER_ID 0xffff u16 sw_marker_id; struct LIST_ENTRY_TYPE list_entry; struct ice_fltr_info fltr_info; #define ICE_INVAL_COUNTER_ID 0xff u8 counter_index; enum ice_fltr_marker marker; }; struct ice_adv_fltr_mgmt_list_entry { struct LIST_ENTRY_TYPE list_entry; struct ice_adv_lkup_elem *lkups; struct ice_adv_rule_info rule_info; u16 lkups_cnt; struct ice_vsi_list_map_info *vsi_list_info; u16 vsi_count; }; enum ice_promisc_flags { ICE_PROMISC_UCAST_RX = 0x1, ICE_PROMISC_UCAST_TX = 0x2, ICE_PROMISC_MCAST_RX = 0x4, ICE_PROMISC_MCAST_TX = 0x8, ICE_PROMISC_BCAST_RX = 0x10, ICE_PROMISC_BCAST_TX = 0x20, ICE_PROMISC_VLAN_RX = 0x40, ICE_PROMISC_VLAN_TX = 0x80, }; /* VSI related commands */ enum ice_status ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); enum ice_status ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd); enum ice_status ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); enum ice_status ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); enum ice_status ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, bool keep_vsi_alloc, struct ice_sq_cd *cd); enum ice_status ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle); void ice_clear_all_vsi_ctx(struct ice_hw *hw); enum ice_status ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); enum ice_status ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi, u16 count, struct ice_mir_rule_buf *mr_buf, struct ice_sq_cd *cd, u16 *rule_id); enum ice_status ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd, struct ice_sq_cd *cd); enum ice_status ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh, u32 *ctl_bitmask); enum ice_status ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh, u32 ctl_bitmask); /* Switch config */ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id); enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id); -/* Switch/bridge related commands */ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id); enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id); enum ice_status ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id, u16 *counter_id); enum 
ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id); enum ice_status ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, struct ice_aqc_get_res_resp_elem *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries, struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id, struct ice_sq_cd *cd); enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list); enum ice_status ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list); void ice_rem_all_sw_rules_info(struct ice_hw *hw); enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst); enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst); enum ice_status ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list); enum ice_status ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list); enum ice_status ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info, u16 sw_marker); enum ice_status ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle); /* Promisc/defport setup for VSIs */ enum ice_status ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, u8 direction); enum ice_status ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); enum ice_status ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); enum ice_status ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, bool rm_vlan_promisc); /* Get VSIs Promisc/defport settings */ enum ice_status ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid); enum ice_status ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask, u16 *vid); enum ice_status ice_replay_all_fltr(struct ice_hw *hw); enum ice_status ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list); u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle); bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi, u16 vsi_handle); void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); #endif /* _ICE_SWITCH_H_ */ diff --git a/sys/dev/ice/ice_type.h b/sys/dev/ice/ice_type.h index 2ac2bb30f1e4..e32ee4d120f0 100644 --- a/sys/dev/ice/ice_type.h +++ b/sys/dev/ice/ice_type.h @@ -1,1280 +1,1293 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_TYPE_H_ #define _ICE_TYPE_H_ #define ETH_ALEN 6 #define ETH_HEADER_LEN 14 #define BIT(a) (1UL << (a)) #ifndef BIT_ULL #define BIT_ULL(a) (1ULL << (a)) #endif /* BIT_ULL */ #define BITS_PER_BYTE 8 #define _FORCE_ #define ICE_BYTES_PER_WORD 2 #define ICE_BYTES_PER_DWORD 4 #define ICE_MAX_TRAFFIC_CLASS 8 #ifndef MIN_T #define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b)) #endif #define IS_ASCII(_ch) ((_ch) < 0x80) #define STRUCT_HACK_VAR_LEN /** * ice_struct_size - size of struct with C99 flexible array member * @ptr: pointer to structure * @field: flexible array member (last member of the structure) * @num: number of elements of that flexible array member */ #define ice_struct_size(ptr, field, num) \ (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num)) #define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0])) #include "ice_status.h" #include "ice_hw_autogen.h" #include "ice_devids.h" #include "ice_osdep.h" #include "ice_bitops.h" /* Must come before ice_controlq.h */ #include "ice_controlq.h" #include "ice_lan_tx_rx.h" #include "ice_flex_type.h" #include "ice_protocol_type.h" #include "ice_vlan_mode.h" static inline bool ice_is_tc_ena(ice_bitmap_t bitmap, u8 tc) { return !!(bitmap & BIT(tc)); } #define DIV_64BIT(n, d) ((n) / (d)) static inline u64 round_up_64bit(u64 a, u32 b) { return DIV_64BIT(((a) + (b) / 2), (b)); } static inline u32 ice_round_to_num(u32 N, u32 R) { return ((((N) % (R)) < ((R) / 2)) ? (((N) / (R)) * (R)) : ((((N) + (R) - 1) / (R)) * (R))); } /* Driver always calls main vsi_handle first */ #define ICE_MAIN_VSI_HANDLE 0 /* Switch from ms to the 1usec global time (this is the GTIME resolution) */ #define ICE_MS_TO_GTIME(time) ((time) * 1000) /* Data type manipulation macros. 
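/* A usage sketch for the ice_struct_size() macro defined above, with a
 * hypothetical variable-length structure: the allocation covers the fixed
 * header plus num trailing elements of the flexible array member.
 */
struct ice_example_vlen {
	__le16 count;
	__le16 entry[STRUCT_HACK_VAR_LEN];
};

static struct ice_example_vlen *
ice_example_alloc_vlen(struct ice_hw *hw, u16 num)
{
	struct ice_example_vlen *buf;

	/* sizeof(*buf) + num * sizeof(buf->entry[0]); buf is only used
	 * inside sizeof here, so it need not be initialized yet
	 */
	buf = (struct ice_example_vlen *)
		ice_malloc(hw, ice_struct_size(buf, entry, num));
	if (buf)
		buf->count = CPU_TO_LE16(num);
	return buf;
}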
*/ #define ICE_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) #define ICE_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF)) #define ICE_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF)) #define ICE_LO_WORD(x) ((u16)((x) & 0xFFFF)) /* debug masks - set these bits in hw->debug_mask to control output */ #define ICE_DBG_TRACE BIT_ULL(0) /* for function-trace only */ #define ICE_DBG_INIT BIT_ULL(1) #define ICE_DBG_RELEASE BIT_ULL(2) #define ICE_DBG_FW_LOG BIT_ULL(3) #define ICE_DBG_LINK BIT_ULL(4) #define ICE_DBG_PHY BIT_ULL(5) #define ICE_DBG_QCTX BIT_ULL(6) #define ICE_DBG_NVM BIT_ULL(7) #define ICE_DBG_LAN BIT_ULL(8) #define ICE_DBG_FLOW BIT_ULL(9) #define ICE_DBG_DCB BIT_ULL(10) #define ICE_DBG_DIAG BIT_ULL(11) #define ICE_DBG_FD BIT_ULL(12) #define ICE_DBG_SW BIT_ULL(13) #define ICE_DBG_SCHED BIT_ULL(14) #define ICE_DBG_PKG BIT_ULL(16) #define ICE_DBG_RES BIT_ULL(17) #define ICE_DBG_AQ_MSG BIT_ULL(24) #define ICE_DBG_AQ_DESC BIT_ULL(25) #define ICE_DBG_AQ_DESC_BUF BIT_ULL(26) #define ICE_DBG_AQ_CMD BIT_ULL(27) #define ICE_DBG_AQ (ICE_DBG_AQ_MSG | \ ICE_DBG_AQ_DESC | \ ICE_DBG_AQ_DESC_BUF | \ ICE_DBG_AQ_CMD) #define ICE_DBG_USER BIT_ULL(31) #define ICE_DBG_ALL 0xFFFFFFFFFFFFFFFFULL #define IS_UNICAST_ETHER_ADDR(addr) \ ((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 0)) #define IS_MULTICAST_ETHER_ADDR(addr) \ ((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 1)) /* Check whether an address is broadcast. */ #define IS_BROADCAST_ETHER_ADDR(addr) \ ((bool)((((u16 *)(addr))[0] == ((u16)0xffff)))) #define IS_ZERO_ETHER_ADDR(addr) \ (((bool)((((u16 *)(addr))[0] == ((u16)0x0)))) && \ ((bool)((((u16 *)(addr))[1] == ((u16)0x0)))) && \ ((bool)((((u16 *)(addr))[2] == ((u16)0x0))))) #ifndef IS_ETHER_ADDR_EQUAL #define IS_ETHER_ADDR_EQUAL(addr1, addr2) \ (((bool)((((u16 *)(addr1))[0] == ((u16 *)(addr2))[0]))) && \ ((bool)((((u16 *)(addr1))[1] == ((u16 *)(addr2))[1]))) && \ ((bool)((((u16 *)(addr1))[2] == ((u16 *)(addr2))[2])))) #endif enum ice_aq_res_ids { ICE_NVM_RES_ID = 1, ICE_SPD_RES_ID, ICE_CHANGE_LOCK_RES_ID, ICE_GLOBAL_CFG_LOCK_RES_ID }; /* FW update timeout definitions are in milliseconds */ #define ICE_NVM_TIMEOUT 180000 #define ICE_CHANGE_LOCK_TIMEOUT 1000 #define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000 enum ice_aq_res_access_type { ICE_RES_READ = 1, ICE_RES_WRITE }; struct ice_driver_ver { u8 major_ver; u8 minor_ver; u8 build_ver; u8 subbuild_ver; u8 driver_string[32]; }; enum ice_fc_mode { ICE_FC_NONE = 0, ICE_FC_RX_PAUSE, ICE_FC_TX_PAUSE, ICE_FC_FULL, ICE_FC_AUTO, ICE_FC_PFC, ICE_FC_DFLT }; enum ice_phy_cache_mode { ICE_FC_MODE = 0, ICE_SPEED_MODE, ICE_FEC_MODE }; enum ice_fec_mode { ICE_FEC_NONE = 0, ICE_FEC_RS, ICE_FEC_BASER, ICE_FEC_AUTO }; struct ice_phy_cache_mode_data { union { enum ice_fec_mode curr_user_fec_req; enum ice_fc_mode curr_user_fc_req; u16 curr_user_speed_req; } data; }; enum ice_set_fc_aq_failures { ICE_SET_FC_AQ_FAIL_NONE = 0, ICE_SET_FC_AQ_FAIL_GET, ICE_SET_FC_AQ_FAIL_SET, ICE_SET_FC_AQ_FAIL_UPDATE }; /* These are structs for managing the hardware information and the operations */ /* MAC types */ enum ice_mac_type { ICE_MAC_UNKNOWN = 0, ICE_MAC_VF, ICE_MAC_E810, ICE_MAC_GENERIC, }; /* Media Types */ enum ice_media_type { ICE_MEDIA_UNKNOWN = 0, ICE_MEDIA_FIBER, ICE_MEDIA_BASET, ICE_MEDIA_BACKPLANE, ICE_MEDIA_DA, ICE_MEDIA_AUI, }; /* Software VSI types. 
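/* A small sanity-check sketch (hypothetical helper) built from the
 * address-classification macros above: an address is usable as a unicast
 * filter MAC only if it is non-zero and its individual/group bit is clear.
 */
static inline bool ice_example_valid_ucast_mac(const u8 *addr)
{
	return !IS_ZERO_ETHER_ADDR(addr) && IS_UNICAST_ETHER_ADDR(addr);
}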
*/ enum ice_vsi_type { ICE_VSI_PF = 0, ICE_VSI_VF = 1, ICE_VSI_LB = 6, }; struct ice_link_status { /* Refer to ice_aq_phy_type for bits definition */ u64 phy_type_low; u64 phy_type_high; u8 topo_media_conflict; u16 max_frame_size; u16 link_speed; u16 req_speeds; + u8 link_cfg_err; u8 lse_ena; /* Link Status Event notification */ u8 link_info; u8 an_info; u8 ext_info; u8 fec_info; u8 pacing; /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of * ice_aqc_get_phy_caps structure */ u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; }; /* Different data queue types: These are mainly for SW consumption. */ enum ice_q { ICE_DATA_Q_DOORBELL, ICE_DATA_Q_CMPL, ICE_DATA_Q_QUANTA, ICE_DATA_Q_RX, ICE_DATA_Q_TX, }; /* Different reset sources for which a disable queue AQ call has to be made in * order to clean the Tx scheduler as a part of the reset */ enum ice_disq_rst_src { ICE_NO_RESET = 0, ICE_VM_RESET, ICE_VF_RESET, }; /* PHY info such as phy_type, etc... */ struct ice_phy_info { struct ice_link_status link_info; struct ice_link_status link_info_old; u64 phy_type_low; u64 phy_type_high; enum ice_media_type media_type; u8 get_link_info; /* Refer to struct ice_aqc_get_link_status_data for details of the * enable bit in curr_user_speed_req */ u16 curr_user_speed_req; enum ice_fec_mode curr_user_fec_req; enum ice_fc_mode curr_user_fc_req; struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg; }; #define ICE_MAX_NUM_MIRROR_RULES 64 /* Common HW capabilities for SW use */ struct ice_hw_common_caps { /* Write CSR protection */ u64 wr_csr_prot; u32 switching_mode; /* switching mode supported - EVB switching (including cloud) */ #define ICE_NVM_IMAGE_TYPE_EVB 0x0 /* Manageability mode & supported protocols over MCTP */ u32 mgmt_mode; #define ICE_MGMT_MODE_PASS_THRU_MODE_M 0xF #define ICE_MGMT_MODE_CTL_INTERFACE_M 0xF0 #define ICE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00 u32 mgmt_protocols_mctp; #define ICE_MGMT_MODE_PROTO_RSVD BIT(0) #define ICE_MGMT_MODE_PROTO_PLDM BIT(1) #define ICE_MGMT_MODE_PROTO_OEM BIT(2) #define ICE_MGMT_MODE_PROTO_NC_SI BIT(3) u32 os2bmc; u32 valid_functions; /* DCB capabilities */ u32 active_tc_bitmap; u32 maxtc; /* RSS related capabilities */ u32 rss_table_size; /* 512 for PFs and 64 for VFs */ u32 rss_table_entry_width; /* RSS Entry width in bits */ /* Tx/Rx queues */ u32 num_rxq; /* Number/Total Rx queues */ u32 rxq_first_id; /* First queue ID for Rx queues */ u32 num_txq; /* Number/Total Tx queues */ u32 txq_first_id; /* First queue ID for Tx queues */ /* MSI-X vectors */ u32 num_msix_vectors; u32 msix_vector_first_id; /* Max MTU for function or device */ u32 max_mtu; /* WOL related */ u32 num_wol_proxy_fltr; u32 wol_proxy_vsi_seid; /* LED/SDP pin count */ u32 led_pin_num; u32 sdp_pin_num; /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */ #define ICE_MAX_SUPPORTED_GPIO_LED 12 #define ICE_MAX_SUPPORTED_GPIO_SDP 8 u8 led[ICE_MAX_SUPPORTED_GPIO_LED]; u8 sdp[ICE_MAX_SUPPORTED_GPIO_SDP]; /* SR-IOV virtualization */ u8 sr_iov_1_1; /* SR-IOV enabled */ /* EVB capabilities */ u8 evb_802_1_qbg; /* Edge Virtual Bridging */ u8 evb_802_1_qbh; /* Bridge Port Extension */ u8 dcb; u8 iscsi; u8 mgmt_cem; /* WoL and APM support */ #define ICE_WOL_SUPPORT_M BIT(0) #define ICE_ACPI_PROG_MTHD_M BIT(1) #define ICE_PROXY_SUPPORT_M BIT(2) u8 apm_wol_support; u8 acpi_prog_mthd; u8 proxy_support; bool sec_rev_disabled; bool update_disabled; bool nvm_unified_update; #define ICE_NVM_MGMT_SEC_REV_DISABLED BIT(0) #define ICE_NVM_MGMT_UPDATE_DISABLED BIT(1) #define
ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) }; /* Function specific capabilities */ struct ice_hw_func_caps { struct ice_hw_common_caps common_cap; u32 num_allocd_vfs; /* Number of allocated VFs */ u32 vf_base_id; /* Logical ID of the first VF */ u32 guar_num_vsi; }; /* Device wide capabilities */ struct ice_hw_dev_caps { struct ice_hw_common_caps common_cap; u32 num_vfs_exposed; /* Total number of VFs exposed */ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ u32 num_funcs; }; /* Information about MAC such as address, etc... */ struct ice_mac_info { u8 lan_addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; u8 port_addr[ETH_ALEN]; u8 wol_addr[ETH_ALEN]; }; /* PCI bus types */ enum ice_bus_type { ice_bus_unknown = 0, ice_bus_pci_express, ice_bus_embedded, /* Is device Embedded versus card */ ice_bus_reserved }; /* PCI bus speeds */ enum ice_pcie_bus_speed { ice_pcie_speed_unknown = 0xff, ice_pcie_speed_2_5GT = 0x14, ice_pcie_speed_5_0GT = 0x15, ice_pcie_speed_8_0GT = 0x16, ice_pcie_speed_16_0GT = 0x17 }; /* PCI bus widths */ enum ice_pcie_link_width { ice_pcie_lnk_width_resrv = 0x00, ice_pcie_lnk_x1 = 0x01, ice_pcie_lnk_x2 = 0x02, ice_pcie_lnk_x4 = 0x04, ice_pcie_lnk_x8 = 0x08, ice_pcie_lnk_x12 = 0x0C, ice_pcie_lnk_x16 = 0x10, ice_pcie_lnk_x32 = 0x20, ice_pcie_lnk_width_unknown = 0xff, }; /* Reset types used to determine which kind of reset was requested. These * defines match the values of the RESET_TYPE field in the GLGEN_RSTAT * register. ICE_RESET_PFR does not match any RESET_TYPE field in the * GLGEN_RSTAT register because its reset source is different than the other * types listed. */ enum ice_reset_req { ICE_RESET_POR = 0, ICE_RESET_INVAL = 0, ICE_RESET_CORER = 1, ICE_RESET_GLOBR = 2, ICE_RESET_EMPR = 3, ICE_RESET_PFR = 4, }; /* Bus parameters */ struct ice_bus_info { enum ice_pcie_bus_speed speed; enum ice_pcie_link_width width; enum ice_bus_type type; u16 domain_num; u16 device; u8 func; u8 bus_num; }; /* Flow control (FC) parameters */ struct ice_fc_info { enum ice_fc_mode current_mode; /* FC mode in effect */ enum ice_fc_mode req_mode; /* FC mode requested by caller */ }; /* Option ROM version information */ struct ice_orom_info { u8 major; /* Major version of OROM */ u8 patch; /* Patch version of OROM */ u16 build; /* Build version of OROM */ u32 srev; /* Security revision */ }; /* NVM version information */ struct ice_nvm_info { u32 eetrack; u32 srev; u8 major; u8 minor; }; /* Minimum Security Revision information */ struct ice_minsrev_info { u32 nvm; u32 orom; u8 nvm_valid : 1; u8 orom_valid : 1; }; /* netlist version information */ struct ice_netlist_info { u32 major; /* major high/low */ u32 minor; /* minor high/low */ u32 type; /* type high/low */ u32 rev; /* revision high/low */ u32 hash; /* SHA-1 hash word */ u16 cust_ver; /* customer version */ }; /* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules * of the flash image. */ enum ice_flash_bank { ICE_INVALID_FLASH_BANK, ICE_1ST_FLASH_BANK, ICE_2ND_FLASH_BANK, }; /* Enumeration of which flash bank to read from, either the active bank or * the inactive bank. Used to abstract the 1st and 2nd bank notion from code * that just wants to read the active or inactive flash bank.
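/* A sketch of how the bank-select enum declared just below combines with
 * enum ice_flash_bank above (an illustration of the stated intent, not a
 * copy of driver code): resolving an active/inactive read request down to
 * the physical 1st/2nd flash bank.
 */
static enum ice_flash_bank
ice_example_resolve_bank(enum ice_bank_select sel, enum ice_flash_bank active)
{
	if (sel == ICE_ACTIVE_FLASH_BANK)
		return active;

	/* the inactive bank is whichever of the two banks is not active;
	 * handling of ICE_INVALID_FLASH_BANK is omitted in this sketch
	 */
	return (active == ICE_1ST_FLASH_BANK) ? ICE_2ND_FLASH_BANK
					      : ICE_1ST_FLASH_BANK;
}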
*/ enum ice_bank_select { ICE_ACTIVE_FLASH_BANK, ICE_INACTIVE_FLASH_BANK, }; /* information for accessing NVM, OROM, and Netlist flash banks */ struct ice_bank_info { u32 nvm_ptr; /* Pointer to 1st NVM bank */ u32 nvm_size; /* Size of NVM bank */ u32 orom_ptr; /* Pointer to 1st OROM bank */ u32 orom_size; /* Size of OROM bank */ u32 netlist_ptr; /* Pointer to 1st Netlist bank */ u32 netlist_size; /* Size of Netlist bank */ enum ice_flash_bank nvm_bank; /* Active NVM bank */ enum ice_flash_bank orom_bank; /* Active OROM bank */ enum ice_flash_bank netlist_bank; /* Active Netlist bank */ }; /* Flash Chip Information */ struct ice_flash_info { struct ice_orom_info orom; /* Option ROM version info */ struct ice_nvm_info nvm; /* NVM version information */ struct ice_netlist_info netlist;/* Netlist version info */ struct ice_bank_info banks; /* Flash Bank information */ u16 sr_words; /* Shadow RAM size in words */ u32 flash_size; /* Size of available flash in bytes */ u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; struct ice_link_default_override_tlv { u8 options; #define ICE_LINK_OVERRIDE_OPT_M 0x3F #define ICE_LINK_OVERRIDE_STRICT_MODE BIT(0) #define ICE_LINK_OVERRIDE_EPCT_DIS BIT(1) #define ICE_LINK_OVERRIDE_PORT_DIS BIT(2) #define ICE_LINK_OVERRIDE_EN BIT(3) #define ICE_LINK_OVERRIDE_AUTO_LINK_DIS BIT(4) #define ICE_LINK_OVERRIDE_EEE_EN BIT(5) u8 phy_config; #define ICE_LINK_OVERRIDE_PHY_CFG_S 8 #define ICE_LINK_OVERRIDE_PHY_CFG_M (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S) #define ICE_LINK_OVERRIDE_PAUSE_M 0x3 #define ICE_LINK_OVERRIDE_LESM_EN BIT(6) #define ICE_LINK_OVERRIDE_AUTO_FEC_EN BIT(7) u8 fec_options; #define ICE_LINK_OVERRIDE_FEC_OPT_M 0xFF u8 rsvd1; u64 phy_type_low; u64 phy_type_high; }; #define ICE_NVM_VER_LEN 32 /* Max number of port to queue branches w.r.t topology */ #define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS #define ice_for_each_traffic_class(_i) \ for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++) /* ICE_DFLT_AGG_ID means that all new VM(s)/VSI node connects * to driver defined policy for default aggregator */ #define ICE_INVAL_TEID 0xFFFFFFFF #define ICE_DFLT_AGG_ID 0 struct ice_sched_node { struct ice_sched_node *parent; struct ice_sched_node *sibling; /* next sibling in the same layer */ struct ice_sched_node **children; struct ice_aqc_txsched_elem_data info; u32 agg_id; /* aggregator group ID */ u16 vsi_handle; u8 in_use; /* suspended or in use */ u8 tx_sched_layer; /* Logical Layer (1-9) */ u8 num_children; u8 tc_num; u8 owner; #define ICE_SCHED_NODE_OWNER_LAN 0 #define ICE_SCHED_NODE_OWNER_AE 1 #define ICE_SCHED_NODE_OWNER_RDMA 2 }; /* Access Macros for Tx Sched Elements data */ #define ICE_TXSCHED_GET_NODE_TEID(x) LE32_TO_CPU((x)->info.node_teid) #define ICE_TXSCHED_GET_PARENT_TEID(x) LE32_TO_CPU((x)->info.parent_teid) #define ICE_TXSCHED_GET_CIR_RL_ID(x) \ LE16_TO_CPU((x)->info.cir_bw.bw_profile_idx) #define ICE_TXSCHED_GET_EIR_RL_ID(x) \ LE16_TO_CPU((x)->info.eir_bw.bw_profile_idx) #define ICE_TXSCHED_GET_SRL_ID(x) LE16_TO_CPU((x)->info.srl_id) #define ICE_TXSCHED_GET_CIR_BWALLOC(x) \ LE16_TO_CPU((x)->info.cir_bw.bw_alloc) #define ICE_TXSCHED_GET_EIR_BWALLOC(x) \ LE16_TO_CPU((x)->info.eir_bw.bw_alloc) struct ice_sched_rl_profile { u32 rate; /* In Kbps */ struct ice_aqc_rl_profile_elem info; }; /* The aggregator type determines if identifier is for a VSI group, * aggregator group, aggregator of queues, or queue group. 
*/ enum ice_agg_type { ICE_AGG_TYPE_UNKNOWN = 0, ICE_AGG_TYPE_TC, ICE_AGG_TYPE_AGG, /* aggregator */ ICE_AGG_TYPE_VSI, ICE_AGG_TYPE_QG, ICE_AGG_TYPE_Q }; /* Rate limit types */ enum ice_rl_type { ICE_UNKNOWN_BW = 0, ICE_MIN_BW, /* for CIR profile */ ICE_MAX_BW, /* for EIR profile */ ICE_SHARED_BW /* for shared profile */ }; #define ICE_SCHED_MIN_BW 500 /* in Kbps */ #define ICE_SCHED_MAX_BW 100000000 /* in Kbps */ #define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */ #define ICE_SCHED_NO_PRIORITY 0 #define ICE_SCHED_NO_BW_WT 0 #define ICE_SCHED_DFLT_RL_PROF_ID 0 #define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF #define ICE_SCHED_DFLT_BW_WT 4 #define ICE_SCHED_INVAL_PROF_ID 0xFFFF #define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */ /* Access Macros for Tx Sched RL Profile data */ #define ICE_TXSCHED_GET_RL_PROF_ID(p) LE16_TO_CPU((p)->info.profile_id) #define ICE_TXSCHED_GET_RL_MBS(p) LE16_TO_CPU((p)->info.max_burst_size) #define ICE_TXSCHED_GET_RL_MULTIPLIER(p) LE16_TO_CPU((p)->info.rl_multiply) #define ICE_TXSCHED_GET_RL_WAKEUP_MV(p) LE16_TO_CPU((p)->info.wake_up_calc) #define ICE_TXSCHED_GET_RL_ENCODE(p) LE16_TO_CPU((p)->info.rl_encode) +#define ICE_MAX_PORT_PER_PCI_DEV 8 + /* The following tree example shows the naming conventions followed under
 * the ice_port_info struct for the default scheduler tree topology.
 *
 *              A tree on a port
 *                 *                      ---> root node
 *    (TC0)/  /  /  / \  \  \  \(TC7)     ---> num_branches (range:1- 8)
 *        *  *  *  *   *  *  *  *         |
 *       /                                |
 *      *                                 |
 *     /                                  |-> num_elements (range:1 - 9)
 *    *                                   |   implies num_of_layers
 *   /                                    |
 *  (a)*                                  |
 *
 * (a) is the last_node_teid (not of type Leaf). A leaf node is created under
 * (a) as a child node; this is where queues get added. The add Tx/Rx queue
 * admin commands need the TEID of (a) to add queues.
 *
 * This tree
 *	-> has 8 branches (one for each TC)
 *	-> First branch (TC0) has 4 elements
 *	-> has 4 layers
 *	-> (a) is the topmost layer node created by firmware on branch 0
 *
 * Note: Above asterisk tree covers only basic terminology and scenario.
 * Refer to the documentation for more info.
*/ /* Data structure for saving BW information */ enum ice_bw_type { ICE_BW_TYPE_PRIO, ICE_BW_TYPE_CIR, ICE_BW_TYPE_CIR_WT, ICE_BW_TYPE_EIR, ICE_BW_TYPE_EIR_WT, ICE_BW_TYPE_SHARED, ICE_BW_TYPE_CNT /* This must be last */ }; struct ice_bw { u32 bw; u16 bw_alloc; }; struct ice_bw_type_info { ice_declare_bitmap(bw_t_bitmap, ICE_BW_TYPE_CNT); u8 generic; struct ice_bw cir_bw; struct ice_bw eir_bw; u32 shared_bw; }; /* VSI queue context structure for given TC */ struct ice_q_ctx { u16 q_handle; u32 q_teid; /* bw_t_info saves queue BW information */ struct ice_bw_type_info bw_t_info; }; /* VSI type list entry to locate corresponding VSI/aggregator nodes */ struct ice_sched_vsi_info { struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS]; struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS]; u16 max_lanq[ICE_MAX_TRAFFIC_CLASS]; /* bw_t_info saves VSI BW information */ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS]; }; /* CEE or IEEE 802.1Qaz ETS Configuration data */ struct ice_dcb_ets_cfg { u8 willing; u8 cbs; u8 maxtcs; u8 prio_table[ICE_MAX_TRAFFIC_CLASS]; u8 tcbwtable[ICE_MAX_TRAFFIC_CLASS]; u8 tsatable[ICE_MAX_TRAFFIC_CLASS]; }; /* CEE or IEEE 802.1Qaz PFC Configuration data */ struct ice_dcb_pfc_cfg { u8 willing; u8 mbc; u8 pfccap; u8 pfcena; }; /* CEE or IEEE 802.1Qaz Application Priority data */ struct ice_dcb_app_priority_table { u16 prot_id; u8 priority; u8 selector; }; #define ICE_MAX_USER_PRIORITY 8 #define ICE_DCBX_MAX_APPS 32 #define ICE_LLDPDU_SIZE 1500 #define ICE_TLV_STATUS_OPER 0x1 #define ICE_TLV_STATUS_SYNC 0x2 #define ICE_TLV_STATUS_ERR 0x4 #define ICE_APP_PROT_ID_FCOE 0x8906 #define ICE_APP_PROT_ID_ISCSI 0x0cbc #define ICE_APP_PROT_ID_ISCSI_860 0x035c #define ICE_APP_PROT_ID_FIP 0x8914 #define ICE_APP_SEL_ETHTYPE 0x1 #define ICE_APP_SEL_TCPIP 0x2 #define ICE_CEE_APP_SEL_ETHTYPE 0x0 #define ICE_CEE_APP_SEL_TCPIP 0x1 struct ice_dcbx_cfg { u32 numapps; u32 tlv_status; /* CEE mode TLV status */ struct ice_dcb_ets_cfg etscfg; struct ice_dcb_ets_cfg etsrec; struct ice_dcb_pfc_cfg pfc; struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS]; u8 dcbx_mode; #define ICE_DCBX_MODE_CEE 0x1 #define ICE_DCBX_MODE_IEEE 0x2 u8 app_mode; #define ICE_DCBX_APPS_NON_WILLING 0x1 }; struct ice_qos_cfg { struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */ struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */ u8 dcbx_status : 3; /* see ICE_DCBX_STATUS_DIS */ u8 is_sw_lldp : 1; }; struct ice_port_info { struct ice_sched_node *root; /* Root Node per Port */ struct ice_hw *hw; /* back pointer to HW instance */ u32 last_node_teid; /* scheduler last node info */ u16 sw_id; /* Initial switch ID belongs to port */ u16 pf_vf_num; u8 port_state; #define ICE_SCHED_PORT_STATE_INIT 0x0 #define ICE_SCHED_PORT_STATE_READY 0x1 u8 lport; #define ICE_LPORT_MASK 0xff u16 dflt_tx_vsi_rule_id; u16 dflt_tx_vsi_num; u16 dflt_rx_vsi_rule_id; u16 dflt_rx_vsi_num; struct ice_fc_info fc; struct ice_mac_info mac; struct ice_phy_info phy; struct ice_lock sched_lock; /* protect access to TXSched tree */ struct ice_sched_node * sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM]; struct ice_bw_type_info root_node_bw_t_info; struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS]; struct ice_qos_cfg qos_cfg; u8 is_vf:1; }; struct ice_switch_info { struct LIST_HEAD_TYPE vsi_list_map_head; struct ice_sw_recipe *recp_list; u16 prof_res_bm_init; u16 max_used_prof_index; ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); 
}; /* Enum defining the different states of the mailbox snapshot in the * PF-VF mailbox overflow detection algorithm. The snapshot can be in one of * the following states: * 1. ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot * within the mailbox buffer. * 2. ICE_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snapshot * 3. ICE_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the * mailbox and mark any VFs sending more messages than the threshold limit set. * 4. ICE_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to 0xFFFFFFFF. */ enum ice_mbx_snapshot_state { ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0, ICE_MAL_VF_DETECT_STATE_TRAVERSE, ICE_MAL_VF_DETECT_STATE_DETECT, ICE_MAL_VF_DETECT_STATE_INVALID = 0xFFFFFFFF, }; /* Structure to hold information of the static snapshot and the mailbox * buffer data used to generate and track the snapshot. * 1. state: the state of the mailbox snapshot in the malicious VF * detection state handler ice_mbx_vf_state_handler() * 2. head : head of the mailbox snapshot in a circular mailbox buffer * 3. tail : tail of the mailbox snapshot in a circular mailbox buffer * 4. num_iterations: number of messages traversed in circular mailbox buffer * 5. num_msg_proc: number of messages processed in mailbox * 6. num_pending_arq: number of pending asynchronous messages * 7. max_num_msgs_mbx: maximum messages in mailbox for currently * serviced work item or interrupt. */ struct ice_mbx_snap_buffer_data { enum ice_mbx_snapshot_state state; u32 head; u32 tail; u32 num_iterations; u16 num_msg_proc; u16 num_pending_arq; u16 max_num_msgs_mbx; }; /* Structure to track messages sent by VFs on mailbox: * 1. vf_cntr : a counter array of VFs to track the number of * asynchronous messages sent by each VF * 2. vfcntr_len : number of entries in VF counter array */ struct ice_mbx_vf_counter { u32 *vf_cntr; u32 vfcntr_len; }; /* Structure to hold data relevant to the captured static snapshot * of the PF-VF mailbox. */ struct ice_mbx_snapshot { struct ice_mbx_snap_buffer_data mbx_buf; struct ice_mbx_vf_counter mbx_vf; }; /* Structure to hold data to be used for capturing or updating a * static snapshot. * 1. num_msg_proc: number of messages processed in mailbox * 2. num_pending_arq: number of pending asynchronous messages * 3. max_num_msgs_mbx: maximum messages in mailbox for currently * serviced work item or interrupt. * 4. async_watermark_val: An upper threshold set by the caller to determine * if the pending arq count is large enough to assume that there is * the possibility of a malicious VF.
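/* A sketch of the watermark test implied by item 4 above (hypothetical
 * helper operating on the structure declared just below): a pending
 * asynchronous message count above the caller's watermark is treated as a
 * hint that a VF may be flooding the mailbox.
 */
static bool ice_example_mbx_over_watermark(const struct ice_mbx_data *data)
{
	return data->num_pending_arq > data->async_watermark_val;
}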
*/ struct ice_mbx_data { u16 num_msg_proc; u16 num_pending_arq; u16 max_num_msgs_mbx; u16 async_watermark_val; }; /* Port hardware description */ struct ice_hw { u8 *hw_addr; void *back; struct ice_aqc_layer_props *layer_info; struct ice_port_info *port_info; /* 2D Array for each Tx Sched RL Profile type */ struct ice_sched_rl_profile **cir_profiles; struct ice_sched_rl_profile **eir_profiles; struct ice_sched_rl_profile **srl_profiles; /* PSM clock frequency for calculating RL profile params */ u32 psm_clk_freq; u64 debug_mask; /* BITMAP for debug mask */ enum ice_mac_type mac_type; /* pci info */ u16 device_id; u16 vendor_id; u16 subsystem_device_id; u16 subsystem_vendor_id; u8 revision_id; u8 pf_id; /* device profile info */ u16 max_burst_size; /* driver sets this value */ /* Tx Scheduler values */ u8 num_tx_sched_layers; u8 num_tx_sched_phys_layers; u8 flattened_layers; u8 max_cgds; u8 sw_entry_point_layer; u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM]; struct LIST_HEAD_TYPE agg_list; /* lists all aggregator */ /* List contain profile ID(s) and other params per layer */ struct LIST_HEAD_TYPE rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM]; struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI]; u8 evb_veb; /* true for VEB, false for VEPA */ u8 reset_ongoing; /* true if HW is in reset, false otherwise */ struct ice_bus_info bus; struct ice_flash_info flash; struct ice_hw_dev_caps dev_caps; /* device capabilities */ struct ice_hw_func_caps func_caps; /* function capabilities */ struct ice_switch_info *switch_info; /* switch filter lists */ /* Control Queue info */ struct ice_ctl_q_info adminq; struct ice_ctl_q_info mailboxq; u8 api_branch; /* API branch version */ u8 api_maj_ver; /* API major version */ u8 api_min_ver; /* API minor version */ u8 api_patch; /* API patch version */ u8 fw_branch; /* firmware branch version */ u8 fw_maj_ver; /* firmware major version */ u8 fw_min_ver; /* firmware minor version */ u8 fw_patch; /* firmware patch version */ u32 fw_build; /* firmware build number */ /* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL * register. Used for determining the ITR/INTRL granularity during * initialization. 
*/ #define ICE_MAX_AGG_BW_200G 0x0 #define ICE_MAX_AGG_BW_100G 0X1 #define ICE_MAX_AGG_BW_50G 0x2 #define ICE_MAX_AGG_BW_25G 0x3 /* ITR granularity for different speeds */ #define ICE_ITR_GRAN_ABOVE_25 2 #define ICE_ITR_GRAN_MAX_25 4 /* ITR granularity in 1 us */ u8 itr_gran; /* INTRL granularity for different speeds */ #define ICE_INTRL_GRAN_ABOVE_25 4 #define ICE_INTRL_GRAN_MAX_25 8 /* INTRL granularity in 1 us */ u8 intrl_gran; u8 ucast_shared; /* true if VSIs can share unicast addr */ #define ICE_PHY_PER_NAC 1 #define ICE_MAX_QUAD 2 #define ICE_NUM_QUAD_TYPE 2 #define ICE_PORTS_PER_QUAD 4 #define ICE_PHY_0_LAST_QUAD 1 #define ICE_PORTS_PER_PHY 8 #define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; u32 active_track_id; u8 active_pkg_name[ICE_PKG_NAME_SIZE]; u8 active_pkg_in_nvm; enum ice_aq_err pkg_dwnld_status; /* Driver's package ver - (from the Ice Metadata section) */ struct ice_pkg_ver pkg_ver; u8 pkg_name[ICE_PKG_NAME_SIZE]; /* Driver's Ice segment format version and id (from the Ice seg) */ struct ice_pkg_ver ice_seg_fmt_ver; u8 ice_seg_id[ICE_SEG_ID_SIZE]; /* Pointer to the ice segment */ struct ice_seg *seg; /* Pointer to allocated copy of pkg memory */ u8 *pkg_copy; u32 pkg_size; /* tunneling info */ struct ice_lock tnl_lock; struct ice_tunnel_table tnl; /* HW block tables */ struct ice_blk_info blk[ICE_BLK_COUNT]; struct ice_lock fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */ struct LIST_HEAD_TYPE fl_profs[ICE_BLK_COUNT]; struct ice_lock rss_locks; /* protect RSS configuration */ struct LIST_HEAD_TYPE rss_list_head; struct ice_mbx_snapshot mbx_snapshot; - struct ice_vlan_mode_ops vlan_mode_ops; + u8 dvm_ena; }; /* Statistics collected by each port, VSI, VEB, and S-channel */ struct ice_eth_stats { u64 rx_bytes; /* gorc */ u64 rx_unicast; /* uprc */ u64 rx_multicast; /* mprc */ u64 rx_broadcast; /* bprc */ u64 rx_discards; /* rdpc */ u64 rx_unknown_protocol; /* rupp */ u64 tx_bytes; /* gotc */ u64 tx_unicast; /* uptc */ u64 tx_multicast; /* mptc */ u64 tx_broadcast; /* bptc */ u64 tx_discards; /* tdpc */ u64 tx_errors; /* tepc */ u64 rx_no_desc; /* repc */ u64 rx_errors; /* repc */ }; #define ICE_MAX_UP 8 /* Statistics collected per VEB per User Priority (UP) for up to 8 UPs */ struct ice_veb_up_stats { u64 up_rx_pkts[ICE_MAX_UP]; u64 up_rx_bytes[ICE_MAX_UP]; u64 up_tx_pkts[ICE_MAX_UP]; u64 up_tx_bytes[ICE_MAX_UP]; }; /* Statistics collected by the MAC */ struct ice_hw_port_stats { /* eth stats collected by the port */ struct ice_eth_stats eth; /* additional port specific stats */ u64 tx_dropped_link_down; /* tdold */ u64 crc_errors; /* crcerrs */ u64 illegal_bytes; /* illerrc */ u64 error_bytes; /* errbc */ u64 mac_local_faults; /* mlfc */ u64 mac_remote_faults; /* mrfc */ u64 rx_len_errors; /* rlec */ u64 link_xon_rx; /* lxonrxc */ u64 link_xoff_rx; /* lxoffrxc */ u64 link_xon_tx; /* lxontxc */ u64 link_xoff_tx; /* lxofftxc */ u64 priority_xon_rx[8]; /* pxonrxc[8] */ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ u64 priority_xon_tx[8]; /* pxontxc[8] */ u64 priority_xoff_tx[8]; /* pxofftxc[8] */ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ u64 rx_size_64; /* prc64 */ u64 rx_size_127; /* prc127 */ u64 rx_size_255; /* prc255 */ u64 rx_size_511; /* prc511 */ u64 rx_size_1023; /* prc1023 */ u64 rx_size_1522; /* prc1522 */ u64 rx_size_big; /* prc9522 */ u64 rx_undersize; /* ruc */ u64 rx_fragments; /* rfc */ u64 rx_oversize; /* roc */ u64 rx_jabber; /* rjc */ u64 tx_size_64; /* ptc64 */ u64 
tx_size_127; /* ptc127 */ u64 tx_size_255; /* ptc255 */ u64 tx_size_511; /* ptc511 */ u64 tx_size_1023; /* ptc1023 */ u64 tx_size_1522; /* ptc1522 */ u64 tx_size_big; /* ptc9522 */ u64 mac_short_pkt_dropped; /* mspdc */ /* EEE LPI */ u32 tx_lpi_status; u32 rx_lpi_status; u64 tx_lpi_count; /* etlpic */ u64 rx_lpi_count; /* erlpic */ }; enum ice_sw_fwd_act_type { ICE_FWD_TO_VSI = 0, ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */ ICE_FWD_TO_Q, ICE_FWD_TO_QGRP, ICE_DROP_PACKET, ICE_INVAL_ACT }; struct ice_aq_get_set_rss_lut_params { u16 vsi_handle; /* software VSI handle */ u16 lut_size; /* size of the LUT buffer */ u8 lut_type; /* type of the LUT (i.e. VSI, PF, Global) */ u8 *lut; /* input RSS LUT for set and output RSS LUT for get */ u8 global_lut_id; /* only valid when lut_type is global */ }; /* Checksum and Shadow RAM pointers */ #define ICE_SR_NVM_CTRL_WORD 0x00 #define ICE_SR_PHY_ANALOG_PTR 0x04 #define ICE_SR_OPTION_ROM_PTR 0x05 #define ICE_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06 #define ICE_SR_AUTO_GENERATED_POINTERS_PTR 0x07 #define ICE_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08 #define ICE_SR_EMP_GLOBAL_MODULE_PTR 0x09 #define ICE_SR_EMP_IMAGE_PTR 0x0B #define ICE_SR_PE_IMAGE_PTR 0x0C #define ICE_SR_CSR_PROTECTED_LIST_PTR 0x0D #define ICE_SR_MNG_CFG_PTR 0x0E #define ICE_SR_EMP_MODULE_PTR 0x0F #define ICE_SR_PBA_BLOCK_PTR 0x16 #define ICE_SR_BOOT_CFG_PTR 0x132 #define ICE_SR_NVM_WOL_CFG 0x19 #define ICE_NVM_OROM_VER_OFF 0x02 #define ICE_SR_NVM_DEV_STARTER_VER 0x18 #define ICE_SR_ALTERNATE_SAN_MAC_ADDR_PTR 0x27 #define ICE_SR_PERMANENT_SAN_MAC_ADDR_PTR 0x28 #define ICE_SR_NVM_MAP_VER 0x29 #define ICE_SR_NVM_IMAGE_VER 0x2A #define ICE_SR_NVM_STRUCTURE_VER 0x2B #define ICE_SR_NVM_EETRACK_LO 0x2D #define ICE_SR_NVM_EETRACK_HI 0x2E #define ICE_NVM_VER_LO_SHIFT 0 #define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT) #define ICE_NVM_VER_HI_SHIFT 12 #define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT) #define ICE_OEM_EETRACK_ID 0xffffffff #define ICE_OROM_VER_PATCH_SHIFT 0 #define ICE_OROM_VER_PATCH_MASK (0xff << ICE_OROM_VER_PATCH_SHIFT) #define ICE_OROM_VER_BUILD_SHIFT 8 #define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT) #define ICE_OROM_VER_SHIFT 24 #define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) #define ICE_SR_VPD_PTR 0x2F #define ICE_SR_PXE_SETUP_PTR 0x30 #define ICE_SR_PXE_CFG_CUST_OPTIONS_PTR 0x31 #define ICE_SR_NVM_ORIGINAL_EETRACK_LO 0x34 #define ICE_SR_NVM_ORIGINAL_EETRACK_HI 0x35 #define ICE_SR_VLAN_CFG_PTR 0x37 #define ICE_SR_POR_REGS_AUTO_LOAD_PTR 0x38 #define ICE_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A #define ICE_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B #define ICE_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C #define ICE_SR_PHY_CFG_SCRIPT_PTR 0x3D #define ICE_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E #define ICE_SR_SW_CHECKSUM_WORD 0x3F #define ICE_SR_PFA_PTR 0x40 #define ICE_SR_1ST_SCRATCH_PAD_PTR 0x41 #define ICE_SR_1ST_NVM_BANK_PTR 0x42 #define ICE_SR_NVM_BANK_SIZE 0x43 #define ICE_SR_1ST_OROM_BANK_PTR 0x44 #define ICE_SR_OROM_BANK_SIZE 0x45 #define ICE_SR_NETLIST_BANK_PTR 0x46 #define ICE_SR_NETLIST_BANK_SIZE 0x47 #define ICE_SR_EMP_SR_SETTINGS_PTR 0x48 #define ICE_SR_CONFIGURATION_METADATA_PTR 0x4D #define ICE_SR_IMMEDIATE_VALUES_PTR 0x4E #define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134 #define ICE_SR_POR_REGISTERS_AUTOLOAD_PTR 0x118 /* CSS Header words */ #define ICE_NVM_CSS_SREV_L 0x14 #define ICE_NVM_CSS_SREV_H 0x15 /* Length of CSS header section in words */ #define ICE_CSS_HEADER_LENGTH 330 /* Offset of Shadow RAM copy in the NVM bank area. 
*/ #define ICE_NVM_SR_COPY_WORD_OFFSET ROUND_UP(ICE_CSS_HEADER_LENGTH, 32) /* Size in bytes of Option ROM trailer */ #define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH) /* The Link Topology Netlist section is stored as a series of words. It is * stored in the NVM as a TLV, with the first two words containing the type * and length. */ #define ICE_NETLIST_LINK_TOPO_MOD_ID 0x011B #define ICE_NETLIST_TYPE_OFFSET 0x0000 #define ICE_NETLIST_LEN_OFFSET 0x0001 /* The Link Topology section follows the TLV header. When reading the netlist * using ice_read_netlist_module, we need to account for the 2-word TLV * header. */ #define ICE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2) #define ICE_LINK_TOPO_MODULE_LEN ICE_NETLIST_LINK_TOPO_OFFSET(0x0000) #define ICE_LINK_TOPO_NODE_COUNT ICE_NETLIST_LINK_TOPO_OFFSET(0x0001) #define ICE_LINK_TOPO_NODE_COUNT_M MAKEMASK(0x3FF, 0) /* The Netlist ID Block is located after all of the Link Topology nodes. */ #define ICE_NETLIST_ID_BLK_SIZE 0x30 #define ICE_NETLIST_ID_BLK_OFFSET(n) ICE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n)) /* netlist ID block field offsets (word offsets) */ #define ICE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02 #define ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03 #define ICE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04 #define ICE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05 #define ICE_NETLIST_ID_BLK_TYPE_LOW 0x06 #define ICE_NETLIST_ID_BLK_TYPE_HIGH 0x07 #define ICE_NETLIST_ID_BLK_REV_LOW 0x08 #define ICE_NETLIST_ID_BLK_REV_HIGH 0x09 #define ICE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n)) #define ICE_NETLIST_ID_BLK_CUST_VER 0x2F /* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */ #define ICE_SR_VPD_SIZE_WORDS 512 #define ICE_SR_PCIE_ALT_SIZE_WORDS 512 #define ICE_SR_CTRL_WORD_1_S 0x06 #define ICE_SR_CTRL_WORD_1_M (0x03 << ICE_SR_CTRL_WORD_1_S) #define ICE_SR_CTRL_WORD_VALID 0x1 #define ICE_SR_CTRL_WORD_OROM_BANK BIT(3) #define ICE_SR_CTRL_WORD_NETLIST_BANK BIT(4) #define ICE_SR_CTRL_WORD_NVM_BANK BIT(5) #define ICE_SR_NVM_PTR_4KB_UNITS BIT(15) /* Shadow RAM related */ #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 #define ICE_SR_BUF_ALIGNMENT 4096 #define ICE_SR_WORDS_IN_1KB 512 /* Checksum should be calculated such that after adding all the words, * including the checksum word itself, the sum should be 0xBABA. */ #define ICE_SR_SW_CHECKSUM_BASE 0xBABA /* Link override related */ #define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10 #define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4 #define ICE_SR_PFA_LINK_OVERRIDE_OFFSET 2 #define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET 1 #define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET 2 #define ICE_FW_API_LINK_OVERRIDE_MAJ 1 #define ICE_FW_API_LINK_OVERRIDE_MIN 5 #define ICE_FW_API_LINK_OVERRIDE_PATCH 2 #define ICE_PBA_FLAG_DFLT 0xFAFA /* Hash redirection LUT for VSI - maximum array size */ #define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4) /* * Defines for values in the VF_PE_DB_SIZE bits in the GLPCI_LBARCTRL register. 
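/* A minimal sketch (hypothetical helper) of the Shadow RAM checksum rule
 * stated above: the 16-bit sum of every word, including the checksum word
 * itself, must equal ICE_SR_SW_CHECKSUM_BASE (0xBABA). The checksum word is
 * therefore the base value minus the sum of all other words.
 */
static u16 ice_example_sr_checksum(const u16 *words, u32 count)
{
	u16 sum = 0;
	u32 i;

	for (i = 0; i < count; i++)
		if (i != ICE_SR_SW_CHECKSUM_WORD)
			sum += words[i];

	return (u16)(ICE_SR_SW_CHECKSUM_BASE - sum);
}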
* This is needed to determine the BAR0 space for the VFs */ #define GLPCI_LBARCTRL_VF_PE_DB_SIZE_0KB 0x0 #define GLPCI_LBARCTRL_VF_PE_DB_SIZE_8KB 0x1 #define GLPCI_LBARCTRL_VF_PE_DB_SIZE_64KB 0x2 /* AQ API version for LLDP_FILTER_CONTROL */ #define ICE_FW_API_LLDP_FLTR_MAJ 1 #define ICE_FW_API_LLDP_FLTR_MIN 7 #define ICE_FW_API_LLDP_FLTR_PATCH 1 + +/* AQ API version for report default configuration */ +#define ICE_FW_API_REPORT_DFLT_CFG_MAJ 1 +#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7 +#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3 + +/* AQ API version for FW health reports */ +#define ICE_FW_API_HEALTH_REPORT_MAJ 1 +#define ICE_FW_API_HEALTH_REPORT_MIN 7 +#define ICE_FW_API_HEALTH_REPORT_PATCH 6 #endif /* _ICE_TYPE_H_ */ diff --git a/sys/dev/ice/ice_vlan_mode.c b/sys/dev/ice/ice_vlan_mode.c index 7d259a1d9c98..7860715264e8 100644 --- a/sys/dev/ice/ice_vlan_mode.c +++ b/sys/dev/ice/ice_vlan_mode.c @@ -1,72 +1,281 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ -#include "ice_vlan_mode.h" #include "ice_common.h" /** - * ice_set_svm - set single VLAN mode + * ice_pkg_get_supported_vlan_mode - determine if the DDP supports Double VLAN mode (DVM) + * @hw: pointer to the HW struct + * @dvm: output variable to determine if DDP supports DVM (true) or SVM (false) + */ +static enum ice_status +ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm) +{ + u16 meta_init_size = sizeof(struct ice_meta_init_section); + struct ice_meta_init_section *sect; + struct ice_buf_build *bld; + enum ice_status status; + + /* if anything fails, we assume there is no DVM support */ + *dvm = false; + + bld = ice_pkg_buf_alloc_single_section(hw, + ICE_SID_RXPARSER_METADATA_INIT, + meta_init_size, (void **)&sect); + if (!bld) + return ICE_ERR_NO_MEMORY; + + /* only need to read a single section */ + sect->count = CPU_TO_LE16(1); + sect->offset = CPU_TO_LE16(ICE_META_VLAN_MODE_ENTRY); + + status = ice_aq_upload_section(hw, + (struct ice_buf_hdr *)ice_pkg_buf(bld), + ICE_PKG_BUF_SIZE, NULL); + if (!status) { + ice_declare_bitmap(entry, ICE_META_INIT_BITS); + u32 arr[ICE_META_INIT_DW_CNT]; + u16 i; + + /* convert to host bitmap format */ + for (i = 0; i < ICE_META_INIT_DW_CNT; i++) + arr[i] = LE32_TO_CPU(sect->entry[0].bm[i]); + + ice_bitmap_from_array32(entry, arr, (u16)ICE_META_INIT_BITS); + + /* check if DVM is supported */ + *dvm = ice_is_bit_set(entry, ICE_META_VLAN_MODE_BIT); + } + + ice_pkg_buf_free(hw, bld); + + return status; +} + +/** + * ice_aq_get_vlan_mode - get the VLAN mode of the device + * @hw: pointer to the HW structure + * @get_params: structure FW fills in based on the current VLAN mode config + * + * Get VLAN Mode Parameters (0x020D) + */ +static enum ice_status +ice_aq_get_vlan_mode(struct ice_hw *hw, + struct ice_aqc_get_vlan_mode *get_params) +{ + struct ice_aq_desc desc; + + if (!get_params) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_get_vlan_mode_parameters); + + return ice_aq_send_cmd(hw, &desc, get_params, sizeof(*get_params), + NULL); +} + +/** + * ice_aq_is_dvm_ena - query FW to check if double VLAN mode is enabled * @hw: pointer to the HW structure + * + * Returns true if the hardware/firmware is configured in double VLAN mode, + * otherwise returns false, signaling that the hardware/firmware is configured + * in single VLAN mode. + * + * Also returns false if this call fails for any reason (e.g., the firmware + * doesn't support this AQ call). */ -static enum ice_status ice_set_svm_dflt(struct ice_hw *hw) +static bool ice_aq_is_dvm_ena(struct ice_hw *hw) { - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); + struct ice_aqc_get_vlan_mode get_params = { 0 }; + enum ice_status status; + + status = ice_aq_get_vlan_mode(hw, &get_params); + if (status) { + ice_debug(hw, ICE_DBG_AQ, "Failed to get VLAN mode, status %d\n", + status); + return false; + } - return ice_aq_set_port_params(hw->port_info, 0, false, false, false, NULL); + return (get_params.vlan_mode & ICE_AQ_VLAN_MODE_DVM_ENA); } /** - * ice_init_vlan_mode_ops - initialize VLAN mode configuration ops + * ice_is_dvm_ena - check if double VLAN mode is enabled * @hw: pointer to the HW structure + * + * The device is configured in single or double VLAN mode on initialization and + * this cannot be dynamically changed during runtime. Because of this, there is + * no need to make an AQ call every time the driver needs to know the VLAN + * mode. Instead, use the cached VLAN mode.
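/* A sketch of a hypothetical consumer of the cached mode: choosing the TPID
 * to program into the new vlan.tpid filter field. Using the 0x88A8 S-tag
 * for the outer tag under DVM and the classic 0x8100 C-tag otherwise is an
 * illustrative policy, not something this change mandates.
 */
static u16 ice_example_fltr_tpid(struct ice_hw *hw)
{
	return ice_is_dvm_ena(hw) ? 0x88A8 : 0x8100;
}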
*/ -void ice_init_vlan_mode_ops(struct ice_hw *hw) +bool ice_is_dvm_ena(struct ice_hw *hw) { - hw->vlan_mode_ops.set_dvm = NULL; - hw->vlan_mode_ops.set_svm = ice_set_svm_dflt; + return hw->dvm_ena; } /** - * ice_set_vlan_mode + * ice_cache_vlan_mode - cache VLAN mode after DDP is downloaded * @hw: pointer to the HW structure + * + * This is only called after downloading the DDP and after the global + * configuration lock has been released because all ports on a device need to + * cache the VLAN mode. */ -enum ice_status ice_set_vlan_mode(struct ice_hw *hw) +void ice_cache_vlan_mode(struct ice_hw *hw) { - enum ice_status status = ICE_ERR_NOT_IMPL; + hw->dvm_ena = ice_aq_is_dvm_ena(hw) ? true : false; +} - if (hw->vlan_mode_ops.set_dvm) - status = hw->vlan_mode_ops.set_dvm(hw); +/** + * ice_is_dvm_supported - check if Double VLAN Mode is supported + * @hw: pointer to the hardware structure + * + * Returns true if Double VLAN Mode (DVM) is supported and false if only Single + * VLAN Mode (SVM) is supported. In order for DVM to be supported the DDP and + * firmware must support it, otherwise only SVM is supported. This function + * should only be called while the global config lock is held and after the + * package has been successfully downloaded. + */ +static bool ice_is_dvm_supported(struct ice_hw *hw) +{ + struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 }; + enum ice_status status; + bool pkg_supports_dvm; + status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm); + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Failed to get supported VLAN mode, status %d\n", + status); + return false; + } + + if (!pkg_supports_dvm) + return false; + + /* If firmware returns success, then it supports DVM, else it only + * supports SVM + */ + status = ice_aq_get_vlan_mode(hw, &get_vlan_mode); + if (status) { + ice_debug(hw, ICE_DBG_NVM, "Failed to get VLAN mode, status %d\n", + status); + return false; + } + + return true; +} + +/** + * ice_aq_set_vlan_mode - set the VLAN mode of the device + * @hw: pointer to the HW structure + * @set_params: requested VLAN mode configuration + * + * Set VLAN Mode Parameters (0x020C) + */ +static enum ice_status +ice_aq_set_vlan_mode(struct ice_hw *hw, + struct ice_aqc_set_vlan_mode *set_params) +{ + u8 rdma_packet, mng_vlan_prot_id; + struct ice_aq_desc desc; + + if (!set_params) + return ICE_ERR_PARAM; + + if (set_params->l2tag_prio_tagging > ICE_AQ_VLAN_PRIO_TAG_MAX) + return ICE_ERR_PARAM; + + rdma_packet = set_params->rdma_packet; + if (rdma_packet != ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING && + rdma_packet != ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING) + return ICE_ERR_PARAM; + + mng_vlan_prot_id = set_params->mng_vlan_prot_id; + if (mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER && + mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_set_vlan_mode_parameters); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params), + NULL); +} + +/** + * ice_set_svm - set single VLAN mode + * @hw: pointer to the HW structure + */ +static enum ice_status ice_set_svm(struct ice_hw *hw) +{ + struct ice_aqc_set_vlan_mode *set_params; + enum ice_status status; + + status = ice_aq_set_port_params(hw->port_info, 0, false, false, false, NULL); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to set port parameters for single VLAN mode\n"); + return status; + } + + set_params = (struct ice_aqc_set_vlan_mode *) + ice_malloc(hw, 
sizeof(*set_params)); + if (!set_params) + return ICE_ERR_NO_MEMORY; + + /* default configuration for SVM configurations */ + set_params->l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG; + set_params->rdma_packet = ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING; + set_params->mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER; + + status = ice_aq_set_vlan_mode(hw, set_params); if (status) - return hw->vlan_mode_ops.set_svm(hw); + ice_debug(hw, ICE_DBG_INIT, "Failed to configure port in single VLAN mode\n"); + ice_free(hw, set_params); return status; } + +/** + * ice_set_vlan_mode + * @hw: pointer to the HW structure + */ +enum ice_status ice_set_vlan_mode(struct ice_hw *hw) +{ + + if (!ice_is_dvm_supported(hw)) + return ICE_SUCCESS; + + return ice_set_svm(hw); +} diff --git a/sys/dev/ice/ice_vlan_mode.h b/sys/dev/ice/ice_vlan_mode.h index a90c69aea615..e18b4eef90f4 100644 --- a/sys/dev/ice/ice_vlan_mode.h +++ b/sys/dev/ice/ice_vlan_mode.h @@ -1,60 +1,44 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_VLAN_MODE_H_ #define _ICE_VLAN_MODE_H_ +#include "ice_osdep.h" + struct ice_hw; +bool ice_is_dvm_ena(struct ice_hw *hw); +void ice_cache_vlan_mode(struct ice_hw *hw); enum ice_status ice_set_vlan_mode(struct ice_hw *hw); -void ice_init_vlan_mode_ops(struct ice_hw *hw); - -/* This structure defines the VLAN mode configuration interface. It is used to set the VLAN mode. - * - * Note: These operations will be called while the global configuration lock is held. - * - * enum ice_status (*set_svm)(struct ice_hw *hw); - * This function is called when the DDP and/or Firmware don't support double VLAN mode (DVM) or - * if the set_dvm op is not implemented and/or returns failure. It will set the device in - * single VLAN mode (SVM). - * - * enum ice_status (*set_dvm)(struct ice_hw *hw); - * This function is called when the DDP and Firmware support double VLAN mode (DVM). It should - * be implemented to set double VLAN mode. 
If it fails or remains unimplemented, set_svm will - * be called as a fallback plan. - */ -struct ice_vlan_mode_ops { - enum ice_status (*set_svm)(struct ice_hw *hw); - enum ice_status (*set_dvm)(struct ice_hw *hw); -}; #endif /* _ICE_VLAN_MODE_H */ diff --git a/sys/dev/ice/if_ice_iflib.c b/sys/dev/ice/if_ice_iflib.c index 25eaae8ed26d..d1f01ed64db4 100644 --- a/sys/dev/ice/if_ice_iflib.c +++ b/sys/dev/ice/if_ice_iflib.c @@ -1,2933 +1,2956 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file if_ice_iflib.c * @brief iflib driver implementation * * Contains the main entry point for the iflib driver implementation. It * implements the various ifdi driver methods, and sets up the module and * driver values to load an iflib driver. 
 */

#include "ice_iflib.h"
#include "ice_drv_info.h"
#include "ice_switch.h"
#include "ice_sched.h"

#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/smp.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

/*
 * Device method prototypes
 */

static void *ice_register(device_t);
static int ice_if_attach_pre(if_ctx_t);
static int ice_attach_pre_recovery_mode(struct ice_softc *sc);
static int ice_if_attach_post(if_ctx_t);
static void ice_attach_post_recovery_mode(struct ice_softc *sc);
static int ice_if_detach(if_ctx_t);
static int ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
static int ice_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
static int ice_if_msix_intr_assign(if_ctx_t ctx, int msix);
static void ice_if_queues_free(if_ctx_t ctx);
static int ice_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
static void ice_if_intr_enable(if_ctx_t ctx);
static void ice_if_intr_disable(if_ctx_t ctx);
static int ice_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
static int ice_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
static int ice_if_promisc_set(if_ctx_t ctx, int flags);
static void ice_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
static int ice_if_media_change(if_ctx_t ctx);
static void ice_if_init(if_ctx_t ctx);
static void ice_if_timer(if_ctx_t ctx, uint16_t qid);
static void ice_if_update_admin_status(if_ctx_t ctx);
static void ice_if_multi_set(if_ctx_t ctx);
static void ice_if_vlan_register(if_ctx_t ctx, u16 vtag);
static void ice_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
static void ice_if_stop(if_ctx_t ctx);
static uint64_t ice_if_get_counter(if_ctx_t ctx, ift_counter counter);
static int ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int ice_if_suspend(if_ctx_t ctx);
static int ice_if_resume(if_ctx_t ctx);
static int ice_msix_que(void *arg);
static int ice_msix_admin(void *arg);

/*
 * Helper function prototypes
 */

static int ice_pci_mapping(struct ice_softc *sc);
static void ice_free_pci_mapping(struct ice_softc *sc);
static void ice_update_link_status(struct ice_softc *sc, bool update_media);
static void ice_init_device_features(struct ice_softc *sc);
static void ice_init_tx_tracking(struct ice_vsi *vsi);
static void ice_handle_reset_event(struct ice_softc *sc);
static void ice_handle_pf_reset_request(struct ice_softc *sc);
static void ice_prepare_for_reset(struct ice_softc *sc);
static int ice_rebuild_pf_vsi_qmap(struct ice_softc *sc);
static void ice_rebuild(struct ice_softc *sc);
static void ice_rebuild_recovery_mode(struct ice_softc *sc);
static void ice_free_irqvs(struct ice_softc *sc);
static void ice_update_rx_mbuf_sz(struct ice_softc *sc);
static void ice_poll_for_media_avail(struct ice_softc *sc);
static void ice_setup_scctx(struct ice_softc *sc);
static int ice_allocate_msix(struct ice_softc *sc);
static void ice_admin_timer(void *arg);
static void ice_transition_recovery_mode(struct ice_softc *sc);
static void ice_transition_safe_mode(struct ice_softc *sc);

/*
 * Device Interface Declaration
 */

/**
 * @var ice_methods
 * @brief ice driver method entry points
 *
 * List of device methods implementing the generic device interface used by
 * the device stack to interact with the ice driver. Since this is an iflib
 * driver, most of the methods point to the generic iflib implementation.
*/ static device_method_t ice_methods[] = { /* Device interface */ DEVMETHOD(device_register, ice_register), DEVMETHOD(device_probe, iflib_device_probe_vendor), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD_END }; /** * @var ice_iflib_methods * @brief iflib method entry points * * List of device methods used by the iflib stack to interact with this * driver. These are the real main entry points used to interact with this * driver. */ static device_method_t ice_iflib_methods[] = { DEVMETHOD(ifdi_attach_pre, ice_if_attach_pre), DEVMETHOD(ifdi_attach_post, ice_if_attach_post), DEVMETHOD(ifdi_detach, ice_if_detach), DEVMETHOD(ifdi_tx_queues_alloc, ice_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, ice_if_rx_queues_alloc), DEVMETHOD(ifdi_msix_intr_assign, ice_if_msix_intr_assign), DEVMETHOD(ifdi_queues_free, ice_if_queues_free), DEVMETHOD(ifdi_mtu_set, ice_if_mtu_set), DEVMETHOD(ifdi_intr_enable, ice_if_intr_enable), DEVMETHOD(ifdi_intr_disable, ice_if_intr_disable), DEVMETHOD(ifdi_rx_queue_intr_enable, ice_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queue_intr_enable, ice_if_tx_queue_intr_enable), DEVMETHOD(ifdi_promisc_set, ice_if_promisc_set), DEVMETHOD(ifdi_media_status, ice_if_media_status), DEVMETHOD(ifdi_media_change, ice_if_media_change), DEVMETHOD(ifdi_init, ice_if_init), DEVMETHOD(ifdi_stop, ice_if_stop), DEVMETHOD(ifdi_timer, ice_if_timer), DEVMETHOD(ifdi_update_admin_status, ice_if_update_admin_status), DEVMETHOD(ifdi_multi_set, ice_if_multi_set), DEVMETHOD(ifdi_vlan_register, ice_if_vlan_register), DEVMETHOD(ifdi_vlan_unregister, ice_if_vlan_unregister), DEVMETHOD(ifdi_get_counter, ice_if_get_counter), DEVMETHOD(ifdi_priv_ioctl, ice_if_priv_ioctl), DEVMETHOD(ifdi_i2c_req, ice_if_i2c_req), DEVMETHOD(ifdi_suspend, ice_if_suspend), DEVMETHOD(ifdi_resume, ice_if_resume), DEVMETHOD_END }; /** * @var ice_driver * @brief driver structure for the generic device stack * * driver_t definition used to setup the generic device methods. */ static driver_t ice_driver = { .name = "ice", .methods = ice_methods, .size = sizeof(struct ice_softc), }; /** * @var ice_iflib_driver * @brief driver structure for the iflib stack * * driver_t definition used to setup the iflib device methods. */ static driver_t ice_iflib_driver = { .name = "ice", .methods = ice_iflib_methods, .size = sizeof(struct ice_softc), }; extern struct if_txrx ice_txrx; extern struct if_txrx ice_recovery_txrx; /** * @var ice_sctx * @brief ice driver shared context * * Structure defining shared values (context) that is used by all instances of * the device. Primarily used to setup details about how the iflib stack * should treat this driver. Also defines the default, minimum, and maximum * number of descriptors in each ring. */ static struct if_shared_ctx ice_sctx = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = ICE_MAX_FRAME_SIZE, /* We could technically set this as high as ICE_MAX_DMA_SEG_SIZE, but * that doesn't make sense since that would be larger than the maximum * size of a single packet. */ .isc_tx_maxsegsize = ICE_MAX_FRAME_SIZE, /* XXX: This is only used by iflib to ensure that * scctx->isc_tx_tso_size_max + the VLAN header is a valid size. */ .isc_tso_maxsize = ICE_TSO_SIZE + sizeof(struct ether_vlan_header), /* XXX: This is used by iflib to set the number of segments in the TSO * DMA tag. 
However, scctx->isc_tx_tso_segsize_max is used to set the * related ifnet parameter. */ .isc_tso_maxsegsize = ICE_MAX_DMA_SEG_SIZE, .isc_rx_maxsize = ICE_MAX_FRAME_SIZE, .isc_rx_nsegments = ICE_MAX_RX_SEGS, .isc_rx_maxsegsize = ICE_MAX_FRAME_SIZE, .isc_nfl = 1, .isc_ntxqs = 1, .isc_nrxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = ice_vendor_info_array, .isc_driver_version = __DECONST(char *, ice_driver_version), .isc_driver = &ice_iflib_driver, /* * IFLIB_NEED_SCRATCH ensures that mbufs have scratch space available * for hardware checksum offload * * IFLIB_TSO_INIT_IP ensures that the TSO packets have zeroed out the * IP sum field, required by our hardware to calculate valid TSO * checksums. * * IFLIB_ADMIN_ALWAYS_RUN ensures that the administrative task runs * even when the interface is down. * * IFLIB_SKIP_MSIX allows the driver to handle allocating MSI-X * vectors manually instead of relying on iflib code to do this. */ .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_ADMIN_ALWAYS_RUN | IFLIB_SKIP_MSIX, .isc_nrxd_min = {ICE_MIN_DESC_COUNT}, .isc_ntxd_min = {ICE_MIN_DESC_COUNT}, .isc_nrxd_max = {ICE_IFLIB_MAX_DESC_COUNT}, .isc_ntxd_max = {ICE_IFLIB_MAX_DESC_COUNT}, .isc_nrxd_default = {ICE_DEFAULT_DESC_COUNT}, .isc_ntxd_default = {ICE_DEFAULT_DESC_COUNT}, }; /** * @var ice_devclass * @brief ice driver device class * * device class used to setup the ice driver module kobject class. */ devclass_t ice_devclass; DRIVER_MODULE(ice, pci, ice_driver, ice_devclass, ice_module_event_handler, 0); MODULE_VERSION(ice, 1); MODULE_DEPEND(ice, pci, 1, 1, 1); MODULE_DEPEND(ice, ether, 1, 1, 1); MODULE_DEPEND(ice, iflib, 1, 1, 1); IFLIB_PNP_INFO(pci, ice, ice_vendor_info_array); /* Static driver-wide sysctls */ #include "ice_iflib_sysctls.h" /** * ice_pci_mapping - Map PCI BAR memory * @sc: device private softc * * Map PCI BAR 0 for device operation. */ static int ice_pci_mapping(struct ice_softc *sc) { int rc; /* Map BAR0 */ rc = ice_map_bar(sc->dev, &sc->bar0, 0); if (rc) return rc; return 0; } /** * ice_free_pci_mapping - Release PCI BAR memory * @sc: device private softc * * Release PCI BARs which were previously mapped by ice_pci_mapping(). */ static void ice_free_pci_mapping(struct ice_softc *sc) { /* Free BAR0 */ ice_free_bar(sc->dev, &sc->bar0); } /* * Device methods */ /** * ice_register - register device method callback * @dev: the device being registered * * Returns a pointer to the shared context structure, which is used by iflib. */ static void * ice_register(device_t dev __unused) { return &ice_sctx; } /* ice_register */ /** * ice_setup_scctx - Setup the iflib softc context structure * @sc: the device private structure * * Setup the parameters in if_softc_ctx_t structure used by the iflib stack * when loading. */ static void ice_setup_scctx(struct ice_softc *sc) { if_softc_ctx_t scctx = sc->scctx; struct ice_hw *hw = &sc->hw; bool safe_mode, recovery_mode; safe_mode = ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE); recovery_mode = ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE); /* * If the driver loads in Safe mode or Recovery mode, limit iflib to * a single queue pair. */ if (safe_mode || recovery_mode) { scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1; scctx->isc_ntxqsets_max = 1; scctx->isc_nrxqsets_max = 1; } else { /* * iflib initially sets the isc_ntxqsets and isc_nrxqsets to * the values of the override sysctls. Cache these initial * values so that the driver can be aware of what the iflib * sysctl value is when setting up MSI-X vectors. 
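		 *
		 * For example (a sketch; the tunables belong to iflib, not to
		 * this driver): booting with dev.ice.0.iflib.override_ntxqs=4
		 * makes iflib seed isc_ntxqsets with 4, and the assignments
		 * below cache that request before it is clamped against the
		 * hardware maximums.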
*/ sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets; sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets; if (scctx->isc_ntxqsets == 0) scctx->isc_ntxqsets = hw->func_caps.common_cap.rss_table_size; if (scctx->isc_nrxqsets == 0) scctx->isc_nrxqsets = hw->func_caps.common_cap.rss_table_size; scctx->isc_ntxqsets_max = hw->func_caps.common_cap.num_txq; scctx->isc_nrxqsets_max = hw->func_caps.common_cap.num_rxq; /* * Sanity check that the iflib sysctl values are within the * maximum supported range. */ if (sc->ifc_sysctl_ntxqs > scctx->isc_ntxqsets_max) sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets_max; if (sc->ifc_sysctl_nrxqs > scctx->isc_nrxqsets_max) sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets_max; } scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(struct ice_tx_desc), DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union ice_32b_rx_flex_desc), DBA_ALIGN); scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS; scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS; scctx->isc_tx_tso_size_max = ICE_TSO_SIZE; scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE; scctx->isc_msix_bar = PCIR_BAR(ICE_MSIX_BAR); scctx->isc_rss_table_size = hw->func_caps.common_cap.rss_table_size; /* * If the driver loads in recovery mode, disable Tx/Rx functionality */ if (recovery_mode) scctx->isc_txrx = &ice_recovery_txrx; else scctx->isc_txrx = &ice_txrx; /* * If the driver loads in Safe mode or Recovery mode, disable * advanced features including hardware offloads. */ if (safe_mode || recovery_mode) { scctx->isc_capenable = ICE_SAFE_CAPS; scctx->isc_tx_csum_flags = 0; } else { scctx->isc_capenable = ICE_FULL_CAPS; scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD; } scctx->isc_capabilities = scctx->isc_capenable; } /* ice_setup_scctx */ /** * ice_if_attach_pre - Early device attach logic * @ctx: the iflib context structure * * Called by iflib during the attach process. Earliest main driver entry * point which performs necessary hardware and driver initialization. Called * before the Tx and Rx queues are allocated. */ static int ice_if_attach_pre(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); enum ice_fw_modes fw_mode; enum ice_status status; if_softc_ctx_t scctx; struct ice_hw *hw; device_t dev; int err; device_printf(iflib_get_dev(ctx), "Loading the iflib ice driver\n"); sc->ctx = ctx; sc->media = iflib_get_media(ctx); sc->sctx = iflib_get_sctx(ctx); sc->iflib_ctx_lock = iflib_ctx_lock_get(ctx); dev = sc->dev = iflib_get_dev(ctx); scctx = sc->scctx = iflib_get_softc_ctx(ctx); hw = &sc->hw; hw->back = sc; snprintf(sc->admin_mtx_name, sizeof(sc->admin_mtx_name), "%s:admin", device_get_nameunit(dev)); mtx_init(&sc->admin_mtx, sc->admin_mtx_name, NULL, MTX_DEF); callout_init_mtx(&sc->admin_timer, &sc->admin_mtx, 0); ASSERT_CTX_LOCKED(sc); if (ice_pci_mapping(sc)) { err = (ENXIO); goto destroy_admin_timer; } /* Save off the PCI information */ ice_save_pci_info(hw, dev); /* create tunables as early as possible */ ice_add_device_tunables(sc); /* Setup ControlQ lengths */ ice_set_ctrlq_len(hw); fw_mode = ice_get_fw_mode(hw); if (fw_mode == ICE_FW_MODE_REC) { device_printf(dev, "Firmware recovery mode detected. Limiting functionality. 
Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); err = ice_attach_pre_recovery_mode(sc); if (err) goto free_pci_mapping; return (0); } /* Initialize the hw data structure */ status = ice_init_hw(hw); if (status) { if (status == ICE_ERR_FW_API_VER) { /* Enter recovery mode, so that the driver remains * loaded. This way, if the system administrator * cannot update the driver, they may still attempt to * downgrade the NVM. */ err = ice_attach_pre_recovery_mode(sc); if (err) goto free_pci_mapping; return (0); } else { err = EIO; device_printf(dev, "Unable to initialize hw, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } goto free_pci_mapping; } /* Notify firmware of the device driver version */ err = ice_send_version(sc); if (err) goto deinit_hw; ice_load_pkg_file(sc); err = ice_init_link_events(sc); if (err) { device_printf(dev, "ice_init_link_events failed: %s\n", ice_err_str(err)); goto deinit_hw; } ice_print_nvm_version(sc); ice_init_device_features(sc); /* Setup the MAC address */ iflib_set_mac(ctx, hw->port_info->mac.lan_addr); /* Setup the iflib softc context structure */ ice_setup_scctx(sc); /* Initialize the Tx queue manager */ err = ice_resmgr_init(&sc->tx_qmgr, hw->func_caps.common_cap.num_txq); if (err) { device_printf(dev, "Unable to initialize Tx queue manager: %s\n", ice_err_str(err)); goto deinit_hw; } /* Initialize the Rx queue manager */ err = ice_resmgr_init(&sc->rx_qmgr, hw->func_caps.common_cap.num_rxq); if (err) { device_printf(dev, "Unable to initialize Rx queue manager: %s\n", ice_err_str(err)); goto free_tx_qmgr; } /* Initialize the interrupt resource manager */ err = ice_alloc_intr_tracking(sc); if (err) /* Errors are already printed */ goto free_rx_qmgr; /* Determine maximum number of VSIs we'll prepare for */ sc->num_available_vsi = min(ICE_MAX_VSI_AVAILABLE, hw->func_caps.guar_num_vsi); if (!sc->num_available_vsi) { err = EIO; device_printf(dev, "No VSIs allocated to host\n"); goto free_intr_tracking; } /* Allocate storage for the VSI pointers */ sc->all_vsi = (struct ice_vsi **) malloc(sizeof(struct ice_vsi *) * sc->num_available_vsi, M_ICE, M_WAITOK | M_ZERO); if (!sc->all_vsi) { err = ENOMEM; device_printf(dev, "Unable to allocate VSI array\n"); goto free_intr_tracking; } /* * Prepare the statically allocated primary PF VSI in the softc * structure. Other VSIs will be dynamically allocated as needed. 
 */
	ice_setup_pf_vsi(sc);

	err = ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max,
	    scctx->isc_nrxqsets_max);
	if (err) {
		device_printf(dev, "Unable to allocate VSI Queue maps\n");
		goto free_main_vsi;
	}

	/* Allocate MSI-X vectors (due to isc_flags IFLIB_SKIP_MSIX) */
	err = ice_allocate_msix(sc);
	if (err)
		goto free_main_vsi;

	return 0;

free_main_vsi:
	/* ice_release_vsi will free the queue maps if they were allocated */
	ice_release_vsi(&sc->pf_vsi);
	free(sc->all_vsi, M_ICE);
	sc->all_vsi = NULL;
free_intr_tracking:
	ice_free_intr_tracking(sc);
free_rx_qmgr:
	ice_resmgr_destroy(&sc->rx_qmgr);
free_tx_qmgr:
	ice_resmgr_destroy(&sc->tx_qmgr);
deinit_hw:
	ice_deinit_hw(hw);
free_pci_mapping:
	ice_free_pci_mapping(sc);
destroy_admin_timer:
	mtx_lock(&sc->admin_mtx);
	callout_stop(&sc->admin_timer);
	mtx_unlock(&sc->admin_mtx);
	mtx_destroy(&sc->admin_mtx);
	return err;
} /* ice_if_attach_pre */

/**
 * ice_attach_pre_recovery_mode - Limited driver attach_pre for FW recovery
 * @sc: the device private softc
 *
 * Loads the device driver in limited Firmware Recovery mode, intended to
 * allow users to update the firmware to attempt to recover the device.
 *
 * @remark We may enter recovery mode in case either (a) the firmware is
 * detected to be in an invalid state and must be re-programmed, or (b) the
 * driver detects that the loaded firmware has an incompatible API version
 * that the driver cannot operate with.
 */
static int
ice_attach_pre_recovery_mode(struct ice_softc *sc)
{
	ice_set_state(&sc->state, ICE_STATE_RECOVERY_MODE);

	/* Setup the iflib softc context */
	ice_setup_scctx(sc);

	/* Setup the PF VSI back pointer */
	sc->pf_vsi.sc = sc;

	/*
	 * We still need to allocate MSI-X vectors since we need one vector to
	 * run the admin interrupt
	 */
	return ice_allocate_msix(sc);
}

/**
 * ice_update_link_status - notify OS of link state change
 * @sc: device private softc structure
 * @update_media: true if we should update media even if link didn't change
 *
 * Called to notify iflib core of link status changes. Should be called once
 * during attach_post, and whenever link status changes during runtime.
 *
 * This call only updates the currently supported media types if the link
 * status changed, or if update_media is set to true.
 */
static void
ice_update_link_status(struct ice_softc *sc, bool update_media)
{
	struct ice_hw *hw = &sc->hw;
	enum ice_status status;

	/* Never report link up when in recovery mode */
	if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
		return;

	/* Report link status to iflib only once each time it changes */
	if (!ice_testandset_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED)) {
		if (sc->link_up) { /* link is up */
			uint64_t baudrate = ice_aq_speed_to_rate(sc->hw.port_info);

			ice_set_default_local_lldp_mib(sc);

			iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
			ice_link_up_msg(sc);

			update_media = true;
		} else { /* link is down */
			iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);

			update_media = true;
		}
	}

	/* Update the supported media types */
	if (update_media) {
		status = ice_add_media_types(sc, sc->media);
		if (status)
			device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
				      ice_status_str(status),
				      ice_aq_str(hw->adminq.sq_last_status));
	}

	/* TODO: notify VFs of link state change */
}

/**
 * ice_if_attach_post - Late device attach logic
 * @ctx: the iflib context structure
 *
 * Called by iflib to finish up attaching the device. Performs any attach
 * logic which must wait until after the Tx and Rx queues have been
 * allocated.
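 *
 * As a worked example of the frame sizing done here: the default 1500 byte
 * MTU yields an isc_max_frame_size of 1500 + 14 (ETHER_HDR_LEN) +
 * 4 (ETHER_CRC_LEN) + 4 (ETHER_VLAN_ENCAP_LEN) = 1522 bytes.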
*/ static int ice_if_attach_post(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); int err; ASSERT_CTX_LOCKED(sc); /* We don't yet support loading if MSI-X is not supported */ if (sc->scctx->isc_intr != IFLIB_INTR_MSIX) { device_printf(sc->dev, "The ice driver does not support loading without MSI-X\n"); return (ENOTSUP); } /* The ifnet structure hasn't yet been initialized when the attach_pre * handler is called, so wait until attach_post to setup the * isc_max_frame_size. */ sc->ifp = ifp; sc->scctx->isc_max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; /* * If we are in recovery mode, only perform a limited subset of * initialization to support NVM recovery. */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) { ice_attach_post_recovery_mode(sc); return (0); } sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size; err = ice_initialize_vsi(&sc->pf_vsi); if (err) { device_printf(sc->dev, "Unable to initialize Main VSI: %s\n", ice_err_str(err)); return err; } + /* Enable FW health event reporting */ + ice_init_health_events(sc); + /* Configure the main PF VSI for RSS */ err = ice_config_rss(&sc->pf_vsi); if (err) { device_printf(sc->dev, "Unable to configure RSS for the main VSI, err %s\n", ice_err_str(err)); return err; } /* Configure switch to drop transmitted LLDP and PAUSE frames */ err = ice_cfg_pf_ethertype_filters(sc); if (err) return err; ice_get_and_print_bus_info(sc); ice_set_link_management_mode(sc); ice_init_saved_phy_cfg(sc); ice_add_device_sysctls(sc); /* Get DCBX/LLDP state and start DCBX agent */ ice_init_dcb_setup(sc); /* Setup link configuration parameters */ ice_init_link_configuration(sc); ice_update_link_status(sc, true); /* Configure interrupt causes for the administrative interrupt */ ice_configure_misc_interrupts(sc); /* Enable ITR 0 right away, so that we can handle admin interrupts */ ice_enable_intr(&sc->hw, sc->irqvs[0].me); /* Start the admin timer */ mtx_lock(&sc->admin_mtx); callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc); mtx_unlock(&sc->admin_mtx); return 0; } /* ice_if_attach_post */ /** * ice_attach_post_recovery_mode - Limited driver attach_post for FW recovery * @sc: the device private softc * * Performs minimal work to prepare the driver to recover an NVM in case the * firmware is in recovery mode. */ static void ice_attach_post_recovery_mode(struct ice_softc *sc) { /* Configure interrupt causes for the administrative interrupt */ ice_configure_misc_interrupts(sc); /* Enable ITR 0 right away, so that we can handle admin interrupts */ ice_enable_intr(&sc->hw, sc->irqvs[0].me); /* Start the admin timer */ mtx_lock(&sc->admin_mtx); callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc); mtx_unlock(&sc->admin_mtx); } /** * ice_free_irqvs - Free IRQ vector memory * @sc: the device private softc structure * * Free IRQ vector memory allocated during ice_if_msix_intr_assign. 
*/ static void ice_free_irqvs(struct ice_softc *sc) { struct ice_vsi *vsi = &sc->pf_vsi; if_ctx_t ctx = sc->ctx; int i; /* If the irqvs array is NULL, then there are no vectors to free */ if (sc->irqvs == NULL) return; /* Free the IRQ vectors */ for (i = 0; i < sc->num_irq_vectors; i++) iflib_irq_free(ctx, &sc->irqvs[i].irq); /* Clear the irqv pointers */ for (i = 0; i < vsi->num_rx_queues; i++) vsi->rx_queues[i].irqv = NULL; for (i = 0; i < vsi->num_tx_queues; i++) vsi->tx_queues[i].irqv = NULL; /* Release the vector array memory */ free(sc->irqvs, M_ICE); sc->irqvs = NULL; sc->num_irq_vectors = 0; } /** * ice_if_detach - Device driver detach logic * @ctx: iflib context structure * * Perform device shutdown logic to detach the device driver. * * Note that there is no guarantee of the ordering of ice_if_queues_free() and * ice_if_detach(). It is possible for the functions to be called in either * order, and they must not assume to have a strict ordering. */ static int ice_if_detach(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; int i; ASSERT_CTX_LOCKED(sc); /* Indicate that we're detaching */ ice_set_state(&sc->state, ICE_STATE_DETACHING); /* Stop the admin timer */ mtx_lock(&sc->admin_mtx); callout_stop(&sc->admin_timer); mtx_unlock(&sc->admin_mtx); mtx_destroy(&sc->admin_mtx); /* Free allocated media types */ ifmedia_removeall(sc->media); /* Free the Tx and Rx sysctl contexts, and assign NULL to the node * pointers. Note, the calls here and those in ice_if_queues_free() * are *BOTH* necessary, as we cannot guarantee which path will be * run first */ ice_vsi_del_txqs_ctx(vsi); ice_vsi_del_rxqs_ctx(vsi); /* Release MSI-X resources */ ice_free_irqvs(sc); for (i = 0; i < sc->num_available_vsi; i++) { if (sc->all_vsi[i]) ice_release_vsi(sc->all_vsi[i]); } if (sc->all_vsi) { free(sc->all_vsi, M_ICE); sc->all_vsi = NULL; } /* Release MSI-X memory */ pci_release_msi(sc->dev); if (sc->msix_table != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->msix_table), sc->msix_table); sc->msix_table = NULL; } ice_free_intr_tracking(sc); /* Destroy the queue managers */ ice_resmgr_destroy(&sc->tx_qmgr); ice_resmgr_destroy(&sc->rx_qmgr); if (!ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) ice_deinit_hw(&sc->hw); ice_free_pci_mapping(sc); return 0; } /* ice_if_detach */ /** * ice_if_tx_queues_alloc - Allocate Tx queue memory * @ctx: iflib context structure * @vaddrs: virtual addresses for the queue memory * @paddrs: physical addresses for the queue memory * @ntxqs: the number of Tx queues per set (should always be 1) * @ntxqsets: the number of Tx queue sets to allocate * * Called by iflib to allocate Tx queues for the device. Allocates driver * memory to track each queue, the status arrays used for descriptor * status reporting, and Tx queue sysctls. 
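 *
 * A sizing sketch, assuming a ring of 1024 descriptors (isc_ntxd[0] is
 * configurable, so treat the number as an example): each queue's report
 * status array is allocated as
 *
 *	txq->tx_rsq = malloc(sizeof(uint16_t) * 1024, M_ICE, M_WAITOK);
 *
 * and every slot starts out as QIDX_INVALID.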
*/ static int ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int __invariant_only ntxqs, int ntxqsets) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_tx_queue *txq; int err, i, j; MPASS(ntxqs == 1); MPASS(sc->scctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT); ASSERT_CTX_LOCKED(sc); /* Do not bother allocating queues if we're in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (0); /* Allocate queue structure memory */ if (!(vsi->tx_queues = (struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_WAITOK | M_ZERO))) { device_printf(sc->dev, "Unable to allocate Tx queue memory\n"); return (ENOMEM); } /* Allocate report status arrays */ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { if (!(txq->tx_rsq = (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_WAITOK))) { device_printf(sc->dev, "Unable to allocate tx_rsq memory\n"); err = ENOMEM; goto free_tx_queues; } /* Initialize report status array */ for (j = 0; j < sc->scctx->isc_ntxd[0]; j++) txq->tx_rsq[j] = QIDX_INVALID; } /* Assign queues from PF space to the main VSI */ err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap, ntxqsets); if (err) { device_printf(sc->dev, "Unable to assign PF queues: %s\n", ice_err_str(err)); goto free_tx_queues; } vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS; /* Add Tx queue sysctls context */ ice_vsi_add_txqs_ctx(vsi); for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { txq->me = i; txq->vsi = vsi; /* store the queue size for easier access */ txq->desc_count = sc->scctx->isc_ntxd[0]; /* get the virtual and physical address of the hardware queues */ txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]); txq->tx_base = (struct ice_tx_desc *)vaddrs[i]; txq->tx_paddr = paddrs[i]; ice_add_txq_sysctls(txq); } vsi->num_tx_queues = ntxqsets; return (0); free_tx_queues: for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { if (txq->tx_rsq != NULL) { free(txq->tx_rsq, M_ICE); txq->tx_rsq = NULL; } } free(vsi->tx_queues, M_ICE); vsi->tx_queues = NULL; return err; } /** * ice_if_rx_queues_alloc - Allocate Rx queue memory * @ctx: iflib context structure * @vaddrs: virtual addresses for the queue memory * @paddrs: physical addresses for the queue memory * @nrxqs: number of Rx queues per set (should always be 1) * @nrxqsets: number of Rx queue sets to allocate * * Called by iflib to allocate Rx queues for the device. Allocates driver * memory to track each queue, as well as sets up the Rx queue sysctls. 
*/ static int ice_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int __invariant_only nrxqs, int nrxqsets) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_rx_queue *rxq; int err, i; MPASS(nrxqs == 1); MPASS(sc->scctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT); ASSERT_CTX_LOCKED(sc); /* Do not bother allocating queues if we're in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (0); /* Allocate queue structure memory */ if (!(vsi->rx_queues = (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_WAITOK | M_ZERO))) { device_printf(sc->dev, "Unable to allocate Rx queue memory\n"); return (ENOMEM); } /* Assign queues from PF space to the main VSI */ err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap, nrxqsets); if (err) { device_printf(sc->dev, "Unable to assign PF queues: %s\n", ice_err_str(err)); goto free_rx_queues; } vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS; /* Add Rx queue sysctls context */ ice_vsi_add_rxqs_ctx(vsi); for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) { rxq->me = i; rxq->vsi = vsi; /* store the queue size for easier access */ rxq->desc_count = sc->scctx->isc_nrxd[0]; /* get the virtual and physical address of the hardware queues */ rxq->tail = QRX_TAIL(vsi->rx_qmap[i]); rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i]; rxq->rx_paddr = paddrs[i]; ice_add_rxq_sysctls(rxq); } vsi->num_rx_queues = nrxqsets; return (0); free_rx_queues: free(vsi->rx_queues, M_ICE); vsi->rx_queues = NULL; return err; } /** * ice_if_queues_free - Free queue memory * @ctx: the iflib context structure * * Free queue memory allocated by ice_if_tx_queues_alloc() and * ice_if_rx_queues_alloc(). * * There is no guarantee that ice_if_queues_free() and ice_if_detach() will be * called in the same order. It's possible for ice_if_queues_free() to be * called prior to ice_if_detach(), and vice versa. * * For this reason, the main VSI is a static member of the ice_softc, which is * not free'd until after iflib finishes calling both of these functions. * * Thus, care must be taken in how we manage the memory being freed by this * function, and in what tasks it can and must perform. */ static void ice_if_queues_free(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_tx_queue *txq; int i; /* Free the Tx and Rx sysctl contexts, and assign NULL to the node * pointers. Note, the calls here and those in ice_if_detach() * are *BOTH* necessary, as we cannot guarantee which path will be * run first */ ice_vsi_del_txqs_ctx(vsi); ice_vsi_del_rxqs_ctx(vsi); /* Release MSI-X IRQ vectors, if not yet released in ice_if_detach */ ice_free_irqvs(sc); if (vsi->tx_queues != NULL) { /* free the tx_rsq arrays */ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) { if (txq->tx_rsq != NULL) { free(txq->tx_rsq, M_ICE); txq->tx_rsq = NULL; } } free(vsi->tx_queues, M_ICE); vsi->tx_queues = NULL; vsi->num_tx_queues = 0; } if (vsi->rx_queues != NULL) { free(vsi->rx_queues, M_ICE); vsi->rx_queues = NULL; vsi->num_rx_queues = 0; } } /** * ice_msix_que - Fast interrupt handler for MSI-X receive queues * @arg: The Rx queue memory * * Interrupt filter function for iflib MSI-X interrupts. Called by iflib when * an MSI-X interrupt for a given queue is triggered. Currently this just asks * iflib to schedule the main Rx thread. 
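 *
 * A hedged sketch of where the TODO below might land; ice_update_itr()
 * is hypothetical and not a function in this driver:
 *
 *	ice_update_itr(rxq->vsi, rxq->irqv);
 *	return (FILTER_SCHEDULE_THREAD);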
 */
static int
ice_msix_que(void *arg)
{
	struct ice_rx_queue __unused *rxq = (struct ice_rx_queue *)arg;

	/* TODO: dynamic ITR algorithm?? */

	return (FILTER_SCHEDULE_THREAD);
}

/**
 * ice_msix_admin - Fast interrupt handler for MSI-X admin interrupt
 * @arg: pointer to device softc memory
 *
 * Called by iflib when an administrative interrupt occurs. Should perform any
 * fast logic for handling the interrupt cause, and then indicate whether the
 * admin task needs to be queued.
 */
static int
ice_msix_admin(void *arg)
{
	struct ice_softc *sc = (struct ice_softc *)arg;
	struct ice_hw *hw = &sc->hw;
	device_t dev = sc->dev;
	u32 oicr;

	/* There is no safe way to modify the enabled miscellaneous causes of
	 * the OICR vector at runtime, as doing so would be prone to race
	 * conditions. Reading PFINT_OICR will unmask the associated interrupt
	 * causes and allow future interrupts to occur. The admin interrupt
	 * vector will not be re-enabled until after we exit this function,
	 * but any delayed tasks must be resilient against possible "late
	 * arrival" interrupts that occur while we're already handling the
	 * task. This is done by using state bits and serializing these
	 * delayed tasks via the admin status task function.
	 */
	oicr = rd32(hw, PFINT_OICR);

	/* Processing multiple controlq interrupts on a single vector does not
	 * provide an indication of which controlq triggered the interrupt.
	 * We might try reading the INTEVENT bit of the respective PFINT_*_CTL
	 * registers. However, the INTEVENT bit is not guaranteed to be set as
	 * it gets automatically cleared when the hardware acknowledges the
	 * interrupt.
	 *
	 * This means we don't really have a good indication of whether, or
	 * which, controlq triggered this interrupt. We'll just notify the
	 * admin task that it should check all the controlqs.
	 */
	ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);

	if (oicr & PFINT_OICR_VFLR_M) {
		ice_set_state(&sc->state, ICE_STATE_VFLR_PENDING);
	}

	if (oicr & PFINT_OICR_MAL_DETECT_M) {
		ice_set_state(&sc->state, ICE_STATE_MDD_PENDING);
	}

	if (oicr & PFINT_OICR_GRST_M) {
		u32 reset;

		reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
			GLGEN_RSTAT_RESET_TYPE_S;

		if (reset == ICE_RESET_CORER)
			sc->soft_stats.corer_count++;
		else if (reset == ICE_RESET_GLOBR)
			sc->soft_stats.globr_count++;
		else
			sc->soft_stats.empr_count++;

		/* There are a couple of bits at play for handling resets.
		 * First, the ICE_STATE_RESET_OICR_RECV bit is used to
		 * indicate that the driver has received an OICR with a reset
		 * bit active, indicating that a CORER/GLOBR/EMPR is about to
		 * happen. Second, we set hw->reset_ongoing to indicate that
		 * the hardware is in reset. We will set this back to false as
		 * soon as the driver has determined that the hardware is out
		 * of reset.
		 *
		 * If the driver wishes to trigger a reset request, it can set
		 * one of the ICE_STATE_RESET_*_REQ bits, which will trigger
		 * the correct type of reset.
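		 *
		 * For example, the ECC and PCI exception causes handled below
		 * set ICE_STATE_RESET_PFR_REQ; the admin task later consumes
		 * that bit (see ice_handle_pf_reset_request()) and performs
		 * the PF reset.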
		 */
		if (!ice_testandset_state(&sc->state, ICE_STATE_RESET_OICR_RECV))
			hw->reset_ongoing = true;
	}

	if (oicr & PFINT_OICR_ECC_ERR_M) {
		device_printf(dev, "ECC Error detected!\n");
		ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
	}

	if (oicr & PFINT_OICR_PE_CRITERR_M) {
		device_printf(dev, "Critical Protocol Engine Error detected!\n");
		ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
	}

	if (oicr & PFINT_OICR_PCI_EXCEPTION_M) {
		device_printf(dev, "PCI Exception detected!\n");
		ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
	}

	if (oicr & PFINT_OICR_HMC_ERR_M) {
		/* Log the HMC errors, but don't disable the interrupt cause */
		ice_log_hmc_error(hw, dev);
	}

	return (FILTER_SCHEDULE_THREAD);
}

/**
 * ice_allocate_msix - Allocate MSI-X vectors for the interface
 * @sc: the device private softc
 *
 * Map the MSI-X bar, and then request MSI-X vectors in a two-stage process.
 *
 * First, determine a suitable total number of vectors based on the number
 * of CPUs, RSS buckets, the administrative vector, and other demands such as
 * RDMA.
 *
 * Request the desired number of vectors, and see how many we obtain. If we
 * don't obtain as many as desired, reduce the demands by lowering the number
 * of requested queues or reducing the demand from other features such as
 * RDMA.
 *
 * @remark This function is required because the driver sets the
 * IFLIB_SKIP_MSIX flag indicating that the driver will manage MSI-X vectors
 * manually.
 *
 * @remark This driver will only use MSI-X vectors. If this is not possible,
 * neither MSI nor legacy interrupts will be tried.
 *
 * @post on success this function must set the following scctx parameters:
 * isc_vectors, isc_nrxqsets, isc_ntxqsets, and isc_intr.
 *
 * @returns zero on success or an error code on failure.
 */
static int
ice_allocate_msix(struct ice_softc *sc)
{
	bool iflib_override_queue_count = false;
	if_softc_ctx_t scctx = sc->scctx;
	device_t dev = sc->dev;
	cpuset_t cpus;
	int bar, queues, vectors, requested;
	int err = 0;

	/* Allocate the MSI-X bar */
	bar = scctx->isc_msix_bar;
	sc->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE);
	if (!sc->msix_table) {
		device_printf(dev, "Unable to map MSI-X table\n");
		return (ENOMEM);
	}

	/* Check if the iflib queue count sysctls have been set */
	if (sc->ifc_sysctl_ntxqs || sc->ifc_sysctl_nrxqs)
		iflib_override_queue_count = true;

	err = bus_get_cpus(dev, INTR_CPUS, sizeof(cpus), &cpus);
	if (err) {
		device_printf(dev, "%s: Unable to fetch the CPU list: %s\n",
			      __func__, ice_err_str(err));
		CPU_COPY(&all_cpus, &cpus);
	}

	/* Attempt to mimic behavior of iflib_msix_init */
	if (iflib_override_queue_count) {
		/*
		 * If the override sysctls have been set, limit the queues to
		 * the number of logical CPUs.
		 */
		queues = mp_ncpus;
	} else {
		/*
		 * Otherwise, limit the queue count to the CPUs associated
		 * with the NUMA node the device is associated with.
		 */
		queues = CPU_COUNT(&cpus);
	}

	/* Clamp to the number of RSS buckets */
	queues = imin(queues, rss_getnumbuckets());

	/*
	 * Clamp the number of queue pairs to the minimum of the requested Tx
	 * and Rx queues.
	 */
	queues = imin(queues, sc->ifc_sysctl_ntxqs ?: scctx->isc_ntxqsets);
	queues = imin(queues, sc->ifc_sysctl_nrxqs ?: scctx->isc_nrxqsets);

	/*
	 * Determine the number of vectors to request. Note that we also need
	 * to allocate one vector for administrative tasks.
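	 *
	 * For example, on an 8-CPU system with 8 RSS buckets and no sysctl
	 * overrides, queues works out to 8 and we request 8 + 1 = 9 MSI-X
	 * vectors, the extra one being the admin vector.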
*/ requested = queues + 1; vectors = requested; err = pci_alloc_msix(dev, &vectors); if (err) { device_printf(dev, "Failed to allocate %d MSI-X vectors, err %s\n", vectors, ice_err_str(err)); goto err_free_msix_table; } /* If we don't receive enough vectors, reduce demands */ if (vectors < requested) { int diff = requested - vectors; device_printf(dev, "Requested %d MSI-X vectors, but got only %d\n", requested, vectors); /* * If we still have a difference, we need to reduce the number * of queue pairs. * * However, we still need at least one vector for the admin * interrupt and one queue pair. */ if (queues <= diff) { device_printf(dev, "Unable to allocate sufficient MSI-X vectors\n"); err = (ERANGE); goto err_pci_release_msi; } queues -= diff; } device_printf(dev, "Using %d Tx and Rx queues\n", queues); device_printf(dev, "Using MSI-X interrupts with %d vectors\n", vectors); scctx->isc_vectors = vectors; scctx->isc_nrxqsets = queues; scctx->isc_ntxqsets = queues; scctx->isc_intr = IFLIB_INTR_MSIX; /* Interrupt allocation tracking isn't required in recovery mode, * since neither RDMA nor VFs are enabled. */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (0); /* Keep track of which interrupt indices are being used for what */ sc->lan_vectors = vectors; err = ice_resmgr_assign_contiguous(&sc->imgr, sc->pf_imap, sc->lan_vectors); if (err) { device_printf(dev, "Unable to assign PF interrupt mapping: %s\n", ice_err_str(err)); goto err_pci_release_msi; } return (0); err_pci_release_msi: pci_release_msi(dev); err_free_msix_table: if (sc->msix_table != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->msix_table), sc->msix_table); sc->msix_table = NULL; } return (err); } /** * ice_if_msix_intr_assign - Assign MSI-X interrupt vectors to queues * @ctx: the iflib context structure * @msix: the number of vectors we were assigned * * Called by iflib to assign MSI-X vectors to queues. Currently requires that * we get at least the same number of vectors as we have queues, and that we * always have the same number of Tx and Rx queues. * * Tx queues use a softirq instead of using their own hardware interrupt. 
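 *
 * The resulting layout, sketched for four queue pairs:
 *
 *	vector 0:    admin interrupt (OICR)
 *	vectors 1-4: rxq0-rxq3, with txq0-txq3 attached to the same
 *	             vectors as software interrupts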
*/ static int ice_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; int err, i, vector; ASSERT_CTX_LOCKED(sc); if (vsi->num_rx_queues != vsi->num_tx_queues) { device_printf(sc->dev, "iflib requested %d Tx queues, and %d Rx queues, but the driver isn't able to support a differing number of Tx and Rx queues\n", vsi->num_tx_queues, vsi->num_rx_queues); return (EOPNOTSUPP); } if (msix < (vsi->num_rx_queues + 1)) { device_printf(sc->dev, "Not enough MSI-X vectors to assign one vector to each queue pair\n"); return (EOPNOTSUPP); } /* Save the number of vectors for future use */ sc->num_irq_vectors = vsi->num_rx_queues + 1; /* Allocate space to store the IRQ vector data */ if (!(sc->irqvs = (struct ice_irq_vector *) malloc(sizeof(struct ice_irq_vector) * (sc->num_irq_vectors), M_ICE, M_NOWAIT))) { device_printf(sc->dev, "Unable to allocate irqv memory\n"); return (ENOMEM); } /* Administrative interrupt events will use vector 0 */ err = iflib_irq_alloc_generic(ctx, &sc->irqvs[0].irq, 1, IFLIB_INTR_ADMIN, ice_msix_admin, sc, 0, "admin"); if (err) { device_printf(sc->dev, "Failed to register Admin queue handler: %s\n", ice_err_str(err)); goto free_irqvs; } sc->irqvs[0].me = 0; /* Do not allocate queue interrupts when in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (0); for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++) { struct ice_rx_queue *rxq = &vsi->rx_queues[i]; struct ice_tx_queue *txq = &vsi->tx_queues[i]; int rid = vector + 1; char irq_name[16]; snprintf(irq_name, sizeof(irq_name), "rxq%d", i); err = iflib_irq_alloc_generic(ctx, &sc->irqvs[vector].irq, rid, IFLIB_INTR_RXTX, ice_msix_que, rxq, rxq->me, irq_name); if (err) { device_printf(sc->dev, "Failed to allocate q int %d err: %s\n", i, ice_err_str(err)); vector--; i--; goto fail; } sc->irqvs[vector].me = vector; rxq->irqv = &sc->irqvs[vector]; bzero(irq_name, sizeof(irq_name)); snprintf(irq_name, sizeof(irq_name), "txq%d", i); iflib_softirq_alloc_generic(ctx, &sc->irqvs[vector].irq, IFLIB_INTR_TX, txq, txq->me, irq_name); txq->irqv = &sc->irqvs[vector]; } return (0); fail: for (; i >= 0; i--, vector--) iflib_irq_free(ctx, &sc->irqvs[vector].irq); iflib_irq_free(ctx, &sc->irqvs[0].irq); free_irqvs: free(sc->irqvs, M_ICE); sc->irqvs = NULL; return err; } /** * ice_if_mtu_set - Set the device MTU * @ctx: iflib context structure * @mtu: the MTU requested * * Called by iflib to configure the device's Maximum Transmission Unit (MTU). * * @pre assumes the caller holds the iflib CTX lock */ static int ice_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); ASSERT_CTX_LOCKED(sc); /* Do not support configuration when in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (ENOSYS); if (mtu < ICE_MIN_MTU || mtu > ICE_MAX_MTU) return (EINVAL); sc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size; return (0); } /** * ice_if_intr_enable - Enable device interrupts * @ctx: iflib context structure * * Called by iflib to request enabling device interrupts. 
*/ static void ice_if_intr_enable(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; ASSERT_CTX_LOCKED(sc); /* Enable ITR 0 */ ice_enable_intr(hw, sc->irqvs[0].me); /* Do not enable queue interrupts in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; /* Enable all queue interrupts */ for (int i = 0; i < vsi->num_rx_queues; i++) ice_enable_intr(hw, vsi->rx_queues[i].irqv->me); } /** * ice_if_intr_disable - Disable device interrupts * @ctx: iflib context structure * * Called by iflib to request disabling device interrupts. */ static void ice_if_intr_disable(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_hw *hw = &sc->hw; unsigned int i; ASSERT_CTX_LOCKED(sc); /* IFDI_INTR_DISABLE may be called prior to interrupts actually being * assigned to queues. Instead of assuming that the interrupt * assignment in the rx_queues structure is valid, just disable all * possible interrupts * * Note that we choose not to disable ITR 0 because this handles the * AdminQ interrupts, and we want to keep processing these even when * the interface is offline. */ for (i = 1; i < hw->func_caps.common_cap.num_msix_vectors; i++) ice_disable_intr(hw, i); } /** * ice_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt * @ctx: iflib context structure * @rxqid: the Rx queue to enable * * Enable a specific Rx queue interrupt. * * This function is not protected by the iflib CTX lock. */ static int ice_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; /* Do not enable queue interrupts in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (ENOSYS); ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me); return (0); } /** * ice_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt * @ctx: iflib context structure * @txqid: the Tx queue to enable * * Enable a specific Tx queue interrupt. * * This function is not protected by the iflib CTX lock. */ static int ice_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_vsi *vsi = &sc->pf_vsi; struct ice_hw *hw = &sc->hw; /* Do not enable queue interrupts in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (ENOSYS); ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me); return (0); } /** * ice_if_promisc_set - Set device promiscuous mode * @ctx: iflib context structure * @flags: promiscuous flags to configure * * Called by iflib to configure device promiscuous mode. 
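 *
 * For example, "ifconfig ice0 promisc" arrives here with IFF_PROMISC set
 * in flags, and "ifconfig ice0 -promisc" arrives with it cleared.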
* * @remark Calls to this function will always overwrite the previous setting */ static int ice_if_promisc_set(if_ctx_t ctx, int flags) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; bool promisc_enable = flags & IFF_PROMISC; bool multi_enable = flags & IFF_ALLMULTI; /* Do not support configuration when in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return (ENOSYS); if (multi_enable) return (EOPNOTSUPP); if (promisc_enable) { status = ice_set_vsi_promisc(hw, sc->pf_vsi.idx, ICE_VSI_PROMISC_MASK, 0); if (status && status != ICE_ERR_ALREADY_EXISTS) { device_printf(dev, "Failed to enable promiscuous mode for PF VSI, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } } else { status = ice_clear_vsi_promisc(hw, sc->pf_vsi.idx, ICE_VSI_PROMISC_MASK, 0); if (status) { device_printf(dev, "Failed to disable promiscuous mode for PF VSI, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); return (EIO); } } return (0); } /** * ice_if_media_change - Change device media * @ctx: device ctx structure * * Called by iflib when a media change is requested. This operation is not * supported by the hardware, so we just return an error code. */ static int ice_if_media_change(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); device_printf(sc->dev, "Media change is not supported.\n"); return (ENODEV); } /** * ice_if_media_status - Report current device media * @ctx: iflib context structure * @ifmr: ifmedia request structure to update * * Updates the provided ifmr with current device media status, including link * status and media type. */ static void ice_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ice_link_status *li = &sc->hw.port_info->phy.link_info; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; /* Never report link up or media types when in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; if (!sc->link_up) return; ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_FDX; if (li->phy_type_low) ifmr->ifm_active |= ice_get_phy_type_low(li->phy_type_low); else if (li->phy_type_high) ifmr->ifm_active |= ice_get_phy_type_high(li->phy_type_high); else ifmr->ifm_active |= IFM_UNKNOWN; /* Report flow control status as well */ if (li->an_info & ICE_AQ_LINK_PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; if (li->an_info & ICE_AQ_LINK_PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; } /** * ice_init_tx_tracking - Initialize Tx queue software tracking values * @vsi: the VSI to initialize * * Initialize Tx queue software tracking values, including the Report Status * queue, and related software tracking values. */ static void ice_init_tx_tracking(struct ice_vsi *vsi) { struct ice_tx_queue *txq; size_t j; int i; for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) { txq->tx_rs_cidx = txq->tx_rs_pidx = 0; /* Initialize the last processed descriptor to be the end of * the ring, rather than the start, so that we avoid an * off-by-one error in ice_ift_txd_credits_update for the * first packet. 
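		 *
		 * For example, with a 1024 descriptor ring, tx_cidx_processed
		 * below starts at 1023 rather than 0, so the first credit
		 * calculation accounts for the full ring of 1024 free
		 * descriptors instead of losing one.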
*/ txq->tx_cidx_processed = txq->desc_count - 1; for (j = 0; j < txq->desc_count; j++) txq->tx_rsq[j] = QIDX_INVALID; } } /** * ice_update_rx_mbuf_sz - Update the Rx buffer size for all queues * @sc: the device softc * * Called to update the Rx queue mbuf_sz parameter for configuring the receive * buffer sizes when programming hardware. */ static void ice_update_rx_mbuf_sz(struct ice_softc *sc) { uint32_t mbuf_sz = iflib_get_rx_mbuf_sz(sc->ctx); struct ice_vsi *vsi = &sc->pf_vsi; MPASS(mbuf_sz <= UINT16_MAX); vsi->mbuf_sz = mbuf_sz; } /** * ice_if_init - Initialize the device * @ctx: iflib ctx structure * * Called by iflib to bring the device up, i.e. ifconfig ice0 up. Initializes * device filters and prepares the Tx and Rx engines. * * @pre assumes the caller holds the iflib CTX lock */ static void ice_if_init(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); device_t dev = sc->dev; int err; ASSERT_CTX_LOCKED(sc); /* * We've seen an issue with 11.3/12.1 where sideband routines are * called after detach is called. This would call routines after * if_stop, causing issues with the teardown process. This has * seemingly been fixed in STABLE snapshots, but it seems like a * good idea to have this guard here regardless. */ if (ice_driver_is_detaching(sc)) return; if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) { device_printf(sc->dev, "request to start interface cannot be completed as the device failed to reset\n"); return; } if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { device_printf(sc->dev, "request to start interface while device is prepared for impending reset\n"); return; } ice_update_rx_mbuf_sz(sc); /* Update the MAC address... User might use a LAA */ err = ice_update_laa_mac(sc); if (err) { device_printf(dev, "LAA address change failed, err %s\n", ice_err_str(err)); return; } /* Initialize software Tx tracking values */ ice_init_tx_tracking(&sc->pf_vsi); err = ice_cfg_vsi_for_tx(&sc->pf_vsi); if (err) { device_printf(dev, "Unable to configure the main VSI for Tx: %s\n", ice_err_str(err)); return; } err = ice_cfg_vsi_for_rx(&sc->pf_vsi); if (err) { device_printf(dev, "Unable to configure the main VSI for Rx: %s\n", ice_err_str(err)); goto err_cleanup_tx; } err = ice_control_rx_queues(&sc->pf_vsi, true); if (err) { device_printf(dev, "Unable to enable Rx rings for receive: %s\n", ice_err_str(err)); goto err_cleanup_tx; } err = ice_cfg_pf_default_mac_filters(sc); if (err) { device_printf(dev, "Unable to configure default MAC filters: %s\n", ice_err_str(err)); goto err_stop_rx; } /* We use software interrupts for Tx, so we only program the hardware * interrupts for Rx. */ ice_configure_rxq_interrupts(&sc->pf_vsi); ice_configure_rx_itr(&sc->pf_vsi); /* Configure promiscuous mode */ ice_if_promisc_set(ctx, if_getflags(sc->ifp)); ice_set_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED); return; err_stop_rx: ice_control_rx_queues(&sc->pf_vsi, false); err_cleanup_tx: ice_vsi_disable_tx(&sc->pf_vsi); } /** * ice_poll_for_media_avail - Re-enable link if media is detected * @sc: device private structure * * Intended to be called from the driver's timer function, this function * sends the Get Link Status AQ command and re-enables HW link if the * command says that media is available. * * If the driver doesn't have the "NO_MEDIA" state set, then this does nothing, * since media removal events are supposed to be sent to the driver through * a link status event.
*/ static void ice_poll_for_media_avail(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; struct ice_port_info *pi = hw->port_info; if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) { pi->phy.get_link_info = true; ice_get_link_status(pi, &sc->link_up); if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { enum ice_status status; /* Re-enable link and re-apply user link settings */ - ice_apply_saved_phy_cfg(sc); + ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC); /* Update the OS about changes in media capability */ status = ice_add_media_types(sc, sc->media); if (status) device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA); } } } /** * ice_if_timer - called by iflib periodically * @ctx: iflib ctx structure * @qid: the queue this timer was called for * * This callback is triggered by iflib periodically. We use it to update the * hw statistics. * * @remark this function is not protected by the iflib CTX lock. */ static void ice_if_timer(if_ctx_t ctx, uint16_t qid) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); uint64_t prev_link_xoff_rx = sc->stats.cur.link_xoff_rx; if (qid != 0) return; /* Do not attempt to update stats when in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; /* Update device statistics */ ice_update_pf_stats(sc); /* * For proper watchdog management, the iflib stack needs to know if * we've been paused during the last interval. Check if the * link_xoff_rx stat changed, and set the isc_pause_frames, if so. */ if (sc->stats.cur.link_xoff_rx != prev_link_xoff_rx) sc->scctx->isc_pause_frames = 1; /* Update the primary VSI stats */ ice_update_vsi_hw_stats(&sc->pf_vsi); } /** * ice_admin_timer - called periodically to trigger the admin task * @arg: callout(9) argument pointing to the device private softc structure * * Timer function used as part of a callout(9) timer that will periodically * trigger the admin task, even when the interface is down. * * @remark this function is not called by iflib and is not protected by the * iflib CTX lock. * * @remark because this is a callout function, it cannot sleep and should not * attempt taking the iflib CTX lock. */ static void ice_admin_timer(void *arg) { struct ice_softc *sc = (struct ice_softc *)arg; + /* + * There is a point where callout routines are no longer + * cancelable. So there exists a window of time where the + * driver enters detach() and tries to cancel the callout, but the + * callout routine has passed the cancellation point. The detach() + * routine is unaware of this and tries to free resources that the + * callout routine needs. So we check for the detach state flag to + * at least shrink the window of opportunity. + */ + if (ice_driver_is_detaching(sc)) + return; + /* Fire off the admin task */ iflib_admin_intr_deferred(sc->ctx); /* Reschedule the admin timer */ callout_schedule(&sc->admin_timer, hz/2); } /** * ice_transition_recovery_mode - Transition to recovery mode * @sc: the device private softc * * Called when the driver detects that the firmware has entered recovery mode * at run time. */ static void ice_transition_recovery_mode(struct ice_softc *sc) { struct ice_vsi *vsi = &sc->pf_vsi; int i; device_printf(sc->dev, "Firmware recovery mode detected. Limiting functionality. 
Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); /* Tell the stack that the link has gone down */ iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0); /* Request that the device be re-initialized */ ice_request_stack_reinit(sc); ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en); ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap); ice_vsi_del_txqs_ctx(vsi); ice_vsi_del_rxqs_ctx(vsi); for (i = 0; i < sc->num_available_vsi; i++) { if (sc->all_vsi[i]) ice_release_vsi(sc->all_vsi[i]); } sc->num_available_vsi = 0; if (sc->all_vsi) { free(sc->all_vsi, M_ICE); sc->all_vsi = NULL; } /* Destroy the interrupt manager */ ice_resmgr_destroy(&sc->imgr); /* Destroy the queue managers */ ice_resmgr_destroy(&sc->tx_qmgr); ice_resmgr_destroy(&sc->rx_qmgr); ice_deinit_hw(&sc->hw); } /** * ice_transition_safe_mode - Transition to safe mode * @sc: the device private softc * * Called when the driver attempts to reload the DDP package during a device * reset, and the new download fails. If so, we must transition to safe mode * at run time. * * @remark although safe mode normally allocates only a single queue, we can't * change the number of queues dynamically when using iflib. Due to this, we * do not attempt to reduce the number of queues. */ static void ice_transition_safe_mode(struct ice_softc *sc) { /* Indicate that we are in Safe mode */ ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en); ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap); ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap); ice_clear_bit(ICE_FEATURE_RSS, sc->feat_en); } /** * ice_if_update_admin_status - update admin status * @ctx: iflib ctx structure * * Called by iflib to update the admin status. For our purposes, this means * check the adminq, and update the link status. It's ultimately triggered by * our admin interrupt, or by the ice_if_timer periodically. * * @pre assumes the caller holds the iflib CTX lock */ static void ice_if_update_admin_status(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); enum ice_fw_modes fw_mode; bool reschedule = false; u16 pending = 0; ASSERT_CTX_LOCKED(sc); /* Check if the firmware entered recovery mode at run time */ fw_mode = ice_get_fw_mode(&sc->hw); if (fw_mode == ICE_FW_MODE_REC) { if (!ice_testandset_state(&sc->state, ICE_STATE_RECOVERY_MODE)) { /* If we just entered recovery mode, log a warning to * the system administrator and deinit driver state * that is no longer functional. */ ice_transition_recovery_mode(sc); } } else if (fw_mode == ICE_FW_MODE_ROLLBACK) { if (!ice_testandset_state(&sc->state, ICE_STATE_ROLLBACK_MODE)) { /* Rollback mode isn't fatal, but we don't want to * repeatedly post a message about it. */ ice_print_rollback_msg(&sc->hw); } } /* Handle global reset events */ ice_handle_reset_event(sc); /* Handle PF reset requests */ ice_handle_pf_reset_request(sc); /* Handle MDD events */ ice_handle_mdd_event(sc); if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED) || ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET) || ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) { /* * If we know the control queues are disabled, skip processing * the control queues entirely. 
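 *
 * (The lone ';' in the first branch below is intentional: it keeps the
 * if/else-if chain intact while performing no work when the control
 * queues are known to be disabled.)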
*/ ; } else if (ice_testandclear_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING)) { ice_process_ctrlq(sc, ICE_CTL_Q_ADMIN, &pending); if (pending > 0) reschedule = true; ice_process_ctrlq(sc, ICE_CTL_Q_MAILBOX, &pending); if (pending > 0) reschedule = true; } /* Poll for link up */ ice_poll_for_media_avail(sc); /* Check and update link status */ ice_update_link_status(sc, false); /* * If there are still messages to process, we need to reschedule * ourselves. Otherwise, we can just re-enable the interrupt. We'll be * woken up at the next interrupt or timer event. */ if (reschedule) { ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING); iflib_admin_intr_deferred(ctx); } else { ice_enable_intr(&sc->hw, sc->irqvs[0].me); } } /** * ice_prepare_for_reset - Prepare device for an impending reset * @sc: The device private softc * * Prepare the driver for an impending reset, shutting down VSIs, clearing the * scheduler setup, and shutting down controlqs. Uses the * ICE_STATE_PREPARED_FOR_RESET to indicate whether we've already prepared the * driver for reset or not. */ static void ice_prepare_for_reset(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; /* If we're already prepared, there's nothing to do */ if (ice_testandset_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) return; log(LOG_INFO, "%s: preparing to reset device logic\n", sc->ifp->if_xname); /* In recovery mode, hardware is not initialized */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; /* Release the main PF VSI queue mappings */ ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap, sc->pf_vsi.num_tx_queues); ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap, sc->pf_vsi.num_rx_queues); ice_clear_hw_tbls(hw); if (hw->port_info) ice_sched_clear_port(hw->port_info); ice_shutdown_all_ctrlq(hw); } /** * ice_rebuild_pf_vsi_qmap - Rebuild the main PF VSI queue mapping * @sc: the device softc pointer * * Loops over the Tx and Rx queues for the main PF VSI and reassigns the queue * mapping after a reset occurred. */ static int ice_rebuild_pf_vsi_qmap(struct ice_softc *sc) { struct ice_vsi *vsi = &sc->pf_vsi; struct ice_tx_queue *txq; struct ice_rx_queue *rxq; int err, i; /* Re-assign Tx queues from PF space to the main VSI */ err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap, vsi->num_tx_queues); if (err) { device_printf(sc->dev, "Unable to re-assign PF Tx queues: %s\n", ice_err_str(err)); return (err); } /* Re-assign Rx queues from PF space to this VSI */ err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap, vsi->num_rx_queues); if (err) { device_printf(sc->dev, "Unable to re-assign PF Rx queues: %s\n", ice_err_str(err)); goto err_release_tx_queues; } vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS; /* Re-assign Tx queue tail pointers */ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]); /* Re-assign Rx queue tail pointers */ for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) rxq->tail = QRX_TAIL(vsi->rx_qmap[i]); return (0); err_release_tx_queues: ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap, sc->pf_vsi.num_tx_queues); return (err); } /* determine if the iflib context is active */ #define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING)) /** * ice_rebuild_recovery_mode - Rebuild driver state while in recovery mode * @sc: The device private softc * * Handle a driver rebuild while in recovery mode. 
This will only rebuild the * limited functionality supported while in recovery mode. */ static void ice_rebuild_recovery_mode(struct ice_softc *sc) { device_t dev = sc->dev; /* enable PCIe bus master */ pci_enable_busmaster(dev); /* Configure interrupt causes for the administrative interrupt */ ice_configure_misc_interrupts(sc); /* Enable ITR 0 right away, so that we can handle admin interrupts */ ice_enable_intr(&sc->hw, sc->irqvs[0].me); /* Now that the rebuild is finished, we're no longer prepared to reset */ ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET); log(LOG_INFO, "%s: device rebuild successful\n", sc->ifp->if_xname); /* In order to completely restore device functionality, the iflib core * needs to be reset. We need to request an iflib reset. Additionally, * because the state of IFC_DO_RESET is cached within task_fn_admin in * the iflib core, we also want to re-run the admin task so that iflib * resets immediately instead of waiting for the next interrupt. */ ice_request_stack_reinit(sc); return; } /** * ice_rebuild - Rebuild driver state post reset * @sc: The device private softc * * Restore driver state after a reset occurred. Restart the controlqs, setup * the hardware port, and re-enable the VSIs. */ static void ice_rebuild(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; int err; sc->rebuild_ticks = ticks; /* If we're rebuilding, then a reset has succeeded. */ ice_clear_state(&sc->state, ICE_STATE_RESET_FAILED); /* * If the firmware is in recovery mode, only restore the limited * functionality supported by recovery mode. */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) { ice_rebuild_recovery_mode(sc); return; } /* enable PCIe bus master */ pci_enable_busmaster(dev); status = ice_init_all_ctrlq(hw); if (status) { device_printf(dev, "failed to re-init controlqs, err %s\n", ice_status_str(status)); goto err_shutdown_ctrlq; } /* Query the allocated resources for Tx scheduler */ status = ice_sched_query_res_alloc(hw); if (status) { device_printf(dev, "Failed to query scheduler resources, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); goto err_shutdown_ctrlq; } err = ice_send_version(sc); if (err) goto err_shutdown_ctrlq; err = ice_init_link_events(sc); if (err) { device_printf(dev, "ice_init_link_events failed: %s\n", ice_err_str(err)); goto err_shutdown_ctrlq; } status = ice_clear_pf_cfg(hw); if (status) { device_printf(dev, "failed to clear PF configuration, err %s\n", ice_status_str(status)); goto err_shutdown_ctrlq; } ice_clear_pxe_mode(hw); status = ice_get_caps(hw); if (status) { device_printf(dev, "failed to get capabilities, err %s\n", ice_status_str(status)); goto err_shutdown_ctrlq; } status = ice_sched_init_port(hw->port_info); if (status) { device_printf(dev, "failed to initialize port, err %s\n", ice_status_str(status)); goto err_sched_cleanup; } /* If we previously loaded the package, it needs to be reloaded now */ if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) { status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); if (status) { ice_log_pkg_init(sc, &status); ice_transition_safe_mode(sc); } } ice_reset_pf_stats(sc); err = ice_rebuild_pf_vsi_qmap(sc); if (err) { device_printf(sc->dev, "Unable to re-assign main VSI queues, err %s\n", ice_err_str(err)); goto err_sched_cleanup; } err = ice_initialize_vsi(&sc->pf_vsi); if (err) { device_printf(sc->dev, "Unable to re-initialize Main VSI, err %s\n", ice_err_str(err)); goto err_release_queue_allocations; }
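	/* At this point the scheduler setup, VSI context, and queue mappings
	 * exist again; the remaining steps replay software configuration
	 * (filters, RSS, link state) on top of the re-initialized hardware.
	 */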
/* Replay all VSI configuration */ err = ice_replay_all_vsi_cfg(sc); if (err) goto err_deinit_pf_vsi; + /* Re-enable FW health event reporting */ + ice_init_health_events(sc); + /* Reconfigure the main PF VSI for RSS */ err = ice_config_rss(&sc->pf_vsi); if (err) { device_printf(sc->dev, "Unable to reconfigure RSS for the main VSI, err %s\n", ice_err_str(err)); goto err_deinit_pf_vsi; } /* Refresh link status */ ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED); sc->hw.port_info->phy.get_link_info = true; ice_get_link_status(sc->hw.port_info, &sc->link_up); ice_update_link_status(sc, true); /* Configure interrupt causes for the administrative interrupt */ ice_configure_misc_interrupts(sc); /* Enable ITR 0 right away, so that we can handle admin interrupts */ ice_enable_intr(&sc->hw, sc->irqvs[0].me); /* Now that the rebuild is finished, we're no longer prepared to reset */ ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET); log(LOG_INFO, "%s: device rebuild successful\n", sc->ifp->if_xname); /* In order to completely restore device functionality, the iflib core * needs to be reset. We need to request an iflib reset. Additionally, * because the state of IFC_DO_RESET is cached within task_fn_admin in * the iflib core, we also want to re-run the admin task so that iflib * resets immediately instead of waiting for the next interrupt. */ ice_request_stack_reinit(sc); return; err_deinit_pf_vsi: ice_deinit_vsi(&sc->pf_vsi); err_release_queue_allocations: ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap, sc->pf_vsi.num_tx_queues); ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap, sc->pf_vsi.num_rx_queues); err_sched_cleanup: ice_sched_cleanup_all(hw); err_shutdown_ctrlq: ice_shutdown_all_ctrlq(hw); ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); device_printf(dev, "Driver rebuild failed, please reload the device driver\n"); } /** * ice_handle_reset_event - Handle reset events triggered by OICR * @sc: The device private softc * * Handle reset events triggered by an OICR notification. This includes CORER, * GLOBR, and EMPR resets triggered by software on this or any other PF or by * firmware. * * @pre assumes the iflib context lock is held, and will unlock it while * waiting for the hardware to finish reset. */ static void ice_handle_reset_event(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; enum ice_status status; device_t dev = sc->dev; /* When a CORER, GLOBR, or EMPR is about to happen, the hardware will * trigger an OICR interrupt. Our OICR handler will determine when * this occurs and set the ICE_STATE_RESET_OICR_RECV bit as * appropriate. */ if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_OICR_RECV)) return; ice_prepare_for_reset(sc); /* * Release the iflib context lock and wait for the device to finish * resetting. */ IFLIB_CTX_UNLOCK(sc); status = ice_check_reset(hw); IFLIB_CTX_LOCK(sc); if (status) { device_printf(dev, "Device never came out of reset, err %s\n", ice_status_str(status)); ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); return; } /* We're done with the reset, so we can rebuild driver state */ sc->hw.reset_ongoing = false; ice_rebuild(sc); /* In the unlikely event that a PF reset request occurs at the same * time as a global reset, clear the request now. This avoids * resetting a second time right after we reset due to a global event.
*/ if (ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ)) device_printf(dev, "Ignoring PFR request that occurred while a reset was ongoing\n"); } /** * ice_handle_pf_reset_request - Initiate PF reset requested by software * @sc: The device private softc * * Initiate a PF reset requested by software. We handle this in the admin task * so that only one thread actually handles driver preparation and cleanup, * rather than having multiple threads possibly attempt to run this code * simultaneously. * * @pre assumes the iflib context lock is held and will unlock it while * waiting for the PF reset to complete. */ static void ice_handle_pf_reset_request(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; enum ice_status status; /* Check for PF reset requests */ if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ)) return; /* Make sure we're prepared for reset */ ice_prepare_for_reset(sc); /* * Release the iflib context lock and wait for the device to finish * resetting. */ IFLIB_CTX_UNLOCK(sc); status = ice_reset(hw, ICE_RESET_PFR); IFLIB_CTX_LOCK(sc); if (status) { device_printf(sc->dev, "device PF reset failed, err %s\n", ice_status_str(status)); ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); return; } sc->soft_stats.pfr_count++; ice_rebuild(sc); } /** * ice_init_device_features - Init device driver features * @sc: driver softc structure * * @pre assumes that the function capabilities bits have been set up by * ice_init_hw(). */ static void ice_init_device_features(struct ice_softc *sc) { /* * A failed pkg file download triggers safe mode, disabling advanced * device feature support */ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) return; /* Set capabilities that all devices support */ ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap); ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap); ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap); - ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_cap); + ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_1, sc->feat_cap); + ice_set_bit(ICE_FEATURE_LINK_MGMT_VER_2, sc->feat_cap); + ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap); /* Disable features due to hardware limitations... */ if (!sc->hw.func_caps.common_cap.rss_table_size) ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap); + /* Disable features due to firmware limitations... */ + if (!ice_is_fw_health_report_supported(&sc->hw)) + ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap); /* Disable capabilities not supported by the OS */ ice_disable_unsupported_features(sc->feat_cap); /* RSS is always enabled for iflib */ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RSS)) ice_set_bit(ICE_FEATURE_RSS, sc->feat_en); } /** * ice_if_multi_set - Callback to update Multicast filters in HW * @ctx: iflib ctx structure * * Called by iflib in response to SIOCDELMULTI and SIOCADDMULTI. Must search * the if_multiaddrs list and determine which filters have been added or * removed from the list, and update HW programming to reflect the new list. 
* * @pre assumes the caller holds the iflib CTX lock */ static void ice_if_multi_set(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); int err; ASSERT_CTX_LOCKED(sc); /* Do not handle multicast configuration in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; err = ice_sync_multicast_filters(sc); if (err) { device_printf(sc->dev, "Failed to synchronize multicast filter list: %s\n", ice_err_str(err)); return; } } /** * ice_if_vlan_register - Register a VLAN with the hardware * @ctx: iflib ctx pointer * @vtag: VLAN to add * * Programs the main PF VSI with a hardware filter for the given VLAN. * * @pre assumes the caller holds the iflib CTX lock */ static void ice_if_vlan_register(if_ctx_t ctx, u16 vtag) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); enum ice_status status; ASSERT_CTX_LOCKED(sc); /* Do not handle VLAN configuration in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; status = ice_add_vlan_hw_filter(&sc->pf_vsi, vtag); if (status) { device_printf(sc->dev, "Failure adding VLAN %d to main VSI, err %s aq_err %s\n", vtag, ice_status_str(status), ice_aq_str(sc->hw.adminq.sq_last_status)); } } /** * ice_if_vlan_unregister - Remove a VLAN filter from the hardware * @ctx: iflib ctx pointer * @vtag: VLAN to remove * * Removes the previously programmed VLAN filter from the main PF VSI. * * @pre assumes the caller holds the iflib CTX lock */ static void ice_if_vlan_unregister(if_ctx_t ctx, u16 vtag) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); enum ice_status status; ASSERT_CTX_LOCKED(sc); /* Do not handle VLAN configuration in recovery mode */ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) return; status = ice_remove_vlan_hw_filter(&sc->pf_vsi, vtag); if (status) { device_printf(sc->dev, "Failure removing VLAN %d from main VSI, err %s aq_err %s\n", vtag, ice_status_str(status), ice_aq_str(sc->hw.adminq.sq_last_status)); } } /** * ice_if_stop - Stop the device * @ctx: iflib context structure * * Called by iflib to stop the device and bring it down. (i.e. ifconfig ice0 * down) * * @pre assumes the caller holds the iflib CTX lock */ static void ice_if_stop(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); ASSERT_CTX_LOCKED(sc); /* * The iflib core may call IFDI_STOP prior to the first call to * IFDI_INIT. This will cause us to attempt to remove MAC filters we * don't have, and disable Tx queues which aren't yet configured. * Although it is likely these extra operations are harmless, they do * cause spurious warning messages to be displayed, which may confuse * users. * * To avoid these messages, we use a state bit indicating if we've * been initialized. It will be set when ice_if_init is called, and * cleared here in ice_if_stop. */ if (!ice_testandclear_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED)) return; if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) { device_printf(sc->dev, "request to stop interface cannot be completed as the device failed to reset\n"); return; } if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { device_printf(sc->dev, "request to stop interface while device is prepared for impending reset\n"); return; } /* Remove the MAC filters, stop Tx, and stop Rx. We don't check the * return of these functions because there's nothing we can really do * if they fail, and the functions already print error messages. * Just try to shut down as much as we can.
*/ ice_rm_pf_default_mac_filters(sc); /* Dissociate the Tx and Rx queues from the interrupts */ ice_flush_txq_interrupts(&sc->pf_vsi); ice_flush_rxq_interrupts(&sc->pf_vsi); /* Disable the Tx and Rx queues */ ice_vsi_disable_tx(&sc->pf_vsi); ice_control_rx_queues(&sc->pf_vsi, false); } /** * ice_if_get_counter - Get current value of an ifnet statistic * @ctx: iflib context pointer * @counter: ifnet counter to read * * Reads the current value of an ifnet counter for the device. * * This function is not protected by the iflib CTX lock. */ static uint64_t ice_if_get_counter(if_ctx_t ctx, ift_counter counter) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); /* Return the counter for the main PF VSI */ return ice_get_ifnet_counter(&sc->pf_vsi, counter); } /** * ice_request_stack_reinit - Request that iflib re-initialize * @sc: the device private softc * * Request that the device be brought down and up, to re-initialize. For * example, this may be called when a device reset occurs, or when Tx and Rx * queues need to be re-initialized. * * This is required because the iflib state is outside the driver, and must be * re-initialized if we need to restart Tx and Rx queues. */ void ice_request_stack_reinit(struct ice_softc *sc) { if (CTX_ACTIVE(sc->ctx)) { iflib_request_reset(sc->ctx); iflib_admin_intr_deferred(sc->ctx); } } /** * ice_driver_is_detaching - Check if the driver is detaching/unloading * @sc: device private softc * * Returns true if the driver is detaching, false otherwise. * * @remark on newer kernels, take advantage of iflib_in_detach in order to * report detachment correctly as early as possible. * * @remark this function is used by various code paths that want to avoid * running if the driver is about to be removed. This includes sysctls and * other driver access points. Note that it does not fully resolve * detach-based race conditions as it is possible for a thread to race with * iflib_in_detach. */ bool ice_driver_is_detaching(struct ice_softc *sc) { return (ice_test_state(&sc->state, ICE_STATE_DETACHING) || iflib_in_detach(sc->ctx)); } /** * ice_if_priv_ioctl - Device private ioctl handler * @ctx: iflib context pointer * @command: The ioctl command issued * @data: ioctl specific data * * iflib callback for handling custom driver specific ioctls. * * @pre Assumes that the iflib context lock is held. */ static int ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); struct ifdrv *ifd; device_t dev = sc->dev; if (data == NULL) return (EINVAL); ASSERT_CTX_LOCKED(sc); /* Make sure the command type is valid */ switch (command) { case SIOCSDRVSPEC: case SIOCGDRVSPEC: /* Accepted commands */ break; case SIOCGPRIVATE_0: /* * Although we do not support this ioctl command, it's * expected that iflib will forward it to the IFDI_PRIV_IOCTL * handler. Do not print a message in this case */ return (ENOTSUP); default: /* * If we get a different command for this function, it's * definitely unexpected, so log a message indicating what * command we got for debugging purposes. */ device_printf(dev, "%s: unexpected ioctl command %08lx\n", __func__, command); return (EINVAL); } ifd = (struct ifdrv *)data; switch (ifd->ifd_cmd) { case ICE_NVM_ACCESS: return ice_handle_nvm_access_ioctl(sc, ifd); default: return EINVAL; } } /** * ice_if_i2c_req - I2C request handler for iflib * @ctx: iflib context pointer * @req: The I2C parameters to use * * Read from the port's I2C eeprom using the parameters from the ioctl.
* * @remark The iflib-only part is pretty simple. */ static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); return ice_handle_i2c_req(sc, req); } /** * ice_if_suspend - PCI device suspend handler for iflib * @ctx: iflib context pointer * * Deinitializes the driver and clears HW resources in preparation for * suspend or an FLR. * * @returns 0; this return value is ignored */ static int ice_if_suspend(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); /* At least a PFR is always going to happen after this; * either via FLR or during the D3->D0 transition. */ ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ); ice_prepare_for_reset(sc); return (0); } /** * ice_if_resume - PCI device resume handler for iflib * @ctx: iflib context pointer * * Reinitializes the driver and the HW after PCI resume or after * an FLR. An init is performed by iflib after this function is finished. * * @returns 0; this return value is ignored */ static int ice_if_resume(if_ctx_t ctx) { struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx); ice_rebuild(sc); return (0); } diff --git a/sys/dev/ice/virtchnl.h b/sys/dev/ice/virtchnl.h index 185e007050fa..936cbba1da14 100644 --- a/sys/dev/ice/virtchnl.h +++ b/sys/dev/ice/virtchnl.h @@ -1,1207 +1,1676 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _VIRTCHNL_H_ #define _VIRTCHNL_H_ /* Description: - * This header file describes the VF-PF communication protocol used - * by the drivers for all devices starting from our 40G product line + * This header file describes the Virtual Function (VF) - Physical Function + * (PF) communication protocol used by the drivers for all devices starting + * from our 40G product line * * Admin queue buffer usage: * desc->opcode is always aqc_opc_send_msg_to_pf * flags, retval, datalen, and data addr are all used normally. 
* The Firmware copies the cookie fields when sending messages between the * PF and VF, but uses all other fields internally. Due to this limitation, * we must send all messages as "indirect", i.e. using an external buffer. * * All the VSI indexes are relative to the VF. Each VF can have a maximum of * three VSIs. All the queue indexes are relative to the VSI. Each VF can * have a maximum of sixteen queues for all of its VSIs. * * The PF is required to return a status code in v_retval for all messages - * except RESET_VF, which does not require any response. The return value - * is of status_code type, defined in the shared type.h. + * except RESET_VF, which does not require any response. The returned value + * is of virtchnl_status_code type, defined in the shared type.h. * * In general, VF driver initialization should roughly follow the order of * these opcodes. The VF driver must first validate the API version of the * PF driver, then request a reset, then get resources, then configure * queues and interrupts. After these operations are complete, the VF * driver may start its queues, optionally add MAC and VLAN filters, and * process traffic. */ /* START GENERIC DEFINES * Need to ensure the following enums and defines hold the same meaning and * value in current and future projects */ /* Error Codes */ enum virtchnl_status_code { VIRTCHNL_STATUS_SUCCESS = 0, VIRTCHNL_STATUS_ERR_PARAM = -5, VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, }; /* Backward compatibility */ #define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM #define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED #define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3 #define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4 #define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5 #define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6 #define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7 enum virtchnl_link_speed { VIRTCHNL_LINK_SPEED_UNKNOWN = 0, VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT), VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT), VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT), VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT), VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT), VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT), VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT), VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT), }; /* for hsplit_0 field of Rx HMC context */ /* deprecated with AVF 1.0 */ enum virtchnl_rx_hsplit { VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0, VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1, VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2, VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4, VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, }; #define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6 /* END GENERIC DEFINES */ /* Opcodes for VF-PF communication. These are placed in the v_opcode field * of the virtchnl_msg structure. */ enum virtchnl_ops { /* The PF sends status change events to VFs using * the VIRTCHNL_OP_EVENT opcode. * VFs send requests to the PF using the other ops. * Use of "advanced opcode" features must be negotiated as part of capabilities * exchange and is not considered part of the base mode feature set.
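 *
 * For example, the VLAN V2 opcodes (51-59) below are advanced opcodes: a VF
 * may only issue them after negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2 during
 * the VIRTCHNL_OP_GET_VF_RESOURCES capability exchange.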
*/ VIRTCHNL_OP_UNKNOWN = 0, VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */ VIRTCHNL_OP_RESET_VF = 2, VIRTCHNL_OP_GET_VF_RESOURCES = 3, VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6, VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, VIRTCHNL_OP_ENABLE_QUEUES = 8, VIRTCHNL_OP_DISABLE_QUEUES = 9, VIRTCHNL_OP_ADD_ETH_ADDR = 10, VIRTCHNL_OP_DEL_ETH_ADDR = 11, VIRTCHNL_OP_ADD_VLAN = 12, VIRTCHNL_OP_DEL_VLAN = 13, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, VIRTCHNL_OP_GET_STATS = 15, VIRTCHNL_OP_RSVD = 16, VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ /* opcode 19 is reserved */ /* opcodes 20, 21, and 22 are reserved */ VIRTCHNL_OP_CONFIG_RSS_KEY = 23, VIRTCHNL_OP_CONFIG_RSS_LUT = 24, VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, VIRTCHNL_OP_SET_RSS_HENA = 26, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, VIRTCHNL_OP_REQUEST_QUEUES = 29, VIRTCHNL_OP_ENABLE_CHANNELS = 30, VIRTCHNL_OP_DISABLE_CHANNELS = 31, VIRTCHNL_OP_ADD_CLOUD_FILTER = 32, VIRTCHNL_OP_DEL_CLOUD_FILTER = 33, /* opcode 34 is reserved */ - /* opcodes 39, 40, 41, 42 and 43 are reserved */ + /* opcodes 38, 39, 40, 41, 42 and 43 are reserved */ /* opcode 44 is reserved */ /* opcode 45, 46, 47, 48 and 49 are reserved */ VIRTCHNL_OP_GET_MAX_RSS_QREGION = 50, + VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS = 51, + VIRTCHNL_OP_ADD_VLAN_V2 = 52, + VIRTCHNL_OP_DEL_VLAN_V2 = 53, + VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 = 54, + VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55, + VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56, + VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57, + VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2 = 58, + VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 = 59, + /* opcodes 60 through 69 are reserved */ VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107, VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108, VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111, VIRTCHNL_OP_MAX, }; static inline const char *virtchnl_op_str(enum virtchnl_ops v_opcode) { switch (v_opcode) { case VIRTCHNL_OP_UNKNOWN: return "VIRTCHNL_OP_UNKNOWN"; case VIRTCHNL_OP_VERSION: return "VIRTCHNL_OP_VERSION"; case VIRTCHNL_OP_RESET_VF: return "VIRTCHNL_OP_RESET_VF"; case VIRTCHNL_OP_GET_VF_RESOURCES: return "VIRTCHNL_OP_GET_VF_RESOURCES"; case VIRTCHNL_OP_CONFIG_TX_QUEUE: return "VIRTCHNL_OP_CONFIG_TX_QUEUE"; case VIRTCHNL_OP_CONFIG_RX_QUEUE: return "VIRTCHNL_OP_CONFIG_RX_QUEUE"; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: return "VIRTCHNL_OP_CONFIG_VSI_QUEUES"; case VIRTCHNL_OP_CONFIG_IRQ_MAP: return "VIRTCHNL_OP_CONFIG_IRQ_MAP"; case VIRTCHNL_OP_ENABLE_QUEUES: return "VIRTCHNL_OP_ENABLE_QUEUES"; case VIRTCHNL_OP_DISABLE_QUEUES: return "VIRTCHNL_OP_DISABLE_QUEUES"; case VIRTCHNL_OP_ADD_ETH_ADDR: return "VIRTCHNL_OP_ADD_ETH_ADDR"; case VIRTCHNL_OP_DEL_ETH_ADDR: return "VIRTCHNL_OP_DEL_ETH_ADDR"; case VIRTCHNL_OP_ADD_VLAN: return "VIRTCHNL_OP_ADD_VLAN"; case VIRTCHNL_OP_DEL_VLAN: return "VIRTCHNL_OP_DEL_VLAN"; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: return "VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE"; case VIRTCHNL_OP_GET_STATS: return "VIRTCHNL_OP_GET_STATS"; case VIRTCHNL_OP_RSVD: return "VIRTCHNL_OP_RSVD"; case VIRTCHNL_OP_EVENT: return "VIRTCHNL_OP_EVENT"; case VIRTCHNL_OP_CONFIG_RSS_KEY: return "VIRTCHNL_OP_CONFIG_RSS_KEY"; case VIRTCHNL_OP_CONFIG_RSS_LUT: return "VIRTCHNL_OP_CONFIG_RSS_LUT"; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: return "VIRTCHNL_OP_GET_RSS_HENA_CAPS"; case VIRTCHNL_OP_SET_RSS_HENA: return "VIRTCHNL_OP_SET_RSS_HENA"; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING"; case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: return 
"VIRTCHNL_OP_DISABLE_VLAN_STRIPPING"; case VIRTCHNL_OP_REQUEST_QUEUES: return "VIRTCHNL_OP_REQUEST_QUEUES"; case VIRTCHNL_OP_ENABLE_CHANNELS: return "VIRTCHNL_OP_ENABLE_CHANNELS"; case VIRTCHNL_OP_DISABLE_CHANNELS: return "VIRTCHNL_OP_DISABLE_CHANNELS"; case VIRTCHNL_OP_ADD_CLOUD_FILTER: return "VIRTCHNL_OP_ADD_CLOUD_FILTER"; case VIRTCHNL_OP_DEL_CLOUD_FILTER: return "VIRTCHNL_OP_DEL_CLOUD_FILTER"; case VIRTCHNL_OP_GET_MAX_RSS_QREGION: return "VIRTCHNL_OP_GET_MAX_RSS_QREGION"; case VIRTCHNL_OP_ENABLE_QUEUES_V2: return "VIRTCHNL_OP_ENABLE_QUEUES_V2"; case VIRTCHNL_OP_DISABLE_QUEUES_V2: return "VIRTCHNL_OP_DISABLE_QUEUES_V2"; case VIRTCHNL_OP_MAP_QUEUE_VECTOR: return "VIRTCHNL_OP_MAP_QUEUE_VECTOR"; + case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: + return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS"; + case VIRTCHNL_OP_ADD_VLAN_V2: + return "VIRTCHNL_OP_ADD_VLAN_V2"; + case VIRTCHNL_OP_DEL_VLAN_V2: + return "VIRTCHNL_OP_DEL_VLAN_V2"; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + return "VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2"; + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + return "VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2"; + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + return "VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2"; + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + return "VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2"; + case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2: + return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2"; + case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2: + return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2"; case VIRTCHNL_OP_MAX: return "VIRTCHNL_OP_MAX"; default: return "Unsupported (update virtchnl.h)"; } } /* These macros are used to generate compilation errors if a structure/union * is not exactly the correct length. It gives a divide by zero error if the * structure/union is not of the correct size, otherwise it creates an enum * that is never used. */ #define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \ { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } #define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_asset_enum_##X \ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) } /* Virtual channel message descriptor. This overlays the admin queue * descriptor. All other data is passed in external buffers. */ struct virtchnl_msg { u8 pad[8]; /* AQ flags/opcode/len/retval fields */ - enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */ - enum virtchnl_status_code v_retval; /* ditto for desc->retval */ + + /* avoid confusion with desc->opcode */ + enum virtchnl_ops v_opcode; + + /* ditto for desc->retval */ + enum virtchnl_status_code v_retval; u32 vfid; /* used by PF when sending to VF */ }; VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); /* Message descriptions and data structures. */ /* VIRTCHNL_OP_VERSION * VF posts its version number to the PF. PF responds with its version number * in the same format, along with a return code. * Reply from PF has its major/minor versions also in param0 and param1. * If there is a major version mismatch, then the VF cannot operate. * If there is a minor version mismatch, then the VF can operate but should * add a warning to the system log. * * This enum element MUST always be specified as == 1, regardless of other * changes in the API. The PF must always respond to this message without * error regardless of version mismatch. 
*/ #define VIRTCHNL_VERSION_MAJOR 1 #define VIRTCHNL_VERSION_MINOR 1 +#define VIRTCHNL_VERSION_MAJOR_2 2 +#define VIRTCHNL_VERSION_MINOR_0 0 #define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0 struct virtchnl_version_info { u32 major; u32 minor; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info); #define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0)) #define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1)) /* VIRTCHNL_OP_RESET_VF * VF sends this request to PF with no parameters * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register * until reset completion is indicated. The admin queue must be reinitialized * after this operation. * * When reset is complete, PF must ensure that all queues in all VSIs associated * with the VF are stopped, all queue configurations in the HMC are set to 0, * and all MAC and VLAN filters (except the default MAC address) on all VSIs * are cleared. */ /* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV * vsi_type should always be 6 for backward compatibility. Add other fields * as needed. */ enum virtchnl_vsi_type { VIRTCHNL_VSI_TYPE_INVALID = 0, VIRTCHNL_VSI_SRIOV = 6, }; /* VIRTCHNL_OP_GET_VF_RESOURCES * Version 1.0 VF sends this request to PF with no parameters * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities * PF responds with an indirect message containing * virtchnl_vf_resource and one or more * virtchnl_vsi_resource structures. */ struct virtchnl_vsi_resource { u16 vsi_id; u16 num_queue_pairs; - enum virtchnl_vsi_type vsi_type; + + /* see enum virtchnl_vsi_type */ + s32 vsi_type; u16 qset_handle; u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; }; VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); /* VF capability flags * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including * TX/RX Checksum offloading and TSO for non-tunnelled packets. 
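 *
 * For example (illustrative request): a version 1.1 VF that wants base L2
 * behavior plus PF-programmed RSS would pass
 * (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF) as its capability
 * bitmap in VIRTCHNL_OP_GET_VF_RESOURCES, and would then be expected to
 * honor the subset the PF reports back in vf_cap_flags.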
*/ #define VIRTCHNL_VF_OFFLOAD_L2 0x00000001 #define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002 #define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004 #define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 #define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 #define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 #define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080 /* 0X00000100 is reserved */ #define VIRTCHNL_VF_LARGE_NUM_QPAIRS 0x00000200 +#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 0x00008000 #define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 #define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 #define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000 #define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000 #define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000 #define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000 #define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000 #define VIRTCHNL_VF_OFFLOAD_USO 0X02000000 /* 0x04000000 is reserved */ /* 0X08000000 and 0X10000000 are reserved */ /* 0X20000000 is reserved */ /* 0X40000000 is reserved */ + /* 0X80000000 is reserved */ /* Define below the capability flags that are not offloads */ #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080 #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ VIRTCHNL_VF_OFFLOAD_RSS_PF) struct virtchnl_vf_resource { u16 num_vsis; u16 num_queue_pairs; u16 max_vectors; u16 max_mtu; u32 vf_cap_flags; u32 rss_key_size; u32 rss_lut_size; struct virtchnl_vsi_resource vsi_res[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource); /* VIRTCHNL_OP_CONFIG_TX_QUEUE * VF sends this message to set up parameters for one TX queue. * External data buffer contains one instance of virtchnl_txq_info. * PF configures requested queue and returns a status code. */ /* Tx queue config info */ struct virtchnl_txq_info { u16 vsi_id; u16 queue_id; u16 ring_len; /* number of descriptors, multiple of 8 */ u16 headwb_enabled; /* deprecated with AVF 1.0 */ u64 dma_ring_addr; u64 dma_headwb_addr; /* deprecated with AVF 1.0 */ }; VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); /* VIRTCHNL_OP_CONFIG_RX_QUEUE * VF sends this message to set up parameters for one RX queue. * External data buffer contains one instance of virtchnl_rxq_info. * PF configures requested queue and returns a status code. The * crc_disable flag disables CRC stripping on the VF. Setting * the crc_disable flag to 1 will disable CRC stripping for each * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC * offload must have been set prior to sending this info or the PF * will ignore the request. This flag should be set the same for * all of the queues for a VF. */ /* Rx queue config info */ struct virtchnl_rxq_info { u16 vsi_id; u16 queue_id; u32 ring_len; /* number of descriptors, multiple of 32 */ u16 hdr_size; u16 splithdr_enabled; /* deprecated with AVF 1.0 */ u32 databuffer_size; u32 max_pkt_size; u8 crc_disable; u8 pad1[3]; u64 dma_ring_addr; - enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */ + + /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */ + s32 rx_split_pos; u32 pad2; }; VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info); /* VIRTCHNL_OP_CONFIG_VSI_QUEUES * VF sends this message to set parameters for active TX and RX queues * associated with the specified VSI. * PF configures queues and returns status. 
* If the number of queues specified is greater than the number of queues * associated with the VSI, an error is returned and no queues are configured. * NOTE: The VF is not required to configure all queues in a single request. * It may send multiple messages. PF drivers must correctly handle all VF * requests. */ struct virtchnl_queue_pair_info { /* NOTE: vsi_id and queue_id should be identical for both queues. */ struct virtchnl_txq_info txq; struct virtchnl_rxq_info rxq; }; VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info); struct virtchnl_vsi_queue_config_info { u16 vsi_id; u16 num_queue_pairs; u32 pad; struct virtchnl_queue_pair_info qpair[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); /* VIRTCHNL_OP_REQUEST_QUEUES * VF sends this message to request the PF to allocate additional queues to * this VF. Each VF gets a guaranteed number of queues on init but asking for * additional queues must be negotiated. This is a best effort request as it * is possible the PF does not have enough queues left to support the request. * If the PF cannot support the number requested it will respond with the * maximum number it is able to support. If the request is successful, PF will * then reset the VF to institute required changes. */ /* VF resource request */ struct virtchnl_vf_res_request { u16 num_queue_pairs; }; /* VIRTCHNL_OP_CONFIG_IRQ_MAP * VF uses this message to map vectors to queues. * The rxq_map and txq_map fields are bitmaps used to indicate which queues * are to be associated with the specified vector. * The "other" causes are always mapped to vector 0. The VF may not request * that vector 0 be used for traffic. * PF configures interrupt mapping and returns status. * NOTE: due to hardware requirements, all active queues (both TX and RX) * should be mapped to interrupts, even if the driver intends to operate * only in polling mode. In this case the interrupt may be disabled, but * the ITR timer will still run to trigger writebacks. */ struct virtchnl_vector_map { u16 vsi_id; u16 vector_id; u16 rxq_map; u16 txq_map; u16 rxitr_idx; u16 txitr_idx; }; VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map); struct virtchnl_irq_map_info { u16 num_vectors; struct virtchnl_vector_map vecmap[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info); /* VIRTCHNL_OP_ENABLE_QUEUES * VIRTCHNL_OP_DISABLE_QUEUES * VF sends these messages to enable or disable TX/RX queue pairs. * The queues fields are bitmaps indicating which queues to act upon. * (Currently, we only support 16 queues per VF, but we make the field * u32 to allow for expansion.) * PF performs requested action and returns status. * NOTE: The VF is not required to enable/disable all queues in a single * request. It may send multiple messages. * PF drivers must correctly handle all VF requests. */ struct virtchnl_queue_select { u16 vsi_id; u16 pad; u32 rx_queues; u32 tx_queues; }; VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select); /* VIRTCHNL_OP_GET_MAX_RSS_QREGION * * if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES * then this op must be supported. * * VF sends this message in order to query the max RSS queue region * size supported by PF, when VIRTCHNL_VF_LARGE_NUM_QPAIRS is enabled. * This information should be used when configuring the RSS LUT and/or * configuring queue region based filters. * * The maximum RSS queue region is 2^qregion_width. So, a qregion_width * of 6 would inform the VF that the PF supports a maximum RSS queue region * of 64.
* * A queue region represents a range of queues that can be used to configure * a RSS LUT. For example, if a VF is given 64 queues, but only a max queue * region size of 16 (i.e. 2^qregion_width = 16) then it will only be able * to configure the RSS LUT with queue indices from 0 to 15. However, other * filters can be used to direct packets to queues >15 via specifying a queue * base/offset and queue region width. */ struct virtchnl_max_rss_qregion { u16 vport_id; u16 qregion_width; u8 pad[4]; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_max_rss_qregion); /* VIRTCHNL_OP_ADD_ETH_ADDR * VF sends this message in order to add one or more unicast or multicast * address filters for the specified VSI. * PF adds the filters and returns status. */ /* VIRTCHNL_OP_DEL_ETH_ADDR * VF sends this message in order to remove one or more unicast or multicast * filters for the specified VSI. * PF removes the filters and returns status. */ /* VIRTCHNL_ETHER_ADDR_LEGACY * Prior to adding the @type member to virtchnl_ether_addr, there were 2 pad * bytes. Moving forward all VF drivers should not set type to * VIRTCHNL_ETHER_ADDR_LEGACY. This is only here to not break previous/legacy * behavior. The control plane function (i.e. PF) can use a best effort method * of tracking the primary/device unicast in this case, but there is no * guarantee and functionality depends on the implementation of the PF. */ /* VIRTCHNL_ETHER_ADDR_PRIMARY * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_PRIMARY for the * primary/device unicast MAC address filter for VIRTCHNL_OP_ADD_ETH_ADDR and * VIRTCHNL_OP_DEL_ETH_ADDR. This allows for the underlying control plane * function (i.e. PF) to accurately track and use this MAC address for * displaying on the host and for VM/function reset. */ /* VIRTCHNL_ETHER_ADDR_EXTRA * All VF drivers should set @type to VIRTCHNL_ETHER_ADDR_EXTRA for any extra * unicast and/or multicast filters that are being added/deleted via * VIRTCHNL_OP_DEL_ETH_ADDR/VIRTCHNL_OP_ADD_ETH_ADDR respectively. */ struct virtchnl_ether_addr { u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS]; u8 type; #define VIRTCHNL_ETHER_ADDR_LEGACY 0 #define VIRTCHNL_ETHER_ADDR_PRIMARY 1 #define VIRTCHNL_ETHER_ADDR_EXTRA 2 #define VIRTCHNL_ETHER_ADDR_TYPE_MASK 3 /* first two bits of type are valid */ u8 pad; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr); struct virtchnl_ether_addr_list { u16 vsi_id; u16 num_elements; struct virtchnl_ether_addr list[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list); /* VIRTCHNL_OP_ADD_VLAN * VF sends this message to add one or more VLAN tag filters for receives. * PF adds the filters and returns status. * If a port VLAN is configured by the PF, this operation will return an * error to the VF. */ /* VIRTCHNL_OP_DEL_VLAN * VF sends this message to remove one or more VLAN tag filters for receives. * PF removes the filters and returns status. * If a port VLAN is configured by the PF, this operation will return an * error to the VF. */ struct virtchnl_vlan_filter_list { u16 vsi_id; u16 num_elements; u16 vlan_id[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list); +/* This enum is used for all of the VIRTCHNL_VF_OFFLOAD_VLAN_V2_CAPS related + * structures and opcodes. + * + * VIRTCHNL_VLAN_UNSUPPORTED - This field is not supported and if a VF driver + * populates it the PF should return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED. + * + * VIRTCHNL_VLAN_ETHERTYPE_8100 - This field supports 0x8100 ethertype. + * VIRTCHNL_VLAN_ETHERTYPE_88A8 - This field supports 0x88A8 ethertype. 
+ * VIRTCHNL_VLAN_ETHERTYPE_9100 - This field supports 0x9100 ethertype.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_AND - Used when multiple ethertypes can be supported
+ * by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 AND VIRTCHNL_VLAN_ETHERTYPE_88A8 filters it
+ * would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * The VF would interpret this to mean that VLAN filtering can be supported on
+ * both 0x8100 and 0x88A8 VLAN ethertypes.
+ *
+ * VIRTCHNL_VLAN_ETHERTYPE_XOR - Used when only a single ethertype can be
+ * supported by the PF concurrently. For example, if the PF can support
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 XOR VIRTCHNL_VLAN_ETHERTYPE_88A8 stripping
+ * offload it would OR the following bits:
+ *
+ *	VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *	VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * The VF would interpret this to mean that VLAN stripping can be supported on
+ * either 0x8100 or 0x88a8 VLAN ethertypes. So when requesting VLAN stripping
+ * via VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 the specified ethertype will
+ * override the previously set value.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 - Used to tell the VF to insert and/or
+ * strip the VLAN tag using the L2TAG1 field of the Tx/Rx descriptors.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 - Used to tell the VF to insert hardware
+ * offloaded VLAN tags using the L2TAG2 field of the Tx descriptor.
+ *
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 - Used to tell the VF to strip hardware
+ * offloaded VLAN tags using the L2TAG2_2 field of the Rx descriptor.
+ *
+ * VIRTCHNL_VLAN_PRIO - This field supports VLAN priority bits. This is used for
+ * VLAN filtering if the underlying PF supports it.
+ *
+ * VIRTCHNL_VLAN_TOGGLE - This field is used to say whether a
+ * certain VLAN capability can be toggled. For example, if the underlying PF/CP
+ * allows the VF to toggle VLAN filtering, stripping, and/or insertion it should
+ * set this bit along with the supported ethertypes.
+ */
+enum virtchnl_vlan_support {
+	VIRTCHNL_VLAN_UNSUPPORTED =		0,
+	VIRTCHNL_VLAN_ETHERTYPE_8100 =		0x00000001,
+	VIRTCHNL_VLAN_ETHERTYPE_88A8 =		0x00000002,
+	VIRTCHNL_VLAN_ETHERTYPE_9100 =		0x00000004,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1 =	0x00000100,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2 =	0x00000200,
+	VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 =	0x00000400,
+	VIRTCHNL_VLAN_PRIO =			0x01000000,
+	VIRTCHNL_VLAN_FILTER_MASK =		0x10000000,
+	VIRTCHNL_VLAN_ETHERTYPE_AND =		0x20000000,
+	VIRTCHNL_VLAN_ETHERTYPE_XOR =		0x40000000,
+	VIRTCHNL_VLAN_TOGGLE =			0x80000000
+};
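+
+/* Illustration only: a hypothetical VF-side helper (not part of this header)
+ * showing how the capability bits above are meant to be tested, e.g. whether
+ * 0x8100 filtering is both supported and allowed to be toggled.
+ */
+static inline int
+virtchnl_ex_can_toggle_8100(u32 caps_field)
+{
+	/* both the toggle bit and the ethertype bit must be advertised */
+	return (caps_field & VIRTCHNL_VLAN_TOGGLE) &&
+	       (caps_field & VIRTCHNL_VLAN_ETHERTYPE_8100);
+}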
+
+/* This structure is used as part of the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS
+ * for filtering, insertion, and stripping capabilities.
+ *
+ * If only outer capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective.
+ *
+ * If only inner capabilities are supported (for filtering, insertion, and/or
+ * stripping) then this refers to the outer most or single VLAN from the VF's
+ * perspective. Functionally this is the same as if only outer capabilities are
+ * supported. The VF driver is just forced to use the inner fields when
+ * adding/deleting filters and enabling/disabling offloads (if supported).
+ *
+ * If both outer and inner capabilities are supported (for filtering, insertion,
+ * and/or stripping) then outer refers to the outer most or single VLAN and
+ * inner refers to the second VLAN, if it exists, in the packet.
+ *
+ * There is no support for tunneled VLAN offloads, so outer or inner are never
+ * referring to a tunneled packet from the VF's perspective.
+ */
+struct virtchnl_vlan_supported_caps {
+	u32 outer;
+	u32 inner;
+};
+
+/* The PF populates these fields based on the supported VLAN filtering. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ADD_VLAN_V2 or VIRTCHNL_OP_DEL_VLAN_V2 messages using
+ * the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN filtering setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The ethertype(s) specified in the ethertype_init field are the ethertypes
+ * enabled for VLAN filtering. VLAN filtering in this case refers to the outer
+ * most VLAN from the VF's perspective. If both inner and outer filtering are
+ * allowed then ethertype_init only refers to the outer most VLAN, as the only
+ * VLAN ethertype supported for inner VLAN filtering is
+ * VIRTCHNL_VLAN_ETHERTYPE_8100. By default, inner VLAN filtering is disabled
+ * when both inner and outer filtering are allowed.
+ *
+ * The max_filters field tells the VF how many VLAN filters it's allowed to have
+ * at any one time. If it exceeds this amount and tries to add another filter,
+ * then the request will be rejected by the PF. To prevent failures, the VF
+ * should keep track of how many VLAN filters it has added and not attempt to
+ * add more than max_filters.
+ */
+struct virtchnl_vlan_filtering_caps {
+	struct virtchnl_vlan_supported_caps filtering_support;
+	u32 ethertype_init;
+	u16 max_filters;
+	u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_filtering_caps);
+
+/* This enum is used for the virtchnl_vlan_offload_caps structure to specify
+ * if the PF supports a different ethertype for stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION - The ethertype(s) specified
+ * for stripping affect the ethertype(s) specified for insertion and vice
+ * versa as well. If the VF tries to configure VLAN stripping via
+ * VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 with VIRTCHNL_VLAN_ETHERTYPE_8100 then
+ * that will be the ethertype for both stripping and insertion.
+ *
+ * VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED - The ethertype(s) specified for
+ * stripping do not affect the ethertype(s) specified for insertion and vice
+ * versa.
+ */
+enum virtchnl_vlan_ethertype_match {
+	VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION = 0,
+	VIRTCHNL_ETHERTYPE_MATCH_NOT_REQUIRED = 1,
+};
+
+/* The PF populates these fields based on the supported VLAN offloads. If a
+ * field is VIRTCHNL_VLAN_UNSUPPORTED then it's not supported and the PF will
+ * reject any VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 or
+ * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 messages using the unsupported fields.
+ *
+ * Also, a VF is only allowed to toggle its VLAN offload setting if the
+ * VIRTCHNL_VLAN_TOGGLE bit is set.
+ *
+ * The VF driver needs to be aware of how the tags are stripped by hardware and
+ * inserted by the VF driver based on the level of offload support. The PF will
+ * populate these fields based on where the VLAN tags are expected to be
+ * offloaded via the VIRTCHNL_VLAN_TAG_LOCATION_* bits. The VF will need to
+ * interpret these fields, e.g. as sketched below.
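+ *
+ * (The helper below is an illustration only: hypothetical, and not part of
+ * this header.)
+ */
+static inline int
+virtchnl_ex_outer_strip_in_l2tag2_2(const struct virtchnl_vlan_supported_caps *caps)
+{
+	/* non-zero if outer tags are stripped into the Rx L2TAG2_2 field */
+	return (caps->outer & VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2) != 0;
+}
+/*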
See the definition of the + * VIRTCHNL_VLAN_TAG_LOCATION_* bits above the virtchnl_vlan_support + * enumeration. + */ +struct virtchnl_vlan_offload_caps { + struct virtchnl_vlan_supported_caps stripping_support; + struct virtchnl_vlan_supported_caps insertion_support; + u32 ethertype_init; + u8 ethertype_match; + u8 pad[3]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_vlan_offload_caps); + +/* VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS + * VF sends this message to determine its VLAN capabilities. + * + * PF will mark which capabilities it supports based on hardware support and + * current configuration. For example, if a port VLAN is configured the PF will + * not allow outer VLAN filtering, stripping, or insertion to be configured so + * it will block these features from the VF. + * + * The VF will need to cross reference its capabilities with the PFs + * capabilities in the response message from the PF to determine the VLAN + * support. + */ +struct virtchnl_vlan_caps { + struct virtchnl_vlan_filtering_caps filtering; + struct virtchnl_vlan_offload_caps offloads; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_caps); + +struct virtchnl_vlan { + u16 tci; /* tci[15:13] = PCP and tci[11:0] = VID */ + u16 tci_mask; /* only valid if VIRTCHNL_VLAN_FILTER_MASK set in + * filtering caps + */ + u16 tpid; /* 0x8100, 0x88a8, etc. and only type(s) set in + * filtering caps. Note that tpid here does not refer to + * VIRTCHNL_VLAN_ETHERTYPE_*, but it refers to the + * actual 2-byte VLAN TPID + */ + u8 pad[2]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_vlan); + +struct virtchnl_vlan_filter { + struct virtchnl_vlan inner; + struct virtchnl_vlan outer; + u8 pad[16]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(32, virtchnl_vlan_filter); + +/* VIRTCHNL_OP_ADD_VLAN_V2 + * VIRTCHNL_OP_DEL_VLAN_V2 + * + * VF sends these messages to add/del one or more VLAN tag filters for Rx + * traffic. + * + * The PF attempts to add the filters and returns status. + * + * The VF should only ever attempt to add/del virtchnl_vlan_filter(s) using the + * supported fields negotiated via VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS. + */ +struct virtchnl_vlan_filter_list_v2 { + u16 vport_id; + u16 num_elements; + u8 pad[4]; + struct virtchnl_vlan_filter filters[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_vlan_filter_list_v2); + +/* VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 + * VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 + * VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 + * VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 + * + * VF sends this message to enable or disable VLAN stripping or insertion. It + * also needs to specify an ethertype. The VF knows which VLAN ethertypes are + * allowed and whether or not it's allowed to enable/disable the specific + * offload via the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to + * parse the virtchnl_vlan_caps.offloads fields to determine which offload + * messages are allowed. + * + * For example, if the PF populates the virtchnl_vlan_caps.offloads in the + * following manner the VF will be allowed to enable and/or disable 0x8100 inner + * VLAN insertion and/or stripping via the opcodes listed above. Inner in this + * case means the outer most or single VLAN from the VF's perspective. This is + * because no outer offloads are supported. See the comments above the + * virtchnl_vlan_supported_caps structure for more details. 
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.inner =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * In order to enable inner (again note that in this case inner is the outer
+ * most or single VLAN from the VF's perspective) VLAN stripping for 0x8100
+ * VLANs, the VF would populate the virtchnl_vlan_setting structure in the
+ * following manner and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.inner_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * The reason that VLAN TPID(s) are not being used for the
+ * outer_ethertype_setting and inner_ethertype_setting fields is because it's
+ * possible a device could support VLAN insertion and/or stripping offload on
+ * multiple ethertypes concurrently, so this method allows a VF to request
+ * multiple ethertypes in one message using the virtchnl_vlan_support
+ * enumeration.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.offloads in the
+ * following manner the VF will be allowed to enable 0x8100 and 0x88a8 outer
+ * VLAN insertion and stripping simultaneously. The
+ * virtchnl_vlan_caps.offloads.ethertype_match field will also have to be
+ * populated based on what the PF can support.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN stripping for 0x8100 and 0x88a8 VLANs, the VF
+ * would populate the virtchnl_vlan_setting structure in the following manner
+ * and send the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2 message.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * There is also the case where a PF and the underlying hardware can support
+ * VLAN offloads on multiple ethertypes, but not concurrently. For example, if
+ * the PF populates the virtchnl_vlan_caps.offloads in the following manner the
+ * VF will be allowed to enable and/or disable 0x8100 XOR 0x88a8 outer VLAN
+ * offloads. The ethertypes must match for stripping and insertion.
+ *
+ * virtchnl_vlan_caps.offloads.stripping_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.insertion_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_XOR;
+ *
+ * virtchnl_vlan_caps.offloads.ethertype_match =
+ *			VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ *
+ * In order to enable outer VLAN stripping for 0x88a8 VLANs, the VF would
+ * populate the virtchnl_vlan_setting structure in the following manner and send
+ * the VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2. Also, this will change the
+ * ethertype for VLAN insertion if it's enabled.
+ * So, for completeness, a VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 with the same
+ * ethertype should be sent.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ * virtchnl_vlan_setting.vport_id = vport_id or vsi_id assigned to the VF on
+ * initialization.
+ *
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2
+ *
+ * VF sends this message to enable or disable VLAN filtering. It also needs to
+ * specify an ethertype. The VF knows which VLAN ethertypes are allowed and
+ * whether or not it's allowed to enable/disable filtering via the
+ * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS message. The VF needs to
+ * parse the virtchnl_vlan_caps.filtering fields to determine which, if any,
+ * filtering messages are allowed.
+ *
+ * For example, if the PF populates the virtchnl_vlan_caps.filtering in the
+ * following manner the VF will be allowed to enable/disable 0x8100 and 0x88a8
+ * outer VLAN filtering together. Note that VIRTCHNL_VLAN_ETHERTYPE_AND
+ * means that all filtering ethertypes will be enabled and disabled together
+ * regardless of the request from the VF. This means that the underlying
+ * hardware only supports VLAN filtering for all of the specified ethertypes
+ * or none of them.
+ *
+ * virtchnl_vlan_caps.filtering.filtering_support.outer =
+ *			VIRTCHNL_VLAN_TOGGLE |
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_AND;
+ *
+ * In order to enable outer VLAN filtering for 0x88a8 and 0x8100 VLANs (0x9100
+ * VLANs aren't supported by the VF driver), the VF would populate the
+ * virtchnl_vlan_setting structure in the following manner and send the
+ * VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2. The same message format would be used
+ * to disable outer VLAN filtering for 0x88a8 and 0x8100 VLANs, but the
+ * VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2 opcode is used.
+ *
+ * virtchnl_vlan_setting.outer_ethertype_setting =
+ *			VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ *			VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ *
+ */
+struct virtchnl_vlan_setting {
+	u32 outer_ethertype_setting;
+	u32 inner_ethertype_setting;
+	u16 vport_id;
+	u8 pad[6];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vlan_setting);
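+
+/* Illustration only: a hypothetical VF-side helper (not part of this header)
+ * that fills the message for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2, assuming
+ * the PF advertised togglable 0x8100 stripping on the inner/single VLAN.
+ */
+static inline void
+virtchnl_ex_fill_strip_8100(struct virtchnl_vlan_setting *s, u16 vport_id)
+{
+	/* zero the whole message first so the pad bytes are clean */
+	*s = (struct virtchnl_vlan_setting){ 0 };
+	/* request stripping of 0x8100 tags on the inner/single VLAN */
+	s->inner_ethertype_setting = VIRTCHNL_VLAN_ETHERTYPE_8100;
+	s->vport_id = vport_id;
+}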

/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
 * VF sends VSI id and flags.
 * PF returns status code in retval.
 * Note: we assume that broadcast accept mode is always enabled.
 */
struct virtchnl_promisc_info {
	u16 vsi_id;
	u16 flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);

#define FLAG_VF_UNICAST_PROMISC	0x00000001
#define FLAG_VF_MULTICAST_PROMISC	0x00000002

/* VIRTCHNL_OP_GET_STATS
 * VF sends this message to request stats for the selected VSI. VF uses
 * the virtchnl_queue_select struct to specify the VSI. The queue_id
 * field is ignored by the PF.
 *
 * PF replies with struct virtchnl_eth_stats in an external buffer.
 */
struct virtchnl_eth_stats {
	u64 rx_bytes;			/* received bytes */
	u64 rx_unicast;			/* received unicast pkts */
	u64 rx_multicast;		/* received multicast pkts */
	u64 rx_broadcast;		/* received broadcast pkts */
	u64 rx_discards;
	u64 rx_unknown_protocol;
	u64 tx_bytes;			/* transmitted bytes */
	u64 tx_unicast;			/* transmitted unicast pkts */
	u64 tx_multicast;		/* transmitted multicast pkts */
	u64 tx_broadcast;		/* transmitted broadcast pkts */
	u64 tx_discards;
	u64 tx_errors;
};

/* VIRTCHNL_OP_CONFIG_RSS_KEY
 * VIRTCHNL_OP_CONFIG_RSS_LUT
 * VF sends these messages to configure RSS. Only supported if both PF
 * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
 * configuration negotiation. If this is the case, then the RSS fields in
 * the VF resource struct are valid.
 * Both the key and LUT are initialized to 0 by the PF, meaning that
 * RSS is effectively disabled until set up by the VF.
 */
struct virtchnl_rss_key {
	u16 vsi_id;
	u16 key_len;
	u8 key[1];	/* RSS hash key, packed bytes */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);

struct virtchnl_rss_lut {
	u16 vsi_id;
	u16 lut_entries;
	u8 lut[1];	/* RSS lookup table */
};

VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);

/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
 * VIRTCHNL_OP_SET_RSS_HENA
 * VF sends these messages to get and set the hash filter enable bits for RSS.
 * By default, the PF sets these to all possible traffic types that the
 * hardware supports. The VF can query this value if it wants to change the
 * traffic types that are hashed by the hardware.
 */
struct virtchnl_rss_hena {
	u64 hena;
};

VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);

/* This is used by PF driver to enforce how many channels can be supported.
 * When ADQ_V2 capability is negotiated, it will allow 16 channels otherwise
 * PF driver will allow only max 4 channels */
#define VIRTCHNL_MAX_ADQ_CHANNELS 4
#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16

/* VIRTCHNL_OP_ENABLE_CHANNELS
 * VIRTCHNL_OP_DISABLE_CHANNELS
 * VF sends these messages to enable or disable channels based on
 * the user specified queue count and queue offset for each traffic class.
 * This struct encompasses all the information that the PF needs from
 * VF to create a channel.
 */
struct virtchnl_channel_info {
	u16 count; /* number of queues in a channel */
	u16 offset; /* queues in a channel start from 'offset' */
	u32 pad;
	u64 max_tx_rate;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);

struct virtchnl_tc_info {
	u32	num_tc;
	u32	pad;
	struct	virtchnl_channel_info list[1];
};

VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
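
/* Illustration only: a hypothetical sketch (not part of this header) of the
 * buffer size a VF needs for VIRTCHNL_OP_ENABLE_CHANNELS; the validator below
 * applies the same arithmetic when checking the message length. Assumes
 * num_tc >= 1, which the validator also enforces.
 */
static inline u32
virtchnl_ex_tc_info_size(u32 num_tc)
{
	/* one virtchnl_channel_info is already counted inside the struct */
	return (u32)(sizeof(struct virtchnl_tc_info) +
		     (num_tc - 1) * sizeof(struct virtchnl_channel_info));
}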

/* VIRTCHNL_ADD_CLOUD_FILTER
 * VIRTCHNL_DEL_CLOUD_FILTER
 * VF sends these messages to add or delete a cloud filter based on the
 * user specified match and action filters. These structures encompass
 * all the information that the PF needs from the VF to add/delete a
 * cloud filter.
 */
struct virtchnl_l4_spec {
	u8	src_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	u8	dst_mac[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
	/* vlan_prio is part of this 16-bit field, even from the OS
	 * perspective: bits 11..0 hold the actual VLAN ID and bits 14..12
	 * are reserved for vlan_prio. If vlan_prio offload is added in the
	 * future, that information will be passed as part of the "vlan_id"
	 * field, in bits 14..12.
	 */
	__be16	vlan_id;
	__be16	pad; /* reserved for future use */
	__be32	src_ip[4];
	__be32	dst_ip[4];
	__be16	src_port;
	__be16	dst_port;
};

VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);

union virtchnl_flow_spec {
	struct	virtchnl_l4_spec tcp_spec;
	u8	buffer[128]; /* reserved for future use */
};

VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);

enum virtchnl_action {
	/* action types */
	VIRTCHNL_ACTION_DROP = 0,
	VIRTCHNL_ACTION_TC_REDIRECT,
	VIRTCHNL_ACTION_PASSTHRU,
	VIRTCHNL_ACTION_QUEUE,
	VIRTCHNL_ACTION_Q_REGION,
	VIRTCHNL_ACTION_MARK,
	VIRTCHNL_ACTION_COUNT,
};

enum virtchnl_flow_type {
	/* flow types */
	VIRTCHNL_TCP_V4_FLOW = 0,
	VIRTCHNL_TCP_V6_FLOW,
	VIRTCHNL_UDP_V4_FLOW,
	VIRTCHNL_UDP_V6_FLOW,
};

struct virtchnl_filter {
	union	virtchnl_flow_spec data;
	union	virtchnl_flow_spec mask;
-	enum	virtchnl_flow_type flow_type;
-	enum	virtchnl_action action;
+
+	/* see enum virtchnl_flow_type */
+	s32	flow_type;
+
+	/* see enum virtchnl_action */
+	s32	action;
	u32	action_meta;
	u8	field_flags;
};

VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);

/* VIRTCHNL_OP_EVENT
 * PF sends this message to inform the VF driver of events that may affect it.
 * No direct response is expected from the VF, though it may generate other
 * messages in response to this one.
 */
enum virtchnl_event_codes {
	VIRTCHNL_EVENT_UNKNOWN = 0,
	VIRTCHNL_EVENT_LINK_CHANGE,
	VIRTCHNL_EVENT_RESET_IMPENDING,
	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
};

#define PF_EVENT_SEVERITY_INFO			0
#define PF_EVENT_SEVERITY_ATTENTION		1
#define PF_EVENT_SEVERITY_ACTION_REQUIRED	2
#define PF_EVENT_SEVERITY_CERTAIN_DOOM		255

struct virtchnl_pf_event {
-	enum virtchnl_event_codes event;
+	/* see enum virtchnl_event_codes */
+	s32 event;
	union {
		/* If the PF driver does not support the new speed reporting
		 * capabilities then use link_event else use link_event_adv to
		 * get the speed and link information. The ability to understand
		 * new speeds is indicated by setting the capability flag
		 * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
		 * in virtchnl_vf_resource struct and can be used to determine
		 * which link event struct to use below.
		 */
		struct {
			enum virtchnl_link_speed link_speed;
-			u8 link_status;
+			bool link_status;
+			u8 pad[3];
		} link_event;
		struct {
			/* link_speed provided in Mbps */
			u32 link_speed;
			u8 link_status;
+			u8 pad[3];
		} link_event_adv;
+		struct {
+			/* link_speed provided in Mbps */
+			u32 link_speed;
+			u16 vport_id;
+			u8 link_status;
+			u8 pad;
+		} link_event_adv_vport;
	} event_data;
-	int severity;
+	s32 severity;
};

VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);

/* VF reset states - these are written into the RSTAT register:
 * VFGEN_RSTAT on the VF
 * When the PF initiates a reset, it writes 0
 * When the reset is complete, it writes 1
 * When the PF detects that the VF has recovered, it writes 2
 * VF checks this register periodically to determine if a reset has occurred,
 * then polls it to know when the reset is complete.
 * If either the PF or VF reads the register while the hardware
 * is in a reset state, it will return DEADBEEF, which, when masked,
 * will result in 3.
*/ enum virtchnl_vfr_states { VIRTCHNL_VFR_INPROGRESS = 0, VIRTCHNL_VFR_COMPLETED, VIRTCHNL_VFR_VFACTIVE, }; /* TX and RX queue types are valid in legacy as well as split queue models. * With Split Queue model, 2 additional types are introduced - TX_COMPLETION * and RX_BUFFER. In split queue model, RX corresponds to the queue where HW * posts completions. */ enum virtchnl_queue_type { VIRTCHNL_QUEUE_TYPE_TX = 0, VIRTCHNL_QUEUE_TYPE_RX = 1, VIRTCHNL_QUEUE_TYPE_TX_COMPLETION = 2, VIRTCHNL_QUEUE_TYPE_RX_BUFFER = 3, VIRTCHNL_QUEUE_TYPE_CONFIG_TX = 4, VIRTCHNL_QUEUE_TYPE_CONFIG_RX = 5 }; /* structure to specify a chunk of contiguous queues */ struct virtchnl_queue_chunk { - enum virtchnl_queue_type type; + /* see enum virtchnl_queue_type */ + s32 type; u16 start_queue_id; u16 num_queues; }; VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk); /* structure to specify several chunks of contiguous queues */ struct virtchnl_queue_chunks { u16 num_chunks; u16 rsvd; struct virtchnl_queue_chunk chunks[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_chunks); /* VIRTCHNL_OP_ENABLE_QUEUES_V2 * VIRTCHNL_OP_DISABLE_QUEUES_V2 * VIRTCHNL_OP_DEL_QUEUES * - * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES + * If VIRTCHNL version was negotiated in VIRTCHNL_OP_VERSION as 2.0 * then all of these ops are available. * * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES * then VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 are * available. * * PF sends these messages to enable, disable or delete queues specified in * chunks. PF sends virtchnl_del_ena_dis_queues struct to specify the queues * to be enabled/disabled/deleted. Also applicable to single queue RX or * TX. CP performs requested action and returns status. */ struct virtchnl_del_ena_dis_queues { u16 vport_id; u16 pad; struct virtchnl_queue_chunks chunks; }; VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_del_ena_dis_queues); /* Virtchannel interrupt throttling rate index */ enum virtchnl_itr_idx { VIRTCHNL_ITR_IDX_0 = 0, VIRTCHNL_ITR_IDX_1 = 1, VIRTCHNL_ITR_IDX_NO_ITR = 3, }; /* Queue to vector mapping */ struct virtchnl_queue_vector { u16 queue_id; u16 vector_id; u8 pad[4]; - enum virtchnl_itr_idx itr_idx; - enum virtchnl_queue_type queue_type; + + /* see enum virtchnl_itr_idx */ + s32 itr_idx; + + /* see enum virtchnl_queue_type */ + s32 queue_type; }; VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queue_vector); /* VIRTCHNL_OP_MAP_QUEUE_VECTOR - * VIRTCHNL_OP_UNMAP_QUEUE_VECTOR - * - * If VIRTCHNL_CAP_EXT_FEATURES was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES - * then all of these ops are available. * * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES * then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available. * * PF sends this message to map or unmap queues to vectors and ITR index * registers. External data buffer contains virtchnl_queue_vector_maps structure * that contains num_qv_maps of virtchnl_queue_vector structures. * CP maps the requested queue vector maps after validating the queue and vector * ids and returns a status code. */ struct virtchnl_queue_vector_maps { u16 vport_id; u16 num_qv_maps; u8 pad[4]; struct virtchnl_queue_vector qv_maps[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps); /* Since VF messages are limited by u16 size, precalculate the maximum possible * values of nested elements in virtchnl structures that virtual channel can * possibly handle in a single message. 
*/ enum virtchnl_vector_limits { VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX = ((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) / sizeof(struct virtchnl_queue_pair_info), VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX = ((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) / sizeof(struct virtchnl_vector_map), VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX = ((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) / sizeof(struct virtchnl_ether_addr), VIRTCHNL_OP_ADD_DEL_VLAN_MAX = ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16), VIRTCHNL_OP_ENABLE_CHANNELS_MAX = ((u16)(~0) - sizeof(struct virtchnl_tc_info)) / sizeof(struct virtchnl_channel_info), VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX = ((u16)(~0) - sizeof(struct virtchnl_del_ena_dis_queues)) / sizeof(struct virtchnl_queue_chunk), VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX = ((u16)(~0) - sizeof(struct virtchnl_queue_vector_maps)) / sizeof(struct virtchnl_queue_vector), + + VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX = + ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list_v2)) / + sizeof(struct virtchnl_vlan_filter), }; /** * virtchnl_vc_validate_vf_msg * @ver: Virtchnl version info * @v_opcode: Opcode for the message * @msg: pointer to the msg buffer * @msglen: msg length * * validate msg format against struct for each opcode */ static inline int virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, u8 *msg, u16 msglen) { bool err_msg_format = false; u32 valid_len = 0; /* Validate message length. */ switch (v_opcode) { case VIRTCHNL_OP_VERSION: valid_len = sizeof(struct virtchnl_version_info); break; case VIRTCHNL_OP_RESET_VF: break; case VIRTCHNL_OP_GET_VF_RESOURCES: if (VF_IS_V11(ver)) valid_len = sizeof(u32); break; case VIRTCHNL_OP_CONFIG_TX_QUEUE: valid_len = sizeof(struct virtchnl_txq_info); break; case VIRTCHNL_OP_CONFIG_RX_QUEUE: valid_len = sizeof(struct virtchnl_rxq_info); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: valid_len = sizeof(struct virtchnl_vsi_queue_config_info); if (msglen >= valid_len) { struct virtchnl_vsi_queue_config_info *vqc = (struct virtchnl_vsi_queue_config_info *)msg; if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs > VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) { err_msg_format = true; break; } valid_len += (vqc->num_queue_pairs * sizeof(struct virtchnl_queue_pair_info)); } break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: valid_len = sizeof(struct virtchnl_irq_map_info); if (msglen >= valid_len) { struct virtchnl_irq_map_info *vimi = (struct virtchnl_irq_map_info *)msg; if (vimi->num_vectors == 0 || vimi->num_vectors > VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) { err_msg_format = true; break; } valid_len += (vimi->num_vectors * sizeof(struct virtchnl_vector_map)); } break; case VIRTCHNL_OP_ENABLE_QUEUES: case VIRTCHNL_OP_DISABLE_QUEUES: valid_len = sizeof(struct virtchnl_queue_select); break; case VIRTCHNL_OP_GET_MAX_RSS_QREGION: break; case VIRTCHNL_OP_ADD_ETH_ADDR: case VIRTCHNL_OP_DEL_ETH_ADDR: valid_len = sizeof(struct virtchnl_ether_addr_list); if (msglen >= valid_len) { struct virtchnl_ether_addr_list *veal = (struct virtchnl_ether_addr_list *)msg; if (veal->num_elements == 0 || veal->num_elements > VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) { err_msg_format = true; break; } valid_len += veal->num_elements * sizeof(struct virtchnl_ether_addr); } break; case VIRTCHNL_OP_ADD_VLAN: case VIRTCHNL_OP_DEL_VLAN: valid_len = sizeof(struct virtchnl_vlan_filter_list); if (msglen >= valid_len) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; if (vfl->num_elements == 0 || vfl->num_elements > 
VIRTCHNL_OP_ADD_DEL_VLAN_MAX) { err_msg_format = true; break; } valid_len += vfl->num_elements * sizeof(u16); } break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: valid_len = sizeof(struct virtchnl_promisc_info); break; case VIRTCHNL_OP_GET_STATS: valid_len = sizeof(struct virtchnl_queue_select); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: valid_len = sizeof(struct virtchnl_rss_key); if (msglen >= valid_len) { struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; if (vrk->key_len == 0) { /* zero length is allowed as input */ break; } valid_len += vrk->key_len - 1; } break; case VIRTCHNL_OP_CONFIG_RSS_LUT: valid_len = sizeof(struct virtchnl_rss_lut); if (msglen >= valid_len) { struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; if (vrl->lut_entries == 0) { /* zero entries is allowed as input */ break; } valid_len += vrl->lut_entries - 1; } break; case VIRTCHNL_OP_GET_RSS_HENA_CAPS: break; case VIRTCHNL_OP_SET_RSS_HENA: valid_len = sizeof(struct virtchnl_rss_hena); break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: break; case VIRTCHNL_OP_REQUEST_QUEUES: valid_len = sizeof(struct virtchnl_vf_res_request); break; case VIRTCHNL_OP_ENABLE_CHANNELS: valid_len = sizeof(struct virtchnl_tc_info); if (msglen >= valid_len) { struct virtchnl_tc_info *vti = (struct virtchnl_tc_info *)msg; if (vti->num_tc == 0 || vti->num_tc > VIRTCHNL_OP_ENABLE_CHANNELS_MAX) { err_msg_format = true; break; } valid_len += (vti->num_tc - 1) * sizeof(struct virtchnl_channel_info); } break; case VIRTCHNL_OP_DISABLE_CHANNELS: break; case VIRTCHNL_OP_ADD_CLOUD_FILTER: case VIRTCHNL_OP_DEL_CLOUD_FILTER: valid_len = sizeof(struct virtchnl_filter); break; + case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: + break; + case VIRTCHNL_OP_ADD_VLAN_V2: + case VIRTCHNL_OP_DEL_VLAN_V2: + valid_len = sizeof(struct virtchnl_vlan_filter_list_v2); + if (msglen >= valid_len) { + struct virtchnl_vlan_filter_list_v2 *vfl = + (struct virtchnl_vlan_filter_list_v2 *)msg; + + if (vfl->num_elements == 0 || vfl->num_elements > + VIRTCHNL_OP_ADD_DEL_VLAN_V2_MAX) { + err_msg_format = true; + break; + } + + valid_len += (vfl->num_elements - 1) * + sizeof(struct virtchnl_vlan_filter); + } + break; + case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2: + case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2: + case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2: + case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: + case VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2: + case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2: + valid_len = sizeof(struct virtchnl_vlan_setting); + break; case VIRTCHNL_OP_ENABLE_QUEUES_V2: case VIRTCHNL_OP_DISABLE_QUEUES_V2: valid_len = sizeof(struct virtchnl_del_ena_dis_queues); if (msglen >= valid_len) { struct virtchnl_del_ena_dis_queues *qs = (struct virtchnl_del_ena_dis_queues *)msg; if (qs->chunks.num_chunks == 0 || qs->chunks.num_chunks > VIRTCHNL_OP_ENABLE_DISABLE_DEL_QUEUES_V2_MAX) { err_msg_format = true; break; } valid_len += (qs->chunks.num_chunks - 1) * sizeof(struct virtchnl_queue_chunk); } break; case VIRTCHNL_OP_MAP_QUEUE_VECTOR: valid_len = sizeof(struct virtchnl_queue_vector_maps); if (msglen >= valid_len) { struct virtchnl_queue_vector_maps *v_qp = (struct virtchnl_queue_vector_maps *)msg; if (v_qp->num_qv_maps == 0 || v_qp->num_qv_maps > VIRTCHNL_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) { err_msg_format = true; break; } valid_len += (v_qp->num_qv_maps - 1) * sizeof(struct virtchnl_queue_vector); } break; /* These are always errors coming from the VF. 
*/ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: default: return VIRTCHNL_STATUS_ERR_PARAM; } /* few more checks */ if (err_msg_format || valid_len != msglen) return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; return 0; } #endif /* _VIRTCHNL_H_ */ diff --git a/sys/dev/ice/virtchnl_inline_ipsec.h b/sys/dev/ice/virtchnl_inline_ipsec.h index 9da16a153f48..438a3022e7e8 100644 --- a/sys/dev/ice/virtchnl_inline_ipsec.h +++ b/sys/dev/ice/virtchnl_inline_ipsec.h @@ -1,594 +1,594 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2021, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _VIRTCHNL_INLINE_IPSEC_H_ #define _VIRTCHNL_INLINE_IPSEC_H_ #define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM 3 #define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM 16 #define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM 128 #define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2 #define VIRTCHNL_IPSEC_MAX_KEY_LEN 128 #define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8 #define VIRTCHNL_IPSEC_SA_DESTROY 0 #define VIRTCHNL_IPSEC_BROADCAST_VFID 0xFFFFFFFF #define VIRTCHNL_IPSEC_INVALID_REQ_ID 0xFFFF #define VIRTCHNL_IPSEC_INVALID_SA_CFG_RESP 0xFFFFFFFF #define VIRTCHNL_IPSEC_INVALID_SP_CFG_RESP 0xFFFFFFFF /* crypto type */ #define VIRTCHNL_AUTH 1 #define VIRTCHNL_CIPHER 2 #define VIRTCHNL_AEAD 3 /* algorithm type */ /* Hash Algorithm */ #define VIRTCHNL_HASH_NO_ALG 0 /* NULL algorithm */ #define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */ #define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */ #define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */ #define VIRTCHNL_AES_XCBC_MAC 4 /* AES XCBC algorithm */ #define VIRTCHNL_MD5_HMAC 5 /* HMAC using MD5 algorithm */ #define VIRTCHNL_SHA1_HMAC 6 /* HMAC using 128 bit SHA algorithm */ #define VIRTCHNL_SHA224_HMAC 7 /* HMAC using 224 bit SHA algorithm */ #define VIRTCHNL_SHA256_HMAC 8 /* HMAC using 256 bit SHA algorithm */ #define VIRTCHNL_SHA384_HMAC 9 /* HMAC using 384 bit SHA algorithm */ #define VIRTCHNL_SHA512_HMAC 10 /* HMAC using 512 bit SHA algorithm */ #define VIRTCHNL_SHA3_224_HMAC 11 /* HMAC using 224 bit SHA3 algorithm */ #define VIRTCHNL_SHA3_256_HMAC 12 /* HMAC using 256 bit SHA3 algorithm */ #define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */ #define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */ /* Cipher Algorithm */ #define VIRTCHNL_CIPHER_NO_ALG 15 /* NULL algorithm */ #define VIRTCHNL_3DES_CBC 16 /* Triple DES algorithm in CBC mode */ #define VIRTCHNL_AES_CBC 17 /* AES algorithm in CBC mode */ #define VIRTCHNL_AES_CTR 18 /* AES algorithm in Counter mode */ /* AEAD Algorithm */ #define VIRTCHNL_AES_CCM 19 /* AES algorithm in CCM mode */ #define VIRTCHNL_AES_GCM 20 /* AES algorithm in GCM mode */ #define VIRTCHNL_CHACHA20_POLY1305 21 /* algorithm of ChaCha20-Poly1305 */ /* protocol type */ #define VIRTCHNL_PROTO_ESP 1 #define VIRTCHNL_PROTO_AH 2 #define VIRTCHNL_PROTO_RSVD1 3 /* sa mode */ #define VIRTCHNL_SA_MODE_TRANSPORT 1 #define VIRTCHNL_SA_MODE_TUNNEL 2 #define VIRTCHNL_SA_MODE_TRAN_TUN 3 #define VIRTCHNL_SA_MODE_UNKNOWN 4 /* sa direction */ #define VIRTCHNL_DIR_INGRESS 1 #define VIRTCHNL_DIR_EGRESS 2 #define VIRTCHNL_DIR_INGRESS_EGRESS 3 /* sa termination */ #define VIRTCHNL_TERM_SOFTWARE 1 #define VIRTCHNL_TERM_HARDWARE 2 /* sa ip type */ #define VIRTCHNL_IPV4 1 #define VIRTCHNL_IPV6 2 /* for virtchnl_ipsec_resp */ enum inline_ipsec_resp { INLINE_IPSEC_SUCCESS = 0, INLINE_IPSEC_FAIL = -1, INLINE_IPSEC_ERR_FIFO_FULL = -2, INLINE_IPSEC_ERR_NOT_READY = -3, INLINE_IPSEC_ERR_VF_DOWN = -4, INLINE_IPSEC_ERR_INVALID_PARAMS = -5, INLINE_IPSEC_ERR_NO_MEM = -6, }; /* Detailed opcodes for DPDK and IPsec use */ enum inline_ipsec_ops { INLINE_IPSEC_OP_GET_CAP = 0, INLINE_IPSEC_OP_GET_STATUS = 1, INLINE_IPSEC_OP_SA_CREATE = 2, INLINE_IPSEC_OP_SA_UPDATE = 3, INLINE_IPSEC_OP_SA_DESTROY = 4, INLINE_IPSEC_OP_SP_CREATE = 5, INLINE_IPSEC_OP_SP_DESTROY = 6, INLINE_IPSEC_OP_SA_READ = 7, INLINE_IPSEC_OP_EVENT = 8, INLINE_IPSEC_OP_RESP = 9, }; #pragma pack(1) /* Not all valid, if certain field is invalid, set 1 for all bits */ struct virtchnl_algo_cap { u32 algo_type; u16 block_size; u16 min_key_size; u16 max_key_size; u16 
inc_key_size; u16 min_iv_size; u16 max_iv_size; u16 inc_iv_size; u16 min_digest_size; u16 max_digest_size; u16 inc_digest_size; u16 min_aad_size; u16 max_aad_size; u16 inc_aad_size; }; #pragma pack() /* vf record the capability of crypto from the virtchnl */ struct virtchnl_sym_crypto_cap { u8 crypto_type; u8 algo_cap_num; struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM]; }; /* VIRTCHNL_OP_GET_IPSEC_CAP * VF pass virtchnl_ipsec_cap to PF * and PF return capability of ipsec from virtchnl. */ struct virtchnl_ipsec_cap { /* max number of SA per VF */ u16 max_sa_num; /* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */ u8 virtchnl_protocol_type; /* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */ u8 virtchnl_sa_mode; /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ u8 virtchnl_direction; /* type of esn - !0:enable/0:disable */ u8 esn_enabled; /* type of udp_encap - !0:enable/0:disable */ u8 udp_encap_enabled; /* termination mode - value ref VIRTCHNL_TERM_XXX */ u8 termination_mode; /* SA index mode - !0:enable/0:disable */ u8 sa_index_sw_enabled; /* auditing mode - !0:enable/0:disable */ u8 audit_enabled; /* lifetime byte limit - !0:enable/0:disable */ u8 byte_limit_enabled; /* drop on authentication failure - !0:enable/0:disable */ u8 drop_on_auth_fail_enabled; /* anti-replay window check - !0:enable/0:disable */ u8 arw_check_enabled; /* number of supported crypto capability */ u8 crypto_cap_num; /* descriptor ID */ u16 desc_id; /* crypto capabilities */ struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM]; }; #pragma pack(1) /* configuration of crypto function */ struct virtchnl_ipsec_crypto_cfg_item { u8 crypto_type; u32 algo_type; /* Length of valid IV data. */ u16 iv_len; /* Length of digest */ u16 digest_len; /* SA salt */ u32 salt; /* The length of the symmetric key */ u16 key_len; /* key data buffer */ u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN]; }; #pragma pack() struct virtchnl_ipsec_sym_crypto_cfg { struct virtchnl_ipsec_crypto_cfg_item items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER]; }; #pragma pack(1) /* VIRTCHNL_OP_IPSEC_SA_CREATE * VF send this SA configuration to PF using virtchnl; * PF create SA as configuration and PF driver will return * an unique index (sa_idx) for the created SA. */ struct virtchnl_ipsec_sa_cfg { /* IPsec SA Protocol - AH/ESP */ u8 virtchnl_protocol_type; /* termination mode - value ref VIRTCHNL_TERM_XXX */ u8 virtchnl_termination; /* type of outer IP - IPv4/IPv6 */ u8 virtchnl_ip_type; /* type of esn - !0:enable/0:disable */ u8 esn_enabled; /* udp encap - !0:enable/0:disable */ u8 udp_encap_enabled; /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ u8 virtchnl_direction; /* reserved */ u8 reserved1; /* SA security parameter index */ u32 spi; /* outer src ip address */ u8 src_addr[16]; /* outer dst ip address */ u8 dst_addr[16]; /* SPD reference. Used to link an SA with its policy. * PF drivers may ignore this field. */ u16 spd_ref; /* high 32 bits of esn */ u32 esn_hi; /* low 32 bits of esn */ u32 esn_low; /* When enabled, sa_index must be valid */ u8 sa_index_en; /* SA index when sa_index_en is true */ u32 sa_index; /* auditing mode - enable/disable */ u8 audit_en; /* lifetime byte limit - enable/disable * When enabled, byte_limit_hard and byte_limit_soft * must be valid. 
*/ u8 byte_limit_en; /* hard byte limit count */ u64 byte_limit_hard; /* soft byte limit count */ u64 byte_limit_soft; /* drop on authentication failure - enable/disable */ u8 drop_on_auth_fail_en; /* anti-reply window check - enable/disable * When enabled, arw_size must be valid. */ u8 arw_check_en; /* size of arw window, offset by 1. Setting to 0 * represents ARW window size of 1. Setting to 127 * represents ARW window size of 128 */ u8 arw_size; /* no ip offload mode - enable/disable * When enabled, ip type and address must not be valid. */ u8 no_ip_offload_en; /* SA Domain. Used to logical separate an SADB into groups. * PF drivers supporting a single group ignore this field. */ u16 sa_domain; /* crypto configuration */ struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg; }; #pragma pack() /* VIRTCHNL_OP_IPSEC_SA_UPDATE * VF send configuration of index of SA to PF * PF will update SA according to configuration */ struct virtchnl_ipsec_sa_update { u32 sa_index; /* SA to update */ u32 esn_hi; /* high 32 bits of esn */ u32 esn_low; /* low 32 bits of esn */ }; #pragma pack(1) /* VIRTCHNL_OP_IPSEC_SA_DESTROY * VF send configuration of index of SA to PF * PF will destroy SA according to configuration * flag bitmap indicate all SA or just selected SA will * be destroyed */ struct virtchnl_ipsec_sa_destroy { /* All zero bitmap indicates all SA will be destroyed. * Non-zero bitmap indicates the selected SA in * array sa_index will be destroyed. */ u8 flag; /* selected SA index */ u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM]; }; /* VIRTCHNL_OP_IPSEC_SA_READ * VF send this SA configuration to PF using virtchnl; * PF read SA and will return configuration for the created SA. */ struct virtchnl_ipsec_sa_read { /* SA valid - invalid/valid */ u8 valid; /* SA active - inactive/active */ u8 active; /* SA SN rollover - not_rollover/rollover */ u8 sn_rollover; /* IPsec SA Protocol - AH/ESP */ u8 virtchnl_protocol_type; /* termination mode - value ref VIRTCHNL_TERM_XXX */ u8 virtchnl_termination; /* auditing mode - enable/disable */ u8 audit_en; /* lifetime byte limit - enable/disable * When set to limit, byte_limit_hard and byte_limit_soft * must be valid. */ u8 byte_limit_en; /* hard byte limit count */ u64 byte_limit_hard; /* soft byte limit count */ u64 byte_limit_soft; /* drop on authentication failure - enable/disable */ u8 drop_on_auth_fail_en; /* anti-replay window check - enable/disable * When set to check, arw_size, arw_top, and arw must be valid */ u8 arw_check_en; /* size of arw window, offset by 1. Setting to 0 * represents ARW window size of 1. Setting to 127 * represents ARW window size of 128 */ u8 arw_size; /* reserved */ u8 reserved1; /* top of anti-replay-window */ u64 arw_top; /* anti-replay-window */ u8 arw[16]; /* packets processed */ u64 packets_processed; /* bytes processed */ u64 bytes_processed; /* packets dropped */ u32 packets_dropped; /* authentication failures */ u32 auth_fails; /* ARW check failures */ u32 arw_fails; /* type of esn - enable/disable */ u8 esn; /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */ u8 virtchnl_direction; /* SA security parameter index */ u32 spi; /* SA salt */ u32 salt; /* high 32 bits of esn */ u32 esn_hi; /* low 32 bits of esn */ u32 esn_low; /* SA Domain. Used to logical separate an SADB into groups. * PF drivers supporting a single group ignore this field. */ u16 sa_domain; /* SPD reference. Used to link an SA with its policy. * PF drivers may ignore this field. */ u16 spd_ref; /* crypto configuration. 
Salt and keys are set to 0 */ struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg; }; #pragma pack() -/* Add whitelist entry in IES */ +/* Add allowlist entry in IES */ struct virtchnl_ipsec_sp_cfg { u32 spi; u32 dip[4]; /* Drop frame if true or redirect to QAT if false. */ u8 drop; /* Congestion domain. For future use. */ u8 cgd; /* 0 for IPv4 table, 1 for IPv6 table. */ u8 table_id; /* Set TC (congestion domain) if true. For future use. */ u8 set_tc; }; #pragma pack(1) -/* Delete whitelist entry in IES */ +/* Delete allowlist entry in IES */ struct virtchnl_ipsec_sp_destroy { /* 0 for IPv4 table, 1 for IPv6 table. */ u8 table_id; u32 rule_id; }; #pragma pack() -/* Response from IES to whitelist operations */ +/* Response from IES to allowlist operations */ struct virtchnl_ipsec_sp_cfg_resp { u32 rule_id; }; struct virtchnl_ipsec_sa_cfg_resp { u32 sa_handle; }; #define INLINE_IPSEC_EVENT_RESET 0x1 #define INLINE_IPSEC_EVENT_CRYPTO_ON 0x2 #define INLINE_IPSEC_EVENT_CRYPTO_OFF 0x4 struct virtchnl_ipsec_event { u32 ipsec_event_data; }; #define INLINE_IPSEC_STATUS_AVAILABLE 0x1 #define INLINE_IPSEC_STATUS_UNAVAILABLE 0x2 struct virtchnl_ipsec_status { u32 status; }; struct virtchnl_ipsec_resp { u32 resp; }; /* Internal message descriptor for VF <-> IPsec communication */ struct inline_ipsec_msg { u16 ipsec_opcode; u16 req_id; union { /* IPsec request */ struct virtchnl_ipsec_sa_cfg sa_cfg[0]; struct virtchnl_ipsec_sp_cfg sp_cfg[0]; struct virtchnl_ipsec_sa_update sa_update[0]; struct virtchnl_ipsec_sa_destroy sa_destroy[0]; struct virtchnl_ipsec_sp_destroy sp_destroy[0]; /* IPsec response */ struct virtchnl_ipsec_sa_cfg_resp sa_cfg_resp[0]; struct virtchnl_ipsec_sp_cfg_resp sp_cfg_resp[0]; struct virtchnl_ipsec_cap ipsec_cap[0]; struct virtchnl_ipsec_status ipsec_status[0]; /* response to del_sa, del_sp, update_sa */ struct virtchnl_ipsec_resp ipsec_resp[0]; /* IPsec event (no req_id is required) */ struct virtchnl_ipsec_event event[0]; /* Reserved */ struct virtchnl_ipsec_sa_read sa_read[0]; } ipsec_data; }; static inline u16 virtchnl_inline_ipsec_val_msg_len(u16 opcode) { u16 valid_len = sizeof(struct inline_ipsec_msg); switch (opcode) { case INLINE_IPSEC_OP_GET_CAP: case INLINE_IPSEC_OP_GET_STATUS: break; case INLINE_IPSEC_OP_SA_CREATE: valid_len += sizeof(struct virtchnl_ipsec_sa_cfg); break; case INLINE_IPSEC_OP_SP_CREATE: valid_len += sizeof(struct virtchnl_ipsec_sp_cfg); break; case INLINE_IPSEC_OP_SA_UPDATE: valid_len += sizeof(struct virtchnl_ipsec_sa_update); break; case INLINE_IPSEC_OP_SA_DESTROY: valid_len += sizeof(struct virtchnl_ipsec_sa_destroy); break; case INLINE_IPSEC_OP_SP_DESTROY: valid_len += sizeof(struct virtchnl_ipsec_sp_destroy); break; /* Only for msg length caculation of response to VF in case of * inline ipsec failure. */ case INLINE_IPSEC_OP_RESP: valid_len += sizeof(struct virtchnl_ipsec_resp); break; default: valid_len = 0; break; } return valid_len; } #endif /* _VIRTCHNL_INLINE_IPSEC_H_ */ diff --git a/sys/dev/ice/virtchnl_lan_desc.h b/sys/dev/ice/virtchnl_lan_desc.h new file mode 100644 index 000000000000..5ca63253da34 --- /dev/null +++ b/sys/dev/ice/virtchnl_lan_desc.h @@ -0,0 +1,549 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (c) 2021, Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _VIRTCHNL_LAN_DESC_H_
+#define _VIRTCHNL_LAN_DESC_H_
+
+/* Rx */
+/* For virtchnl_splitq_base_rx_flex desc members */
+#define VIRTCHNL_RXD_FLEX_PTYPE_S		0
+#define VIRTCHNL_RXD_FLEX_PTYPE_M		\
+	MAKEMASK(0x3FFUL, VIRTCHNL_RXD_FLEX_PTYPE_S)
+#define VIRTCHNL_RXD_FLEX_UMBCAST_S		10
+#define VIRTCHNL_RXD_FLEX_UMBCAST_M		\
+	MAKEMASK(0x3UL, VIRTCHNL_RXD_FLEX_UMBCAST_S)
+#define VIRTCHNL_RXD_FLEX_FF0_S			12
+#define VIRTCHNL_RXD_FLEX_FF0_M	MAKEMASK(0xFUL, VIRTCHNL_RXD_FLEX_FF0_S)
+#define VIRTCHNL_RXD_FLEX_LEN_PBUF_S		0
+#define VIRTCHNL_RXD_FLEX_LEN_PBUF_M		\
+	MAKEMASK(0x3FFFUL, VIRTCHNL_RXD_FLEX_LEN_PBUF_S)
+#define VIRTCHNL_RXD_FLEX_GEN_S			14
+#define VIRTCHNL_RXD_FLEX_GEN_M			BIT_ULL(VIRTCHNL_RXD_FLEX_GEN_S)
+#define VIRTCHNL_RXD_FLEX_BUFQ_ID_S		15
+#define VIRTCHNL_RXD_FLEX_BUFQ_ID_M		\
+	BIT_ULL(VIRTCHNL_RXD_FLEX_BUFQ_ID_S)
+#define VIRTCHNL_RXD_FLEX_LEN_HDR_S		0
+#define VIRTCHNL_RXD_FLEX_LEN_HDR_M		\
+	MAKEMASK(0x3FFUL, VIRTCHNL_RXD_FLEX_LEN_HDR_S)
+#define VIRTCHNL_RXD_FLEX_RSC_S			10
+#define VIRTCHNL_RXD_FLEX_RSC_M			BIT_ULL(VIRTCHNL_RXD_FLEX_RSC_S)
+#define VIRTCHNL_RXD_FLEX_SPH_S			11
+#define VIRTCHNL_RXD_FLEX_SPH_M			BIT_ULL(VIRTCHNL_RXD_FLEX_SPH_S)
+#define VIRTCHNL_RXD_FLEX_MISS_S		12
+#define VIRTCHNL_RXD_FLEX_MISS_M		\
+	BIT_ULL(VIRTCHNL_RXD_FLEX_MISS_S)
+#define VIRTCHNL_RXD_FLEX_FF1_S			13
+#define VIRTCHNL_RXD_FLEX_FF1_M	MAKEMASK(0x7UL, VIRTCHNL_RXD_FLEX_FF1_S)
+
+/* For virtchnl_singleq_base_rx_legacy desc members */
+#define VIRTCHNL_RXD_QW1_LEN_SPH_S	63
+#define VIRTCHNL_RXD_QW1_LEN_SPH_M	BIT_ULL(VIRTCHNL_RXD_QW1_LEN_SPH_S)
+#define VIRTCHNL_RXD_QW1_LEN_HBUF_S	52
+#define VIRTCHNL_RXD_QW1_LEN_HBUF_M	\
+	MAKEMASK(0x7FFULL, VIRTCHNL_RXD_QW1_LEN_HBUF_S)
+#define VIRTCHNL_RXD_QW1_LEN_PBUF_S	38
+#define VIRTCHNL_RXD_QW1_LEN_PBUF_M	\
+	MAKEMASK(0x3FFFULL, VIRTCHNL_RXD_QW1_LEN_PBUF_S)
+#define VIRTCHNL_RXD_QW1_PTYPE_S	30
+#define VIRTCHNL_RXD_QW1_PTYPE_M	\
+	MAKEMASK(0xFFULL, VIRTCHNL_RXD_QW1_PTYPE_S)
+#define VIRTCHNL_RXD_QW1_ERROR_S	19
+#define VIRTCHNL_RXD_QW1_ERROR_M	\
+	MAKEMASK(0xFFUL, VIRTCHNL_RXD_QW1_ERROR_S)
+#define VIRTCHNL_RXD_QW1_STATUS_S	0
+#define VIRTCHNL_RXD_QW1_STATUS_M	\
+	MAKEMASK(0x7FFFFUL,
VIRTCHNL_RXD_QW1_STATUS_S) + +enum virtchnl_rx_flex_desc_status_error_0_qw1_bits { + /* Note: These are predefined bit offsets */ + VIRTCHNL_RX_FLEX_DESC_STATUS0_DD_S = 0, + VIRTCHNL_RX_FLEX_DESC_STATUS0_EOF_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_HBO_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_L3L4P_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_IPE_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_L4E_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S, +}; + +enum virtchnl_rx_flex_desc_status_error_0_qw0_bits { + VIRTCHNL_RX_FLEX_DESC_STATUS0_LPBK_S = 0, + VIRTCHNL_RX_FLEX_DESC_STATUS0_IPV6EXADD_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_RXE_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_CRCP_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_RSS_VALID_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_L2TAG1P_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S, + VIRTCHNL_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */ +}; + +enum virtchnl_rx_flex_desc_status_error_1_bits { + /* Note: These are predefined bit offsets */ + VIRTCHNL_RX_FLEX_DESC_STATUS1_RSVD_S = 0, /* 2 bits */ + VIRTCHNL_RX_FLEX_DESC_STATUS1_ATRAEFAIL_S = 2, + VIRTCHNL_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 3, + VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 4, + VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 5, + VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 6, + VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 7, + VIRTCHNL_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */ +}; + +enum virtchnl_rx_base_desc_status_bits { + /* Note: These are predefined bit offsets */ + VIRTCHNL_RX_BASE_DESC_STATUS_DD_S = 0, + VIRTCHNL_RX_BASE_DESC_STATUS_EOF_S = 1, + VIRTCHNL_RX_BASE_DESC_STATUS_L2TAG1P_S = 2, + VIRTCHNL_RX_BASE_DESC_STATUS_L3L4P_S = 3, + VIRTCHNL_RX_BASE_DESC_STATUS_CRCP_S = 4, + VIRTCHNL_RX_BASE_DESC_STATUS_RSVD_S = 5, /* 3 BITS */ + VIRTCHNL_RX_BASE_DESC_STATUS_EXT_UDP_0_S = 8, + VIRTCHNL_RX_BASE_DESC_STATUS_UMBCAST_S = 9, /* 2 BITS */ + VIRTCHNL_RX_BASE_DESC_STATUS_FLM_S = 11, + VIRTCHNL_RX_BASE_DESC_STATUS_FLTSTAT_S = 12, /* 2 BITS */ + VIRTCHNL_RX_BASE_DESC_STATUS_LPBK_S = 14, + VIRTCHNL_RX_BASE_DESC_STATUS_IPV6EXADD_S = 15, + VIRTCHNL_RX_BASE_DESC_STATUS_RSVD1_S = 16, /* 2 BITS */ + VIRTCHNL_RX_BASE_DESC_STATUS_INT_UDP_0_S = 18, + VIRTCHNL_RX_BASE_DESC_STATUS_LAST /* this entry must be last!!! */ +}; + +enum virtchnl_rx_desc_fltstat_values { + VIRTCHNL_RX_DESC_FLTSTAT_NO_DATA = 0, + VIRTCHNL_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ + VIRTCHNL_RX_DESC_FLTSTAT_RSV = 2, + VIRTCHNL_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +enum virtchnl_rx_base_desc_error_bits { + /* Note: These are predefined bit offsets */ + VIRTCHNL_RX_BASE_DESC_ERROR_RXE_S = 0, + VIRTCHNL_RX_BASE_DESC_ERROR_ATRAEFAIL_S = 1, + VIRTCHNL_RX_BASE_DESC_ERROR_HBO_S = 2, + VIRTCHNL_RX_BASE_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */ + VIRTCHNL_RX_BASE_DESC_ERROR_IPE_S = 3, + VIRTCHNL_RX_BASE_DESC_ERROR_L4E_S = 4, + VIRTCHNL_RX_BASE_DESC_ERROR_EIPE_S = 5, + VIRTCHNL_RX_BASE_DESC_ERROR_OVERSIZE_S = 6, + VIRTCHNL_RX_BASE_DESC_ERROR_RSVD_S = 7 +}; + +/* Receive Descriptors */ +/* splitq buf + | 16| 0| + ---------------------------------------------------------------- + | RSV | Buffer ID | + ---------------------------------------------------------------- + | Rx packet buffer adresss | + ---------------------------------------------------------------- + | Rx header buffer adresss | + ---------------------------------------------------------------- + | RSV | + ---------------------------------------------------------------- + | 0| + */ +struct virtchnl_splitq_rx_buf_desc { + struct { + __le16 buf_id; /* Buffer Identifier */ + __le16 rsvd0; + __le32 rsvd1; + } qword0; + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + __le64 rsvd2; +}; /* read used with buffer queues*/ + +/* singleq buf + | 0| + ---------------------------------------------------------------- + | Rx packet buffer adresss | + ---------------------------------------------------------------- + | Rx header buffer adresss | + ---------------------------------------------------------------- + | RSV | + ---------------------------------------------------------------- + | RSV | + ---------------------------------------------------------------- + | 0| + */ +struct virtchnl_singleq_rx_buf_desc { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + __le64 rsvd1; + __le64 rsvd2; +}; /* read used with buffer queues*/ + +union virtchnl_rx_buf_desc { + struct virtchnl_singleq_rx_buf_desc read; + struct virtchnl_splitq_rx_buf_desc split_rd; +}; + +/* (0x00) singleq wb(compl) */ +struct virtchnl_singleq_base_rx_desc { + struct { + struct { + __le16 mirroring_status; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow Director filter id */ + } hi_dword; + } qword0; + struct { + /* status/error/PTYPE/length */ + __le64 status_error_ptype_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + __le32 reserved; + __le32 fd_id; + } qword3; +}; /* writeback */ + +/* (0x01) singleq flex compl */ +struct virtchnl_rx_flex_desc { + /* Qword 0 */ + u8 rxdid; /* descriptor builder profile id */ + u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ + __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ + __le16 pkt_len; /* [15:14] are reserved */ + __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */ + /* sph=[11:11] */ + /* ff1/ext=[15:12] */ + + /* Qword 1 */ + __le16 status_error0; + __le16 l2tag1; + __le16 flex_meta0; + __le16 flex_meta1; + + /* Qword 2 */ + __le16 status_error1; + u8 flex_flags2; + u8 time_stamp_low; + __le16 l2tag2_1st; + __le16 l2tag2_2nd; + + /* Qword 3 */ + __le16 flex_meta2; + __le16 flex_meta3; + union { + struct { + __le16 flex_meta4; + __le16 flex_meta5; + } flex; + __le32 ts_high; + } flex_ts; +}; + +/* (0x02) */ +struct virtchnl_rx_flex_desc_nic { + /* Qword 0 */ + u8 
+
+/* (0x01) singleq flex compl */
+struct virtchnl_rx_flex_desc {
+	/* Qword 0 */
+	u8 rxdid; /* descriptor builder profile id */
+	u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+	__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+	__le16 pkt_len; /* [15:14] are reserved */
+	__le16 hdr_len_sph_flex_flags1;	/* header=[10:0] */
+					/* sph=[11:11] */
+					/* ff1/ext=[15:12] */
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 flex_meta0;
+	__le16 flex_meta1;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 time_stamp_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le16 flex_meta2;
+	__le16 flex_meta3;
+	union {
+		struct {
+			__le16 flex_meta4;
+			__le16 flex_meta5;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
+/* (0x02) */
+struct virtchnl_rx_flex_desc_nic {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 flow_id;
+	union {
+		struct {
+			__le16 rsvd;
+			__le16 flow_id_ipv6;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
+/* Rx Flex Descriptor Switch Profile
+ * RxDID Profile Id 3
+ * Flex-field 0: Source Vsi
+ */
+struct virtchnl_rx_flex_desc_sw {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 src_vsi; /* [10:15] are reserved */
+	__le16 flex_md1_rsvd;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 rsvd; /* flex words 2-3 are reserved */
+	__le32 ts_high;
+};
+
+/* Rx Flex Descriptor NIC VEB Profile
+ * RxDID Profile Id 4
+ * Flex-field 0: Destination Vsi
+ */
+struct virtchnl_rx_flex_desc_nic_veb_dbg {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 dst_vsi;	/* [0:12]: destination vsi */
+			/* 13: vsi valid bit */
+			/* [14:15] are reserved */
+	__le16 flex_field_1;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le32 rsvd; /* flex words 2-3 are reserved */
+	__le32 ts_high;
+};
+
+/* Rx Flex Descriptor NIC ACL Profile
+ * RxDID Profile Id 5
+ * Flex-field 0: ACL Counter 0
+ * Flex-field 1: ACL Counter 1
+ * Flex-field 2: ACL Counter 2
+ */
+struct virtchnl_rx_flex_desc_nic_acl_dbg {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le16 acl_ctr0;
+	__le16 acl_ctr1;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flex_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le16 acl_ctr2;
+	__le16 rsvd; /* flex words 2-3 are reserved */
+	__le32 ts_high;
+};
+
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile Id 6
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow Id lower 16-bits
+ * Flex-field 3: Source Vsi
+ * Flex-field 4: reserved, Vlan id taken from L2Tag
+ */
+struct virtchnl_rx_flex_desc_nic_2 {
+	/* Qword 0 */
+	u8 rxdid;
+	u8 mir_id_umb_cast;
+	__le16 ptype_flexi_flags0;
+	__le16 pkt_len;
+	__le16 hdr_len_sph_flex_flags1;
+
+	/* Qword 1 */
+	__le16 status_error0;
+	__le16 l2tag1;
+	__le32 rss_hash;
+
+	/* Qword 2 */
+	__le16 status_error1;
+	u8 flexi_flags2;
+	u8 ts_low;
+	__le16 l2tag2_1st;
+	__le16 l2tag2_2nd;
+
+	/* Qword 3 */
+	__le16 flow_id;
+	__le16 src_vsi;
+	union {
+		struct {
+			__le16 rsvd;
+			__le16 flow_id_ipv6;
+		} flex;
+		__le32 ts_high;
+	} flex_ts;
+};
+
+/* Rx Flex Descriptor Advanced (Split Queue Model)
+ * RxDID Profile Id 7
+ */
+struct virtchnl_rx_flex_desc_adv {
+	/* Qword 0 */
+	u8 rxdid_ucast;	/* profile_id=[3:0] */
+			/* rsvd=[5:4] */
+			/* ucast=[7:6] */
+	u8 status_err0_qw0;
+	__le16 ptype_err_fflags0;	/* ptype=[9:0] */
+					/* ip_hdr_err=[10:10] */
+					/* udp_len_err=[11:11] */
+					/* ff0=[15:12] */
+	__le16 pktlen_gen_bufq_id;	/* plen=[13:0] */
+					/* gen=[14:14] only in splitq */
+					/* bufq_id=[15:15] only in splitq */
+	__le16 hdrlen_flags;		/* header=[9:0] */
+					/* rsc=[10:10] only in splitq */
+					/* sph=[11:11] only in splitq */
+					/* ext_udp_0=[12:12] */
+					/* int_udp_0=[13:13] */
+					/* trunc_mirr=[14:14] */
+					/* miss_prepend=[15:15] */
+	/* Qword 1 */
+	u8 status_err0_qw1;
+	u8 status_err1;
+	u8 fflags1;
+	u8 ts_low;
+	__le16 fmd0;
+	__le16 fmd1;
+	/* Qword 2 */
+	__le16 fmd2;
+	u8 fflags2;
+	u8 hash3;
+	__le16 fmd3;
+	__le16 fmd4;
+	/* Qword 3 */
+	__le16 fmd5;
+	__le16 fmd6;
+	__le16 fmd7_0;
+	__le16 fmd7_1;
+}; /* writeback */
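+
+/*
+ * Illustrative sketch only, not part of the shared firmware/software
+ * interface: unpacking pktlen_gen_bufq_id of the advanced descriptor into
+ * its packet-length and generation fields (plen=[13:0], gen=[14:14] per
+ * the layout comments above).  The mask and shift values are derived from
+ * that layout, the helper names are hypothetical, and LE16_TO_CPU is
+ * assumed to be the platform's le16-to-host conversion macro.
+ */
+static inline u16
+virtchnl_rx_flex_adv_pkt_len(const struct virtchnl_rx_flex_desc_adv *desc)
+{
+	return LE16_TO_CPU(desc->pktlen_gen_bufq_id) & 0x3FFF;
+}
+
+static inline u8
+virtchnl_rx_flex_adv_gen(const struct virtchnl_rx_flex_desc_adv *desc)
+{
+	/* The gen bit is meaningful only in the split queue model. */
+	return (LE16_TO_CPU(desc->pktlen_gen_bufq_id) >> 14) & 1;
+}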
+
+/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile
+ * RxDID Profile Id 8
+ * Flex-field 0: BufferID
+ * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
+ * Flex-field 2: Hash[15:0]
+ * Flex-flags 2: Hash[23:16]
+ * Flex-field 3: L2TAG2
+ * Flex-field 5: L2TAG1
+ * Flex-field 7: Timestamp (upper 32 bits)
+ */
+struct virtchnl_rx_flex_desc_adv_nic_3 {
+	/* Qword 0 */
+	u8 rxdid_ucast;	/* profile_id=[3:0] */
+			/* rsvd=[5:4] */
+			/* ucast=[7:6] */
+	u8 status_err0_qw0;
+	__le16 ptype_err_fflags0;	/* ptype=[9:0] */
+					/* ip_hdr_err=[10:10] */
+					/* udp_len_err=[11:11] */
+					/* ff0=[15:12] */
+	__le16 pktlen_gen_bufq_id;	/* plen=[13:0] */
+					/* gen=[14:14] only in splitq */
+					/* bufq_id=[15:15] only in splitq */
+	__le16 hdrlen_flags;		/* header=[9:0] */
+					/* rsc=[10:10] only in splitq */
+					/* sph=[11:11] only in splitq */
+					/* ext_udp_0=[12:12] */
+					/* int_udp_0=[13:13] */
+					/* trunc_mirr=[14:14] */
+					/* miss_prepend=[15:15] */
+	/* Qword 1 */
+	u8 status_err0_qw1;
+	u8 status_err1;
+	u8 fflags1;
+	u8 ts_low;
+	__le16 buf_id; /* only in splitq */
+	union {
+		__le16 raw_cs;
+		__le16 l2tag1;
+		__le16 rscseglen;
+	} misc;
+	/* Qword 2 */
+	__le16 hash1;
+	union {
+		u8 fflags2;
+		u8 mirrorid;
+		u8 hash2;
+	} ff2_mirrid_hash2;
+	u8 hash3;
+	__le16 l2tag2;
+	__le16 fmd4;
+	/* Qword 3 */
+	__le16 l2tag1;
+	__le16 fmd6;
+	__le32 ts_high;
+}; /* writeback */
+
+union virtchnl_rx_desc {
+	struct virtchnl_singleq_rx_buf_desc read;
+	struct virtchnl_singleq_base_rx_desc base_wb;
+	struct virtchnl_rx_flex_desc flex_wb;
+	struct virtchnl_rx_flex_desc_adv flex_wb_adv;
+};
+
+#endif /* _VIRTCHNL_LAN_DESC_H_ */