D21959.diff

Index: MAINTAINERS
===================================================================
--- MAINTAINERS
+++ MAINTAINERS
@@ -99,10 +99,12 @@
#x11 phabricator group.
(to avoid drm graphics drivers
impact)
+sys/contrib/dev/ice erj Pre-commit phabricator review requested.
sys/contrib/ipfilter cy Pre-commit review requested.
sys/dev/e1000 erj Pre-commit phabricator review requested.
sys/dev/ixgbe erj Pre-commit phabricator review requested.
sys/dev/ixl erj Pre-commit phabricator review requested.
+sys/dev/ice erj Pre-commit phabricator review requested.
sys/dev/sound/usb hselasky If in doubt, ask.
sys/dev/usb hselasky If in doubt, ask.
sys/dev/xen royger Pre-commit review recommended.
Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC
+++ sys/amd64/conf/GENERIC
@@ -245,6 +245,7 @@
device ixv # Intel PRO/10GbE PCIE VF Ethernet
device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
+device ice # Intel 800 Series Physical Function
device vmx # VMware VMXNET3 Ethernet
# PCI Ethernet NICs.
Index: sys/amd64/conf/NOTES
===================================================================
--- sys/amd64/conf/NOTES
+++ sys/amd64/conf/NOTES
@@ -291,6 +291,8 @@
# bxe: Broadcom NetXtreme II (BCM5771X/BCM578XX) PCIe 10Gb Ethernet
# adapters.
+# ice: Intel 800 Series Physical Function
+# Requires the ice_ddp module for full functionality
# ipw: Intel PRO/Wireless 2100 IEEE 802.11 adapter
# Requires the ipw firmware module
# iwi: Intel PRO/Wireless 2200BG/2225BG/2915ABG IEEE 802.11 adapters
@@ -316,6 +318,8 @@
device iwn # Intel 4965/1000/5000/6000 wireless NICs.
device ixl # Intel 700 Series Physical Function
device iavf # Intel Adaptive Virtual Function
+device ice # Intel 800 Series Physical Function
+device ice_ddp # Intel 800 Series DDP Package
device mthca # Mellanox HCA InfiniBand
device mlx4 # Shared code module between IB and Ethernet
device mlx4ib # Mellanox ConnectX HCA InfiniBand
Index: sys/arm64/conf/NOTES
===================================================================
--- sys/arm64/conf/NOTES
+++ sys/arm64/conf/NOTES
@@ -83,6 +83,8 @@
device al_eth # Annapurna Alpine Ethernet NIC
device dwc_rk # Rockchip Designware
device dwc_socfpga # Altera SOCFPGA Ethernet MAC
+device ice # Intel 800 Series Physical Function
+device ice_ddp # Intel 800 Series DDP Package
# Etherswitch devices
device e6000sw # Marvell mv88e6085 based switches
Index: sys/conf/files.amd64
===================================================================
--- sys/conf/files.amd64
+++ sys/conf/files.amd64
@@ -144,6 +144,52 @@
dev/amdgpio/amdgpio.c optional amdgpio
dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv
dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv
+dev/ice/if_ice_iflib.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_lib.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_osdep.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_resmgr.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_strings.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_iflib_recovery_txrx.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_iflib_txrx.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_common.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_controlq.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_dcb.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_flex_pipe.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_flow.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_nvm.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_sched.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_sriov.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_switch.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+ice_ddp.c optional ice_ddp \
+ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01030900 -mice_ddp -c${.TARGET}" \
+ no-implicit-rule before-depend local \
+ clean "ice_ddp.c"
+ice_ddp.fwo optional ice_ddp \
+ dependency "ice_ddp.fw" \
+ compile-with "${NORMAL_FWO}" \
+ no-implicit-rule \
+ clean "ice_ddp.fwo"
+ice_ddp.fw optional ice_ddp \
+ dependency "$S/contrib/dev/ice/ice-1.3.9.0.pkg" \
+ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.9.0.pkg ice_ddp.fw" \
+ no-obj no-implicit-rule \
+ clean "ice_ddp.fw"
dev/ioat/ioat.c optional ioat pci
dev/ioat/ioat_test.c optional ioat pci
dev/ixl/if_ixl.c optional ixl pci \
Index: sys/conf/files.arm64
===================================================================
--- sys/conf/files.arm64
+++ sys/conf/files.arm64
@@ -235,6 +235,52 @@
dev/axgbe/xgbe-drv.c optional axgbe
dev/axgbe/xgbe-mdio.c optional axgbe
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
+dev/ice/if_ice_iflib.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_lib.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_osdep.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_resmgr.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_strings.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_iflib_recovery_txrx.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_iflib_txrx.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_common.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_controlq.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_dcb.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_flex_pipe.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_flow.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_nvm.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_sched.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_sriov.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_switch.c optional ice pci \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+ice_ddp.c optional ice_ddp \
+ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01030900 -mice_ddp -c${.TARGET}" \
+ no-implicit-rule before-depend local \
+ clean "ice_ddp.c"
+ice_ddp.fwo optional ice_ddp \
+ dependency "ice_ddp.fw" \
+ compile-with "${NORMAL_FWO}" \
+ no-implicit-rule \
+ clean "ice_ddp.fwo"
+ice_ddp.fw optional ice_ddp \
+ dependency "$S/contrib/dev/ice/ice-1.3.9.0.pkg" \
+ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.9.0.pkg ice_ddp.fw" \
+ no-obj no-implicit-rule \
+ clean "ice_ddp.fw"
dev/iicbus/sy8106a.c optional sy8106a fdt
dev/iicbus/twsi/mv_twsi.c optional twsi fdt
dev/iicbus/twsi/a10_twsi.c optional twsi fdt
Index: sys/contrib/dev/ice/LICENSE
===================================================================
--- /dev/null
+++ sys/contrib/dev/ice/LICENSE
@@ -0,0 +1,41 @@
+Copyright (c) 2006-2018, Intel Corporation.
+All rights reserved.
+
+Redistribution. Redistribution and use in binary form, without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions must reproduce the above copyright notice and the
+ following disclaimer in the documentation and/or other materials
+ provided with the distribution.
+* Neither the name of Intel Corporation nor the names of its suppliers
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+* No reverse engineering, decompilation, or disassembly of this software
+ is permitted.
+
+Limited patent license. Intel Corporation grants a world-wide,
+royalty-free, non-exclusive license under patents it now or hereafter
+owns or controls to make, have made, use, import, offer to sell and
+sell ("Utilize") this software, but solely to the extent that any
+such patent is necessary to Utilize the software alone, or in
+combination with an operating system licensed under an approved Open
+Source license as listed by the Open Source Initiative at
+http://opensource.org/licenses. The patent license shall not apply to
+any other combinations which include this software. No hardware per
+se is licensed hereunder.
+
+DISCLAIMER. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+
Index: sys/contrib/dev/ice/README
===================================================================
--- /dev/null
+++ sys/contrib/dev/ice/README
@@ -0,0 +1,197 @@
+Dynamic Device Personalization (DDP) Package
+============================================
+February 21, 2020
+
+
+Contents
+========
+- Overview
+- Safe Mode
+- Notes
+- Installation & Troubleshooting
+- Legal
+
+
+Overview
+========
+Adapters based on the Intel(R) Ethernet Controller 800 Series require a Dynamic
+Device Personalization (DDP) package file to enable advanced features (such as
+dynamic tunneling, Flow Director, RSS, and ADQ).
+
+DDP allows you to change the packet processing pipeline of a device by applying
+a profile package to the device at runtime. Profiles can be used to, for
+example, add support for new protocols, change existing protocols, or change
+default settings. DDP profiles can also be rolled back without rebooting the
+system.
+
+The DDP package loads during device initialization. The driver checks to see if
+the DDP package is present and compatible. If this file exists, the driver will
+load it into the device. If the DDP package file is missing or incompatible
+with the driver, the driver will go into Safe Mode where it will use the
+configuration contained in the device's NVM. See "Safe Mode" later in this
+README for more information.
+
+A general purpose, OS-default DDP package is automatically installed with all
+supported Intel Ethernet Controller 800 Series drivers on Microsoft* Windows*,
+ESX*, FreeBSD*, and Linux* operating systems. Additional DDP packages are
+available to address needs for specific market segments. For example, a
+telecommunications (Comms) DDP package is available to support certain
+market-specific protocols in addition to the protocols in the OS-default
+package.
+
+The OS-default DDP package supports the following:
+- MAC
+- EtherType
+- VLAN
+- IPv4
+- IPv6
+- TCP
+- ARP
+- UDP
+- SCTP
+- ICMP
+- ICMPv6
+- CTRL
+- LLDP
+- VXLAN-GPE
+- VXLAN (non-GPE)
+- Geneve
+- GRE
+- NVGRE
+- RoCEv2
+
+
+Safe Mode
+=========
+Safe Mode disables advanced and performance features, and supports only basic
+traffic and minimal functionality, such as updating the NVM or downloading a
+new driver or DDP package.
+
+See the Intel(R) Ethernet Adapters and Devices User Guide for more details on
+DDP and Safe Mode.
+
+
+Notes
+=====
+- You cannot update the DDP package if any PF drivers are already loaded. To
+overwrite a package, unload all PFs and then reload the driver with the new
+package.
+
+- Except for Linux, you can only use one DDP package per driver, even if you
+have more than one device installed that uses the driver.
+
+- Only the first loaded PF per device can download a package for that device.
+
+- If you are using DPDK, see the DPDK documentation at https://www.dpdk.org/
+for installation instructions and more information.
+
+
+Installation and Troubleshooting
+================================
+
+Microsoft* Windows*
+-------------------
+The DDP package is installed as part of the driver binary. You don't need to
+take additional steps to install the DDP package file.
+
+If you encounter issues with the DDP package file, download the latest driver.
+
+
+ESX
+---
+The DDP package is installed as part of the driver binary. You don't need to
+take additional steps to install the DDP package file.
+
+If you encounter issues with the DDP package file, download the latest driver.
+
+
+FreeBSD
+-------
+The FreeBSD driver automatically installs the default DDP package file during
+driver installation. See the ice driver README for general installation and
+building instructions.
+
+The DDP package loads during device initialization. The driver looks for the
+ice_ddp module and checks that it contains a valid DDP package file.
+
+If you encounter issues with the DDP package file, you may need to download an
+updated driver or ice_ddp module. See the log messages for more information.
+
+NOTE: It's important to do 'make install' during initial ice driver
+installation so that the driver loads the DDP package automatically.
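+
+For illustration only (the kernel configuration entries below are the ones
+added by this change; this is not part of Intel's official instructions), the
+driver and DDP package can be built into a custom FreeBSD kernel with:
+
+ device ice # Intel 800 Series Physical Function
+ device ice_ddp # Intel 800 Series DDP Package
+
+If the DDP package was instead built as a separate kernel module (ice_ddp.ko),
+it can typically be loaded manually with 'kldload ice_ddp'.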
+
+
+Linux
+-----
+The Linux driver automatically installs the default DDP package file during
+driver installation. See the ice driver README for general installation and
+building instructions.
+
+The DDP package loads during device initialization. The driver looks for
+intel/ice/ddp/ice.pkg in your firmware root (typically /lib/firmware/ or
+/lib/firmware/updates/) and checks that it contains a valid DDP package file.
+The ice.pkg file is a symbolic link to the default DDP package file installed
+by the linux-firmware software package or the ice out-of-tree driver
+installation.
+
+If you encounter issues with the DDP package file, you may need to download an
+updated driver or DDP package file. See the log messages for more information.
+
+You can install specific DDP package files for different physical devices in
+the same system. To install a specific DDP package:
+
+1. Download the DDP package file (ice-x.x.x.x.zip) you want for your device. In
+addition to licensing information and this README, this zip file contains the
+following files:
+ ice-x.x.x.x.pkg
+ ice.pkg
+
+NOTE: The ice.pkg file is a Linux symbolic link file pointing to
+ice-x.x.x.x.pkg (in the same path).
+
+2. Rename the ice-x.x.x.x.pkg file as ice-xxxxxxxxxxxxxxxx.pkg, where
+'xxxxxxxxxxxxxxxx' is the unique 64-bit PCI Express device serial number (in
+hex) of the device you want the package downloaded on. The filename must
+include the complete serial number (including leading zeros) and be all
+lowercase. For example, if the 64-bit serial number is b887a3ffffca0568, then
+the file name would be ice-b887a3ffffca0568.pkg.
+
+To find the serial number from the PCI bus address, you can use the following
+command:
+
+# lspci -vv -s af:00.0 | grep -i Serial
+Capabilities: [150 v1] Device Serial Number b8-87-a3-ff-ff-ca-05-68
+
+You can use the following command to format the serial number without the
+dashes:
+
+# lspci -vv -s af:00.0 | grep -i Serial | awk '{print $7}' | sed s/-//g
+b887a3ffffca0568
+
+3. Copy the renamed DDP package file to /lib/firmware/updates/intel/ice/ddp/.
+If the directory does not yet exist, create it before copying the file.
+
+4. Unload all of the PFs on the device.
+
+5. Reload the driver with the new package.
+
+NOTE: The presence of a device-specific DDP package file overrides the loading
+of the default DDP package file (ice.pkg).
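+
+For illustration only, steps 2 and 3 above can be combined into a short shell
+sequence (af:00.0 is a placeholder for your device's PCI address, and
+ice-x.x.x.x.pkg for the downloaded package file):
+
+# serial=$(lspci -vv -s af:00.0 | grep -i Serial | awk '{print $7}' | sed s/-//g)
+# mkdir -p /lib/firmware/updates/intel/ice/ddp/
+# cp ice-x.x.x.x.pkg /lib/firmware/updates/intel/ice/ddp/ice-${serial}.pkg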
+
+
+Legal / Disclaimers
+===================
+Copyright (c) 2019 - 2020, Intel Corporation.
+
+Intel and the Intel logo are trademarks of Intel Corporation or its
+subsidiaries in the U.S. and/or other countries.
+
+*Other names and brands may be claimed as the property of others.
+
+This software and the related documents are Intel copyrighted materials, and
+your use of them is governed by the express license under which they were
+provided to you ("License"). Unless the License provides otherwise, you may not
+use, modify, copy, publish, distribute, disclose or transmit this software or
+the related documents without Intel's prior written permission.
+This software and the related documents are provided as is, with no express or
+implied warranties, other than those that are expressly stated in the License.
Index: sys/dev/ice/ice_adminq_cmd.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_adminq_cmd.h
@@ -0,0 +1,2968 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_ADMINQ_CMD_H_
+#define _ICE_ADMINQ_CMD_H_
+
+/* This header file defines the Admin Queue commands, error codes and
+ * descriptor format. It is shared between Firmware and Software.
+ */
+
+#define ICE_MAX_VSI 768
+#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
+#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
+
+struct ice_aqc_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get version (direct 0x0001) */
+struct ice_aqc_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+/* Send driver version (indirect 0x0002) */
+struct ice_aqc_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Queue Shutdown (direct 0x0003) */
+struct ice_aqc_q_shutdown {
+ u8 driver_unloading;
+#define ICE_AQC_DRIVER_UNLOADING BIT(0)
+ u8 reserved[15];
+};
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ice_aqc_get_exp_err {
+ __le32 reason;
+#define ICE_AQC_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ice_aqc_req_res {
+ __le16 res_id;
+#define ICE_AQC_RES_ID_NVM 1
+#define ICE_AQC_RES_ID_SDP 2
+#define ICE_AQC_RES_ID_CHNG_LOCK 3
+#define ICE_AQC_RES_ID_GLBL_LOCK 4
+ __le16 access_type;
+#define ICE_AQC_RES_ACCESS_READ 1
+#define ICE_AQC_RES_ACCESS_WRITE 2
+
+ /* Upon successful completion, FW writes this value and driver is
+ * expected to release resource before timeout. This value is provided
+ * in milliseconds.
+ */
+ __le32 timeout;
+#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
+ __le16 status;
+#define ICE_AQ_RES_GLBL_SUCCESS 0
+#define ICE_AQ_RES_GLBL_IN_PROG 1
+#define ICE_AQ_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ice_aqc_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ice_aqc_list_caps_elem {
+ __le16 cap;
+#define ICE_AQC_CAPS_SWITCHING_MODE 0x0001
+#define ICE_AQC_CAPS_MANAGEABILITY_MODE 0x0002
+#define ICE_AQC_CAPS_OS2BMC 0x0004
+#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
+#define ICE_AQC_MAX_VALID_FUNCTIONS 0x8
+#define ICE_AQC_CAPS_ALTERNATE_RAM 0x0006
+#define ICE_AQC_CAPS_WOL_PROXY 0x0008
+#define ICE_AQC_CAPS_SRIOV 0x0012
+#define ICE_AQC_CAPS_VF 0x0013
+#define ICE_AQC_CAPS_802_1QBG 0x0015
+#define ICE_AQC_CAPS_802_1BR 0x0016
+#define ICE_AQC_CAPS_VSI 0x0017
+#define ICE_AQC_CAPS_DCB 0x0018
+#define ICE_AQC_CAPS_RSVD 0x0021
+#define ICE_AQC_CAPS_ISCSI 0x0022
+#define ICE_AQC_CAPS_RSS 0x0040
+#define ICE_AQC_CAPS_RXQS 0x0041
+#define ICE_AQC_CAPS_TXQS 0x0042
+#define ICE_AQC_CAPS_MSIX 0x0043
+#define ICE_AQC_CAPS_MAX_MTU 0x0047
+#define ICE_AQC_CAPS_NVM_VER 0x0048
+#define ICE_AQC_CAPS_CEM 0x00F2
+#define ICE_AQC_CAPS_IWARP 0x0051
+#define ICE_AQC_CAPS_LED 0x0061
+#define ICE_AQC_CAPS_SDP 0x0062
+#define ICE_AQC_CAPS_WR_CSR_PROT 0x0064
+#define ICE_AQC_CAPS_NO_DROP_POLICY 0x0065
+#define ICE_AQC_CAPS_LOGI_TO_PHYSI_PORT_MAP 0x0073
+#define ICE_AQC_CAPS_SKU 0x0074
+#define ICE_AQC_CAPS_PORT_MAP 0x0075
+#define ICE_AQC_CAPS_NVM_MGMT 0x0080
+
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+/* Manage MAC address, read command - indirect (0x0107)
+ * This struct is also used for the response
+ */
+struct ice_aqc_manage_mac_read {
+ __le16 flags; /* Zeroed by device driver */
+#define ICE_AQC_MAN_MAC_LAN_ADDR_VALID BIT(4)
+#define ICE_AQC_MAN_MAC_SAN_ADDR_VALID BIT(5)
+#define ICE_AQC_MAN_MAC_PORT_ADDR_VALID BIT(6)
+#define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7)
+#define ICE_AQC_MAN_MAC_MC_MAG_EN BIT(8)
+#define ICE_AQC_MAN_MAC_WOL_PRESERVE_ON_PFR BIT(9)
+#define ICE_AQC_MAN_MAC_READ_S 4
+#define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S)
+ u8 rsvd[2];
+ u8 num_addr; /* Used in response */
+ u8 rsvd1[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response buffer format for manage MAC read command */
+struct ice_aqc_manage_mac_read_resp {
+ u8 lport_num;
+ u8 addr_type;
+#define ICE_AQC_MAN_MAC_ADDR_TYPE_LAN 0
+#define ICE_AQC_MAN_MAC_ADDR_TYPE_WOL 1
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Manage MAC address, write command - direct (0x0108) */
+struct ice_aqc_manage_mac_write {
+ u8 rsvd;
+ u8 flags;
+#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0)
+#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1)
+#define ICE_AQC_MAN_MAC_WR_S 6
+#define ICE_AQC_MAN_MAC_WR_M MAKEMASK(3, ICE_AQC_MAN_MAC_WR_S)
+#define ICE_AQC_MAN_MAC_UPDATE_LAA 0
+#define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL BIT(ICE_AQC_MAN_MAC_WR_S)
+ /* byte stream in network order */
+ u8 mac_addr[ETH_ALEN];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Clear PXE Command and response (direct 0x0110) */
+struct ice_aqc_clear_pxe {
+ u8 rx_cnt;
+#define ICE_AQC_CLEAR_PXE_RX_CNT 0x2
+ u8 reserved[15];
+};
+
+/* Configure No-Drop Policy Command (direct 0x0112) */
+struct ice_aqc_config_no_drop_policy {
+ u8 opts;
+#define ICE_AQC_FORCE_NO_DROP BIT(0)
+ u8 rsvd[15];
+};
+
+/* Get switch configuration (0x0200) */
+struct ice_aqc_get_sw_cfg {
+ /* Reserved for command and copy of request flags for response */
+ __le16 flags;
+ /* First desc in case of command and next_elem in case of response.
+ * In case of response, if it is not zero, it means that not all of the
+ * configuration was returned, and a new command shall be sent with this
+ * value in the 'first desc' field.
+ */
+ __le16 element;
+ /* Reserved for command, only used for response */
+ __le16 num_elems;
+ __le16 rsvd;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Each entry in the response buffer is of the following type: */
+struct ice_aqc_get_sw_cfg_resp_elem {
+ /* VSI/Port Number */
+ __le16 vsi_port_num;
+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S 0
+#define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M \
+ (0x3FF << ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S)
+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_S 14
+#define ICE_AQC_GET_SW_CONF_RESP_TYPE_M (0x3 << ICE_AQC_GET_SW_CONF_RESP_TYPE_S)
+#define ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT 0
+#define ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT 1
+#define ICE_AQC_GET_SW_CONF_RESP_VSI 2
+
+ /* SWID VSI/Port belongs to */
+ __le16 swid;
+
+ /* Bit 14..0 : PF/VF number VSI belongs to
+ * Bit 15 : VF indication bit
+ */
+ __le16 pf_vf_num;
+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S 0
+#define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M \
+ (0x7FFF << ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S)
+#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
+};
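+
+/* Illustrative sketch only (not part of the shared code): extracting the
+ * VSI/port number from a response element using the shift/mask defines
+ * above. LE16_TO_CPU is assumed to be the driver's 16-bit little-endian
+ * conversion macro.
+ */
+static inline u16
+ice_example_sw_cfg_vsi_num(struct ice_aqc_get_sw_cfg_resp_elem *ele)
+{
+	u16 vsi_port = LE16_TO_CPU(ele->vsi_port_num);
+
+	return (vsi_port & ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M) >>
+	    ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S;
+}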
+
+/* The response buffer is as follows. Note that the length of the
+ * elements array varies with the length of the command response.
+ */
+struct ice_aqc_get_sw_cfg_resp {
+ struct ice_aqc_get_sw_cfg_resp_elem elements[1];
+};
+
+/* Set Port parameters, (direct, 0x0203) */
+struct ice_aqc_set_port_params {
+ __le16 cmd_flags;
+#define ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS BIT(0)
+#define ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS BIT(1)
+#define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2)
+ __le16 bad_frame_vsi;
+#define ICE_AQC_SET_P_PARAMS_VSI_S 0
+#define ICE_AQC_SET_P_PARAMS_VSI_M (0x3FF << ICE_AQC_SET_P_PARAMS_VSI_S)
+#define ICE_AQC_SET_P_PARAMS_VSI_VALID BIT(15)
+ __le16 swid;
+#define ICE_AQC_SET_P_PARAMS_SWID_S 0
+#define ICE_AQC_SET_P_PARAMS_SWID_M (0xFF << ICE_AQC_SET_P_PARAMS_SWID_S)
+#define ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S 8
+#define ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_M \
+ (0x3F << ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S)
+#define ICE_AQC_SET_P_PARAMS_IS_LOGI_PORT BIT(14)
+#define ICE_AQC_SET_P_PARAMS_SWID_VALID BIT(15)
+ u8 reserved[10];
+};
+
+/* These resource type defines are used for all switch resource
+ * commands where a resource type is required, such as:
+ * Get Resource Allocation command (indirect 0x0204)
+ * Allocate Resources command (indirect 0x0208)
+ * Free Resources command (indirect 0x0209)
+ * Get Allocated Resource Descriptors Command (indirect 0x020A)
+ */
+#define ICE_AQC_RES_TYPE_VEB_COUNTER 0x00
+#define ICE_AQC_RES_TYPE_VLAN_COUNTER 0x01
+#define ICE_AQC_RES_TYPE_MIRROR_RULE 0x02
+#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
+#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_RECIPE 0x05
+#define ICE_AQC_RES_TYPE_PROFILE 0x06
+#define ICE_AQC_RES_TYPE_SWID 0x07
+#define ICE_AQC_RES_TYPE_VSI 0x08
+#define ICE_AQC_RES_TYPE_FLU 0x09
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_1 0x0A
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_2 0x0B
+#define ICE_AQC_RES_TYPE_WIDE_TABLE_4 0x0C
+#define ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH 0x20
+#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
+#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
+#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
+#define ICE_AQC_RES_TYPE_FLEX_DESC_PROG 0x30
+#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID 0x48
+#define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM 0x49
+#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID 0x50
+#define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM 0x51
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID 0x58
+#define ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM 0x59
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
+#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
+/* Resource types 0x62-67 are reserved for Hash profile builder */
+#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID 0x68
+#define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM 0x69
+
+#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
+#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
+#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+
+#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
+
+#define ICE_AQC_RES_TYPE_S 0
+#define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S)
+
+/* Get Resource Allocation command (indirect 0x0204) */
+struct ice_aqc_get_res_alloc {
+ __le16 resp_elem_num; /* Used in response, reserved in command */
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Resource Allocation Response Buffer per response */
+struct ice_aqc_get_res_resp_elem {
+ __le16 res_type; /* Types defined above cmd 0x0204 */
+ __le16 total_capacity; /* Resources available to all PF's */
+ __le16 total_function; /* Resources allocated for a PF */
+ __le16 total_shared; /* Resources allocated as shared */
+ __le16 total_free; /* Resources un-allocated/not reserved by any PF */
+};
+
+/* Buffer for Get Resource command */
+struct ice_aqc_get_res_resp {
+ /* Number of resource entries to be calculated using
+ * datalen/sizeof(struct ice_aqc_cmd_resp).
+ * Value of 'datalen' gets updated as part of response.
+ */
+ struct ice_aqc_get_res_resp_elem elem[1];
+};
+
+/* Allocate Resources command (indirect 0x0208)
+ * Free Resources command (indirect 0x0209)
+ */
+struct ice_aqc_alloc_free_res_cmd {
+ __le16 num_entries; /* Number of Resource entries */
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Resource descriptor */
+struct ice_aqc_res_elem {
+ union {
+ __le16 sw_resp;
+ __le16 flu_resp;
+ } e;
+};
+
+/* Buffer for Allocate/Free Resources commands */
+struct ice_aqc_alloc_free_res_elem {
+ __le16 res_type; /* Types defined above cmd 0x0204 */
+#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S 8
+#define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \
+ (0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S)
+ __le16 num_elems;
+ struct ice_aqc_res_elem elem[1];
+};
+
+/* Get Allocated Resource Descriptors Command (indirect 0x020A) */
+struct ice_aqc_get_allocd_res_desc {
+ union {
+ struct {
+ __le16 res; /* Types defined above cmd 0x0204 */
+ __le16 first_desc;
+ __le32 reserved;
+ } cmd;
+ struct {
+ __le16 res;
+ __le16 next_desc;
+ __le16 num_desc;
+ __le16 reserved;
+ } resp;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_allocd_res_desc_resp {
+ struct ice_aqc_res_elem elem[1];
+};
+
+/* Add VSI (indirect 0x0210)
+ * Update VSI (indirect 0x0211)
+ * Get VSI (indirect 0x0212)
+ * Free VSI (indirect 0x0213)
+ */
+struct ice_aqc_add_get_update_free_vsi {
+ __le16 vsi_num;
+#define ICE_AQ_VSI_NUM_S 0
+#define ICE_AQ_VSI_NUM_M (0x03FF << ICE_AQ_VSI_NUM_S)
+#define ICE_AQ_VSI_IS_VALID BIT(15)
+ __le16 cmd_flags;
+#define ICE_AQ_VSI_KEEP_ALLOC 0x1
+ u8 vf_id;
+ u8 reserved;
+ __le16 vsi_flags;
+#define ICE_AQ_VSI_TYPE_S 0
+#define ICE_AQ_VSI_TYPE_M (0x3 << ICE_AQ_VSI_TYPE_S)
+#define ICE_AQ_VSI_TYPE_VF 0x0
+#define ICE_AQ_VSI_TYPE_VMDQ2 0x1
+#define ICE_AQ_VSI_TYPE_PF 0x2
+#define ICE_AQ_VSI_TYPE_EMP_MNG 0x3
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Response descriptor for:
+ * Add VSI (indirect 0x0210)
+ * Update VSI (indirect 0x0211)
+ * Free VSI (indirect 0x0213)
+ */
+struct ice_aqc_add_update_free_vsi_resp {
+ __le16 vsi_num;
+ __le16 ext_status;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_vsi_resp {
+ __le16 vsi_num;
+ u8 vf_id;
+ /* The vsi_flags field uses the ICE_AQ_VSI_TYPE_* defines for values.
+ * These are found above in struct ice_aqc_add_get_update_free_vsi.
+ */
+ u8 vsi_flags;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_vsi_props {
+ __le16 valid_sections;
+#define ICE_AQ_VSI_PROP_SW_VALID BIT(0)
+#define ICE_AQ_VSI_PROP_SECURITY_VALID BIT(1)
+#define ICE_AQ_VSI_PROP_VLAN_VALID BIT(2)
+#define ICE_AQ_VSI_PROP_OUTER_TAG_VALID BIT(3)
+#define ICE_AQ_VSI_PROP_INGRESS_UP_VALID BIT(4)
+#define ICE_AQ_VSI_PROP_EGRESS_UP_VALID BIT(5)
+#define ICE_AQ_VSI_PROP_RXQ_MAP_VALID BIT(6)
+#define ICE_AQ_VSI_PROP_Q_OPT_VALID BIT(7)
+#define ICE_AQ_VSI_PROP_OUTER_UP_VALID BIT(8)
+#define ICE_AQ_VSI_PROP_FLOW_DIR_VALID BIT(11)
+#define ICE_AQ_VSI_PROP_PASID_VALID BIT(12)
+ /* switch section */
+ u8 sw_id;
+ u8 sw_flags;
+#define ICE_AQ_VSI_SW_FLAG_ALLOW_LB BIT(5)
+#define ICE_AQ_VSI_SW_FLAG_LOCAL_LB BIT(6)
+#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7)
+ u8 sw_flags2;
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \
+ (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
+#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
+#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
+ u8 veb_stat_id;
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5)
+ /* security section */
+ u8 sec_flags;
+#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0)
+#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2)
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
+#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0)
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ u8 pvlan_reserved[2];
+ u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S 0
+#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
+#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
+#define ICE_AQ_VSI_VLAN_EMOD_S 3
+#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+ u8 pvlan_reserved2[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
+#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
+#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
+#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
+#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
+#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
+#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
+#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
+#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
+#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
+#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
+#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
+#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
+#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
+#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
+#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* outer tags section */
+ __le16 outer_tag;
+ u8 outer_tag_flags;
+#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0
+#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0
+#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1
+#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
+#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
+#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4)
+#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6)
+ u8 outer_tag_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
+#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
+ __le16 q_mapping[16];
+#define ICE_AQ_VSI_Q_S 0
+#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
+ __le16 tc_mapping[8];
+#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
+#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
+#define ICE_AQ_VSI_TC_Q_NUM_S 11
+#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
+ /* queueing option section */
+ u8 q_opt_rss;
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+ u8 q_opt_tc;
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
+#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
+ u8 q_opt_flags;
+#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
+ u8 q_opt_reserved[3];
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ /* section 10 */
+ __le16 sect_10_reserved;
+ /* flow director section */
+ __le16 fd_options;
+#define ICE_AQ_VSI_FD_ENABLE BIT(0)
+#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
+#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
+ __le16 max_fd_fltr_dedicated;
+ __le16 max_fd_fltr_shared;
+ __le16 fd_def_q;
+#define ICE_AQ_VSI_FD_DEF_Q_S 0
+#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
+#define ICE_AQ_VSI_FD_DEF_GRP_S 12
+#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
+ __le16 fd_report_opt;
+#define ICE_AQ_VSI_FD_REPORT_Q_S 0
+#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
+#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
+ /* PASID section */
+ __le32 pasid_id;
+#define ICE_AQ_VSI_PASID_ID_S 0
+#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
+#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
+ u8 reserved[24];
+};
+
+/* Add/update mirror rule - direct (0x0260) */
+#define ICE_AQC_RULE_ID_VALID_S 7
+#define ICE_AQC_RULE_ID_VALID_M (0x1 << ICE_AQC_RULE_ID_VALID_S)
+#define ICE_AQC_RULE_ID_S 0
+#define ICE_AQC_RULE_ID_M (0x3F << ICE_AQC_RULE_ID_S)
+
+/* The following defines are to be used while processing the caller-specified
+ * mirror list of VSI indexes.
+ */
+/* Action: Byte.bit (1.7)
+ * 0 = Remove VSI from mirror rule
+ * 1 = Add VSI to mirror rule
+ */
+#define ICE_AQC_RULE_ACT_S 15
+#define ICE_AQC_RULE_ACT_M (0x1 << ICE_AQC_RULE_ACT_S)
+/* Action: 1.2:0.0 = Mirrored VSI */
+#define ICE_AQC_RULE_MIRRORED_VSI_S 0
+#define ICE_AQC_RULE_MIRRORED_VSI_M (0x7FF << ICE_AQC_RULE_MIRRORED_VSI_S)
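+
+/* Illustrative sketch only (not part of the shared code): encoding one entry
+ * of the caller-supplied mirror list using the defines above. CPU_TO_LE16 is
+ * assumed to be the driver's 16-bit little-endian conversion macro.
+ */
+static inline __le16
+ice_example_mir_list_entry(u16 vsi_idx, u8 add_to_rule)
+{
+	u16 entry = (vsi_idx << ICE_AQC_RULE_MIRRORED_VSI_S) &
+	    ICE_AQC_RULE_MIRRORED_VSI_M;
+
+	if (add_to_rule)
+		entry |= ICE_AQC_RULE_ACT_M; /* 1 = add VSI to the rule */
+	return CPU_TO_LE16(entry);
+}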
+
+/* This is to be used by the add/update mirror rule Admin Queue command.
+ * In case of add mirror rule: if the rule ID is specified as
+ * INVAL_MIRROR_RULE_ID, a new rule ID is allocated from the shared pool.
+ * If the specified rule_id is valid, it is used. If the specified rule_id
+ * is already in use, a new mirroring rule is added.
+ */
+#define ICE_INVAL_MIRROR_RULE_ID 0xFFFF
+
+struct ice_aqc_add_update_mir_rule {
+ __le16 rule_id;
+
+ __le16 rule_type;
+#define ICE_AQC_RULE_TYPE_S 0
+#define ICE_AQC_RULE_TYPE_M (0x7 << ICE_AQC_RULE_TYPE_S)
+ /* VPORT ingress/egress */
+#define ICE_AQC_RULE_TYPE_VPORT_INGRESS 0x1
+#define ICE_AQC_RULE_TYPE_VPORT_EGRESS 0x2
+ /* Physical port ingress mirroring.
+ * All traffic received by this port
+ */
+#define ICE_AQC_RULE_TYPE_PPORT_INGRESS 0x6
+ /* Physical port egress mirroring. All traffic sent by this port */
+#define ICE_AQC_RULE_TYPE_PPORT_EGRESS 0x7
+
+ /* Number of mirrored entries.
+ * The values are in the command buffer
+ */
+ __le16 num_entries;
+
+ /* Destination VSI */
+ __le16 dest;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Delete mirror rule - direct(0x0261) */
+struct ice_aqc_delete_mir_rule {
+ __le16 rule_id;
+ __le16 rsvd;
+
+ /* Byte.bit: 20.0 = Keep allocation. If set VSI stays part of
+ * the PF allocated resources, otherwise it is returned to the
+ * shared pool
+ */
+#define ICE_AQC_FLAG_KEEP_ALLOCD_S 0
+#define ICE_AQC_FLAG_KEEP_ALLOCD_M (0x1 << ICE_AQC_FLAG_KEEP_ALLOCD_S)
+ __le16 flags;
+
+ u8 reserved[10];
+};
+
+/* Set/Get storm config - (direct 0x0280, 0x0281) */
+/* This structure holds the get storm configuration response; the same
+ * structure is used to perform set_storm_cfg.
+ */
+struct ice_aqc_storm_cfg {
+ __le32 bcast_thresh_size;
+ __le32 mcast_thresh_size;
+ /* Bit 18:0 - Traffic upper threshold size
+ * Bit 31:19 - Reserved
+ */
+#define ICE_AQ_THRESHOLD_S 0
+#define ICE_AQ_THRESHOLD_M (0x7FFFF << ICE_AQ_THRESHOLD_S)
+
+ __le32 storm_ctrl_ctrl;
+ /* Bit 0: MDIPW - Drop Multicast packets in previous window
+ * Bit 1: MDICW - Drop multicast packets in current window
+ * Bit 2: BDIPW - Drop broadcast packets in previous window
+ * Bit 3: BDICW - Drop broadcast packets in current window
+ */
+#define ICE_AQ_STORM_CTRL_MDIPW_DROP_MULTICAST BIT(0)
+#define ICE_AQ_STORM_CTRL_MDICW_DROP_MULTICAST BIT(1)
+#define ICE_AQ_STORM_CTRL_BDIPW_DROP_MULTICAST BIT(2)
+#define ICE_AQ_STORM_CTRL_BDICW_DROP_MULTICAST BIT(3)
+ /* Bit 7:5 : Reserved */
+ /* Bit 27:8 : Interval - BSC/MSC Time-interval specification: The
+ * interval size for applying ingress broadcast or multicast storm
+ * control.
+ */
+#define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S 8
+#define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_M \
+ (0xFFFFF << ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S)
+ __le32 reserved;
+};
+
+#define ICE_MAX_NUM_RECIPES 64
+
+/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
+ */
+struct ice_aqc_sw_rules {
+ /* ops: add switch rules, referring to the number of rules.
+ * ops: update switch rules, referring to the number of filters.
+ * ops: remove switch rules, referring to the entry index.
+ * ops: get switch rules, referring to the number of filters.
+ */
+ __le16 num_rules_fltr_entry_index;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+#pragma pack(1)
+/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
+ * This structure describes the lookup rules and associated actions. "index"
+ * is returned as part of a response to a successful Add command, and can be
+ * used to identify the rule for Update/Get/Remove commands.
+ */
+struct ice_sw_rule_lkup_rx_tx {
+ __le16 recipe_id;
+#define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10
+ /* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */
+ __le16 src;
+ __le32 act;
+
+ /* Bit 0:1 - Action type */
+#define ICE_SINGLE_ACT_TYPE_S 0x00
+#define ICE_SINGLE_ACT_TYPE_M (0x3 << ICE_SINGLE_ACT_TYPE_S)
+
+ /* Bit 2 - Loop back enable
+ * Bit 3 - LAN enable
+ */
+#define ICE_SINGLE_ACT_LB_ENABLE BIT(2)
+#define ICE_SINGLE_ACT_LAN_ENABLE BIT(3)
+
+ /* Action type = 0 - Forward to VSI or VSI list */
+#define ICE_SINGLE_ACT_VSI_FORWARDING 0x0
+
+#define ICE_SINGLE_ACT_VSI_ID_S 4
+#define ICE_SINGLE_ACT_VSI_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_ID_S)
+#define ICE_SINGLE_ACT_VSI_LIST_ID_S 4
+#define ICE_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_LIST_ID_S)
+ /* This bit needs to be set if action is forward to VSI list */
+#define ICE_SINGLE_ACT_VSI_LIST BIT(14)
+#define ICE_SINGLE_ACT_VALID_BIT BIT(17)
+#define ICE_SINGLE_ACT_DROP BIT(18)
+
+ /* Action type = 1 - Forward to Queue of Queue group */
+#define ICE_SINGLE_ACT_TO_Q 0x1
+#define ICE_SINGLE_ACT_Q_INDEX_S 4
+#define ICE_SINGLE_ACT_Q_INDEX_M (0x7FF << ICE_SINGLE_ACT_Q_INDEX_S)
+#define ICE_SINGLE_ACT_Q_REGION_S 15
+#define ICE_SINGLE_ACT_Q_REGION_M (0x7 << ICE_SINGLE_ACT_Q_REGION_S)
+#define ICE_SINGLE_ACT_Q_PRIORITY BIT(18)
+
+ /* Action type = 2 - Prune */
+#define ICE_SINGLE_ACT_PRUNE 0x2
+#define ICE_SINGLE_ACT_EGRESS BIT(15)
+#define ICE_SINGLE_ACT_INGRESS BIT(16)
+#define ICE_SINGLE_ACT_PRUNET BIT(17)
+ /* Bit 18 should be set to 0 for this action */
+
+ /* Action type = 2 - Pointer */
+#define ICE_SINGLE_ACT_PTR 0x2
+#define ICE_SINGLE_ACT_PTR_VAL_S 4
+#define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S)
+ /* Bit 18 should be set to 1 */
+#define ICE_SINGLE_ACT_PTR_BIT BIT(18)
+
+ /* Action type = 3 - Other actions. Last two bits
+ * are other action identifier
+ */
+#define ICE_SINGLE_ACT_OTHER_ACTS 0x3
+#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17
+#define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \
+ (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S)
+
+ /* Bit 17:18 - Defines other actions */
+ /* Other action = 0 - Mirror VSI */
+#define ICE_SINGLE_OTHER_ACT_MIRROR 0
+#define ICE_SINGLE_ACT_MIRROR_VSI_ID_S 4
+#define ICE_SINGLE_ACT_MIRROR_VSI_ID_M \
+ (0x3FF << ICE_SINGLE_ACT_MIRROR_VSI_ID_S)
+
+ /* Other action = 3 - Set Stat count */
+#define ICE_SINGLE_OTHER_ACT_STAT_COUNT 3
+#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_S 4
+#define ICE_SINGLE_ACT_STAT_COUNT_INDEX_M \
+ (0x7F << ICE_SINGLE_ACT_STAT_COUNT_INDEX_S)
+
+ __le16 index; /* The index of the rule in the lookup table */
+ /* Length and values of the header to be matched per recipe or
+ * lookup-type
+ */
+ __le16 hdr_len;
+ u8 hdr[1];
+};
+#pragma pack()
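+
+/* Illustrative sketch only (not part of the shared code): composing the 'act'
+ * field of a lookup rule for a "forward to VSI" action using the defines
+ * above. CPU_TO_LE32 is assumed to be the driver's 32-bit little-endian
+ * conversion macro.
+ */
+static inline __le32
+ice_example_fwd_to_vsi_act(u16 hw_vsi_id)
+{
+	u32 act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT |
+	    ICE_SINGLE_ACT_LAN_ENABLE;
+
+	act |= ((u32)hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
+	    ICE_SINGLE_ACT_VSI_ID_M;
+	return CPU_TO_LE32(act);
+}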
+
+/* Add/Update/Remove large action command/response entry
+ * "index" is returned as part of a response to a successful Add command, and
+ * can be used to identify the action for Update/Get/Remove commands.
+ */
+struct ice_sw_rule_lg_act {
+ __le16 index; /* Index in large action table */
+ __le16 size;
+ __le32 act[1]; /* array of size for actions */
+ /* Max number of large actions */
+#define ICE_MAX_LG_ACT 4
+ /* Bit 0:1 - Action type */
+#define ICE_LG_ACT_TYPE_S 0
+#define ICE_LG_ACT_TYPE_M (0x7 << ICE_LG_ACT_TYPE_S)
+
+ /* Action type = 0 - Forward to VSI or VSI list */
+#define ICE_LG_ACT_VSI_FORWARDING 0
+#define ICE_LG_ACT_VSI_ID_S 3
+#define ICE_LG_ACT_VSI_ID_M (0x3FF << ICE_LG_ACT_VSI_ID_S)
+#define ICE_LG_ACT_VSI_LIST_ID_S 3
+#define ICE_LG_ACT_VSI_LIST_ID_M (0x3FF << ICE_LG_ACT_VSI_LIST_ID_S)
+ /* This bit needs to be set if action is forward to VSI list */
+#define ICE_LG_ACT_VSI_LIST BIT(13)
+
+#define ICE_LG_ACT_VALID_BIT BIT(16)
+
+ /* Action type = 1 - Forward to Queue of Queue group */
+#define ICE_LG_ACT_TO_Q 0x1
+#define ICE_LG_ACT_Q_INDEX_S 3
+#define ICE_LG_ACT_Q_INDEX_M (0x7FF << ICE_LG_ACT_Q_INDEX_S)
+#define ICE_LG_ACT_Q_REGION_S 14
+#define ICE_LG_ACT_Q_REGION_M (0x7 << ICE_LG_ACT_Q_REGION_S)
+#define ICE_LG_ACT_Q_PRIORITY_SET BIT(17)
+
+ /* Action type = 2 - Prune */
+#define ICE_LG_ACT_PRUNE 0x2
+#define ICE_LG_ACT_EGRESS BIT(14)
+#define ICE_LG_ACT_INGRESS BIT(15)
+#define ICE_LG_ACT_PRUNET BIT(16)
+
+ /* Action type = 3 - Mirror VSI */
+#define ICE_LG_OTHER_ACT_MIRROR 0x3
+#define ICE_LG_ACT_MIRROR_VSI_ID_S 3
+#define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
+
+ /* Action type = 5 - Generic Value */
+#define ICE_LG_ACT_GENERIC 0x5
+#define ICE_LG_ACT_GENERIC_VALUE_S 3
+#define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
+#define ICE_LG_ACT_GENERIC_OFFSET_S 19
+#define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
+#define ICE_LG_ACT_GENERIC_PRIORITY_S 22
+#define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7
+
+ /* Action = 7 - Set Stat count */
+#define ICE_LG_ACT_STAT_COUNT 0x7
+#define ICE_LG_ACT_STAT_COUNT_S 3
+#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
+};
+
+/* Add/Update/Remove VSI list command/response entry
+ * "index" is returned as part of a response to a successful Add command, and
+ * can be used to identify the VSI list for Update/Get/Remove commands.
+ */
+struct ice_sw_rule_vsi_list {
+ __le16 index; /* Index of VSI/Prune list */
+ __le16 number_vsi;
+ __le16 vsi[1]; /* Array of number_vsi VSI numbers */
+};
+
+#pragma pack(1)
+/* Query VSI list command/response entry */
+struct ice_sw_rule_vsi_list_query {
+ __le16 index;
+ ice_declare_bitmap(vsi_list, ICE_MAX_VSI);
+};
+#pragma pack()
+
+#pragma pack(1)
+/* Add switch rule response:
+ * The content of the return buffer is the same as the input buffer. The
+ * status field and LUT index are updated as part of the response.
+ */
+struct ice_aqc_sw_rules_elem {
+ __le16 type; /* Switch rule type, one of T_... */
+#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
+#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
+#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
+#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
+#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
+ __le16 status;
+ union {
+ struct ice_sw_rule_lkup_rx_tx lkup_tx_rx;
+ struct ice_sw_rule_lg_act lg_act;
+ struct ice_sw_rule_vsi_list vsi_list;
+ struct ice_sw_rule_vsi_list_query vsi_list_query;
+ } pdata;
+};
+
+#pragma pack()
+
+/* PFC Ignore (direct 0x0301)
+ * The command and response use the same descriptor structure
+ */
+struct ice_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 cmd_flags; /* unused in response */
+#define ICE_AQC_PFC_IGNORE_SET BIT(7)
+#define ICE_AQC_PFC_IGNORE_CLEAR 0
+ u8 reserved[14];
+};
+
+/* Set PFC Mode (direct 0x0303)
+ * Query PFC Mode (direct 0x0302)
+ */
+struct ice_aqc_set_query_pfc_mode {
+ u8 pfc_mode;
+/* For Set Command response, reserved in all other cases */
+#define ICE_AQC_PFC_NOT_CONFIGURED 0
+/* For Query Command response, reserved in all other cases */
+#define ICE_AQC_DCB_DIS 0
+#define ICE_AQC_PFC_VLAN_BASED_PFC 1
+#define ICE_AQC_PFC_DSCP_BASED_PFC 2
+ u8 rsvd[15];
+};
+
+/* Set DCB Parameters (direct 0x0306) */
+struct ice_aqc_set_dcb_params {
+ u8 cmd_flags; /* unused in response */
+#define ICE_AQC_LINK_UP_DCB_CFG BIT(0)
+ u8 valid_flags; /* unused in response */
+#define ICE_AQC_LINK_UP_DCB_CFG_VALID BIT(0)
+ u8 rsvd[14];
+};
+
+/* Get Default Topology (indirect 0x0400) */
+struct ice_aqc_get_topo {
+ u8 port_num;
+ u8 num_branches;
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Update TSE (indirect 0x0403)
+ * Get TSE (indirect 0x0404)
+ * Add TSE (indirect 0x0401)
+ * Delete TSE (indirect 0x040F)
+ * Move TSE (indirect 0x0408)
+ * Suspend Nodes (indirect 0x0409)
+ * Resume Nodes (indirect 0x040A)
+ */
+struct ice_aqc_sched_elem_cmd {
+ __le16 num_elem_req; /* Used by commands */
+ __le16 num_elem_resp; /* Used by responses */
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the buffer for:
+ * Suspend Nodes (indirect 0x0409)
+ * Resume Nodes (indirect 0x040A)
+ */
+struct ice_aqc_suspend_resume_elem {
+ __le32 teid[1];
+};
+
+struct ice_aqc_txsched_move_grp_info_hdr {
+ __le32 src_parent_teid;
+ __le32 dest_parent_teid;
+ __le16 num_elems;
+ __le16 reserved;
+};
+
+struct ice_aqc_move_elem {
+ struct ice_aqc_txsched_move_grp_info_hdr hdr;
+ __le32 teid[1];
+};
+
+struct ice_aqc_elem_info_bw {
+ __le16 bw_profile_idx;
+ __le16 bw_alloc;
+};
+
+struct ice_aqc_txsched_elem {
+ u8 elem_type; /* Special field, reserved for some aq calls */
+#define ICE_AQC_ELEM_TYPE_UNDEFINED 0x0
+#define ICE_AQC_ELEM_TYPE_ROOT_PORT 0x1
+#define ICE_AQC_ELEM_TYPE_TC 0x2
+#define ICE_AQC_ELEM_TYPE_SE_GENERIC 0x3
+#define ICE_AQC_ELEM_TYPE_ENTRY_POINT 0x4
+#define ICE_AQC_ELEM_TYPE_LEAF 0x5
+#define ICE_AQC_ELEM_TYPE_SE_PADDED 0x6
+ u8 valid_sections;
+#define ICE_AQC_ELEM_VALID_GENERIC BIT(0)
+#define ICE_AQC_ELEM_VALID_CIR BIT(1)
+#define ICE_AQC_ELEM_VALID_EIR BIT(2)
+#define ICE_AQC_ELEM_VALID_SHARED BIT(3)
+ u8 generic;
+#define ICE_AQC_ELEM_GENERIC_MODE_M 0x1
+#define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1
+#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)
+#define ICE_AQC_ELEM_GENERIC_SP_S 0x4
+#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S)
+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5
+#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \
+ (0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S)
+ u8 flags; /* Special field, reserved for some aq calls */
+#define ICE_AQC_ELEM_FLAG_SUSPEND_M 0x1
+ struct ice_aqc_elem_info_bw cir_bw;
+ struct ice_aqc_elem_info_bw eir_bw;
+ __le16 srl_id;
+ __le16 reserved2;
+};
+
+struct ice_aqc_txsched_elem_data {
+ __le32 parent_teid;
+ __le32 node_teid;
+ struct ice_aqc_txsched_elem data;
+};
+
+struct ice_aqc_txsched_topo_grp_info_hdr {
+ __le32 parent_teid;
+ __le16 num_elems;
+ __le16 reserved2;
+};
+
+struct ice_aqc_add_elem {
+ struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
+struct ice_aqc_conf_elem {
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
+struct ice_aqc_get_elem {
+ struct ice_aqc_txsched_elem_data generic[1];
+};
+
+struct ice_aqc_get_topo_elem {
+ struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+ struct ice_aqc_txsched_elem_data
+ generic[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+};
+
+struct ice_aqc_delete_elem {
+ struct ice_aqc_txsched_topo_grp_info_hdr hdr;
+ __le32 teid[1];
+};
+
+/* Query Port ETS (indirect 0x040E)
+ *
+ * This indirect command is used to query port TC node configuration.
+ */
+struct ice_aqc_query_port_ets {
+ __le32 port_teid;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_port_ets_elem {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ /* 3 bits for UP per TC 0-7, 4th byte reserved */
+ __le32 up2tc;
+ u8 tc_bw_share[8];
+ __le32 port_eir_prof_id;
+ __le32 port_cir_prof_id;
+ /* 3 bits per Node priority to TC 0-7, 4th byte reserved */
+ __le32 tc_node_prio;
+#define ICE_TC_NODE_PRIO_S 0x4
+ u8 reserved1[4];
+ __le32 tc_node_teid[8]; /* Used for response, reserved in command */
+};
+
+/* Rate limiting profile for
+ * Add RL profile (indirect 0x0410)
+ * Query RL profile (indirect 0x0411)
+ * Remove RL profile (indirect 0x0415)
+ * These indirect commands act on single or multiple
+ * RL profiles with specified data.
+ */
+struct ice_aqc_rl_profile {
+ __le16 num_profiles;
+ __le16 num_processed; /* Only for response. Reserved in Command. */
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_rl_profile_elem {
+ u8 level;
+ u8 flags;
+#define ICE_AQC_RL_PROFILE_TYPE_S 0x0
+#define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S)
+#define ICE_AQC_RL_PROFILE_TYPE_CIR 0
+#define ICE_AQC_RL_PROFILE_TYPE_EIR 1
+#define ICE_AQC_RL_PROFILE_TYPE_SRL 2
+/* The following flag is used for Query RL Profile Data */
+#define ICE_AQC_RL_PROFILE_INVAL_S 0x7
+#define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S)
+
+ __le16 profile_id;
+ __le16 max_burst_size;
+ __le16 rl_multiply;
+ __le16 wake_up_calc;
+ __le16 rl_encode;
+};
+
+struct ice_aqc_rl_profile_generic_elem {
+ struct ice_aqc_rl_profile_elem generic[1];
+};
+
+/* Configure L2 Node CGD (indirect 0x0414)
+ * This indirect command allows configuring a congestion domain for given L2
+ * node TEIDs in the scheduler topology.
+ */
+struct ice_aqc_cfg_l2_node_cgd {
+ __le16 num_l2_nodes;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_cfg_l2_node_cgd_elem {
+ __le32 node_teid;
+ u8 cgd;
+ u8 reserved[3];
+};
+
+struct ice_aqc_cfg_l2_node_cgd_data {
+ struct ice_aqc_cfg_l2_node_cgd_elem elem[1];
+};
+
+/* Query Scheduler Resource Allocation (indirect 0x0412)
+ * This indirect command retrieves the scheduler resources allocated by
+ * EMP Firmware to the given PF.
+ */
+struct ice_aqc_query_txsched_res {
+ u8 reserved[8];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_generic_sched_props {
+ __le16 phys_levels;
+ __le16 logical_levels;
+ u8 flattening_bitmap;
+ u8 max_device_cgds;
+ u8 max_pf_cgds;
+ u8 rsvd0;
+ __le16 rdma_qsets;
+ u8 rsvd1[22];
+};
+
+struct ice_aqc_layer_props {
+ u8 logical_layer;
+ u8 chunk_size;
+ __le16 max_device_nodes;
+ __le16 max_pf_nodes;
+ u8 rsvd0[4];
+ __le16 max_sibl_grp_sz;
+ __le16 max_cir_rl_profiles;
+ __le16 max_eir_rl_profiles;
+ __le16 max_srl_profiles;
+ u8 rsvd1[14];
+};
+
+struct ice_aqc_query_txsched_res_resp {
+ struct ice_aqc_generic_sched_props sched_props;
+ struct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+};
+
+/* Query Node to Root Topology (indirect 0x0413)
+ * This command uses ice_aqc_get_elem as its data buffer.
+ */
+struct ice_aqc_query_node_to_root {
+ __le32 teid;
+ __le32 num_nodes; /* Response only */
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ice_aqc_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define ICE_AQC_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.2 : Report mode
+ * 00b - Report NVM capabilities
+ * 01b - Report topology capabilities
+ * 10b - Report SW configured
+ */
+#define ICE_AQC_REPORT_MODE_S 1
+#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S)
+#define ICE_AQC_REPORT_NVM_CAP 0
+#define ICE_AQC_REPORT_TOPO_CAP BIT(1)
+#define ICE_AQC_REPORT_SW_CFG BIT(2)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* The following defines describe the PHY types (extended).
+ * The first set of defines is for phy_type_low.
+ */
+#define ICE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define ICE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define ICE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define ICE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define ICE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define ICE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define ICE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define ICE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define ICE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define ICE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define ICE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define ICE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define ICE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define ICE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define ICE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define ICE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define ICE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define ICE_PHY_TYPE_LOW_25GBASE_T BIT_ULL(19)
+#define ICE_PHY_TYPE_LOW_25GBASE_CR BIT_ULL(20)
+#define ICE_PHY_TYPE_LOW_25GBASE_CR_S BIT_ULL(21)
+#define ICE_PHY_TYPE_LOW_25GBASE_CR1 BIT_ULL(22)
+#define ICE_PHY_TYPE_LOW_25GBASE_SR BIT_ULL(23)
+#define ICE_PHY_TYPE_LOW_25GBASE_LR BIT_ULL(24)
+#define ICE_PHY_TYPE_LOW_25GBASE_KR BIT_ULL(25)
+#define ICE_PHY_TYPE_LOW_25GBASE_KR_S BIT_ULL(26)
+#define ICE_PHY_TYPE_LOW_25GBASE_KR1 BIT_ULL(27)
+#define ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC BIT_ULL(28)
+#define ICE_PHY_TYPE_LOW_25G_AUI_C2C BIT_ULL(29)
+#define ICE_PHY_TYPE_LOW_40GBASE_CR4 BIT_ULL(30)
+#define ICE_PHY_TYPE_LOW_40GBASE_SR4 BIT_ULL(31)
+#define ICE_PHY_TYPE_LOW_40GBASE_LR4 BIT_ULL(32)
+#define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33)
+#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34)
+#define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35)
+#define ICE_PHY_TYPE_LOW_50GBASE_CR2 BIT_ULL(36)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR2 BIT_ULL(37)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR2 BIT_ULL(38)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR2 BIT_ULL(39)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC BIT_ULL(40)
+#define ICE_PHY_TYPE_LOW_50G_LAUI2 BIT_ULL(41)
+#define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC BIT_ULL(42)
+#define ICE_PHY_TYPE_LOW_50G_AUI2 BIT_ULL(43)
+#define ICE_PHY_TYPE_LOW_50GBASE_CP BIT_ULL(44)
+#define ICE_PHY_TYPE_LOW_50GBASE_SR BIT_ULL(45)
+#define ICE_PHY_TYPE_LOW_50GBASE_FR BIT_ULL(46)
+#define ICE_PHY_TYPE_LOW_50GBASE_LR BIT_ULL(47)
+#define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 BIT_ULL(48)
+#define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC BIT_ULL(49)
+#define ICE_PHY_TYPE_LOW_50G_AUI1 BIT_ULL(50)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR4 BIT_ULL(51)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR4 BIT_ULL(52)
+#define ICE_PHY_TYPE_LOW_100GBASE_LR4 BIT_ULL(53)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR4 BIT_ULL(54)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC BIT_ULL(55)
+#define ICE_PHY_TYPE_LOW_100G_CAUI4 BIT_ULL(56)
+#define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC BIT_ULL(57)
+#define ICE_PHY_TYPE_LOW_100G_AUI4 BIT_ULL(58)
+#define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 BIT_ULL(59)
+#define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 BIT_ULL(60)
+#define ICE_PHY_TYPE_LOW_100GBASE_CP2 BIT_ULL(61)
+#define ICE_PHY_TYPE_LOW_100GBASE_SR2 BIT_ULL(62)
+#define ICE_PHY_TYPE_LOW_100GBASE_DR BIT_ULL(63)
+#define ICE_PHY_TYPE_LOW_MAX_INDEX 63
+/* The second set of defines is for phy_type_high. */
+#define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1)
+#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
+#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19
+
+struct ice_aqc_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define ICE_AQC_PHY_LOW_POWER_MODE BIT(2)
+#define ICE_AQC_PHY_EN_LINK BIT(3)
+#define ICE_AQC_PHY_AN_MODE BIT(4)
+#define ICE_AQC_PHY_EN_MOD_QUAL BIT(5)
+#define ICE_AQC_PHY_EN_LESM BIT(6)
+#define ICE_AQC_PHY_EN_AUTO_FEC BIT(7)
+#define ICE_AQC_PHY_CAPS_MASK MAKEMASK(0xff, 0)
+ u8 low_power_ctrl_an;
+#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1)
+#define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2)
+#define ICE_AQC_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0)
+#define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1)
+#define ICE_AQC_PHY_EEE_EN_10GBASE_T BIT(2)
+#define ICE_AQC_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define ICE_AQC_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define ICE_AQC_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6)
+#define ICE_AQC_PHY_EEE_EN_50GBASE_KR2 BIT(7)
+#define ICE_AQC_PHY_EEE_EN_50GBASE_KR_PAM4 BIT(8)
+#define ICE_AQC_PHY_EEE_EN_100GBASE_KR4 BIT(9)
+#define ICE_AQC_PHY_EEE_EN_100GBASE_KR2_PAM4 BIT(10)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3)
+#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define ICE_AQC_PHY_FEC_MASK MAKEMASK(0xdf, 0)
+ u8 module_compliance_enforcement;
+#define ICE_AQC_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define ICE_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
+#define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define ICE_AQC_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX];
+};
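+
+/* Illustrative sketch, not part of the interface: phy_type_low and
+ * phy_type_high above are little-endian bitmaps built from the
+ * ICE_PHY_TYPE_LOW_* and ICE_PHY_TYPE_HIGH_* defines. Given a hypothetical
+ * response pointer pcaps, code could test for 25GBase-SR support with:
+ *
+ *	if (le64toh(pcaps->phy_type_low) & ICE_PHY_TYPE_LOW_25GBASE_SR)
+ *		(the port reports 25GBase-SR capability)
+ *
+ * where le64toh() stands in for whatever byte-order helper the driver uses.
+ */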
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ice_aqc_set_phy_cfg {
+ u8 lport_num;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set PHY config command data structure */
+struct ice_aqc_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define ICE_AQ_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0)
+#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
+#define ICE_AQ_PHY_ENA_LINK BIT(3)
+#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define ICE_AQ_PHY_ENA_LESM BIT(6)
+#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */
+ __le16 eeer_value;
+ u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct ice_aqc_set_mac_cfg {
+ __le16 max_frame_size;
+ u8 params;
+#define ICE_AQ_SET_MAC_PACE_S 3
+#define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S)
+#define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7)
+#define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0
+#define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M
+ u8 tx_tmr_priority;
+ __le16 tx_tmr_value;
+ __le16 fc_refresh_threshold;
+ u8 drop_opts;
+#define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0)
+#define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0
+#define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0)
+ u8 reserved[7];
+};
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ice_aqc_restart_an {
+ u8 lport_num;
+ u8 reserved;
+ u8 cmd_flags;
+#define ICE_AQC_RESTART_AN_LINK_RESTART BIT(1)
+#define ICE_AQC_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ice_aqc_get_link_status {
+ u8 lport_num;
+ u8 reserved;
+ __le16 cmd_flags;
+#define ICE_AQ_LSE_M 0x3
+#define ICE_AQ_LSE_NOP 0x0
+#define ICE_AQ_LSE_DIS 0x2
+#define ICE_AQ_LSE_ENA 0x3
+ /* only response uses this flag */
+#define ICE_AQ_LSE_IS_ENABLED 0x1
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ice_aqc_get_link_status_data {
+ u8 topo_media_conflict;
+#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0)
+#define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1)
+#define ICE_AQ_LINK_TOPO_CORRUPT BIT(2)
+#define ICE_AQ_LINK_TOPO_UNREACH_PRT BIT(4)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define ICE_AQ_LINK_CFG_ERR BIT(0)
+ u8 link_info;
+#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
+#define ICE_AQ_LINK_FAULT BIT(1)
+#define ICE_AQ_LINK_FAULT_TX BIT(2)
+#define ICE_AQ_LINK_FAULT_RX BIT(3)
+#define ICE_AQ_LINK_FAULT_REMOTE BIT(4)
+#define ICE_AQ_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define ICE_AQ_MEDIA_AVAILABLE BIT(6)
+#define ICE_AQ_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define ICE_AQ_AN_COMPLETED BIT(0)
+#define ICE_AQ_LP_AN_ABILITY BIT(1)
+#define ICE_AQ_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define ICE_AQ_FEC_EN BIT(3)
+#define ICE_AQ_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define ICE_AQ_LINK_PAUSE_TX BIT(5)
+#define ICE_AQ_LINK_PAUSE_RX BIT(6)
+#define ICE_AQ_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0)
+#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define ICE_AQ_LINK_TX_S 2
+#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S)
+#define ICE_AQ_LINK_TX_ACTIVE 0
+#define ICE_AQ_LINK_TX_DRAINED 1
+#define ICE_AQ_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define ICE_AQ_LINK_LB_PHY_LCL BIT(0)
+#define ICE_AQ_LINK_LB_PHY_RMT BIT(1)
+#define ICE_AQ_LINK_LB_MAC_LCL BIT(2)
+#define ICE_AQ_LINK_LB_PHY_IDX_S 3
+#define ICE_AQ_LINK_LB_PHY_IDX_M (0x7 << ICE_AQ_LINK_LB_PHY_IDX_S)
+ __le16 max_frame_size;
+ u8 cfg;
+#define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0)
+#define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1)
+#define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2)
+#define ICE_AQ_FEC_MASK MAKEMASK(0x7, 0)
+ /* Pacing Config */
+#define ICE_AQ_CFG_PACING_S 3
+#define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S)
+#define ICE_AQ_CFG_PACING_TYPE_M BIT(7)
+#define ICE_AQ_CFG_PACING_TYPE_AVG 0
+#define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define ICE_AQ_PWR_CLASS_M 0x3
+#define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0
+#define ICE_AQ_LINK_PWR_BASET_HIGH 1
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_2 1
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2
+#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define ICE_AQ_LINK_SPEED_M 0x7FF
+#define ICE_AQ_LINK_SPEED_10MB BIT(0)
+#define ICE_AQ_LINK_SPEED_100MB BIT(1)
+#define ICE_AQ_LINK_SPEED_1000MB BIT(2)
+#define ICE_AQ_LINK_SPEED_2500MB BIT(3)
+#define ICE_AQ_LINK_SPEED_5GB BIT(4)
+#define ICE_AQ_LINK_SPEED_10GB BIT(5)
+#define ICE_AQ_LINK_SPEED_20GB BIT(6)
+#define ICE_AQ_LINK_SPEED_25GB BIT(7)
+#define ICE_AQ_LINK_SPEED_40GB BIT(8)
+#define ICE_AQ_LINK_SPEED_50GB BIT(9)
+#define ICE_AQ_LINK_SPEED_100GB BIT(10)
+#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
+ __le32 reserved3; /* Aligns next field to 8-byte boundary */
+ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+};
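+
+/* Illustrative sketch, not part of the interface: given a hypothetical
+ * response pointer link, code could read the state reported by Get Link
+ * Status (0x0607) with:
+ *
+ *	bool up = (link->link_info & ICE_AQ_LINK_UP) != 0;
+ *	u16 speed = le16toh(link->link_speed) & ICE_AQ_LINK_SPEED_M;
+ *
+ * where a set ICE_AQ_LINK_SPEED_100GB bit in speed indicates 100 Gb/s, and
+ * le16toh() stands in for whatever byte-order helper the driver uses.
+ */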
+
+/* Set event mask command (direct 0x0613) */
+struct ice_aqc_set_event_mask {
+ u8 lport_num;
+ u8 reserved[7];
+ __le16 event_mask;
+#define ICE_AQ_LINK_EVENT_UPDOWN BIT(1)
+#define ICE_AQ_LINK_EVENT_MEDIA_NA BIT(2)
+#define ICE_AQ_LINK_EVENT_LINK_FAULT BIT(3)
+#define ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define ICE_AQ_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7)
+#define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define ICE_AQ_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define ICE_AQ_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+ u8 reserved1[6];
+};
+
+/* Set PHY Loopback command (direct 0x0619) */
+struct ice_aqc_set_phy_lb {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQ_PHY_LB_PORT_NUM_VALID BIT(0)
+ u8 phy_index;
+ u8 lb_mode;
+#define ICE_AQ_PHY_LB_EN BIT(0)
+#define ICE_AQ_PHY_LB_TYPE_M BIT(1)
+#define ICE_AQ_PHY_LB_TYPE_LOCAL 0
+#define ICE_AQ_PHY_LB_TYPE_REMOTE ICE_AQ_PHY_LB_TYPE_M
+#define ICE_AQ_PHY_LB_LEVEL_M BIT(2)
+#define ICE_AQ_PHY_LB_LEVEL_PMD 0
+#define ICE_AQ_PHY_LB_LEVEL_PCS ICE_AQ_PHY_LB_LEVEL_M
+ u8 reserved2[12];
+};
+
+/* Set MAC Loopback command (direct 0x0620) */
+struct ice_aqc_set_mac_lb {
+ u8 lb_mode;
+#define ICE_AQ_MAC_LB_EN BIT(0)
+#define ICE_AQ_MAC_LB_OSC_CLK BIT(1)
+ u8 reserved[15];
+};
+
+/* DNL Get Status command (indirect 0x680)
+ * Structure used for the response; the command uses the generic
+ * ice_aqc_generic struct to pass a buffer address to the FW.
+ */
+struct ice_aqc_dnl_get_status {
+ u8 ctx;
+ u8 status;
+#define ICE_AQ_DNL_STATUS_IDLE 0x0
+#define ICE_AQ_DNL_STATUS_RESERVED 0x1
+#define ICE_AQ_DNL_STATUS_STOPPED 0x2
+#define ICE_AQ_DNL_STATUS_FATAL 0x3 /* Fatal DNL engine error */
+#define ICE_AQ_DNL_SRC_S 3
+#define ICE_AQ_DNL_SRC_M (0x3 << ICE_AQ_DNL_SRC_S)
+#define ICE_AQ_DNL_SRC_NVM (0x0 << ICE_AQ_DNL_SRC_S)
+#define ICE_AQ_DNL_SRC_NVM_SCRATCH (0x1 << ICE_AQ_DNL_SRC_S)
+ u8 stack_ptr;
+#define ICE_AQ_DNL_ST_PTR_S 0x0
+#define ICE_AQ_DNL_ST_PTR_M (0x7 << ICE_AQ_DNL_ST_PTR_S)
+ u8 engine_flags;
+#define ICE_AQ_DNL_FLAGS_ERROR BIT(2)
+#define ICE_AQ_DNL_FLAGS_NEGATIVE BIT(3)
+#define ICE_AQ_DNL_FLAGS_OVERFLOW BIT(4)
+#define ICE_AQ_DNL_FLAGS_ZERO BIT(5)
+#define ICE_AQ_DNL_FLAGS_CARRY BIT(6)
+#define ICE_AQ_DNL_FLAGS_JUMP BIT(7)
+ __le16 pc;
+ __le16 activity_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_dnl_get_status_data {
+ __le16 activity_err_code;
+ __le16 act_err_code;
+#define ICE_AQ_DNL_ACT_ERR_SUCCESS 0x0000 /* no error */
+#define ICE_AQ_DNL_ACT_ERR_PARSE 0x8001 /* NVM parse error */
+#define ICE_AQ_DNL_ACT_ERR_UNSUPPORTED 0x8002 /* unsupported action */
+#define ICE_AQ_DNL_ACT_ERR_NOT_FOUND 0x8003 /* activity not found */
+#define ICE_AQ_DNL_ACT_ERR_BAD_JUMP 0x8004 /* an illegal jump */
+#define ICE_AQ_DNL_ACT_ERR_PSTO_OVER 0x8005 /* persistent store overflow */
+#define ICE_AQ_DNL_ACT_ERR_ST_OVERFLOW 0x8006 /* stack overflow */
+#define ICE_AQ_DNL_ACT_ERR_TIMEOUT 0x8007 /* activity timeout */
+#define ICE_AQ_DNL_ACT_ERR_BREAK 0x0008 /* stopped at breakpoint */
+#define ICE_AQ_DNL_ACT_ERR_INVAL_ARG 0x0101 /* invalid action argument */
+ __le32 execution_time; /* in nanoseconds */
+ __le16 lib_ver;
+ u8 psto_local_sz;
+ u8 psto_global_sz;
+ u8 stack_sz;
+#define ICE_AQ_DNL_STACK_SZ_S 0
+#define ICE_AQ_DNL_STACK_SZ_M (0xF << ICE_AQ_DNL_STACK_SZ_S)
+ u8 port_count;
+#define ICE_AQ_DNL_PORT_CNT_S 0
+#define ICE_AQ_DNL_PORT_CNT_M (0x1F << ICE_AQ_DNL_PORT_CNT_S)
+ __le16 act_cache_cntr;
+ u32 i2c_clk_cntr;
+ u32 mdio_clk_cntr;
+ u32 sb_iosf_clk_cntr;
+};
+
+/* DNL run command (direct 0x681) */
+struct ice_aqc_dnl_run_command {
+ u8 reserved0;
+ u8 command;
+#define ICE_AQ_DNL_CMD_S 0
+#define ICE_AQ_DNL_CMD_M (0x7 << ICE_AQ_DNL_CMD_S)
+#define ICE_AQ_DNL_CMD_RESET 0x0
+#define ICE_AQ_DNL_CMD_RUN 0x1
+#define ICE_AQ_DNL_CMD_STEP 0x3
+#define ICE_AQ_DNL_CMD_ABORT 0x4
+#define ICE_AQ_DNL_CMD_SET_PC 0x7
+#define ICE_AQ_DNL_CMD_SRC_S 3
+#define ICE_AQ_DNL_CMD_SRC_M (0x3 << ICE_AQ_DNL_CMD_SRC_S)
+#define ICE_AQ_DNL_CMD_SRC_DNL 0x0
+#define ICE_AQ_DNL_CMD_SRC_SCRATCH 0x1
+ __le16 new_pc;
+ u8 reserved1[12];
+};
+
+/* DNL call command (indirect 0x682)
+ * Struct is used for both command and response
+ */
+struct ice_aqc_dnl_call_command {
+ u8 ctx; /* Used in command, reserved in response */
+ u8 reserved;
+ __le16 activity_id;
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* DNL call command/response buffer (indirect 0x682) */
+struct ice_aqc_dnl_call {
+ __le32 stores[4];
+};
+
+/* Used for both commands:
+ * DNL read sto command (indirect 0x683)
+ * DNL write sto command (indirect 0x684)
+ */
+struct ice_aqc_dnl_read_write_command {
+ u8 ctx;
+ u8 sto_sel; /* STORE select */
+#define ICE_AQC_DNL_STORE_SELECT_STORE 0x0
+#define ICE_AQC_DNL_STORE_SELECT_PSTO 0x1
+#define ICE_AQC_DNL_STORE_SELECT_STACK 0x2
+ __le16 offset;
+ __le32 data; /* Used for write sto only */
+ __le32 addr_high; /* Used for read sto only */
+ __le32 addr_low; /* Used for read sto only */
+};
+
+/* Used for both command responses:
+ * DNL read sto response (indirect 0x683)
+ * DNL write sto response (indirect 0x684)
+ */
+struct ice_aqc_dnl_read_write_response {
+ u8 reserved;
+ u8 status; /* Reserved for read command */
+ __le16 size; /* Reserved for write command */
+ __le32 data; /* Reserved for write command */
+ __le32 addr_high; /* Reserved for write command */
+ __le32 addr_low; /* Reserved for write command */
+};
+
+/* DNL set breakpoints command (indirect 0x686) */
+struct ice_aqc_dnl_set_breakpoints_command {
+ __le32 reserved[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* DNL set breakpoints data buffer structure (indirect 0x686) */
+struct ice_aqc_dnl_set_breakpoints {
+ u8 ctx;
+ u8 ena; /* 0- disabled, 1- enabled */
+ __le16 offset;
+ __le16 activity_id;
+};
+
+/* DNL read log data command (indirect 0x687) */
+struct ice_aqc_dnl_read_log_command {
+ __le16 reserved0;
+ __le16 offset;
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* DNL read log data response (indirect 0x687) */
+struct ice_aqc_dnl_read_log_response {
+ __le16 reserved;
+ __le16 size;
+ __le32 data;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_link_topo_addr {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_S 0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_M (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S)
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY 0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED 4
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_M \
+ (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S)
+#define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD 1
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PORT 2
+#define ICE_AQC_LINK_TOPO_NODE_CTX_NODE 3
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5
+ u8 index;
+ __le16 handle;
+#define ICE_AQC_LINK_TOPO_HANDLE_S 0
+#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0
+/* In case of a Mezzanine type */
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \
+ (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S 6
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M \
+ (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ice_aqc_get_link_topo {
+ struct ice_aqc_link_topo_addr addr;
+ u8 node_part_num;
+ u8 rsvd[9];
+};
+
+/* Get Link Topology Pin (direct, 0x06E1) */
+struct ice_aqc_get_link_topo_pin {
+ struct ice_aqc_link_topo_addr addr;
+ u8 input_io_params;
+#define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S 0
+#define ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_M \
+ (0x1F << ICE_AQC_LINK_TOPO_INPUT_IO_FUNC_S)
+#define ICE_AQC_LINK_TOPO_IO_FUNC_GPIO 0
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RESET_N 1
+#define ICE_AQC_LINK_TOPO_IO_FUNC_INT_N 2
+#define ICE_AQC_LINK_TOPO_IO_FUNC_PRESENT_N 3
+#define ICE_AQC_LINK_TOPO_IO_FUNC_TX_DIS 4
+#define ICE_AQC_LINK_TOPO_IO_FUNC_MODSEL_N 5
+#define ICE_AQC_LINK_TOPO_IO_FUNC_LPMODE 6
+#define ICE_AQC_LINK_TOPO_IO_FUNC_TX_FAULT 7
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RX_LOSS 8
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RS0 9
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RS1 10
+#define ICE_AQC_LINK_TOPO_IO_FUNC_EEPROM_WP 11
+/* 12 repeats intentionally due to two different uses depending on context */
+#define ICE_AQC_LINK_TOPO_IO_FUNC_LED 12
+#define ICE_AQC_LINK_TOPO_IO_FUNC_RED_LED 12
+#define ICE_AQC_LINK_TOPO_IO_FUNC_GREEN_LED 13
+#define ICE_AQC_LINK_TOPO_IO_FUNC_BLUE_LED 14
+#define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S 5
+#define ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_M \
+ (0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S)
+/* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_params;
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_S 0
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_M \
+ (0x1F << ICE_AQC_LINK_TOPO_OUTPUT_IO_FUNC_S)
+/* Use ICE_AQC_LINK_TOPO_IO_FUNC_* for the non-numerical options */
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_S 5
+#define ICE_AQC_LINK_TOPO_OUTPUT_IO_TYPE_M \
+ (0x7 << ICE_AQC_LINK_TOPO_INPUT_IO_TYPE_S)
+/* Use ICE_AQC_LINK_TOPO_NODE_TYPE_* for the type values */
+ u8 output_io_flags;
+#define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S 0
+#define ICE_AQC_LINK_TOPO_OUTPUT_SPEED_M \
+ (0x7 << ICE_AQC_LINK_TOPO_OUTPUT_SPEED_S)
+#define ICE_AQC_LINK_TOPO_OUTPUT_INT_S 3
+#define ICE_AQC_LINK_TOPO_OUTPUT_INT_M \
+ (0x3 << ICE_AQC_LINK_TOPO_OUTPUT_INT_S)
+#define ICE_AQC_LINK_TOPO_OUTPUT_POLARITY BIT(5)
+#define ICE_AQC_LINK_TOPO_OUTPUT_VALUE BIT(6)
+#define ICE_AQC_LINK_TOPO_OUTPUT_DRIVEN BIT(7)
+ u8 rsvd[7];
+};
+
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ice_aqc_i2c {
+ struct ice_aqc_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define ICE_AQC_I2C_DATA_SIZE_S 0
+#define ICE_AQC_I2C_DATA_SIZE_M (0xF << ICE_AQC_I2C_DATA_SIZE_S)
+#define ICE_AQC_I2C_ADDR_TYPE_M BIT(4)
+#define ICE_AQC_I2C_ADDR_TYPE_7BIT 0
+#define ICE_AQC_I2C_ADDR_TYPE_10BIT ICE_AQC_I2C_ADDR_TYPE_M
+#define ICE_AQC_I2C_DATA_OFFSET_S 5
+#define ICE_AQC_I2C_DATA_OFFSET_M (0x3 << ICE_AQC_I2C_DATA_OFFSET_S)
+#define ICE_AQC_I2C_USE_REPEATED_START BIT(7)
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+#define ICE_AQC_I2C_ADDR_7BIT_MASK 0x7F
+#define ICE_AQC_I2C_ADDR_10BIT_MASK 0x3FF
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ice_aqc_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
+/* Read/Write MDIO (direct, 0x06E4/0x06E5) */
+struct ice_aqc_mdio {
+ struct ice_aqc_link_topo_addr topo_addr;
+ u8 mdio_device_addr;
+#define ICE_AQC_MDIO_DEV_S 0
+#define ICE_AQC_MDIO_DEV_M (0x1F << ICE_AQC_MDIO_DEV_S)
+#define ICE_AQC_MDIO_CLAUSE_22 BIT(5)
+#define ICE_AQC_MDIO_CLAUSE_45 BIT(6)
+ u8 rsvd;
+ __le16 offset;
+ __le16 data; /* Input in write cmd, output in read cmd. */
+ u8 rsvd1[4];
+};
+
+/* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */
+struct ice_aqc_gpio_by_func {
+ struct ice_aqc_link_topo_addr topo_addr;
+ u8 io_func_num;
+#define ICE_AQC_GPIO_FUNC_S 0
+#define ICE_AQC_GPIO_FUNC_M (0x1F << ICE_AQC_GPIO_FUNC_S)
+ u8 io_value; /* Input in write cmd, output in read cmd. */
+#define ICE_AQC_GPIO_ON BIT(0)
+#define ICE_AQC_GPIO_OFF 0
+ u8 rsvd[8];
+};
+
+/* Set LED (direct, 0x06E8) */
+struct ice_aqc_set_led {
+ struct ice_aqc_link_topo_addr topo_addr;
+ u8 color_and_blink;
+#define ICE_AQC_LED_COLOR_S 0
+#define ICE_AQC_LED_COLOR_M (0x7 << ICE_AQC_LED_COLOR_S)
+#define ICE_AQC_LED_COLOR_SKIP 0
+#define ICE_AQC_LED_COLOR_RED 1
+#define ICE_AQC_LED_COLOR_ORANGE 2
+#define ICE_AQC_LED_COLOR_YELLOW 3
+#define ICE_AQC_LED_COLOR_GREEN 4
+#define ICE_AQC_LED_COLOR_BLUE 5
+#define ICE_AQC_LED_COLOR_PURPLE 6
+#define ICE_AQC_LED_BLINK_S 3
+#define ICE_AQC_LED_BLINK_M (0x7 << ICE_AQC_LED_BLINK_S)
+#define ICE_AQC_LED_BLINK_NONE 0
+#define ICE_AQC_LED_BLINK_SLOW 1
+#define ICE_AQC_LED_BLINK_SLOW_MAC 2
+#define ICE_AQC_LED_BLINK_SLOW_FLTR 3
+#define ICE_AQC_LED_BLINK_FAST 5
+#define ICE_AQC_LED_BLINK_FAST_MAC 6
+#define ICE_AQC_LED_BLINK_FAST_FLTR 7
+ u8 rsvd[9];
+};
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ice_aqc_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_PORT_ID_PORT_NUM_VALID BIT(0)
+ u8 ident_mode;
+#define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0)
+#define ICE_AQC_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
+/* Get Port Options (indirect, 0x06EA) */
+struct ice_aqc_get_port_options {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_PORT_OPT_PORT_NUM_VALID BIT(0)
+ u8 port_options_count;
+#define ICE_AQC_PORT_OPT_COUNT_S 0
+#define ICE_AQC_PORT_OPT_COUNT_M (0xF << ICE_AQC_PORT_OPT_COUNT_S)
+ u8 innermost_phy_index;
+ u8 port_options;
+#define ICE_AQC_PORT_OPT_ACTIVE_S 0
+#define ICE_AQC_PORT_OPT_ACTIVE_M (0xF << ICE_AQC_PORT_OPT_ACTIVE_S)
+#define ICE_AQC_PORT_OPT_FORCED BIT(6)
+#define ICE_AQC_PORT_OPT_VALID BIT(7)
+ u8 rsvd[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_port_options_elem {
+ u8 pmd;
+#define ICE_AQC_PORT_OPT_PMD_COUNT_S 0
+#define ICE_AQC_PORT_OPT_PMD_COUNT_M (0xF << ICE_AQC_PORT_OPT_PMD_COUNT_S)
+#define ICE_AQC_PORT_OPT_PMD_WIDTH_S 4
+#define ICE_AQC_PORT_OPT_PMD_WIDTH_M (0xF << ICE_AQC_PORT_OPT_PMD_WIDTH_S)
+ u8 max_lane_speed;
+#define ICE_AQC_PORT_OPT_MAX_LANE_S 0
+#define ICE_AQC_PORT_OPT_MAX_LANE_M (0xF << ICE_AQC_PORT_OPT_MAX_LANE_S)
+#define ICE_AQC_PORT_OPT_MAX_LANE_100M 0
+#define ICE_AQC_PORT_OPT_MAX_LANE_1G 1
+#define ICE_AQC_PORT_OPT_MAX_LANE_2500M 2
+#define ICE_AQC_PORT_OPT_MAX_LANE_5G 3
+#define ICE_AQC_PORT_OPT_MAX_LANE_10G 4
+#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
+#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
+#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+ u8 global_scid[2];
+ u8 phy_scid[2];
+};
+
+/* The buffer for command 0x06EA contains port_options_count entries
+ * in the option array.
+ */
+struct ice_aqc_get_port_options_data {
+ struct ice_aqc_get_port_options_elem option[1];
+};
+
+/* Set Port Option (direct, 0x06EB) */
+struct ice_aqc_set_port_option {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_SET_PORT_OPT_PORT_NUM_VALID BIT(0)
+ u8 selected_port_option;
+ u8 rsvd[13];
+};
+
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ice_aqc_gpio {
+ __le16 gpio_ctrl_handle;
+#define ICE_AQC_GPIO_HANDLE_S 0
+#define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ice_aqc_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define ICE_AQC_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define ICE_AQC_SFF_I2CBUS_7BIT_M 0x7F
+#define ICE_AQC_SFF_I2CBUS_10BIT_M 0x3FF
+#define ICE_AQC_SFF_I2CBUS_TYPE_M BIT(10)
+#define ICE_AQC_SFF_I2CBUS_TYPE_7BIT 0
+#define ICE_AQC_SFF_I2CBUS_TYPE_10BIT ICE_AQC_SFF_I2CBUS_TYPE_M
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_S 11
+#define ICE_AQC_SFF_SET_EEPROM_PAGE_M (0x3 << ICE_AQC_SFF_SET_EEPROM_PAGE_S)
+#define ICE_AQC_SFF_NO_PAGE_CHANGE 0
+#define ICE_AQC_SFF_SET_23_ON_MISMATCH 1
+#define ICE_AQC_SFF_SET_22_ON_MISMATCH 2
+#define ICE_AQC_SFF_IS_WRITE BIT(15)
+ __le16 i2c_mem_addr;
+ __le16 eeprom_page;
+#define ICE_AQC_SFF_EEPROM_BANK_S 0
+#define ICE_AQC_SFF_EEPROM_BANK_M (0xFF << ICE_AQC_SFF_EEPROM_BANK_S)
+#define ICE_AQC_SFF_EEPROM_PAGE_S 8
+#define ICE_AQC_SFF_EEPROM_PAGE_M (0xFF << ICE_AQC_SFF_EEPROM_PAGE_S)
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ice_aqc_nvm {
+#define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate, offset_high is used as flags2 */
+ u8 cmd_flags;
+#define ICE_AQC_NVM_LAST_CMD BIT(0)
+#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define ICE_AQC_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */
+#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
+#define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4)
+#define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6)
+#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define ICE_AQC_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3)
+#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
+ __le16 module_typeid;
+ __le16 length;
+#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM Module_Type ID, plus the offset and read_len needed for struct ice_aqc_nvm. */
+#define ICE_AQC_NVM_SECTOR_UNIT 4096 /* In Bytes */
+#define ICE_AQC_NVM_WORD_UNIT 2 /* In Bytes */
+
+#define ICE_AQC_NVM_START_POINT 0
+#define ICE_AQC_NVM_EMP_SR_PTR_OFFSET 0x90
+#define ICE_AQC_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0)
+#define ICE_AQC_NVM_EMP_SR_PTR_TYPE_S 15
+#define ICE_AQC_NVM_EMP_SR_PTR_TYPE_M BIT(15)
+#define ICE_AQC_NVM_EMP_SR_PTR_TYPE_SECTOR 1
+
+#define ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET 0x46
+#define ICE_AQC_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */
+
+#define ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID 0x129
+#define ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */
+#define ICE_AQC_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0)
+#define ICE_AQC_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
+#define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+
+/* The result of netlist NVM read comes in a TLV format. The actual data
+ * (netlist header) starts from word offset 1 (byte 2). The FW strips
+ * out the type field from the TLV header, so every netlist field offset
+ * must be adjusted by 1 word (2 bytes) to map to its correct location.
+ */
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1
+#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
+#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
+#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
+
+/* netlist ID block field offsets (word offsets) */
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4
+#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6
+#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8
+#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9
+#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
+#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+struct ice_aqc_nvm_cfg {
+ u8 cmd_flags;
+#define ICE_AQC_ANVM_MULTIPLE_ELEMS BIT(0)
+#define ICE_AQC_ANVM_IMMEDIATE_FIELD BIT(1)
+#define ICE_AQC_ANVM_NEW_CFG BIT(2)
+ u8 reserved;
+ __le16 count;
+ __le16 id;
+ u8 reserved1[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_nvm_cfg_data {
+ __le16 field_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ice_aqc_nvm_checksum {
+ u8 flags;
+#define ICE_AQC_NVM_CHECKSUM_VERIFY BIT(0)
+#define ICE_AQC_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define ICE_AQC_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+/* Send to PF command (indirect 0x0801); the ID field is used only by the PF.
+ * Send to VF command (indirect 0x0802); the ID field is used only by the PF.
+ */
+struct ice_aqc_pf_vf_msg {
+ __le32 id;
+ u32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Write/Read Alternate - Direct (direct 0x0900/0x0902) */
+struct ice_aqc_read_write_alt_direct {
+ __le32 dword0_addr;
+ __le32 dword0_value;
+ __le32 dword1_addr;
+ __le32 dword1_value;
+};
+
+/* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */
+struct ice_aqc_read_write_alt_indirect {
+ __le32 base_dword_addr;
+ __le32 num_dwords;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Done Alternate Write (direct 0x0904) */
+struct ice_aqc_done_alt_write {
+ u8 flags;
+#define ICE_AQC_CMD_UEFI_BIOS_MODE BIT(0)
+#define ICE_AQC_RESP_RESET_NEEDED BIT(1)
+ u8 reserved[15];
+};
+
+/* Clear Port Alternate Write (direct 0x0906) */
+struct ice_aqc_clear_port_alt_write {
+ u8 reserved[16];
+};
+
+/* Get LLDP MIB (indirect 0x0A00)
+ * Note: This is also used by the LLDP MIB Change Event (0x0A01)
+ * as the format is the same.
+ */
+struct ice_aqc_lldp_get_mib {
+ u8 type;
+#define ICE_AQ_LLDP_MIB_TYPE_S 0
+#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S)
+#define ICE_AQ_LLDP_MIB_LOCAL 0
+#define ICE_AQ_LLDP_MIB_REMOTE 1
+#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2
+#define ICE_AQ_LLDP_BRID_TYPE_S 2
+#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S)
+#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0
+#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1
+/* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */
+#define ICE_AQ_LLDP_TX_S 0x4
+#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S)
+#define ICE_AQ_LLDP_TX_ACTIVE 0
+#define ICE_AQ_LLDP_TX_SUSPENDED 1
+#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* The following bytes are reserved in the Get LLDP MIB command (0x0A00)
+ * and in the LLDP MIB Change Event (0x0A01); they are valid only in the
+ * Get LLDP MIB (0x0A00) response.
+ */
+ u8 reserved1;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Configure LLDP MIB Change Event (direct 0x0A01) */
+/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */
+struct ice_aqc_lldp_set_mib_change {
+ u8 command;
+#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+ u8 reserved[15];
+};
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct ice_aqc_lldp_add_delete_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct ice_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Stop LLDP (direct 0x0A05) */
+struct ice_aqc_lldp_stop {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0)
+#define ICE_AQ_LLDP_AGENT_STOP 0x0
+#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK
+#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1)
+ u8 reserved[15];
+};
+
+/* Start LLDP (direct 0x0A06) */
+struct ice_aqc_lldp_start {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_START BIT(0)
+#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1)
+ u8 reserved[15];
+};
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * The command uses the generic descriptor struct and
+ * returns the struct below as an indirect response.
+ */
+struct ice_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+#define ICE_AQC_CEE_APP_FCOE_S 0
+#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S)
+#define ICE_AQC_CEE_APP_ISCSI_S 3
+#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S)
+#define ICE_AQC_CEE_APP_FIP_S 8
+#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S)
+ __le32 tlv_status;
+#define ICE_AQC_CEE_PG_STATUS_S 0
+#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S)
+#define ICE_AQC_CEE_PFC_STATUS_S 3
+#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S)
+#define ICE_AQC_CEE_FCOE_STATUS_S 8
+#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S)
+#define ICE_AQC_CEE_ISCSI_STATUS_S 11
+#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S)
+#define ICE_AQC_CEE_FIP_STATUS_S 16
+#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S)
+ u8 reserved[12];
+};
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent, e.g. DCBX.
+ */
+struct ice_aqc_lldp_set_local_mib {
+ u8 type;
+#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
+#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
+#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
+#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
+#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_lldp_set_local_mib_resp {
+ u8 status;
+#define SET_LOCAL_MIB_RESP_EVENT_M BIT(0)
+#define SET_LOCAL_MIB_RESP_MIB_CHANGE_SILENT 0
+#define SET_LOCAL_MIB_RESP_MIB_CHANGE_EVENT SET_LOCAL_MIB_RESP_EVENT_M
+ u8 reserved[15];
+};
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used to stop/start a specific LLDP agent, e.g. DCBX.
+ * The same structure is used for the response, with the command field
+ * being used as the status field.
+ */
+struct ice_aqc_lldp_stop_start_specific_agent {
+ u8 command;
+#define ICE_AQC_START_STOP_AGENT_M BIT(0)
+#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0
+#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M
+ u8 reserved[15];
+};
+
+/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
+struct ice_aqc_get_set_rss_key {
+#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
+#define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0
+#define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28
+#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC
+#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \
+ (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \
+ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)
+
+/**
+ * struct ice_aqc_get_set_rss_keys - Get/Set RSS hash key command buffer
+ * @standard_rss_key: 40 most significant bytes of hash key
+ * @extended_hash_key: 12 least significant bytes of hash key
+ *
+ * To set/get a 40-byte hash key, use the standard_rss_key field and set
+ * the extended_hash_key field to zero. To set/get a 52-byte hash key, use
+ * the standard_rss_key field for the 40 most significant bytes and the
+ * extended_hash_key field for the 12 least significant bytes of the key.
+ */
+struct ice_aqc_get_set_rss_keys {
+ u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
+ u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE];
+};
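+
+/* Illustrative sketch, not part of the interface: per the comment above, a
+ * 52-byte key (0x28 + 0xC bytes) is split across the two fields. Given a
+ * hypothetical 52-byte key buffer named key and a command buffer pointer
+ * keys, code could fill it with:
+ *
+ *	memcpy(keys->standard_rss_key, key,
+ *	    ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ *	memcpy(keys->extended_hash_key,
+ *	    key + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE,
+ *	    ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE);
+ *
+ * For a 40-byte key, only standard_rss_key is copied and extended_hash_key
+ * is cleared with memset().
+ */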
+
+/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
+struct ice_aqc_get_set_rss_lut {
+#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
+#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
+ __le16 vsi_id;
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
+ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S)
+
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1
+#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2
+
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \
+ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S)
+
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048
+#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2
+
+#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4
+#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \
+ (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S)
+
+ __le16 flags;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Add Tx LAN Queues (indirect 0x0C30) */
+struct ice_aqc_add_txqs {
+ u8 num_qgrps;
+ u8 reserved[3];
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each queue entry for the Add Tx LAN Queues
+ * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
+ */
+struct ice_aqc_add_txqs_perq {
+ __le16 txq_id;
+ u8 rsvd[2];
+ __le32 q_teid;
+ u8 txq_ctx[22];
+ u8 rsvd2[2];
+ struct ice_aqc_txsched_elem info;
+};
+
+/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
+ * is an array of the following structs. Note that the length of each
+ * struct ice_aqc_add_tx_qgrp varies with the number of queues in the group.
+ */
+struct ice_aqc_add_tx_qgrp {
+ __le32 parent_teid;
+ u8 num_txqs;
+ u8 rsvd[3];
+ struct ice_aqc_add_txqs_perq txqs[1];
+};
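+
+/* Illustrative sketch, not part of the interface: because txqs[] above is a
+ * variable-length array declared with a single element, code sizing the
+ * buffer for num_txqs queues could compute:
+ *
+ *	size_t len = offsetof(struct ice_aqc_add_tx_qgrp, txqs) +
+ *	    num_txqs * sizeof(struct ice_aqc_add_txqs_perq);
+ *
+ * The driver's queue-management code may use its own helper for this.
+ */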
+
+/* Disable Tx LAN Queues (indirect 0x0C31) */
+struct ice_aqc_dis_txqs {
+ u8 cmd_type;
+#define ICE_AQC_Q_DIS_CMD_S 0
+#define ICE_AQC_Q_DIS_CMD_M (0x3 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_NO_FUNC_RESET (0 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_VM_RESET BIT(ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_VF_RESET (2 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_PF_RESET (3 << ICE_AQC_Q_DIS_CMD_S)
+#define ICE_AQC_Q_DIS_CMD_SUBSEQ_CALL BIT(2)
+#define ICE_AQC_Q_DIS_CMD_FLUSH_PIPE BIT(3)
+ u8 num_entries;
+ __le16 vmvf_and_timeout;
+#define ICE_AQC_Q_DIS_VMVF_NUM_S 0
+#define ICE_AQC_Q_DIS_VMVF_NUM_M (0x3FF << ICE_AQC_Q_DIS_VMVF_NUM_S)
+#define ICE_AQC_Q_DIS_TIMEOUT_S 10
+#define ICE_AQC_Q_DIS_TIMEOUT_M (0x3F << ICE_AQC_Q_DIS_TIMEOUT_S)
+ __le32 blocked_cgds;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* The buffer for Disable Tx LAN Queues (indirect 0x0C31)
+ * contains the following structures, arrayed one after the
+ * other.
+ * Note: Since the q_id is 16 bits wide, if the
+ * number of queues is even, then 2 bytes of alignment MUST be
+ * added before the start of the next group, to allow correct
+ * alignment of the parent_teid field.
+ */
+struct ice_aqc_dis_txq_item {
+ __le32 parent_teid;
+ u8 num_qs;
+ u8 rsvd;
+ /* The length of the q_id array varies according to num_qs */
+ __le16 q_id[1];
+ /* This only applies from F8 onward */
+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15
+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \
+ (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
+#define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \
+ (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
+};
+
+struct ice_aqc_dis_txq {
+ struct ice_aqc_dis_txq_item qgrps[1];
+};
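+
+/* Illustrative sketch, not part of the interface: following the alignment
+ * note above, code sizing one ice_aqc_dis_txq_item entry for num_qs queues
+ * could compute:
+ *
+ *	size_t len = offsetof(struct ice_aqc_dis_txq_item, q_id) +
+ *	    num_qs * sizeof(__le16);
+ *	if (!(num_qs % 2))
+ *		len += 2;
+ *
+ * where the extra 2 bytes keep the next group's parent_teid aligned as
+ * described above. The driver's queue-management code may compute this
+ * differently.
+ */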
+
+/* Tx LAN Queues Cleanup Event (0x0C31) */
+struct ice_aqc_txqs_cleanup {
+ __le16 caller_opc;
+ __le16 cmd_tag;
+ u8 reserved[12];
+};
+
+/* Move / Reconfigure Tx Queues (indirect 0x0C32) */
+struct ice_aqc_move_txqs {
+ u8 cmd_type;
+#define ICE_AQC_Q_CMD_TYPE_S 0
+#define ICE_AQC_Q_CMD_TYPE_M (0x3 << ICE_AQC_Q_CMD_TYPE_S)
+#define ICE_AQC_Q_CMD_TYPE_MOVE 1
+#define ICE_AQC_Q_CMD_TYPE_TC_CHANGE 2
+#define ICE_AQC_Q_CMD_TYPE_MOVE_AND_TC 3
+#define ICE_AQC_Q_CMD_SUBSEQ_CALL BIT(2)
+#define ICE_AQC_Q_CMD_FLUSH_PIPE BIT(3)
+ u8 num_qs;
+ u8 rsvd;
+ u8 timeout;
+#define ICE_AQC_Q_CMD_TIMEOUT_S 2
+#define ICE_AQC_Q_CMD_TIMEOUT_M (0x3F << ICE_AQC_Q_CMD_TIMEOUT_S)
+ __le32 blocked_cgds;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Per-queue data buffer for the Move Tx LAN Queues command/response */
+struct ice_aqc_move_txqs_elem {
+ __le16 txq_id;
+ u8 q_cgd;
+ u8 rsvd;
+ __le32 q_teid;
+};
+
+/* Indirect data buffer for the Move Tx LAN Queues command/response */
+struct ice_aqc_move_txqs_data {
+ __le32 src_teid;
+ __le32 dest_teid;
+ struct ice_aqc_move_txqs_elem txqs[1];
+};
+
+/* Download Package (indirect 0x0C40) */
+/* Also used for Update Package (indirect 0x0C42) */
+struct ice_aqc_download_pkg {
+ u8 flags;
+#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
+ u8 reserved[3];
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_download_pkg_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Package Info List (indirect 0x0C43) */
+struct ice_aqc_get_pkg_info_list {
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Version format for packages */
+struct ice_pkg_ver {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define ICE_PKG_NAME_SIZE 32
+#define ICE_SEG_NAME_SIZE 28
+
+struct ice_aqc_get_pkg_info {
+ struct ice_pkg_ver ver;
+ char name[ICE_SEG_NAME_SIZE];
+ __le32 track_id;
+ u8 is_in_nvm;
+ u8 is_active;
+ u8 is_active_at_boot;
+ u8 is_modified;
+};
+
+/* Get Package Info List response buffer format (0x0C43) */
+struct ice_aqc_get_pkg_info_resp {
+ __le32 count;
+ struct ice_aqc_get_pkg_info pkg_info[1];
+};
+
+/* Driver Shared Parameters (direct, 0x0C90) */
+struct ice_aqc_driver_shared_params {
+ u8 set_or_get_op;
+#define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0)
+#define ICE_AQC_DRIVER_PARAM_SET 0
+#define ICE_AQC_DRIVER_PARAM_GET 1
+ u8 param_indx;
+#define ICE_AQC_DRIVER_PARAM_MAX_IDX 15
+ u8 rsvd[2];
+ __le32 param_val;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct ice_aqc_event_lan_overflow {
+ __le32 prtdcb_ruptq;
+ __le32 qtx_ctl;
+ u8 reserved[8];
+};
+
+/**
+ * struct ice_aq_desc - Admin Queue (AQ) descriptor
+ * @flags: ICE_AQ_FLAG_* flags
+ * @opcode: AQ command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts on the Admin Transmit Queue
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Receive Queue (ARQ) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ice_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ice_aqc_generic generic;
+ struct ice_aqc_get_ver get_ver;
+ struct ice_aqc_driver_ver driver_ver;
+ struct ice_aqc_q_shutdown q_shutdown;
+ struct ice_aqc_get_exp_err exp_err;
+ struct ice_aqc_req_res res_owner;
+ struct ice_aqc_manage_mac_read mac_read;
+ struct ice_aqc_manage_mac_write mac_write;
+ struct ice_aqc_clear_pxe clear_pxe;
+ struct ice_aqc_config_no_drop_policy no_drop;
+ struct ice_aqc_add_update_mir_rule add_update_rule;
+ struct ice_aqc_delete_mir_rule del_rule;
+ struct ice_aqc_list_caps get_cap;
+ struct ice_aqc_get_phy_caps get_phy;
+ struct ice_aqc_set_phy_cfg set_phy;
+ struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_dnl_get_status get_status;
+ struct ice_aqc_dnl_run_command dnl_run;
+ struct ice_aqc_dnl_call_command dnl_call;
+ struct ice_aqc_dnl_read_write_command dnl_read_write;
+ struct ice_aqc_dnl_read_write_response dnl_read_write_resp;
+ struct ice_aqc_dnl_set_breakpoints_command dnl_set_brk;
+ struct ice_aqc_dnl_read_log_command dnl_read_log;
+ struct ice_aqc_dnl_read_log_response dnl_read_log_resp;
+ struct ice_aqc_i2c read_write_i2c;
+ struct ice_aqc_mdio read_write_mdio;
+ struct ice_aqc_gpio_by_func read_write_gpio_by_func;
+ struct ice_aqc_gpio read_write_gpio;
+ struct ice_aqc_set_led set_led;
+ struct ice_aqc_sff_eeprom read_write_sff_param;
+ struct ice_aqc_set_port_id_led set_port_id_led;
+ struct ice_aqc_get_port_options get_port_options;
+ struct ice_aqc_set_port_option set_port_option;
+ struct ice_aqc_get_sw_cfg get_sw_conf;
+ struct ice_aqc_set_port_params set_port_params;
+ struct ice_aqc_sw_rules sw_rules;
+ struct ice_aqc_storm_cfg storm_conf;
+ struct ice_aqc_get_topo get_topo;
+ struct ice_aqc_sched_elem_cmd sched_elem_cmd;
+ struct ice_aqc_query_txsched_res query_sched_res;
+ struct ice_aqc_query_node_to_root query_node_to_root;
+ struct ice_aqc_cfg_l2_node_cgd cfg_l2_node_cgd;
+ struct ice_aqc_query_port_ets port_ets;
+ struct ice_aqc_rl_profile rl_profile;
+ struct ice_aqc_nvm nvm;
+ struct ice_aqc_nvm_cfg nvm_cfg;
+ struct ice_aqc_nvm_checksum nvm_checksum;
+ struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_read_write_alt_direct read_write_alt_direct;
+ struct ice_aqc_read_write_alt_indirect read_write_alt_indirect;
+ struct ice_aqc_done_alt_write done_alt_write;
+ struct ice_aqc_clear_port_alt_write clear_port_alt_write;
+ struct ice_aqc_pfc_ignore pfc_ignore;
+ struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
+ struct ice_aqc_set_dcb_params set_dcb_params;
+ struct ice_aqc_lldp_get_mib lldp_get_mib;
+ struct ice_aqc_lldp_set_mib_change lldp_set_event;
+ struct ice_aqc_lldp_add_delete_tlv lldp_add_delete_tlv;
+ struct ice_aqc_lldp_update_tlv lldp_update_tlv;
+ struct ice_aqc_lldp_stop lldp_stop;
+ struct ice_aqc_lldp_start lldp_start;
+ struct ice_aqc_lldp_set_local_mib lldp_set_mib;
+ struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
+ struct ice_aqc_get_set_rss_lut get_set_rss_lut;
+ struct ice_aqc_get_set_rss_key get_set_rss_key;
+ struct ice_aqc_add_txqs add_txqs;
+ struct ice_aqc_dis_txqs dis_txqs;
+ struct ice_aqc_move_txqs move_txqs;
+ struct ice_aqc_txqs_cleanup txqs_cleanup;
+ struct ice_aqc_add_get_update_free_vsi vsi_cmd;
+ struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
+ struct ice_aqc_get_vsi_resp get_vsi_resp;
+ struct ice_aqc_download_pkg download_pkg;
+ struct ice_aqc_get_pkg_info_list get_pkg_info_list;
+ struct ice_aqc_driver_shared_params drv_shared_params;
+ struct ice_aqc_set_mac_lb set_mac_lb;
+ struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
+ struct ice_aqc_get_res_alloc get_res;
+ struct ice_aqc_get_allocd_res_desc get_res_desc;
+ struct ice_aqc_set_mac_cfg set_mac_cfg;
+ struct ice_aqc_set_event_mask set_event_mask;
+ struct ice_aqc_get_link_status get_link_status;
+ struct ice_aqc_event_lan_overflow lan_overflow;
+ struct ice_aqc_get_link_topo get_link_topo;
+ } params;
+};
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define ICE_AQ_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define ICE_AQ_FLAG_DD_S 0
+#define ICE_AQ_FLAG_CMP_S 1
+#define ICE_AQ_FLAG_ERR_S 2
+#define ICE_AQ_FLAG_VFE_S 3
+#define ICE_AQ_FLAG_LB_S 9
+#define ICE_AQ_FLAG_RD_S 10
+#define ICE_AQ_FLAG_VFC_S 11
+#define ICE_AQ_FLAG_BUF_S 12
+#define ICE_AQ_FLAG_SI_S 13
+#define ICE_AQ_FLAG_EI_S 14
+#define ICE_AQ_FLAG_FE_S 15
+
+#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */
+#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */
+#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
+#define ICE_AQ_FLAG_VFE BIT(ICE_AQ_FLAG_VFE_S) /* 0x8 */
+#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
+#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
+#define ICE_AQ_FLAG_VFC BIT(ICE_AQ_FLAG_VFC_S) /* 0x800 */
+#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */
+#define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */
+#define ICE_AQ_FLAG_EI BIT(ICE_AQ_FLAG_EI_S) /* 0x4000 */
+#define ICE_AQ_FLAG_FE BIT(ICE_AQ_FLAG_FE_S) /* 0x8000 */
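+
+/* Illustrative sketch, not part of the interface: a rough outline of how a
+ * caller might prepare a descriptor for an indirect command such as Get Link
+ * Status (0x0607). The driver's control queue code also programs the DMA
+ * address fields and other bookkeeping; htole16() stands in for whatever
+ * byte-order helper the driver uses, and buf_len is the hypothetical length
+ * of the external buffer.
+ *
+ *	struct ice_aq_desc desc = { 0 };
+ *
+ *	desc.opcode = htole16(ice_aqc_opc_get_link_status);
+ *	desc.flags = htole16(ICE_AQ_FLAG_BUF);
+ *	if (buf_len > ICE_AQ_LG_BUF)
+ *		desc.flags |= htole16(ICE_AQ_FLAG_LB);
+ *	desc.datalen = htole16(buf_len);
+ */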
+
+/* error codes */
+enum ice_aq_err {
+ ICE_AQ_RC_OK = 0, /* Success */
+ ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
+ ICE_AQ_RC_ENOENT = 2, /* No such element */
+ ICE_AQ_RC_ESRCH = 3, /* Bad opcode */
+ ICE_AQ_RC_EINTR = 4, /* Operation interrupted */
+ ICE_AQ_RC_EIO = 5, /* I/O error */
+ ICE_AQ_RC_ENXIO = 6, /* No such resource */
+ ICE_AQ_RC_E2BIG = 7, /* Arg too long */
+ ICE_AQ_RC_EAGAIN = 8, /* Try again */
+ ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
+ ICE_AQ_RC_EACCES = 10, /* Permission denied */
+ ICE_AQ_RC_EFAULT = 11, /* Bad address */
+ ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ ICE_AQ_RC_EEXIST = 13, /* Object already exists */
+ ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
+ ICE_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ ICE_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ ICE_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ ICE_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ ICE_AQ_RC_EFBIG = 22, /* File too big */
+ ICE_AQ_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
+ ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
+ ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
+ ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ ICE_AQ_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
+
+/* Admin Queue command opcodes */
+enum ice_adminq_opc {
+ /* AQ commands */
+ ice_aqc_opc_get_ver = 0x0001,
+ ice_aqc_opc_driver_ver = 0x0002,
+ ice_aqc_opc_q_shutdown = 0x0003,
+ ice_aqc_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ice_aqc_opc_req_res = 0x0008,
+ ice_aqc_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ice_aqc_opc_list_func_caps = 0x000A,
+ ice_aqc_opc_list_dev_caps = 0x000B,
+
+ /* manage MAC address */
+ ice_aqc_opc_manage_mac_read = 0x0107,
+ ice_aqc_opc_manage_mac_write = 0x0108,
+
+ /* PXE */
+ ice_aqc_opc_clear_pxe_mode = 0x0110,
+
+ ice_aqc_opc_config_no_drop_policy = 0x0112,
+
+ /* internal switch commands */
+ ice_aqc_opc_get_sw_cfg = 0x0200,
+ ice_aqc_opc_set_port_params = 0x0203,
+
+ /* Alloc/Free/Get Resources */
+ ice_aqc_opc_get_res_alloc = 0x0204,
+ ice_aqc_opc_alloc_res = 0x0208,
+ ice_aqc_opc_free_res = 0x0209,
+ ice_aqc_opc_get_allocd_res_desc = 0x020A,
+
+ /* VSI commands */
+ ice_aqc_opc_add_vsi = 0x0210,
+ ice_aqc_opc_update_vsi = 0x0211,
+ ice_aqc_opc_get_vsi_params = 0x0212,
+ ice_aqc_opc_free_vsi = 0x0213,
+
+ /* Mirroring rules - add/update, delete */
+ ice_aqc_opc_add_update_mir_rule = 0x0260,
+ ice_aqc_opc_del_mir_rule = 0x0261,
+
+ /* storm configuration */
+ ice_aqc_opc_set_storm_cfg = 0x0280,
+ ice_aqc_opc_get_storm_cfg = 0x0281,
+
+ /* switch rules population commands */
+ ice_aqc_opc_add_sw_rules = 0x02A0,
+ ice_aqc_opc_update_sw_rules = 0x02A1,
+ ice_aqc_opc_remove_sw_rules = 0x02A2,
+ ice_aqc_opc_get_sw_rules = 0x02A3,
+ ice_aqc_opc_clear_pf_cfg = 0x02A4,
+
+ /* DCB commands */
+ ice_aqc_opc_pfc_ignore = 0x0301,
+ ice_aqc_opc_query_pfc_mode = 0x0302,
+ ice_aqc_opc_set_pfc_mode = 0x0303,
+ ice_aqc_opc_set_dcb_params = 0x0306,
+
+ /* transmit scheduler commands */
+ ice_aqc_opc_get_dflt_topo = 0x0400,
+ ice_aqc_opc_add_sched_elems = 0x0401,
+ ice_aqc_opc_cfg_sched_elems = 0x0403,
+ ice_aqc_opc_get_sched_elems = 0x0404,
+ ice_aqc_opc_move_sched_elems = 0x0408,
+ ice_aqc_opc_suspend_sched_elems = 0x0409,
+ ice_aqc_opc_resume_sched_elems = 0x040A,
+ ice_aqc_opc_query_port_ets = 0x040E,
+ ice_aqc_opc_delete_sched_elems = 0x040F,
+ ice_aqc_opc_add_rl_profiles = 0x0410,
+ ice_aqc_opc_query_rl_profiles = 0x0411,
+ ice_aqc_opc_query_sched_res = 0x0412,
+ ice_aqc_opc_query_node_to_root = 0x0413,
+ ice_aqc_opc_cfg_l2_node_cgd = 0x0414,
+ ice_aqc_opc_remove_rl_profiles = 0x0415,
+
+ /* PHY commands */
+ ice_aqc_opc_get_phy_caps = 0x0600,
+ ice_aqc_opc_set_phy_cfg = 0x0601,
+ ice_aqc_opc_set_mac_cfg = 0x0603,
+ ice_aqc_opc_restart_an = 0x0605,
+ ice_aqc_opc_get_link_status = 0x0607,
+ ice_aqc_opc_set_event_mask = 0x0613,
+ ice_aqc_opc_set_mac_lb = 0x0620,
+ ice_aqc_opc_dnl_get_status = 0x0680,
+ ice_aqc_opc_dnl_run = 0x0681,
+ ice_aqc_opc_dnl_call = 0x0682,
+ ice_aqc_opc_dnl_read_sto = 0x0683,
+ ice_aqc_opc_dnl_write_sto = 0x0684,
+ ice_aqc_opc_dnl_set_breakpoints = 0x0686,
+ ice_aqc_opc_dnl_read_log = 0x0687,
+ ice_aqc_opc_get_link_topo = 0x06E0,
+ ice_aqc_opc_get_link_topo_pin = 0x06E1,
+ ice_aqc_opc_read_i2c = 0x06E2,
+ ice_aqc_opc_write_i2c = 0x06E3,
+ ice_aqc_opc_read_mdio = 0x06E4,
+ ice_aqc_opc_write_mdio = 0x06E5,
+ ice_aqc_opc_set_gpio_by_func = 0x06E6,
+ ice_aqc_opc_get_gpio_by_func = 0x06E7,
+ ice_aqc_opc_set_led = 0x06E8,
+ ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_get_port_options = 0x06EA,
+ ice_aqc_opc_set_port_option = 0x06EB,
+ ice_aqc_opc_set_gpio = 0x06EC,
+ ice_aqc_opc_get_gpio = 0x06ED,
+ ice_aqc_opc_sff_eeprom = 0x06EE,
+
+ /* NVM commands */
+ ice_aqc_opc_nvm_read = 0x0701,
+ ice_aqc_opc_nvm_erase = 0x0702,
+ ice_aqc_opc_nvm_write = 0x0703,
+ ice_aqc_opc_nvm_cfg_read = 0x0704,
+ ice_aqc_opc_nvm_cfg_write = 0x0705,
+ ice_aqc_opc_nvm_checksum = 0x0706,
+ ice_aqc_opc_nvm_write_activate = 0x0707,
+ ice_aqc_opc_nvm_sr_dump = 0x0707,
+ ice_aqc_opc_nvm_save_factory_settings = 0x0708,
+ ice_aqc_opc_nvm_update_empr = 0x0709,
+
+ /* PF/VF mailbox commands */
+ ice_mbx_opc_send_msg_to_pf = 0x0801,
+ ice_mbx_opc_send_msg_to_vf = 0x0802,
+ /* Alternate Structure Commands */
+ ice_aqc_opc_write_alt_direct = 0x0900,
+ ice_aqc_opc_write_alt_indirect = 0x0901,
+ ice_aqc_opc_read_alt_direct = 0x0902,
+ ice_aqc_opc_read_alt_indirect = 0x0903,
+ ice_aqc_opc_done_alt_write = 0x0904,
+ ice_aqc_opc_clear_port_alt_write = 0x0906,
+ /* LLDP commands */
+ ice_aqc_opc_lldp_get_mib = 0x0A00,
+ ice_aqc_opc_lldp_set_mib_change = 0x0A01,
+ ice_aqc_opc_lldp_add_tlv = 0x0A02,
+ ice_aqc_opc_lldp_update_tlv = 0x0A03,
+ ice_aqc_opc_lldp_delete_tlv = 0x0A04,
+ ice_aqc_opc_lldp_stop = 0x0A05,
+ ice_aqc_opc_lldp_start = 0x0A06,
+ ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ ice_aqc_opc_lldp_set_local_mib = 0x0A08,
+ ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
+
+ /* RSS commands */
+ ice_aqc_opc_set_rss_key = 0x0B02,
+ ice_aqc_opc_set_rss_lut = 0x0B03,
+ ice_aqc_opc_get_rss_key = 0x0B04,
+ ice_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Tx queue handling commands/events */
+ ice_aqc_opc_add_txqs = 0x0C30,
+ ice_aqc_opc_dis_txqs = 0x0C31,
+ ice_aqc_opc_txqs_cleanup = 0x0C31,
+ ice_aqc_opc_move_recfg_txqs = 0x0C32,
+
+ /* package commands */
+ ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_upload_section = 0x0C41,
+ ice_aqc_opc_update_pkg = 0x0C42,
+ ice_aqc_opc_get_pkg_info_list = 0x0C43,
+
+ ice_aqc_opc_driver_shared_params = 0x0C90,
+
+ /* Standalone Commands/Events */
+ ice_aqc_opc_event_lan_overflow = 0x1001,
+};
+
+#endif /* _ICE_ADMINQ_CMD_H_ */
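The opcodes above are consumed by the descriptor helpers added later in this review (ice_fill_dflt_direct_cmd_desc() and ice_aq_send_cmd() in ice_common.c). A minimal sketch of the calling pattern for a direct, buffer-less command, mirroring ice_clear_pf_cfg() further down in this diff (illustrative only, not part of the patch):

/* Illustrative sketch: build a direct AQ descriptor from an opcode and
 * send it with no data buffer, as ice_clear_pf_cfg() does.
 */
static enum ice_status example_send_direct_cmd(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}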
Index: sys/dev/ice/ice_alloc.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_alloc.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_ALLOC_H_
+#define _ICE_ALLOC_H_
+
+/* Memory types */
+enum ice_memset_type {
+ ICE_NONDMA_MEM = 0,
+ ICE_DMA_MEM
+};
+
+/* Memcpy types */
+enum ice_memcpy_type {
+ ICE_NONDMA_TO_NONDMA = 0,
+ ICE_NONDMA_TO_DMA,
+ ICE_DMA_TO_DMA,
+ ICE_DMA_TO_NONDMA
+};
+
+#endif /* _ICE_ALLOC_H_ */
Index: sys/dev/ice/ice_bitops.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_bitops.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_BITOPS_H_
+#define _ICE_BITOPS_H_
+
+/* Define the size of the bitmap chunk */
+typedef u32 ice_bitmap_t;
+
+/* Number of bits per bitmap chunk */
+#define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t))
+/* Determine which chunk a bit belongs in */
+#define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK)
+/* How many chunks are required to store this many bits */
+#define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz), BITS_PER_CHUNK)
+/* Which bit inside a chunk this bit corresponds to */
+#define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK)
+/* How many bits are valid in the last chunk, assumes nr > 0 */
+#define LAST_CHUNK_BITS(nr) ((((nr) - 1) % BITS_PER_CHUNK) + 1)
+/* Generate a bitmask of valid bits in the last chunk, assumes nr > 0 */
+#define LAST_CHUNK_MASK(nr) (((ice_bitmap_t)~0) >> \
+ (BITS_PER_CHUNK - LAST_CHUNK_BITS(nr)))
+
+#define ice_declare_bitmap(A, sz) \
+ ice_bitmap_t A[BITS_TO_CHUNKS(sz)]
+
+static inline bool ice_is_bit_set_internal(u16 nr, const ice_bitmap_t *bitmap)
+{
+ return !!(*bitmap & BIT(nr));
+}
+
+/*
+ * If atomic versions of the bitops are required, each specific OS
+ * implementation will need to provide OS/platform-specific atomic
+ * versions of the functions below:
+ *
+ * ice_clear_bit_internal
+ * ice_set_bit_internal
+ * ice_test_and_clear_bit_internal
+ * ice_test_and_set_bit_internal
+ *
+ * and define the macro ICE_ATOMIC_BITOPS to override the default non-atomic
+ * implementation.
+ */
+static inline void ice_clear_bit_internal(u16 nr, ice_bitmap_t *bitmap)
+{
+ *bitmap &= ~BIT(nr);
+}
+
+static inline void ice_set_bit_internal(u16 nr, ice_bitmap_t *bitmap)
+{
+ *bitmap |= BIT(nr);
+}
+
+static inline bool ice_test_and_clear_bit_internal(u16 nr,
+ ice_bitmap_t *bitmap)
+{
+ if (ice_is_bit_set_internal(nr, bitmap)) {
+ ice_clear_bit_internal(nr, bitmap);
+ return true;
+ }
+ return false;
+}
+
+static inline bool ice_test_and_set_bit_internal(u16 nr, ice_bitmap_t *bitmap)
+{
+ if (ice_is_bit_set_internal(nr, bitmap))
+ return true;
+
+ ice_set_bit_internal(nr, bitmap);
+ return false;
+}
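As the comment above notes, a platform may supply atomic variants of these helpers and define ICE_ATOMIC_BITOPS. A hypothetical FreeBSD-flavoured sketch, assuming the atomic_set_32()/atomic_clear_32() primitives from machine/atomic.h are reachable (e.g. via ice_osdep.h); the names are placeholders and this is not part of the patch:

/* Hypothetical atomic variants (illustrative only): OR / AND-NOT the
 * requested bit into the 32-bit chunk with FreeBSD's machine/atomic.h
 * helpers instead of a plain read-modify-write.
 */
static inline void example_ice_set_bit_atomic(u16 nr, ice_bitmap_t *bitmap)
{
	atomic_set_32((volatile uint32_t *)bitmap, BIT(nr));
}

static inline void example_ice_clear_bit_atomic(u16 nr, ice_bitmap_t *bitmap)
{
	atomic_clear_32((volatile uint32_t *)bitmap, BIT(nr));
}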
+
+/**
+ * ice_is_bit_set - Check state of a bit in a bitmap
+ * @bitmap: the bitmap to check
+ * @nr: the bit to check
+ *
+ * Returns true if bit nr of bitmap is set. False otherwise. Assumes that nr
+ * is less than the size of the bitmap.
+ */
+static inline bool ice_is_bit_set(const ice_bitmap_t *bitmap, u16 nr)
+{
+ return ice_is_bit_set_internal(BIT_IN_CHUNK(nr),
+ &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_clear_bit - Clear a bit in a bitmap
+ * @bitmap: the bitmap to change
+ * @nr: the bit to change
+ *
+ * Clears the bit nr in bitmap. Assumes that nr is less than the size of the
+ * bitmap.
+ */
+static inline void ice_clear_bit(u16 nr, ice_bitmap_t *bitmap)
+{
+ ice_clear_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_set_bit - Set a bit in a bitmap
+ * @bitmap: the bitmap to change
+ * @nr: the bit to change
+ *
+ * Sets the bit nr in bitmap. Assumes that nr is less than the size of the
+ * bitmap.
+ */
+static inline void ice_set_bit(u16 nr, ice_bitmap_t *bitmap)
+{
+ ice_set_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_test_and_clear_bit - Atomically clear a bit and return the old bit value
+ * @nr: the bit to change
+ * @bitmap: the bitmap to change
+ *
+ * Check and clear the bit nr in bitmap. Assumes that nr is less than the size
+ * of the bitmap.
+ */
+static inline bool
+ice_test_and_clear_bit(u16 nr, ice_bitmap_t *bitmap)
+{
+ return ice_test_and_clear_bit_internal(BIT_IN_CHUNK(nr),
+ &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_test_and_set_bit - Atomically set a bit and return the old bit value
+ * @nr: the bit to change
+ * @bitmap: the bitmap to change
+ *
+ * Check and set the bit nr in bitmap. Assumes that nr is less than the size of
+ * the bitmap.
+ */
+static inline bool
+ice_test_and_set_bit(u16 nr, ice_bitmap_t *bitmap)
+{
+ return ice_test_and_set_bit_internal(BIT_IN_CHUNK(nr),
+ &bitmap[BIT_CHUNK(nr)]);
+}
+
+/**
+ * ice_zero_bitmap - set bits of bitmap to zero.
+ * @bmp: the bitmap to zero
+ * @size: Size of the bitmaps in bits
+ *
+ * Set all of the bits in a bitmap to zero. Note that this function assumes it
+ * operates on an ice_bitmap_t which was declared using ice_declare_bitmap. It
+ * will zero every bit in the last chunk, even if those bits are beyond the
+ * size.
+ */
+static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size)
+{
+ ice_memset(bmp, 0, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t),
+ ICE_NONDMA_MEM);
+}
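To make the chunk arithmetic concrete, a short hypothetical usage of the wrappers above (example_bitmap_usage and example_bmp are placeholder names, not part of the patch):

/* Illustrative only: a 40-bit bitmap occupies two 32-bit chunks
 * (BITS_TO_CHUNKS(40) == 2); bit 35 lands in chunk 1 at bit position 3
 * (BIT_CHUNK(35) == 1, BIT_IN_CHUNK(35) == 3).
 */
static inline bool example_bitmap_usage(void)
{
	ice_declare_bitmap(example_bmp, 40);

	ice_zero_bitmap(example_bmp, 40);
	ice_set_bit(35, example_bmp);

	/* returns true; example_bmp[1] == 0x8 at this point */
	return ice_is_bit_set(example_bmp, 35);
}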
+
+/**
+ * ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap
+ * @dst: Destination bitmap that receives the result of the operation
+ * @bmp1: The first bitmap to intersect
+ * @bmp2: The second bitmap to intersect with the first
+ * @size: Size of the bitmaps in bits
+ *
+ * This function performs a bitwise AND on two "source" bitmaps of the same size
+ * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
+ * size as the "source" bitmaps to avoid buffer overflows. This function returns
+ * a non-zero value if at least one bit location from both "source" bitmaps is
+ * non-zero.
+ */
+static inline int
+ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
+ const ice_bitmap_t *bmp2, u16 size)
+{
+ ice_bitmap_t res = 0, mask;
+ u16 i;
+
+ /* Handle all but the last chunk */
+ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) {
+ dst[i] = bmp1[i] & bmp2[i];
+ res |= dst[i];
+ }
+
+ /* We want to take care not to modify any bits outside of the bitmap
+ * size, even in the destination bitmap. Thus, we won't directly
+ * assign the last bitmap, but instead use a bitmask to ensure we only
+ * modify bits which are within the size, and leave any bits above the
+ * size value alone.
+ */
+ mask = LAST_CHUNK_MASK(size);
+ dst[i] = (dst[i] & ~mask) | ((bmp1[i] & bmp2[i]) & mask);
+ res |= dst[i] & mask;
+
+ return res != 0;
+}
+
+/**
+ * ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
+ * @dst: Destination bitmap that receives the result of the operation
+ * @bmp1: The first bitmap of the OR operation
+ * @bmp2: The second bitmap to OR with the first
+ * @size: Size of the bitmaps in bits
+ *
+ * This function performs a bitwise OR on two "source" bitmaps of the same size
+ * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
+ * size as the "source" bitmaps to avoid buffer overflows.
+ */
+static inline void
+ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
+ const ice_bitmap_t *bmp2, u16 size)
+{
+ ice_bitmap_t mask;
+ u16 i;
+
+	/* Handle all but the last chunk */
+ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
+ dst[i] = bmp1[i] | bmp2[i];
+
+ /* We want to only OR bits within the size. Furthermore, we also do
+ * not want to modify destination bits which are beyond the specified
+ * size. Use a bitmask to ensure that we only modify the bits that are
+ * within the specified size.
+ */
+ mask = LAST_CHUNK_MASK(size);
+ dst[i] = (dst[i] & ~mask) | ((bmp1[i] | bmp2[i]) & mask);
+}
+
+/**
+ * ice_xor_bitmap - bitwise XOR 2 bitmaps and store result in dst bitmap
+ * @dst: Destination bitmap that receive the result of the operation
+ * @bmp1: The first bitmap of XOR operation
+ * @bmp2: The second bitmap to XOR with the first
+ * @size: Size of the bitmaps in bits
+ *
+ * This function performs a bitwise XOR on two "source" bitmaps of the same size
+ * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same
+ * size as the "source" bitmaps to avoid buffer overflows.
+ */
+static inline void
+ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
+ const ice_bitmap_t *bmp2, u16 size)
+{
+ ice_bitmap_t mask;
+ u16 i;
+
+	/* Handle all but the last chunk */
+ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
+ dst[i] = bmp1[i] ^ bmp2[i];
+
+ /* We want to only XOR bits within the size. Furthermore, we also do
+ * not want to modify destination bits which are beyond the specified
+ * size. Use a bitmask to ensure that we only modify the bits that are
+ * within the specified size.
+ */
+ mask = LAST_CHUNK_MASK(size);
+ dst[i] = (dst[i] & ~mask) | ((bmp1[i] ^ bmp2[i]) & mask);
+}
+
+/**
+ * ice_find_next_bit - Find the index of the next set bit of a bitmap
+ * @bitmap: the bitmap to scan
+ * @size: the size in bits of the bitmap
+ * @offset: the offset to start at
+ *
+ * Scans the bitmap and returns the index of the first set bit which is equal
+ * to or after the specified offset. Will return size if no bits are set.
+ */
+static inline u16
+ice_find_next_bit(const ice_bitmap_t *bitmap, u16 size, u16 offset)
+{
+ u16 i, j;
+
+ if (offset >= size)
+ return size;
+
+ /* Since the starting position may not be directly on a chunk
+ * boundary, we need to be careful to handle the first chunk specially
+ */
+ i = BIT_CHUNK(offset);
+ if (bitmap[i] != 0) {
+ u16 off = i * BITS_PER_CHUNK;
+
+ for (j = offset % BITS_PER_CHUNK; j < BITS_PER_CHUNK; j++) {
+ if (ice_is_bit_set(bitmap, off + j))
+ return min(size, (u16)(off + j));
+ }
+ }
+
+ /* Now we handle the remaining chunks, if any */
+ for (i++; i < BITS_TO_CHUNKS(size); i++) {
+ if (bitmap[i] != 0) {
+ u16 off = i * BITS_PER_CHUNK;
+
+ for (j = 0; j < BITS_PER_CHUNK; j++) {
+ if (ice_is_bit_set(bitmap, off + j))
+ return min(size, (u16)(off + j));
+ }
+ }
+ }
+ return size;
+}
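A hedged sketch of the typical caller pattern for ice_find_next_bit(): walk every set bit by repeatedly advancing the offset (example_count_set_bits is a placeholder name, not part of the patch):

/* Illustrative only: visit each set bit; the loop terminates because
 * ice_find_next_bit() returns 'size' once no further bits are set.
 */
static inline u16 example_count_set_bits(const ice_bitmap_t *bmp, u16 size)
{
	u16 count = 0, bit;

	for (bit = ice_find_next_bit(bmp, size, 0);
	     bit < size;
	     bit = ice_find_next_bit(bmp, size, bit + 1))
		count++;

	return count;
}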
+
+/**
+ * ice_find_first_bit - Find the index of the first set bit of a bitmap
+ * @bitmap: the bitmap to scan
+ * @size: the size in bits of the bitmap
+ *
+ * Scans the bitmap and returns the index of the first set bit. Will return
+ * size if no bits are set.
+ */
+static inline u16 ice_find_first_bit(const ice_bitmap_t *bitmap, u16 size)
+{
+ return ice_find_next_bit(bitmap, size, 0);
+}
+
+/**
+ * ice_is_any_bit_set - Return true if any bit in the bitmap is set
+ * @bitmap: the bitmap to check
+ * @size: the size of the bitmap
+ *
+ * Equivalent to checking if ice_find_first_bit returns a value less than the
+ * bitmap size.
+ */
+static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, u16 size)
+{
+ return ice_find_first_bit(bitmap, size) < size;
+}
+
+/**
+ * ice_cp_bitmap - copy bitmaps.
+ * @dst: bitmap destination
+ * @src: bitmap to copy from
+ * @size: Size of the bitmaps in bits
+ *
+ * This function copies the bitmap from src to dst. Note that this function
+ * assumes it is operating on a bitmap declared using ice_declare_bitmap. It
+ * will copy the entire last chunk even if it contains bits beyond the size.
+ */
+static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, u16 size)
+{
+ ice_memcpy(dst, src, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t),
+ ICE_NONDMA_TO_NONDMA);
+}
+
+/**
+ * ice_cmp_bitmap - compares two bitmaps.
+ * @bmp1: the bitmap to compare
+ * @bmp2: the bitmap to compare with bmp1
+ * @size: Size of the bitmaps in bits
+ *
+ * This function compares two bitmaps and returns true if they are equal,
+ * false otherwise.
+ */
+static inline bool
+ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, u16 size)
+{
+ ice_bitmap_t mask;
+ u16 i;
+
+	/* Handle all but the last chunk */
+ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++)
+ if (bmp1[i] != bmp2[i])
+ return false;
+
+	/* We want to only compare bits within the size. */
+ mask = LAST_CHUNK_MASK(size);
+ if ((bmp1[i] & mask) != (bmp2[i] & mask))
+ return false;
+
+ return true;
+}
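Combining the multi-chunk helpers above, a hypothetical sketch that intersects two capability bitmaps and inspects the result (example_caps_subset, caps_a, caps_b and ICE_EXAMPLE_NBITS are placeholder names, not part of the patch):

/* Illustrative only: ice_and_bitmap() returns non-zero when the
 * intersection is non-empty; ice_cmp_bitmap() then tells us whether
 * caps_a is a subset of caps_b, i.e. (caps_a & caps_b) == caps_a.
 */
#define ICE_EXAMPLE_NBITS 64 /* hypothetical bitmap width */

static inline bool
example_caps_subset(ice_bitmap_t *caps_a, ice_bitmap_t *caps_b)
{
	ice_declare_bitmap(common_caps, ICE_EXAMPLE_NBITS);

	if (!ice_and_bitmap(common_caps, caps_a, caps_b, ICE_EXAMPLE_NBITS))
		return false; /* no capability is shared at all */

	return ice_cmp_bitmap(common_caps, caps_a, ICE_EXAMPLE_NBITS);
}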
+
+#undef BIT_CHUNK
+#undef BIT_IN_CHUNK
+#undef LAST_CHUNK_BITS
+#undef LAST_CHUNK_MASK
+
+#endif /* _ICE_BITOPS_H_ */
Index: sys/dev/ice/ice_common.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_common.h
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_COMMON_H_
+#define _ICE_COMMON_H_
+
+#include "ice_type.h"
+#include "ice_nvm.h"
+#include "ice_flex_pipe.h"
+#include "virtchnl.h"
+#include "ice_switch.h"
+
+enum ice_fw_modes {
+ ICE_FW_MODE_NORMAL,
+ ICE_FW_MODE_DBG,
+ ICE_FW_MODE_REC,
+ ICE_FW_MODE_ROLLBACK
+};
+
+/* prototype for functions used for SW locks */
+void ice_free_list(struct LIST_HEAD_TYPE *list);
+void ice_init_lock(struct ice_lock *lock);
+void ice_acquire_lock(struct ice_lock *lock);
+void ice_release_lock(struct ice_lock *lock);
+void ice_destroy_lock(struct ice_lock *lock);
+
+void *ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m, u64 size);
+void ice_free_dma_mem(struct ice_hw *hw, struct ice_dma_mem *m);
+
+void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq);
+bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
+
+enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
+enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
+enum ice_status ice_init_hw(struct ice_hw *hw);
+void ice_deinit_hw(struct ice_hw *hw);
+enum ice_status ice_check_reset(struct ice_hw *hw);
+enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
+enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_destroy_all_ctrlq(struct ice_hw *hw);
+enum ice_status
+ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_rq_event_info *e, u16 *pending);
+enum ice_status
+ice_get_link_status(struct ice_port_info *pi, bool *link_up);
+enum ice_status ice_update_link_info(struct ice_port_info *pi);
+enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_nvm(struct ice_hw *hw);
+enum ice_status
+ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
+ void *data, bool last_command, bool read_shadow_ram,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
+ enum ice_aq_res_access_type access, u32 timeout);
+void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
+enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+enum ice_status
+ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+void ice_clear_pxe_mode(struct ice_hw *hw);
+
+enum ice_status ice_get_caps(struct ice_hw *hw);
+
+void ice_set_safe_mode_caps(struct ice_hw *hw);
+
+enum ice_status ice_set_mac_type(struct ice_hw *hw);
+
+/* Define a macro that will align a pointer to point to the next memory address
+ * that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For
+ * example, given the variable pointer = 0x1006, then after the following call:
+ *
+ * pointer = ICE_ALIGN(pointer, 4)
+ *
+ * ... the value of pointer would equal 0x1008, since 0x1008 is the next
+ * address after 0x1006 which is divisible by 4.
+ */
+#define ICE_ALIGN(ptr, align) (((ptr) + ((align) - 1)) & ~((align) - 1))
+
+enum ice_status
+ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
+enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
+enum ice_status
+ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index);
+enum ice_status
+ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
+ struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
+ u32 tx_cmpltnq_index);
+enum ice_status
+ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index);
+enum ice_status
+ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
+ struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
+ u32 tx_drbell_q_index);
+
+enum ice_status
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
+ u16 lut_size);
+enum ice_status
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type, u8 *lut,
+ u16 lut_size);
+enum ice_status
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_aqc_get_set_rss_keys *keys);
+enum ice_status
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_aqc_get_set_rss_keys *keys);
+enum ice_status
+ice_aq_add_lan_txq(struct ice_hw *hw, u8 count,
+ struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
+ bool is_tc_change, bool subseq_call, bool flush_pipe,
+ u8 timeout, u32 *blocked_cgds,
+ struct ice_aqc_move_txqs_data *buf, u16 buf_size,
+ u8 *txqs_moved, struct ice_sq_cd *cd);
+
+bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
+enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
+extern const struct ice_ctx_ele ice_tlan_ctx_info[];
+enum ice_status
+ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
+
+enum ice_status
+ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd);
+enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
+ bool save_bad_pac, bool pad_short_pac, bool double_vlan,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
+ struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+void
+ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
+ u16 link_speeds_bitmap);
+enum ice_status
+ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
+ struct ice_sq_cd *cd);
+
+enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
+enum ice_status
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
+ struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
+bool ice_fw_supports_link_override(struct ice_hw *hw);
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+ struct ice_port_info *pi);
+
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
+ bool ena_auto_link_update);
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
+void
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
+enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fec_mode fec);
+enum ice_status
+ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info);
+enum ice_status
+__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data);
+enum ice_status
+__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data);
+enum ice_status
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handle, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *max_lanqs);
+enum ice_status
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+void ice_replay_post(struct ice_hw *hw);
+void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
+void ice_sched_replay_agg(struct ice_hw *hw);
+enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
+enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
+struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+void
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
+ struct ice_eth_stats *cur_stats);
+enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
+void ice_print_rollback_msg(struct ice_hw *hw);
+bool ice_is_generic_mac(struct ice_hw *hw);
+enum ice_status
+ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1);
+enum ice_status
+ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1);
+enum ice_status
+ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode,
+ bool *reset_needed);
+enum ice_status ice_aq_alternate_clear(struct ice_hw *hw);
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf);
+enum ice_status
+ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
+enum ice_status
+ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
+enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw);
+#endif /* _ICE_COMMON_H_ */
Index: sys/dev/ice/ice_common.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_common.c
@@ -0,0 +1,4895 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+#include "ice_sched.h"
+#include "ice_adminq_cmd.h"
+
+#include "ice_flow.h"
+#include "ice_switch.h"
+
+#define ICE_PF_RESET_WAIT_COUNT 300
+
+/**
+ * ice_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the MAC type of the adapter based on the
+ * vendor ID and device ID stored in the HW structure.
+ */
+enum ice_status ice_set_mac_type(struct ice_hw *hw)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
+ return ICE_ERR_DEVICE_NOT_SUPPORTED;
+
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_BACKPLANE:
+ case ICE_DEV_ID_E810C_QSFP:
+ case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
+ case ICE_DEV_ID_E810_XXV_QSFP:
+ case ICE_DEV_ID_E810_XXV_SFP:
+ hw->mac_type = ICE_MAC_E810;
+ break;
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ hw->mac_type = ICE_MAC_GENERIC;
+ break;
+ default:
+ hw->mac_type = ICE_MAC_UNKNOWN;
+ break;
+ }
+
+ ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_clear_pf_cfg - Clear PF configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
+ */
+enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_manage_mac_read - manage MAC address read command
+ * @hw: pointer to the HW struct
+ * @buf: a virtual buffer to hold the manage MAC read response
+ * @buf_size: Size of the virtual buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function is used to return the per-PF station MAC address (0x0107).
+ * NOTE: Upon successful completion of this command, MAC address information
+ * is returned in the user-specified buffer, which should be interpreted as a
+ * "manage_mac_read" response. The returned MAC addresses are also stored in
+ * the HW struct (port.mac). ice_aq_discover_caps is expected to be called
+ * before this function.
+ */
+enum ice_status
+ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_manage_mac_read_resp *resp;
+ struct ice_aqc_manage_mac_read *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 flags;
+ u8 i;
+
+ cmd = &desc.params.mac_read;
+
+ if (buf_size < sizeof(*resp))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ return status;
+
+ resp = (struct ice_aqc_manage_mac_read_resp *)buf;
+ flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
+
+ if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
+ ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* A single port can report up to two (LAN and WoL) addresses */
+ for (i = 0; i < cmd->num_addr; i++)
+ if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
+ ice_memcpy(hw->port_info->mac.lan_addr,
+ resp[i].mac_addr, ETH_ALEN,
+ ICE_DMA_TO_NONDMA);
+ ice_memcpy(hw->port_info->mac.perm_addr,
+ resp[i].mac_addr,
+ ETH_ALEN, ICE_DMA_TO_NONDMA);
+ break;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_aq_get_phy_caps - returns PHY capabilities
+ * @pi: port information structure
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ * @cd: pointer to command details structure or NULL
+ *
+ * Returns the various PHY capabilities supported on the Port (0x0600)
+ */
+enum ice_status
+ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
+ struct ice_aqc_get_phy_caps_data *pcaps,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
+
+ cmd->param0 |= CPU_TO_LE16(report_mode);
+ status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
+
+ if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
+ pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
+ pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_get_link_topo_handle - get link topology node return status
+ * @pi: port information structure
+ * @node_type: requested node type
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get link topology node return status for specified node type (0x06E0)
+ *
+ * Node type cage can be used to determine if cage is present. If AQC
+ * returns error (ENOENT), then no cage present. If no cage present, then
+ * connection type is backplane or BASE-T.
+ */
+static enum ice_status
+ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
+
+ /* set node type */
+ cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+
+ return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_is_media_cage_present
+ * @pi: port information structure
+ *
+ * Returns true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ice_is_media_cage_present(struct ice_port_info *pi)
+{
+ /* Node type cage can be used to determine if cage is present. If AQC
+ * returns error (ENOENT), then no cage present. If no cage present then
+ * connection type is backplane or BASE-T.
+ */
+ return !ice_aq_get_link_topo_handle(pi,
+ ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
+ NULL);
+}
+
+/**
+ * ice_get_media_type - Gets media type
+ * @pi: port information structure
+ */
+static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
+{
+ struct ice_link_status *hw_link_info;
+
+ if (!pi)
+ return ICE_MEDIA_UNKNOWN;
+
+ hw_link_info = &pi->phy.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ICE_MEDIA_UNKNOWN;
+
+ if (hw_link_info->phy_type_low) {
+ switch (hw_link_info->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ return ICE_MEDIA_FIBER;
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ return ICE_MEDIA_BASET;
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ return ICE_MEDIA_DA;
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ if (ice_is_media_cage_present(pi))
+ return ICE_MEDIA_DA;
+ /* fall-through */
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ return ICE_MEDIA_BACKPLANE;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ if (ice_is_media_cage_present(pi))
+ return ICE_MEDIA_DA;
+ /* fall-through */
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ return ICE_MEDIA_BACKPLANE;
+ }
+ }
+ return ICE_MEDIA_UNKNOWN;
+}
+
+/**
+ * ice_aq_get_link_info
+ * @pi: port information structure
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Link Status (0x607). Returns the link status of the adapter.
+ */
+enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_link_status_data link_data = { 0 };
+ struct ice_aqc_get_link_status *resp;
+ struct ice_link_status *li_old, *li;
+ enum ice_media_type *hw_media_type;
+ struct ice_fc_info *hw_fc_info;
+ bool tx_pause, rx_pause;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u16 cmd_flags;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw = pi->hw;
+
+ li_old = &pi->phy.link_info_old;
+ hw_media_type = &pi->phy.media_type;
+ li = &pi->phy.link_info;
+ hw_fc_info = &pi->fc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
+ cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = CPU_TO_LE16(cmd_flags);
+ resp->lport_num = pi->lport;
+
+ status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
+
+ if (status != ICE_SUCCESS)
+ return status;
+
+ /* save off old link status information */
+ *li_old = *li;
+
+ /* update current link status information */
+ li->link_speed = LE16_TO_CPU(link_data.link_speed);
+ li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
+ li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
+ *hw_media_type = ice_get_media_type(pi);
+ li->link_info = link_data.link_info;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
+ ICE_AQ_CFG_PACING_TYPE_M);
+
+ /* update fc info */
+ tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ICE_FC_FULL;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
+ else
+ hw_fc_info->current_mode = ICE_FC_NONE;
+
+ li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
+
+ ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)li->phy_type_low);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)li->phy_type_high);
+ ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
+ ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
+ ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
+ ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
+ ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
+ ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
+
+ /* save link status information */
+ if (link)
+ *link = *li;
+
+ /* flag cleared so calling functions don't call AQ again */
+ pi->phy.get_link_info = false;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_aq_set_mac_cfg
+ * @hw: pointer to the HW struct
+ * @max_frame_size: Maximum Frame Size to be supported
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set MAC configuration (0x0603)
+ */
+enum ice_status
+ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
+{
+ u16 fc_threshold_val, tx_timer_val;
+ struct ice_aqc_set_mac_cfg *cmd;
+ struct ice_aq_desc desc;
+ u32 reg_val;
+
+ cmd = &desc.params.set_mac_cfg;
+
+ if (max_frame_size == 0)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
+
+ cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
+
+ /* We read back the transmit timer and fc threshold value of
+ * LFC. Thus, we will use index =
+ * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
+ *
+ * Also, because we are operating on the transmit timer and fc
+ * threshold of LFC, we don't turn on any bit in tx_tmr_priority
+ */
+#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
+
+ /* Retrieve the transmit timer */
+ reg_val = rd32(hw,
+ PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
+ tx_timer_val = reg_val &
+ PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
+
+ /* Retrieve the fc threshold */
+ reg_val = rd32(hw,
+ PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
+ fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0);
+ cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_init_fltr_mgmt_struct - initializes filter management list and locks
+ * @hw: pointer to the HW struct
+ */
+static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw;
+
+ hw->switch_info = (struct ice_switch_info *)
+ ice_malloc(hw, sizeof(*hw->switch_info));
+
+ sw = hw->switch_info;
+
+ if (!sw)
+ return ICE_ERR_NO_MEMORY;
+
+ INIT_LIST_HEAD(&sw->vsi_list_map_head);
+
+ return ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
+}
+
+/**
+ * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
+ * @hw: pointer to the HW struct
+ */
+static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_vsi_list_map_info *v_pos_map;
+ struct ice_vsi_list_map_info *v_tmp_map;
+ struct ice_sw_recipe *recps;
+ u8 i;
+
+ LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
+ ice_vsi_list_map_info, list_entry) {
+ LIST_DEL(&v_pos_map->list_entry);
+ ice_free(hw, v_pos_map);
+ }
+ recps = hw->switch_info->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
+
+ recps[i].root_rid = i;
+ LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
+ &recps[i].rg_list, ice_recp_grp_entry,
+ l_entry) {
+ LIST_DEL(&rg_entry->l_entry);
+ ice_free(hw, rg_entry);
+ }
+
+ if (recps[i].adv_rule) {
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ ice_destroy_lock(&recps[i].filt_rule_lock);
+ LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ ice_adv_fltr_mgmt_list_entry,
+ list_entry) {
+ LIST_DEL(&lst_itr->list_entry);
+ ice_free(hw, lst_itr->lkups);
+ ice_free(hw, lst_itr);
+ }
+ } else {
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+
+ ice_destroy_lock(&recps[i].filt_rule_lock);
+ LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ ice_fltr_mgmt_list_entry,
+ list_entry) {
+ LIST_DEL(&lst_itr->list_entry);
+ ice_free(hw, lst_itr);
+ }
+ }
+ if (recps[i].root_buf)
+ ice_free(hw, recps[i].root_buf);
+ }
+ ice_rm_all_sw_replay_rule_info(hw);
+ ice_free(hw, sw->recp_list);
+ ice_free(hw, sw);
+}
+
+/**
+ * ice_get_itr_intrl_gran
+ * @hw: pointer to the HW struct
+ *
+ * Determines the ITR/INTRL granularities based on the maximum aggregate
+ * bandwidth according to the device's configuration during power-on.
+ */
+static void ice_get_itr_intrl_gran(struct ice_hw *hw)
+{
+ u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
+ GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
+ GL_PWR_MODE_CTL_CAR_MAX_BW_S;
+
+ switch (max_agg_bw) {
+ case ICE_MAX_AGG_BW_200G:
+ case ICE_MAX_AGG_BW_100G:
+ case ICE_MAX_AGG_BW_50G:
+ hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
+ break;
+ case ICE_MAX_AGG_BW_25G:
+ hw->itr_gran = ICE_ITR_GRAN_MAX_25;
+ hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
+ break;
+ }
+}
+
+/**
+ * ice_print_rollback_msg - print FW rollback message
+ * @hw: pointer to the hardware structure
+ */
+void ice_print_rollback_msg(struct ice_hw *hw)
+{
+ char nvm_str[ICE_NVM_VER_LEN] = { 0 };
+ struct ice_nvm_info *nvm = &hw->nvm;
+ struct ice_orom_info *orom;
+
+ orom = &nvm->orom;
+
+ SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
+ nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
+ orom->build, orom->patch);
+ ice_warn(hw,
+ "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
+ nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
+}
+
+/**
+ * ice_init_hw - main hardware initialization routine
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw(struct ice_hw *hw)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+ u16 mac_buf_len;
+ void *mac_buf;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Set MAC type based on DeviceID */
+ status = ice_set_mac_type(hw);
+ if (status)
+ return status;
+
+ hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
+ PF_FUNC_RID_FUNCTION_NUMBER_M) >>
+ PF_FUNC_RID_FUNCTION_NUMBER_S;
+
+ status = ice_reset(hw, ICE_RESET_PFR);
+ if (status)
+ return status;
+
+ ice_get_itr_intrl_gran(hw);
+
+ status = ice_create_all_ctrlq(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ status = ice_init_nvm(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
+ ice_print_rollback_msg(hw);
+
+ status = ice_clear_pf_cfg(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ ice_clear_pxe_mode(hw);
+
+ status = ice_get_caps(hw);
+ if (status)
+ goto err_unroll_cqinit;
+
+ hw->port_info = (struct ice_port_info *)
+ ice_malloc(hw, sizeof(*hw->port_info));
+ if (!hw->port_info) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll_cqinit;
+ }
+
+ /* set the back pointer to HW */
+ hw->port_info->hw = hw;
+
+ /* Initialize port_info struct with switch configuration data */
+ status = ice_get_initial_sw_cfg(hw);
+ if (status)
+ goto err_unroll_alloc;
+
+ hw->evb_veb = true;
+ /* Query the allocated resources for Tx scheduler */
+ status = ice_sched_query_res_alloc(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Failed to get scheduler allocated resources\n");
+ goto err_unroll_alloc;
+ }
+ ice_sched_get_psm_clk_freq(hw);
+
+ /* Initialize port_info struct with scheduler data */
+ status = ice_sched_init_port(hw->port_info);
+ if (status)
+ goto err_unroll_sched;
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll_sched;
+ }
+
+ /* Initialize port_info struct with PHY capabilities */
+ status = ice_aq_get_phy_caps(hw->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
+ ice_free(hw, pcaps);
+ if (status)
+ goto err_unroll_sched;
+
+ /* Initialize port_info struct with link information */
+ status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
+ if (status)
+ goto err_unroll_sched;
+ /* need a valid SW entry point to build a Tx tree */
+ if (!hw->sw_entry_point_layer) {
+ ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
+ status = ICE_ERR_CFG;
+ goto err_unroll_sched;
+ }
+ INIT_LIST_HEAD(&hw->agg_list);
+ /* Initialize max burst size */
+ if (!hw->max_burst_size)
+ ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
+
+ status = ice_init_fltr_mgmt_struct(hw);
+ if (status)
+ goto err_unroll_sched;
+
+ /* Get MAC information */
+ /* A single port can report up to two (LAN and WoL) addresses */
+ mac_buf = ice_calloc(hw, 2,
+ sizeof(struct ice_aqc_manage_mac_read_resp));
+ mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
+
+ if (!mac_buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll_fltr_mgmt_struct;
+ }
+
+ status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
+ ice_free(hw, mac_buf);
+
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
+ status = ice_init_hw_tbls(hw);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
+ ice_init_lock(&hw->tnl_lock);
+ return ICE_SUCCESS;
+
+err_unroll_fltr_mgmt_struct:
+ ice_cleanup_fltr_mgmt_struct(hw);
+err_unroll_sched:
+ ice_sched_cleanup_all(hw);
+err_unroll_alloc:
+ ice_free(hw, hw->port_info);
+ hw->port_info = NULL;
+err_unroll_cqinit:
+ ice_destroy_all_ctrlq(hw);
+ return status;
+}
+
+/**
+ * ice_deinit_hw - unroll initialization operations done by ice_init_hw
+ * @hw: pointer to the hardware structure
+ *
+ * This should be called only during nominal operation, not as a result of
+ * ice_init_hw() failing since ice_init_hw() will take care of unrolling
+ * applicable initializations if it fails for any reason.
+ */
+void ice_deinit_hw(struct ice_hw *hw)
+{
+ ice_cleanup_fltr_mgmt_struct(hw);
+
+ ice_sched_cleanup_all(hw);
+ ice_sched_clear_agg(hw);
+ ice_free_seg(hw);
+ ice_free_hw_tbls(hw);
+ ice_destroy_lock(&hw->tnl_lock);
+
+ if (hw->port_info) {
+ ice_free(hw, hw->port_info);
+ hw->port_info = NULL;
+ }
+
+ ice_destroy_all_ctrlq(hw);
+
+ /* Clear VSI contexts if not already cleared */
+ ice_clear_all_vsi_ctx(hw);
+}
+
+/**
+ * ice_check_reset - Check to see if a global reset is complete
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_check_reset(struct ice_hw *hw)
+{
+ u32 cnt, reg = 0, grst_delay, uld_mask;
+
+ /* Poll for Device Active state in case a recent CORER, GLOBR,
+ * or EMPR has occurred. The grst delay value is in 100ms units.
+ * Add 1sec for outstanding AQ commands that can take a long time.
+ */
+ grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
+ GLGEN_RSTCTL_GRSTDEL_S) + 10;
+
+ for (cnt = 0; cnt < grst_delay; cnt++) {
+ ice_msec_delay(100, true);
+ reg = rd32(hw, GLGEN_RSTAT);
+ if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
+ break;
+ }
+
+ if (cnt == grst_delay) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Global reset polling failed to complete.\n");
+ return ICE_ERR_RESET_FAILED;
+ }
+
+#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
+ GLNVM_ULD_PCIER_DONE_1_M |\
+ GLNVM_ULD_CORER_DONE_M |\
+ GLNVM_ULD_GLOBR_DONE_M |\
+ GLNVM_ULD_POR_DONE_M |\
+ GLNVM_ULD_POR_DONE_1_M |\
+ GLNVM_ULD_PCIER_DONE_2_M)
+
+ uld_mask = ICE_RESET_DONE_MASK;
+
+ /* Device is Active; check Global Reset processes are done */
+ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, GLNVM_ULD) & uld_mask;
+ if (reg == uld_mask) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Global reset processes done. %d\n", cnt);
+ break;
+ }
+ ice_msec_delay(10, true);
+ }
+
+ if (cnt == ICE_PF_RESET_WAIT_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
+ reg);
+ return ICE_ERR_RESET_FAILED;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * If a global reset has been triggered, this function checks
+ * for its completion and then issues the PF reset
+ */
+static enum ice_status ice_pf_reset(struct ice_hw *hw)
+{
+ u32 cnt, reg;
+
+ /* If at function entry a global reset was already in progress, i.e.
+ * state is not 'device active' or any of the reset done bits are not
+ * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
+ * global reset is done.
+ */
+ if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
+ (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
+ /* poll on global reset currently in progress until done */
+ if (ice_check_reset(hw))
+ return ICE_ERR_RESET_FAILED;
+
+ return ICE_SUCCESS;
+ }
+
+ /* Reset the PF */
+ reg = rd32(hw, PFGEN_CTRL);
+
+ wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
+
+ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
+ reg = rd32(hw, PFGEN_CTRL);
+ if (!(reg & PFGEN_CTRL_PFSWR_M))
+ break;
+
+ ice_msec_delay(1, true);
+ }
+
+ if (cnt == ICE_PF_RESET_WAIT_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "PF reset polling failed to complete.\n");
+ return ICE_ERR_RESET_FAILED;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_reset - Perform different types of reset
+ * @hw: pointer to the hardware structure
+ * @req: reset request
+ *
+ * This function triggers a reset as specified by the req parameter.
+ *
+ * Note:
+ * If anything other than a PF reset is triggered, PXE mode is restored.
+ * This has to be cleared using ice_clear_pxe_mode again, once the AQ
+ * interface has been restored in the rebuild flow.
+ */
+enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
+{
+ u32 val = 0;
+
+ switch (req) {
+ case ICE_RESET_PFR:
+ return ice_pf_reset(hw);
+ case ICE_RESET_CORER:
+ ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
+ val = GLGEN_RTRIG_CORER_M;
+ break;
+ case ICE_RESET_GLOBR:
+ ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
+ val = GLGEN_RTRIG_GLOBR_M;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ val |= rd32(hw, GLGEN_RTRIG);
+ wr32(hw, GLGEN_RTRIG, val);
+ ice_flush(hw);
+
+ /* wait for the FW to be ready */
+ return ice_check_reset(hw);
+}
+
+/**
+ * ice_copy_rxq_ctx_to_hw
+ * @hw: pointer to the hardware structure
+ * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_index: the index of the Rx queue
+ *
+ * Copies rxq context from dense structure to HW register space
+ */
+static enum ice_status
+ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+{
+ u8 i;
+
+ if (!ice_rxq_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Copy each dword separately to HW */
+ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ wr32(hw, QRX_CONTEXT(i, rxq_index),
+ *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
+ *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+ }
+
+ return ICE_SUCCESS;
+}
+
+/* LAN Rx Queue Context */
+static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
+ ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
+ ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
+ ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
+ ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
+ ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
+ ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
+ ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
+ ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
+ ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
+ ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
+ ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
+ ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
+ ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
+ ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
+ ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
+ ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
+ ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
+ ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
+ ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
+ { 0 }
+};
+
+/**
+ * ice_write_rxq_ctx
+ * @hw: pointer to the hardware structure
+ * @rlan_ctx: pointer to the rxq context
+ * @rxq_index: the index of the Rx queue
+ *
+ * Converts rxq context from sparse to dense structure, then writes it to
+ * HW register space and enables the hardware to prefetch descriptors
+ * instead of fetching them only on demand
+ */
+enum ice_status
+ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
+{
+ u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+
+ if (!rlan_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ rlan_ctx->prefena = 1;
+
+ ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
+ return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+}
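+
+/*
+ * The (width, LSB) table above drives ice_set_ctx(), which packs each sparse
+ * structure field into the dense context image written out by
+ * ice_copy_rxq_ctx_to_hw(). A simplified, self-contained sketch of that
+ * bit-packing idea (not the driver's ice_set_ctx implementation) could be:
+ *
+ *	static void pack_field(u8 *dest, u64 val, u16 width, u16 lsb)
+ *	{
+ *		u16 bit;
+ *
+ *		for (bit = 0; bit < width; bit++)
+ *			if (val & BIT_ULL(bit))
+ *				dest[(lsb + bit) / 8] |= 1 << ((lsb + bit) % 8);
+ *	}
+ *
+ * For example, pack_field(ctx_buf, rlan_ctx->qlen, 13, 89) would place the
+ * 13-bit qlen field at bit offset 89 of the dense buffer, matching the
+ * ice_rlan_ctx_info entry for qlen.
+ */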
+
+/**
+ * ice_clear_rxq_ctx
+ * @hw: pointer to the hardware structure
+ * @rxq_index: the index of the Rx queue to clear
+ *
+ * Clears rxq context in HW register space
+ */
+enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
+{
+ u8 i;
+
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Clear each dword register separately */
+ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
+ wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
+
+ return ICE_SUCCESS;
+}
+
+/* LAN Tx Queue Context */
+const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
+ ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
+ ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
+ ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
+ ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
+ ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
+ ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
+ ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
+ ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
+ ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
+ ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
+ ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
+ ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
+ ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
+ ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
+ ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
+ ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
+ ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
+ ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
+ ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
+ ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
+ ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
+ ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
+ ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
+ ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
+ ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
+ ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
+ { 0 }
+};
+
+/**
+ * ice_copy_tx_cmpltnq_ctx_to_hw
+ * @hw: pointer to the hardware structure
+ * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
+ * @tx_cmpltnq_index: the index of the completion queue
+ *
+ * Copies Tx completion queue context from dense structure to HW register space
+ */
+static enum ice_status
+ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
+ u32 tx_cmpltnq_index)
+{
+ u8 i;
+
+ if (!ice_tx_cmpltnq_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Copy each dword separately to HW */
+ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
+ wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
+ *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
+
+ ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
+ *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
+ }
+
+ return ICE_SUCCESS;
+}
+
+/* LAN Tx Completion Queue Context */
+static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
+ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
+ { 0 }
+};
+
+/**
+ * ice_write_tx_cmpltnq_ctx
+ * @hw: pointer to the hardware structure
+ * @tx_cmpltnq_ctx: pointer to the completion queue context
+ * @tx_cmpltnq_index: the index of the completion queue
+ *
+ * Converts completion queue context from sparse to dense structure and then
+ * writes it to HW register space
+ */
+enum ice_status
+ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
+ struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
+ u32 tx_cmpltnq_index)
+{
+ u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
+
+ ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
+ return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
+}
+
+/**
+ * ice_clear_tx_cmpltnq_ctx
+ * @hw: pointer to the hardware structure
+ * @tx_cmpltnq_index: the index of the completion queue to clear
+ *
+ * Clears Tx completion queue context in HW register space
+ */
+enum ice_status
+ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
+{
+ u8 i;
+
+ if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Clear each dword register separately */
+ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
+ wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_copy_tx_drbell_q_ctx_to_hw
+ * @hw: pointer to the hardware structure
+ * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
+ * @tx_drbell_q_index: the index of the doorbell queue
+ *
+ * Copies doorbell queue context from dense structure to HW register space
+ */
+static enum ice_status
+ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
+ u32 tx_drbell_q_index)
+{
+ u8 i;
+
+ if (!ice_tx_drbell_q_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Copy each dword separately to HW */
+ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
+ wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
+ *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
+
+ ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
+ *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
+ }
+
+ return ICE_SUCCESS;
+}
+
+/* LAN Tx Doorbell Queue Context info */
+static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
+ ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
+ { 0 }
+};
+
+/**
+ * ice_write_tx_drbell_q_ctx
+ * @hw: pointer to the hardware structure
+ * @tx_drbell_q_ctx: pointer to the doorbell queue context
+ * @tx_drbell_q_index: the index of the doorbell queue
+ *
+ * Converts doorbell queue context from sparse to dense structure and then
+ * writes it to HW register space
+ */
+enum ice_status
+ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
+ struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
+ u32 tx_drbell_q_index)
+{
+ u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
+
+ ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info);
+ return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
+}
+
+/**
+ * ice_clear_tx_drbell_q_ctx
+ * @hw: pointer to the hardware structure
+ * @tx_drbell_q_index: the index of the doorbell queue to clear
+ *
+ * Clears doorbell queue context in HW register space
+ */
+enum ice_status
+ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
+{
+ u8 i;
+
+ if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
+ return ICE_ERR_PARAM;
+
+ /* Clear each dword register separately */
+ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
+ wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
+
+ return ICE_SUCCESS;
+}
+
+/* FW Admin Queue command wrappers */
+
+/**
+ * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * Helper function to send FW Admin Queue commands to the FW Admin Queue.
+ */
+enum ice_status
+ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_get_fw_ver
+ * @hw: pointer to the HW struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the firmware version (0x0001) from the admin queue commands
+ */
+enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_ver *resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ resp = &desc.params.get_ver;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ if (!status) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = LE32_TO_CPU(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_send_driver_ver
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ * @cd: pointer to command details structure or NULL
+ *
+ * Send the driver version (0x0002) to the firmware
+ */
+enum ice_status
+ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_ver *cmd;
+ struct ice_aq_desc desc;
+ u16 len;
+
+ cmd = &desc.params.driver_ver;
+
+ if (!dv)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ cmd->major_ver = dv->major_ver;
+ cmd->minor_ver = dv->minor_ver;
+ cmd->build_ver = dv->build_ver;
+ cmd->subbuild_ver = dv->subbuild_ver;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
+ len++;
+
+ return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
+}
+
+/**
+ * ice_aq_q_shutdown
+ * @hw: pointer to the HW struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well (0x0003).
+ */
+enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
+{
+ struct ice_aqc_q_shutdown *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.q_shutdown;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_req_res
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cd: pointer to command details structure or NULL
+ *
+ * Requests a common resource using the admin queue commands (0x0008).
+ * When attempting to acquire the Global Config Lock, the driver can
+ * learn of three states:
+ * 1) ICE_SUCCESS - acquired lock, and can perform download package
+ * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
+ * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
+ * successfully downloaded the package; the driver does
+ * not have to download the package and can continue
+ * loading
+ *
+ * Note that if the caller is in an acquire lock, perform action, release lock
+ * phase of operation, it is possible that the FW may detect a timeout and issue
+ * a CORER. In this case, the driver will receive a CORER interrupt and will
+ * have to determine its cause. The calling thread that is handling this flow
+ * will likely get an error propagated back to it indicating that the Download
+ * Package, Update Package or Release Resource AQ commands timed out.
+ */
+static enum ice_status
+ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
+ enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_req_res *cmd_resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd_resp = &desc.params.res_owner;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
+
+ cmd_resp->res_id = CPU_TO_LE16(res);
+ cmd_resp->access_type = CPU_TO_LE16(access);
+ cmd_resp->res_number = CPU_TO_LE32(sdp_number);
+ cmd_resp->timeout = CPU_TO_LE32(*timeout);
+ *timeout = 0;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ */
+
+ /* Global config lock response utilizes an additional status field.
+ *
+ * If the Global config lock resource is held by some other driver, the
+ * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
+ * and the timeout field indicates the maximum time the current owner
+ * of the resource has to free it.
+ */
+ if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
+ if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+ return ICE_SUCCESS;
+ } else if (LE16_TO_CPU(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_IN_PROG) {
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+ return ICE_ERR_AQ_ERROR;
+ } else if (LE16_TO_CPU(cmd_resp->status) ==
+ ICE_AQ_RES_GLBL_DONE) {
+ return ICE_ERR_AQ_NO_WORK;
+ }
+
+ /* invalid FW response, force a timeout immediately */
+ *timeout = 0;
+ return ICE_ERR_AQ_ERROR;
+ }
+
+ /* If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
+ *timeout = LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * ice_aq_release_res
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ * @cd: pointer to command details structure or NULL
+ *
+ * Release a common resource using the admin queue commands (0x0009)
+ */
+static enum ice_status
+ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_req_res *cmd;
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.res_owner;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
+
+ cmd->res_id = CPU_TO_LE16(res);
+ cmd->res_number = CPU_TO_LE32(sdp_number);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_acquire_res
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * This function will attempt to acquire the ownership of a resource.
+ */
+enum ice_status
+ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
+ enum ice_aq_res_access_type access, u32 timeout)
+{
+#define ICE_RES_POLLING_DELAY_MS 10
+ u32 delay = ICE_RES_POLLING_DELAY_MS;
+ u32 time_left = timeout;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
+
+ /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (status == ICE_ERR_AQ_NO_WORK)
+ goto ice_acquire_res_exit;
+
+ if (status)
+ ice_debug(hw, ICE_DBG_RES,
+ "resource %d acquire type %d failed.\n", res, access);
+
+ /* If necessary, poll until the current lock owner times out */
+ timeout = time_left;
+ while (status && timeout && time_left) {
+ ice_msec_delay(delay, true);
+ timeout = (timeout > delay) ? timeout - delay : 0;
+ status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
+
+ if (status == ICE_ERR_AQ_NO_WORK)
+ /* lock free, but no work to do */
+ break;
+
+ if (!status)
+ /* lock acquired */
+ break;
+ }
+ if (status && status != ICE_ERR_AQ_NO_WORK)
+ ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
+
+ice_acquire_res_exit:
+ if (status == ICE_ERR_AQ_NO_WORK) {
+ if (access == ICE_RES_WRITE)
+ ice_debug(hw, ICE_DBG_RES,
+ "resource indicates no work to do.\n");
+ else
+ ice_debug(hw, ICE_DBG_RES,
+ "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+ }
+ return status;
+}
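+
+/*
+ * A caller acquiring the Global Config Lock handles the three outcomes
+ * documented for ice_aq_req_res() above. An illustrative sketch only (the
+ * timeout value and the package download step are placeholders, not the
+ * driver's actual flow):
+ *
+ *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID,
+ *				 ICE_RES_WRITE, timeout_ms);
+ *	if (!status) {
+ *		// lock acquired: download the DDP package, then release
+ *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+ *	} else if (status == ICE_ERR_AQ_NO_WORK) {
+ *		// another PF already downloaded the package; continue load
+ *	} else {
+ *		// lock not acquired; fail the driver load
+ *	}
+ */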
+
+/**
+ * ice_release_res
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * This function will release a resource using the proper Admin Command.
+ */
+void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
+{
+ enum ice_status status;
+ u32 total_delay = 0;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_aq_release_res(hw, res, 0, NULL);
+
+ /* there are some rare cases when trying to release the resource
+ * results in an admin queue timeout, so handle them correctly
+ */
+ while ((status == ICE_ERR_AQ_TIMEOUT) &&
+ (total_delay < hw->adminq.sq_cmd_timeout)) {
+ ice_msec_delay(1, true);
+ status = ice_aq_release_res(hw, res, 0, NULL);
+ total_delay++;
+ }
+}
+
+/**
+ * ice_aq_alloc_free_res - command to allocate/free resources
+ * @hw: pointer to the HW struct
+ * @num_entries: number of resource entries in buffer
+ * @buf: Indirect buffer to hold data parameters and response
+ * @buf_size: size of buffer for indirect commands
+ * @opc: pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Helper function to allocate/free resources using the admin queue commands
+ */
+enum ice_status
+ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_alloc_free_res_cmd *cmd;
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.sw_res_ctrl;
+
+ if (!buf)
+ return ICE_ERR_PARAM;
+
+ if (buf_size < (num_entries * sizeof(buf->elem[0])))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ cmd->num_entries = CPU_TO_LE16(num_entries);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_alloc_hw_res - allocate resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource
+ * @num: number of resources to allocate
+ * @btm: allocate from bottom
+ * @res: pointer to array that will receive the resources
+ */
+enum ice_status
+ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = ice_struct_size(buf, elem, num - 1);
+ buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to allocate resource. */
+ buf->num_elems = CPU_TO_LE16(num);
+ buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
+ ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
+ if (btm)
+ buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (status)
+ goto ice_alloc_res_exit;
+
+ ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
+ ICE_NONDMA_TO_NONDMA);
+
+ice_alloc_res_exit:
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_free_hw_res - free allocated HW resource
+ * @hw: pointer to the HW struct
+ * @type: type of resource to free
+ * @num: number of resources
+ * @res: pointer to array that contains the resources to free
+ */
+enum ice_status
+ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = ice_struct_size(buf, elem, num - 1);
+ buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to free resource. */
+ buf->num_elems = CPU_TO_LE16(num);
+ buf->res_type = CPU_TO_LE16(type);
+ ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
+ ICE_NONDMA_TO_NONDMA);
+
+ status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
+
+ ice_free(hw, buf);
+ return status;
+}
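+
+/*
+ * ice_alloc_hw_res() and ice_free_hw_res() are normally used as a pair. A
+ * minimal sketch (the resource type is a placeholder):
+ *
+ *	u16 res_id;
+ *
+ *	if (!ice_alloc_hw_res(hw, type, 1, false, &res_id)) {
+ *		// ... use the allocated resource ID ...
+ *		ice_free_hw_res(hw, type, 1, &res_id);
+ *	}
+ */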
+
+/**
+ * ice_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ */
+static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
+{
+ u8 funcs;
+
+#define ICE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
+ ICE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return max / funcs;
+}
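+
+/*
+ * For example, with a valid_functions bitmap of 0x0F (four PFs) and
+ * max = 768, ice_get_num_per_func() returns 192 resources per PF.
+ */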
+
+/**
+ * ice_print_led_caps - print LED capabilities
+ * @hw: pointer to the ice_hw instance
+ * @caps: pointer to common caps instance
+ * @prefix: string to prefix when printing
+ * @debug: set to indicate debug print
+ */
+static void
+ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
+ char const *prefix, bool debug)
+{
+ u8 i;
+
+ if (debug)
+ ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
+ caps->led_pin_num);
+ else
+ ice_info(hw, "%s: led_pin_num = %d\n", prefix,
+ caps->led_pin_num);
+
+ for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
+ if (!caps->led[i])
+ continue;
+
+ if (debug)
+ ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
+ prefix, i, caps->led[i]);
+ else
+ ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
+ caps->led[i]);
+ }
+}
+
+/**
+ * ice_print_sdp_caps - print SDP capabilities
+ * @hw: pointer to the ice_hw instance
+ * @caps: pointer to common caps instance
+ * @prefix: string to prefix when printing
+ * @debug: set to indicate debug print
+ */
+static void
+ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
+ char const *prefix, bool debug)
+{
+ u8 i;
+
+ if (debug)
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
+ caps->sdp_pin_num);
+ else
+ ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
+ caps->sdp_pin_num);
+
+ for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
+ if (!caps->sdp[i])
+ continue;
+
+ if (debug)
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
+ prefix, i, caps->sdp[i]);
+ else
+ ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
+ i, caps->sdp[i]);
+ }
+}
+
+/**
+ * ice_parse_caps - parse function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: pointer to a buffer containing function/device capability records
+ * @cap_count: number of capability records in the list
+ * @opc: type of capabilities list to parse
+ *
+ * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
+ */
+static void
+ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
+ enum ice_adminq_opc opc)
+{
+ struct ice_aqc_list_caps_elem *cap_resp;
+ struct ice_hw_func_caps *func_p = NULL;
+ struct ice_hw_dev_caps *dev_p = NULL;
+ struct ice_hw_common_caps *caps;
+ char const *prefix;
+ u32 i;
+
+ if (!buf)
+ return;
+
+ cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+
+ if (opc == ice_aqc_opc_list_dev_caps) {
+ dev_p = &hw->dev_caps;
+ caps = &dev_p->common_cap;
+ prefix = "dev cap";
+ } else if (opc == ice_aqc_opc_list_func_caps) {
+ func_p = &hw->func_caps;
+ caps = &func_p->common_cap;
+ prefix = "func cap";
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
+ return;
+ }
+
+ for (i = 0; caps && i < cap_count; i++, cap_resp++) {
+ u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
+ u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
+ u32 number = LE32_TO_CPU(cap_resp->number);
+ u16 cap = LE16_TO_CPU(cap_resp->cap);
+
+ switch (cap) {
+ case ICE_AQC_CAPS_SWITCHING_MODE:
+ caps->switching_mode = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: switching_mode = %d\n", prefix,
+ caps->switching_mode);
+ break;
+ case ICE_AQC_CAPS_MANAGEABILITY_MODE:
+ caps->mgmt_mode = number;
+ caps->mgmt_protocols_mctp = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: mgmt_mode = %d\n", prefix,
+ caps->mgmt_mode);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: mgmt_protocols_mctp = %d\n", prefix,
+ caps->mgmt_protocols_mctp);
+ break;
+ case ICE_AQC_CAPS_OS2BMC:
+ caps->os2bmc = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: os2bmc = %d\n", prefix, caps->os2bmc);
+ break;
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: valid_functions (bitmap) = %d\n", prefix,
+ caps->valid_functions);
+
+ /* store func count for resource management purposes */
+ if (dev_p)
+ dev_p->num_funcs = ice_hweight32(number);
+ break;
+ case ICE_AQC_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: sr_iov_1_1 = %d\n", prefix,
+ caps->sr_iov_1_1);
+ break;
+ case ICE_AQC_CAPS_VF:
+ if (dev_p) {
+ dev_p->num_vfs_exposed = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_vfs_exposed = %d\n", prefix,
+ dev_p->num_vfs_exposed);
+ } else if (func_p) {
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_allocd_vfs = %d\n", prefix,
+ func_p->num_allocd_vfs);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: vf_base_id = %d\n", prefix,
+ func_p->vf_base_id);
+ }
+ break;
+ case ICE_AQC_CAPS_802_1QBG:
+ caps->evb_802_1_qbg = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: evb_802_1_qbg = %d\n", prefix, number);
+ break;
+ case ICE_AQC_CAPS_802_1BR:
+ caps->evb_802_1_qbh = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: evb_802_1_qbh = %d\n", prefix, number);
+ break;
+ case ICE_AQC_CAPS_VSI:
+ if (dev_p) {
+ dev_p->num_vsi_allocd_to_host = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_vsi_allocd_to_host = %d\n",
+ prefix,
+ dev_p->num_vsi_allocd_to_host);
+ } else if (func_p) {
+ func_p->guar_num_vsi =
+ ice_get_num_per_func(hw, ICE_MAX_VSI);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: guar_num_vsi (fw) = %d\n",
+ prefix, number);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: guar_num_vsi = %d\n",
+ prefix, func_p->guar_num_vsi);
+ }
+ break;
+ case ICE_AQC_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: dcb = %d\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: active_tc_bitmap = %d\n", prefix,
+ caps->active_tc_bitmap);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: maxtc = %d\n", prefix, caps->maxtc);
+ break;
+ case ICE_AQC_CAPS_ISCSI:
+ caps->iscsi = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: iscsi = %d\n", prefix, caps->iscsi);
+ break;
+ case ICE_AQC_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rss_table_size = %d\n", prefix,
+ caps->rss_table_size);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rss_table_entry_width = %d\n", prefix,
+ caps->rss_table_entry_width);
+ break;
+ case ICE_AQC_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_rxq = %d\n", prefix,
+ caps->num_rxq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: rxq_first_id = %d\n", prefix,
+ caps->rxq_first_id);
+ break;
+ case ICE_AQC_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_txq = %d\n", prefix,
+ caps->num_txq);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: txq_first_id = %d\n", prefix,
+ caps->txq_first_id);
+ break;
+ case ICE_AQC_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_msix_vectors = %d\n", prefix,
+ caps->num_msix_vectors);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: msix_vector_first_id = %d\n", prefix,
+ caps->msix_vector_first_id);
+ break;
+ case ICE_AQC_CAPS_NVM_VER:
+ break;
+ case ICE_AQC_CAPS_NVM_MGMT:
+ caps->nvm_unified_update =
+ (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: nvm_unified_update = %d\n", prefix,
+ caps->nvm_unified_update);
+ break;
+ case ICE_AQC_CAPS_CEM:
+ caps->mgmt_cem = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: mgmt_cem = %d\n", prefix,
+ caps->mgmt_cem);
+ break;
+ case ICE_AQC_CAPS_LED:
+ if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
+ caps->led[phys_id] = true;
+ caps->led_pin_num++;
+ }
+ break;
+ case ICE_AQC_CAPS_SDP:
+ if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
+ caps->sdp[phys_id] = true;
+ caps->sdp_pin_num++;
+ }
+ break;
+ case ICE_AQC_CAPS_WR_CSR_PROT:
+ caps->wr_csr_prot = number;
+ caps->wr_csr_prot |= (u64)logical_id << 32;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: wr_csr_prot = 0x%llX\n", prefix,
+ (unsigned long long)caps->wr_csr_prot);
+ break;
+ case ICE_AQC_CAPS_WOL_PROXY:
+ caps->num_wol_proxy_fltr = number;
+ caps->wol_proxy_vsi_seid = logical_id;
+ caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
+ caps->acpi_prog_mthd = !!(phys_id &
+ ICE_ACPI_PROG_MTHD_M);
+ caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: num_wol_proxy_fltr = %d\n", prefix,
+ caps->num_wol_proxy_fltr);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: wol_proxy_vsi_seid = %d\n", prefix,
+ caps->wol_proxy_vsi_seid);
+ break;
+ case ICE_AQC_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
+ prefix, caps->max_mtu);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: unknown capability[%d]: 0x%x\n", prefix,
+ i, cap);
+ break;
+ }
+ }
+
+ ice_print_led_caps(hw, caps, prefix, true);
+ ice_print_sdp_caps(hw, caps, prefix, true);
+
+ /* Re-calculate capabilities that are dependent on the number of
+ * physical ports; i.e. some features are not supported or function
+ * differently on devices with more than 4 ports.
+ */
+ if (hw->dev_caps.num_funcs > 4) {
+ /* Max 4 TCs per port */
+ caps->maxtc = 4;
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: maxtc = %d (based on #ports)\n", prefix,
+ caps->maxtc);
+ }
+}
+
+/**
+ * ice_aq_discover_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a virtual buffer to hold the capabilities
+ * @buf_size: Size of the virtual buffer
+ * @cap_count: cap count needed if AQ err==ENOMEM
+ * @opc: capabilities type to discover - pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the function(0x000a)/device(0x000b) capabilities description from
+ * the firmware.
+ */
+enum ice_status
+ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+ enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_list_caps *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ice_aqc_opc_list_func_caps &&
+ opc != ice_aqc_opc_list_dev_caps)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status)
+ ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
+ else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+ *cap_count = LE32_TO_CPU(cmd->count);
+ return status;
+}
+
+/**
+ * ice_discover_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ * @opc: capabilities type to discover - pass in the command opcode
+ */
+static enum ice_status
+ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
+{
+ enum ice_status status;
+ u32 cap_count;
+ u16 cbuf_len;
+ u8 retries;
+
+ /* The driver doesn't know how many capabilities the device will return
+ * so the buffer size required isn't known ahead of time. The driver
+ * starts with cbuf_len and if this turns out to be insufficient, the
+ * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
+ * The driver then allocates the buffer based on the count and retries
+ * the operation. So it follows that the retry count is 2.
+ */
+#define ICE_GET_CAP_BUF_COUNT 40
+#define ICE_GET_CAP_RETRY_COUNT 2
+
+ cap_count = ICE_GET_CAP_BUF_COUNT;
+ retries = ICE_GET_CAP_RETRY_COUNT;
+
+ do {
+ void *cbuf;
+
+ cbuf_len = (u16)(cap_count *
+ sizeof(struct ice_aqc_list_caps_elem));
+ cbuf = ice_malloc(hw, cbuf_len);
+ if (!cbuf)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
+ opc, NULL);
+ ice_free(hw, cbuf);
+
+ if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
+ break;
+
+ /* If ENOMEM is returned, try again with a bigger buffer */
+ } while (--retries);
+
+ return status;
+}
+
+/**
+ * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
+ * @hw: pointer to the hardware structure
+ */
+void ice_set_safe_mode_caps(struct ice_hw *hw)
+{
+ struct ice_hw_func_caps *func_caps = &hw->func_caps;
+ struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
+ u32 valid_func, rxq_first_id, txq_first_id;
+ u32 msix_vector_first_id, max_mtu;
+ u32 num_funcs;
+
+ /* cache some func_caps values that should be restored after memset */
+ valid_func = func_caps->common_cap.valid_functions;
+ txq_first_id = func_caps->common_cap.txq_first_id;
+ rxq_first_id = func_caps->common_cap.rxq_first_id;
+ msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
+ max_mtu = func_caps->common_cap.max_mtu;
+
+ /* unset func capabilities */
+ memset(func_caps, 0, sizeof(*func_caps));
+
+ /* restore cached values */
+ func_caps->common_cap.valid_functions = valid_func;
+ func_caps->common_cap.txq_first_id = txq_first_id;
+ func_caps->common_cap.rxq_first_id = rxq_first_id;
+ func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
+ func_caps->common_cap.max_mtu = max_mtu;
+
+ /* one Tx and one Rx queue in safe mode */
+ func_caps->common_cap.num_rxq = 1;
+ func_caps->common_cap.num_txq = 1;
+
+ /* two MSIX vectors, one for traffic and one for misc causes */
+ func_caps->common_cap.num_msix_vectors = 2;
+ func_caps->guar_num_vsi = 1;
+
+ /* cache some dev_caps values that should be restored after memset */
+ valid_func = dev_caps->common_cap.valid_functions;
+ txq_first_id = dev_caps->common_cap.txq_first_id;
+ rxq_first_id = dev_caps->common_cap.rxq_first_id;
+ msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
+ max_mtu = dev_caps->common_cap.max_mtu;
+ num_funcs = dev_caps->num_funcs;
+
+ /* unset dev capabilities */
+ memset(dev_caps, 0, sizeof(*dev_caps));
+
+ /* restore cached values */
+ dev_caps->common_cap.valid_functions = valid_func;
+ dev_caps->common_cap.txq_first_id = txq_first_id;
+ dev_caps->common_cap.rxq_first_id = rxq_first_id;
+ dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
+ dev_caps->common_cap.max_mtu = max_mtu;
+ dev_caps->num_funcs = num_funcs;
+
+ /* one Tx and one Rx queue per function in safe mode */
+ dev_caps->common_cap.num_rxq = num_funcs;
+ dev_caps->common_cap.num_txq = num_funcs;
+
+ /* two MSIX vectors per function */
+ dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
+}
+
+/**
+ * ice_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_caps(struct ice_hw *hw)
+{
+ enum ice_status status;
+
+ status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
+ if (!status)
+ status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+
+ return status;
+}
+
+/**
+ * ice_aq_manage_mac_write - manage MAC address write command
+ * @hw: pointer to the HW struct
+ * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
+ * @flags: flags to control write behavior
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function is used to write MAC address to the NVM (0x0108).
+ */
+enum ice_status
+ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_manage_mac_write *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.mac_write;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
+
+ cmd->flags = flags;
+ ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_clear_pxe_mode
+ * @hw: pointer to the HW struct
+ *
+ * Tell the firmware that the driver is taking over from PXE (0x0110).
+ */
+static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
+ desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the HW struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ */
+void ice_clear_pxe_mode(struct ice_hw *hw)
+{
+ if (ice_check_sq_alive(hw, &hw->adminq))
+ ice_aq_clear_pxe_mode(hw);
+}
+
+/**
+ * ice_aq_set_port_params - set physical port parameters.
+ * @pi: pointer to the port info struct
+ * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
+ * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
+ * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
+ * @double_vlan: if set, double VLAN is enabled
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Physical port parameters (0x0203)
+ */
+enum ice_status
+ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
+ bool save_bad_pac, bool pad_short_pac, bool double_vlan,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_port_params *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+ u16 cmd_flags = 0;
+
+ cmd = &desc.params.set_port_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+ cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
+ if (save_bad_pac)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
+ if (pad_short_pac)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
+ if (double_vlan)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_get_link_speed_based_on_phy_type - returns link speed
+ * @phy_type_low: lower part of phy_type
+ * @phy_type_high: higher part of phy_type
+ *
+ * This helper function will convert an entry in the PHY type structure
+ * [phy_type_low, phy_type_high] to its corresponding link speed.
+ * Note: exactly one bit should be set in [phy_type_low, phy_type_high],
+ * as this function converts a single PHY type to its speed.
+ * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
+ * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
+ */
+static u16
+ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
+{
+ u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
+ u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+
+ switch (phy_type_low) {
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
+ break;
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
+ break;
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
+ break;
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
+ break;
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
+ break;
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
+ break;
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
+ break;
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
+ break;
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
+ break;
+ default:
+ speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ switch (phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
+ break;
+ default:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
+ return ICE_AQ_LINK_SPEED_UNKNOWN;
+ else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
+ speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
+ return speed_phy_type_low;
+ else
+ return speed_phy_type_high;
+}
+
+/**
+ * ice_update_phy_type
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
+ * @link_speeds_bitmap: targeted link speeds bitmap
+ *
+ * Note: For the format of link_speeds_bitmap, see
+ * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
+ * link_speeds_bitmap that includes multiple speeds.
+ *
+ * Each entry in the [phy_type_low, phy_type_high] structure represents a
+ * certain link speed. This helper function turns on the bits in
+ * [phy_type_low, phy_type_high] that correspond to the speeds in the
+ * link_speeds_bitmap input parameter.
+ */
+void
+ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
+ u16 link_speeds_bitmap)
+{
+ u64 pt_high;
+ u64 pt_low;
+ int index;
+ u16 speed;
+
+ /* We first check with low part of phy_type */
+ for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
+ pt_low = BIT_ULL(index);
+ speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
+
+ if (link_speeds_bitmap & speed)
+ *phy_type_low |= BIT_ULL(index);
+ }
+
+ /* We then check with high part of phy_type */
+ for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
+ pt_high = BIT_ULL(index);
+ speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
+
+ if (link_speeds_bitmap & speed)
+ *phy_type_high |= BIT_ULL(index);
+ }
+}
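+
+/*
+ * For example, a caller that wants to advertise only 25G speeds can pass
+ * link_speeds_bitmap = ICE_AQ_LINK_SPEED_25GB; every single-bit PHY type
+ * that maps to that speed (the 25GBASE-* and 25G AUI types) is then set
+ * in phy_type_low, and phy_type_high is left unchanged.
+ */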
+
+/**
+ * ice_aq_set_phy_cfg
+ * @hw: pointer to the HW struct
+ * @pi: port info structure of the interested logical port
+ * @cfg: structure with PHY configuration data to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters supported on the Port.
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters. This status will be indicated by the command response (0x0601).
+ */
+enum ice_status
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
+ struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!cfg)
+ return ICE_ERR_PARAM;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
+ ice_debug(hw, ICE_DBG_PHY,
+ "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
+ cfg->caps);
+
+ cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
+ }
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
+ desc.params.set_phy.lport_num = pi->lport;
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
+ ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
+ ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl_an = 0x%x\n",
+ cfg->low_power_ctrl_an);
+ ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+
+ status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
+
+ if (!status)
+ pi->phy.curr_user_phy_cfg = *cfg;
+
+ return status;
+}
+
+/**
+ * ice_update_link_info - update status of the HW network link
+ * @pi: port info structure of the interested logical port
+ */
+enum ice_status ice_update_link_info(struct ice_port_info *pi)
+{
+ struct ice_link_status *li;
+ enum ice_status status;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ li = &pi->phy.link_info;
+
+ status = ice_aq_get_link_info(pi, true, NULL, NULL);
+ if (status)
+ return status;
+
+ if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_hw *hw;
+
+ hw = pi->hw;
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ pcaps, NULL);
+ if (status == ICE_SUCCESS)
+ ice_memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type),
+ ICE_NONDMA_TO_NONDMA);
+
+ ice_free(hw, pcaps);
+ }
+
+ return status;
+}
+
+/**
+ * ice_cache_phy_user_req
+ * @pi: port information structure
+ * @cache_data: PHY logging data
+ * @cache_mode: PHY logging mode
+ *
+ * Log the user's (FC, FEC, SPEED) request for later use.
+ */
+static void
+ice_cache_phy_user_req(struct ice_port_info *pi,
+ struct ice_phy_cache_mode_data cache_data,
+ enum ice_phy_cache_mode cache_mode)
+{
+ if (!pi)
+ return;
+
+ switch (cache_mode) {
+ case ICE_FC_MODE:
+ pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
+ break;
+ case ICE_SPEED_MODE:
+ pi->phy.curr_user_speed_req =
+ cache_data.data.curr_user_speed_req;
+ break;
+ case ICE_FEC_MODE:
+ pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_caps_to_fc_mode
+ * @caps: PHY capabilities
+ *
+ * Convert PHY FC capabilities to ice FC mode
+ */
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
+{
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
+ caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+ return ICE_FC_FULL;
+
+ if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
+ return ICE_FC_TX_PAUSE;
+
+ if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+ return ICE_FC_RX_PAUSE;
+
+ return ICE_FC_NONE;
+}
+
+/**
+ * ice_caps_to_fec_mode
+ * @caps: PHY capabilities
+ * @fec_options: Link FEC options
+ *
+ * Convert PHY FEC capabilities to ice FEC mode
+ */
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
+{
+ if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
+ return ICE_FEC_AUTO;
+
+ if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+ ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
+ ICE_AQC_PHY_FEC_25G_KR_REQ))
+ return ICE_FEC_BASER;
+
+ if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
+ return ICE_FEC_RS;
+
+ return ICE_FEC_NONE;
+}
+
+/**
+ * ice_set_fc
+ * @pi: port information structure
+ * @aq_failures: pointer to status code, specific to ice_set_fc routine
+ * @ena_auto_link_update: enable automatic link update
+ *
+ * Set the requested flow control mode.
+ */
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+{
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_phy_cache_mode_data cache_data;
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status;
+ u8 pause_mask = 0x0;
+ struct ice_hw *hw;
+
+ if (!pi || !aq_failures)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+ *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
+
+ /* Cache user FC request */
+ cache_data.data.curr_user_fc_req = pi->fc.req_mode;
+ ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ switch (pi->fc.req_mode) {
+ case ICE_FC_AUTO:
+ /* Query the flow control modes that both the NIC and the
+ * attached media support.
+ */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ pcaps, NULL);
+ if (status) {
+ *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
+ goto out;
+ }
+
+ pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ICE_FC_FULL:
+ pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ICE_FC_RX_PAUSE:
+ pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ICE_FC_TX_PAUSE:
+ pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* Get the current PHY config */
+ ice_memset(pcaps, 0, sizeof(*pcaps), ICE_NONDMA_MEM);
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status) {
+ *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
+ goto out;
+ }
+
+ ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
+
+ /* clear the old pause settings */
+ cfg.caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
+ ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg.caps |= pause_mask;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps->caps) {
+ int retry_count, retry_max = 10;
+
+ /* Auto restart link so settings take effect */
+ if (ena_auto_link_update)
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
+ if (status) {
+ *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
+ goto out;
+ }
+
+ /* Update the link info.
+ * It sometimes takes a long time for the link to come back
+ * after the atomic reset, so retry the update a few times.
+ */
+ for (retry_count = 0; retry_count < retry_max; retry_count++) {
+ status = ice_update_link_info(pi);
+
+ if (status == ICE_SUCCESS)
+ break;
+
+ ice_msec_delay(100, true);
+ }
+
+ if (status)
+ *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
+ }
+
+out:
+ ice_free(hw, pcaps);
+ return status;
+}
+
+/**
+ * ice_phy_caps_equals_cfg
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if the PHY capabilities match the PHY
+ * configuration
+ */
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
+ struct ice_aqc_set_phy_cfg_data *phy_cfg)
+{
+ u8 caps_mask, cfg_mask;
+
+ if (!phy_caps || !phy_cfg)
+ return false;
+
+ /* These bits are not common between capabilities and configuration.
+ * Do not use them to determine equality.
+ */
+ caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
+ ICE_AQC_PHY_EN_MOD_QUAL);
+ cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @pi: port information structure
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy AQC PHY get ability data to PHY set configuration
+ * data structure
+ */
+void
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *caps,
+ struct ice_aqc_set_phy_cfg_data *cfg)
+{
+ if (!pi || !caps || !cfg)
+ return;
+
+ ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+
+ if (ice_fw_supports_link_override(pi->hw)) {
+ struct ice_link_default_override_tlv tlv;
+
+ if (ice_get_link_default_override(&tlv, pi))
+ return;
+
+ if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
+ cfg->module_compliance_enforcement |=
+ ICE_LINK_OVERRIDE_STRICT_MODE;
+ }
+}
+
+/**
+ * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
+ * @pi: port information structure
+ * @cfg: PHY configuration data to set FEC mode
+ * @fec: FEC mode to configure
+ */
+enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+ enum ice_fec_mode fec)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw;
+
+ if (!pi || !cfg)
+ return ICE_ERR_BAD_PTR;
+
+ hw = pi->hw;
+
+ pcaps = (struct ice_aqc_get_phy_caps_data *)
+ ice_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+ NULL);
+ if (status)
+ goto out;
+
+ switch (fec) {
+ case ICE_FEC_BASER:
+ /* Clear the RS bits by ANDing with the BASE-R ability
+ * bits, then OR in the BASE-R request bits.
+ */
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+ ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+ ICE_AQC_PHY_FEC_25G_KR_REQ;
+ break;
+ case ICE_FEC_RS:
+ /* Clear the BASE-R bits by ANDing with the RS ability
+ * bits, then OR in the RS request bits.
+ */
+ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
+ cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ;
+ break;
+ case ICE_FEC_NONE:
+ /* Clear all FEC option bits. */
+ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
+ break;
+ case ICE_FEC_AUTO:
+ /* AND auto FEC bit, and all caps bits. */
+ cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
+ cfg->link_fec_opt |= pcaps->link_fec_options;
+ break;
+ default:
+ status = ICE_ERR_PARAM;
+ break;
+ }
+
+ if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
+ struct ice_link_default_override_tlv tlv;
+
+ if (ice_get_link_default_override(&tlv, pi))
+ goto out;
+
+ if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
+ (tlv.options & ICE_LINK_OVERRIDE_EN))
+ cfg->link_fec_opt = tlv.fec_options;
+ }
+
+out:
+ ice_free(hw, pcaps);
+
+ return status;
+}
+
+/**
+ * ice_get_link_status - get status of the HW network link
+ * @pi: port information structure
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * Variable link_up is true if link is up, false if link is down.
+ * The variable link_up is invalid if status is non-zero. As a
+ * result of this call, link status reporting becomes enabled.
+ */
+enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+{
+ struct ice_phy_info *phy_info;
+ enum ice_status status = ICE_SUCCESS;
+
+ if (!pi || !link_up)
+ return ICE_ERR_PARAM;
+
+ phy_info = &pi->phy;
+
+ if (phy_info->get_link_info) {
+ status = ice_update_link_info(pi);
+
+ if (status)
+ ice_debug(pi->hw, ICE_DBG_LINK,
+ "get link status error, status = %d\n",
+ status);
+ }
+
+ *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
+
+ return status;
+}
+
+/**
+ * ice_aq_set_link_restart_an
+ * @pi: pointer to the port information structure
+ * @ena_link: if true: enable link, if false: disable link
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ */
+enum ice_status
+ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_restart_an *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
+
+ cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
+ cmd->lport_num = pi->lport;
+ if (ena_link)
+ cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
+
+ return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_event_mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set event mask (0x0613)
+ */
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_event_mask *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = CPU_TO_LE16(mask);
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_mac_loopback
+ * @hw: pointer to the HW struct
+ * @ena_lpbk: Enable or Disable loopback
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable/disable loopback on a given port
+ */
+enum ice_status
+ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_mac_lb *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_mac_lb;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
+ if (ena_lpbk)
+ cmd->lb_mode = ICE_AQ_MAC_LB_EN;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_port_id_led
+ * @pi: pointer to the port information
+ * @is_orig_mode: is this LED set to original mode (by the net-list)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set LED value for the given port (0x06e9)
+ */
+enum ice_status
+ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_port_id_led *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
+
+ if (is_orig_mode)
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_sff_eeprom
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset. Lower 8 bits for address, upper 8 bits zero padding.
+ * @page: QSFP page
+ * @set_page: set or ignore the page
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: 0 for read, 1 for write.
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read/Write SFF EEPROM (0x06EE)
+ */
+enum ice_status
+ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
+ bool write, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_sff_eeprom *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!data || (mem_addr & 0xff00))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
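+ /* pack the 7-bit I2C bus address and the set_page control into one field */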
+ cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
+ ICE_AQC_SFF_I2CBUS_7BIT_M) |
+ ((set_page <<
+ ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
+ ICE_AQC_SFF_SET_EEPROM_PAGE_M));
+ cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
+ cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
+ if (write)
+ cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
+
+ status = ice_aq_send_cmd(hw, &desc, data, length, cd);
+ return status;
+}
+
+/**
+ * __ice_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: VSI FW index
+ * @lut_type: LUT table type
+ * @lut: pointer to the LUT buffer provided by the caller
+ * @lut_size: size of the LUT buffer
+ * @glob_lut_idx: global LUT index
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
+ */
+static enum ice_status
+__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
+ u16 lut_size, u8 glob_lut_idx, bool set)
+{
+ struct ice_aqc_get_set_rss_lut *cmd_resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 flags = 0;
+
+ cmd_resp = &desc.params.get_set_rss_lut;
+
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
+ }
+
+ cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
+ ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
+ ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
+ ICE_AQC_GSET_RSS_LUT_VSI_VALID);
+
+ switch (lut_type) {
+ case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
+ case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
+ case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
+ flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
+ break;
+ default:
+ status = ICE_ERR_PARAM;
+ goto ice_aq_get_set_rss_lut_exit;
+ }
+
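+ /* only set operations on Global and PF LUTs carry a size; all other
+ * combinations go straight to sending the command
+ */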
+ if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
+ flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
+ ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
+
+ if (!set)
+ goto ice_aq_get_set_rss_lut_send;
+ } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+ if (!set)
+ goto ice_aq_get_set_rss_lut_send;
+ } else {
+ goto ice_aq_get_set_rss_lut_send;
+ }
+
+ /* LUT size is only valid for Global and PF table types */
+ switch (lut_size) {
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ break;
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ break;
+ case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+ if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+ flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+ ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+ break;
+ }
+ /* fall-through */
+ default:
+ status = ICE_ERR_PARAM;
+ goto ice_aq_get_set_rss_lut_exit;
+ }
+
+ice_aq_get_set_rss_lut_send:
+ cmd_resp->flags = CPU_TO_LE16(flags);
+ status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
+
+ice_aq_get_set_rss_lut_exit:
+ return status;
+}
+
+/**
+ * ice_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @lut_type: LUT table type
+ * @lut: pointer to the LUT buffer provided by the caller
+ * @lut_size: size of the LUT buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ */
+enum ice_status
+ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, false);
+}
+
+/**
+ * ice_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @lut_type: LUT table type
+ * @lut: pointer to the LUT buffer provided by the caller
+ * @lut_size: size of the LUT buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ */
+enum ice_status
+ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
+ u8 *lut, u16 lut_size)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ lut_type, lut, lut_size, 0, true);
+}
+
+/**
+ * __ice_aq_get_set_rss_key
+ * @hw: pointer to the HW struct
+ * @vsi_id: VSI FW index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get (0x0B04) or set (0x0B02) the RSS key per VSI
+ */
+static enum ice_status
+__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *key,
+ bool set)
+{
+ struct ice_aqc_get_set_rss_key *cmd_resp;
+ u16 key_size = sizeof(*key);
+ struct ice_aq_desc desc;
+
+ cmd_resp = &desc.params.get_set_rss_key;
+
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
+ }
+
+ cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
+ ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
+ ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
+ ICE_AQC_GSET_RSS_KEY_VSI_VALID);
+
+ return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
+}
+
+/**
+ * ice_aq_get_rss_key
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ */
+enum ice_status
+ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_aqc_get_set_rss_keys *key)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ key, false);
+}
+
+/**
+ * ice_aq_set_rss_key
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @keys: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ */
+enum ice_status
+ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_aqc_get_set_rss_keys *keys)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
+ return ICE_ERR_PARAM;
+
+ return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
+ keys, true);
+}
+
+/**
+ * ice_aq_add_lan_txq
+ * @hw: pointer to the hardware structure
+ * @num_qgrps: Number of added queue groups
+ * @qg_list: list of queue groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add Tx LAN queue (0x0C30)
+ *
+ * NOTE:
+ * Prior to calling add Tx LAN queue, initialize the following as part of
+ * the Tx queue context: the Completion queue ID (if the queue uses a
+ * Completion queue), the Quanta profile, the Cache profile and the Packet
+ * shaper profile.
+ *
+ * After the add Tx LAN queue AQ command completes, interrupts should be
+ * associated with the specific queues. Associating a Tx queue with a
+ * Doorbell queue is not part of the Add LAN Tx queue flow.
+ */
+enum ice_status
+ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
+ struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ u16 i, sum_header_size, sum_q_size = 0;
+ struct ice_aqc_add_tx_qgrp *list;
+ struct ice_aqc_add_txqs *cmd;
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.add_txqs;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
+
+ if (!qg_list)
+ return ICE_ERR_PARAM;
+
+ if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
+ return ICE_ERR_PARAM;
+
+ sum_header_size = num_qgrps *
+ (sizeof(*qg_list) - sizeof(*qg_list->txqs));
+
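+ /* walk the variable-length queue group list to total the size of the
+ * per-queue entries so that the caller-provided buffer size can be
+ * validated below
+ */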
+ list = qg_list;
+ for (i = 0; i < num_qgrps; i++) {
+ struct ice_aqc_add_txqs_perq *q = list->txqs;
+
+ sum_q_size += list->num_txqs * sizeof(*q);
+ list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
+ }
+
+ if (buf_size != (sum_header_size + sum_q_size))
+ return ICE_ERR_PARAM;
+
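+ /* set RD bit to indicate that the command buffer is provided by the
+ * driver and needs to be read by the firmware
+ */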
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ cmd->num_qgrps = num_qgrps;
+
+ return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
+}
+
+/**
+ * ice_aq_dis_lan_txq
+ * @hw: pointer to the hardware structure
+ * @num_qgrps: number of groups in the list
+ * @qg_list: the list of groups to disable
+ * @buf_size: the total size of the qg_list buffer in bytes
+ * @rst_src: if called due to reset, specifies the reset source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
+ * @cd: pointer to command details structure or NULL
+ *
+ * Disable LAN Tx queue (0x0C31)
+ */
+static enum ice_status
+ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
+ struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_dis_txqs *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 i, sz = 0;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ cmd = &desc.params.dis_txqs;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
+
+ /* qg_list can be NULL only in VM/VF reset flow */
+ if (!qg_list && !rst_src)
+ return ICE_ERR_PARAM;
+
+ if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
+ return ICE_ERR_PARAM;
+
+ cmd->num_entries = num_qgrps;
+
+ cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
+ ICE_AQC_Q_DIS_TIMEOUT_M);
+
+ switch (rst_src) {
+ case ICE_VM_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
+ cmd->vmvf_and_timeout |=
+ CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_VF_RESET:
+ cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
+ /* In this case, FW expects vmvf_num to be absolute VF ID */
+ cmd->vmvf_and_timeout |=
+ CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
+ ICE_AQC_Q_DIS_VMVF_NUM_M);
+ break;
+ case ICE_NO_RESET:
+ default:
+ break;
+ }
+
+ /* flush pipe on timeout */
+ cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
+ /* If no queue group info, we are in a reset flow. Issue the AQ */
+ if (!qg_list)
+ goto do_aq;
+
+ /* set RD bit to indicate that command buffer is provided by the driver
+ * and it needs to be read by the firmware
+ */
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ for (i = 0; i < num_qgrps; ++i) {
+ /* Calculate the size taken up by the queue IDs in this group */
+ sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
+
+ /* Add the size of the group header */
+ sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
+
+ /* If the num of queues is even, add 2 bytes of padding */
+ if ((qg_list[i].num_qs % 2) == 0)
+ sz += 2;
+ }
+
+ if (buf_size != sz)
+ return ICE_ERR_PARAM;
+
+do_aq:
+ status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
+ if (status) {
+ if (!qg_list)
+ ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
+ vmvf_num, hw->adminq.sq_last_status);
+ else
+ ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
+ LE16_TO_CPU(qg_list[0].q_id[0]),
+ hw->adminq.sq_last_status);
+ }
+ return status;
+}
+
+/**
+ * ice_aq_move_recfg_lan_txq
+ * @hw: pointer to the hardware structure
+ * @num_qs: number of queues to move/reconfigure
+ * @is_move: true if this operation involves node movement
+ * @is_tc_change: true if this operation involves a TC change
+ * @subseq_call: true if this operation is a subsequent call
+ * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
+ * @timeout: timeout in units of 100 usec (valid values 0-50)
+ * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
+ * @buf: struct containing src/dest TEID and per-queue info
+ * @buf_size: size of buffer for indirect command
+ * @txqs_moved: out param, number of queues successfully moved
+ * @cd: pointer to command details structure or NULL
+ *
+ * Move / Reconfigure Tx LAN queues (0x0C32)
+ */
+enum ice_status
+ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
+ bool is_tc_change, bool subseq_call, bool flush_pipe,
+ u8 timeout, u32 *blocked_cgds,
+ struct ice_aqc_move_txqs_data *buf, u16 buf_size,
+ u8 *txqs_moved, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_move_txqs *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.move_txqs;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
+
+#define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
+ if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
+ return ICE_ERR_PARAM;
+
+ if (is_tc_change && !flush_pipe && !blocked_cgds)
+ return ICE_ERR_PARAM;
+
+ if (!is_move && !is_tc_change)
+ return ICE_ERR_PARAM;
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (is_move)
+ cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
+
+ if (is_tc_change)
+ cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
+
+ if (subseq_call)
+ cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
+
+ if (flush_pipe)
+ cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
+
+ cmd->num_qs = num_qs;
+ cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
+ ICE_AQC_Q_CMD_TIMEOUT_M);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+
+ if (!status && txqs_moved)
+ *txqs_moved = cmd->num_qs;
+
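+ /* on an EAGAIN response to a TC change without a pipe flush, report
+ * which CGDs blocked the operation back to the caller
+ */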
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
+ is_tc_change && !flush_pipe)
+ *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
+
+ return status;
+}
+
+/* End of FW Admin Queue command wrappers */
+
+/**
+ * ice_write_byte - write a byte to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ u8 src_byte, dest_byte, mask;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ src_byte = *from;
+ src_byte &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_byte <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
+
+ dest_byte &= ~mask; /* get the bits not changing */
+ dest_byte |= src_byte; /* add in the new bits */
+
+ /* put it all back */
+ ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_write_word - write a word to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ u16 src_word, mask;
+ __le16 dest_word;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_word = *(u16 *)from;
+ src_word &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_word <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
+
+ dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
+ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
+
+ /* put it all back */
+ ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_write_dword - write a dword to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ u32 src_dword, mask;
+ __le32 dest_dword;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's count is masked
+ * to 5 bits so the shift will do nothing
+ */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = (u32)~0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_dword = *(u32 *)from;
+ src_dword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_dword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
+
+ dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
+ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
+
+ /* put it all back */
+ ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_write_qword - write a qword to a packed context structure
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ u64 src_qword, mask;
+ __le64 dest_qword;
+ u8 *from, *dest;
+ u16 shift_width;
+
+ /* copy from the next struct field */
+ from = src_ctx + ce_info->offset;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's count is masked
+ * to 6 bits so the shift will do nothing
+ */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = (u64)~0;
+
+ /* don't swizzle the bits until after the mask because the mask bits
+ * will be in a different bit position on big endian machines
+ */
+ src_qword = *(u64 *)from;
+ src_qword &= mask;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ src_qword <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ dest = dest_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
+
+ dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
+ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
+
+ /* put it all back */
+ ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_set_ctx - set context bits in packed structure
+ * @src_ctx: pointer to a generic non-packed context structure
+ * @dest_ctx: pointer to memory for the packed structure
+ * @ce_info: a description of the structure to be transformed
+ */
+enum ice_status
+ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width; f++) {
+ /* We have to deal with each element of the FW response
+ * using the correct size so that we are correct regardless
+ * of the endianness of the machine.
+ */
+ switch (ce_info[f].size_of) {
+ case sizeof(u8):
+ ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case sizeof(u16):
+ ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case sizeof(u32):
+ ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case sizeof(u64):
+ ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ default:
+ return ICE_ERR_INVAL_SIZE;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_read_byte - read context byte into struct
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ u8 dest_byte, mask;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = (u8)(BIT(ce_info->width) - 1);
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = src_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
+
+ dest_byte &= ~(mask);
+
+ dest_byte >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest_ctx + ce_info->offset;
+
+ /* put it back in the struct */
+ ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_read_word - read context word into struct
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ u16 dest_word, mask;
+ u8 *src, *target;
+ __le16 src_word;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+ mask = BIT(ce_info->width) - 1;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = src_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_word &= ~(CPU_TO_LE16(mask));
+
+ /* get the data back into host order before shifting */
+ dest_word = LE16_TO_CPU(src_word);
+
+ dest_word >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest_ctx + ce_info->offset;
+
+ /* put it back in the struct */
+ ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_read_dword - read context dword into struct
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ u32 dest_dword, mask;
+ __le32 src_dword;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 32 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's count is masked
+ * to 5 bits so the shift will do nothing
+ */
+ if (ce_info->width < 32)
+ mask = BIT(ce_info->width) - 1;
+ else
+ mask = (u32)~0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = src_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_dword &= ~(CPU_TO_LE32(mask));
+
+ /* get the data back into host order before shifting */
+ dest_dword = LE32_TO_CPU(src_dword);
+
+ dest_dword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest_ctx + ce_info->offset;
+
+ /* put it back in the struct */
+ ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_read_qword - read context qword into struct
+ * @src_ctx: the context structure to read from
+ * @dest_ctx: the context to be written to
+ * @ce_info: a description of the struct to be filled
+ */
+static void
+ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ u64 dest_qword, mask;
+ __le64 src_qword;
+ u8 *src, *target;
+ u16 shift_width;
+
+ /* prepare the bits and mask */
+ shift_width = ce_info->lsb % 8;
+
+ /* if the field width is exactly 64 on an x86 machine, then the shift
+ * operation will not work because the SHL instruction's count is masked
+ * to 6 bits so the shift will do nothing
+ */
+ if (ce_info->width < 64)
+ mask = BIT_ULL(ce_info->width) - 1;
+ else
+ mask = (u64)~0;
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+
+ /* get the current bits from the src bit string */
+ src = src_ctx + (ce_info->lsb / 8);
+
+ ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
+
+ /* the data in the memory is stored as little endian so mask it
+ * correctly
+ */
+ src_qword &= ~(CPU_TO_LE64(mask));
+
+ /* get the data back into host order before shifting */
+ dest_qword = LE64_TO_CPU(src_qword);
+
+ dest_qword >>= shift_width;
+
+ /* get the address from the struct field */
+ target = dest_ctx + ce_info->offset;
+
+ /* put it back in the struct */
+ ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
+}
+
+/**
+ * ice_get_ctx - extract context bits from a packed structure
+ * @src_ctx: pointer to a generic packed context structure
+ * @dest_ctx: pointer to a generic non-packed context structure
+ * @ce_info: a description of the structure to be read from
+ */
+enum ice_status
+ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
+{
+ int f;
+
+ for (f = 0; ce_info[f].width; f++) {
+ switch (ce_info[f].size_of) {
+ case 1:
+ ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case 2:
+ ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case 4:
+ ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ case 8:
+ ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
+ break;
+ default:
+ /* nothing to do, just keep going */
+ break;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @q_handle: software queue handle
+ */
+struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
+{
+ struct ice_vsi_ctx *vsi;
+ struct ice_q_ctx *q_ctx;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi)
+ return NULL;
+ if (q_handle >= vsi->num_lan_q_entries[tc])
+ return NULL;
+ if (!vsi->lan_q_ctx[tc])
+ return NULL;
+ q_ctx = vsi->lan_q_ctx[tc];
+ return &q_ctx[q_handle];
+}
+
+/**
+ * ice_ena_vsi_txq
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @q_handle: software queue handle
+ * @num_qgrps: Number of added queue groups
+ * @buf: list of queue groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function adds one LAN queue
+ */
+enum ice_status
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_txsched_elem_data node = { 0 };
+ struct ice_sched_node *parent;
+ struct ice_q_ctx *q_ctx;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return ICE_ERR_CFG;
+
+ if (num_qgrps > 1 || buf->num_txqs > 1)
+ return ICE_ERR_MAX_LIMIT;
+
+ hw = pi->hw;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&pi->sched_lock);
+
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
+ if (!q_ctx) {
+ ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
+ q_handle);
+ status = ICE_ERR_PARAM;
+ goto ena_txq_exit;
+ }
+
+ /* find a parent node */
+ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
+ ICE_SCHED_NODE_OWNER_LAN);
+ if (!parent) {
+ status = ICE_ERR_PARAM;
+ goto ena_txq_exit;
+ }
+
+ buf->parent_teid = parent->info.node_teid;
+ node.parent_teid = parent->info.node_teid;
+ /* Mark the values in the "generic" section as valid. The default
+ * value in the "generic" section is zero. This means that:
+ * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
+ * - 0 priority among siblings, indicated by Bit 1-3.
+ * - WFQ, indicated by Bit 4.
+ * - 0 Adjustment value is used in PSM credit update flow, indicated by
+ * Bit 5-6.
+ * - Bit 7 is reserved.
+ * Without setting the generic section as valid in valid_sections, the
+ * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
+ */
+ buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+
+ /* add the LAN queue */
+ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
+ if (status != ICE_SUCCESS) {
+ ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
+ LE16_TO_CPU(buf->txqs[0].txq_id),
+ hw->adminq.sq_last_status);
+ goto ena_txq_exit;
+ }
+
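+ /* record the queue TEID returned by firmware in the scheduler node and
+ * the software queue context
+ */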
+ node.node_teid = buf->txqs[0].q_teid;
+ node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+ q_ctx->q_handle = q_handle;
+ q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
+
+ /* add a leaf node into scheduler tree queue layer */
+ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
+ if (!status)
+ status = ice_sched_replay_q_bw(pi, q_ctx);
+
+ena_txq_exit:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_dis_vsi_txq
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @num_queues: number of queues
+ * @q_handles: pointer to software queue handle array
+ * @q_ids: pointer to the q_id array
+ * @q_teids: pointer to queue node teids
+ * @rst_src: if called due to reset, specifies the reset source
+ * @vmvf_num: the relative VM or VF number that is undergoing the reset
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function removes queues and their corresponding nodes in SW DB
+ */
+enum ice_status
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handles, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd)
+{
+ enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
+ struct ice_aqc_dis_txq_item qg_list;
+ struct ice_q_ctx *q_ctx;
+ u16 i;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return ICE_ERR_CFG;
+
+ if (!num_queues) {
+ /* If the queues are already disabled but the disable queue command
+ * still has to be sent to complete a VF reset, then call
+ * ice_aq_dis_lan_txq without any queue information
+ */
+ if (rst_src)
+ return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+ vmvf_num, NULL);
+ return ICE_ERR_CFG;
+ }
+
+ ice_acquire_lock(&pi->sched_lock);
+
+ for (i = 0; i < num_queues; i++) {
+ struct ice_sched_node *node;
+
+ node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
+ if (!node)
+ continue;
+ q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+ if (!q_ctx) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
+ q_handles[i]);
+ continue;
+ }
+ if (q_ctx->q_handle != q_handles[i]) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+ q_ctx->q_handle, q_handles[i]);
+ continue;
+ }
+ qg_list.parent_teid = node->info.parent_teid;
+ qg_list.num_qs = 1;
+ qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
+ status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
+ sizeof(qg_list), rst_src, vmvf_num,
+ cd);
+
+ if (status != ICE_SUCCESS)
+ break;
+ ice_free_sched_node(pi, node);
+ q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
+ }
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_qs - configure the new/existing VSI queues
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap
+ * @maxqs: max queues array per TC
+ * @owner: LAN or RDMA
+ *
+ * This function adds/updates the VSI queues per TC.
+ */
+static enum ice_status
+ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *maxqs, u8 owner)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u8 i;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return ICE_ERR_CFG;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&pi->sched_lock);
+
+ ice_for_each_traffic_class(i) {
+ /* configuration is possible only if TC node is present */
+ if (!ice_sched_get_tc_node(pi, i))
+ continue;
+
+ status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
+ ice_is_tc_ena(tc_bitmap, i));
+ if (status)
+ break;
+ }
+
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_lan - configure VSI LAN queues
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap
+ * @max_lanqs: max LAN queues array per TC
+ *
+ * This function adds/updates the VSI LAN queues per TC.
+ */
+enum ice_status
+ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *max_lanqs)
+{
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
+ ICE_SCHED_NODE_OWNER_LAN);
+}
+
+/**
+ * ice_replay_pre_init - replay pre-initialization
+ * @hw: pointer to the HW struct
+ *
+ * Initializes required config data for VSI, FD, ACL, and RSS before replay.
+ */
+static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ /* Delete old entries from replay filter list head if there are any */
+ ice_rm_all_sw_replay_rule_info(hw);
+ /* At the start of replay, move entries into the replay_rules list;
+ * this allows rule entries to be added back to the filt_rules list,
+ * which is the operational list.
+ */
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
+ LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
+ &sw->recp_list[i].filt_replay_rules);
+ ice_sched_replay_agg_vsi_preinit(hw);
+
+ return ice_sched_replay_tc_node_bw(hw->port_info);
+}
+
+/**
+ * ice_replay_vsi - replay VSI configuration
+ * @hw: pointer to the HW struct
+ * @vsi_handle: driver VSI handle
+ *
+ * Restore all VSI configuration after reset. This function must be called
+ * for the main VSI first.
+ */
+enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+ enum ice_status status;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Replay pre-initialization if there is any */
+ if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
+ status = ice_replay_pre_init(hw);
+ if (status)
+ return status;
+ }
+ /* Replay per VSI all RSS configurations */
+ status = ice_replay_rss_cfg(hw, vsi_handle);
+ if (status)
+ return status;
+ /* Replay per VSI all filters */
+ status = ice_replay_vsi_all_fltr(hw, vsi_handle);
+ if (!status)
+ status = ice_replay_vsi_agg(hw, vsi_handle);
+ return status;
+}
+
+/**
+ * ice_replay_post - post replay configuration cleanup
+ * @hw: pointer to the HW struct
+ *
+ * Post replay cleanup.
+ */
+void ice_replay_post(struct ice_hw *hw)
+{
+ /* Delete old entries from replay filter list head */
+ ice_rm_all_sw_replay_rule_info(hw);
+ ice_sched_replay_agg(hw);
+}
+
+/**
+ * ice_stat_update40 - read 40 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @reg: offset of 64 bit HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
+{
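+ /* the statistic occupies the low 40 bits of the 64-bit register */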
+ u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
+
+ /* device stats are not reset at PFR, so they likely will not be zeroed
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
+ */
+ if (!prev_stat_loaded) {
+ *prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
+ if (new_data >= *prev_stat)
+ *cur_stat += new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
+}
+
+/**
+ * ice_stat_update32 - read 32 bit stat from the chip and update stat values
+ * @hw: ptr to the hardware info
+ * @reg: offset of HW register to read from
+ * @prev_stat_loaded: bool to specify if previous stats are loaded
+ * @prev_stat: ptr to previous loaded stat value
+ * @cur_stat: ptr to current stat value
+ */
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+
+ /* device stats are not reset at PFR, so they likely will not be zeroed
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
+ */
+ if (!prev_stat_loaded) {
+ *prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
+ if (new_data >= *prev_stat)
+ *cur_stat += new_data - *prev_stat;
+ else
+ /* to manage the potential roll-over */
+ *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
+}
+
+/**
+ * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
+ * @hw: ptr to the hardware info
+ * @vsi_handle: VSI handle
+ * @prev_stat_loaded: bool to specify if the previous stat values are loaded
+ * @cur_stats: ptr to current stats structure
+ *
+ * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
+ * thus cannot be read using the normal ice_stat_update32 function.
+ *
+ * Read the GLV_REPC register associated with the given VSI, and update the
+ * rx_no_desc and rx_error values in the ice_eth_stats structure.
+ *
+ * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
+ * cleared each time it's read.
+ *
+ * Note that the GLV_RDPC register also counts the causes that would trigger
+ * GLV_REPC. However, it does not give the finer grained detail about why the
+ * packets are being dropped. The GLV_REPC values can be used to distinguish
+ * whether Rx packets are dropped due to errors or due to no available
+ * descriptors.
+ */
+void
+ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
+ struct ice_eth_stats *cur_stats)
+{
+ u16 vsi_num, no_desc, error_cnt;
+ u32 repc;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return;
+
+ vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ /* If we haven't loaded stats yet, just clear the current value */
+ if (!prev_stat_loaded) {
+ wr32(hw, GLV_REPC(vsi_num), 0);
+ return;
+ }
+
+ repc = rd32(hw, GLV_REPC(vsi_num));
+ no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
+ error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
+
+ /* Clear the count by writing to the stats register */
+ wr32(hw, GLV_REPC(vsi_num), 0);
+
+ cur_stats->rx_no_desc += no_desc;
+ cur_stats->rx_errors += error_cnt;
+}
+
+/**
+ * ice_aq_alternate_write
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to the alternate structure. Fields are indicated
+ * by the 'reg_addr0' and 'reg_addr1' register numbers.
+ */
+enum ice_status
+ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
+ u32 reg_addr1, u32 reg_val1)
+{
+ struct ice_aqc_read_write_alt_direct *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
+ cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
+ cmd->dword0_value = CPU_TO_LE32(reg_val0);
+ cmd->dword1_value = CPU_TO_LE32(reg_val1);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * ice_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from the alternate structure. Fields are indicated
+ * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
+ * pointer is not passed, then only the register at 'reg_addr0' is read.
+ */
+enum ice_status
+ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
+{
+ struct ice_aqc_read_write_alt_direct *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ if (!reg_val0)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
+ cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+
+ if (status == ICE_SUCCESS) {
+ *reg_val0 = LE32_TO_CPU(cmd->dword0_value);
+
+ if (reg_val1)
+ *reg_val1 = LE32_TO_CPU(cmd->dword1_value);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_alternate_write_done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates whether the SW should trigger a GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ */
+enum ice_status
+ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
+{
+ struct ice_aqc_done_alt_write *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.done_alt_write;
+
+ if (!reset_needed)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
+ cmd->flags = bios_mode;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *reset_needed = (LE16_TO_CPU(cmd->flags) &
+ ICE_AQC_RESP_RESET_NEEDED) != 0;
+
+ return status;
+}
+
+/**
+ * ice_aq_alternate_clear
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ */
+enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * ice_sched_query_elem - query element information from HW
+ * @hw: pointer to the HW struct
+ * @node_teid: node TEID to be queried
+ * @buf: buffer to hold the element information
+ *
+ * This function queries HW element information
+ */
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf)
+{
+ u16 buf_size, num_elem_ret = 0;
+ enum ice_status status;
+
+ buf_size = sizeof(*buf);
+ ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
+ buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
+ status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
+ NULL);
+ if (status != ICE_SUCCESS || num_elem_ret != 1)
+ ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
+ return status;
+}
+
+/**
+ * ice_get_fw_mode - returns FW mode
+ * @hw: pointer to the HW struct
+ */
+enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
+{
+#define ICE_FW_MODE_DBG_M BIT(0)
+#define ICE_FW_MODE_REC_M BIT(1)
+#define ICE_FW_MODE_ROLLBACK_M BIT(2)
+ u32 fw_mode;
+
+ /* check the current FW mode */
+ fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
+
+ if (fw_mode & ICE_FW_MODE_DBG_M)
+ return ICE_FW_MODE_DBG;
+ else if (fw_mode & ICE_FW_MODE_REC_M)
+ return ICE_FW_MODE_REC;
+ else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
+ return ICE_FW_MODE_ROLLBACK;
+ else
+ return ICE_FW_MODE_NORMAL;
+}
+
+/**
+ * ice_get_cur_lldp_persist_status
+ * @hw: pointer to the HW struct
+ * @lldp_status: return value of LLDP persistent status
+ *
+ * Get the current LLDP persistent status
+ */
+enum ice_status
+ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status ret;
+ __le32 raw_data;
+ u32 data, mask;
+
+ if (!lldp_status)
+ return ICE_ERR_BAD_PTR;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
+ ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
+ ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
+ false, true, NULL);
+ if (!ret) {
+ data = LE32_TO_CPU(raw_data);
+ mask = ICE_AQC_NVM_LLDP_STATUS_M <<
+ (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
+ data = data & mask;
+ *lldp_status = data >>
+ (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
+ }
+
+ ice_release_nvm(hw);
+
+ return ret;
+}
+
+/**
+ * ice_get_dflt_lldp_persist_status
+ * @hw: pointer to the HW struct
+ * @lldp_status: return value of LLDP persistent status
+ *
+ * Get the default LLDP persistent status
+ */
+enum ice_status
+ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
+{
+ struct ice_port_info *pi = hw->port_info;
+ u32 data, mask, loc_data, loc_data_tmp;
+ enum ice_status ret;
+ __le16 loc_raw_data;
+ __le32 raw_data;
+
+ if (!lldp_status)
+ return ICE_ERR_BAD_PTR;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+
+ /* Read the offset of EMP_SR_PTR */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
+ ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
+ ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
+ &loc_raw_data, false, true, NULL);
+ if (ret)
+ goto exit;
+
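+ /* the EMP SR pointer may be given in sector or word units; convert it
+ * to the proper NVM offset accordingly
+ */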
+ loc_data = LE16_TO_CPU(loc_raw_data);
+ if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
+ loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
+ loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
+ } else {
+ loc_data *= ICE_AQC_NVM_WORD_UNIT;
+ }
+
+ /* Read the offset of LLDP configuration pointer */
+ loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
+ ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
+ false, true, NULL);
+ if (ret)
+ goto exit;
+
+ loc_data_tmp = LE16_TO_CPU(loc_raw_data);
+ loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
+ loc_data += loc_data_tmp;
+
+ /* We need to skip the LLDP configuration section length (2 bytes) */
+ loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
+
+ /* Read the LLDP default configuration */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
+ ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
+ true, NULL);
+ if (!ret) {
+ data = LE32_TO_CPU(raw_data);
+ mask = ICE_AQC_NVM_LLDP_STATUS_M <<
+ (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
+ data = data & mask;
+ *lldp_status = data >>
+ (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
+ }
+
+exit:
+ ice_release_nvm(hw);
+
+ return ret;
+}
+
+/**
+ * ice_get_netlist_ver_info
+ * @hw: pointer to the HW struct
+ *
+ * Get the netlist version information
+ */
+enum ice_status
+ice_get_netlist_ver_info(struct ice_hw *hw)
+{
+ struct ice_netlist_ver_info *ver = &hw->netlist_ver;
+ enum ice_status ret;
+ u32 id_blk_start;
+ __le16 raw_data;
+ u16 data, i;
+ u16 *buff;
+
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret)
+ return ret;
+ buff = (u16 *)ice_calloc(hw, ICE_AQC_NVM_NETLIST_ID_BLK_LEN,
+ sizeof(*buff));
+ if (!buff) {
+ ret = ICE_ERR_NO_MEMORY;
+ goto exit_no_mem;
+ }
+
+ /* read module length */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
+ ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+
+ data = LE16_TO_CPU(raw_data);
+ /* exit if length is 0 */
+ if (!data)
+ goto exit_error;
+
+ /* read node count */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
+ ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
+ false, false, NULL);
+ if (ret)
+ goto exit_error;
+ data = LE16_TO_CPU(raw_data);
+
+ /* netlist ID block starts from offset 4 + node count * 2 */
+ id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
+
+ /* read the entire netlist ID block */
+ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
+ id_blk_start * 2,
+ ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
+ false, NULL);
+ if (ret)
+ goto exit_error;
+
+ for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
+ buff[i] = LE16_TO_CPU(((_FORCE_ __le16 *)buff)[i]);
+
+ ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
+ ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
+ ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
+ ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
+	/* Read the leftmost 4 bytes of the SHA hash */
+ ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
+ buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
+
+exit_error:
+ ice_free(hw, buff);
+exit_no_mem:
+ ice_release_nvm(hw);
+ return ret;
+}
+
+/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ return true;
+ if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
+ hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ return true;
+ } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ return true;
+ }
+
+ return false;
+}
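+
+/*
+ * Worked example (illustrative): assuming the override support constants
+ * are ICE_FW_API_LINK_OVERRIDE_MAJ = 1, _MIN = 5 and _PATCH = 2 (values
+ * not shown in this hunk), firmware reporting API 1.5.2, 1.6.0 or 2.0.0
+ * makes ice_fw_supports_link_override() return true, while API 1.5.1 or
+ * 1.4.x returns false.
+ */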
+
+/**
+ * ice_get_link_default_override
+ * @ldo: pointer to the link default override struct
+ * @pi: pointer to the port info struct
+ *
+ * Gets the link default override for a port
+ */
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+ struct ice_port_info *pi)
+{
+ u16 i, tlv, tlv_len, tlv_start, buf, offset;
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
+ ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read link override TLV.\n");
+ return status;
+ }
+
+ /* Each port has its own config; calculate for our port */
+ tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
+ ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
+
+ /* link options first */
+ status = ice_read_sr_word(hw, tlv_start, &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
+ ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
+ ICE_LINK_OVERRIDE_PHY_CFG_S;
+
+ /* link PHY config */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
+ status = ice_read_sr_word(hw, offset, &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override phy config.\n");
+ return status;
+ }
+ ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
+
+ /* PHY types low */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+ status = ice_read_sr_word(hw, (offset + i), &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ /* shift 16 bits at a time to fill 64 bits */
+ ldo->phy_type_low |= ((u64)buf << (i * 16));
+ }
+
+ /* PHY types high */
+ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
+ ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
+ for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+ status = ice_read_sr_word(hw, (offset + i), &buf);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read override link options.\n");
+ return status;
+ }
+ /* shift 16 bits at a time to fill 64 bits */
+ ldo->phy_type_high |= ((u64)buf << (i * 16));
+ }
+
+ return status;
+}
Index: sys/dev/ice/ice_common_sysctls.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_common_sysctls.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_common_sysctls.h
+ * @brief driver wide sysctls not related to the iflib stack
+ *
+ * Contains static sysctl values which are driver wide and configure all
+ * devices of the driver at once.
+ *
+ * Device specific sysctls are setup by functions in ice_lib.c
+ */
+
+#ifndef _ICE_COMMON_SYSCTLS_H_
+#define _ICE_COMMON_SYSCTLS_H_
+
+#include <sys/sysctl.h>
+
+/**
+ * @var ice_enable_tx_fc_filter
+ * @brief boolean indicating if the Tx Flow Control filter should be enabled
+ *
+ * Global sysctl variable indicating whether the Tx Flow Control filters
+ * should be enabled. If true, Ethertype 0x8808 packets will be dropped if
+ * they come from non-HW sources. If false, packets coming from software will
+ * not be dropped. Leave this on unless you must send flow control frames
+ * (or other control frames) from software.
+ *
+ * @remark each PF has a separate sysctl which can override this value.
+ */
+bool ice_enable_tx_fc_filter = true;
+
+/**
+ * @var ice_enable_tx_lldp_filter
+ * @brief boolean indicating if the Tx LLDP filter should be enabled
+ *
+ * Global sysctl variable indicating whether the Tx LLDP filters
+ * should be enabled. If true, Ethertype 0x88cc packets will be dropped if
+ * they come from non-HW sources. If false, packets coming from software will
+ * not be dropped. Leave this on unless you must send LLDP frames from
+ * software.
+ *
+ * @remark each PF has a separate sysctl which can override this value.
+ */
+bool ice_enable_tx_lldp_filter = true;
+
+/* sysctls marked as tunable (i.e. with CTLFLAG_TUN set) will
+ * automatically load tunable values, without the need to manually create the
+ * TUNABLE definition.
+ *
+ * This works since at least FreeBSD 11, and was backported into FreeBSD 10
+ * before the FreeBSD 10.1-RELEASE.
+ *
+ * If the tunable needs a custom loader, mark the SYSCTL as CTLFLAG_NOFETCH,
+ * and create the tunable manually.
+ */
+
+static SYSCTL_NODE(_hw, OID_AUTO, ice, CTLFLAG_RD, 0, "ICE driver parameters");
+
+static SYSCTL_NODE(_hw_ice, OID_AUTO, debug, ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 0,
+ "ICE driver debug parameters");
+
+SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
+ &ice_enable_tx_fc_filter, 0,
+ "Drop Ethertype 0x8808 control frames originating from non-HW sources");
+
+SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_lldp_filter, CTLFLAG_RDTUN,
+ &ice_enable_tx_lldp_filter, 0,
+ "Drop Ethertype 0x88cc LLDP frames originating from non-HW sources");
+
+#endif /* _ICE_COMMON_SYSCTLS_H_ */
Index: sys/dev/ice/ice_common_txrx.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_common_txrx.h
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_common_txrx.h
+ * @brief common Tx/Rx utility functions
+ *
+ * Contains common utility functions for the Tx/Rx hot path.
+ *
+ * The functions do depend on the if_pkt_info_t structure. A suitable
+ * implementation of this structure must be provided if these functions are to
+ * be used without the iflib networking stack.
+ */
+
+#ifndef _ICE_COMMON_TXRX_H_
+#define _ICE_COMMON_TXRX_H_
+
+#include <netinet/udp.h>
+#include <netinet/sctp.h>
+
+/**
+ * ice_tso_detect_sparse - detect TSO packets with too many segments
+ * @pi: packet information
+ *
+ * Hardware only transmits packets with a maximum of 8 descriptors. For TSO
+ * packets, hardware needs to be able to build the split packets using 8 or
+ * fewer descriptors. Additionally, the header must be contained within at
+ * most 3 descriptors.
+ *
+ * To verify this, we walk the headers to find out how many descriptors the
+ * headers require (usually 1). Then we ensure that, for each TSO segment, its
+ * data plus the headers are contained within 8 or fewer descriptors.
+ */
+static inline int
+ice_tso_detect_sparse(if_pkt_info_t pi)
+{
+ int count, curseg, i, hlen, segsz, seglen, tsolen, hdrs, maxsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ int nsegs = pi->ipi_nsegs;
+
+ curseg = hdrs = 0;
+
+ hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
+ tsolen = pi->ipi_len - hlen;
+
+ /* First, count the number of descriptors for the header.
+ * Additionally, make sure it does not span more than 3 segments.
+ */
+ i = 0;
+ curseg = segs[0].ds_len;
+ while (hlen > 0) {
+ hdrs++;
+ if (hdrs > ICE_MAX_TSO_HDR_SEGS)
+ return (1);
+ if (curseg == 0) {
+ i++;
+ if (__predict_false(i == nsegs))
+ return (1);
+
+ curseg = segs[i].ds_len;
+ }
+ seglen = min(curseg, hlen);
+ curseg -= seglen;
+ hlen -= seglen;
+ }
+
+ maxsegs = ICE_MAX_TX_SEGS - hdrs;
+
+	/* We must count the headers in order to verify that they take up
+	 * 3 or fewer descriptors. However, we don't need to check the data
+	 * if the total number of segments is small.
+ */
+ if (nsegs <= maxsegs)
+ return (0);
+
+ count = 0;
+
+ /* Now check the data to make sure that each TSO segment is made up of
+ * no more than maxsegs descriptors. This ensures that hardware will
+ * be capable of performing TSO offload.
+ */
+ while (tsolen > 0) {
+ segsz = pi->ipi_tso_segsz;
+ while (segsz > 0 && tsolen != 0) {
+ count++;
+ if (count > maxsegs) {
+ return (1);
+ }
+ if (curseg == 0) {
+ i++;
+ if (__predict_false(i == nsegs)) {
+ return (1);
+ }
+ curseg = segs[i].ds_len;
+ }
+ seglen = min(curseg, segsz);
+ segsz -= seglen;
+ curseg -= seglen;
+ tsolen -= seglen;
+ }
+ count = 0;
+ }
+
+ return (0);
+}
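+
+/*
+ * Worked example (illustrative): if ICE_MAX_TX_SEGS is 8, matching the
+ * 8-descriptor hardware limit described above, and the headers fit in a
+ * single descriptor, then maxsegs is 7 and every MSS-sized chunk of the
+ * TSO payload must be covered by at most 7 mbuf segments; a chain that
+ * spreads one MSS across 8 or more segments causes the function to
+ * report the packet as sparse by returning 1.
+ */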
+
+/**
+ * ice_tso_setup - Setup a context descriptor to prepare for a TSO packet
+ * @txq: the Tx queue to use
+ * @pi: the packet info to prepare for
+ *
+ * Setup a context descriptor in preparation for sending a Tx packet that
+ * requires the TSO offload. Returns the index of the descriptor to use when
+ * encapsulating the Tx packet data into descriptors.
+ */
+static inline int
+ice_tso_setup(struct ice_tx_queue *txq, if_pkt_info_t pi)
+{
+ struct ice_tx_ctx_desc *txd;
+ u32 cmd, mss, type, tsolen;
+ int idx;
+ u64 type_cmd_tso_mss;
+
+ idx = pi->ipi_pidx;
+ txd = (struct ice_tx_ctx_desc *)&txq->tx_base[idx];
+ tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen);
+
+ type = ICE_TX_DESC_DTYPE_CTX;
+ cmd = ICE_TX_CTX_DESC_TSO;
+ /* TSO MSS must not be less than 64 */
+ if (pi->ipi_tso_segsz < ICE_MIN_TSO_MSS) {
+ txq->stats.mss_too_small++;
+ pi->ipi_tso_segsz = ICE_MIN_TSO_MSS;
+ }
+ mss = pi->ipi_tso_segsz;
+
+ type_cmd_tso_mss = ((u64)type << ICE_TXD_CTX_QW1_DTYPE_S) |
+ ((u64)cmd << ICE_TXD_CTX_QW1_CMD_S) |
+ ((u64)tsolen << ICE_TXD_CTX_QW1_TSO_LEN_S) |
+ ((u64)mss << ICE_TXD_CTX_QW1_MSS_S);
+ txd->qw1 = htole64(type_cmd_tso_mss);
+
+ txd->tunneling_params = htole32(0);
+ txq->tso++;
+
+ return ((idx + 1) & (txq->desc_count-1));
+}
+
+/**
+ * ice_tx_setup_offload - Setup register values for performing a Tx offload
+ * @txq: The Tx queue, used to track checksum offload stats
+ * @pi: the packet info to program for
+ * @cmd: the cmd register value to update
+ * @off: the off register value to update
+ *
+ * Based on the packet info provided, update the cmd and off values for
+ * enabling Tx offloads. This depends on the packet type and which offloads
+ * have been requested.
+ *
+ * We also track the total number of times that we've requested hardware
+ * offload a particular type of checksum for debugging purposes.
+ */
+static inline void
+ice_tx_setup_offload(struct ice_tx_queue *txq, if_pkt_info_t pi, u32 *cmd, u32 *off)
+{
+ u32 remaining_csum_flags = pi->ipi_csum_flags;
+
+ switch (pi->ipi_etype) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ if (pi->ipi_csum_flags & ICE_CSUM_IP) {
+ *cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ txq->stats.cso[ICE_CSO_STAT_TX_IP4]++;
+ remaining_csum_flags &= ~CSUM_IP;
+ } else
+ *cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ *cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+ /*
+ * This indicates that the IIPT flag was set to the IPV6 value;
+ * there's no checksum for IPv6 packets.
+ */
+ txq->stats.cso[ICE_CSO_STAT_TX_IP6]++;
+ break;
+#endif
+ default:
+ txq->stats.cso[ICE_CSO_STAT_TX_L3_ERR]++;
+ break;
+ }
+
+ *off |= (pi->ipi_ehdrlen >> 1) << ICE_TX_DESC_LEN_MACLEN_S;
+ *off |= (pi->ipi_ip_hlen >> 2) << ICE_TX_DESC_LEN_IPLEN_S;
+
+ if (!(remaining_csum_flags & ~ICE_RX_CSUM_FLAGS))
+ return;
+
+ switch (pi->ipi_ipproto) {
+ case IPPROTO_TCP:
+ if (pi->ipi_csum_flags & ICE_CSUM_TCP) {
+ *cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+ *off |= (pi->ipi_tcp_hlen >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ txq->stats.cso[ICE_CSO_STAT_TX_TCP]++;
+ }
+ break;
+ case IPPROTO_UDP:
+ if (pi->ipi_csum_flags & ICE_CSUM_UDP) {
+ *cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+ *off |= (sizeof(struct udphdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ txq->stats.cso[ICE_CSO_STAT_TX_UDP]++;
+ }
+ break;
+ case IPPROTO_SCTP:
+ if (pi->ipi_csum_flags & ICE_CSUM_SCTP) {
+ *cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *off |= (sizeof(struct sctphdr) >> 2) <<
+ ICE_TX_DESC_LEN_L4_LEN_S;
+ txq->stats.cso[ICE_CSO_STAT_TX_SCTP]++;
+ }
+ break;
+ default:
+ txq->stats.cso[ICE_CSO_STAT_TX_L4_ERR]++;
+ break;
+ }
+}
+
+/**
+ * ice_rx_checksum - verify hardware checksum is valid or not
+ * @rxq: the Rx queue structure
+ * @flags: checksum flags to update
+ * @data: checksum data to update
+ * @status0: descriptor status data
+ * @ptype: packet type
+ *
+ * Determine whether the hardware indicated that the Rx checksum is valid. If
+ * so, update the checksum flags and data, informing the stack of the status
+ * of the checksum so that it does not spend time verifying it manually.
+ */
+static void
+ice_rx_checksum(struct ice_rx_queue *rxq, uint32_t *flags, uint32_t *data,
+ u16 status0, u16 ptype)
+{
+ const u16 l3_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S));
+ const u16 l4_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S));
+ const u16 xsum_errors = (l3_error | l4_error |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S));
+ struct ice_rx_ptype_decoded decoded;
+ bool is_ipv4, is_ipv6;
+
+ /* No L3 or L4 checksum was calculated */
+ if (!(status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) {
+ return;
+ }
+
+ decoded = ice_decode_rx_desc_ptype(ptype);
+ *flags = 0;
+
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ is_ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
+ is_ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
+ (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+
+ /* No checksum errors were reported */
+ if (!(status0 & xsum_errors)) {
+ if (is_ipv4)
+ *flags |= CSUM_L3_CALC | CSUM_L3_VALID;
+
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ case ICE_RX_PTYPE_INNER_PROT_SCTP:
+ *flags |= CSUM_L4_CALC | CSUM_L4_VALID;
+ *data |= htons(0xffff);
+ break;
+ default:
+ break;
+ }
+
+ return;
+ }
+
+ /*
+ * Certain IPv6 extension headers impact the validity of L4 checksums.
+	 * If one of these headers exists, hardware will set the IPV6EXADD bit
+	 * in the descriptor. If the bit is set, pretend that hardware
+ * didn't checksum this packet.
+ */
+ if (is_ipv6 && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))) {
+ rxq->stats.cso[ICE_CSO_STAT_RX_IP6_ERR]++;
+ return;
+ }
+
+ /*
+ * At this point, status0 must have at least one of the l3_error or
+ * l4_error bits set.
+ */
+
+ if (status0 & l3_error) {
+ if (is_ipv4) {
+ rxq->stats.cso[ICE_CSO_STAT_RX_IP4_ERR]++;
+ *flags |= CSUM_L3_CALC;
+ } else {
+ /* Hardware indicated L3 error but this isn't IPv4? */
+ rxq->stats.cso[ICE_CSO_STAT_RX_L3_ERR]++;
+ }
+ /* don't bother reporting L4 errors if we got an L3 error */
+ return;
+ } else if (is_ipv4) {
+ *flags |= CSUM_L3_CALC | CSUM_L3_VALID;
+ }
+
+ if (status0 & l4_error) {
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ rxq->stats.cso[ICE_CSO_STAT_RX_TCP_ERR]++;
+ *flags |= CSUM_L4_CALC;
+ break;
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ rxq->stats.cso[ICE_CSO_STAT_RX_UDP_ERR]++;
+ *flags |= CSUM_L4_CALC;
+ break;
+ case ICE_RX_PTYPE_INNER_PROT_SCTP:
+ rxq->stats.cso[ICE_CSO_STAT_RX_SCTP_ERR]++;
+ *flags |= CSUM_L4_CALC;
+ break;
+ default:
+ /*
+ * Hardware indicated L4 error, but this isn't one of
+ * the expected protocols.
+ */
+ rxq->stats.cso[ICE_CSO_STAT_RX_L4_ERR]++;
+ }
+ }
+}
+
+/**
+ * ice_ptype_to_hash - Convert packet type to a hash value
+ * @ptype: the packet type to convert
+ *
+ * Given the packet type, convert to a suitable hashtype to report to the
+ * upper stack via the iri_rsstype value of the if_rxd_info_t structure.
+ *
+ * If the hash type is unknown we'll report M_HASHTYPE_OPAQUE.
+ */
+static inline int
+ice_ptype_to_hash(u16 ptype)
+{
+ struct ice_rx_ptype_decoded decoded;
+
+ if (ptype >= ARRAY_SIZE(ice_ptype_lkup))
+ return M_HASHTYPE_OPAQUE;
+
+ decoded = ice_decode_rx_desc_ptype(ptype);
+
+ if (!decoded.known)
+ return M_HASHTYPE_OPAQUE;
+
+ if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
+ return M_HASHTYPE_OPAQUE;
+
+ /* Note: anything that gets to this point is IP */
+ if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6) {
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV6;
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ return M_HASHTYPE_RSS_UDP_IPV6;
+ default:
+ return M_HASHTYPE_RSS_IPV6;
+ }
+ }
+ if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4) {
+ switch (decoded.inner_prot) {
+ case ICE_RX_PTYPE_INNER_PROT_TCP:
+ return M_HASHTYPE_RSS_TCP_IPV4;
+ case ICE_RX_PTYPE_INNER_PROT_UDP:
+ return M_HASHTYPE_RSS_UDP_IPV4;
+ default:
+ return M_HASHTYPE_RSS_IPV4;
+ }
+ }
+
+ /* We should never get here!! */
+ return M_HASHTYPE_OPAQUE;
+}
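+
+/*
+ * Usage sketch (illustrative): in an Rx descriptor processing path the
+ * result is reported to the stack through the if_rxd_info_t structure,
+ * roughly as follows ("ri" and "ptype" are hypothetical locals):
+ *
+ *	ri->iri_rsstype = ice_ptype_to_hash(ptype);
+ */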
+#endif /* _ICE_COMMON_TXRX_H_ */
Index: sys/dev/ice/ice_controlq.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_controlq.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_CONTROLQ_H_
+#define _ICE_CONTROLQ_H_
+
+#include "ice_adminq_cmd.h"
+
+/* Maximum buffer lengths for all control queue types */
+#define ICE_AQ_MAX_BUF_LEN 4096
+#define ICE_MBXQ_MAX_BUF_LEN 4096
+
+#define ICE_CTL_Q_DESC(R, i) \
+ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
+
+#define ICE_CTL_Q_DESC_UNUSED(R) \
+ (u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+/* Defines that help manage the driver vs FW API checks.
+ * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
+ */
+#define EXP_FW_API_VER_BRANCH 0x00
+#define EXP_FW_API_VER_MAJOR 0x01
+#define EXP_FW_API_VER_MINOR 0x05
+
+/* Different control queue types: These are mainly for SW consumption. */
+enum ice_ctl_q {
+ ICE_CTL_Q_UNKNOWN = 0,
+ ICE_CTL_Q_ADMIN,
+ ICE_CTL_Q_MAILBOX,
+};
+
+/* Control Queue timeout settings - max delay 250ms */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
+#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
+
+struct ice_ctl_q_ring {
+ void *dma_head; /* Virtual address to DMA head */
+ struct ice_dma_mem desc_buf; /* descriptor ring memory */
+ void *cmd_buf; /* command buffer memory */
+
+ union {
+ struct ice_dma_mem *sq_bi;
+ struct ice_dma_mem *rq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+ u32 len_mask;
+ u32 len_ena_mask;
+ u32 head_mask;
+};
+
+/* sq transaction details */
+struct ice_sq_cd {
+ struct ice_aq_desc *wb_desc;
+};
+
+#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i]))
+
+/* rq event information */
+struct ice_rq_event_info {
+ struct ice_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Control Queue information */
+struct ice_ctl_q_info {
+ enum ice_ctl_q qtype;
+ enum ice_aq_err rq_last_status; /* last status on receive queue */
+ struct ice_ctl_q_ring rq; /* receive queue */
+ struct ice_ctl_q_ring sq; /* send queue */
+ u32 sq_cmd_timeout; /* send queue cmd write back timeout */
+ u16 num_rq_entries; /* receive queue depth */
+ u16 num_sq_entries; /* send queue depth */
+ u16 rq_buf_size; /* receive queue buffer size */
+ u16 sq_buf_size; /* send queue buffer size */
+ enum ice_aq_err sq_last_status; /* last status on send queue */
+ struct ice_lock sq_lock; /* Send queue lock */
+ struct ice_lock rq_lock; /* Receive queue lock */
+};
+
+#endif /* _ICE_CONTROLQ_H_ */
Index: sys/dev/ice/ice_controlq.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_controlq.c
@@ -0,0 +1,1227 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+
+#define ICE_CQ_INIT_REGS(qinfo, prefix) \
+do { \
+ (qinfo)->sq.head = prefix##_ATQH; \
+ (qinfo)->sq.tail = prefix##_ATQT; \
+ (qinfo)->sq.len = prefix##_ATQLEN; \
+ (qinfo)->sq.bah = prefix##_ATQBAH; \
+ (qinfo)->sq.bal = prefix##_ATQBAL; \
+ (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
+ (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
+ (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
+ (qinfo)->rq.head = prefix##_ARQH; \
+ (qinfo)->rq.tail = prefix##_ARQT; \
+ (qinfo)->rq.len = prefix##_ARQLEN; \
+ (qinfo)->rq.bah = prefix##_ARQBAH; \
+ (qinfo)->rq.bal = prefix##_ARQBAL; \
+ (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
+ (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
+ (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
+} while (0)
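+
+/*
+ * For example, ICE_CQ_INIT_REGS(cq, PF_FW) in ice_adminq_init_regs()
+ * below resolves the send queue registers to PF_FW_ATQH, PF_FW_ATQT,
+ * PF_FW_ATQLEN, PF_FW_ATQBAH and PF_FW_ATQBAL, and the receive queue
+ * registers to the corresponding PF_FW_ARQ* registers.
+ */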
+
+/**
+ * ice_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_adminq_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->adminq;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ICE_CQ_INIT_REGS(cq, PF_FW);
+}
+
+/**
+ * ice_mailbox_init_regs - Initialize Mailbox registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_mailbox_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+
+ ICE_CQ_INIT_REGS(cq, PF_MBX);
+}
+
+/**
+ * ice_check_sq_alive
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Returns true if the queue is enabled, else false.
+ */
+bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ /* check both queue-length and queue-enable fields */
+ if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
+ return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
+ cq->sq.len_ena_mask)) ==
+ (cq->num_sq_entries | cq->sq.len_ena_mask);
+
+ return false;
+}
+
+/**
+ * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
+
+ cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
+ if (!cq->sq.desc_buf.va)
+ return ICE_ERR_NO_MEMORY;
+
+ cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
+ sizeof(struct ice_sq_cd));
+ if (!cq->sq.cmd_buf) {
+ ice_free_dma_mem(hw, &cq->sq.desc_buf);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
+
+ cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
+ if (!cq->rq.desc_buf.va)
+ return ICE_ERR_NO_MEMORY;
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_free_cq_ring - Free control queue ring
+ * @hw: pointer to the hardware structure
+ * @ring: pointer to the specific control queue ring
+ *
+ * This assumes the posted buffers have already been cleaned
+ * and de-allocated
+ */
+static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
+{
+ ice_free_dma_mem(hw, &ring->desc_buf);
+}
+
+/**
+ * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+ cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
+ sizeof(cq->rq.desc_buf));
+ if (!cq->rq.dma_head)
+ return ICE_ERR_NO_MEMORY;
+ cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < cq->num_rq_entries; i++) {
+ struct ice_aq_desc *desc;
+ struct ice_dma_mem *bi;
+
+ bi = &cq->rq.r.rq_bi[i];
+ bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
+ if (!bi->va)
+ goto unwind_alloc_rq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = ICE_CTL_Q_DESC(cq->rq, i);
+
+ desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with Admin queue design, there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = CPU_TO_LE16(bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.generic.addr_high =
+ CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
+ desc->params.generic.addr_low =
+ CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
+ desc->params.generic.param0 = 0;
+ desc->params.generic.param1 = 0;
+ }
+ return ICE_SUCCESS;
+
+unwind_alloc_rq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
+ ice_free(hw, cq->rq.dma_head);
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+static enum ice_status
+ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
+ sizeof(cq->sq.desc_buf));
+ if (!cq->sq.dma_head)
+ return ICE_ERR_NO_MEMORY;
+ cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < cq->num_sq_entries; i++) {
+ struct ice_dma_mem *bi;
+
+ bi = &cq->sq.r.sq_bi[i];
+ bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
+ if (!bi->va)
+ goto unwind_alloc_sq_bufs;
+ }
+ return ICE_SUCCESS;
+
+unwind_alloc_sq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
+ ice_free(hw, cq->sq.dma_head);
+
+ return ICE_ERR_NO_MEMORY;
+}
+
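+/**
+ * ice_cfg_cq_regs - program the registers for a control queue ring
+ * @hw: pointer to the hardware structure
+ * @ring: pointer to the specific control queue ring
+ * @num_entries: number of descriptors in the ring
+ *
+ * Clears the head and tail registers, programs the ring length with the
+ * enable bit set along with the base address registers, and then reads
+ * one register back to verify that the configuration was applied.
+ */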
+static enum ice_status
+ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
+{
+ /* Clear Head and Tail */
+ wr32(hw, ring->head, 0);
+ wr32(hw, ring->tail, 0);
+
+ /* set starting point */
+ wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
+ wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
+ wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
+ return ICE_ERR_AQ_ERROR;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_cfg_sq_regs - configure Control ATQ registers
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * Configure base address and length registers for the transmit queue
+ */
+static enum ice_status
+ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
+}
+
+/**
+ * ice_cfg_rq_regs - configure Control ARQ register
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * Configure base address and length registers for the receive (event queue)
+ */
+static enum ice_status
+ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status status;
+
+ status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
+ if (status)
+ return status;
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_init_sq - main initialization routine for Control ATQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * This is the main initialization routine for the Control Send Queue.
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure:
+ * - cq->num_sq_entries
+ * - cq->sq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (cq->sq.count > 0) {
+ /* queue already initialized */
+ ret_code = ICE_ERR_NOT_READY;
+ goto init_ctrlq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if (!cq->num_sq_entries || !cq->sq_buf_size) {
+ ret_code = ICE_ERR_CFG;
+ goto init_ctrlq_exit;
+ }
+
+ cq->sq.next_to_use = 0;
+ cq->sq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = ice_alloc_sq_bufs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* initialize base registers */
+ ret_code = ice_cfg_sq_regs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* success! */
+ cq->sq.count = cq->num_sq_entries;
+ goto init_ctrlq_exit;
+
+init_ctrlq_free_rings:
+ ice_free_cq_ring(hw, &cq->sq);
+
+init_ctrlq_exit:
+ return ret_code;
+}
+
+/**
+ * ice_init_rq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure:
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (cq->rq.count > 0) {
+ /* queue already initialized */
+ ret_code = ICE_ERR_NOT_READY;
+ goto init_ctrlq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if (!cq->num_rq_entries || !cq->rq_buf_size) {
+ ret_code = ICE_ERR_CFG;
+ goto init_ctrlq_exit;
+ }
+
+ cq->rq.next_to_use = 0;
+ cq->rq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = ice_alloc_rq_bufs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* initialize base registers */
+ ret_code = ice_cfg_rq_regs(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_rings;
+
+ /* success! */
+ cq->rq.count = cq->num_rq_entries;
+ goto init_ctrlq_exit;
+
+init_ctrlq_free_rings:
+ ice_free_cq_ring(hw, &cq->rq);
+
+init_ctrlq_exit:
+ return ret_code;
+}
+
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) \
+ ice_free_dma_mem((hw), \
+ &(qi)->ring.r.ring##_bi[i]); \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ ice_free(hw, (qi)->ring.cmd_buf); \
+ /* free DMA head */ \
+ ice_free(hw, (qi)->ring.dma_head); \
+} while (0)
+
+/**
+ * ice_shutdown_sq - shutdown the Control ATQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for the Control Transmit Queue
+ */
+static enum ice_status
+ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code = ICE_SUCCESS;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ice_acquire_lock(&cq->sq_lock);
+
+ if (!cq->sq.count) {
+ ret_code = ICE_ERR_NOT_READY;
+ goto shutdown_sq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, cq->sq.head, 0);
+ wr32(hw, cq->sq.tail, 0);
+ wr32(hw, cq->sq.len, 0);
+ wr32(hw, cq->sq.bal, 0);
+ wr32(hw, cq->sq.bah, 0);
+
+ cq->sq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers and the ring itself */
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
+ ice_free_cq_ring(hw, &cq->sq);
+
+shutdown_sq_out:
+ ice_release_lock(&cq->sq_lock);
+ return ret_code;
+}
+
+/**
+ * ice_aq_ver_check - Check the reported AQ API version.
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the driver should load on a given AQ API version.
+ *
+ * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
+ */
+static bool ice_aq_ver_check(struct ice_hw *hw)
+{
+ if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ /* Major API version is newer than expected, don't load */
+ ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+ return false;
+ } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
+ if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ } else {
+ /* Major API version is older than expected, log a warning */
+ ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ }
+ return true;
+}
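+
+/*
+ * For example, with EXP_FW_API_VER_MAJOR 0x01 and EXP_FW_API_VER_MINOR
+ * 0x05 (see ice_controlq.h): a reported API major version above 1 fails
+ * the check and the driver refuses to load; API 1.8 or newer logs the
+ * "newer than expected" message; API 1.2 or older, or a major version
+ * below 1, logs the "older than expected" message; and API 1.3 through
+ * 1.7 passes silently.
+ */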
+
+/**
+ * ice_shutdown_rq - shutdown Control ARQ
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for the Control Receive Queue
+ */
+static enum ice_status
+ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ enum ice_status ret_code = ICE_SUCCESS;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ice_acquire_lock(&cq->rq_lock);
+
+ if (!cq->rq.count) {
+ ret_code = ICE_ERR_NOT_READY;
+ goto shutdown_rq_out;
+ }
+
+ /* Stop Control Queue processing */
+ wr32(hw, cq->rq.head, 0);
+ wr32(hw, cq->rq.tail, 0);
+ wr32(hw, cq->rq.len, 0);
+ wr32(hw, cq->rq.bal, 0);
+ wr32(hw, cq->rq.bah, 0);
+
+ /* set rq.count to 0 to indicate uninitialized queue */
+ cq->rq.count = 0;
+
+ /* free ring buffers and the ring itself */
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
+ ice_free_cq_ring(hw, &cq->rq);
+
+shutdown_rq_out:
+ ice_release_lock(&cq->rq_lock);
+ return ret_code;
+}
+
+/**
+ * ice_idle_aq - stop ARQ/ATQ processing momentarily
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ */
+void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ wr32(hw, cq->sq.len, 0);
+ wr32(hw, cq->rq.len, 0);
+
+ ice_msec_delay(2, false);
+}
+
+/**
+ * ice_init_check_adminq - Check version for Admin Queue to know if it is alive
+ * @hw: pointer to the hardware structure
+ */
+static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->adminq;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_aq_get_fw_ver(hw, NULL);
+ if (status)
+ goto init_ctrlq_free_rq;
+
+ if (!ice_aq_ver_check(hw)) {
+ status = ICE_ERR_FW_API_VER;
+ goto init_ctrlq_free_rq;
+ }
+
+ return ICE_SUCCESS;
+
+init_ctrlq_free_rq:
+ ice_shutdown_rq(hw, cq);
+ ice_shutdown_sq(hw, cq);
+ return status;
+}
+
+/**
+ * ice_init_ctrlq - main initialization routine for any control Queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks
+ */
+static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+ struct ice_ctl_q_info *cq;
+ enum ice_status ret_code;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ ice_adminq_init_regs(hw);
+ cq = &hw->adminq;
+ break;
+ case ICE_CTL_Q_MAILBOX:
+ ice_mailbox_init_regs(hw);
+ cq = &hw->mailboxq;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ cq->qtype = q_type;
+
+ /* verify input for valid configuration */
+ if (!cq->num_rq_entries || !cq->num_sq_entries ||
+ !cq->rq_buf_size || !cq->sq_buf_size) {
+ return ICE_ERR_CFG;
+ }
+
+ /* setup SQ command write back timeout */
+ cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
+
+ /* allocate the ATQ */
+ ret_code = ice_init_sq(hw, cq);
+ if (ret_code)
+ return ret_code;
+
+ /* allocate the ARQ */
+ ret_code = ice_init_rq(hw, cq);
+ if (ret_code)
+ goto init_ctrlq_free_sq;
+
+ /* success! */
+ return ICE_SUCCESS;
+
+init_ctrlq_free_sq:
+ ice_shutdown_sq(hw, cq);
+ return ret_code;
+}
+
+/**
+ * ice_init_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
+ */
+enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+{
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Init FW admin queue */
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ if (status)
+ return status;
+
+ status = ice_init_check_adminq(hw);
+ if (status)
+ return status;
+ /* Init Mailbox queue */
+ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+}
+
+/**
+ * ice_init_ctrlq_locks - Initialize locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Initializes the send and receive queue locks for a given control queue.
+ */
+static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ ice_init_lock(&cq->sq_lock);
+ ice_init_lock(&cq->rq_lock);
+}
+
+/**
+ * ice_create_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * This function creates all the control queue locks and then calls
+ * ice_init_all_ctrlq. It should be called once during driver load. If the
+ * driver needs to re-initialize control queues at run time it should call
+ * ice_init_all_ctrlq instead.
+ */
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+{
+ ice_init_ctrlq_locks(&hw->adminq);
+ ice_init_ctrlq_locks(&hw->mailboxq);
+
+ return ice_init_all_ctrlq(hw);
+}
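+
+/*
+ * Usage sketch (illustrative only): before calling ice_create_all_ctrlq()
+ * during attach, the driver fills in the sizing fields of both queues.
+ * The entry counts below are hypothetical placeholders; the buffer sizes
+ * use the maximums defined in ice_controlq.h.
+ *
+ *	hw->adminq.num_sq_entries = 128;
+ *	hw->adminq.num_rq_entries = 128;
+ *	hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
+ *	hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
+ *	hw->mailboxq.num_sq_entries = 128;
+ *	hw->mailboxq.num_rq_entries = 128;
+ *	hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ *	hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ *	status = ice_create_all_ctrlq(hw);
+ */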
+
+/**
+ * ice_shutdown_ctrlq - shutdown routine for any control queue
+ * @hw: pointer to the hardware structure
+ * @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
+ */
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+{
+ struct ice_ctl_q_info *cq;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ cq = &hw->adminq;
+ if (ice_check_sq_alive(hw, cq))
+ ice_aq_q_shutdown(hw, true);
+ break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ break;
+ default:
+ return;
+ }
+
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
+}
+
+/**
+ * ice_shutdown_all_ctrlq - shutdown routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
+ */
+void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ /* Shutdown FW admin queue */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PF-VF Mailbox */
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+}
+
+/**
+ * ice_destroy_ctrlq_locks - Destroy locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Destroys the send and receive queue locks for a given control queue.
+ */
+static void
+ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ ice_destroy_lock(&cq->sq_lock);
+ ice_destroy_lock(&cq->rq_lock);
+}
+
+/**
+ * ice_destroy_all_ctrlq - exit routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * This function shuts down all the control queues and then destroys the
+ * control queue locks. It should be called once during driver unload. The
+ * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
+ * reinitialize control queues, such as in response to a reset event.
+ */
+void ice_destroy_all_ctrlq(struct ice_hw *hw)
+{
+ /* shut down all the control queues first */
+ ice_shutdown_all_ctrlq(hw);
+
+ ice_destroy_ctrlq_locks(&hw->adminq);
+ ice_destroy_ctrlq_locks(&hw->mailboxq);
+}
+
+/**
+ * ice_clean_sq - cleans Admin send queue (ATQ)
+ * @hw: pointer to the hardware structure
+ * @cq: pointer to the specific Control queue
+ *
+ * returns the number of free desc
+ */
+static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ struct ice_ctl_q_ring *sq = &cq->sq;
+ u16 ntc = sq->next_to_clean;
+ struct ice_sq_cd *details;
+ struct ice_aq_desc *desc;
+
+ desc = ICE_CTL_Q_DESC(*sq, ntc);
+ details = ICE_CTL_Q_DETAILS(*sq, ntc);
+
+ while (rd32(hw, cq->sq.head) != ntc) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
+ ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
+ ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
+ ntc++;
+ if (ntc == sq->count)
+ ntc = 0;
+ desc = ICE_CTL_Q_DESC(*sq, ntc);
+ details = ICE_CTL_Q_DETAILS(*sq, ntc);
+ }
+
+ sq->next_to_clean = ntc;
+
+ return ICE_CTL_Q_DESC_UNUSED(sq);
+}
+
+/**
+ * ice_debug_cq
+ * @hw: pointer to the hardware structure
+ * @desc: pointer to control queue descriptor
+ * @buf: pointer to command buffer
+ * @buf_len: max length of buf
+ *
+ * Dumps debug log about control command with descriptor contents.
+ */
+static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
+{
+ struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
+ u16 datalen, flags;
+
+ if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
+ return;
+
+ if (!desc)
+ return;
+
+ datalen = LE16_TO_CPU(cq_desc->datalen);
+ flags = LE16_TO_CPU(cq_desc->flags);
+
+ ice_debug(hw, ICE_DBG_AQ_DESC,
+ "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ LE16_TO_CPU(cq_desc->opcode), flags, datalen,
+ LE16_TO_CPU(cq_desc->retval));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(cq_desc->cookie_high),
+ LE32_TO_CPU(cq_desc->cookie_low));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(cq_desc->params.generic.param0),
+ LE32_TO_CPU(cq_desc->params.generic.param1));
+ ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
+ LE32_TO_CPU(cq_desc->params.generic.addr_high),
+ LE32_TO_CPU(cq_desc->params.generic.addr_low));
+ /* Dump buffer iff 1) one exists and 2) is either a response indicated
+ * by the DD and/or CMP flag set or a command with the RD flag set.
+ */
+ if (buf && cq_desc->datalen != 0 &&
+ (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
+ flags & ICE_AQ_FLAG_RD)) {
+ ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
+ ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
+ MIN_T(u16, buf_len, datalen));
+ }
+}
+
+/**
+ * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ */
+bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
+}
+
+/**
+ * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buf: buffer to use for indirect commands (or NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * This is the main send command routine for the ATQ. It runs the queue,
+ * cleans the queue, etc.
+ */
+static enum ice_status
+ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_dma_mem *dma_buf = NULL;
+ struct ice_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_sq_cd *details;
+ u32 total_delay = 0;
+ u16 retval = 0;
+ u32 val = 0;
+
+ /* if reset is in progress return a soft error */
+ if (hw->reset_ongoing)
+ return ICE_ERR_RESET_ONGOING;
+
+ cq->sq_last_status = ICE_AQ_RC_OK;
+
+ if (!cq->sq.count) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send queue not initialized.\n");
+ status = ICE_ERR_AQ_EMPTY;
+ goto sq_send_command_error;
+ }
+
+ if ((buf && !buf_size) || (!buf && buf_size)) {
+ status = ICE_ERR_PARAM;
+ goto sq_send_command_error;
+ }
+
+ if (buf) {
+ if (buf_size > cq->sq_buf_size) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Invalid buffer size for Control Send queue: %d.\n",
+ buf_size);
+ status = ICE_ERR_INVAL_SIZE;
+ goto sq_send_command_error;
+ }
+
+ desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
+ if (buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
+ }
+
+ val = rd32(hw, cq->sq.head);
+ if (val >= cq->num_sq_entries) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "head overrun at %d in the Control Send Queue ring\n",
+ val);
+ status = ICE_ERR_AQ_EMPTY;
+ goto sq_send_command_error;
+ }
+
+ details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
+ if (cd)
+ *details = *cd;
+ else
+ ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
+
+ /* Call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW/MBX; the function returns the
+ * number of desc available. The clean function called here could be
+ * called in a separate thread in case of asynchronous completions.
+ */
+ if (ice_clean_sq(hw, cq) == 0) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Error: Control Send Queue is full.\n");
+ status = ICE_ERR_AQ_FULL;
+ goto sq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
+ ICE_NONDMA_TO_DMA);
+
+ /* if buf is not NULL assume indirect command */
+ if (buf) {
+ dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
+ /* copy the user buf into the respective DMA buf */
+ ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
+ desc_on_ring->datalen = CPU_TO_LE16(buf_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.generic.addr_high =
+ CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
+ desc_on_ring->params.generic.addr_low =
+ CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
+ }
+
+ /* Debug desc and buffer */
+ ice_debug(hw, ICE_DBG_AQ_DESC,
+ "ATQ: Control Send queue desc and buffer:\n");
+
+ ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
+
+ (cq->sq.next_to_use)++;
+ if (cq->sq.next_to_use == cq->sq.count)
+ cq->sq.next_to_use = 0;
+ wr32(hw, cq->sq.tail, cq->sq.next_to_use);
+
+ do {
+ if (ice_sq_done(hw, cq))
+ break;
+
+ ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
+ total_delay++;
+ } while (total_delay < cq->sq_cmd_timeout);
+
+ /* if ready, copy the desc back to temp */
+ if (ice_sq_done(hw, cq)) {
+ ice_memcpy(desc, desc_on_ring, sizeof(*desc),
+ ICE_DMA_TO_NONDMA);
+ if (buf) {
+ /* get returned length to copy */
+ u16 copy_size = LE16_TO_CPU(desc->datalen);
+
+ if (copy_size > buf_size) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Return len %d > than buf len %d\n",
+ copy_size, buf_size);
+ status = ICE_ERR_AQ_ERROR;
+ } else {
+ ice_memcpy(buf, dma_buf->va, copy_size,
+ ICE_DMA_TO_NONDMA);
+ }
+ }
+ retval = LE16_TO_CPU(desc->retval);
+ if (retval) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send Queue command 0x%04X completed with error 0x%X\n",
+ LE16_TO_CPU(desc->opcode),
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if (!status && retval != ICE_AQ_RC_OK)
+ status = ICE_ERR_AQ_ERROR;
+ cq->sq_last_status = (enum ice_aq_err)retval;
+ }
+
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "ATQ: desc and buffer writeback:\n");
+
+ ice_debug_cq(hw, (void *)desc, buf, buf_size);
+
+ /* save writeback AQ if requested */
+ if (details->wb_desc)
+ ice_memcpy(details->wb_desc, desc_on_ring,
+ sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);
+
+ /* update the error if time out occurred */
+ if (!cmd_completed) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Send Queue Writeback timeout.\n");
+ status = ICE_ERR_AQ_TIMEOUT;
+ }
+
+sq_send_command_error:
+ return status;
+}
+
+/**
+ * ice_sq_send_cmd - send command to Control Queue (ATQ)
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buf: buffer to use for indirect commands (or NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
+ * @cd: pointer to command details structure
+ *
+ * This is the main send command routine for the ATQ. It runs the queue,
+ * cleans the queue, etc.
+ */
+enum ice_status
+ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ enum ice_status status = ICE_SUCCESS;
+
+ /* if reset is in progress return a soft error */
+ if (hw->reset_ongoing)
+ return ICE_ERR_RESET_ONGOING;
+
+ ice_acquire_lock(&cq->sq_lock);
+ status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
+ ice_release_lock(&cq->sq_lock);
+
+ return status;
+}
+
+/**
+ * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ */
+void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
+{
+ /* zero out the desc */
+ ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
+ desc->opcode = CPU_TO_LE16(opcode);
+ desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
+}
+
+/**
+ * ice_clean_rq_elem
+ * @hw: pointer to the HW struct
+ * @cq: pointer to the specific Control queue
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'.
+ */
+enum ice_status
+ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_rq_event_info *e, u16 *pending)
+{
+ u16 ntc = cq->rq.next_to_clean;
+ enum ice_status ret_code = ICE_SUCCESS;
+ struct ice_aq_desc *desc;
+ struct ice_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);
+
+ /* take the lock before we start messing with the ring */
+ ice_acquire_lock(&cq->rq_lock);
+
+ if (!cq->rq.count) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Receive queue not initialized.\n");
+ ret_code = ICE_ERR_AQ_EMPTY;
+ goto clean_rq_elem_err;
+ }
+
+ /* set next_to_use to head */
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = ICE_ERR_AQ_NO_WORK;
+ goto clean_rq_elem_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = ICE_CTL_Q_DESC(cq->rq, ntc);
+ desc_idx = ntc;
+
+ cq->rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
+ flags = LE16_TO_CPU(desc->flags);
+ if (flags & ICE_AQ_FLAG_ERR) {
+ ret_code = ICE_ERR_AQ_ERROR;
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Control Receive Queue Event 0x%04X received with error 0x%X\n",
+ LE16_TO_CPU(desc->opcode),
+ cq->rq_last_status);
+ }
+ ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
+ datalen = LE16_TO_CPU(desc->datalen);
+ e->msg_len = MIN_T(u16, datalen, e->buf_len);
+ if (e->msg_buf && e->msg_len)
+ ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
+ e->msg_len, ICE_DMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
+
+ ice_debug_cq(hw, (void *)desc, e->msg_buf,
+ cq->rq_buf_size);
+
+	/* Restore the original datalen and buffer address in the desc;
+	 * FW updates datalen to indicate the event message size
+	 */
+ bi = &cq->rq.r.rq_bi[ntc];
+ ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
+
+ desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > ICE_AQ_LG_BUF)
+ desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
+ desc->datalen = CPU_TO_LE16(bi->size);
+ desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
+ desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, cq->rq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == cq->num_rq_entries)
+ ntc = 0;
+ cq->rq.next_to_clean = ntc;
+ cq->rq.next_to_use = ntu;
+
+clean_rq_elem_out:
+ /* Set pending if needed, unlock and return */
+ if (pending) {
+ /* re-read HW head to calculate actual pending messages */
+ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
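+		/* If the head (ntu) has wrapped around behind
+		 * next_to_clean (ntc), add the ring size so the pending
+		 * count stays correct.
+		 */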
+ *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+ }
+clean_rq_elem_err:
+ ice_release_lock(&cq->rq_lock);
+
+ return ret_code;
+}
Index: sys/dev/ice/ice_dcb.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_dcb.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_DCB_H_
+#define _ICE_DCB_H_
+
+#include "ice_type.h"
+#include "ice_common.h"
+
+#define ICE_DCBX_OFFLOAD_DIS 0
+#define ICE_DCBX_OFFLOAD_ENABLED 1
+
+#define ICE_DCBX_STATUS_NOT_STARTED 0
+#define ICE_DCBX_STATUS_IN_PROGRESS 1
+#define ICE_DCBX_STATUS_DONE 2
+#define ICE_DCBX_STATUS_MULTIPLE_PEERS 3
+#define ICE_DCBX_STATUS_DIS 7
+
+#define ICE_TLV_TYPE_END 0
+#define ICE_TLV_TYPE_ORG 127
+
+#define ICE_IEEE_8021QAZ_OUI 0x0080C2
+#define ICE_IEEE_SUBTYPE_ETS_CFG 9
+#define ICE_IEEE_SUBTYPE_ETS_REC 10
+#define ICE_IEEE_SUBTYPE_PFC_CFG 11
+#define ICE_IEEE_SUBTYPE_APP_PRI 12
+
+#define ICE_CEE_DCBX_OUI 0x001B21
+#define ICE_CEE_DCBX_TYPE 2
+
+#define ICE_CEE_SUBTYPE_CTRL 1
+#define ICE_CEE_SUBTYPE_PG_CFG 2
+#define ICE_CEE_SUBTYPE_PFC_CFG 3
+#define ICE_CEE_SUBTYPE_APP_PRI 4
+
+#define ICE_CEE_MAX_FEAT_TYPE 3
+#define ICE_LLDP_ADMINSTATUS_DIS 0
+#define ICE_LLDP_ADMINSTATUS_ENA_RX 1
+#define ICE_LLDP_ADMINSTATUS_ENA_TX 2
+#define ICE_LLDP_ADMINSTATUS_ENA_RXTX 3
+
+/* Defines for LLDP TLV header */
+#define ICE_LLDP_TLV_LEN_S 0
+#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
+#define ICE_LLDP_TLV_TYPE_S 9
+#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
+#define ICE_LLDP_TLV_SUBTYPE_S 0
+#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
+#define ICE_LLDP_TLV_OUI_S 8
+#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)
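+
+/* For illustration: an Organization Specific TLV carrying the IEEE ETS
+ * Configuration payload has type ICE_TLV_TYPE_ORG (127) and length
+ * ICE_IEEE_ETS_TLV_LEN (25), giving a typelen word of
+ * (127 << ICE_LLDP_TLV_TYPE_S) | 25 = 0xFE19 and an ouisubtype word of
+ * (ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | ICE_IEEE_SUBTYPE_ETS_CFG
+ * = 0x0080C209.
+ */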
+
+/* Defines for IEEE ETS TLV */
+#define ICE_IEEE_ETS_MAXTC_S 0
+#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
+#define ICE_IEEE_ETS_CBS_S 6
+#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
+#define ICE_IEEE_ETS_WILLING_S 7
+#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
+#define ICE_IEEE_ETS_PRIO_0_S 0
+#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
+#define ICE_IEEE_ETS_PRIO_1_S 4
+#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
+#define ICE_CEE_PGID_PRIO_0_S 0
+#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
+#define ICE_CEE_PGID_PRIO_1_S 4
+#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
+#define ICE_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define ICE_IEEE_TSA_STRICT 0
+#define ICE_IEEE_TSA_CBS 1
+#define ICE_IEEE_TSA_ETS 2
+#define ICE_IEEE_TSA_VENDOR 255
+
+/* Defines for IEEE PFC TLV */
+#define ICE_IEEE_PFC_CAP_S 0
+#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
+#define ICE_IEEE_PFC_MBC_S 6
+#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
+#define ICE_IEEE_PFC_WILLING_S 7
+#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)
+
+/* Defines for IEEE APP TLV */
+#define ICE_IEEE_APP_SEL_S 0
+#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
+#define ICE_IEEE_APP_PRIO_S 5
+#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
+
+/* TLV definitions for preparing MIB */
+#define ICE_TLV_ID_CHASSIS_ID 0
+#define ICE_TLV_ID_PORT_ID 1
+#define ICE_TLV_ID_TIME_TO_LIVE 2
+#define ICE_IEEE_TLV_ID_ETS_CFG 3
+#define ICE_IEEE_TLV_ID_ETS_REC 4
+#define ICE_IEEE_TLV_ID_PFC_CFG 5
+#define ICE_IEEE_TLV_ID_APP_PRI 6
+#define ICE_TLV_ID_END_OF_LLDPPDU 7
+#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
+
+#define ICE_IEEE_ETS_TLV_LEN 25
+#define ICE_IEEE_PFC_TLV_LEN 6
+#define ICE_IEEE_APP_TLV_LEN 11
+
+#pragma pack(1)
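+/* The packed structures below mirror the on-the-wire LLDP TLV layout;
+ * multi-byte fields (__be16/__be32) are kept in network byte order.
+ */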
+/* IEEE 802.1AB LLDP TLV structure */
+struct ice_lldp_generic_tlv {
+ __be16 typelen;
+ u8 tlvinfo[1];
+};
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct ice_lldp_org_tlv {
+ __be16 typelen;
+ __be32 ouisubtype;
+ u8 tlvinfo[1];
+};
+#pragma pack()
+
+struct ice_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct ice_cee_ctrl_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct ice_cee_feat_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define ICE_CEE_FEAT_TLV_ENA_M 0x80
+#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
+#define ICE_CEE_FEAT_TLV_ERR_M 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+#pragma pack(1)
+struct ice_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define ICE_CEE_APP_SELECTOR_M 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+};
+#pragma pack()
+
+/* TODO: The structures below define LLDP/DCBX variables and
+ * statistics, but it has yet to be determined how to get the
+ * required information from the Firmware in order to use them.
+ */
+
+/* IEEE 802.1AB LLDP Agent Statistics */
+struct ice_lldp_stats {
+ u64 remtablelastchangetime;
+ u64 remtableinserts;
+ u64 remtabledeletes;
+ u64 remtabledrops;
+ u64 remtableageouts;
+ u64 txframestotal;
+ u64 rxframesdiscarded;
+ u64 rxportframeerrors;
+ u64 rxportframestotal;
+ u64 rxporttlvsdiscardedtotal;
+ u64 rxporttlvsunrecognizedtotal;
+ u64 remtoomanyneighbors;
+};
+
+/* IEEE 802.1Qaz DCBX variables */
+struct ice_dcbx_variables {
+ u32 defmaxtrafficclasses;
+ u32 defprioritytcmapping;
+ u32 deftcbandwidth;
+ u32 deftsaassignment;
+};
+
+enum ice_status
+ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
+ u16 buf_size, u16 *local_len, u16 *remote_len,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
+ void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
+ u16 buf_size, u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
+ struct ice_aqc_get_cee_dcb_cfg_resp *buff,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfcmode_set, u8 *pfcmode_ret,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
+ struct ice_sq_cd *cd);
+enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+u8 ice_get_dcbx_status(struct ice_hw *hw);
+enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+ struct ice_dcbx_cfg *dcbcfg);
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
+void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg);
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cmd_details);
+enum ice_status
+ice_aq_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf);
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd);
+enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd);
+#endif /* _ICE_DCB_H_ */
Index: sys/dev/ice/ice_dcb.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_dcb.c
@@ -0,0 +1,1697 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+#include "ice_sched.h"
+#include "ice_dcb.h"
+
+/**
+ * ice_aq_get_lldp_mib
+ * @hw: pointer to the HW struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet). (0x0A00)
+ */
+enum ice_status
+ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
+ u16 buf_size, u16 *local_len, u16 *remote_len,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_get_mib *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.lldp_get_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
+
+ cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M;
+ cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M;
+
+ desc.datalen = CPU_TO_LE16(buf_size);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status) {
+ if (local_len)
+ *local_len = LE16_TO_CPU(cmd->local_len);
+ if (remote_len)
+ *remote_len = LE16_TO_CPU(cmd->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_cfg_lldp_mib_change
+ * @hw: pointer to the HW struct
+ * @ena_update: Enable or Disable event posting
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes (0x0A01)
+ */
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_mib_change *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_event;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change);
+
+ if (!ena_update)
+ cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_add_delete_lldp_tlv
+ * @hw: pointer to the HW struct
+ * @bridge_type: type of bridge
+ * @add_lldp_tlv: add (true) or delete (false) TLV
+ * @buf: buffer with TLV to add or delete
+ * @buf_size: length of the buffer
+ * @tlv_len: length of the TLV to be added/deleted
+ * @mib_len: length of the LLDP MIB returned in response
+ * @cd: pointer to command details structure or NULL
+ *
+ * (Add tlv)
+ * Add the specified TLV to the LLDP Local MIB for the given bridge type;
+ * it is the responsibility of the caller to make sure that the TLV is not
+ * already present in the LLDPDU.
+ * In return firmware will write the complete LLDP MIB with the newly
+ * added TLV in the response buffer. (0x0A02)
+ *
+ * (Delete tlv)
+ * Delete the specified TLV from LLDP Local MIB for the given bridge type.
+ * The firmware places the entire LLDP MIB in the response buffer. (0x0A04)
+ */
+enum ice_status
+ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
+ void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_add_delete_tlv *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (tlv_len == 0)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.lldp_add_delete_tlv;
+
+ if (add_lldp_tlv)
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_add_tlv);
+ else
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_lldp_delete_tlv);
+
+ desc.flags |= CPU_TO_LE16((u16)(ICE_AQ_FLAG_RD));
+
+ cmd->type = ((bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M);
+ cmd->len = CPU_TO_LE16(tlv_len);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && mib_len)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+
+ return status;
+}
+
+/**
+ * ice_aq_update_lldp_tlv
+ * @hw: pointer to the HW struct
+ * @bridge_type: type of bridge
+ * @buf: buffer with TLV to update
+ * @buf_size: size of the buffer holding original and updated TLVs
+ * @old_len: Length of the Original TLV
+ * @new_len: Length of the Updated TLV
+ * @offset: offset of the updated TLV in the buffer
+ * @mib_len: length of the returned LLDP MIB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update the specified TLV to the LLDP Local MIB for the given bridge type.
+ * Firmware will place the complete LLDP MIB in response buffer with the
+ * updated TLV. (0x0A03)
+ */
+enum ice_status
+ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
+ u16 buf_size, u16 old_len, u16 new_len, u16 offset,
+ u16 *mib_len, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_update_tlv *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.lldp_update_tlv;
+
+ if (offset == 0 || old_len == 0 || new_len == 0)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_update_tlv);
+
+ desc.flags |= CPU_TO_LE16((u16)(ICE_AQ_FLAG_RD));
+
+ cmd->type = ((bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M);
+ cmd->old_len = CPU_TO_LE16(old_len);
+ cmd->new_offset = CPU_TO_LE16(offset);
+ cmd->new_len = CPU_TO_LE16(new_len);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && mib_len)
+ *mib_len = LE16_TO_CPU(desc.datalen);
+
+ return status;
+}
+
+/**
+ * ice_aq_stop_lldp
+ * @hw: pointer to the HW struct
+ * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown
+ * False if LLDP Agent needs to be Stopped
+ * @persist: True if Stop/Shutdown of LLDP Agent needs to be persistent across
+ * reboots
+ * @cd: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent (0x0A05)
+ */
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_stop;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);
+
+ if (shutdown_lldp_agent)
+ cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN;
+
+ if (persist)
+ cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_DIS;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_start_lldp
+ * @hw: pointer to the HW struct
+ * @persist: True if Start of LLDP Agent needs to be persistent across reboots
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports. (0x0A06)
+ */
+enum ice_status
+ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_start *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_start;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
+
+ cmd->command = ICE_AQ_LLDP_AGENT_START;
+
+ if (persist)
+ cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_ENA;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = CPU_TO_LE16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = CPU_TO_LE16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_get_dcbx_status
+ * @hw: pointer to the HW struct
+ *
+ * Get the DCBX status from the Firmware
+ */
+u8 ice_get_dcbx_status(struct ice_hw *hw)
+{
+ u32 reg;
+
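+	/* The DCBX status field extracted below maps to the
+	 * ICE_DCBX_STATUS_* values (NOT_STARTED, IN_PROGRESS, DONE,
+	 * MULTIPLE_PEERS, DIS).
+	 */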
+ reg = rd32(hw, PRTDCB_GENS);
+ return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >>
+ PRTDCB_GENS_DCBX_STATUS_S);
+}
+
+/**
+ * ice_parse_ieee_ets_common_tlv
+ * @buf: Data buffer to be parsed for ETS CFG/REC data
+ * @ets_cfg: Container to store parsed data
+ *
+ * Parses the common data of IEEE 802.1Qaz ETS CFG/REC TLV
+ */
+static void
+ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ ets_cfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >>
+ ICE_IEEE_ETS_PRIO_1_S);
+ ets_cfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >>
+ ICE_IEEE_ETS_PRIO_0_S);
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 9 | 10| 11| 12| 13| 14| 15| 16|
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ ets_cfg->tcbwtable[i] = buf[offset];
+ ets_cfg->tsatable[i] = buf[ICE_MAX_TRAFFIC_CLASS + offset++];
+ }
+}
+
+/**
+ * ice_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >>
+ ICE_IEEE_ETS_WILLING_S);
+ etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S);
+ etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >>
+ ICE_IEEE_ETS_MAXTC_S);
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_parse_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], &dcbcfg->etsrec);
+}
+
+/**
+ * ice_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >>
+ ICE_IEEE_PFC_WILLING_S);
+ dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S);
+ dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >>
+ ICE_IEEE_PFC_CAP_S);
+ dcbcfg->pfc.pfcena = buf[1];
+}
+
+/**
+ * ice_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ */
+static void
+ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 offset = 0;
+ u16 typelen;
+ int i = 0;
+ u16 len;
+ u8 *buf;
+
+ typelen = NTOHS(tlv->typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ buf = tlv->tlvinfo;
+
+ /* Removing sizeof(ouisubtype) and reserved byte from len.
+ * Remaining len div 3 is number of APP TLVs.
+ */
+ len -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < len) {
+ dcbcfg->app[i].priority = ((buf[offset] &
+ ICE_IEEE_APP_PRIO_M) >>
+ ICE_IEEE_APP_PRIO_S);
+ dcbcfg->app[i].selector = ((buf[offset] &
+ ICE_IEEE_APP_SEL_M) >>
+ ICE_IEEE_APP_SEL_S);
+ dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
+
+/**
+ * ice_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Get the TLV subtype and send it to the parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ switch (subtype) {
+ case ICE_IEEE_SUBTYPE_ETS_CFG:
+ ice_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_ETS_REC:
+ ice_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_PFC_CFG:
+ ice_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_APP_PRI:
+ ice_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ */
+static void
+ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ etscfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ etscfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ if (etscfg->prio_table[i] == ICE_CEE_PGID_STRICT)
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT;
+ else
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ }
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * ice_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ */
+static void
+ice_parse_cee_pfccfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcena = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * ice_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ */
+static void
+ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, typelen, offset = 0;
+ struct ice_cee_app_prio *app;
+ u8 i;
+
+ typelen = NTOHS(tlv->hdr.typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+
+ dcbcfg->numapps = len / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+ if (dcbcfg->numapps > ICE_DCBX_MAX_APPS)
+ dcbcfg->numapps = ICE_DCBX_MAX_APPS;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct ice_cee_app_prio *)(tlv->tlvinfo + offset);
+ for (up = 0; up < ICE_MAX_USER_PRIORITY; up++)
+ if (app->prio_map & BIT(up))
+ break;
+
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & ICE_CEE_APP_SELECTOR_M);
+ switch (selector) {
+ case ICE_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = ICE_APP_SEL_ETHTYPE;
+ break;
+ case ICE_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = ICE_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].prot_id = NTOHS(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * ice_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to the parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u16 len, tlvlen, typelen;
+ u32 ouisubtype;
+
+ ouisubtype = NTOHL(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ /* Return if not CEE DCBX */
+ if (subtype != ICE_CEE_DCBX_TYPE)
+ return;
+
+ typelen = NTOHS(tlv->typelen);
+ tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ len = sizeof(tlv->typelen) + sizeof(ouisubtype) +
+ sizeof(struct ice_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct ice_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < ICE_CEE_MAX_FEAT_TYPE) {
+ u16 sublen;
+
+ typelen = NTOHS(sub_tlv->hdr.typelen);
+ sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >>
+ ICE_LLDP_TLV_TYPE_S);
+ switch (subtype) {
+ case ICE_CEE_SUBTYPE_PG_CFG:
+ ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_PFC_CFG:
+ ice_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_APP_PRI:
+ ice_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct ice_cee_feat_tlv *)
+ ((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * ice_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Currently only the IEEE 802.1Qaz and CEE DCBX TLVs are supported;
+ * all others are ignored
+ */
+static void
+ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = NTOHL(tlv->ouisubtype);
+ oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S);
+ switch (oui) {
+ case ICE_IEEE_8021QAZ_OUI:
+ ice_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case ICE_CEE_DCBX_OUI:
+ ice_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_lldp_to_dcb_cfg
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ */
+enum ice_status
+ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_lldp_org_tlv *tlv;
+ enum ice_status ret = ICE_SUCCESS;
+ u16 offset = 0;
+ u16 typelen;
+ u16 type;
+ u16 len;
+
+ if (!lldpmib || !dcbcfg)
+ return ICE_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HEADER_LEN;
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelen = NTOHS(tlv->typelen);
+ type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ offset += sizeof(typelen) + len;
+
+ /* END TLV or beyond LLDPDU size */
+ if (type == ICE_TLV_TYPE_END || offset > ICE_LLDPDU_SIZE)
+ break;
+
+ switch (type) {
+ case ICE_TLV_TYPE_ORG:
+ ice_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_get_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @mib_type: MIB type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the firmware
+ */
+enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ enum ice_status ret;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
+ ICE_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (ret == ICE_SUCCESS)
+ /* Parse LLDP MIB to get DCB configuration */
+ ret = ice_lldp_to_dcb_cfg(lldpmib, dcbcfg);
+
+ ice_free(hw, lldpmib);
+
+ return ret;
+}
+
+/**
+ * ice_aq_dcb_ignore_pfc - Ignore PFC for given TCs
+ * @hw: pointer to the HW struct
+ * @tcmap: TC map for request/release any ignore PFC condition
+ * @request: request (true) or release (false) ignore PFC condition
+ * @tcmap_ret: return TCs for which PFC is currently ignored
+ * @cd: pointer to command details structure or NULL
+ *
+ * This sends out request/release to ignore PFC condition for a TC.
+ * It will return the TCs for which PFC is currently ignored. (0x0301)
+ */
+enum ice_status
+ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pfc_ignore *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.pfc_ignore;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_pfc_ignore);
+
+ if (request)
+ cmd->cmd_flags = ICE_AQC_PFC_IGNORE_SET;
+
+ cmd->tc_bitmap = tcmap;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ if (!status && tcmap_ret)
+ *tcmap_ret = cmd->tc_bitmap;
+
+ return status;
+}
+
+/**
+ * ice_aq_start_stop_dcbx - Start/Stop DCBX service in FW
+ * @hw: pointer to the HW struct
+ * @start_dcbx_agent: True if DCBX Agent needs to be started
+ * False if DCBX Agent needs to be stopped
+ * @dcbx_agent_status: FW indicates back the DCBX agent status
+ * True if DCBX Agent is active
+ * False if DCBX Agent is stopped
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded DCBX Agent. If this wrapper function returns
+ * ICE_SUCCESS, the caller will need to check whether FW returns back the
+ * same value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
+ */
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop_start_specific_agent *cmd;
+ enum ice_status status;
+ struct ice_aq_desc desc;
+ u16 opcode;
+
+ cmd = &desc.params.lldp_agent_ctrl;
+
+ opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+
+ if (start_dcbx_agent)
+ cmd->command = ICE_AQC_START_STOP_AGENT_START_DCBX;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ *dcbx_agent_status = false;
+
+ if (status == ICE_SUCCESS &&
+ cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX)
+ *dcbx_agent_status = true;
+
+ return status;
+}
+
+/**
+ * ice_aq_get_cee_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware (0x0A07)
+ */
+enum ice_status
+ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
+ struct ice_aqc_get_cee_dcb_cfg_resp *buff,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg);
+
+ return ice_aq_send_cmd(hw, &desc, (void *)buff, sizeof(*buff), cd);
+}
+
+/**
+ * ice_aq_query_pfc_mode - Query PFC mode
+ * @hw: pointer to the HW struct
+ * @pfcmode_ret: Return PFC mode
+ * @cd: pointer to command details structure or NULL
+ *
+ * This will return an indication of whether DSCP-based PFC or VLAN-based
+ * PFC is enabled. (0x0302)
+ */
+enum ice_status
+ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_query_pfc_mode *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.set_query_pfc_mode;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_pfc_mode);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ if (!status)
+ *pfcmode_ret = cmd->pfc_mode;
+
+ return status;
+}
+
+/**
+ * ice_aq_set_pfc_mode - Set PFC mode
+ * @hw: pointer to the HW struct
+ * @pfcmode_set: set-value of PFC mode
+ * @pfcmode_ret: return value of PFC mode, written by FW
+ * @cd: pointer to command details structure or NULL
+ *
+ * This AQ call configures the PFC mode to DSCP-based PFC mode or
+ * VLAN-based PFC. (0x0303)
+ */
+enum ice_status
+ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfcmode_set, u8 *pfcmode_ret,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_query_pfc_mode *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (pfcmode_set > ICE_AQC_PFC_DSCP_BASED_PFC)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.set_query_pfc_mode;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
+
+ cmd->pfc_mode = pfcmode_set;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ if (!status)
+ *pfcmode_ret = cmd->pfc_mode;
+
+ return status;
+}
+
+/**
+ * ice_aq_set_dcb_parameters - Set DCB parameters
+ * @hw: pointer to the HW struct
+ * @dcb_enable: True if DCB configuration needs to be applied
+ * @cd: pointer to command details structure or NULL
+ *
+ * This AQ command tells the FW whether or not to apply the default DCB
+ * configuration when the link comes up (0x0306).
+ */
+enum ice_status
+ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_dcb_params *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_dcb_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_dcb_params);
+
+ cmd->valid_flags = ICE_AQC_LINK_UP_DCB_CFG_VALID;
+ if (dcb_enable)
+ cmd->cmd_flags = ICE_AQC_LINK_UP_DCB_CFG;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_cee_to_dcb_cfg
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ */
+static void
+ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status);
+ u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+ u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio);
+ u8 i, err, sync, oper, app_index, ice_app_sel_type;
+ u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+ u16 ice_app_prot_id_type;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ dcbcfg->etscfg.prio_table[i * 2] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ dcbcfg->etscfg.prio_table[i * 2 + 1] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ }
+
+ ice_for_each_traffic_class(i) {
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ if (dcbcfg->etscfg.prio_table[i] == ICE_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prio_table[i] = cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ }
+ }
+
+ /* CEE PFC data to ETS config */
+ dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
+
+ app_index = 0;
+ for (i = 0; i < 3; i++) {
+ if (i == 0) {
+ /* FCoE APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FCOE_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FCOE_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE;
+ } else if (i == 1) {
+ /* iSCSI APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_ISCSI_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
+ ice_app_sel_type = ICE_APP_SEL_TCPIP;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+ } else {
+ /* FIP APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FIP_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FIP;
+ }
+
+ status = (tlv_status & ice_aqc_cee_status_mask) >>
+ ice_aqc_cee_status_shift;
+ err = (status & ICE_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & ICE_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & ICE_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE/iSCSI/FIP APP if Error is False and
+ * Oper/Sync is True
+ */
+ if (!err && sync && oper) {
+ dcbcfg->app[app_index].priority =
+ (app_prio & ice_aqc_cee_app_mask) >>
+ ice_aqc_cee_app_shift;
+ dcbcfg->app[app_index].selector = ice_app_sel_type;
+ dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
+ app_index++;
+ }
+ }
+
+ dcbcfg->numapps = app_index;
+}
+
+/**
+ * ice_get_ieee_or_cee_dcb_cfg
+ * @pi: port information structure
+ * @dcbx_mode: mode of DCBX (IEEE or CEE)
+ *
+ * Get IEEE or CEE mode DCB configuration from the Firmware
+ */
+STATIC enum ice_status
+ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = NULL;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ if (dcbx_mode == ICE_DCBX_MODE_IEEE)
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ else if (dcbx_mode == ICE_DCBX_MODE_CEE)
+ dcbx_cfg = &pi->desired_dcbx_cfg;
+
+ /* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
+ * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
+ */
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_LOCAL,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ dcbx_cfg = &pi->remote_dcbx_cfg;
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ ret = ICE_SUCCESS;
+
+out:
+ return ret;
+}
+
+/**
+ * ice_get_dcb_cfg
+ * @pi: port information structure
+ *
+ * Get DCB configuration from the Firmware
+ */
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct ice_dcbx_cfg *dcbx_cfg;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
+ if (ret == ICE_SUCCESS) {
+ /* CEE mode */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+ dcbx_cfg->tlv_status = LE32_TO_CPU(cee_cfg.tlv_status);
+ ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+ } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+		/* CEE mode not enabled; try querying IEEE data */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_init_dcb
+ * @hw: pointer to the HW struct
+ * @enable_mib_change: enable MIB change event
+ *
+ * Update DCB configuration from the Firmware
+ */
+enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status ret = ICE_SUCCESS;
+
+ if (!hw->func_caps.common_cap.dcb)
+ return ICE_ERR_NOT_SUPPORTED;
+
+ pi->is_sw_lldp = true;
+
+ /* Get DCBX status */
+ pi->dcbx_status = ice_get_dcbx_status(hw);
+
+ if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
+ pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+ pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+ /* Get current DCBX configuration */
+ ret = ice_get_dcb_cfg(pi);
+ if (ret)
+ return ret;
+ pi->is_sw_lldp = false;
+ } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
+ return ICE_ERR_NOT_READY;
+ }
+
+ /* Configure the LLDP MIB change event */
+ if (enable_mib_change) {
+ ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
+ if (ret)
+ pi->is_sw_lldp = true;
+ }
+
+ return ret;
+}
+
+/**
+ * ice_cfg_lldp_mib_change
+ * @hw: pointer to the HW struct
+ * @ena_mib: enable/disable MIB change event
+ *
+ * Configure (enable or disable) the LLDP MIB change event
+ */
+enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status ret;
+
+ if (!hw->func_caps.common_cap.dcb)
+ return ICE_ERR_NOT_SUPPORTED;
+
+ /* Get DCBX status */
+ pi->dcbx_status = ice_get_dcbx_status(hw);
+
+ if (pi->dcbx_status == ICE_DCBX_STATUS_DIS)
+ return ICE_ERR_NOT_READY;
+
+ ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
+ if (!ret)
+ pi->is_sw_lldp = !ena_mib;
+
+ return ret;
+}
+
+/**
+ * ice_add_ieee_ets_common_tlv
+ * @buf: Data buffer to be populated with ice_dcb_ets_cfg data
+ * @ets_cfg: Container for ice_dcb_ets_cfg data
+ *
+ * Populate the TLV buffer with ice_dcb_ets_cfg data
+ */
+static void
+ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 priority0, priority1;
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ priority0 = ets_cfg->prio_table[i * 2] & 0xF;
+ priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ buf[offset] = ets_cfg->tcbwtable[i];
+ buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i];
+ offset++;
+ }
+}
+
+/**
+ * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u8 maxtcwilling = 0;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = HTONS(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = HTONL(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S);
+ maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
+ buf[0] = maxtcwilling;
+
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etsrec;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = HTONS(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = HTONL(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+
+ /* First Octet is reserved */
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etsrec);
+}
+
+/**
+ * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store which holds the PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = HTONS(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = HTONL(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(ICE_IEEE_PFC_WILLING_S);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(ICE_IEEE_PFC_MBC_S);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
+}
+
+/**
+ * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store which holds the APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ */
+static void
+ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 typelen, len, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs then just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = HTONL(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+ /* len includes size of ouisubtype + 1 reserved + 3*numapps */
+ len = sizeof(tlv->ouisubtype) + 1 + (i * 3);
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF));
+ tlv->typelen = HTONS(typelen);
+}
+
+/**
+ * ice_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: Fill TLV data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: Type of IEEE TLV
+ *
+ * Add tlv information
+ */
+static void
+ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case ICE_IEEE_TLV_ID_ETS_CFG:
+ ice_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_ETS_REC:
+ ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_PFC_CFG:
+ ice_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_APP_PRI:
+ ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to the buffer in which the LLDP MIB is built
+ * @miblen: length of LLDP MIB
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Convert the DCB configuration to MIB format
+ */
+void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, offset = 0, tlvid = ICE_TLV_ID_START;
+ struct ice_lldp_org_tlv *tlv;
+ u16 typelen;
+
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
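+	/* Build the MIB one TLV at a time, walking the TLV IDs from
+	 * ICE_TLV_ID_START until ICE_TLV_ID_END_OF_LLDPPDU is reached;
+	 * offset accounts for each TLV's 2-byte typelen header plus its
+	 * payload.
+	 */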
+ while (1) {
+ ice_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelen = NTOHS(tlv->typelen);
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
+ if (len)
+ offset += len + 2;
+ /* END TLV or beyond LLDPDU size */
+ if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU ||
+ offset > ICE_LLDPDU_SIZE)
+ break;
+ /* Move to next TLV */
+ if (len)
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+ *miblen = offset;
+}
+
+/**
+ * ice_set_dcb_cfg - Set the local LLDP MIB to FW
+ * @pi: port information structure
+ *
+ * Set DCB configuration to the Firmware
+ */
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+{
+ u8 mib_type, *lldpmib = NULL;
+ struct ice_dcbx_cfg *dcbcfg;
+ enum ice_status ret;
+ struct ice_hw *hw;
+ u16 miblen;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ /* update the HW local config */
+ dcbcfg = &pi->local_dcbx_cfg;
+ /* Allocate the LLDPDU */
+ lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+ mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING;
+
+ ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen,
+ NULL);
+
+ ice_free(hw, lldpmib);
+
+ return ret;
+}
+
+/**
+ * ice_aq_query_port_ets - query port ETS configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * query current port ETS configuration
+ */
+enum ice_status
+ice_aq_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_query_port_ets *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ cmd = &desc.params.port_ets;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
+ cmd->port_teid = pi->root->info.node_teid;
+
+ status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd);
+ return status;
+}
+
+/**
+ * ice_update_port_tc_tree_cfg - update TC tree configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ *
+ * update the SW DB with the new TC changes
+ */
+enum ice_status
+ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf)
+{
+ struct ice_sched_node *node, *tc_node;
+ struct ice_aqc_get_elem elem;
+ enum ice_status status = ICE_SUCCESS;
+ u32 teid1, teid2;
+ u8 i, j;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ /* suspend the missing TC nodes */
+ for (i = 0; i < pi->root->num_children; i++) {
+ teid1 = LE32_TO_CPU(pi->root->children[i]->info.node_teid);
+ ice_for_each_traffic_class(j) {
+ teid2 = LE32_TO_CPU(buf->tc_node_teid[j]);
+ if (teid1 == teid2)
+ break;
+ }
+ if (j < ICE_MAX_TRAFFIC_CLASS)
+ continue;
+ /* TC is missing */
+ pi->root->children[i]->in_use = false;
+ }
+ /* add the new TC nodes */
+ ice_for_each_traffic_class(j) {
+ teid2 = LE32_TO_CPU(buf->tc_node_teid[j]);
+ if (teid2 == ICE_INVAL_TEID)
+ continue;
+ /* Is it already present in the tree? */
+ for (i = 0; i < pi->root->num_children; i++) {
+ tc_node = pi->root->children[i];
+ if (!tc_node)
+ continue;
+ teid1 = LE32_TO_CPU(tc_node->info.node_teid);
+ if (teid1 == teid2) {
+ tc_node->tc_num = j;
+ tc_node->in_use = true;
+ break;
+ }
+ }
+ if (i < pi->root->num_children)
+ continue;
+ /* new TC */
+ status = ice_sched_query_elem(pi->hw, teid2, &elem);
+ if (!status)
+ status = ice_sched_add_node(pi, 1, &elem.generic[0]);
+ if (status)
+ break;
+ /* update the TC number */
+ node = ice_sched_find_node_by_teid(pi->root, teid2);
+ if (node)
+ node->tc_num = j;
+ }
+ return status;
+}
+
+/**
+ * ice_query_port_ets - query port ETS configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * query current port ETS configuration and update the
+ * SW DB with the TC changes
+ */
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ enum ice_status status;
+
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
+ if (!status)
+ status = ice_update_port_tc_tree_cfg(pi, buf);
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
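A minimal sketch of how a caller might use the helper above to refresh the software TC tree; port_info is assumed to be a valid struct ice_port_info pointer obtained during device attach, and the error handling is illustrative only:

	struct ice_aqc_port_ets_elem port_ets = { 0 };
	enum ice_status status;

	status = ice_query_port_ets(port_info, &port_ets, sizeof(port_ets), NULL);
	if (status) {
		/* query failed; the SW TC tree was not updated */
	}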
Index: sys/dev/ice/ice_devids.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_devids.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_DEVIDS_H_
+#define _ICE_DEVIDS_H_
+
+/* Device IDs */
+/* Intel(R) Ethernet Connection E823-L for backplane */
+#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
+/* Intel(R) Ethernet Connection E823-L for SFP */
+#define ICE_DEV_ID_E823L_SFP 0x124D
+/* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E823L_10G_BASE_T 0x124E
+/* Intel(R) Ethernet Connection E823-L 1GbE */
+#define ICE_DEV_ID_E823L_1GBE 0x124F
+/* Intel(R) Ethernet Connection E823-L for QSFP */
+#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E810-C for backplane */
+#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
+/* Intel(R) Ethernet Controller E810-C for QSFP */
+#define ICE_DEV_ID_E810C_QSFP 0x1592
+/* Intel(R) Ethernet Controller E810-C for SFP */
+#define ICE_DEV_ID_E810C_SFP 0x1593
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
+/* Intel(R) Ethernet Controller E810-XXV for SFP */
+#define ICE_DEV_ID_E810_XXV_SFP 0x159B
+/* Intel(R) Ethernet Connection E822-C for backplane */
+#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
+/* Intel(R) Ethernet Connection E822-C for QSFP */
+#define ICE_DEV_ID_E822C_QSFP 0x1891
+/* Intel(R) Ethernet Connection E822-C for SFP */
+#define ICE_DEV_ID_E822C_SFP 0x1892
+/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
+/* Intel(R) Ethernet Connection E822-C 1GbE */
+#define ICE_DEV_ID_E822C_SGMII 0x1894
+/* Intel(R) Ethernet Connection E822-L for backplane */
+#define ICE_DEV_ID_E822L_BACKPLANE 0x1897
+/* Intel(R) Ethernet Connection E822-L for SFP */
+#define ICE_DEV_ID_E822L_SFP 0x1898
+/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
+#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
+/* Intel(R) Ethernet Connection E822-L 1GbE */
+#define ICE_DEV_ID_E822L_SGMII 0x189A
+
+#endif /* _ICE_DEVIDS_H_ */
Index: sys/dev/ice/ice_drv_info.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_drv_info.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_drv_info.h
+ * @brief device IDs and driver version
+ *
+ * Contains the device ID tables and the driver version string.
+ *
+ * This file contains static or constant definitions intended to be included
+ * exactly once in the main driver interface file. It implicitly depends on
+ * the main driver header file.
+ *
+ * These definitions could be placed directly in the interface file, but are
+ * kept separate for organizational purposes.
+ */
+
+/**
+ * @var ice_driver_version
+ * @brief driver version string
+ *
+ * Driver version information, used for display as part of an informational
+ * sysctl, and as part of the driver information sent to the firmware at load.
+ *
+ * @var ice_major_version
+ * @brief driver major version number
+ *
+ * @var ice_minor_version
+ * @brief driver minor version number
+ *
+ * @var ice_patch_version
+ * @brief driver patch version number
+ *
+ * @var ice_rc_version
+ * @brief driver release candidate version number
+ */
+const char ice_driver_version[] = "0.26.0-k";
+const uint8_t ice_major_version = 0;
+const uint8_t ice_minor_version = 26;
+const uint8_t ice_patch_version = 0;
+const uint8_t ice_rc_version = 0;
+
+#define PVIDV(vendor, devid, name) \
+ PVID(vendor, devid, name " - 0.26.0-k")
+#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
+ PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 0.26.0-k")
+
+/**
+ * @var ice_vendor_info_array
+ * @brief array of PCI devices supported by this driver
+ *
+ * Array of PCI devices which are supported by this driver. Used to determine
+ * whether a given device should be loaded by this driver. This information is
+ * also exported as part of the module information for other tools to analyze.
+ *
+ * @remark Each type of device ID needs to be listed from most-specific entry
+ * to most-generic entry; e.g. PVIDV_OEM()s for a device ID must come before
+ * the PVIDV() for it.
+ */
+static pci_vendor_info_t ice_vendor_info_array[] = {
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE,
+ "Intel(R) Ethernet Controller E810-C for backplane"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0002, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0005, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0006, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0007, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ ICE_INTEL_VENDOR_ID, 0x0008, 0,
+ "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP,
+ "Intel(R) Ethernet Controller E810-C for QSFP"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E810-L-1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0002, 0,
+ "Intel(R) Ethernet Network Adapter E810-L-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E810-L-1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E810-L-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0005, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-4"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0006, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-4"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0007, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-4"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0008, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0009, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 2.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP,
+ "Intel(R) Ethernet Controller E810-C for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE,
+ "Intel(R) Ethernet Connection E822-C for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP,
+ "Intel(R) Ethernet Connection E822-C for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP,
+ "Intel(R) Ethernet Connection E822-C for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T,
+ "Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII,
+ "Intel(R) Ethernet Connection E822-C 1GbE"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE,
+ "Intel(R) Ethernet Connection E822-L for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP,
+ "Intel(R) Ethernet Connection E822-L for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T,
+ "Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII,
+ "Intel(R) Ethernet Connection E822-L 1GbE"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE,
+ "Intel(R) Ethernet Connection E823-L for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP,
+ "Intel(R) Ethernet Connection E823-L for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP,
+ "Intel(R) Ethernet Connection E823-L for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T,
+ "Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE,
+ "Intel(R) Ethernet Connection E823-L 1GbE"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE,
+ "Intel(R) Ethernet Controller E810-XXV for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP,
+ "Intel(R) Ethernet Controller E810-XXV for QSFP"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0005, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0006, 0,
+ "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
+ "Intel(R) Ethernet Controller E810-XXV for SFP"),
+ PVID_END
+};
+
Index: sys/dev/ice/ice_features.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_features.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_features.h
+ * @brief device feature controls
+ *
+ * Contains a list of various device features which could be enabled or
+ * disabled.
+ */
+
+#ifndef _ICE_FEATURES_H_
+#define _ICE_FEATURES_H_
+
+/**
+ * @enum feat_list
+ * @brief driver feature enumeration
+ *
+ * Enumeration of device driver features that can be enabled or disabled. Each
+ * value represents a different feature.
+ *
+ * The driver stores a bitmap of the features that the device and OS are
+ * capable of, as well as another bitmap indicating which features are
+ * currently enabled for that device.
+ */
+enum feat_list {
+ ICE_FEATURE_SRIOV,
+ ICE_FEATURE_RSS,
+ ICE_FEATURE_NETMAP,
+ ICE_FEATURE_FDIR,
+ ICE_FEATURE_MSI,
+ ICE_FEATURE_MSIX,
+ ICE_FEATURE_RDMA,
+ ICE_FEATURE_SAFE_MODE,
+ ICE_FEATURE_LENIENT_LINK_MODE,
+ ICE_FEATURE_DEFAULT_OVERRIDE,
+ /* Must be last entry */
+ ICE_FEATURE_COUNT
+};
+
+/**
+ * ice_disable_unsupported_features - Disable features not enabled by OS
+ * @bitmap: the feature bitmap
+ *
+ * Check for OS support of various driver features. Clear the feature bit for
+ * any feature which is not enabled by the OS. This should be called early
+ * during driver attach after setting up the feature bitmap.
+ *
+ * @remark the bitmap parameter is marked as unused in order to avoid an
+ * unused parameter warning in case none of the features need to be disabled.
+ */
+static inline void
+ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap)
+{
+ ice_clear_bit(ICE_FEATURE_SRIOV, bitmap);
+#ifndef DEV_NETMAP
+ ice_clear_bit(ICE_FEATURE_NETMAP, bitmap);
+#endif
+}
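A rough sketch of how the capability and enable bitmaps described above might be handled during attach. The names feat_cap and feat_en are illustrative, and ice_declare_bitmap, ice_set_bit and ice_is_bit_set are assumed to be the driver's bitmap helpers:

	ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT); /* what could be supported */
	ice_declare_bitmap(feat_en, ICE_FEATURE_COUNT);  /* what is actually enabled */

	ice_set_bit(ICE_FEATURE_RSS, feat_cap);
	ice_set_bit(ICE_FEATURE_NETMAP, feat_cap);

	/* drop features the OS build cannot support */
	ice_disable_unsupported_features(feat_cap);

	if (ice_is_bit_set(feat_cap, ICE_FEATURE_RSS))
		ice_set_bit(ICE_FEATURE_RSS, feat_en);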
+
+#endif
Index: sys/dev/ice/ice_flex_pipe.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_flex_pipe.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_FLEX_PIPE_H_
+#define _ICE_FLEX_PIPE_H_
+
+#include "ice_type.h"
+
+/* Package minimal version supported */
+#define ICE_PKG_SUPP_VER_MAJ 1
+#define ICE_PKG_SUPP_VER_MNR 3
+
+/* Package format version */
+#define ICE_PKG_FMT_VER_MAJ 1
+#define ICE_PKG_FMT_VER_MNR 0
+#define ICE_PKG_FMT_VER_UPD 0
+#define ICE_PKG_FMT_VER_DFT 0
+
+#define ICE_PKG_CNT 4
+
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_change_lock(struct ice_hw *hw);
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off);
+enum ice_status
+ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
+ u16 *value);
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
+ ice_bitmap_t *bm);
+void
+ice_init_prof_result_bm(struct ice_hw *hw);
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd);
+
+enum ice_status
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *port);
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+bool
+ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type);
+enum ice_status ice_replay_tunnels(struct ice_hw *hw);
+
+/* XLT1/PType group functions */
+enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
+void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg);
+
+/* XLT2/VSI group functions */
+enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
+enum ice_status
+ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+ struct ice_fv_word *es);
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
+enum ice_status
+ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig);
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
+struct ice_prof_map *
+ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
+struct ice_prof_map *
+ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
+enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
+enum ice_status
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+void ice_free_seg(struct ice_hw *hw);
+void ice_fill_blk_tbls(struct ice_hw *hw);
+void ice_clear_hw_tbls(struct ice_hw *hw);
+void ice_free_hw_tbls(struct ice_hw *hw);
+enum ice_status
+ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+ u64 id);
+enum ice_status
+ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+ u64 id);
+enum ice_status
+ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+
+enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+ u16 len);
+
+#endif /* _ICE_FLEX_PIPE_H_ */
Index: sys/dev/ice/ice_flex_pipe.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_flex_pipe.c
@@ -0,0 +1,5630 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+#include "ice_flex_pipe.h"
+#include "ice_protocol_type.h"
+#include "ice_flow.h"
+
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+static const struct ice_tunnel_type_scan tnls[] = {
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_LAST, "" }
+};
+
+static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
+ /* SWITCH */
+ {
+ ICE_SID_XLT0_SW,
+ ICE_SID_XLT_KEY_BUILDER_SW,
+ ICE_SID_XLT1_SW,
+ ICE_SID_XLT2_SW,
+ ICE_SID_PROFID_TCAM_SW,
+ ICE_SID_PROFID_REDIR_SW,
+ ICE_SID_FLD_VEC_SW,
+ ICE_SID_CDID_KEY_BUILDER_SW,
+ ICE_SID_CDID_REDIR_SW
+ },
+
+ /* ACL */
+ {
+ ICE_SID_XLT0_ACL,
+ ICE_SID_XLT_KEY_BUILDER_ACL,
+ ICE_SID_XLT1_ACL,
+ ICE_SID_XLT2_ACL,
+ ICE_SID_PROFID_TCAM_ACL,
+ ICE_SID_PROFID_REDIR_ACL,
+ ICE_SID_FLD_VEC_ACL,
+ ICE_SID_CDID_KEY_BUILDER_ACL,
+ ICE_SID_CDID_REDIR_ACL
+ },
+
+ /* FD */
+ {
+ ICE_SID_XLT0_FD,
+ ICE_SID_XLT_KEY_BUILDER_FD,
+ ICE_SID_XLT1_FD,
+ ICE_SID_XLT2_FD,
+ ICE_SID_PROFID_TCAM_FD,
+ ICE_SID_PROFID_REDIR_FD,
+ ICE_SID_FLD_VEC_FD,
+ ICE_SID_CDID_KEY_BUILDER_FD,
+ ICE_SID_CDID_REDIR_FD
+ },
+
+ /* RSS */
+ {
+ ICE_SID_XLT0_RSS,
+ ICE_SID_XLT_KEY_BUILDER_RSS,
+ ICE_SID_XLT1_RSS,
+ ICE_SID_XLT2_RSS,
+ ICE_SID_PROFID_TCAM_RSS,
+ ICE_SID_PROFID_REDIR_RSS,
+ ICE_SID_FLD_VEC_RSS,
+ ICE_SID_CDID_KEY_BUILDER_RSS,
+ ICE_SID_CDID_REDIR_RSS
+ },
+
+ /* PE */
+ {
+ ICE_SID_XLT0_PE,
+ ICE_SID_XLT_KEY_BUILDER_PE,
+ ICE_SID_XLT1_PE,
+ ICE_SID_XLT2_PE,
+ ICE_SID_PROFID_TCAM_PE,
+ ICE_SID_PROFID_REDIR_PE,
+ ICE_SID_FLD_VEC_PE,
+ ICE_SID_CDID_KEY_BUILDER_PE,
+ ICE_SID_CDID_REDIR_PE
+ }
+};
+
+/**
+ * ice_sect_id - returns section ID
+ * @blk: block type
+ * @sect: section type
+ *
+ * This helper function returns the proper section ID given a block type and a
+ * section type.
+ */
+static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
+{
+ return ice_sect_lkup[blk][sect];
+}
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+ struct ice_buf_hdr *hdr;
+ u16 section_count;
+ u16 data_end;
+
+ hdr = (struct ice_buf_hdr *)buf->buf;
+ /* verify data */
+ section_count = LE16_TO_CPU(hdr->section_count);
+ if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+ return NULL;
+
+ data_end = LE16_TO_CPU(hdr->data_end);
+ if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ return hdr;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+ struct ice_nvm_table *nvms;
+
+ nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table +
+ LE32_TO_CPU(ice_seg->device_table_count));
+
+ return (_FORCE_ struct ice_buf_table *)
+ (nvms->vers + LE32_TO_CPU(nvms->table_count));
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL which continues the enumeration. When the function
+ * returns a NULL pointer, then the end of the buffers has been reached, or an
+ * unexpected value has been detected (for example an invalid section count or
+ * an invalid buffer end value).
+ */
+static struct ice_buf_hdr *
+ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (ice_seg) {
+ state->buf_table = ice_find_buf_table(ice_seg);
+ if (!state->buf_table)
+ return NULL;
+
+ state->buf_idx = 0;
+ return ice_pkg_val_buf(state->buf_table->buf_array);
+ }
+
+ if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count))
+ return ice_pkg_val_buf(state->buf_table->buf_array +
+ state->buf_idx);
+ else
+ return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+static bool
+ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (!ice_seg && !state->buf)
+ return false;
+
+ if (!ice_seg && state->buf)
+ if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count))
+ return true;
+
+ state->buf = ice_pkg_enum_buf(ice_seg, state);
+ if (!state->buf)
+ return false;
+
+ /* start of new buffer, reset section index */
+ state->sect_idx = 0;
+ return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+static void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type)
+{
+ u16 offset, size;
+
+ if (ice_seg)
+ state->type = sect_type;
+
+ if (!ice_pkg_advance_sect(ice_seg, state))
+ return NULL;
+
+ /* scan for next matching section */
+ while (state->buf->section_entry[state->sect_idx].type !=
+ CPU_TO_LE32(state->type))
+ if (!ice_pkg_advance_sect(NULL, state))
+ return NULL;
+
+ /* validate section */
+ offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+ if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+ return NULL;
+
+ size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+ return NULL;
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE)
+ return NULL;
+
+ state->sect_type =
+ LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type);
+
+ /* calc pointer to this section */
+ state->sect = ((u8 *)state->buf) +
+ LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset);
+
+ return state->sect;
+}
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries in the section type
+ *
+ * This function will enumerate all the entries in particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then the
+ * section handler function should set the offset to 10 + 2 = 12.
+ */
+static void *
+ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type, u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *
+ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = (struct ice_boost_tcam_section *)section;
+ if (index >= LE16_TO_CPU(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static enum ice_status
+ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ tcam = (struct ice_boost_tcam_entry *)
+ ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
+ *entry = tcam;
+ return ICE_SUCCESS;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *
+ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = (struct ice_label_section *)section;
+ if (index >= LE16_TO_CPU(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *
+ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
+ u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type,
+ NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = LE16_TO_CPU(label->value);
+ return label->name;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM);
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for matching label start, before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].in_use = false;
+ hw->tnl.tbl[hw->tnl.count].marked = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry)
+ hw->tnl.tbl[i].valid = true;
+ }
+}
+
+/* Key creation */
+
+#define ICE_DC_KEY 0x1 /* don't care */
+#define ICE_DC_KEYINV 0x1
+#define ICE_NM_KEY 0x0 /* never match */
+#define ICE_NM_KEYINV 0x0
+#define ICE_0_KEY 0x1 /* match 0 */
+#define ICE_0_KEYINV 0x0
+#define ICE_1_KEY 0x0 /* match 1 */
+#define ICE_1_KEYINV 0x1
+
+/**
+ * ice_gen_key_word - generate 16-bits of a key/mask word
+ * @val: the value
+ * @valid: valid bits mask (change only the valid bits)
+ * @dont_care: don't care mask
+ * @nvr_mtch: never match mask
+ * @key: pointer to the array where the resulting key portion is stored
+ * @key_inv: pointer to the array where the resulting key invert portion is stored
+ *
+ * This function generates 16 bits from an 8-bit value, an 8-bit don't care mask
+ * and an 8-bit never match mask. The 16 bits of output are divided into 8 bits
+ * of key and 8 bits of key invert.
+ *
+ * '0' = b01, always match a 0 bit
+ * '1' = b10, always match a 1 bit
+ * '?' = b11, don't care bit (always matches)
+ * '~' = b00, never match bit
+ *
+ * Input:
+ * val: b0 1 0 1 0 1
+ * dont_care: b0 0 1 1 0 0
+ * never_mtch: b0 0 0 0 1 1
+ * ------------------------------
+ * Result: key: b01 10 11 11 00 00
+ */
+static enum ice_status
+ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
+ u8 *key_inv)
+{
+ u8 in_key = *key, in_key_inv = *key_inv;
+ u8 i;
+
+ /* 'dont_care' and 'nvr_mtch' masks cannot overlap */
+ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
+ return ICE_ERR_CFG;
+
+ *key = 0;
+ *key_inv = 0;
+
+ /* encode the 8 bits into 8-bit key and 8-bit key invert */
+ for (i = 0; i < 8; i++) {
+ *key >>= 1;
+ *key_inv >>= 1;
+
+ if (!(valid & 0x1)) { /* change only valid bits */
+ *key |= (in_key & 0x1) << 7;
+ *key_inv |= (in_key_inv & 0x1) << 7;
+ } else if (dont_care & 0x1) { /* don't care bit */
+ *key |= ICE_DC_KEY << 7;
+ *key_inv |= ICE_DC_KEYINV << 7;
+ } else if (nvr_mtch & 0x1) { /* never match bit */
+ *key |= ICE_NM_KEY << 7;
+ *key_inv |= ICE_NM_KEYINV << 7;
+ } else if (val & 0x01) { /* exact 1 match */
+ *key |= ICE_1_KEY << 7;
+ *key_inv |= ICE_1_KEYINV << 7;
+ } else { /* exact 0 match */
+ *key |= ICE_0_KEY << 7;
+ *key_inv |= ICE_0_KEYINV << 7;
+ }
+
+ dont_care >>= 1;
+ nvr_mtch >>= 1;
+ valid >>= 1;
+ val >>= 1;
+ in_key >>= 1;
+ in_key_inv >>= 1;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_bits_max_set - determine if the number of bits set is within a maximum
+ * @mask: pointer to the byte array which is the mask
+ * @size: the number of bytes in the mask
+ * @max: the max number of set bits
+ *
+ * This function determines if there are at most 'max' bits set in an
+ * array. Returns true if the number of bits set is <= max, or false
+ * otherwise.
+ */
+static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
+{
+ u16 count = 0;
+ u16 i;
+
+ /* check each byte */
+ for (i = 0; i < size; i++) {
+ /* if 0, go to next byte */
+ if (!mask[i])
+ continue;
+
+ /* We know there is at least one set bit in this byte because of
+ * the above check; if we already have found 'max' number of
+ * bits set, then we can return failure now.
+ */
+ if (count == max)
+ return false;
+
+ /* count the bits in this byte, checking threshold */
+ count += ice_hweight8(mask[i]);
+ if (count > max)
+ return false;
+ }
+
+ return true;
+}
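For example, with a two-byte mask and max set to 1 (example values only):

	const u8 one_bit[2] = { 0x10, 0x00 };	/* a single bit set */
	const u8 two_bits[2] = { 0x10, 0x01 };	/* two bits set */

	/* ice_bits_max_set(one_bit, 2, 1) returns true;
	 * ice_bits_max_set(two_bits, 2, 1) returns false
	 */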
+
+/**
+ * ice_set_key - generate a variable sized key with multiples of 16-bits
+ * @key: pointer to where the key will be stored
+ * @size: the size of the complete key in bytes (must be even)
+ * @val: array of 8-bit values that makes up the value portion of the key
+ * @upd: array of 8-bit masks that determine what key portion to update
+ * @dc: array of 8-bit masks that make up the don't care mask
+ * @nm: array of 8-bit masks that make up the never match mask
+ * @off: the offset of the first byte in the key to update
+ * @len: the number of bytes in the key update
+ *
+ * This function generates a key from a value, a don't care mask and a never
+ * match mask.
+ * upd, dc, and nm are optional parameters, and can be NULL:
+ * upd == NULL --> upd mask is all 1's (update all bits)
+ * dc == NULL --> dc mask is all 0's (no don't care bits)
+ * nm == NULL --> nm mask is all 0's (no never match bits)
+ */
+enum ice_status
+ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
+ u16 len)
+{
+ u16 half_size;
+ u16 i;
+
+ /* size must be a multiple of 2 bytes. */
+ if (size % 2)
+ return ICE_ERR_CFG;
+ half_size = size / 2;
+
+ if (off + len > half_size)
+ return ICE_ERR_CFG;
+
+ /* Make sure at most one bit is set in the never match mask. Having more
+ * than one never match mask bit set will cause the HW to consume excessive
+ * power; this is a power management efficiency check.
+ */
+#define ICE_NVR_MTCH_BITS_MAX 1
+ if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
+ return ICE_ERR_CFG;
+
+ for (i = 0; i < len; i++)
+ if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
+ dc ? dc[i] : 0, nm ? nm[i] : 0,
+ key + off + i, key + half_size + off + i))
+ return ICE_ERR_CFG;
+
+ return ICE_SUCCESS;
+}
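A minimal sketch of calling the helper above with illustrative sizes and values; ice_example_build_key is a hypothetical wrapper, and the only real requirements are that the key size is even and that off + len fits within half of it:

	static enum ice_status
	ice_example_build_key(void)
	{
		u8 key[8] = { 0 };		/* 4 key bytes + 4 key invert bytes */
		u8 val[2] = { 0x12, 0x34 };	/* value to match */
		u8 dc[2] = { 0x00, 0x0F };	/* low nibble of 2nd byte is don't care */

		/* upd == NULL updates every bit; nm == NULL means no never match bits */
		return ice_set_key(key, sizeof(key), val, NULL, dc, NULL, 0,
				   sizeof(val));
	}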
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * ICE_SUCCESS - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+static enum ice_status
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access)
+{
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+ if (status == ICE_ERR_AQ_NO_WORK)
+ ice_debug(hw, ICE_DBG_PKG,
+ "Global config lock: No work to do\n");
+
+ return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+static void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+ ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_acquire_change_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the change lock.
+ */
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
+ ICE_CHANGE_LOCK_TIMEOUT);
+}
+
+/**
+ * ice_release_change_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the change lock using the proper Admin Command.
+ */
+void ice_release_change_lock(struct ice_hw *hw)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
+}
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+static enum ice_status
+ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+enum ice_status
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static enum ice_status
+ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
+ bool last_buf, u32 *error_offset, u32 *error_info,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+static struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) {
+ struct ice_generic_seg_hdr *seg;
+
+ seg = (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i]));
+
+ if (LE32_TO_CPU(seg->seg_type) == seg_type)
+ return seg;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+enum ice_status
+ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+ u32 offset, info, i;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+
+ status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware. Metadata buffers are skipped, and the first metadata buffer
+ * found indicates that the rest of the buffers are all metadata buffers.
+ */
+static enum ice_status
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+ struct ice_buf_hdr *bh;
+ u32 offset, info, i;
+
+ if (!bufs || !count)
+ return ICE_ERR_PARAM;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_SUCCESS;
+
+ /* reset pkg_dwnld_status in case this function is called in the
+ * reset/rebuild flow
+ */
+ hw->pkg_dwnld_status = ICE_AQ_RC_OK;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == ICE_ERR_AQ_NO_WORK)
+ hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
+ else
+ hw->pkg_dwnld_status = hw->adminq.sq_last_status;
+ return status;
+ }
+
+ for (i = 0; i < count; i++) {
+ bool last = ((i + 1) == count);
+
+ if (!last) {
+ /* check next buffer for metadata flag */
+ bh = (struct ice_buf_hdr *)(bufs + i + 1);
+
+ /* A set metadata flag in the next buffer will signal
+ * that the current buffer will be the last buffer
+ * downloaded
+ */
+ if (LE16_TO_CPU(bh->section_count))
+ if (LE32_TO_CPU(bh->section_entry[0].type) &
+ ICE_METADATA_BUF)
+ last = true;
+ }
+
+ bh = (struct ice_buf_hdr *)(bufs + i);
+
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+ &offset, &info, NULL);
+
+ /* Save AQ status from download package */
+ hw->pkg_dwnld_status = hw->adminq.sq_last_status;
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Pkg download failed: err %d off %d inf %d\n",
+ status, offset, info);
+
+ break;
+ }
+
+ if (last)
+ break;
+ }
+
+ ice_release_global_cfg_lock(hw);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static enum ice_status
+ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_status
+ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ LE32_TO_CPU(ice_seg->hdr.seg_type),
+ LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ LE32_TO_CPU(ice_buf_tbl->buf_count));
+
+ return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ LE32_TO_CPU(ice_buf_tbl->buf_count));
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the HW structure.
+ */
+static enum ice_status
+ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_global_metadata_seg *meta_seg;
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+ if (!pkg_hdr)
+ return ICE_ERR_PARAM;
+
+ meta_seg = (struct ice_global_metadata_seg *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
+ if (meta_seg) {
+ hw->pkg_ver = meta_seg->pkg_ver;
+ ice_memcpy(hw->pkg_name, meta_seg->pkg_name,
+ sizeof(hw->pkg_name), ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+ meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
+ meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
+ meta_seg->pkg_name);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find metadata segment in driver package\n");
+ return ICE_ERR_CFG;
+ }
+
+ seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ if (seg_hdr) {
+ hw->ice_pkg_ver = seg_hdr->seg_format_ver;
+ ice_memcpy(hw->ice_pkg_name, seg_hdr->seg_id,
+ sizeof(hw->ice_pkg_name), ICE_NONDMA_TO_NONDMA);
+
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft,
+ seg_hdr->seg_id);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find ice segment in driver package\n");
+ return ICE_ERR_CFG;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg_info;
+ enum ice_status status;
+ u16 size;
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT - 1);
+ pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+ if (!pkg_info)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
+ if (status)
+ goto init_pkg_free_alloc;
+
+ for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT 4
+ char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+ u8 place = 0;
+
+ if (pkg_info->pkg_info[i].is_active) {
+ flags[place++] = 'A';
+ hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ LE32_TO_CPU(pkg_info->pkg_info[i].track_id);
+ ice_memcpy(hw->active_pkg_name,
+ pkg_info->pkg_info[i].name,
+ sizeof(pkg_info->pkg_info[i].name),
+ ICE_NONDMA_TO_NONDMA);
+ hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
+ }
+ if (pkg_info->pkg_info[i].is_active_at_boot)
+ flags[place++] = 'B';
+ if (pkg_info->pkg_info[i].is_modified)
+ flags[place++] = 'M';
+ if (pkg_info->pkg_info[i].is_in_nvm)
+ flags[place++] = 'N';
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
+ i, pkg_info->pkg_info[i].ver.major,
+ pkg_info->pkg_info[i].ver.minor,
+ pkg_info->pkg_info[i].ver.update,
+ pkg_info->pkg_info[i].ver.draft,
+ pkg_info->pkg_info[i].name, flags);
+ }
+
+init_pkg_free_alloc:
+ ice_free(hw, pkg_info);
+
+ return status;
+}
+
+/**
+ * ice_find_label_value
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @name: name of the label to search for
+ * @type: the section type that will contain the label
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Finds a label's value given the label name and the section type to search.
+ * The ice_seg parameter must not be NULL since the first call to
+ * ice_enum_labels requires a pointer to an actual ice_seg structure.
+ */
+enum ice_status
+ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
+ u16 *value)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ice_seg)
+ return ICE_ERR_PARAM;
+
+ do {
+ label_name = ice_enum_labels(ice_seg, type, &state, &val);
+ if (label_name && !strcmp(label_name, name)) {
+ *value = val;
+ return ICE_SUCCESS;
+ }
+
+ ice_seg = NULL;
+ } while (label_name);
+
+ return ICE_ERR_CFG;
+}
+
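+/*
+ * Usage sketch (illustrative only, not part of the driver flow): assuming an
+ * initialized hw->seg, a label's value can be looked up by name within a
+ * given label section type. The label name, the label_sect_id variable, and
+ * the use_label() consumer below are hypothetical.
+ *
+ *	u16 val;
+ *
+ *	if (!ice_find_label_value(hw->seg, "HYPOTHETICAL_LABEL", label_sect_id,
+ *				  &val))
+ *		use_label(val);
+ */
+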
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+ u32 seg_count;
+ u32 i;
+
+ if (len < sizeof(*pkg))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ return ICE_ERR_CFG;
+
+ /* pkg must have at least one segment */
+ seg_count = LE32_TO_CPU(pkg->seg_count);
+ if (seg_count < 1)
+ return ICE_ERR_CFG;
+
+ /* make sure segment array fits in package length */
+ if (len < ice_struct_size(pkg, seg_offset, seg_count - 1))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ /* all segments must fit within length */
+ for (i = 0; i < seg_count; i++) {
+ u32 off = LE32_TO_CPU(pkg->seg_offset[i]);
+ struct ice_generic_seg_hdr *seg;
+
+ /* segment header must fit */
+ if (len < off + sizeof(*seg))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+ /* segment body must fit */
+ if (len < off + LE32_TO_CPU(seg->seg_size))
+ return ICE_ERR_BUF_TOO_SHORT;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on whether
+ * the segment was allocated or just the passed-in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+ if (hw->pkg_copy) {
+ ice_free(hw, hw->pkg_copy);
+ hw->pkg_copy = NULL;
+ hw->pkg_size = 0;
+ }
+ hw->seg = NULL;
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX 0
+
+ /* setup Switch block input mask, which is 48-bits in two parts */
+ wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+ wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
+ */
+static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
+ pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
+ return ICE_ERR_NOT_SUPPORTED;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with driver and NVM
+ */
+static enum ice_status
+ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_status status;
+ u16 size;
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Check package version compatibility */
+ status = ice_chk_pkg_version(&hw->pkg_ver);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return status;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+ pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size);
+ if (!pkg)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
+ if (status)
+ goto fw_ddp_compat_free_alloc;
+
+ for (i = 0; i < LE32_TO_CPU(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ status = ICE_ERR_FW_DDP_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT,
+ "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ ice_free(hw, pkg);
+ return status;
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ struct ice_pkg_hdr *pkg;
+ enum ice_status status;
+ struct ice_seg *seg;
+
+ if (!buf || !len)
+ return ICE_ERR_PARAM;
+
+ pkg = (struct ice_pkg_hdr *)buf;
+ status = ice_verify_pkg(pkg, len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ status);
+ return status;
+ }
+
+ /* initialize package info */
+ status = ice_init_pkg_info(hw, pkg);
+ if (status)
+ return status;
+
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ status = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (status)
+ return status;
+
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
+ status = ice_download_pkg(hw, seg);
+ if (status == ICE_ERR_AQ_NO_WORK) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "package previously loaded - no work.\n");
+ status = ICE_SUCCESS;
+ }
+
+ /* Get information on the package currently loaded in HW, then make sure
+ * the driver is compatible with this version.
+ */
+ if (!status) {
+ status = ice_get_pkg_info(hw);
+ if (!status)
+ status = ice_chk_pkg_version(&hw->active_pkg_ver);
+ }
+
+ if (!status) {
+ hw->seg = seg;
+ /* on successful package download update other required
+ * registers to support the package and fill HW tables
+ * with package content.
+ */
+ ice_init_pkg_regs(hw);
+ ice_fill_blk_tbls(hw);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer, and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copying is necessary if the package buffer supplied is constant, or if
+ * the memory may disappear shortly after calling this function.
+ *
+ * If the package buffer resides in the data segment and can be modified, the
+ * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
+ *
+ * However, if the package buffer needs to be copied first, such as when being
+ * read from a file, the caller should use ice_copy_and_init_pkg().
+ *
+ * This function will first copy the package buffer, before calling
+ * ice_init_pkg(). The caller is free to immediately destroy the original
+ * package buffer, as the new copy will be managed by this function and
+ * related routines.
+ */
+enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+{
+ enum ice_status status;
+ u8 *buf_copy;
+
+ if (!buf || !len)
+ return ICE_ERR_PARAM;
+
+ buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA);
+
+ status = ice_init_pkg(hw, buf_copy, len);
+ if (status) {
+ /* Free the copy, since we failed to initialize the package */
+ ice_free(hw, buf_copy);
+ } else {
+ /* Track the copied pkg so we can free it later */
+ hw->pkg_copy = buf_copy;
+ hw->pkg_size = len;
+ }
+
+ return status;
+}
+
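+/*
+ * Usage sketch (illustrative only): a PF driver that has read the DDP package
+ * image into a temporary buffer (for example via firmware_get(9)) can hand it
+ * to this layer as shown below; the buffer may be released as soon as the
+ * call returns, since ice_copy_and_init_pkg() keeps its own copy. pkg_data,
+ * pkg_len, and handle_pkg_failure() are hypothetical.
+ *
+ *	enum ice_status status;
+ *
+ *	status = ice_copy_and_init_pkg(hw, pkg_data, pkg_len);
+ *	if (status)
+ *		handle_pkg_failure(hw, status);
+ */
+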
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld));
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr,
+ section_entry));
+ return bld;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates the offset field. "offset" is an index into the field vector
+ * table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section =
+ (struct ice_sw_fv_section *)section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= LE16_TO_CPU(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+		 * 4k block. To get the true index into the field vector
+		 * table, add the relative index to the base_offset
+		 * field of this section
+ */
+ *offset = LE16_TO_CPU(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ */
+static enum ice_prof_type
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+{
+ u16 i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ fv->ew[i].off == ICE_VNI_OFFSET)
+ return ICE_PROF_TUN_UDP;
+
+ /* GRE tunnel will have GRE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+ return ICE_PROF_TUN_GRE;
+ }
+
+ return ICE_PROF_NON_TUN;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ ice_bitmap_t *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (req_profs == ICE_PROF_ALL) {
+ u16 i;
+
+ for (i = 0; i < ICE_MAX_NUM_PROFILES; i++)
+ ice_set_bit(i, bm);
+ return;
+ }
+
+ ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES);
+
+ ice_seg = hw->seg;
+ do {
+ enum ice_prof_type prof_type;
+ u32 offset;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ /* Determine field vector type */
+ prof_type = ice_get_sw_prof_type(hw, fv);
+
+ if (req_profs & prof_type)
+ ice_set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @prot_ids: field vector to search for with a given protocol ID
+ * @ids_cnt: lookup/protocol count
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol ID and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information.
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!ids_cnt || !hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!ice_is_bit_set(bm, (u16)offset))
+ continue;
+
+ for (i = 0; i < ids_cnt; i++) {
+ int j;
+
+ /* This code assumes that if a switch field vector line
+ * has a matching protocol, then this line will contain
+ * the entries necessary to represent every field in
+ * that protocol header.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id == prot_ids[i])
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == ids_cnt) {
+ fvl = (struct ice_sw_fv_list_entry *)
+ ice_malloc(hw, sizeof(*fvl));
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ LIST_ADD(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (LIST_EMPTY(fv_list))
+ return ICE_ERR_CFG;
+ return ICE_SUCCESS;
+
+err:
+ LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
+ list_entry) {
+ LIST_DEL(&fvl->list_entry);
+ ice_free(hw, fvl);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
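+/*
+ * Usage sketch (illustrative only): the typical flow is to build a bitmap of
+ * candidate profiles with ice_get_sw_fv_bitmap() and then collect the
+ * matching field vectors with ice_get_sw_fv_list(). The protocol ID array and
+ * consume_fv_list() below are hypothetical; the caller must free every list
+ * entry when done.
+ *
+ *	ice_declare_bitmap(bm, ICE_MAX_NUM_PROFILES);
+ *	struct LIST_HEAD_TYPE fv_list;
+ *	u8 prot_ids[] = { some_prot_id };
+ *
+ *	INIT_LIST_HEAD(&fv_list);
+ *	ice_get_sw_fv_bitmap(hw, ICE_PROF_NON_TUN, bm);
+ *	if (!ice_get_sw_fv_list(hw, prot_ids, 1, bm, &fv_list))
+ *		consume_fv_list(&fv_list);
+ */
+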
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = (struct ice_fv *)
+ ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ ice_zero_bitmap(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices, these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ ice_set_bit(i,
+ hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ ice_free(hw, bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as all calls are made before the
+ * first call to ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
+ * has been called, the number of sections that can be allocated can no longer
+ * be increased; not using all reserved sections is fine, but this will
+ * result in some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+static enum ice_status
+ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = LE16_TO_CPU(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries += count;
+
+ data_end = LE16_TO_CPU(buf->data_end) +
+ (count * sizeof(buf->section_entry[0]));
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffer's status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+static void *
+ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = LE16_TO_CPU(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ICE_ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = LE16_TO_CPU(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end);
+ buf->section_entry[sect_count].size = CPU_TO_LE16(size);
+ buf->section_entry[sect_count].type = CPU_TO_LE32(type);
+
+ data_end += size;
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ buf->section_count = CPU_TO_LE16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+static struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_unreserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to unreserve
+ *
+ * Unreserves one or more section table entries in a package buffer, releasing
+ * space that can be used for section data. This routine can be called
+ * multiple times as long as all calls are made before the first call to
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
+ * called, the number of sections that can be allocated can no longer be
+ * increased; not using all reserved sections is fine, but this will
+ * result in some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+enum ice_status
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return ICE_ERR_PARAM;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't decrease table size */
+ section_count = LE16_TO_CPU(buf->section_count);
+ if (section_count > 0)
+ return ICE_ERR_CFG;
+
+ if (count > bld->reserved_section_table_entries)
+ return ICE_ERR_CFG;
+ bld->reserved_section_table_entries -= count;
+
+ data_end = LE16_TO_CPU(buf->data_end) -
+ (count * sizeof(buf->section_entry[0]));
+ buf->data_end = CPU_TO_LE16(data_end);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_pkg_buf_get_free_space
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of free bytes remaining in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end);
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return LE16_TO_CPU(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (!bld)
+ return NULL;
+
+ return &bld->buf;
+}
+
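+/*
+ * Usage sketch (illustrative only): the package buffer helpers above are
+ * normally used together to build a single update-package request.
+ * sect_type, sect_size, and fill_section() are placeholders for a real
+ * section ID, its size, and the code that fills it (in Little Endian form).
+ *
+ *	struct ice_buf_build *bld;
+ *	enum ice_status status;
+ *	void *sect;
+ *
+ *	bld = ice_pkg_buf_alloc(hw);
+ *	if (!bld)
+ *		return ICE_ERR_NO_MEMORY;
+ *	status = ICE_ERR_CFG;
+ *	if (ice_pkg_buf_reserve_section(bld, 1))
+ *		goto out;
+ *	sect = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
+ *	if (!sect)
+ *		goto out;
+ *	fill_section(sect);
+ *	status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ * out:
+ *	ice_pkg_buf_free(hw, bld);
+ *	return status;
+ */
+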
+/**
+ * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_tunnel_port_in_use
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @index: optionally returns index
+ *
+ * Returns whether a port is already in use as a tunnel, and optionally its
+ * index
+ */
+bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
+{
+ bool res;
+
+ ice_acquire_lock(&hw->tnl_lock);
+ res = ice_tunnel_port_in_use_hlpr(hw, port, index);
+ ice_release_lock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_tunnel_get_type
+ * @hw: pointer to the HW structure
+ * @port: port to search for
+ * @type: returns tunnel type
+ *
+ * For a given port number, this function will return the type of tunnel.
+ */
+bool
+ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type)
+{
+ bool res = false;
+ u16 i;
+
+ ice_acquire_lock(&hw->tnl_lock);
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
+ *type = hw->tnl.tbl[i].type;
+ res = true;
+ break;
+ }
+
+ ice_release_lock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_find_free_tunnel_entry
+ * @hw: pointer to the HW structure
+ * @type: tunnel type
+ * @index: optionally returns index
+ *
+ * Returns whether there is a free tunnel entry, and optionally its index
+ */
+static bool
+ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *index)
+{
+ u16 i;
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
+ hw->tnl.tbl[i].type == type) {
+ if (index)
+ *index = i;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
+ * @hw: pointer to the HW structure
+ * @type: tunnel type (TNL_ALL will return any open port)
+ * @port: returns open port
+ */
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 *port)
+{
+ bool res = false;
+ u16 i;
+
+ ice_acquire_lock(&hw->tnl_lock);
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
+ break;
+ }
+
+ ice_release_lock(&hw->tnl_lock);
+
+ return res;
+}
+
+/**
+ * ice_create_tunnel
+ * @hw: pointer to the HW structure
+ * @type: type of tunnel
+ * @port: port of tunnel to create
+ *
+ * Create a tunnel by updating the parse graph in the parser. We do that by
+ * creating a package buffer with the tunnel info and issuing an update package
+ * command.
+ */
+enum ice_status
+ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 index;
+
+ ice_acquire_lock(&hw->tnl_lock);
+
+ if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
+ hw->tnl.tbl[index].ref++;
+ status = ICE_SUCCESS;
+ goto ice_create_tunnel_end;
+ }
+
+ if (!ice_find_free_tunnel_entry(hw, type, &index)) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto ice_create_tunnel_end;
+ }
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_create_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_create_tunnel_err;
+
+ sect_rx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ sizeof(*sect_rx));
+ if (!sect_rx)
+ goto ice_create_tunnel_err;
+ sect_rx->count = CPU_TO_LE16(1);
+
+ sect_tx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ sizeof(*sect_tx));
+ if (!sect_tx)
+ goto ice_create_tunnel_err;
+ sect_tx->count = CPU_TO_LE16(1);
+
+ /* copy original boost entry to update package buffer */
+ ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA);
+
+	/* overwrite the never-match dest port key bits with the encoded port
+ * bits
+ */
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ (u8 *)&port, NULL, NULL, NULL,
+ (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key),
+ sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key));
+
+ /* exact copy of entry to Tx section entry */
+ ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status) {
+ hw->tnl.tbl[index].port = port;
+ hw->tnl.tbl[index].in_use = true;
+ hw->tnl.tbl[index].ref = 1;
+ }
+
+ice_create_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_create_tunnel_end:
+ ice_release_lock(&hw->tnl_lock);
+
+ return status;
+}
+
+/**
+ * ice_destroy_tunnel
+ * @hw: pointer to the HW structure
+ * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @all: flag that states to destroy all tunnels
+ *
+ * Destroys a tunnel or all tunnels by creating an update package buffer
+ * targeting the specific updates requested and then performing an update
+ * package.
+ */
+enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+{
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ enum ice_status status = ICE_ERR_MAX_LIMIT;
+ struct ice_buf_build *bld;
+ u16 count = 0;
+ u16 index;
+ u16 size;
+ u16 i;
+
+ ice_acquire_lock(&hw->tnl_lock);
+
+ if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
+ if (hw->tnl.tbl[index].ref > 1) {
+ hw->tnl.tbl[index].ref--;
+ status = ICE_SUCCESS;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* determine count */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port))
+ count++;
+
+ if (!count) {
+ status = ICE_ERR_PARAM;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* size of section - there is at least one entry */
+ size = ice_struct_size(sect_rx, tcam, count - 1);
+
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_destroy_tunnel_end;
+ }
+
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_destroy_tunnel_err;
+
+ sect_rx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_rx)
+ goto ice_destroy_tunnel_err;
+ sect_rx->count = CPU_TO_LE16(1);
+
+ sect_tx = (struct ice_boost_tcam_section *)
+ ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ size);
+ if (!sect_tx)
+ goto ice_destroy_tunnel_err;
+ sect_tx->count = CPU_TO_LE16(1);
+
+ /* copy original boost entry to update package buffer, one copy to Rx
+ * section, another copy to the Tx section
+ */
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
+ (all || hw->tnl.tbl[i].port == port)) {
+ ice_memcpy(sect_rx->tcam + i,
+ hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_rx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+ ice_memcpy(sect_tx->tcam + i,
+ hw->tnl.tbl[i].boost_entry,
+ sizeof(*sect_tx->tcam),
+ ICE_NONDMA_TO_NONDMA);
+ hw->tnl.tbl[i].marked = true;
+ }
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+ if (!status)
+ for (i = 0; i < hw->tnl.count &&
+ i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].marked) {
+ hw->tnl.tbl[i].ref = 0;
+ hw->tnl.tbl[i].port = 0;
+ hw->tnl.tbl[i].in_use = false;
+ hw->tnl.tbl[i].marked = false;
+ }
+
+ice_destroy_tunnel_err:
+ ice_pkg_buf_free(hw, bld);
+
+ice_destroy_tunnel_end:
+ ice_release_lock(&hw->tnl_lock);
+
+ return status;
+}
+
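+/*
+ * Usage sketch (illustrative only): creating and later tearing down a tunnel
+ * port follows the pattern below. tunnel_type is a placeholder for one of the
+ * enum ice_tunnel_type values supported by the loaded package, and udp_port
+ * is the UDP destination port to recognize.
+ *
+ *	if (!ice_create_tunnel(hw, tunnel_type, udp_port)) {
+ *		// the parser now treats udp_port as a tunnel destination port
+ *		...
+ *		ice_destroy_tunnel(hw, udp_port, false);
+ *	}
+ */
+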
+/**
+ * ice_replay_tunnels
+ * @hw: pointer to the HW structure
+ *
+ * Replays all tunnels
+ */
+enum ice_status ice_replay_tunnels(struct ice_hw *hw)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u16 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) {
+ enum ice_tunnel_type type = hw->tnl.tbl[i].type;
+ u16 refs = hw->tnl.tbl[i].ref;
+ u16 port = hw->tnl.tbl[i].port;
+
+ if (!hw->tnl.tbl[i].in_use)
+ continue;
+
+ /* Replay tunnels one at a time by destroying them, then
+ * recreating them
+ */
+ hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */
+ status = ice_destroy_tunnel(hw, port, false);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "ERR: 0x%x - destroy tunnel port 0x%x\n",
+ status, port);
+ break;
+ }
+
+ status = ice_create_tunnel(hw, type, port);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "ERR: 0x%x - create tunnel port 0x%x\n",
+ status, port);
+ break;
+ }
+
+ /* reset to original ref count */
+ hw->tnl.tbl[i].ref = refs;
+ }
+
+ return status;
+}
+
+/**
+ * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
+ * @hw: pointer to the hardware structure
+ * @blk: hardware block
+ * @prof: profile ID
+ * @fv_idx: field vector word index
+ * @prot: variable to receive the protocol ID
+ * @off: variable to receive the protocol offset
+ */
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off)
+{
+ struct ice_fv_word *fv_ext;
+
+ if (prof >= hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (fv_idx >= hw->blk[blk].es.fvw)
+ return ICE_ERR_PARAM;
+
+ fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
+
+ *prot = fv_ext[fv_idx].prot_id;
+ *off = fv_ext[fv_idx].off;
+
+ return ICE_SUCCESS;
+}
+
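+/*
+ * Usage sketch (illustrative only): resolving which protocol ID and offset a
+ * given extraction-sequence word of a profile refers to. prof_id and
+ * fv_word_idx are placeholders.
+ *
+ *	u8 prot;
+ *	u16 off;
+ *
+ *	if (!ice_find_prot_off(hw, ICE_BLK_RSS, prof_id, fv_word_idx,
+ *			       &prot, &off)) {
+ *		// prot/off now describe the field extracted by that word
+ *	}
+ */
+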
+/* PTG Management */
+
+/**
+ * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ *
+ * This function will update the XLT1 hardware table to reflect the new
+ * packet type group configuration.
+ */
+enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
+{
+ struct ice_xlt1_section *sect;
+ struct ice_buf_build *bld;
+ enum ice_status status;
+ u16 index;
+
+ bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
+ ICE_XLT1_SIZE(ICE_XLT1_CNT),
+ (void **)&sect);
+ if (!bld)
+ return ICE_ERR_NO_MEMORY;
+
+ sect->count = CPU_TO_LE16(ICE_XLT1_CNT);
+ sect->offset = CPU_TO_LE16(0);
+ for (index = 0; index < ICE_XLT1_CNT; index++)
+ sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg;
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptype: the ptype to search for
+ * @ptg: pointer to variable that receives the PTG
+ *
+ * This function will search the PTGs for a particular ptype, returning the
+ * PTG ID that contains it through the PTG parameter, with the value of
+ * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
+ */
+static enum ice_status
+ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
+{
+ if (ptype >= ICE_XLT1_CNT || !ptg)
+ return ICE_ERR_PARAM;
+
+ *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptg_alloc_val - Allocates a new packet type group ID by value
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptg: the PTG to allocate
+ *
+ * This function allocates a given packet type group ID specified by the PTG
+ * parameter.
+ */
+static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+{
+ hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
+}
+
+/**
+ * ice_ptg_free - Frees a packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptg: the PTG ID to free
+ *
+ * This function frees a packet type group, and returns all the current ptypes
+ * within it to the default PTG.
+ */
+void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+{
+ struct ice_ptg_ptype *p, *temp;
+
+ hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false;
+ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ while (p) {
+ p->ptg = ICE_DEFAULT_PTG;
+ temp = p->next_ptype;
+ p->next_ptype = NULL;
+ p = temp;
+ }
+
+ hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL;
+}
+
+/**
+ * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptype: the ptype to remove
+ * @ptg: the PTG to remove the ptype from
+ *
+ * This function will remove the ptype from the specific PTG, and move it to
+ * the default PTG (ICE_DEFAULT_PTG).
+ */
+static enum ice_status
+ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+ struct ice_ptg_ptype **ch;
+ struct ice_ptg_ptype *p;
+
+ if (ptype > ICE_XLT1_CNT - 1)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Should not happen if .in_use is set, bad config */
+ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
+ return ICE_ERR_CFG;
+
+ /* find the ptype within this PTG, and bypass the link over it */
+ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ while (p) {
+ if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
+ *ch = p->next_ptype;
+ break;
+ }
+
+ ch = &p->next_ptype;
+ p = p->next_ptype;
+ }
+
+ hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
+ hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptype: the ptype to add or move
+ * @ptg: the PTG to add or move the ptype to
+ *
+ * This function will either add or move a ptype to a particular PTG depending
+ * on whether the ptype is already part of another group. Note that using a
+ * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * default PTG.
+ */
+static enum ice_status
+ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+ enum ice_status status;
+ u8 original_ptg;
+
+ if (ptype > ICE_XLT1_CNT - 1)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
+ if (status)
+ return status;
+
+ /* Is ptype already in the correct PTG? */
+ if (original_ptg == ptg)
+ return ICE_SUCCESS;
+
+ /* Remove from original PTG and move back to the default PTG */
+ if (original_ptg != ICE_DEFAULT_PTG)
+ ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
+
+ /* Moving to default PTG? Then we're done with this request */
+ if (ptg == ICE_DEFAULT_PTG)
+ return ICE_SUCCESS;
+
+ /* Add ptype to PTG at beginning of list */
+ hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
+ hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
+ &hw->blk[blk].xlt1.ptypes[ptype];
+
+ hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
+ hw->blk[blk].xlt1.t[ptype] = ptg;
+
+ return ICE_SUCCESS;
+}
+
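+/*
+ * Usage sketch (illustrative only): the PTG helpers above are used by profile
+ * management code roughly as follows; ptg_id and ptype_num are placeholders.
+ *
+ *	ice_ptg_alloc_val(hw, blk, ptg_id);		   // mark PTG as in use
+ *	ice_ptg_add_mv_ptype(hw, blk, ptype_num, ptg_id); // move ptype into it
+ *	ice_ptg_update_xlt1(hw, blk);			   // push XLT1 to HW
+ */
+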
+/* Block / table size info */
+struct ice_blk_size_details {
+ u16 xlt1; /* # XLT1 entries */
+ u16 xlt2; /* # XLT2 entries */
+ u16 prof_tcam; /* # profile ID TCAM entries */
+ u16 prof_id; /* # profile IDs */
+ u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
+ u16 prof_redir; /* # profile redirection entries */
+ u16 es; /* # extraction sequence entries */
+ u16 fvw; /* # field vector words */
+ u8 overwrite; /* overwrite existing entries allowed */
+ u8 reverse; /* reverse FV order */
+};
+
+static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
+ /**
+ * Table Definitions
+ * XLT1 - Number of entries in XLT1 table
+ * XLT2 - Number of entries in XLT2 table
+	 * TCAM - Number of entries in the Profile ID TCAM table
+ * CDID - Control Domain ID of the hardware block
+ * PRED - Number of entries in the Profile Redirection Table
+ * FV - Number of entries in the Field Vector
+ * FVW - Width (in WORDs) of the Field Vector
+ * OVR - Overwrite existing table entries
+ * REV - Reverse FV
+ */
+ /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
+ /* Overwrite , Reverse FV */
+ /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
+ false, false },
+ /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
+ false, false },
+ /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
+ false, true },
+ /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
+ true, true },
+ /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
+ false, false },
+};
+
+enum ice_sid_all {
+ ICE_SID_XLT1_OFF = 0,
+ ICE_SID_XLT2_OFF,
+ ICE_SID_PR_OFF,
+ ICE_SID_PR_REDIR_OFF,
+ ICE_SID_ES_OFF,
+ ICE_SID_OFF_COUNT,
+};
+
+/* Characteristic handling */
+
+/**
+ * ice_match_prop_lst - determine if properties of two lists match
+ * @list1: first properties list
+ * @list2: second properties list
+ *
+ * Count, cookies and the order must match in order to be considered equivalent.
+ */
+static bool
+ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
+{
+ struct ice_vsig_prof *tmp1;
+ struct ice_vsig_prof *tmp2;
+ u16 chk_count = 0;
+ u16 count = 0;
+
+ /* compare counts */
+ LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) {
+ count++;
+ }
+ LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) {
+ chk_count++;
+ }
+ if (!count || count != chk_count)
+ return false;
+
+ tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list);
+ tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list);
+
+	/* profile cookies must match, and in the exact same order, to take
+	 * priority into account
+ */
+ while (count--) {
+ if (tmp2->profile_cookie != tmp1->profile_cookie)
+ return false;
+
+ tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list);
+ tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list);
+ }
+
+ return true;
+}
+
+/* VSIG Management */
+
+/**
+ * ice_vsig_update_xlt2_sect - update one section of XLT2 table
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: HW VSI number to program
+ * @vsig: VSIG for the VSI
+ *
+ * This function will update the XLT2 hardware table with the input VSI
+ * group configuration.
+ */
+static enum ice_status
+ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+ u16 vsig)
+{
+ struct ice_xlt2_section *sect;
+ struct ice_buf_build *bld;
+ enum ice_status status;
+
+ bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
+ sizeof(struct ice_xlt2_section),
+ (void **)&sect);
+ if (!bld)
+ return ICE_ERR_NO_MEMORY;
+
+ sect->count = CPU_TO_LE16(1);
+ sect->offset = CPU_TO_LE16(vsi);
+ sect->value[0] = CPU_TO_LE16(vsig);
+
+ status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
+
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ *
+ * This function will update the XLT2 hardware table with the input VSI
+ * group configuration of all VSIs that have been changed.
+ */
+enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 vsi;
+
+ for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
+ /* update only vsis that have been changed */
+ if (hw->blk[blk].xlt2.vsis[vsi].changed) {
+ enum ice_status status;
+ u16 vsig;
+
+ vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
+ status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig);
+ if (status)
+ return status;
+
+ hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI of interest
+ * @vsig: pointer to receive the VSI group
+ *
+ * This function will look up the VSI entry in the XLT2 list and return
+ * the VSI group it is associated with.
+ */
+enum ice_status
+ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
+{
+ if (!vsig || vsi >= ICE_MAX_VSI)
+ return ICE_ERR_PARAM;
+
+ /* As long as there's a default or valid VSIG associated with the input
+ * VSI, the function returns success. Any handling of the VSIG will be
+ * done by the following add, update or remove functions.
+ */
+ *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_alloc_val - allocate a new VSIG by value
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: the VSIG to allocate
+ *
+ * This function will allocate a given VSIG specified by the VSIG parameter.
+ */
+static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+ hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
+ }
+
+ return ICE_VSIG_VALUE(idx, hw->pf_id);
+}
+
+/**
+ * ice_vsig_alloc - Finds a free entry and allocates a new VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ *
+ * This function will iterate through the VSIG list and mark the first
+ * unused entry as used for the new VSIG and return that value.
+ */
+static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ return ice_vsig_alloc_val(hw, blk, i);
+
+ return ICE_DEFAULT_VSIG;
+}
+
+/**
+ * ice_find_dup_props_vsig - find VSI group with a specified set of properties
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @chs: characteristic list
+ * @vsig: returns the VSIG with the matching profiles, if found
+ *
+ * Each VSIG is associated with a characteristic set; i.e. all VSIs under
+ * a group have the same characteristic set. To check whether there exists a
+ * VSIG with the same characteristics as the input set, this
+ * function will iterate through the XLT2 list and return the VSIG that has a
+ * matching configuration. In order to make sure that priorities are accounted
+ * for, the list must match exactly, including the order in which the
+ * characteristics are listed.
+ */
+static enum ice_status
+ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
+ struct LIST_HEAD_TYPE *chs, u16 *vsig)
+{
+ struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2;
+ u16 i;
+
+ for (i = 0; i < xlt2->count; i++) {
+ if (xlt2->vsig_tbl[i].in_use &&
+ ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
+ *vsig = ICE_VSIG_VALUE(i, hw->pf_id);
+ return ICE_SUCCESS;
+ }
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_vsig_free - free VSI group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to remove
+ *
+ * The function will remove all VSIs associated with the input VSIG, move
+ * them to the DEFAULT_VSIG, and mark the VSIG as available.
+ */
+static enum ice_status
+ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ struct ice_vsig_prof *dtmp, *del;
+ struct ice_vsig_vsi *vsi_cur;
+ u16 idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+ if (idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
+
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the
+ * list and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur) {
+ /* remove all vsis associated with this VSIG XLT2 entry */
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+
+ vsi_cur->vsig = ICE_DEFAULT_VSIG;
+ vsi_cur->changed = 1;
+ vsi_cur->next_vsi = NULL;
+ vsi_cur = tmp;
+ } while (vsi_cur);
+
+ /* NULL terminate head of VSI list */
+ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL;
+ }
+
+ /* free characteristic list */
+ LIST_FOR_EACH_ENTRY_SAFE(del, dtmp,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ LIST_DEL(&del->list);
+ ice_free(hw, del);
+ }
+
+ /* if VSIG characteristic list was cleared for reset
+ * re-initialize the list head
+ */
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_remove_vsi - remove VSI from VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI to remove
+ * @vsig: VSI group to remove from
+ *
+ * The function will remove the input VSI from its VSI group and move it
+ * to the DEFAULT_VSIG.
+ */
+static enum ice_status
+ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+ struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
+ u16 idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* entry already in default VSIG, don't have to remove */
+ if (idx == ICE_DEFAULT_VSIG)
+ return ICE_SUCCESS;
+
+ vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ if (!(*vsi_head))
+ return ICE_ERR_CFG;
+
+ vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
+ vsi_cur = (*vsi_head);
+
+ /* iterate the VSI list, skip over the entry to be removed */
+ while (vsi_cur) {
+ if (vsi_tgt == vsi_cur) {
+ (*vsi_head) = vsi_cur->next_vsi;
+ break;
+ }
+ vsi_head = &vsi_cur->next_vsi;
+ vsi_cur = vsi_cur->next_vsi;
+ }
+
+ /* verify if VSI was removed from group list */
+ if (!vsi_cur)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ vsi_cur->vsig = ICE_DEFAULT_VSIG;
+ vsi_cur->changed = 1;
+ vsi_cur->next_vsi = NULL;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI to move
+ * @vsig: destination VSI group
+ *
+ * This function will move or add the input VSI to the target VSIG.
+ * The function will find the original VSIG the VSI belongs to and
+ * move the entry to the DEFAULT_VSIG, update the original VSIG and
+ * then move the entry to the new VSIG.
+ */
+static enum ice_status
+ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+ struct ice_vsig_vsi *tmp;
+ enum ice_status status;
+ u16 orig_vsig, idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+	/* if the VSIG is not in use and is not the default VSIG, then it
+	 * does not exist.
+ */
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
+ vsig != ICE_DEFAULT_VSIG)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+ if (status)
+ return status;
+
+ /* no update required if vsigs match */
+ if (orig_vsig == vsig)
+ return ICE_SUCCESS;
+
+ if (orig_vsig != ICE_DEFAULT_VSIG) {
+ /* remove entry from orig_vsig and add to default VSIG */
+ status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
+ if (status)
+ return status;
+ }
+
+ if (idx == ICE_DEFAULT_VSIG)
+ return ICE_SUCCESS;
+
+ /* Create VSI entry and add VSIG and prop_mask values */
+ hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
+ hw->blk[blk].xlt2.vsis[vsi].changed = 1;
+
+ /* Add new entry to the head of the VSIG list */
+ tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
+ &hw->blk[blk].xlt2.vsis[vsi];
+ hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
+ hw->blk[blk].xlt2.t[vsi] = vsig;
+
+ return ICE_SUCCESS;
+}
+
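+/*
+ * Usage sketch (illustrative only): VSIG management typically resolves the
+ * current group of a VSI before moving it and pushing the change to hardware;
+ * vsi_num and dest_vsig are placeholders.
+ *
+ *	u16 cur_vsig;
+ *
+ *	if (!ice_vsig_find_vsi(hw, blk, vsi_num, &cur_vsig) &&
+ *	    cur_vsig != dest_vsig) {
+ *		ice_vsig_add_mv_vsi(hw, blk, vsi_num, dest_vsig);
+ *		ice_vsig_update_xlt2(hw, blk);
+ *	}
+ */
+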
+/**
+ * ice_find_prof_id - find profile ID for a given field vector
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @fv: field vector to search for
+ * @prof_id: receives the profile ID
+ */
+static enum ice_status
+ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_fv_word *fv, u8 *prof_id)
+{
+ struct ice_es *es = &hw->blk[blk].es;
+ u16 off;
+ u8 i;
+
+ for (i = 0; i < (u8)es->count; i++) {
+ off = i * es->fvw;
+
+ if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
+ continue;
+
+ *prof_id = i;
+ return ICE_SUCCESS;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_prof_id_rsrc_type - get profile ID resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_SW:
+ *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID;
+ break;
+ case ICE_BLK_ACL:
+ *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID;
+ break;
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_PROFID;
+ break;
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
+ break;
+ case ICE_BLK_PE:
+ *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
+ * @blk: the block type
+ * @rsrc_type: pointer to variable to receive the resource type
+ */
+static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
+{
+ switch (blk) {
+ case ICE_BLK_SW:
+ *rsrc_type = ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM;
+ break;
+ case ICE_BLK_ACL:
+ *rsrc_type = ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM;
+ break;
+ case ICE_BLK_FD:
+ *rsrc_type = ICE_AQC_RES_TYPE_FD_PROF_BLDR_TCAM;
+ break;
+ case ICE_BLK_RSS:
+ *rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
+ break;
+ case ICE_BLK_PE:
+ *rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+/**
+ * ice_alloc_tcam_ent - allocate hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the TCAM for
+ * @tcam_idx: pointer to variable to receive the TCAM entry
+ *
+ * This function allocates a new entry in a Profile ID TCAM for a specific
+ * block.
+ */
+static enum ice_status
+ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
+}
+
+/**
+ * ice_free_tcam_ent - free hardware TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the TCAM entry
+ * @tcam_idx: the TCAM entry to free
+ *
+ * This function frees an entry in a Profile ID TCAM for a specific block.
+ */
+static enum ice_status
+ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
+{
+ u16 res_type;
+
+ if (!ice_tcam_ent_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
+}
+
+/**
+ * ice_alloc_prof_id - allocate profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block to allocate the profile ID for
+ * @prof_id: pointer to variable to receive the profile ID
+ *
+ * This function allocates a new profile ID, which also corresponds to a Field
+ * Vector (Extraction Sequence) entry.
+ */
+static enum ice_status
+ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+{
+ enum ice_status status;
+ u16 res_type;
+ u16 get_prof;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
+ if (!status)
+ *prof_id = (u8)get_prof;
+
+ return status;
+}
+
+/**
+ * ice_free_prof_id - free profile ID
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID to free
+ *
+ * This function frees a profile ID, which also corresponds to a Field Vector.
+ */
+static enum ice_status
+ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ u16 tmp_prof_id = (u16)prof_id;
+ u16 res_type;
+
+ if (!ice_prof_id_rsrc_type(blk, &res_type))
+ return ICE_ERR_PARAM;
+
+ return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
+}
+
+/**
+ * ice_prof_inc_ref - increment reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block containing the profile ID
+ * @prof_id: the profile ID for which to increment the reference count
+ */
+static enum ice_status
+ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id > hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ hw->blk[blk].es.ref_count[prof_id]++;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_es - write an extraction sequence to hardware
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the extraction sequence
+ * @prof_id: the profile ID to write
+ * @fv: pointer to the extraction sequence to write - NULL to clear extraction
+ */
+static void
+ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
+ struct ice_fv_word *fv)
+{
+ u16 off;
+
+ off = prof_id * hw->blk[blk].es.fvw;
+ if (!fv) {
+ ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw *
+ sizeof(*fv), ICE_NONDMA_MEM);
+ hw->blk[blk].es.written[prof_id] = false;
+ } else {
+ ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw *
+ sizeof(*fv), ICE_NONDMA_TO_NONDMA);
+ }
+}
+
+/**
+ * ice_prof_dec_ref - decrement reference count for profile
+ * @hw: pointer to the HW struct
+ * @blk: the block from which to free the profile ID
+ * @prof_id: the profile ID for which to decrement the reference count
+ */
+static enum ice_status
+ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+{
+ if (prof_id > hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (hw->blk[blk].es.ref_count[prof_id] > 0) {
+ if (!--hw->blk[blk].es.ref_count[prof_id]) {
+ ice_write_es(hw, blk, prof_id, NULL);
+ return ice_free_prof_id(hw, blk, prof_id);
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/* Block / table section IDs */
+static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
+ /* SWITCH */
+ { ICE_SID_XLT1_SW,
+ ICE_SID_XLT2_SW,
+ ICE_SID_PROFID_TCAM_SW,
+ ICE_SID_PROFID_REDIR_SW,
+ ICE_SID_FLD_VEC_SW
+ },
+
+ /* ACL */
+ { ICE_SID_XLT1_ACL,
+ ICE_SID_XLT2_ACL,
+ ICE_SID_PROFID_TCAM_ACL,
+ ICE_SID_PROFID_REDIR_ACL,
+ ICE_SID_FLD_VEC_ACL
+ },
+
+ /* FD */
+ { ICE_SID_XLT1_FD,
+ ICE_SID_XLT2_FD,
+ ICE_SID_PROFID_TCAM_FD,
+ ICE_SID_PROFID_REDIR_FD,
+ ICE_SID_FLD_VEC_FD
+ },
+
+ /* RSS */
+ { ICE_SID_XLT1_RSS,
+ ICE_SID_XLT2_RSS,
+ ICE_SID_PROFID_TCAM_RSS,
+ ICE_SID_PROFID_REDIR_RSS,
+ ICE_SID_FLD_VEC_RSS
+ },
+
+ /* PE */
+ { ICE_SID_XLT1_PE,
+ ICE_SID_XLT2_PE,
+ ICE_SID_PROFID_TCAM_PE,
+ ICE_SID_PROFID_REDIR_PE,
+ ICE_SID_FLD_VEC_PE
+ }
+};
+
+/**
+ * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 pt;
+
+ for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
+ u8 ptg;
+
+ ptg = hw->blk[blk].xlt1.t[pt];
+ if (ptg != ICE_DEFAULT_PTG) {
+ ice_ptg_alloc_val(hw, blk, ptg);
+ ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
+ }
+ }
+}
+
+/**
+ * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 vsi;
+
+ for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
+ u16 vsig;
+
+ vsig = hw->blk[blk].xlt2.t[vsi];
+ if (vsig) {
+ ice_vsig_alloc_val(hw, blk, vsig);
+ ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+ /* no changes at this time, since this has been
+ * initialized from the original package
+ */
+ hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+ }
+ }
+}
+
+/**
+ * ice_init_sw_db - init software database from HW tables
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_sw_db(struct ice_hw *hw)
+{
+ u16 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ ice_init_sw_xlt1_db(hw, (enum ice_block)i);
+ ice_init_sw_xlt2_db(hw, (enum ice_block)i);
+ }
+}
+
+/**
+ * ice_fill_tbl - Reads content of a single table type into database
+ * @hw: pointer to the hardware structure
+ * @block_id: Block ID of the table to copy
+ * @sid: Section ID of the table to copy
+ *
+ * Will attempt to read the entire content of a given table of a single block
+ * into the driver database. We assume that the buffer will always
+ * be as large or larger than the data contained in the package. If
+ * this condition is not met, there is most likely an error in the package
+ * contents.
+ */
+static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
+{
+ u32 dst_len, sect_len, offset = 0;
+ struct ice_prof_redir_section *pr;
+ struct ice_prof_id_section *pid;
+ struct ice_xlt1_section *xlt1;
+ struct ice_xlt2_section *xlt2;
+ struct ice_sw_fv_section *es;
+ struct ice_pkg_enum state;
+ u8 *src, *dst;
+ void *sect;
+
+ /* if the HW segment pointer is null then the first iteration of
+ * ice_pkg_enum_section() will fail. In this case the HW tables will
+ * not be filled and the function simply returns.
+ */
+ if (!hw->seg) {
+ ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
+ return;
+ }
+
+ ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM);
+
+ sect = ice_pkg_enum_section(hw->seg, &state, sid);
+
+ while (sect) {
+ switch (sid) {
+ case ICE_SID_XLT1_SW:
+ case ICE_SID_XLT1_FD:
+ case ICE_SID_XLT1_RSS:
+ case ICE_SID_XLT1_ACL:
+ case ICE_SID_XLT1_PE:
+ xlt1 = (struct ice_xlt1_section *)sect;
+ src = xlt1->value;
+ sect_len = LE16_TO_CPU(xlt1->count) *
+ sizeof(*hw->blk[block_id].xlt1.t);
+ dst = hw->blk[block_id].xlt1.t;
+ dst_len = hw->blk[block_id].xlt1.count *
+ sizeof(*hw->blk[block_id].xlt1.t);
+ break;
+ case ICE_SID_XLT2_SW:
+ case ICE_SID_XLT2_FD:
+ case ICE_SID_XLT2_RSS:
+ case ICE_SID_XLT2_ACL:
+ case ICE_SID_XLT2_PE:
+ xlt2 = (struct ice_xlt2_section *)sect;
+ src = (_FORCE_ u8 *)xlt2->value;
+ sect_len = LE16_TO_CPU(xlt2->count) *
+ sizeof(*hw->blk[block_id].xlt2.t);
+ dst = (u8 *)hw->blk[block_id].xlt2.t;
+ dst_len = hw->blk[block_id].xlt2.count *
+ sizeof(*hw->blk[block_id].xlt2.t);
+ break;
+ case ICE_SID_PROFID_TCAM_SW:
+ case ICE_SID_PROFID_TCAM_FD:
+ case ICE_SID_PROFID_TCAM_RSS:
+ case ICE_SID_PROFID_TCAM_ACL:
+ case ICE_SID_PROFID_TCAM_PE:
+ pid = (struct ice_prof_id_section *)sect;
+ src = (u8 *)pid->entry;
+ sect_len = LE16_TO_CPU(pid->count) *
+ sizeof(*hw->blk[block_id].prof.t);
+ dst = (u8 *)hw->blk[block_id].prof.t;
+ dst_len = hw->blk[block_id].prof.count *
+ sizeof(*hw->blk[block_id].prof.t);
+ break;
+ case ICE_SID_PROFID_REDIR_SW:
+ case ICE_SID_PROFID_REDIR_FD:
+ case ICE_SID_PROFID_REDIR_RSS:
+ case ICE_SID_PROFID_REDIR_ACL:
+ case ICE_SID_PROFID_REDIR_PE:
+ pr = (struct ice_prof_redir_section *)sect;
+ src = pr->redir_value;
+ sect_len = LE16_TO_CPU(pr->count) *
+ sizeof(*hw->blk[block_id].prof_redir.t);
+ dst = hw->blk[block_id].prof_redir.t;
+ dst_len = hw->blk[block_id].prof_redir.count *
+ sizeof(*hw->blk[block_id].prof_redir.t);
+ break;
+ case ICE_SID_FLD_VEC_SW:
+ case ICE_SID_FLD_VEC_FD:
+ case ICE_SID_FLD_VEC_RSS:
+ case ICE_SID_FLD_VEC_ACL:
+ case ICE_SID_FLD_VEC_PE:
+ es = (struct ice_sw_fv_section *)sect;
+ src = (u8 *)es->fv;
+ sect_len = (u32)(LE16_TO_CPU(es->count) *
+ hw->blk[block_id].es.fvw) *
+ sizeof(*hw->blk[block_id].es.t);
+ dst = (u8 *)hw->blk[block_id].es.t;
+ dst_len = (u32)(hw->blk[block_id].es.count *
+ hw->blk[block_id].es.fvw) *
+ sizeof(*hw->blk[block_id].es.t);
+ break;
+ default:
+ return;
+ }
+
+ /* if the section offset exceeds destination length, terminate
+ * table fill.
+ */
+ if (offset > dst_len)
+ return;
+
+ /* if the sum of the section size and offset exceeds the destination
+ * size, then we are out of bounds of the HW table for this PF.
+ * Reduce the section length so that it only fills the remaining
+ * table space for this PF.
+ */
+ if ((offset + sect_len) > dst_len)
+ sect_len = dst_len - offset;
+
+ ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA);
+ offset += sect_len;
+ sect = ice_pkg_enum_section(NULL, &state, sid);
+ }
+}
+
+/**
+ * ice_fill_blk_tbls - Read package context for tables
+ * @hw: pointer to the hardware structure
+ *
+ * Reads the current package contents and iteratively populates the
+ * driver database for all advanced feature blocks. This assumes that
+ * the HW tables have already been allocated.
+ */
+void ice_fill_blk_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ enum ice_block blk_id = (enum ice_block)i;
+
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
+ }
+
+ ice_init_sw_db(hw);
+}
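+
+/*
+ * Usage sketch (illustration only, not part of the driver sources): the
+ * ordering that ice_fill_blk_tbls() expects. It assumes the SW copies of
+ * the HW tables were allocated by ice_init_hw_tbls() and that the DDP
+ * package segment pointer (hw->seg) is valid before the fill:
+ *
+ *	status = ice_init_hw_tbls(hw);
+ *	if (!status) {
+ *		... locate/download the DDP package so hw->seg is set ...
+ *		ice_fill_blk_tbls(hw);
+ *	}
+ */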
+
+/**
+ * ice_free_prof_map - free profile map
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_es *es = &hw->blk[blk_idx].es;
+ struct ice_prof_map *del, *tmp;
+
+ ice_acquire_lock(&es->prof_map_lock);
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map,
+ ice_prof_map, list) {
+ LIST_DEL(&del->list);
+ ice_free(hw, del);
+ }
+ INIT_LIST_HEAD(&es->prof_map);
+ ice_release_lock(&es->prof_map_lock);
+}
+
+/**
+ * ice_free_flow_profs - free flow profile entries
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ struct ice_flow_prof *p, *tmp;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk_idx]);
+ LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx],
+ ice_flow_prof, l_entry) {
+ struct ice_flow_entry *e, *t;
+
+ LIST_FOR_EACH_ENTRY_SAFE(e, t, &p->entries,
+ ice_flow_entry, l_entry)
+ ice_flow_rem_entry(hw, (enum ice_block)blk_idx,
+ ICE_FLOW_ENTRY_HNDL(e));
+
+ LIST_DEL(&p->l_entry);
+ if (p->acts)
+ ice_free(hw, p->acts);
+ ice_free(hw, p);
+ }
+ ice_release_lock(&hw->fl_profs_locks[blk_idx]);
+
+ /* if driver is in reset and tables are being cleared
+ * re-initialize the flow profile list heads
+ */
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
+ * ice_free_vsig_tbl - free complete VSIG table entries
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block on which to free the VSIG table entries
+ */
+static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 i;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl)
+ return;
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++)
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use)
+ ice_vsig_free(hw, blk, i);
+}
+
+/**
+ * ice_free_hw_tbls - free hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+void ice_free_hw_tbls(struct ice_hw *hw)
+{
+ struct ice_rss_cfg *r, *rt;
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ if (hw->blk[i].is_list_init) {
+ struct ice_es *es = &hw->blk[i].es;
+
+ ice_free_prof_map(hw, i);
+ ice_destroy_lock(&es->prof_map_lock);
+
+ ice_free_flow_profs(hw, i);
+ ice_destroy_lock(&hw->fl_profs_locks[i]);
+
+ hw->blk[i].is_list_init = false;
+ }
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
+ ice_free(hw, hw->blk[i].xlt1.ptypes);
+ ice_free(hw, hw->blk[i].xlt1.ptg_tbl);
+ ice_free(hw, hw->blk[i].xlt1.t);
+ ice_free(hw, hw->blk[i].xlt2.t);
+ ice_free(hw, hw->blk[i].xlt2.vsig_tbl);
+ ice_free(hw, hw->blk[i].xlt2.vsis);
+ ice_free(hw, hw->blk[i].prof.t);
+ ice_free(hw, hw->blk[i].prof_redir.t);
+ ice_free(hw, hw->blk[i].es.t);
+ ice_free(hw, hw->blk[i].es.ref_count);
+ ice_free(hw, hw->blk[i].es.written);
+ }
+
+ LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head,
+ ice_rss_cfg, l_entry) {
+ LIST_DEL(&r->l_entry);
+ ice_free(hw, r);
+ }
+ ice_destroy_lock(&hw->rss_locks);
+ ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM);
+}
+
+/**
+ * ice_init_flow_profs - init flow profile locks and list heads
+ * @hw: pointer to the hardware structure
+ * @blk_idx: HW block index
+ */
+static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
+{
+ ice_init_lock(&hw->fl_profs_locks[blk_idx]);
+ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]);
+}
+
+/**
+ * ice_clear_hw_tbls - clear HW tables and flow profiles
+ * @hw: pointer to the hardware structure
+ */
+void ice_clear_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_tcam *prof = &hw->blk[i].prof;
+ struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+ struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+ struct ice_es *es = &hw->blk[i].es;
+
+ if (hw->blk[i].is_list_init) {
+ ice_free_prof_map(hw, i);
+ ice_free_flow_profs(hw, i);
+ }
+
+ ice_free_vsig_tbl(hw, (enum ice_block)i);
+
+ ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt1->ptg_tbl, 0,
+ ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t),
+ ICE_NONDMA_MEM);
+
+ ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt2->vsig_tbl, 0,
+ xlt2->count * sizeof(*xlt2->vsig_tbl),
+ ICE_NONDMA_MEM);
+ ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t),
+ ICE_NONDMA_MEM);
+
+ ice_memset(prof->t, 0, prof->count * sizeof(*prof->t),
+ ICE_NONDMA_MEM);
+ ice_memset(prof_redir->t, 0,
+ prof_redir->count * sizeof(*prof_redir->t),
+ ICE_NONDMA_MEM);
+
+ ice_memset(es->t, 0, es->count * sizeof(*es->t),
+ ICE_NONDMA_MEM);
+ ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count),
+ ICE_NONDMA_MEM);
+ ice_memset(es->written, 0, es->count * sizeof(*es->written),
+ ICE_NONDMA_MEM);
+ }
+}
+
+/**
+ * ice_init_hw_tbls - init hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ ice_init_lock(&hw->rss_locks);
+ INIT_LIST_HEAD(&hw->rss_list_head);
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_tcam *prof = &hw->blk[i].prof;
+ struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+ struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+ struct ice_es *es = &hw->blk[i].es;
+ u16 j;
+
+ if (hw->blk[i].is_list_init)
+ continue;
+
+ ice_init_flow_profs(hw, i);
+ ice_init_lock(&es->prof_map_lock);
+ INIT_LIST_HEAD(&es->prof_map);
+ hw->blk[i].is_list_init = true;
+
+ hw->blk[i].overwrite = blk_sizes[i].overwrite;
+ es->reverse = blk_sizes[i].reverse;
+
+ xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
+ xlt1->count = blk_sizes[i].xlt1;
+
+ xlt1->ptypes = (struct ice_ptg_ptype *)
+ ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes));
+
+ if (!xlt1->ptypes)
+ goto err;
+
+ xlt1->ptg_tbl = (struct ice_ptg_entry *)
+ ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl));
+
+ if (!xlt1->ptg_tbl)
+ goto err;
+
+ xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t));
+ if (!xlt1->t)
+ goto err;
+
+ xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
+ xlt2->count = blk_sizes[i].xlt2;
+
+ xlt2->vsis = (struct ice_vsig_vsi *)
+ ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis));
+
+ if (!xlt2->vsis)
+ goto err;
+
+ xlt2->vsig_tbl = (struct ice_vsig_entry *)
+ ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl));
+ if (!xlt2->vsig_tbl)
+ goto err;
+
+ for (j = 0; j < xlt2->count; j++)
+ INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
+
+ xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t));
+ if (!xlt2->t)
+ goto err;
+
+ prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
+ prof->count = blk_sizes[i].prof_tcam;
+ prof->max_prof_id = blk_sizes[i].prof_id;
+ prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
+ prof->t = (struct ice_prof_tcam_entry *)
+ ice_calloc(hw, prof->count, sizeof(*prof->t));
+
+ if (!prof->t)
+ goto err;
+
+ prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
+ prof_redir->count = blk_sizes[i].prof_redir;
+ prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count,
+ sizeof(*prof_redir->t));
+
+ if (!prof_redir->t)
+ goto err;
+
+ es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
+ es->count = blk_sizes[i].es;
+ es->fvw = blk_sizes[i].fvw;
+ es->t = (struct ice_fv_word *)
+ ice_calloc(hw, (u32)(es->count * es->fvw),
+ sizeof(*es->t));
+ if (!es->t)
+ goto err;
+
+ es->ref_count = (u16 *)
+ ice_calloc(hw, es->count, sizeof(*es->ref_count));
+ if (!es->ref_count)
+ goto err;
+
+ es->written = (u8 *)
+ ice_calloc(hw, es->count, sizeof(*es->written));
+ if (!es->written)
+ goto err;
+ }
+ return ICE_SUCCESS;
+
+err:
+ ice_free_hw_tbls(hw);
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_prof_gen_key - generate profile ID key
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the profile ID
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ * @key: output of profile ID key
+ */
+static enum ice_status
+ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
+ u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 key[ICE_TCAM_KEY_SZ])
+{
+ struct ice_prof_id_key inkey;
+
+ inkey.xlt1 = ptg;
+ inkey.xlt2_cdid = CPU_TO_LE16(vsig);
+ inkey.flags = CPU_TO_LE16(flags);
+
+ switch (hw->blk[blk].prof.cdid_bits) {
+ case 0:
+ break;
+ case 2:
+#define ICE_CD_2_M 0xC000U
+#define ICE_CD_2_S 14
+ inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M);
+ inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S);
+ break;
+ case 4:
+#define ICE_CD_4_M 0xF000U
+#define ICE_CD_4_S 12
+ inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M);
+ inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S);
+ break;
+ case 8:
+#define ICE_CD_8_M 0xFF00U
+#define ICE_CD_8_S 8
+ inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M);
+ inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S);
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n");
+ break;
+ }
+
+ return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk,
+ nm_msk, 0, ICE_TCAM_KEY_SZ / 2);
+}
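+
+/*
+ * Worked example (illustration only) of the CDID placement above: with
+ * cdid_bits == 2, ICE_CD_2_M (0xC000) covers bits 15:14 of xlt2_cdid, so a
+ * cdid of 1 ORs in BIT(1) << 14 == 0x8000, while the lower bits of
+ * xlt2_cdid continue to carry the VSIG portion of the key.
+ */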
+
+/**
+ * ice_tcam_write_entry - write TCAM entry
+ * @hw: pointer to the HW struct
+ * @blk: the block in which to write the profile ID
+ * @idx: the entry index to write to
+ * @prof_id: profile ID
+ * @ptg: packet type group (PTG) portion of key
+ * @vsig: VSIG portion of key
+ * @cdid: CDID portion of key
+ * @flags: flag portion of key
+ * @vl_msk: valid mask
+ * @dc_msk: don't care mask
+ * @nm_msk: never match mask
+ */
+static enum ice_status
+ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
+ u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ],
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
+{
+ enum ice_status status;
+
+ status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
+ dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
+ if (!status) {
+ hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx);
+ hw->blk[blk].prof.t[idx].prof_id = prof_id;
+ }
+
+ return status;
+}
+
+/**
+ * ice_vsig_get_ref - returns the number of VSIs that belong to a VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to query
+ * @refs: pointer to variable to receive the reference count
+ */
+static enum ice_status
+ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *ptr;
+
+ *refs = 0;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ while (ptr) {
+ (*refs)++;
+ ptr = ptr->next_vsi;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_has_prof_vsig - check to see if VSIG has a specific profile
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: VSIG to check against
+ * @hdl: profile handle
+ */
+static bool
+ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *ent;
+
+ LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ if (ent->profile_cookie == hdl)
+ return true;
+ }
+
+ ice_debug(hw, ICE_DBG_INIT,
+ "Characteristic list for VSI group %d not found.\n",
+ vsig);
+ return false;
+}
+
+/**
+ * ice_prof_bld_es - build profile ID extraction sequence changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
+{
+ u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word);
+ struct ice_chs_chg *tmp;
+
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) {
+ u16 off = tmp->prof_id * hw->blk[blk].es.fvw;
+ struct ice_pkg_es *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_VEC_TBL);
+ p = (struct ice_pkg_es *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
+ vec_size -
+ sizeof(p->es[0]));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = CPU_TO_LE16(1);
+ p->offset = CPU_TO_LE16(tmp->prof_id);
+
+ ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size,
+ ICE_NONDMA_TO_NONDMA);
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_tcam - build profile ID TCAM changes
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
+ struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) {
+ struct ice_prof_id_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_PROF_TCAM);
+ p = (struct ice_prof_id_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = CPU_TO_LE16(1);
+ p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx);
+ p->entry[0].prof_id = tmp->prof_id;
+
+ ice_memcpy(p->entry[0].key,
+ &hw->blk[blk].prof.t[tmp->tcam_idx].key,
+ sizeof(hw->blk[blk].prof.t->key),
+ ICE_NONDMA_TO_NONDMA);
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_xlt1 - build XLT1 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
+ struct LIST_HEAD_TYPE *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) {
+ struct ice_xlt1_section *p;
+ u32 id;
+
+ id = ice_sect_id(blk, ICE_XLT1);
+ p = (struct ice_xlt1_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = CPU_TO_LE16(1);
+ p->offset = CPU_TO_LE16(tmp->ptype);
+ p->value[0] = tmp->ptg;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_prof_bld_xlt2 - build XLT2 changes
+ * @blk: hardware block
+ * @bld: the update package buffer build to add to
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
+ struct LIST_HEAD_TYPE *chgs)
+{
+ struct ice_chs_chg *tmp;
+
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ struct ice_xlt2_section *p;
+ u32 id;
+
+ switch (tmp->type) {
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ id = ice_sect_id(blk, ICE_XLT2);
+ p = (struct ice_xlt2_section *)
+ ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+
+ if (!p)
+ return ICE_ERR_MAX_LIMIT;
+
+ p->count = CPU_TO_LE16(1);
+ p->offset = CPU_TO_LE16(tmp->vsi);
+ p->value[0] = CPU_TO_LE16(tmp->vsig);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_upd_prof_hw - update hardware using the change list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @chgs: the list of changes to make in hardware
+ */
+static enum ice_status
+ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
+ struct LIST_HEAD_TYPE *chgs)
+{
+ struct ice_buf_build *b;
+ struct ice_chs_chg *tmp;
+ enum ice_status status;
+ u16 pkg_sects;
+ u16 xlt1 = 0;
+ u16 xlt2 = 0;
+ u16 tcam = 0;
+ u16 es = 0;
+ u16 sects;
+
+ /* count number of sections we need */
+ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) {
+ switch (tmp->type) {
+ case ICE_PTG_ES_ADD:
+ if (tmp->add_ptg)
+ xlt1++;
+ if (tmp->add_prof)
+ es++;
+ break;
+ case ICE_TCAM_ADD:
+ tcam++;
+ break;
+ case ICE_VSIG_ADD:
+ case ICE_VSI_MOVE:
+ case ICE_VSIG_REM:
+ xlt2++;
+ break;
+ default:
+ break;
+ }
+ }
+ sects = xlt1 + xlt2 + tcam + es;
+
+ if (!sects)
+ return ICE_SUCCESS;
+
+ /* Build update package buffer */
+ b = ice_pkg_buf_alloc(hw);
+ if (!b)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_pkg_buf_reserve_section(b, sects);
+ if (status)
+ goto error_tmp;
+
+ /* Preserve order of table update: ES, TCAM, PTG, VSIG */
+ if (es) {
+ status = ice_prof_bld_es(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (tcam) {
+ status = ice_prof_bld_tcam(hw, blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt1) {
+ status = ice_prof_bld_xlt1(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ if (xlt2) {
+ status = ice_prof_bld_xlt2(blk, b, chgs);
+ if (status)
+ goto error_tmp;
+ }
+
+ /* After the package buffer build, check that the section count in the
+ * buffer is non-zero and matches the number of sections detected for
+ * the package update.
+ */
+ pkg_sects = ice_pkg_buf_get_active_sections(b);
+ if (!pkg_sects || pkg_sects != sects) {
+ status = ICE_ERR_INVAL_SIZE;
+ goto error_tmp;
+ }
+
+ /* update package */
+ status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
+ if (status == ICE_ERR_AQ_ERROR)
+ ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
+
+error_tmp:
+ ice_pkg_buf_free(hw, b);
+ return status;
+}
+
+/**
+ * ice_add_prof - add profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
+ * @es: extraction sequence (length of array is determined by the block)
+ *
+ * This function registers a profile, which matches a set of PTGs with a
+ * particular extraction sequence. While the hardware profile is allocated
+ * it will not be written until the first call to ice_add_flow that specifies
+ * the ID value used here.
+ */
+enum ice_status
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
+ struct ice_fv_word *es)
+{
+ u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
+ ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
+ struct ice_prof_map *prof;
+ enum ice_status status;
+ u8 byte = 0;
+ u8 prof_id;
+
+ ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
+
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+
+ /* search for existing profile */
+ status = ice_find_prof_id(hw, blk, es, &prof_id);
+ if (status) {
+ /* allocate profile ID */
+ status = ice_alloc_prof_id(hw, blk, &prof_id);
+ if (status)
+ goto err_ice_add_prof;
+
+ /* and write new es */
+ ice_write_es(hw, blk, prof_id, es);
+ }
+
+ ice_prof_inc_ref(hw, blk, prof_id);
+
+ /* add profile info */
+
+ prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof));
+ if (!prof) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_prof;
+ }
+
+ prof->profile_cookie = id;
+ prof->prof_id = prof_id;
+ prof->ptg_cnt = 0;
+ prof->context = 0;
+
+ /* build list of ptgs */
+ while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
+ u8 bit;
+
+ if (!ptypes[byte]) {
+ bytes--;
+ byte++;
+ continue;
+ }
+ /* Examine 8 bits per byte */
+ for (bit = 0; bit < 8; bit++) {
+ if (ptypes[byte] & BIT(bit)) {
+ u16 ptype;
+ u8 ptg;
+ u8 m;
+
+ ptype = byte * BITS_PER_BYTE + bit;
+
+ /* The package should place all ptypes in a
+ * non-zero PTG, so the following call should
+ * never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
+ continue;
+
+ /* If PTG is already added, skip and continue */
+ if (ice_is_bit_set(ptgs_used, ptg))
+ continue;
+
+ ice_set_bit(ptg, ptgs_used);
+ prof->ptg[prof->ptg_cnt] = ptg;
+
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
+ break;
+
+ /* nothing left in byte, then exit */
+ m = ~(u8)((1 << (bit + 1)) - 1);
+ if (!(ptypes[byte] & m))
+ break;
+ }
+ }
+
+ bytes--;
+ byte++;
+ }
+
+ LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
+ status = ICE_SUCCESS;
+
+err_ice_add_prof:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
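+
+/*
+ * Usage sketch (illustration only, not part of the driver sources): how a
+ * caller might register a profile. "example_ptype" and "example_id" are
+ * placeholders, and the extraction sequence length of 48 is an assumed
+ * field-vector width (fvw) for the chosen block. The profile is only staged
+ * by ice_add_prof(); it is written to HW by the first ice_add_flow() call
+ * that uses the same tracking ID.
+ *
+ *	u8 ptypes[DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE)] = { 0 };
+ *	struct ice_fv_word es[48] = { 0 };
+ *
+ *	ptypes[example_ptype / BITS_PER_BYTE] |= BIT(example_ptype % BITS_PER_BYTE);
+ *	status = ice_add_prof(hw, ICE_BLK_RSS, example_id, ptypes, es);
+ */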
+
+/**
+ * ice_search_prof_id_low - Search for a profile tracking ID low level
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added. This
+ * version assumes that the caller has already acquired the prof map lock.
+ */
+static struct ice_prof_map *
+ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry = NULL;
+ struct ice_prof_map *map;
+
+ LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map,
+ list) {
+ if (map->profile_cookie == id) {
+ entry = map;
+ break;
+ }
+ }
+
+ return entry;
+}
+
+/**
+ * ice_search_prof_id - Search for a profile tracking ID
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will search for a profile tracking ID which was previously added.
+ */
+struct ice_prof_map *
+ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *entry;
+
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+ entry = ice_search_prof_id_low(hw, blk, id);
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+
+ return entry;
+}
+
+/**
+ * ice_set_prof_context - Set context for a given profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @cntxt: context
+ */
+struct ice_prof_map *
+ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
+{
+ struct ice_prof_map *entry;
+
+ entry = ice_search_prof_id(hw, blk, id);
+ if (entry)
+ entry->context = cntxt;
+
+ return entry;
+}
+
+/**
+ * ice_get_prof_context - Get context for a given profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ * @cntxt: pointer to variable to receive the context
+ */
+struct ice_prof_map *
+ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
+{
+ struct ice_prof_map *entry;
+
+ entry = ice_search_prof_id(hw, blk, id);
+ if (entry)
+ *cntxt = entry->context;
+
+ return entry;
+}
+
+/**
+ * ice_vsig_prof_id_count - count profiles in a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG in which to count the profiles
+ */
+static u16
+ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M, count = 0;
+ struct ice_vsig_prof *p;
+
+ LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * ice_rel_tcam_idx - release a TCAM index
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @idx: the index to release
+ */
+static enum ice_status
+ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+{
+ /* Masks to invoke a never match entry */
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
+ enum ice_status status;
+
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
+ dc_msk, nm_msk);
+ if (status)
+ return status;
+
+ /* release the TCAM entry */
+ status = ice_free_tcam_ent(hw, blk, idx);
+
+ return status;
+}
+
+/**
+ * ice_rem_prof_id - remove one profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @prof: pointer to profile structure to remove
+ */
+static enum ice_status
+ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
+ struct ice_vsig_prof *prof)
+{
+ enum ice_status status;
+ u16 i;
+
+ for (i = 0; i < prof->tcam_count; i++) {
+ if (prof->tcam[i].in_use) {
+ prof->tcam[i].in_use = false;
+ status = ice_rel_tcam_idx(hw, blk,
+ prof->tcam[i].tcam_idx);
+ if (status)
+ return ICE_ERR_HW_TABLE;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_rem_vsig - remove VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to remove
+ * @chg: the change list
+ */
+static enum ice_status
+ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct LIST_HEAD_TYPE *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_vsi *vsi_cur;
+ struct ice_vsig_prof *d, *t;
+ enum ice_status status;
+
+ /* remove TCAM entries */
+ LIST_FOR_EACH_ENTRY_SAFE(d, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ status = ice_rem_prof_id(hw, blk, d);
+ if (status)
+ return status;
+
+ LIST_DEL(&d->list);
+ ice_free(hw, d);
+ }
+
+ /* Move all VSIs associated with this VSIG to the default VSIG */
+ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ /* If the VSIG has at least 1 VSI then iterate through the list
+ * and remove the VSIs before deleting the group.
+ */
+ if (vsi_cur) {
+ do {
+ struct ice_vsig_vsi *tmp = vsi_cur->next_vsi;
+ struct ice_chs_chg *p;
+
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->type = ICE_VSIG_REM;
+ p->orig_vsig = vsig;
+ p->vsig = ICE_DEFAULT_VSIG;
+ p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis;
+
+ LIST_ADD(&p->list_entry, chg);
+
+ vsi_cur = tmp;
+ } while (vsi_cur);
+ }
+
+ return ice_vsig_free(hw, blk, vsig);
+}
+
+/**
+ * ice_rem_prof_id_vsig - remove a specific profile from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG to remove the profile from
+ * @hdl: profile handle indicating which profile to remove
+ * @chg: list to receive a record of changes
+ */
+static enum ice_status
+ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ struct LIST_HEAD_TYPE *chg)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+ struct ice_vsig_prof *p, *t;
+ enum ice_status status;
+
+ LIST_FOR_EACH_ENTRY_SAFE(p, t,
+ &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ if (p->profile_cookie == hdl) {
+ if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
+ /* this is the last profile, remove the VSIG */
+ return ice_rem_vsig(hw, blk, vsig, chg);
+
+ status = ice_rem_prof_id(hw, blk, p);
+ if (!status) {
+ LIST_DEL(&p->list);
+ ice_free(hw, p);
+ }
+ return status;
+ }
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_flow_all - remove all flows with a particular profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ */
+static enum ice_status
+ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_chs_chg *del, *tmp;
+ struct LIST_HEAD_TYPE chg;
+ enum ice_status status;
+ u16 i;
+
+ INIT_LIST_HEAD(&chg);
+
+ for (i = 1; i < ICE_MAX_VSIGS; i++) {
+ if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) {
+ if (ice_has_prof_vsig(hw, blk, i, id)) {
+ status = ice_rem_prof_id_vsig(hw, blk, i, id,
+ &chg);
+ if (status)
+ goto err_ice_rem_flow_all;
+ }
+ }
+ }
+
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_flow_all:
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+ LIST_DEL(&del->list_entry);
+ ice_free(hw, del);
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_prof - remove profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @id: profile tracking ID
+ *
+ * This will remove the profile specified by the ID parameter, which was
+ * previously created through ice_add_prof. If any existing entries
+ * are associated with this profile, they will be removed as well.
+ */
+enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+{
+ struct ice_prof_map *pmap;
+ enum ice_status status;
+
+ ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
+
+ pmap = ice_search_prof_id_low(hw, blk, id);
+ if (!pmap) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto err_ice_rem_prof;
+ }
+
+ /* remove all flows with this profile */
+ status = ice_rem_flow_all(hw, blk, pmap->profile_cookie);
+ if (status)
+ goto err_ice_rem_prof;
+
+ /* dereference profile, and possibly remove */
+ ice_prof_dec_ref(hw, blk, pmap->prof_id);
+
+ LIST_DEL(&pmap->list);
+ ice_free(hw, pmap);
+
+err_ice_rem_prof:
+ ice_release_lock(&hw->blk[blk].es.prof_map_lock);
+ return status;
+}
+
+/**
+ * ice_get_prof - get profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: profile handle
+ * @chg: change list
+ */
+static enum ice_status
+ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
+ struct LIST_HEAD_TYPE *chg)
+{
+ struct ice_prof_map *map;
+ struct ice_chs_chg *p;
+ u16 i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ for (i = 0; i < map->ptg_cnt; i++) {
+ if (!hw->blk[blk].es.written[map->prof_id]) {
+ /* add ES to change list */
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ goto err_ice_get_prof;
+
+ p->type = ICE_PTG_ES_ADD;
+ p->ptype = 0;
+ p->ptg = map->ptg[i];
+ p->add_ptg = 0;
+
+ p->add_prof = 1;
+ p->prof_id = map->prof_id;
+
+ hw->blk[blk].es.written[map->prof_id] = true;
+
+ LIST_ADD(&p->list_entry, chg);
+ }
+ }
+
+ return ICE_SUCCESS;
+
+err_ice_get_prof:
+ /* let caller clean up the change list */
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: VSIG from which to copy the list
+ * @lst: output list
+ *
+ * This routine makes a copy of the list of profiles in the specified VSIG.
+ */
+static enum ice_status
+ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct LIST_HEAD_TYPE *lst)
+{
+ struct ice_vsig_prof *ent1, *ent2;
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+
+ LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ struct ice_vsig_prof *p;
+
+ /* copy to the input list */
+ p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p),
+ ICE_NONDMA_TO_NONDMA);
+ if (!p)
+ goto err_ice_get_profs_vsig;
+
+ LIST_ADD_TAIL(&p->list, lst);
+ }
+
+ return ICE_SUCCESS;
+
+err_ice_get_profs_vsig:
+ LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
+ LIST_DEL(&ent1->list);
+ ice_free(hw, ent1);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_add_prof_to_lst - add profile entry to a list
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @lst: the list to be added to
+ * @hdl: profile handle of entry to add
+ */
+static enum ice_status
+ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
+ struct LIST_HEAD_TYPE *lst, u64 hdl)
+{
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *p;
+ u16 i;
+
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ p->profile_cookie = map->profile_cookie;
+ p->prof_id = map->prof_id;
+ p->tcam_count = map->ptg_cnt;
+
+ for (i = 0; i < map->ptg_cnt; i++) {
+ p->tcam[i].prof_id = map->prof_id;
+ p->tcam[i].tcam_idx = ICE_INVALID_TCAM;
+ p->tcam[i].ptg = map->ptg[i];
+ }
+
+ LIST_ADD(&p->list, lst);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_move_vsi - move VSI to another VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to move
+ * @vsig: the VSIG to move the VSI to
+ * @chg: the change list
+ */
+static enum ice_status
+ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
+ struct LIST_HEAD_TYPE *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 orig_vsig;
+
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+ if (!status)
+ status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+
+ if (status) {
+ ice_free(hw, p);
+ return status;
+ }
+
+ p->type = ICE_VSI_MOVE;
+ p->vsi = vsi;
+ p->orig_vsig = orig_vsig;
+ p->vsig = vsig;
+
+ LIST_ADD(&p->list_entry, chg);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
+ * @hw: pointer to the HW struct
+ * @idx: the index of the TCAM entry to remove
+ * @chg: the list of change structures to search
+ */
+static void
+ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
+{
+ struct ice_chs_chg *pos, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) {
+ if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) {
+ LIST_DEL(&tmp->list_entry);
+ ice_free(hw, tmp);
+ }
+ }
+}
+
+/**
+ * ice_prof_tcam_ena_dis - add enable or disable TCAM change
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @enable: true to enable, false to disable
+ * @vsig: the VSIG of the TCAM entry
+ * @tcam: pointer to the TCAM info structure of the TCAM to enable or disable
+ * @chg: the change list
+ *
+ * This function appends an enable or disable TCAM entry to the change list
+ */
+static enum ice_status
+ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
+ u16 vsig, struct ice_tcam_inf *tcam,
+ struct LIST_HEAD_TYPE *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ /* if disabling, free the TCAM */
+ if (!enable) {
+ status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx);
+
+ /* if we have already created a change for this TCAM entry, then
+ * we need to remove that entry, in order to prevent writing to
+ * a TCAM entry that we will no longer own.
+ */
+ ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg);
+ tcam->tcam_idx = 0;
+ tcam->in_use = 0;
+ return status;
+ }
+
+ /* for re-enabling, reallocate a TCAM */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
+ if (status)
+ return status;
+
+ /* add TCAM to change list */
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
+ tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
+ nm_msk);
+ if (status)
+ goto err_ice_prof_tcam_ena_dis;
+
+ tcam->in_use = 1;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = tcam->prof_id;
+ p->ptg = tcam->ptg;
+ p->vsig = 0;
+ p->tcam_idx = tcam->tcam_idx;
+
+ /* log change */
+ LIST_ADD(&p->list_entry, chg);
+
+ return ICE_SUCCESS;
+
+err_ice_prof_tcam_ena_dis:
+ ice_free(hw, p);
+ return status;
+}
+
+/**
+ * ice_adj_prof_priorities - adjust profile based on priorities
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG for which to adjust profile priorities
+ * @chg: the change list
+ */
+static enum ice_status
+ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
+ struct LIST_HEAD_TYPE *chg)
+{
+ ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_vsig_prof *t;
+ u16 idx;
+
+ ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ /* Priority is based on the order in which the profiles are added. The
+ * newest added profile has highest priority and the oldest added
+ * profile has the lowest priority. Since the profile property list for
+ * a VSIG is sorted from newest to oldest, this code traverses the list
+ * in order and enables the first of each PTG that it finds (that is not
+ * already enabled); it also disables any duplicate PTGs that it finds
+ * in the older profiles (that are currently enabled).
+ */
+
+ LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
+ ice_vsig_prof, list) {
+ u16 i;
+
+ for (i = 0; i < t->tcam_count; i++) {
+ bool used;
+
+ /* Scan the priorities from newest to oldest.
+ * Make sure that the newest profiles take priority.
+ */
+ used = ice_is_bit_set(ptgs_used, t->tcam[i].ptg);
+
+ if (used && t->tcam[i].in_use) {
+ /* need to mark this PTG as never match, as it
+ * was already in use and is therefore a duplicate
+ * (and lower priority)
+ */
+ status = ice_prof_tcam_ena_dis(hw, blk, false,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ } else if (!used && !t->tcam[i].in_use) {
+ /* need to enable this PTG, as it is not in use
+ * and not enabled (highest priority)
+ */
+ status = ice_prof_tcam_ena_dis(hw, blk, true,
+ vsig,
+ &t->tcam[i],
+ chg);
+ if (status)
+ return status;
+ }
+
+ /* keep track of used ptgs */
+ ice_set_bit(t->tcam[i].ptg, ptgs_used);
+ }
+ }
+
+ return status;
+}
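+
+/*
+ * Worked example (illustration only): if a VSIG holds profile B (newest) and
+ * profile A (older), and both carry a TCAM entry for PTG 5, the walk above
+ * reaches B first, leaves its entry enabled and marks PTG 5 as used; when it
+ * later reaches A's entry for PTG 5, that entry is released as a duplicate,
+ * so the newest profile wins.
+ */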
+
+/**
+ * ice_add_prof_id_vsig - add profile to VSIG
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsig: the VSIG to which this profile is to be added
+ * @hdl: the profile handle indicating the profile to add
+ * @rev: true to add entries to the end of the list
+ * @chg: the change list
+ */
+static enum ice_status
+ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
+ bool rev, struct LIST_HEAD_TYPE *chg)
+{
+ /* Masks that ignore flags */
+ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
+ u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
+ struct ice_prof_map *map;
+ struct ice_vsig_prof *t;
+ struct ice_chs_chg *p;
+ u16 vsig_idx, i;
+
+ /* Get the details on the profile specified by the handle ID */
+ map = ice_search_prof_id(hw, blk, hdl);
+ if (!map)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Error, if this VSIG already has this profile */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl))
+ return ICE_ERR_ALREADY_EXISTS;
+
+ /* new VSIG profile structure */
+ t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
+ if (!t)
+ return ICE_ERR_NO_MEMORY;
+
+ t->profile_cookie = map->profile_cookie;
+ t->prof_id = map->prof_id;
+ t->tcam_count = map->ptg_cnt;
+
+ /* create TCAM entries */
+ for (i = 0; i < map->ptg_cnt; i++) {
+ enum ice_status status;
+ u16 tcam_idx;
+
+ /* add TCAM to change list */
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ goto err_ice_add_prof_id_vsig;
+
+ /* allocate the TCAM entry index */
+ status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
+ if (status) {
+ ice_free(hw, p);
+ goto err_ice_add_prof_id_vsig;
+ }
+
+ t->tcam[i].ptg = map->ptg[i];
+ t->tcam[i].prof_id = map->prof_id;
+ t->tcam[i].tcam_idx = tcam_idx;
+ t->tcam[i].in_use = true;
+
+ p->type = ICE_TCAM_ADD;
+ p->add_tcam_idx = true;
+ p->prof_id = t->tcam[i].prof_id;
+ p->ptg = t->tcam[i].ptg;
+ p->vsig = vsig;
+ p->tcam_idx = t->tcam[i].tcam_idx;
+
+ /* write the TCAM entry */
+ status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
+ t->tcam[i].prof_id,
+ t->tcam[i].ptg, vsig, 0, 0,
+ vl_msk, dc_msk, nm_msk);
+ if (status) {
+ ice_free(hw, p);
+ goto err_ice_add_prof_id_vsig;
+ }
+
+ /* log change */
+ LIST_ADD(&p->list_entry, chg);
+ }
+
+ /* add profile to VSIG */
+ vsig_idx = vsig & ICE_VSIG_IDX_M;
+ if (rev)
+ LIST_ADD_TAIL(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+ else
+ LIST_ADD(&t->list,
+ &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
+
+ return ICE_SUCCESS;
+
+err_ice_add_prof_id_vsig:
+ /* let caller clean up the change list */
+ ice_free(hw, t);
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_create_prof_id_vsig - add a new VSIG with a single profile
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @hdl: the profile handle of the profile that will be added to the VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
+ struct LIST_HEAD_TYPE *chg)
+{
+ enum ice_status status;
+ struct ice_chs_chg *p;
+ u16 new_vsig;
+
+ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
+ if (!p)
+ return ICE_ERR_NO_MEMORY;
+
+ new_vsig = ice_vsig_alloc(hw, blk);
+ if (!new_vsig) {
+ status = ICE_ERR_HW_TABLE;
+ goto err_ice_create_prof_id_vsig;
+ }
+
+ status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
+ if (status)
+ goto err_ice_create_prof_id_vsig;
+
+ p->type = ICE_VSIG_ADD;
+ p->vsi = vsi;
+ p->orig_vsig = ICE_DEFAULT_VSIG;
+ p->vsig = new_vsig;
+
+ LIST_ADD(&p->list_entry, chg);
+
+ return ICE_SUCCESS;
+
+err_ice_create_prof_id_vsig:
+ /* let caller clean up the change list */
+ ice_free(hw, p);
+ return status;
+}
+
+/**
+ * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the initial VSI that will be in VSIG
+ * @lst: the list of profiles that will be added to the VSIG
+ * @new_vsig: return of new VSIG
+ * @chg: the change list
+ */
+static enum ice_status
+ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
+ struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
+ struct LIST_HEAD_TYPE *chg)
+{
+ struct ice_vsig_prof *t;
+ enum ice_status status;
+ u16 vsig;
+
+ vsig = ice_vsig_alloc(hw, blk);
+ if (!vsig)
+ return ICE_ERR_HW_TABLE;
+
+ status = ice_move_vsi(hw, blk, vsi, vsig, chg);
+ if (status)
+ return status;
+
+ LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
+ /* Reverse the order here since we are copying the list */
+ status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
+ true, chg);
+ if (status)
+ return status;
+ }
+
+ *new_vsig = vsig;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_find_prof_vsig - find a VSIG with a specific profile handle
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @hdl: the profile handle of the profile to search for
+ * @vsig: returns the VSIG with the matching profile
+ */
+static bool
+ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
+{
+ struct ice_vsig_prof *t;
+ struct LIST_HEAD_TYPE lst;
+ enum ice_status status;
+
+ INIT_LIST_HEAD(&lst);
+
+ t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
+ if (!t)
+ return false;
+
+ t->profile_cookie = hdl;
+ LIST_ADD(&t->list, &lst);
+
+ status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);
+
+ LIST_DEL(&t->list);
+ ice_free(hw, t);
+
+ return status == ICE_SUCCESS;
+}
+
+/**
+ * ice_add_vsi_flow - add VSI flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: input VSI
+ * @vsig: target VSIG to include the input VSI
+ *
+ * Calling this function will add the VSI to a given VSIG and
+ * update the HW tables accordingly. This call can be used to
+ * add multiple VSIs to a VSIG if we know beforehand that those
+ * VSIs have the same characteristics as the VSIG. This avoids
+ * the cost of generating a new VSIG and TCAM entries, searching
+ * for a match, and then rolling back once a matching VSIG is found.
+ */
+enum ice_status
+ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+ struct ice_chs_chg *tmp, *del;
+ struct LIST_HEAD_TYPE chg;
+ enum ice_status status;
+
+ /* if target VSIG is default the move is invalid */
+ if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
+ return ICE_ERR_PARAM;
+
+ INIT_LIST_HEAD(&chg);
+
+ /* move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ /* update hardware if success */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+ LIST_DEL(&del->list_entry);
+ ice_free(hw, del);
+ }
+
+ return status;
+}
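+
+/*
+ * Usage sketch (illustration only, not part of the driver sources): when a
+ * caller has already tracked the VSIG of a VSI with identical
+ * characteristics (for example by looking it up with ice_vsig_find_vsi()
+ * within this module), a new VSI can be attached directly instead of
+ * re-running the profile/VSIG search. "known_vsi" and "new_vsi" are
+ * placeholders for caller-chosen VSI numbers:
+ *
+ *	u16 vsig;
+ *
+ *	status = ice_vsig_find_vsi(hw, ICE_BLK_RSS, known_vsi, &vsig);
+ *	if (!status && (vsig & ICE_VSIG_IDX_M) != ICE_DEFAULT_VSIG)
+ *		status = ice_add_vsi_flow(hw, ICE_BLK_RSS, new_vsi, vsig);
+ */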
+
+/**
+ * ice_add_prof_id_flow - add profile flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI to enable with the profile specified by ID
+ * @hdl: profile handle
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the hdl parameter for the specified VSI. Once
+ * successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+ struct ice_vsig_prof *tmp1, *del1;
+ struct LIST_HEAD_TYPE union_lst;
+ struct ice_chs_chg *tmp, *del;
+ struct LIST_HEAD_TYPE chg;
+ enum ice_status status;
+ u16 vsig;
+
+ INIT_LIST_HEAD(&union_lst);
+ INIT_LIST_HEAD(&chg);
+
+ /* Get profile */
+ status = ice_get_prof(hw, blk, hdl, &chg);
+ if (status)
+ return status;
+
+ /* determine if VSI is already part of a VSIG */
+ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+ if (!status && vsig) {
+ bool only_vsi;
+ u16 or_vsig;
+ u16 ref;
+
+ /* found in VSIG */
+ or_vsig = vsig;
+
+ /* make sure that there is no overlap/conflict between the new
+ * characteristics and the existing ones; we don't support that
+ * scenario
+ */
+ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
+ status = ICE_ERR_ALREADY_EXISTS;
+ goto err_ice_add_prof_id_flow;
+ }
+
+ /* last VSI in the VSIG? */
+ status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ only_vsi = (ref == 1);
+
+ /* create a union of the current profiles and the one being
+ * added
+ */
+ status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+		/* search for an existing VSIG with matching characteristics */
+ status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
+ if (!status) {
+ /* move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* VSI has been moved out of or_vsig. If the or_vsig had
+ * only that VSI it is now empty and can be removed.
+ */
+ if (only_vsi) {
+ status = ice_rem_vsig(hw, blk, or_vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ } else if (only_vsi) {
+ /* If the original VSIG only contains one VSI, then it
+ * will be the requesting VSI. In this case the VSI is
+ * not sharing entries and we can simply add the new
+ * profile to the VSIG.
+ */
+ status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
+ &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ } else {
+ /* No match, so we need a new VSIG */
+ status = ice_create_vsig_from_lst(hw, blk, vsi,
+ &union_lst, &vsig,
+ &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ } else {
+ /* need to find or add a VSIG */
+		/* search for an existing VSIG with matching characteristics */
+ if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
+ /* found an exact match */
+ /* add or move VSI to the VSIG that matches */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ } else {
+ /* we did not find an exact match */
+ /* we need to add a VSIG */
+ status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
+ &chg);
+ if (status)
+ goto err_ice_add_prof_id_flow;
+ }
+ }
+
+ /* update hardware */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_add_prof_id_flow:
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+ LIST_DEL(&del->list_entry);
+ ice_free(hw, del);
+ }
+
+ LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
+ LIST_DEL(&del1->list);
+ ice_free(hw, del1);
+ }
+
+ return status;
+}
+
+/**
+ * ice_add_flow - add flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: array of VSIs to enable with the profile specified by ID
+ * @count: number of elements in the VSI array
+ * @id: profile tracking ID
+ *
+ * Calling this function will update the hardware tables to enable the
+ * profile indicated by the ID parameter for the VSIs specified in the VSI
+ * array. Once successfully called, the flow will be enabled.
+ */
+enum ice_status
+ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+ u64 id)
+{
+ enum ice_status status;
+ u16 i;
+
+ for (i = 0; i < count; i++) {
+ status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
+ if (status)
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_rem_prof_from_list - remove a profile from list
+ * @hw: pointer to the HW struct
+ * @lst: list to remove the profile from
+ * @hdl: the profile handle indicating the profile to remove
+ */
+static enum ice_status
+ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
+{
+ struct ice_vsig_prof *ent, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list) {
+ if (ent->profile_cookie == hdl) {
+ LIST_DEL(&ent->list);
+ ice_free(hw, ent);
+ return ICE_SUCCESS;
+ }
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_rem_prof_id_flow - remove flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: the VSI from which to remove the profile specified by ID
+ * @hdl: profile tracking handle
+ *
+ * Calling this function will update the hardware tables to remove the
+ * profile indicated by the handle parameter from the specified VSI. Once
+ * successfully called, the flow will be disabled.
+ */
+enum ice_status
+ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
+{
+ struct ice_vsig_prof *tmp1, *del1;
+ struct LIST_HEAD_TYPE chg, copy;
+ struct ice_chs_chg *tmp, *del;
+ enum ice_status status;
+ u16 vsig;
+
+ INIT_LIST_HEAD(&copy);
+ INIT_LIST_HEAD(&chg);
+
+ /* determine if VSI is already part of a VSIG */
+ status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
+ if (!status && vsig) {
+ bool last_profile;
+ bool only_vsi;
+ u16 ref;
+
+ /* found in VSIG */
+ last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
+ status = ice_vsig_get_ref(hw, blk, vsig, &ref);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ only_vsi = (ref == 1);
+
+ if (only_vsi) {
+ /* If the original VSIG only contains one reference,
+ * which will be the requesting VSI, then the VSI is not
+ * sharing entries and we can simply remove the specific
+ * characteristics from the VSIG.
+ */
+
+ if (last_profile) {
+ /* If there are no profiles left for this VSIG,
+				 * then simply remove the VSIG.
+ */
+ status = ice_rem_vsig(hw, blk, vsig, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ } else {
+ status = ice_rem_prof_id_vsig(hw, blk, vsig,
+ hdl, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig,
+ &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ }
+
+ } else {
+ /* Make a copy of the VSIG's list of Profiles */
+ status = ice_get_profs_vsig(hw, blk, vsig, &copy);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Remove specified profile entry from the list */
+ status = ice_rem_prof_from_list(hw, &copy, hdl);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ if (LIST_EMPTY(&copy)) {
+ status = ice_move_vsi(hw, blk, vsi,
+ ICE_DEFAULT_VSIG, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ } else if (!ice_find_dup_props_vsig(hw, blk, &copy,
+ &vsig)) {
+				/* Found a VSIG with a matching profile list;
+				 * move the VSI to that VSIG.
+				 */
+ status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ } else {
+ /* since no existing VSIG supports this
+ * characteristic pattern, we need to create a
+ * new VSIG and TCAM entries
+ */
+ status = ice_create_vsig_from_lst(hw, blk, vsi,
+ &copy, &vsig,
+ &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+
+ /* Adjust priorities */
+ status = ice_adj_prof_priorities(hw, blk, vsig,
+ &chg);
+ if (status)
+ goto err_ice_rem_prof_id_flow;
+ }
+ }
+ } else {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ }
+
+ /* update hardware tables */
+ if (!status)
+ status = ice_upd_prof_hw(hw, blk, &chg);
+
+err_ice_rem_prof_id_flow:
+ LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
+ LIST_DEL(&del->list_entry);
+ ice_free(hw, del);
+ }
+
+ LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
+ LIST_DEL(&del1->list);
+ ice_free(hw, del1);
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_flow - remove flow
+ * @hw: pointer to the HW struct
+ * @blk: hardware block
+ * @vsi: array of VSIs from which to remove the profile specified by ID
+ * @count: number of elements in the VSI array
+ * @id: profile tracking ID
+ *
+ * The function will remove flows from the specified VSIs that were enabled
+ * using ice_add_flow. The ID value indicates which profile will be
+ * removed. Once successfully called, the flow will be disabled.
+ */
+enum ice_status
+ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
+ u64 id)
+{
+ enum ice_status status;
+ u16 i;
+
+ for (i = 0; i < count; i++) {
+ status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
+ if (status)
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
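+
+/* Usage sketch (illustrative only; the block, VSI numbers and tracking ID
+ * below are hypothetical and not taken from this change).  A caller enables
+ * a profile for a set of VSIs with ice_add_flow() and later disables it
+ * again with ice_rem_flow(), passing the same VSI array and tracking ID.
+ * Both helpers walk the VSI array and return on the first failure.
+ *
+ *	u16 vsis[2] = { 3, 5 };
+ *	u64 id = 0x42;
+ *	enum ice_status status;
+ *
+ *	status = ice_add_flow(hw, ICE_BLK_RSS, vsis, 2, id);
+ *	if (!status)
+ *		status = ice_rem_flow(hw, ICE_BLK_RSS, vsis, 2, id);
+ */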
Index: sys/dev/ice/ice_flex_type.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_flex_type.h
@@ -0,0 +1,734 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_FLEX_TYPE_H_
+#define _ICE_FLEX_TYPE_H_
+
+#define ICE_FV_OFFSET_INVAL 0x1FF
+
+#pragma pack(1)
+/* Extraction Sequence (Field Vector) Table */
+struct ice_fv_word {
+ u8 prot_id;
+ u16 off; /* Offset within the protocol header */
+ u8 resvrd;
+};
+#pragma pack()
+
+#define ICE_MAX_NUM_PROFILES 256
+
+#define ICE_MAX_FV_WORDS 48
+struct ice_fv {
+ struct ice_fv_word ew[ICE_MAX_FV_WORDS];
+};
+
+/* Package and segment headers and tables */
+struct ice_pkg_hdr {
+ struct ice_pkg_ver pkg_format_ver;
+ __le32 seg_count;
+ __le32 seg_offset[1];
+};
+
+/* generic segment */
+struct ice_generic_seg_hdr {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_ICE 0x00000010
+ __le32 seg_type;
+ struct ice_pkg_ver seg_format_ver;
+ __le32 seg_size;
+ char seg_id[ICE_PKG_NAME_SIZE];
+};
+
+/* ice specific segment */
+
+union ice_device_id {
+ struct {
+ __le16 device_id;
+ __le16 vendor_id;
+ } dev_vend_id;
+ __le32 id;
+};
+
+struct ice_device_id_entry {
+ union ice_device_id device;
+ union ice_device_id sub_device;
+};
+
+struct ice_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 device_table_count;
+ struct ice_device_id_entry device_table[1];
+};
+
+struct ice_nvm_table {
+ __le32 table_count;
+ __le32 vers[1];
+};
+
+struct ice_buf {
+#define ICE_PKG_BUF_SIZE 4096
+ u8 buf[ICE_PKG_BUF_SIZE];
+};
+
+struct ice_buf_table {
+ __le32 buf_count;
+ struct ice_buf buf_array[1];
+};
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+ struct ice_generic_seg_hdr hdr;
+ struct ice_pkg_ver pkg_ver;
+ __le32 rsvd;
+ char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF 12
+#define ICE_MAX_S_OFF 4095
+#define ICE_MIN_S_SZ 1
+#define ICE_MAX_S_SZ 4084
+
+/* section information */
+struct ice_section_entry {
+ __le32 type;
+ __le16 offset;
+ __le16 size;
+};
+
+#define ICE_MIN_S_COUNT 1
+#define ICE_MAX_S_COUNT 511
+#define ICE_MIN_S_DATA_END 12
+#define ICE_MAX_S_DATA_END 4096
+
+#define ICE_METADATA_BUF 0x80000000
+
+struct ice_buf_hdr {
+ __le16 section_count;
+ __le16 data_end;
+ struct ice_section_entry section_entry[1];
+};
+
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
+ sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
+
+/* ice package section IDs */
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
+#define ICE_SID_XLT1_SW 12
+#define ICE_SID_XLT2_SW 13
+#define ICE_SID_PROFID_TCAM_SW 14
+#define ICE_SID_PROFID_REDIR_SW 15
+#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+#define ICE_SID_CDID_REDIR_SW 18
+
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
+#define ICE_SID_XLT1_ACL 22
+#define ICE_SID_XLT2_ACL 23
+#define ICE_SID_PROFID_TCAM_ACL 24
+#define ICE_SID_PROFID_REDIR_ACL 25
+#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
+#define ICE_SID_XLT1_FD 32
+#define ICE_SID_XLT2_FD 33
+#define ICE_SID_PROFID_TCAM_FD 34
+#define ICE_SID_PROFID_REDIR_FD 35
+#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
+#define ICE_SID_XLT1_RSS 42
+#define ICE_SID_XLT2_RSS 43
+#define ICE_SID_PROFID_TCAM_RSS 44
+#define ICE_SID_PROFID_REDIR_RSS 45
+#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
+
+#define ICE_SID_RXPARSER_CAM 50
+#define ICE_SID_RXPARSER_NOMATCH_CAM 51
+#define ICE_SID_RXPARSER_IMEM 52
+#define ICE_SID_RXPARSER_XLT0_BUILDER 53
+#define ICE_SID_RXPARSER_NODE_PTYPE 54
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
+#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_PROTO_GRP 57
+#define ICE_SID_RXPARSER_METADATA_INIT 58
+#define ICE_SID_RXPARSER_XLT0 59
+
+#define ICE_SID_TXPARSER_CAM 60
+#define ICE_SID_TXPARSER_NOMATCH_CAM 61
+#define ICE_SID_TXPARSER_IMEM 62
+#define ICE_SID_TXPARSER_XLT0_BUILDER 63
+#define ICE_SID_TXPARSER_NODE_PTYPE 64
+#define ICE_SID_TXPARSER_MARKER_PTYPE 65
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
+#define ICE_SID_TXPARSER_PROTO_GRP 67
+#define ICE_SID_TXPARSER_METADATA_INIT 68
+#define ICE_SID_TXPARSER_XLT0 69
+
+#define ICE_SID_RXPARSER_INIT_REDIR 70
+#define ICE_SID_TXPARSER_INIT_REDIR 71
+#define ICE_SID_RXPARSER_MARKER_GRP 72
+#define ICE_SID_TXPARSER_MARKER_GRP 73
+#define ICE_SID_RXPARSER_LAST_PROTO 74
+#define ICE_SID_TXPARSER_LAST_PROTO 75
+#define ICE_SID_RXPARSER_PG_SPILL 76
+#define ICE_SID_TXPARSER_PG_SPILL 77
+#define ICE_SID_RXPARSER_NOMATCH_SPILL 78
+#define ICE_SID_TXPARSER_NOMATCH_SPILL 79
+
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
+#define ICE_SID_XLT1_PE 82
+#define ICE_SID_XLT2_PE 83
+#define ICE_SID_PROFID_TCAM_PE 84
+#define ICE_SID_PROFID_REDIR_PE 85
+#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST 0x80000010
+#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010
+#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011
+#define ICE_SID_LBL_RESERVED_12 0x80000012
+#define ICE_SID_LBL_RESERVED_13 0x80000013
+#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014
+#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015
+#define ICE_SID_LBL_PTYPE 0x80000016
+#define ICE_SID_LBL_PROTOCOL_ID 0x80000017
+#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
+#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019
+#define ICE_SID_LBL_RXPARSER_PG 0x8000001A
+#define ICE_SID_LBL_TXPARSER_PG 0x8000001B
+#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C
+#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D
+#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E
+#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F
+#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020
+#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021
+#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022
+#define ICE_SID_LBL_FLAG 0x80000023
+#define ICE_SID_LBL_REG 0x80000024
+#define ICE_SID_LBL_SW_PTG 0x80000025
+#define ICE_SID_LBL_ACL_PTG 0x80000026
+#define ICE_SID_LBL_PE_PTG 0x80000027
+#define ICE_SID_LBL_RSS_PTG 0x80000028
+#define ICE_SID_LBL_FD_PTG 0x80000029
+#define ICE_SID_LBL_SW_VSIG 0x8000002A
+#define ICE_SID_LBL_ACL_VSIG 0x8000002B
+#define ICE_SID_LBL_PE_VSIG 0x8000002C
+#define ICE_SID_LBL_RSS_VSIG 0x8000002D
+#define ICE_SID_LBL_FD_VSIG 0x8000002E
+#define ICE_SID_LBL_PTYPE_META 0x8000002F
+#define ICE_SID_LBL_SW_PROFID 0x80000030
+#define ICE_SID_LBL_ACL_PROFID 0x80000031
+#define ICE_SID_LBL_PE_PROFID 0x80000032
+#define ICE_SID_LBL_RSS_PROFID 0x80000033
+#define ICE_SID_LBL_FD_PROFID 0x80000034
+#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035
+#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036
+#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037
+#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST 0x80000038
+
+enum ice_block {
+ ICE_BLK_SW = 0,
+ ICE_BLK_ACL,
+ ICE_BLK_FD,
+ ICE_BLK_RSS,
+ ICE_BLK_PE,
+ ICE_BLK_COUNT
+};
+
+enum ice_sect {
+ ICE_XLT0 = 0,
+ ICE_XLT_KB,
+ ICE_XLT1,
+ ICE_XLT2,
+ ICE_PROF_TCAM,
+ ICE_PROF_REDIR,
+ ICE_VEC_TBL,
+ ICE_CDID_KB,
+ ICE_CDID_REDIR,
+ ICE_SECT_COUNT
+};
+
+/* Packet Type (PTYPE) values */
+#define ICE_PTYPE_MAC_PAY 1
+#define ICE_PTYPE_IPV4FRAG_PAY 22
+#define ICE_PTYPE_IPV4_PAY 23
+#define ICE_PTYPE_IPV4_UDP_PAY 24
+#define ICE_PTYPE_IPV4_TCP_PAY 26
+#define ICE_PTYPE_IPV4_SCTP_PAY 27
+#define ICE_PTYPE_IPV4_ICMP_PAY 28
+#define ICE_PTYPE_IPV6FRAG_PAY 88
+#define ICE_PTYPE_IPV6_PAY 89
+#define ICE_PTYPE_IPV6_UDP_PAY 90
+#define ICE_PTYPE_IPV6_TCP_PAY 92
+#define ICE_PTYPE_IPV6_SCTP_PAY 93
+#define ICE_PTYPE_IPV6_ICMP_PAY 94
+
+/* Packet Type Groups (PTG) - Inner Most fields (IM) */
+#define ICE_PTG_IM_IPV4_TCP 16
+#define ICE_PTG_IM_IPV4_UDP 17
+#define ICE_PTG_IM_IPV4_SCTP 18
+#define ICE_PTG_IM_IPV4_PAY 20
+#define ICE_PTG_IM_IPV4_OTHER 21
+#define ICE_PTG_IM_IPV6_TCP 32
+#define ICE_PTG_IM_IPV6_UDP 33
+#define ICE_PTG_IM_IPV6_SCTP 34
+#define ICE_PTG_IM_IPV6_OTHER 37
+#define ICE_PTG_IM_L2_OTHER 67
+
+struct ice_flex_fields {
+ union {
+ struct {
+ u8 src_ip;
+ u8 dst_ip;
+ u8 flow_label; /* valid for IPv6 only */
+ } ip_fields;
+
+ struct {
+ u8 src_prt;
+ u8 dst_prt;
+ } tcp_udp_fields;
+
+ struct {
+ u8 src_ip;
+ u8 dst_ip;
+ u8 src_prt;
+ u8 dst_prt;
+ } ip_tcp_udp_fields;
+
+ struct {
+ u8 src_prt;
+ u8 dst_prt;
+ u8 flow_label; /* valid for IPv6 only */
+ u8 spi;
+ } ip_esp_fields;
+
+ struct {
+ u32 offset;
+ u32 length;
+ } off_len;
+ } fields;
+};
+
+#define ICE_XLT1_DFLT_GRP 0
+#define ICE_XLT1_TABLE_SIZE 1024
+
+/* package labels */
+struct ice_label {
+ __le16 value;
+#define ICE_PKG_LABEL_SIZE 64
+ char name[ICE_PKG_LABEL_SIZE];
+};
+
+struct ice_label_section {
+ __le16 count;
+ struct ice_label label[1];
+};
+
+#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
+ sizeof(struct ice_label_section) - sizeof(struct ice_label), \
+ sizeof(struct ice_label))
+
+struct ice_sw_fv_section {
+ __le16 count;
+ __le16 base_offset;
+ struct ice_fv fv[1];
+};
+
+struct ice_sw_fv_list_entry {
+ struct LIST_ENTRY_TYPE list_entry;
+ u32 profile_id;
+ struct ice_fv *fv_ptr;
+};
+
+#pragma pack(1)
+/* The BOOST TCAM stores the packet header match data in reverse order,
+ * meaning the fields are reversed; as a result, the normally big endian
+ * fields of the packet are stored little endian.
+ */
+struct ice_boost_key_value {
+#define ICE_BOOST_REMAINING_HV_KEY 15
+ u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
+ __le16 hv_dst_port_key;
+ __le16 hv_src_port_key;
+ u8 tcam_search_key;
+};
+#pragma pack()
+
+struct ice_boost_key {
+ struct ice_boost_key_value key;
+ struct ice_boost_key_value key2;
+};
+
+/* package Boost TCAM entry */
+struct ice_boost_tcam_entry {
+ __le16 addr;
+ __le16 reserved;
+ /* break up the 40 bytes of key into different fields */
+ struct ice_boost_key key;
+ u8 boost_hit_index_group;
+ /* The following contains bitfields which are not on byte boundaries.
+ * These fields are currently unused by driver software.
+ */
+#define ICE_BOOST_BIT_FIELDS 43
+ u8 bit_fields[ICE_BOOST_BIT_FIELDS];
+};
+
+struct ice_boost_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_boost_tcam_entry tcam[1];
+};
+
+#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
+ sizeof(struct ice_boost_tcam_section) - \
+ sizeof(struct ice_boost_tcam_entry), \
+ sizeof(struct ice_boost_tcam_entry))
+
+#pragma pack(1)
+struct ice_xlt1_section {
+ __le16 count;
+ __le16 offset;
+ u8 value[1];
+};
+#pragma pack()
+
+#define ICE_XLT1_SIZE(n) (sizeof(struct ice_xlt1_section) + \
+ (sizeof(u8) * ((n) - 1)))
+
+struct ice_xlt2_section {
+ __le16 count;
+ __le16 offset;
+ __le16 value[1];
+};
+
+#define ICE_XLT2_SIZE(n) (sizeof(struct ice_xlt2_section) + \
+ (sizeof(u16) * ((n) - 1)))
+
+struct ice_prof_redir_section {
+ __le16 count;
+ __le16 offset;
+ u8 redir_value[1];
+};
+
+#define ICE_PROF_REDIR_SIZE(n) (sizeof(struct ice_prof_redir_section) + \
+ (sizeof(u8) * ((n) - 1)))
+
+/* package buffer building */
+
+struct ice_buf_build {
+ struct ice_buf buf;
+ u16 reserved_section_table_entries;
+};
+
+struct ice_pkg_enum {
+ struct ice_buf_table *buf_table;
+ u32 buf_idx;
+
+ u32 type;
+ struct ice_buf_hdr *buf;
+ u32 sect_idx;
+ void *sect;
+ u32 sect_type;
+
+ u32 entry_idx;
+ void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
+};
+
+/* Tunnel enabling */
+
+enum ice_tunnel_type {
+ TNL_VXLAN = 0,
+ TNL_GENEVE,
+ TNL_LAST = 0xFF,
+ TNL_ALL = 0xFF,
+};
+
+struct ice_tunnel_type_scan {
+ enum ice_tunnel_type type;
+ const char *label_prefix;
+};
+
+struct ice_tunnel_entry {
+ enum ice_tunnel_type type;
+ u16 boost_addr;
+ u16 port;
+ u16 ref;
+ struct ice_boost_tcam_entry *boost_entry;
+ u8 valid;
+ u8 in_use;
+ u8 marked;
+};
+
+#define ICE_TUNNEL_MAX_ENTRIES 16
+
+struct ice_tunnel_table {
+ struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
+ u16 count;
+};
+
+struct ice_pkg_es {
+ __le16 count;
+ __le16 offset;
+ struct ice_fv_word es[1];
+};
+
+struct ice_es {
+ u32 sid;
+ u16 count;
+ u16 fvw;
+ u16 *ref_count;
+ struct LIST_HEAD_TYPE prof_map;
+ struct ice_fv_word *t;
+ struct ice_lock prof_map_lock; /* protect access to profiles list */
+ u8 *written;
+ u8 reverse; /* set to true to reverse FV order */
+};
+
+/* PTYPE Group management */
+
+/* Note: the XLT1 table takes a 13-bit value as input and produces an 8-bit
+ * packet type group (PTG) ID as output.
+ *
+ * Note: PTG 0 is the default packet type group; all PTYPEs are assumed to be
+ * part of this group until moved to a new PTG.
+ */
+#define ICE_DEFAULT_PTG 0
+
+struct ice_ptg_entry {
+ struct ice_ptg_ptype *first_ptype;
+ u8 in_use;
+};
+
+struct ice_ptg_ptype {
+ struct ice_ptg_ptype *next_ptype;
+ u8 ptg;
+};
+
+#define ICE_MAX_TCAM_PER_PROFILE 32
+#define ICE_MAX_PTG_PER_PROFILE 32
+
+struct ice_prof_map {
+ struct LIST_ENTRY_TYPE list;
+ u64 profile_cookie;
+ u64 context;
+ u8 prof_id;
+ u8 ptg_cnt;
+ u8 ptg[ICE_MAX_PTG_PER_PROFILE];
+};
+
+#define ICE_INVALID_TCAM 0xFFFF
+
+struct ice_tcam_inf {
+ u16 tcam_idx;
+ u8 ptg;
+ u8 prof_id;
+ u8 in_use;
+};
+
+struct ice_vsig_prof {
+ struct LIST_ENTRY_TYPE list;
+ u64 profile_cookie;
+ u8 prof_id;
+ u8 tcam_count;
+ struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE];
+};
+
+struct ice_vsig_entry {
+ struct LIST_HEAD_TYPE prop_lst;
+ struct ice_vsig_vsi *first_vsi;
+ u8 in_use;
+};
+
+struct ice_vsig_vsi {
+ struct ice_vsig_vsi *next_vsi;
+ u32 prop_mask;
+ u16 changed;
+ u16 vsig;
+};
+
+#define ICE_XLT1_CNT 1024
+#define ICE_MAX_PTGS 256
+
+/* XLT1 Table */
+struct ice_xlt1 {
+ struct ice_ptg_entry *ptg_tbl;
+ struct ice_ptg_ptype *ptypes;
+ u8 *t;
+ u32 sid;
+ u16 count;
+};
+
+#define ICE_XLT2_CNT 768
+#define ICE_MAX_VSIGS 768
+
+/* VSIG bit layout:
+ * [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS
+ * [13:15]: PF number of device
+ */
+#define ICE_VSIG_IDX_M (0x1FFF)
+#define ICE_PF_NUM_S 13
+#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S)
+#define ICE_VSIG_VALUE(vsig, pf_id) \
+ (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
+ (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))
+#define ICE_DEFAULT_VSIG 0
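+
+/* Worked example (values are illustrative): ICE_VSIG_VALUE(5, 2) yields
+ * (5 & ICE_VSIG_IDX_M) | ((2 << ICE_PF_NUM_S) & ICE_PF_NUM_M) = 0x4005,
+ * i.e. VSIG index 5 in bits [12:0] and PF number 2 in bits [15:13].
+ */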
+
+/* XLT2 Table */
+struct ice_xlt2 {
+ struct ice_vsig_entry *vsig_tbl;
+ struct ice_vsig_vsi *vsis;
+ u16 *t;
+ u32 sid;
+ u16 count;
+};
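+
+/* Traversal sketch (illustrative only; "xlt2" and "idx" below are a
+ * hypothetical struct ice_xlt2 pointer and VSIG index).  The VSIs that
+ * belong to a VSIG form a singly linked chain rooted in the VSIG table and
+ * threaded through the vsis array:
+ *
+ *	struct ice_vsig_vsi *cur = xlt2->vsig_tbl[idx].first_vsi;
+ *
+ *	while (cur) {
+ *		// operate on this VSI entry
+ *		cur = cur->next_vsi;
+ *	}
+ */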
+
+/* Extraction sequence - list of match fields:
+ * protocol ID, offset, profile length
+ */
+union ice_match_fld {
+ struct {
+ u8 prot_id;
+ u8 offset;
+ u8 length;
+ u8 reserved; /* must be zero */
+ } fld;
+ u32 val;
+};
+
+#define ICE_MATCH_LIST_SZ 20
+#pragma pack(1)
+struct ice_match {
+ u8 count;
+ union ice_match_fld list[ICE_MATCH_LIST_SZ];
+};
+
+/* Profile ID Management */
+struct ice_prof_id_key {
+ __le16 flags;
+ u8 xlt1;
+ __le16 xlt2_cdid;
+};
+
+/* Keys are made up of two values, each one-half the size of the key.
+ * For TCAM, the entire key is 80 bits wide (or two 40-bit values).
+ */
+#define ICE_TCAM_KEY_VAL_SZ 5
+#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ)
+
+struct ice_prof_tcam_entry {
+ __le16 addr;
+ u8 key[ICE_TCAM_KEY_SZ];
+ u8 prof_id;
+};
+
+struct ice_prof_id_section {
+ __le16 count;
+ struct ice_prof_tcam_entry entry[1];
+};
+#pragma pack()
+
+struct ice_prof_tcam {
+ u32 sid;
+ u16 count;
+ u16 max_prof_id;
+ struct ice_prof_tcam_entry *t;
+ u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */
+};
+
+struct ice_prof_redir {
+ u8 *t;
+ u32 sid;
+ u16 count;
+};
+
+/* Tables per block */
+struct ice_blk_info {
+ struct ice_xlt1 xlt1;
+ struct ice_xlt2 xlt2;
+ struct ice_prof_tcam prof;
+ struct ice_prof_redir prof_redir;
+ struct ice_es es;
+ u8 overwrite; /* set to true to allow overwrite of table entries */
+ u8 is_list_init;
+};
+
+enum ice_chg_type {
+ ICE_TCAM_NONE = 0,
+ ICE_PTG_ES_ADD,
+ ICE_TCAM_ADD,
+ ICE_VSIG_ADD,
+ ICE_VSIG_REM,
+ ICE_VSI_MOVE,
+};
+
+struct ice_chs_chg {
+ struct LIST_ENTRY_TYPE list_entry;
+ enum ice_chg_type type;
+
+ u8 add_ptg;
+ u8 add_vsig;
+ u8 add_tcam_idx;
+ u8 add_prof;
+ u16 ptype;
+ u8 ptg;
+ u8 prof_id;
+ u16 vsi;
+ u16 vsig;
+ u16 orig_vsig;
+ u16 tcam_idx;
+};
+
+#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
+
+enum ice_prof_type {
+ ICE_PROF_NON_TUN = 0x1,
+ ICE_PROF_TUN_UDP = 0x2,
+ ICE_PROF_TUN_GRE = 0x4,
+ ICE_PROF_TUN_ALL = 0x6,
+ ICE_PROF_ALL = 0xFF,
+};
+#endif /* _ICE_FLEX_TYPE_H_ */
Index: sys/dev/ice/ice_flow.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_flow.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_FLOW_H_
+#define _ICE_FLOW_H_
+
+#include "ice_flex_type.h"
+#define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix)))
+#define ICE_FLOW_PROF_ID_INVAL 0xfffffffffffffffful
+#define ICE_FLOW_PROF_ID_BYPASS 0
+#define ICE_FLOW_PROF_ID_DEFAULT 1
+#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
+#define ICE_FLOW_VSI_INVAL 0xffff
+#define ICE_FLOW_FLD_OFF_INVAL 0xffff
+
+/* Generate flow hash field from flow field type(s) */
+#define ICE_FLOW_HASH_IPV4 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA))
+#define ICE_FLOW_HASH_IPV6 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
+#define ICE_FLOW_HASH_TCP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
+#define ICE_FLOW_HASH_UDP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT))
+#define ICE_FLOW_HASH_SCTP_PORT \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT))
+
+#define ICE_HASH_INVALID 0
+#define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
+
+/* Protocol header fields within a packet segment. A segment consists of one
+ * or more protocol headers that make up a logical group. Each logical group
+ * may encapsulate, or be encapsulated by, tunneling or encapsulation
+ * protocols used for network virtualization, such as GRE or VxLAN.
+ */
+enum ice_flow_seg_hdr {
+ ICE_FLOW_SEG_HDR_NONE = 0x00000000,
+ ICE_FLOW_SEG_HDR_ETH = 0x00000001,
+ ICE_FLOW_SEG_HDR_VLAN = 0x00000002,
+ ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
+ ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
+ ICE_FLOW_SEG_HDR_ARP = 0x00000010,
+ ICE_FLOW_SEG_HDR_ICMP = 0x00000020,
+ ICE_FLOW_SEG_HDR_TCP = 0x00000040,
+ ICE_FLOW_SEG_HDR_UDP = 0x00000080,
+ ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
+ ICE_FLOW_SEG_HDR_GRE = 0x00000200,
+};
+
+enum ice_flow_field {
+ /* L2 */
+ ICE_FLOW_FIELD_IDX_ETH_DA,
+ ICE_FLOW_FIELD_IDX_ETH_SA,
+ ICE_FLOW_FIELD_IDX_S_VLAN,
+ ICE_FLOW_FIELD_IDX_C_VLAN,
+ ICE_FLOW_FIELD_IDX_ETH_TYPE,
+ /* L3 */
+ ICE_FLOW_FIELD_IDX_IPV4_DSCP,
+ ICE_FLOW_FIELD_IDX_IPV6_DSCP,
+ ICE_FLOW_FIELD_IDX_IPV4_TTL,
+ ICE_FLOW_FIELD_IDX_IPV4_PROT,
+ ICE_FLOW_FIELD_IDX_IPV6_TTL,
+ ICE_FLOW_FIELD_IDX_IPV6_PROT,
+ ICE_FLOW_FIELD_IDX_IPV4_SA,
+ ICE_FLOW_FIELD_IDX_IPV4_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_DA,
+ /* L4 */
+ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+ ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
+ ICE_FLOW_FIELD_IDX_TCP_FLAGS,
+ /* ARP */
+ ICE_FLOW_FIELD_IDX_ARP_SIP,
+ ICE_FLOW_FIELD_IDX_ARP_DIP,
+ ICE_FLOW_FIELD_IDX_ARP_SHA,
+ ICE_FLOW_FIELD_IDX_ARP_DHA,
+ ICE_FLOW_FIELD_IDX_ARP_OP,
+ /* ICMP */
+ ICE_FLOW_FIELD_IDX_ICMP_TYPE,
+ ICE_FLOW_FIELD_IDX_ICMP_CODE,
+ /* GRE */
+ ICE_FLOW_FIELD_IDX_GRE_KEYID,
+ /* The total number of enums must not exceed 64 */
+ ICE_FLOW_FIELD_IDX_MAX
+};
+
+/* Flow headers and fields for AVF support */
+enum ice_flow_avf_hdr_field {
+ /* Values 0 - 28 are reserved for future use */
+ ICE_AVF_FLOW_FIELD_INVALID = 0,
+ ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
+ ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
+ ICE_AVF_FLOW_FIELD_IPV4_UDP,
+ ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
+ ICE_AVF_FLOW_FIELD_IPV4_TCP,
+ ICE_AVF_FLOW_FIELD_IPV4_SCTP,
+ ICE_AVF_FLOW_FIELD_IPV4_OTHER,
+ ICE_AVF_FLOW_FIELD_FRAG_IPV4,
+ /* Values 37-38 are reserved */
+ ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
+ ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
+ ICE_AVF_FLOW_FIELD_IPV6_UDP,
+ ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
+ ICE_AVF_FLOW_FIELD_IPV6_TCP,
+ ICE_AVF_FLOW_FIELD_IPV6_SCTP,
+ ICE_AVF_FLOW_FIELD_IPV6_OTHER,
+ ICE_AVF_FLOW_FIELD_FRAG_IPV6,
+ ICE_AVF_FLOW_FIELD_RSVD47,
+ ICE_AVF_FLOW_FIELD_FCOE_OX,
+ ICE_AVF_FLOW_FIELD_FCOE_RX,
+ ICE_AVF_FLOW_FIELD_FCOE_OTHER,
+ /* Values 51-62 are reserved */
+ ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
+ ICE_AVF_FLOW_FIELD_MAX
+};
+
+/* Supported RSS offloads. This macro is defined to support the
+ * VIRTCHNL_OP_GET_RSS_HENA_CAPS op: the PF driver sends the RSS hardware
+ * capabilities to the caller of this op.
+ */
+#define ICE_DEFAULT_RSS_HENA ( \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+
+enum ice_flow_dir {
+ ICE_FLOW_DIR_UNDEFINED = 0,
+ ICE_FLOW_TX = 0x01,
+ ICE_FLOW_RX = 0x02,
+ ICE_FLOW_TX_RX = ICE_FLOW_RX | ICE_FLOW_TX
+};
+
+enum ice_flow_priority {
+ ICE_FLOW_PRIO_LOW,
+ ICE_FLOW_PRIO_NORMAL,
+ ICE_FLOW_PRIO_HIGH
+};
+
+#define ICE_FLOW_SEG_MAX 2
+#define ICE_FLOW_SEG_RAW_FLD_MAX 2
+#define ICE_FLOW_PROFILE_MAX 1024
+#define ICE_FLOW_SW_FIELD_VECTOR_MAX 48
+#define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32
+#define ICE_FLOW_FV_EXTRACT_SZ 2
+
+#define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val))
+
+struct ice_flow_seg_xtrct {
+ u8 prot_id; /* Protocol ID of extracted header field */
+ u16 off; /* Starting offset of the field in header in bytes */
+ u8 idx; /* Index of FV entry used */
+ u8 disp; /* Displacement of field in bits fr. FV entry's start */
+};
+
+enum ice_flow_fld_match_type {
+ ICE_FLOW_FLD_TYPE_REG, /* Value, mask */
+ ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */
+ ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */
+ ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */
+};
+
+struct ice_flow_fld_loc {
+ /* Describe offsets of field information relative to the beginning of
+ * input buffer provided when adding flow entries.
+ */
+ u16 val; /* Offset where the value is located */
+ u16 mask; /* Offset where the mask/prefix value is located */
+ u16 last; /* Length or offset where the upper value is located */
+};
+
+struct ice_flow_fld_info {
+ enum ice_flow_fld_match_type type;
+ /* Location where to retrieve data from an input buffer */
+ struct ice_flow_fld_loc src;
+ /* Location where to put the data into the final entry buffer */
+ struct ice_flow_fld_loc entry;
+ struct ice_flow_seg_xtrct xtrct;
+};
+
+struct ice_flow_seg_fld_raw {
+ struct ice_flow_fld_info info;
+ u16 off; /* Offset from the start of the segment */
+};
+
+struct ice_flow_seg_info {
+ u32 hdrs; /* Bitmask indicating protocol headers present */
+ u64 match; /* Bitmask indicating header fields to be matched */
+ u64 range; /* Bitmask indicating header fields matched as ranges */
+
+ struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX];
+
+ u8 raws_cnt; /* Number of raw fields to be matched */
+ struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX];
+};
+
+/* This structure describes a flow entry, and is tracked only in this file */
+struct ice_flow_entry {
+ struct LIST_ENTRY_TYPE l_entry;
+
+ u64 id;
+ struct ice_flow_prof *prof;
+ /* Action list */
+ struct ice_flow_action *acts;
+ /* Flow entry's content */
+ void *entry;
+ enum ice_flow_priority priority;
+ u16 vsi_handle;
+ u16 entry_sz;
+ u8 acts_cnt;
+};
+
+#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
+#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
+
+struct ice_flow_prof {
+ struct LIST_ENTRY_TYPE l_entry;
+
+ u64 id;
+ enum ice_flow_dir dir;
+ u8 segs_cnt;
+ u8 acts_cnt;
+
+ /* Keep track of flow entries associated with this flow profile */
+ struct ice_lock entries_lock;
+ struct LIST_HEAD_TYPE entries;
+
+ struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX];
+
+ /* software VSI handles referenced by this flow profile */
+ ice_declare_bitmap(vsis, ICE_MAX_VSI);
+
+ union {
+ /* struct sw_recipe */
+ /* struct fd */
+ u32 data;
+ } cfg;
+
+ /* Default actions */
+ struct ice_flow_action *acts;
+};
+
+struct ice_rss_cfg {
+ struct LIST_ENTRY_TYPE l_entry;
+ /* bitmap of VSIs added to the RSS entry */
+ ice_declare_bitmap(vsis, ICE_MAX_VSI);
+ u64 hashed_flds;
+ u32 packet_hdr;
+};
+
+enum ice_flow_action_type {
+ ICE_FLOW_ACT_NOP,
+ ICE_FLOW_ACT_ALLOW,
+ ICE_FLOW_ACT_DROP,
+ ICE_FLOW_ACT_CNTR_PKT,
+ ICE_FLOW_ACT_FWD_VSI,
+ ICE_FLOW_ACT_FWD_VSI_LIST, /* Should be abstracted away */
+ ICE_FLOW_ACT_FWD_QUEUE, /* Can Queues be abstracted away? */
+ ICE_FLOW_ACT_FWD_QUEUE_GROUP, /* Can Queues be abstracted away? */
+ ICE_FLOW_ACT_PUSH,
+ ICE_FLOW_ACT_POP,
+ ICE_FLOW_ACT_MODIFY,
+ ICE_FLOW_ACT_CNTR_BYTES,
+ ICE_FLOW_ACT_CNTR_PKT_BYTES,
+ ICE_FLOW_ACT_GENERIC_0,
+ ICE_FLOW_ACT_GENERIC_1,
+ ICE_FLOW_ACT_GENERIC_2,
+ ICE_FLOW_ACT_GENERIC_3,
+ ICE_FLOW_ACT_GENERIC_4,
+ ICE_FLOW_ACT_RPT_FLOW_ID,
+ ICE_FLOW_ACT_BUILD_PROF_IDX,
+};
+
+struct ice_flow_action {
+ enum ice_flow_action_type type;
+ union {
+ u32 dummy;
+ } data;
+};
+
+u64
+ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ struct ice_flow_seg_info *segs, u8 segs_cnt);
+enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_action *acts, u8 acts_cnt,
+ struct ice_flow_prof **prof);
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+enum ice_status
+ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
+ u16 vsig);
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u8 *hw_prof);
+
+u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id);
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u64 entry_id, u16 vsi, enum ice_flow_priority prio,
+ void *data, struct ice_flow_action *acts, u8 acts_cnt,
+ u64 *entry_h);
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
+ u64 entry_h);
+void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
+void
+ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 prefix_loc, u8 prefix_sz);
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+ u16 val_loc, u16 mask_loc);
+void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs);
+enum ice_status
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs);
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
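+
+/* Usage sketch (illustrative only; the VSI handle is hypothetical).  To hash
+ * TCP-over-IPv4 traffic for a VSI, a caller combines the hash field bits
+ * with the matching protocol header flags:
+ *
+ *	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
+ *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
+ *
+ * The same arguments passed to ice_rem_rss_cfg() remove that configuration.
+ */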
+#endif /* _ICE_FLOW_H_ */
Index: sys/dev/ice/ice_flow.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_flow.c
@@ -0,0 +1,2228 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+#include "ice_flow.h"
+
+/* Size of known protocol header fields */
+#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
+#define ICE_FLOW_FLD_SZ_VLAN 2
+#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
+#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
+#define ICE_FLOW_FLD_SZ_IP_DSCP 1
+#define ICE_FLOW_FLD_SZ_IP_TTL 1
+#define ICE_FLOW_FLD_SZ_IP_PROT 1
+#define ICE_FLOW_FLD_SZ_PORT 2
+#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
+#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
+#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
+#define ICE_FLOW_FLD_SZ_ARP_OPER 2
+#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
+
+/* Describe properties of a protocol header field */
+struct ice_flow_field_info {
+ enum ice_flow_seg_hdr hdr;
+ s16 off; /* Offset from start of a protocol header, in bits */
+ u16 size; /* Size of fields in bits */
+};
+
+#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
+ .hdr = _hdr, \
+ .off = (_offset_bytes) * BITS_PER_BYTE, \
+ .size = (_size_bytes) * BITS_PER_BYTE, \
+}
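+
+/* For example, the IPv4 source address entry in the table below,
+ * ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
+ * expands to an offset of 12 * 8 = 96 bits and a size of 4 * 8 = 32 bits.
+ */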
+
+/* Table containing properties of supported protocol header fields */
+static const
+struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
+ /* Ether */
+ /* ICE_FLOW_FIELD_IDX_ETH_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_ETH_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_S_VLAN */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
+ /* ICE_FLOW_FIELD_IDX_C_VLAN */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
+ /* ICE_FLOW_FIELD_IDX_ETH_TYPE */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 12, ICE_FLOW_FLD_SZ_ETH_TYPE),
+ /* IPv4 / IPv6 */
+ /* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 1, ICE_FLOW_FLD_SZ_IP_DSCP),
+ /* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP),
+ /* ICE_FLOW_FIELD_IDX_IPV4_TTL */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 8, ICE_FLOW_FLD_SZ_IP_TTL),
+ /* ICE_FLOW_FIELD_IDX_IPV4_PROT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 9, ICE_FLOW_FLD_SZ_IP_PROT),
+ /* ICE_FLOW_FIELD_IDX_IPV6_TTL */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 7, ICE_FLOW_FLD_SZ_IP_TTL),
+	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 6, ICE_FLOW_FLD_SZ_IP_PROT),
+ /* ICE_FLOW_FIELD_IDX_IPV4_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV4_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
+ /* Transport */
+ /* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
+ /* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
+ /* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
+ /* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
+ /* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
+ /* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
+ /* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
+ /* ARP */
+ /* ICE_FLOW_FIELD_IDX_ARP_SIP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
+ /* ICE_FLOW_FIELD_IDX_ARP_DIP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
+ /* ICE_FLOW_FIELD_IDX_ARP_SHA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_ARP_DHA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
+ /* ICE_FLOW_FIELD_IDX_ARP_OP */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
+ /* ICMP */
+ /* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
+ /* ICE_FLOW_FIELD_IDX_ICMP_CODE */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
+ /* GRE */
+ /* ICE_FLOW_FIELD_IDX_GRE_KEYID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
+};
+
+/* Bitmaps indicating relevant packet types for a particular protocol header
+ *
+ * Packet types for packets with an Outer/First/Single MAC header
+ */
+static const u32 ice_ptypes_mac_ofos[] = {
+ 0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
+ 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last MAC VLAN header */
+static const u32 ice_ptypes_macvlan_il[] = {
+ 0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
+ 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv4 header */
+static const u32 ice_ptypes_ipv4_ofos[] = {
+ 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header */
+static const u32 ice_ptypes_ipv4_il[] = {
+ 0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
+ 0x0000000E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header */
+static const u32 ice_ptypes_ipv6_ofos[] = {
+ 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header */
+static const u32 ice_ptypes_ipv6_il[] = {
+ 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
+ 0x00000770, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First ARP header */
+static const u32 ice_ptypes_arp_of[] = {
+ 0x00000800, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* UDP Packet types for non-tunneled packets or tunneled
+ * packets with inner UDP.
+ */
+static const u32 ice_ptypes_udp_il[] = {
+ 0x81000000, 0x20204040, 0x04000010, 0x80810102,
+ 0x00000040, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last TCP header */
+static const u32 ice_ptypes_tcp_il[] = {
+ 0x04000000, 0x80810102, 0x10000040, 0x02040408,
+ 0x00000102, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last SCTP header */
+static const u32 ice_ptypes_sctp_il[] = {
+ 0x08000000, 0x01020204, 0x20000081, 0x04080810,
+ 0x00000204, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First ICMP header */
+static const u32 ice_ptypes_icmp_of[] = {
+ 0x10000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last ICMP header */
+static const u32 ice_ptypes_icmp_il[] = {
+ 0x00000000, 0x02040408, 0x40000102, 0x08101020,
+ 0x00000408, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outermost/First GRE header */
+static const u32 ice_ptypes_gre_of[] = {
+ 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
+ 0x0000017E, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last MAC header */
+static const u32 ice_ptypes_mac_il[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Manage parameters and info. used during the creation of a flow profile */
+struct ice_flow_prof_params {
+ enum ice_block blk;
+ u16 entry_length; /* # of bytes formatted entry will require */
+ u8 es_cnt;
+ struct ice_flow_prof *prof;
+
+	/* For ACL, es[0] will hold the data of ICE_RX_MDID_PKT_FLAGS_15_0,
+	 * which gives us the direction flags.
+ */
+ struct ice_fv_word es[ICE_MAX_FV_WORDS];
+ ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX);
+};
+
+#define ICE_FLOW_SEG_HDRS_L3_MASK \
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \
+ ICE_FLOW_SEG_HDR_ARP)
+#define ICE_FLOW_SEG_HDRS_L4_MASK \
+ (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
+ ICE_FLOW_SEG_HDR_SCTP)
+
+/**
+ * ice_flow_val_hdrs - validates packet segments for valid protocol headers
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+static enum ice_status
+ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+ u8 i;
+
+ for (i = 0; i < segs_cnt; i++) {
+ /* Multiple L3 headers */
+ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
+ !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
+ return ICE_ERR_PARAM;
+
+ /* Multiple L4 headers */
+ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
+ !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
+ return ICE_ERR_PARAM;
+ }
+
+ return ICE_SUCCESS;
+}
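+
+/* Example: a segment with both ICE_FLOW_SEG_HDR_IPV4 (0x04) and
+ * ICE_FLOW_SEG_HDR_IPV6 (0x08) set produces a masked L3 value of 0x0C,
+ * which is not a power of two, so the segment is rejected with
+ * ICE_ERR_PARAM.
+ */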
+
+/* Sizes of fixed known protocol headers without header options */
+#define ICE_FLOW_PROT_HDR_SZ_MAC 14
+#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
+#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
+#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
+#define ICE_FLOW_PROT_HDR_SZ_ARP 28
+#define ICE_FLOW_PROT_HDR_SZ_ICMP 8
+#define ICE_FLOW_PROT_HDR_SZ_TCP 20
+#define ICE_FLOW_PROT_HDR_SZ_UDP 8
+#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
+
+/**
+ * ice_flow_calc_seg_sz - calculates size of a packet segment based on headers
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose header size is to be determined
+ */
+static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
+{
+ u16 sz;
+
+ /* L2 headers */
+ sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
+ ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
+
+ /* L3 headers */
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
+ sz += ICE_FLOW_PROT_HDR_SZ_ARP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
+ /* A L3 header is required if L4 is specified */
+ return 0;
+
+ /* L4 headers */
+ if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
+ sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
+ sz += ICE_FLOW_PROT_HDR_SZ_TCP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
+ sz += ICE_FLOW_PROT_HDR_SZ_UDP;
+ else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_SCTP)
+ sz += ICE_FLOW_PROT_HDR_SZ_SCTP;
+
+ return sz;
+}
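+
+/* Example: a segment whose hdrs include VLAN, IPv4 and TCP is sized as
+ * 16 (MAC + VLAN) + 20 (IPv4) + 20 (TCP) = 56 bytes, while a segment that
+ * requests an L4 header without any L3 header is sized as 0.
+ */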
+
+/**
+ * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments
+ * @params: information about the flow to be processed
+ *
+ * This function identifies the packet types associated with the protocol
+ * headers present in the packet segments of the specified flow profile.
+ */
+static enum ice_status
+ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+{
+ struct ice_flow_prof *prof;
+ u8 i;
+
+ ice_memset(params->ptypes, 0xff, sizeof(params->ptypes),
+ ICE_NONDMA_MEM);
+
+ prof = params->prof;
+
+ for (i = 0; i < params->prof->segs_cnt; i++) {
+ const ice_bitmap_t *src;
+ u32 hdrs;
+
+ hdrs = prof->segs[i].hdrs;
+
+ if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos :
+ (const ice_bitmap_t *)ice_ptypes_mac_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
+ src = (const ice_bitmap_t *)ice_ptypes_macvlan_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ (const ice_bitmap_t *)ice_ptypes_arp_of,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos :
+ (const ice_bitmap_t *)ice_ptypes_ipv4_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos :
+ (const ice_bitmap_t *)ice_ptypes_ipv6_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ }
+
+ if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
+ src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of :
+ (const ice_bitmap_t *)ice_ptypes_icmp_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
+ src = (const ice_bitmap_t *)ice_ptypes_udp_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) {
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ (const ice_bitmap_t *)ice_ptypes_tcp_il,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) {
+ src = (const ice_bitmap_t *)ice_ptypes_sctp_il;
+ ice_and_bitmap(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
+ if (!i) {
+ src = (const ice_bitmap_t *)ice_ptypes_gre_of;
+ ice_and_bitmap(params->ptypes, params->ptypes,
+ src, ICE_FLOW_PTYPE_MAX);
+ }
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_xtract_pkt_flags - Create an extr sequence entry for packet flags
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @flags: The value of pkt_flags[x:x] in Rx/Tx MDID metadata.
+ *
+ * This function will allocate an extraction sequence entry for a DWORD-sized
+ * chunk of the packet flags.
+ */
+static enum ice_status
+ice_flow_xtract_pkt_flags(struct ice_hw *hw,
+ struct ice_flow_prof_params *params,
+ enum ice_flex_mdid_pkt_flags flags)
+{
+ u8 fv_words = hw->blk[params->blk].es.fvw;
+ u8 idx;
+
+ /* Make sure the number of extraction sequence entries required does not
+ * exceed the block's capacity.
+ */
+ if (params->es_cnt >= fv_words)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = ICE_PROT_META_ID;
+ params->es[idx].off = flags;
+ params->es_cnt++;
+
+ return ICE_SUCCESS;
+}
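+
+/* Editorial note: when a block's field vector layout is reversed, entries
+ * are consumed from the top down; e.g. with a hypothetical es.fvw of 48,
+ * the first consumed entry (es_cnt == 0) lands at index 47, the next at 46,
+ * and so on.
+ */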
+
+/**
+ * ice_flow_xtract_fld - Create an extraction sequence entry for the given field
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: packet segment index of the field to be extracted
+ * @fld: ID of field to be extracted
+ *
+ * This function determines the protocol ID, offset, and size of the given
+ * field. It then allocates one or more extraction sequence entries for the
+ * given field, and fills the entries with protocol ID and offset information.
+ */
+static enum ice_status
+ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
+ u8 seg, enum ice_flow_field fld)
+{
+ enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
+ enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
+ u8 fv_words = hw->blk[params->blk].es.fvw;
+ struct ice_flow_fld_info *flds;
+ u16 cnt, ese_bits, i;
+ s16 adj = 0;
+ u16 off;
+
+ flds = params->prof->segs[seg].fields;
+
+ switch (fld) {
+ case ICE_FLOW_FIELD_IDX_ETH_DA:
+ case ICE_FLOW_FIELD_IDX_ETH_SA:
+ case ICE_FLOW_FIELD_IDX_S_VLAN:
+ case ICE_FLOW_FIELD_IDX_C_VLAN:
+ prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_ETH_TYPE:
+ prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV4_TTL:
+ case ICE_FLOW_FIELD_IDX_IPV4_PROT:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+
+ /* TTL and PROT share the same extraction seq. entry.
+ * Each is considered a sibling to the other in terms of sharing
+ * the same extraction sequence entry.
+ */
+ if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
+ sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
+ else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
+ sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_TTL:
+ case ICE_FLOW_FIELD_IDX_IPV6_PROT:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+
+ /* TTL and PROT share the same extraction seq. entry.
+ * Each is considered a sibling to the other in terms of sharing
+ * the same extraction sequence entry.
+ */
+ if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
+ sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
+ else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
+ sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV4_SA:
+ case ICE_FLOW_FIELD_IDX_IPV4_DA:
+ prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_IPV6_SA:
+ case ICE_FLOW_FIELD_IDX_IPV6_DA:
+ prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
+ case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
+ prot_id = ICE_PROT_TCP_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_UDP_DST_PORT:
+ prot_id = ICE_PROT_UDP_IL_OR_S;
+ break;
+ case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT:
+ case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
+ prot_id = ICE_PROT_SCTP_IL;
+ break;
+ case ICE_FLOW_FIELD_IDX_ARP_SIP:
+ case ICE_FLOW_FIELD_IDX_ARP_DIP:
+ case ICE_FLOW_FIELD_IDX_ARP_SHA:
+ case ICE_FLOW_FIELD_IDX_ARP_DHA:
+ case ICE_FLOW_FIELD_IDX_ARP_OP:
+ prot_id = ICE_PROT_ARP_OF;
+ break;
+ case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
+ case ICE_FLOW_FIELD_IDX_ICMP_CODE:
+ /* ICMP type and code share the same extraction seq. entry */
+ prot_id = (params->prof->segs[seg].hdrs &
+ ICE_FLOW_SEG_HDR_IPV4) ?
+ ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
+ sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
+ ICE_FLOW_FIELD_IDX_ICMP_CODE :
+ ICE_FLOW_FIELD_IDX_ICMP_TYPE;
+ break;
+ case ICE_FLOW_FIELD_IDX_GRE_KEYID:
+ prot_id = ICE_PROT_GRE_OF;
+ break;
+ default:
+ return ICE_ERR_NOT_IMPL;
+ }
+
+ /* Each extraction sequence entry is a word in size, and extracts a
+ * word-aligned offset from a protocol header.
+ */
+ ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE;
+
+ flds[fld].xtrct.prot_id = prot_id;
+ flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) *
+ ICE_FLOW_FV_EXTRACT_SZ;
+ flds[fld].xtrct.disp = (u8)((ice_flds_info[fld].off + adj) % ese_bits);
+ flds[fld].xtrct.idx = params->es_cnt;
+
+ /* Adjust the next field-entry index after accommodating the number of
+ * entries this field consumes
+ */
+ cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp +
+ ice_flds_info[fld].size, ese_bits);
+
+ /* Fill in the extraction sequence entries needed for this field */
+ off = flds[fld].xtrct.off;
+ for (i = 0; i < cnt; i++) {
+		/* Only consume an extraction sequence entry if there is no
+		 * sibling field associated with this field, or if the sibling
+		 * entry does not already extract the word shared with this
+		 * field.
+		 */
+ if (sib == ICE_FLOW_FIELD_IDX_MAX ||
+ flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
+ flds[sib].xtrct.off != off) {
+ u8 idx;
+
+			/* Make sure the number of extraction sequence entries
+			 * required does not exceed the block's capacity
+			 */
+ if (params->es_cnt >= fv_words)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = prot_id;
+ params->es[idx].off = off;
+ params->es_cnt++;
+ }
+
+ off += ICE_FLOW_FV_EXTRACT_SZ;
+ }
+
+ return ICE_SUCCESS;
+}
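+
+/* Worked example (editorial; assumes ICE_FLOW_FV_EXTRACT_SZ is 2, i.e. one
+ * 16-bit word per extraction entry, per the comment above): a hypothetical
+ * field described in ice_flds_info with off = 96 bits and size = 32 bits
+ * yields xtrct.off = (96 / 16) * 2 = 12 bytes, xtrct.disp = 0, and
+ * cnt = DIVIDE_AND_ROUND_UP(0 + 32, 16) = 2 consumed extraction entries.
+ */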
+
+/**
+ * ice_flow_xtract_raws - Create extract sequence entries for raw bytes
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ * @seg: index of packet segment whose raw fields are to be extracted
+ */
+static enum ice_status
+ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
+ u8 seg)
+{
+ u16 fv_words;
+ u16 hdrs_sz;
+ u8 i;
+
+ if (!params->prof->segs[seg].raws_cnt)
+ return ICE_SUCCESS;
+
+ if (params->prof->segs[seg].raws_cnt >
+ ARRAY_SIZE(params->prof->segs[seg].raws))
+ return ICE_ERR_MAX_LIMIT;
+
+ /* Offsets within the segment headers are not supported */
+ hdrs_sz = ice_flow_calc_seg_sz(params, seg);
+ if (!hdrs_sz)
+ return ICE_ERR_PARAM;
+
+ fv_words = hw->blk[params->blk].es.fvw;
+
+ for (i = 0; i < params->prof->segs[seg].raws_cnt; i++) {
+ struct ice_flow_seg_fld_raw *raw;
+ u16 off, cnt, j;
+
+ raw = &params->prof->segs[seg].raws[i];
+
+ /* Storing extraction information */
+ raw->info.xtrct.prot_id = ICE_PROT_MAC_OF_OR_S;
+ raw->info.xtrct.off = (raw->off / ICE_FLOW_FV_EXTRACT_SZ) *
+ ICE_FLOW_FV_EXTRACT_SZ;
+ raw->info.xtrct.disp = (raw->off % ICE_FLOW_FV_EXTRACT_SZ) *
+ BITS_PER_BYTE;
+ raw->info.xtrct.idx = params->es_cnt;
+
+ /* Determine the number of field vector entries this raw field
+ * consumes.
+ */
+ cnt = DIVIDE_AND_ROUND_UP(raw->info.xtrct.disp +
+ (raw->info.src.last * BITS_PER_BYTE),
+ (ICE_FLOW_FV_EXTRACT_SZ *
+ BITS_PER_BYTE));
+ off = raw->info.xtrct.off;
+ for (j = 0; j < cnt; j++) {
+ u16 idx;
+
+			/* Make sure the number of extraction sequence entries
+			 * required does not exceed the block's capacity
+			 */
+ if (params->es_cnt >= hw->blk[params->blk].es.count ||
+ params->es_cnt >= ICE_MAX_FV_WORDS)
+ return ICE_ERR_MAX_LIMIT;
+
+ /* some blocks require a reversed field vector layout */
+ if (hw->blk[params->blk].es.reverse)
+ idx = fv_words - params->es_cnt - 1;
+ else
+ idx = params->es_cnt;
+
+ params->es[idx].prot_id = raw->info.xtrct.prot_id;
+ params->es[idx].off = off;
+ params->es_cnt++;
+ off += ICE_FLOW_FV_EXTRACT_SZ;
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ *
+ * This function iterates through all matched fields in the given segments, and
+ * creates an extraction sequence for the fields.
+ */
+static enum ice_status
+ice_flow_create_xtrct_seq(struct ice_hw *hw,
+ struct ice_flow_prof_params *params)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u8 i;
+
+ /* For ACL, we also need to extract the direction bit (Rx,Tx) data from
+ * packet flags
+ */
+ if (params->blk == ICE_BLK_ACL) {
+ status = ice_flow_xtract_pkt_flags(hw, params,
+ ICE_RX_MDID_PKT_FLAGS_15_0);
+ if (status)
+ return status;
+ }
+
+ for (i = 0; i < params->prof->segs_cnt; i++) {
+ u64 match = params->prof->segs[i].match;
+ enum ice_flow_field j;
+
+ for (j = 0; j < ICE_FLOW_FIELD_IDX_MAX && match; j++) {
+ const u64 bit = BIT_ULL(j);
+
+ if (match & bit) {
+ status = ice_flow_xtract_fld(hw, params, i, j);
+ if (status)
+ return status;
+ match &= ~bit;
+ }
+ }
+
+ /* Process raw matching bytes */
+ status = ice_flow_xtract_raws(hw, params, i);
+ if (status)
+ return status;
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_proc_segs - process all packet segments associated with a profile
+ * @hw: pointer to the HW struct
+ * @params: information about the flow to be processed
+ */
+static enum ice_status
+ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
+{
+ enum ice_status status;
+
+ status = ice_flow_proc_seg_hdrs(params);
+ if (status)
+ return status;
+
+ status = ice_flow_create_xtrct_seq(hw, params);
+ if (status)
+ return status;
+
+ switch (params->blk) {
+ case ICE_BLK_RSS:
+ /* Only header information is provided for RSS configuration.
+ * No further processing is needed.
+ */
+ status = ICE_SUCCESS;
+ break;
+ case ICE_BLK_FD:
+ status = ICE_SUCCESS;
+ break;
+ case ICE_BLK_SW:
+ default:
+ return ICE_ERR_NOT_IMPL;
+ }
+
+ return status;
+}
+
+#define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001
+#define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002
+#define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004
+
+/**
+ * ice_flow_find_prof_conds - Find a profile matching headers and conditions
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI)
+ * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*)
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk,
+ enum ice_flow_dir dir, struct ice_flow_seg_info *segs,
+ u8 segs_cnt, u16 vsi_handle, u32 conds)
+{
+ struct ice_flow_prof *p, *prof = NULL;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) &&
+ segs_cnt && segs_cnt == p->segs_cnt) {
+ u8 i;
+
+ /* Check for profile-VSI association if specified */
+ if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) &&
+ ice_is_vsi_valid(hw, vsi_handle) &&
+ !ice_is_bit_set(p->vsis, vsi_handle))
+ continue;
+
+ /* Protocol headers must be checked. Matched fields are
+ * checked if specified.
+ */
+ for (i = 0; i < segs_cnt; i++)
+ if (segs[i].hdrs != p->segs[i].hdrs ||
+ ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) &&
+ segs[i].match != p->segs[i].match))
+ break;
+
+ /* A match is found if all segments are matched */
+ if (i == segs_cnt) {
+ prof = p;
+ break;
+ }
+ }
+ }
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return prof;
+}
+
+/**
+ * ice_flow_find_prof - Look up a profile matching headers and matched fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ */
+u64
+ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ struct ice_flow_seg_info *segs, u8 segs_cnt)
+{
+ struct ice_flow_prof *p;
+
+ p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
+ ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS);
+
+ return p ? p->id : ICE_FLOW_PROF_ID_INVAL;
+}
+
+/**
+ * ice_flow_find_prof_id - Look up a profile with given profile ID
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: unique ID to identify this flow profile
+ */
+static struct ice_flow_prof *
+ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *p;
+
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ if (p->id == prof_id)
+ return p;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_dealloc_flow_entry - Deallocate flow entry memory
+ * @hw: pointer to the HW struct
+ * @entry: flow entry to be removed
+ */
+static void
+ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
+{
+ if (!entry)
+ return;
+
+ if (entry->entry)
+ ice_free(hw, entry->entry);
+
+ if (entry->acts) {
+ ice_free(hw, entry->acts);
+ entry->acts = NULL;
+ entry->acts_cnt = 0;
+ }
+
+ ice_free(hw, entry);
+}
+
+/**
+ * ice_flow_rem_entry_sync - Remove a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry: flow entry to be removed
+ */
+static enum ice_status
+ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __ALWAYS_UNUSED blk,
+ struct ice_flow_entry *entry)
+{
+ if (!entry)
+ return ICE_ERR_BAD_PTR;
+
+ LIST_DEL(&entry->l_entry);
+
+ ice_dealloc_flow_entry(hw, entry);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @acts: array of default actions
+ * @acts_cnt: number of default actions
+ * @prof: stores the returned flow profile added
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
+ enum ice_flow_dir dir, u64 prof_id,
+ struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_action *acts, u8 acts_cnt,
+ struct ice_flow_prof **prof)
+{
+ struct ice_flow_prof_params params;
+ enum ice_status status;
+ u8 i;
+
+ if (!prof || (acts_cnt && !acts))
+ return ICE_ERR_BAD_PTR;
+
+ ice_memset(&params, 0, sizeof(params), ICE_NONDMA_MEM);
+ params.prof = (struct ice_flow_prof *)
+ ice_malloc(hw, sizeof(*params.prof));
+ if (!params.prof)
+ return ICE_ERR_NO_MEMORY;
+
+ /* initialize extraction sequence to all invalid (0xff) */
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ params.es[i].prot_id = ICE_PROT_INVALID;
+ params.es[i].off = ICE_FV_OFFSET_INVAL;
+ }
+
+ params.blk = blk;
+ params.prof->id = prof_id;
+ params.prof->dir = dir;
+ params.prof->segs_cnt = segs_cnt;
+
+ /* Make a copy of the segments that need to be persistent in the flow
+ * profile instance
+ */
+ for (i = 0; i < segs_cnt; i++)
+ ice_memcpy(&params.prof->segs[i], &segs[i], sizeof(*segs),
+ ICE_NONDMA_TO_NONDMA);
+
+ /* Make a copy of the actions that need to be persistent in the flow
+ * profile instance.
+ */
+ if (acts_cnt) {
+ params.prof->acts = (struct ice_flow_action *)
+ ice_memdup(hw, acts, acts_cnt * sizeof(*acts),
+ ICE_NONDMA_TO_NONDMA);
+
+ if (!params.prof->acts) {
+ status = ICE_ERR_NO_MEMORY;
+ goto out;
+ }
+ }
+
+ status = ice_flow_proc_segs(hw, &params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW,
+ "Error processing a flow's packet segments\n");
+ goto out;
+ }
+
+ /* Add a HW profile for this flow profile */
+ status = ice_add_prof(hw, blk, prof_id, (u8 *)params.ptypes, params.es);
+ if (status) {
+ ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&params.prof->entries);
+ ice_init_lock(&params.prof->entries_lock);
+ *prof = params.prof;
+
+out:
+ if (status) {
+ if (params.prof->acts)
+ ice_free(hw, params.prof->acts);
+ ice_free(hw, params.prof);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_prof_sync - remove a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile to remove
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ */
+static enum ice_status
+ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof)
+{
+ enum ice_status status;
+
+ /* Remove all remaining flow entries before removing the flow profile */
+ if (!LIST_EMPTY(&prof->entries)) {
+ struct ice_flow_entry *e, *t;
+
+ ice_acquire_lock(&prof->entries_lock);
+
+ LIST_FOR_EACH_ENTRY_SAFE(e, t, &prof->entries, ice_flow_entry,
+ l_entry) {
+ status = ice_flow_rem_entry_sync(hw, blk, e);
+ if (status)
+ break;
+ }
+
+ ice_release_lock(&prof->entries_lock);
+ }
+
+ /* Remove all hardware profiles associated with this flow profile */
+ status = ice_rem_prof(hw, blk, prof->id);
+ if (!status) {
+ LIST_DEL(&prof->l_entry);
+ ice_destroy_lock(&prof->entries_lock);
+ if (prof->acts)
+ ice_free(hw, prof->acts);
+ ice_free(hw, prof);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @vsi_handle: software VSI handle
+ * @vsig: target VSI group
+ *
+ * Assumption: the caller has already verified that the VSI to
+ * be added has the same characteristics as the VSIG and will
+ * thereby have access to all resources added to that VSIG.
+ */
+enum ice_status
+ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
+ u16 vsig)
+{
+ enum ice_status status;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+ status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle),
+ vsig);
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_assoc_prof - associate a VSI with a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle)
+{
+ enum ice_status status = ICE_SUCCESS;
+
+ if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
+ status = ice_add_prof_id_flow(hw, blk,
+ ice_get_hw_vsi_num(hw,
+ vsi_handle),
+ prof->id);
+ if (!status)
+ ice_set_bit(vsi_handle, prof->vsis);
+ else
+ ice_debug(hw, ICE_DBG_FLOW,
+ "HW profile add failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_disassoc_prof - disassociate a VSI from a flow profile
+ * @hw: pointer to the hardware structure
+ * @blk: classification stage
+ * @prof: pointer to flow profile
+ * @vsi_handle: software VSI handle
+ *
+ * Assumption: the caller has acquired the lock to the profile list
+ * and the software VSI handle has been validated
+ */
+static enum ice_status
+ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
+ struct ice_flow_prof *prof, u16 vsi_handle)
+{
+ enum ice_status status = ICE_SUCCESS;
+
+ if (ice_is_bit_set(prof->vsis, vsi_handle)) {
+ status = ice_rem_prof_id_flow(hw, blk,
+ ice_get_hw_vsi_num(hw,
+ vsi_handle),
+ prof->id);
+ if (!status)
+ ice_clear_bit(vsi_handle, prof->vsis);
+ else
+ ice_debug(hw, ICE_DBG_FLOW,
+ "HW profile remove failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_add_prof - Add a flow profile for packet segments and matched fields
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @dir: flow direction
+ * @prof_id: unique ID to identify this flow profile
+ * @segs: array of one or more packet segments that describe the flow
+ * @segs_cnt: number of packet segments provided
+ * @acts: array of default actions
+ * @acts_cnt: number of default actions
+ * @prof: stores the returned flow profile added
+ */
+enum ice_status
+ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
+ u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
+ struct ice_flow_action *acts, u8 acts_cnt,
+ struct ice_flow_prof **prof)
+{
+ enum ice_status status;
+
+ if (segs_cnt > ICE_FLOW_SEG_MAX)
+ return ICE_ERR_MAX_LIMIT;
+
+ if (!segs_cnt)
+ return ICE_ERR_PARAM;
+
+ if (!segs)
+ return ICE_ERR_BAD_PTR;
+
+ status = ice_flow_val_hdrs(segs, segs_cnt);
+ if (status)
+ return status;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+ status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt,
+ acts, acts_cnt, prof);
+ if (!status)
+ LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]);
+
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_prof - Remove a flow profile and all entries associated with it
+ * @hw: pointer to the HW struct
+ * @blk: the block for which the flow profile is to be removed
+ * @prof_id: unique ID of the flow profile to be removed
+ */
+enum ice_status
+ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+{
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto out;
+ }
+
+ /* prof becomes invalid after the call */
+ status = ice_flow_rem_prof_sync(hw, blk, prof);
+
+out:
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: the profile ID handle
+ * @hw_prof_id: pointer to variable to receive the HW profile ID
+ */
+enum ice_status
+ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u8 *hw_prof_id)
+{
+ struct ice_prof_map *map;
+
+ map = ice_search_prof_id(hw, blk, prof_id);
+ if (map) {
+ *hw_prof_id = map->prof_id;
+ return ICE_SUCCESS;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_flow_find_entry - look for a flow entry using its unique ID
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry_id: unique ID to identify this flow entry
+ *
+ * This function looks for the flow entry with the specified unique ID in all
+ * flow profiles of the specified classification stage. If the entry is found,
+ * it returns the handle to the flow entry. Otherwise, it returns
+ * ICE_FLOW_ENTRY_HANDLE_INVAL.
+ */
+u64 ice_flow_find_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_id)
+{
+ struct ice_flow_entry *found = NULL;
+ struct ice_flow_prof *p;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+ LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) {
+ struct ice_flow_entry *e;
+
+ ice_acquire_lock(&p->entries_lock);
+ LIST_FOR_EACH_ENTRY(e, &p->entries, ice_flow_entry, l_entry)
+ if (e->id == entry_id) {
+ found = e;
+ break;
+ }
+ ice_release_lock(&p->entries_lock);
+
+ if (found)
+ break;
+ }
+
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return found ? ICE_FLOW_ENTRY_HNDL(found) : ICE_FLOW_ENTRY_HANDLE_INVAL;
+}
+
+/**
+ * ice_flow_add_entry - Add a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @prof_id: ID of the profile to add a new flow entry to
+ * @entry_id: unique ID to identify this flow entry
+ * @vsi_handle: software VSI handle for the flow entry
+ * @prio: priority of the flow entry
+ * @data: pointer to a data buffer containing flow entry's match values/masks
+ * @acts: arrays of actions to be performed on a match
+ * @acts_cnt: number of actions
+ * @entry_h: pointer to buffer that receives the new flow entry's handle
+ */
+enum ice_status
+ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
+ u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
+ void *data, struct ice_flow_action *acts, u8 acts_cnt,
+ u64 *entry_h)
+{
+ struct ice_flow_entry *e = NULL;
+ struct ice_flow_prof *prof;
+ enum ice_status status = ICE_SUCCESS;
+
+ /* ACL entries must indicate an action */
+ if (blk == ICE_BLK_ACL && (!acts || !acts_cnt))
+ return ICE_ERR_PARAM;
+
+ /* No flow entry data is expected for RSS */
+ if (!entry_h || (!data && blk != ICE_BLK_RSS))
+ return ICE_ERR_BAD_PTR;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+
+ prof = ice_flow_find_prof_id(hw, blk, prof_id);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ } else {
+ /* Allocate memory for the entry being added and associate
+ * the VSI to the found flow profile
+ */
+ e = (struct ice_flow_entry *)ice_malloc(hw, sizeof(*e));
+ if (!e)
+ status = ICE_ERR_NO_MEMORY;
+ else
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ }
+
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+ if (status)
+ goto out;
+
+ e->id = entry_id;
+ e->vsi_handle = vsi_handle;
+ e->prof = prof;
+ e->priority = prio;
+
+ switch (blk) {
+ case ICE_BLK_RSS:
+ /* RSS will add only one entry per VSI per profile */
+ break;
+ case ICE_BLK_FD:
+ break;
+ case ICE_BLK_SW:
+ case ICE_BLK_PE:
+ default:
+ status = ICE_ERR_NOT_IMPL;
+ goto out;
+ }
+
+ if (blk != ICE_BLK_ACL) {
+ /* ACL will handle the entry management */
+ ice_acquire_lock(&prof->entries_lock);
+ LIST_ADD(&e->l_entry, &prof->entries);
+ ice_release_lock(&prof->entries_lock);
+ }
+
+ *entry_h = ICE_FLOW_ENTRY_HNDL(e);
+
+out:
+ if (status && e) {
+ if (e->entry)
+ ice_free(hw, e->entry);
+ ice_free(hw, e);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_rem_entry - Remove a flow entry
+ * @hw: pointer to the HW struct
+ * @blk: classification stage
+ * @entry_h: handle to the flow entry to be removed
+ */
+enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
+ u64 entry_h)
+{
+ struct ice_flow_entry *entry;
+ struct ice_flow_prof *prof;
+ enum ice_status status = ICE_SUCCESS;
+
+ if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
+ return ICE_ERR_PARAM;
+
+ entry = ICE_FLOW_ENTRY_PTR(entry_h);
+
+ /* Retain the pointer to the flow profile as the entry will be freed */
+ prof = entry->prof;
+
+ if (prof) {
+ ice_acquire_lock(&prof->entries_lock);
+ status = ice_flow_rem_entry_sync(hw, blk, entry);
+ ice_release_lock(&prof->entries_lock);
+ }
+
+ return status;
+}
+
+/**
+ * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @field_type: type of the field
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ * input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ * entry's input buffer
+ *
+ * This helper function stores information of a field being matched, including
+ * the type of the field and the locations of the value to match, the mask,
+ * and the upper-bound value at the start of the input buffer for a flow entry.
+ * This function should only be used for fixed-size data structures.
+ *
+ * This function also opportunistically determines the protocol headers to be
+ * present based on the fields being set. Some fields cannot be used alone to
+ * determine the protocol headers present. Sometimes, fields for particular
+ * protocol headers are not matched. In those cases, the protocol headers
+ * must be explicitly set.
+ */
+static void
+ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ enum ice_flow_fld_match_type field_type, u16 val_loc,
+ u16 mask_loc, u16 last_loc)
+{
+ u64 bit = BIT_ULL(fld);
+
+ seg->match |= bit;
+ if (field_type == ICE_FLOW_FLD_TYPE_RANGE)
+ seg->range |= bit;
+
+ seg->fields[fld].type = field_type;
+ seg->fields[fld].src.val = val_loc;
+ seg->fields[fld].src.mask = mask_loc;
+ seg->fields[fld].src.last = last_loc;
+
+ ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr);
+}
+
+/**
+ * ice_flow_set_fld - specifies locations of field from entry's input buffer
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's
+ * input buffer
+ * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from
+ * entry's input buffer
+ * @range: indicate if field being matched is to be in a range
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match,
+ * the mask value, and upper value can be extracted. These locations are then
+ * stored in the flow profile. When adding a flow entry associated with the
+ * flow profile, these locations will be used to quickly extract the values and
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 mask_loc, u16 last_loc, bool range)
+{
+ enum ice_flow_fld_match_type t = range ?
+ ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG;
+
+ ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc);
+}
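+
+/* Usage sketch (editorial; the value/mask offsets below are hypothetical and
+ * refer to a caller-defined input buffer layout):
+ *
+ *	struct ice_flow_seg_info seg = { 0 };
+ *	struct ice_flow_prof *prof;
+ *	enum ice_status status;
+ *
+ *	ice_flow_set_fld(&seg, ICE_FLOW_FIELD_IDX_IPV4_SA, 0,
+ *			 ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ *			 false);
+ *	status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id,
+ *				   &seg, 1, NULL, 0, &prof);
+ */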
+
+/**
+ * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf
+ * @seg: packet segment the field being set belongs to
+ * @fld: field to be set
+ * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from
+ * entry's input buffer
+ * @pref_loc: location of prefix value from entry's input buffer
+ * @pref_sz: size of the location holding the prefix value
+ *
+ * This function specifies the locations, in the form of byte offsets from the
+ * start of the input buffer for a flow entry, from where the value to match
+ * and the IPv4 prefix value can be extracted. These locations are then stored
+ * in the flow profile. When adding flow entries to the associated flow profile,
+ * these locations can be used to quickly extract the values to create the
+ * content of a match entry. This function should only be used for fixed-size
+ * data structures.
+ */
+void
+ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
+ u16 val_loc, u16 pref_loc, u8 pref_sz)
+{
+ /* For this type of field, the "mask" location is for the prefix value's
+ * location and the "last" location is for the size of the location of
+ * the prefix value.
+ */
+ ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc,
+ pref_loc, (u16)pref_sz);
+}
+
+/**
+ * ice_flow_add_fld_raw - sets locations of a raw field from entry's input buf
+ * @seg: packet segment the field being set belongs to
+ * @off: offset of the raw field from the beginning of the segment in bytes
+ * @len: length of the raw pattern to be matched
+ * @val_loc: location of the value to match from entry's input buffer
+ * @mask_loc: location of mask value from entry's input buffer
+ *
+ * This function specifies the offset of the raw field to be matched from the
+ * beginning of the specified packet segment, and the locations, in the form of
+ * byte offsets from the start of the input buffer for a flow entry, from where
+ * the value to match and the mask value can be extracted. These locations are
+ * then stored in the flow profile. When adding flow entries to the associated
+ * flow profile, these locations can be used to quickly extract the values to
+ * create the content of a match entry. This function should only be used for
+ * fixed-size data structures.
+ */
+void
+ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
+ u16 val_loc, u16 mask_loc)
+{
+ if (seg->raws_cnt < ICE_FLOW_SEG_RAW_FLD_MAX) {
+ seg->raws[seg->raws_cnt].off = off;
+ seg->raws[seg->raws_cnt].info.type = ICE_FLOW_FLD_TYPE_SIZE;
+ seg->raws[seg->raws_cnt].info.src.val = val_loc;
+ seg->raws[seg->raws_cnt].info.src.mask = mask_loc;
+ /* The "last" field is used to store the length of the field */
+ seg->raws[seg->raws_cnt].info.src.last = len;
+ }
+
+ /* Overflows of "raws" will be handled as an error condition later in
+ * the flow when this information is processed.
+ */
+ seg->raws_cnt++;
+}
+
+#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
+ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
+
+#define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \
+ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
+ ICE_FLOW_SEG_HDR_SCTP)
+
+#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
+ (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
+ ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
+
+/**
+ * ice_flow_set_rss_seg_info - setup packet segments for RSS
+ * @segs: pointer to the flow field segment(s)
+ * @hash_fields: fields to be hashed on for the segment(s)
+ * @flow_hdr: protocol header fields within a packet segment
+ *
+ * Helper function to extract fields from hash bitmap and use flow
+ * header value to set a flow field segment, for further use when adding
+ * or removing flow profile entries.
+ */
+static enum ice_status
+ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
+ u32 flow_hdr)
+{
+ u64 val = hash_fields;
+ u8 i;
+
+ for (i = 0; val && i < ICE_FLOW_FIELD_IDX_MAX; i++) {
+ u64 bit = BIT_ULL(i);
+
+ if (val & bit) {
+ ice_flow_set_fld(segs, (enum ice_flow_field)i,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ val &= ~bit;
+ }
+ }
+ ICE_FLOW_SET_HDRS(segs, flow_hdr);
+
+ if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
+ return ICE_ERR_PARAM;
+
+ val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
+ if (val && !ice_is_pow2(val))
+ return ICE_ERR_CFG;
+
+ val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
+ if (val && !ice_is_pow2(val))
+ return ICE_ERR_CFG;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_rem_vsi_rss_list - remove VSI from RSS list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * Remove the VSI from all RSS configurations in the list.
+ */
+void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_rss_cfg *r, *tmp;
+
+ if (LIST_EMPTY(&hw->rss_list_head))
+ return;
+
+ ice_acquire_lock(&hw->rss_locks);
+ LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
+ ice_rss_cfg, l_entry) {
+ if (ice_test_and_clear_bit(vsi_handle, r->vsis))
+ if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
+ LIST_DEL(&r->l_entry);
+ ice_free(hw, r);
+ }
+ }
+ ice_release_lock(&hw->rss_locks);
+}
+
+/**
+ * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function will iterate through all flow profiles and disassociate
+ * the VSI from each profile. If a flow profile has no VSIs it will
+ * be removed.
+ */
+enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_prof *p, *t;
+ enum ice_status status = ICE_SUCCESS;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (LIST_EMPTY(&hw->fl_profs[blk]))
+ return ICE_SUCCESS;
+
+ ice_acquire_lock(&hw->fl_profs_locks[blk]);
+ LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
+ l_entry) {
+ if (ice_is_bit_set(p->vsis, vsi_handle)) {
+ status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle);
+ if (status)
+ break;
+
+ if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) {
+ status = ice_flow_rem_prof_sync(hw, blk, p);
+ if (status)
+ break;
+ }
+ }
+ }
+ ice_release_lock(&hw->fl_profs_locks[blk]);
+
+ return status;
+}
+
+/**
+ * ice_rem_rss_list - remove RSS configuration from list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static void
+ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
+{
+ struct ice_rss_cfg *r, *tmp;
+
+ /* Search for RSS hash fields associated to the VSI that match the
+	 * hash configurations associated to the flow profile. If found,
+	 * remove it from the RSS entry list of the VSI context and delete the
+	 * entry.
+ */
+ LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head,
+ ice_rss_cfg, l_entry) {
+ if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ ice_clear_bit(vsi_handle, r->vsis);
+ if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) {
+ LIST_DEL(&r->l_entry);
+ ice_free(hw, r);
+ }
+ return;
+ }
+ }
+}
+
+/**
+ * ice_add_rss_list - add RSS configuration to list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof: pointer to flow profile
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
+{
+ struct ice_rss_cfg *r, *rss_cfg;
+
+ LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
+ ice_rss_cfg, l_entry)
+ if (r->hashed_flds == prof->segs[prof->segs_cnt - 1].match &&
+ r->packet_hdr == prof->segs[prof->segs_cnt - 1].hdrs) {
+ ice_set_bit(vsi_handle, r->vsis);
+ return ICE_SUCCESS;
+ }
+
+ rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
+ if (!rss_cfg)
+ return ICE_ERR_NO_MEMORY;
+
+ rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
+ rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
+ ice_set_bit(vsi_handle, rss_cfg->vsis);
+
+ LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
+
+ return ICE_SUCCESS;
+}
+
+#define ICE_FLOW_PROF_HASH_S 0
+#define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S)
+#define ICE_FLOW_PROF_HDR_S 32
+#define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S)
+#define ICE_FLOW_PROF_ENCAP_S 63
+#define ICE_FLOW_PROF_ENCAP_M (BIT_ULL(ICE_FLOW_PROF_ENCAP_S))
+
+#define ICE_RSS_OUTER_HEADERS 1
+#define ICE_RSS_INNER_HEADERS 2
+
+/* Flow profile ID format:
+ * [0:31] - Packet match fields
+ * [32:62] - Protocol header
+ * [63] - Encapsulation flag, 0 if non-tunneled, 1 if tunneled
+ */
+#define ICE_FLOW_GEN_PROFID(hash, hdr, segs_cnt) \
+ (u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \
+ (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \
+ ((u8)((segs_cnt) - 1) ? ICE_FLOW_PROF_ENCAP_M : 0))
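+
+/* Example (editorial): a single, non-tunneled segment request,
+ * ICE_FLOW_GEN_PROFID(hash, hdr, 1), leaves bit 63 clear, while a two-segment
+ * (tunneled) request, ICE_FLOW_GEN_PROFID(hash, hdr, 2), sets
+ * ICE_FLOW_PROF_ENCAP_M because (segs_cnt - 1) is non-zero.
+ */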
+
+/**
+ * ice_add_rss_cfg_sync - add an RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ * @segs_cnt: packet segment count
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs, u8 segs_cnt)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_prof *prof = NULL;
+ struct ice_flow_seg_info *segs;
+ enum ice_status status;
+
+ if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
+ return ICE_ERR_PARAM;
+
+ segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
+ sizeof(*segs));
+ if (!segs)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Construct the packet segment info from the hashed fields */
+ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
+ addl_hdrs);
+ if (status)
+ goto exit;
+
+ /* Search for a flow profile that has matching headers, hash fields
+ * and has the input VSI associated to it. If found, no further
+	 * operations are required, so exit.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS |
+ ICE_FLOW_FIND_PROF_CHK_VSI);
+ if (prof)
+ goto exit;
+
+ /* Check if a flow profile exists with the same protocol headers and
+	 * associated with the input VSI. If so, disassociate the VSI from
+ * this profile. The VSI will be added to a new profile created with
+ * the protocol header and new hash field configuration.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI);
+ if (prof) {
+ status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+ if (!status)
+ ice_rem_rss_list(hw, vsi_handle, prof);
+ else
+ goto exit;
+
+ /* Remove profile if it has no VSIs associated */
+ if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) {
+ status = ice_flow_rem_prof(hw, blk, prof->id);
+ if (status)
+ goto exit;
+ }
+ }
+
+	/* Search for a profile that has the same match fields only. If this
+ * exists then associate the VSI to this profile.
+ */
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS);
+ if (prof) {
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ if (!status)
+ status = ice_add_rss_list(hw, vsi_handle, prof);
+ goto exit;
+ }
+
+	/* Create a new flow profile with the generated profile ID and packet
+	 * segment information.
+ */
+ status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX,
+ ICE_FLOW_GEN_PROFID(hashed_flds,
+ segs[segs_cnt - 1].hdrs,
+ segs_cnt),
+ segs, segs_cnt, NULL, 0, &prof);
+ if (status)
+ goto exit;
+
+ status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
+ /* If association to a new flow profile failed then this profile can
+ * be removed.
+ */
+ if (status) {
+ ice_flow_rem_prof(hw, blk, prof->id);
+ goto exit;
+ }
+
+ status = ice_add_rss_list(hw, vsi_handle, prof);
+
+exit:
+ ice_free(hw, segs);
+ return status;
+}
+
+/**
+ * ice_add_rss_cfg - add an RSS configuration with specified hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: hash bit fields (ICE_FLOW_HASH_*) to configure
+ * @addl_hdrs: protocol header fields
+ *
+ * This function will generate a flow profile based on the input fields to
+ * hash on and the flow type, and will use the VSI number to add a flow entry
+ * to the profile.
+ */
+enum ice_status
+ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs)
+{
+ enum ice_status status;
+
+ if (hashed_flds == ICE_HASH_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&hw->rss_locks);
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
+ ICE_RSS_OUTER_HEADERS);
+ if (!status)
+ status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+ addl_hdrs, ICE_RSS_INNER_HEADERS);
+ ice_release_lock(&hw->rss_locks);
+
+ return status;
+}
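+
+/* Usage sketch (editorial): enable RSS hashing on a VSI's IPv4 addresses and
+ * TCP ports, assuming ICE_FLOW_HASH_IPV4 and ICE_FLOW_HASH_TCP_PORT are the
+ * hash-field bitmaps used elsewhere in this file:
+ *
+ *	status = ice_add_rss_cfg(hw, vsi_handle,
+ *				 ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
+ *				 ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP);
+ */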
+
+/**
+ * ice_rem_rss_cfg_sync - remove an existing RSS configuration
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
+ * @addl_hdrs: Protocol header fields within a packet segment
+ * @segs_cnt: packet segment count
+ *
+ * Assumption: lock has already been acquired for RSS list
+ */
+static enum ice_status
+ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs, u8 segs_cnt)
+{
+ const enum ice_block blk = ICE_BLK_RSS;
+ struct ice_flow_seg_info *segs;
+ struct ice_flow_prof *prof;
+ enum ice_status status;
+
+ segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt,
+ sizeof(*segs));
+ if (!segs)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Construct the packet segment info from the hashed fields */
+ status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
+ addl_hdrs);
+ if (status)
+ goto out;
+
+ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt,
+ vsi_handle,
+ ICE_FLOW_FIND_PROF_CHK_FLDS);
+ if (!prof) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto out;
+ }
+
+ status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle);
+ if (status)
+ goto out;
+
+ /* Remove RSS configuration from VSI context before deleting
+ * the flow profile.
+ */
+ ice_rem_rss_list(hw, vsi_handle, prof);
+
+ if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI))
+ status = ice_flow_rem_prof(hw, blk, prof->id);
+
+out:
+ ice_free(hw, segs);
+ return status;
+}
+
+/**
+ * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hashed_flds: Packet hash types (ICE_FLOW_HASH_*) to remove
+ * @addl_hdrs: Protocol header fields within a packet segment
+ *
+ * This function will lookup the flow profile based on the input
+ * hash field bitmap, iterate through the profile entry list of
+ * that profile and find entry associated with input VSI to be
+ * removed. Calls are made to underlying flow apis which will in
+ * turn build or update buffers for RSS XLT1 section.
+ */
+enum ice_status
+ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
+ u32 addl_hdrs)
+{
+ enum ice_status status;
+
+ if (hashed_flds == ICE_HASH_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&hw->rss_locks);
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
+ ICE_RSS_OUTER_HEADERS);
+ if (!status)
+ status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds,
+ addl_hdrs, ICE_RSS_INNER_HEADERS);
+ ice_release_lock(&hw->rss_locks);
+
+ return status;
+}
+
+/* Mapping of AVF hash bit fields to an L3-L4 hash combination.
+ * As the ice_flow_avf_hdr_field values represent individual bit shifts in a
+ * hash, convert them to their appropriate flow L3 and L4 values.
+ */
+#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
+ (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
+ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+
+#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
+ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
+ (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
+ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+
+/**
+ * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ *
+ * This function will take the hash bitmap provided by the AVF driver via a
+ * message, convert it to ICE-compatible values, and configure RSS flow
+ * profiles.
+ */
+enum ice_status
+ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u64 hash_flds;
+
+ if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
+ !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Make sure no unsupported bits are specified */
+ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
+ ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
+ return ICE_ERR_CFG;
+
+ hash_flds = avf_hash;
+
+ /* Always create an L3 RSS configuration for any L4 RSS configuration */
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS)
+ hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS;
+
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)
+ hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS;
+
+ /* Create the corresponding RSS configuration for each valid hash bit */
+ while (hash_flds) {
+ u64 rss_hash = ICE_HASH_INVALID;
+
+ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) {
+ if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_TCP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_UDP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
+ } else if (hash_flds &
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+ rss_hash = ICE_FLOW_HASH_IPV4 |
+ ICE_FLOW_HASH_SCTP_PORT;
+ hash_flds &=
+ ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+ }
+ } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
+ if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_TCP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS;
+ } else if (hash_flds &
+ ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_UDP_PORT;
+ hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
+ } else if (hash_flds &
+ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+ rss_hash = ICE_FLOW_HASH_IPV6 |
+ ICE_FLOW_HASH_SCTP_PORT;
+ hash_flds &=
+ ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+ }
+ }
+
+ if (rss_hash == ICE_HASH_INVALID)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
+ ICE_FLOW_SEG_HDR_NONE);
+ if (status)
+ break;
+ }
+
+ return status;
+}
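+
+/* Worked example (editorial): an AVF request of
+ * BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) first has the plain IPv4 bits OR'd in
+ * ("always create an L3 RSS configuration"), so the loop above installs two
+ * configurations: ICE_FLOW_HASH_IPV4, followed by
+ * ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT.
+ */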
+
+/**
+ * ice_replay_rss_cfg - replay RSS configurations associated with VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ */
+enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+{
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_rss_cfg *r;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&hw->rss_locks);
+ LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
+ ice_rss_cfg, l_entry) {
+ if (ice_is_bit_set(r->vsis, vsi_handle)) {
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ r->hashed_flds,
+ r->packet_hdr,
+ ICE_RSS_OUTER_HEADERS);
+ if (status)
+ break;
+ status = ice_add_rss_cfg_sync(hw, vsi_handle,
+ r->hashed_flds,
+ r->packet_hdr,
+ ICE_RSS_INNER_HEADERS);
+ if (status)
+ break;
+ }
+ }
+ ice_release_lock(&hw->rss_locks);
+
+ return status;
+}
+
+/**
+ * ice_get_rss_cfg - returns hashed fields for the given header types
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @hdrs: protocol header type
+ *
+ * This function will return the match fields of the first instance of a flow
+ * profile that has the given header types and contains the input VSI.
+ */
+u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs)
+{
+ struct ice_rss_cfg *r, *rss_cfg = NULL;
+
+	/* verify that the protocol header is non-zero and the VSI is valid */
+ if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_HASH_INVALID;
+
+ ice_acquire_lock(&hw->rss_locks);
+ LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head,
+ ice_rss_cfg, l_entry)
+ if (ice_is_bit_set(r->vsis, vsi_handle) &&
+ r->packet_hdr == hdrs) {
+ rss_cfg = r;
+ break;
+ }
+ ice_release_lock(&hw->rss_locks);
+
+ return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID;
+}
Index: sys/dev/ice/ice_hw_autogen.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_hw_autogen.h
@@ -0,0 +1,9480 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/* Machine-generated file; do not edit */
+#ifndef _ICE_HW_AUTOGEN_H_
+#define _ICE_HW_AUTOGEN_H_
+
+#define GL_RDPU_CNTRL 0x00052054 /* Reset Source: CORER */
+#define GL_RDPU_CNTRL_RX_PAD_EN_S 0
+#define GL_RDPU_CNTRL_RX_PAD_EN_M BIT(0)
+#define GL_RDPU_CNTRL_UDP_ZERO_EN_S 1
+#define GL_RDPU_CNTRL_UDP_ZERO_EN_M BIT(1)
+#define GL_RDPU_CNTRL_BLNC_EN_S 2
+#define GL_RDPU_CNTRL_BLNC_EN_M BIT(2)
+#define GL_RDPU_CNTRL_RECIPE_BYPASS_S 3
+#define GL_RDPU_CNTRL_RECIPE_BYPASS_M BIT(3)
+#define GL_RDPU_CNTRL_RLAN_ACK_REQ_PM_TH_S 4
+#define GL_RDPU_CNTRL_RLAN_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 4)
+#define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_S 10
+#define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 10)
+#define GL_RDPU_CNTRL_REQ_WB_PM_TH_S 16
+#define GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x1F, 16)
+#define GL_RDPU_CNTRL_ECO_S 21
+#define GL_RDPU_CNTRL_ECO_M MAKEMASK(0x7FF, 21)
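[Editorial note, not part of the generated header: every register in this file follows the same pattern, an address macro, an optional _MAX_INDEX for register arrays, and per-field _S (shift) and _M (mask) macros built from BIT()/MAKEMASK(). A hedged sketch of how such a field is typically read and rewritten, assuming the driver's rd32()/wr32() register accessors from ice_osdep.h:]

/*
 * Editorial sketch: a field is isolated by ANDing with its _M mask and
 * written by shifting the new value up by its _S shift, re-masking, and
 * ORing it back in.  rd32()/wr32() are assumed from ice_osdep.h.
 */
static inline void
example_toggle_rx_pad(struct ice_hw *hw, bool enable)
{
	u32 val = rd32(hw, GL_RDPU_CNTRL);

	/* Clear the field, then OR in the new shifted-and-masked value. */
	val &= ~GL_RDPU_CNTRL_RX_PAD_EN_M;
	val |= ((u32)enable << GL_RDPU_CNTRL_RX_PAD_EN_S) &
	    GL_RDPU_CNTRL_RX_PAD_EN_M;
	wr32(hw, GL_RDPU_CNTRL, val);
}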
+#define MSIX_PBA(_i) (0x00008000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: FLR */
+#define MSIX_PBA_MAX_INDEX 2
+#define MSIX_PBA_PENBIT_S 0
+#define MSIX_PBA_PENBIT_M MAKEMASK(0xFFFFFFFF, 0)
+#define MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */
+#define MSIX_TADD_MAX_INDEX 64
+#define MSIX_TADD_MSIXTADD10_S 0
+#define MSIX_TADD_MSIXTADD10_M MAKEMASK(0x3, 0)
+#define MSIX_TADD_MSIXTADD_S 2
+#define MSIX_TADD_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2)
+#define MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */
+#define MSIX_TUADD_MAX_INDEX 64
+#define MSIX_TUADD_MSIXTUADD_S 0
+#define MSIX_TUADD_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0)
+#define MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */
+#define MSIX_TVCTRL_MAX_INDEX 64
+#define MSIX_TVCTRL_MASK_S 0
+#define MSIX_TVCTRL_MASK_M BIT(0)
+#define PF0_FW_HLP_ARQBAH_PAGE 0x02D00180 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQBAH_PAGE_ARQBAH_S 0
+#define PF0_FW_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_FW_HLP_ARQBAL_PAGE 0x02D00080 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0
+#define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_S 6
+#define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_FW_HLP_ARQH_PAGE 0x02D00380 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQH_PAGE_ARQH_S 0
+#define PF0_FW_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ARQLEN_PAGE 0x02D00280 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQLEN_S 0
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQVFE_S 28
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28)
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQOVFL_S 29
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29)
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQCRIT_S 30
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30)
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQENABLE_S 31
+#define PF0_FW_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31)
+#define PF0_FW_HLP_ARQT_PAGE 0x02D00480 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQT_PAGE_ARQT_S 0
+#define PF0_FW_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ATQBAH_PAGE 0x02D00100 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQBAH_PAGE_ATQBAH_S 0
+#define PF0_FW_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_FW_HLP_ATQBAL_PAGE 0x02D00000 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_LSB_S 0
+#define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_S 6
+#define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_FW_HLP_ATQH_PAGE 0x02D00300 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQH_PAGE_ATQH_S 0
+#define PF0_FW_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ATQLEN_PAGE 0x02D00200 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQLEN_S 0
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQVFE_S 28
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28)
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQOVFL_S 29
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29)
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQCRIT_S 30
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30)
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQENABLE_S 31
+#define PF0_FW_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31)
+#define PF0_FW_HLP_ATQT_PAGE 0x02D00400 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQT_PAGE_ATQT_S 0
+#define PF0_FW_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ARQBAH_PAGE 0x02D40180 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQBAH_PAGE_ARQBAH_S 0
+#define PF0_FW_PSM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_FW_PSM_ARQBAL_PAGE 0x02D40080 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_LSB_S 0
+#define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_S 6
+#define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_FW_PSM_ARQH_PAGE 0x02D40380 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQH_PAGE_ARQH_S 0
+#define PF0_FW_PSM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ARQLEN_PAGE 0x02D40280 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQLEN_S 0
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQVFE_S 28
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQVFE_M BIT(28)
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQOVFL_S 29
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQOVFL_M BIT(29)
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQCRIT_S 30
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQCRIT_M BIT(30)
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQENABLE_S 31
+#define PF0_FW_PSM_ARQLEN_PAGE_ARQENABLE_M BIT(31)
+#define PF0_FW_PSM_ARQT_PAGE 0x02D40480 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQT_PAGE_ARQT_S 0
+#define PF0_FW_PSM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ATQBAH_PAGE 0x02D40100 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQBAH_PAGE_ATQBAH_S 0
+#define PF0_FW_PSM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_FW_PSM_ATQBAL_PAGE 0x02D40000 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_LSB_S 0
+#define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_S 6
+#define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_FW_PSM_ATQH_PAGE 0x02D40300 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQH_PAGE_ATQH_S 0
+#define PF0_FW_PSM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ATQLEN_PAGE 0x02D40200 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQLEN_S 0
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQVFE_S 28
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQVFE_M BIT(28)
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQOVFL_S 29
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQOVFL_M BIT(29)
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQCRIT_S 30
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQCRIT_M BIT(30)
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQENABLE_S 31
+#define PF0_FW_PSM_ATQLEN_PAGE_ATQENABLE_M BIT(31)
+#define PF0_FW_PSM_ATQT_PAGE 0x02D40400 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQT_PAGE_ATQT_S 0
+#define PF0_FW_PSM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ARQBAH_PAGE 0x02D80190 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ARQBAH_PAGE_ARQBAH_S 0
+#define PF0_MBX_CPM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_CPM_ARQBAL_PAGE 0x02D80090 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_LSB_S 0
+#define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_S 6
+#define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_CPM_ARQH_PAGE 0x02D80390 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ARQH_PAGE_ARQH_S 0
+#define PF0_MBX_CPM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ARQLEN_PAGE 0x02D80290 /* Reset Source: PFR */
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQLEN_S 0
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQVFE_S 28
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQVFE_M BIT(28)
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQOVFL_S 29
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQOVFL_M BIT(29)
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQCRIT_S 30
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQCRIT_M BIT(30)
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQENABLE_S 31
+#define PF0_MBX_CPM_ARQLEN_PAGE_ARQENABLE_M BIT(31)
+#define PF0_MBX_CPM_ARQT_PAGE 0x02D80490 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ARQT_PAGE_ARQT_S 0
+#define PF0_MBX_CPM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ATQBAH_PAGE 0x02D80110 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ATQBAH_PAGE_ATQBAH_S 0
+#define PF0_MBX_CPM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_CPM_ATQBAL_PAGE 0x02D80010 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ATQBAL_PAGE_ATQBAL_S 6
+#define PF0_MBX_CPM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_CPM_ATQH_PAGE 0x02D80310 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ATQH_PAGE_ATQH_S 0
+#define PF0_MBX_CPM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ATQLEN_PAGE 0x02D80210 /* Reset Source: PFR */
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQLEN_S 0
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQVFE_S 28
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQVFE_M BIT(28)
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQOVFL_S 29
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQOVFL_M BIT(29)
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQCRIT_S 30
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQCRIT_M BIT(30)
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQENABLE_S 31
+#define PF0_MBX_CPM_ATQLEN_PAGE_ATQENABLE_M BIT(31)
+#define PF0_MBX_CPM_ATQT_PAGE 0x02D80410 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ATQT_PAGE_ATQT_S 0
+#define PF0_MBX_CPM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ARQBAH_PAGE 0x02D00190 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ARQBAH_PAGE_ARQBAH_S 0
+#define PF0_MBX_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_HLP_ARQBAL_PAGE 0x02D00090 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0
+#define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_S 6
+#define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_HLP_ARQH_PAGE 0x02D00390 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ARQH_PAGE_ARQH_S 0
+#define PF0_MBX_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ARQLEN_PAGE 0x02D00290 /* Reset Source: PFR */
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQLEN_S 0
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQVFE_S 28
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28)
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQOVFL_S 29
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29)
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQCRIT_S 30
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30)
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQENABLE_S 31
+#define PF0_MBX_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31)
+#define PF0_MBX_HLP_ARQT_PAGE 0x02D00490 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ARQT_PAGE_ARQT_S 0
+#define PF0_MBX_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ATQBAH_PAGE 0x02D00110 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ATQBAH_PAGE_ATQBAH_S 0
+#define PF0_MBX_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_HLP_ATQBAL_PAGE 0x02D00010 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ATQBAL_PAGE_ATQBAL_S 6
+#define PF0_MBX_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_HLP_ATQH_PAGE 0x02D00310 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ATQH_PAGE_ATQH_S 0
+#define PF0_MBX_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ATQLEN_PAGE 0x02D00210 /* Reset Source: PFR */
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQLEN_S 0
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQVFE_S 28
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28)
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQOVFL_S 29
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29)
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQCRIT_S 30
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30)
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQENABLE_S 31
+#define PF0_MBX_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31)
+#define PF0_MBX_HLP_ATQT_PAGE 0x02D00410 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ATQT_PAGE_ATQT_S 0
+#define PF0_MBX_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ARQBAH_PAGE 0x02D40190 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ARQBAH_PAGE_ARQBAH_S 0
+#define PF0_MBX_PSM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_PSM_ARQBAL_PAGE 0x02D40090 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_LSB_S 0
+#define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_S 6
+#define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_PSM_ARQH_PAGE 0x02D40390 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ARQH_PAGE_ARQH_S 0
+#define PF0_MBX_PSM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ARQLEN_PAGE 0x02D40290 /* Reset Source: PFR */
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQLEN_S 0
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQVFE_S 28
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQVFE_M BIT(28)
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQOVFL_S 29
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQOVFL_M BIT(29)
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQCRIT_S 30
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQCRIT_M BIT(30)
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQENABLE_S 31
+#define PF0_MBX_PSM_ARQLEN_PAGE_ARQENABLE_M BIT(31)
+#define PF0_MBX_PSM_ARQT_PAGE 0x02D40490 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ARQT_PAGE_ARQT_S 0
+#define PF0_MBX_PSM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ATQBAH_PAGE 0x02D40110 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ATQBAH_PAGE_ATQBAH_S 0
+#define PF0_MBX_PSM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_PSM_ATQBAL_PAGE 0x02D40010 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ATQBAL_PAGE_ATQBAL_S 6
+#define PF0_MBX_PSM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_PSM_ATQH_PAGE 0x02D40310 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ATQH_PAGE_ATQH_S 0
+#define PF0_MBX_PSM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ATQLEN_PAGE 0x02D40210 /* Reset Source: PFR */
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQLEN_S 0
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQVFE_S 28
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQVFE_M BIT(28)
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQOVFL_S 29
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQOVFL_M BIT(29)
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQCRIT_S 30
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQCRIT_M BIT(30)
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQENABLE_S 31
+#define PF0_MBX_PSM_ATQLEN_PAGE_ATQENABLE_M BIT(31)
+#define PF0_MBX_PSM_ATQT_PAGE 0x02D40410 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ATQT_PAGE_ATQT_S 0
+#define PF0_MBX_PSM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ARQBAH_PAGE 0x02D801A0 /* Reset Source: CORER */
+#define PF0_SB_CPM_ARQBAH_PAGE_ARQBAH_S 0
+#define PF0_SB_CPM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_SB_CPM_ARQBAL_PAGE 0x02D800A0 /* Reset Source: CORER */
+#define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_LSB_S 0
+#define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_S 6
+#define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_SB_CPM_ARQH_PAGE 0x02D803A0 /* Reset Source: CORER */
+#define PF0_SB_CPM_ARQH_PAGE_ARQH_S 0
+#define PF0_SB_CPM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ARQLEN_PAGE 0x02D802A0 /* Reset Source: PFR */
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQLEN_S 0
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQVFE_S 28
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQVFE_M BIT(28)
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQOVFL_S 29
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQOVFL_M BIT(29)
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQCRIT_S 30
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQCRIT_M BIT(30)
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQENABLE_S 31
+#define PF0_SB_CPM_ARQLEN_PAGE_ARQENABLE_M BIT(31)
+#define PF0_SB_CPM_ARQT_PAGE 0x02D804A0 /* Reset Source: CORER */
+#define PF0_SB_CPM_ARQT_PAGE_ARQT_S 0
+#define PF0_SB_CPM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ATQBAH_PAGE 0x02D80120 /* Reset Source: CORER */
+#define PF0_SB_CPM_ATQBAH_PAGE_ATQBAH_S 0
+#define PF0_SB_CPM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_SB_CPM_ATQBAL_PAGE 0x02D80020 /* Reset Source: CORER */
+#define PF0_SB_CPM_ATQBAL_PAGE_ATQBAL_S 6
+#define PF0_SB_CPM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_SB_CPM_ATQH_PAGE 0x02D80320 /* Reset Source: CORER */
+#define PF0_SB_CPM_ATQH_PAGE_ATQH_S 0
+#define PF0_SB_CPM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ATQLEN_PAGE 0x02D80220 /* Reset Source: PFR */
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQLEN_S 0
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQVFE_S 28
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQVFE_M BIT(28)
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQOVFL_S 29
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQOVFL_M BIT(29)
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQCRIT_S 30
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQCRIT_M BIT(30)
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQENABLE_S 31
+#define PF0_SB_CPM_ATQLEN_PAGE_ATQENABLE_M BIT(31)
+#define PF0_SB_CPM_ATQT_PAGE 0x02D80420 /* Reset Source: CORER */
+#define PF0_SB_CPM_ATQT_PAGE_ATQT_S 0
+#define PF0_SB_CPM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ARQBAH_PAGE 0x02D001A0 /* Reset Source: CORER */
+#define PF0_SB_HLP_ARQBAH_PAGE_ARQBAH_S 0
+#define PF0_SB_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_SB_HLP_ARQBAL_PAGE 0x02D000A0 /* Reset Source: CORER */
+#define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0
+#define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_S 6
+#define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_SB_HLP_ARQH_PAGE 0x02D003A0 /* Reset Source: CORER */
+#define PF0_SB_HLP_ARQH_PAGE_ARQH_S 0
+#define PF0_SB_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ARQLEN_PAGE 0x02D002A0 /* Reset Source: PFR */
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQLEN_S 0
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQVFE_S 28
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28)
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQOVFL_S 29
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29)
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQCRIT_S 30
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30)
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQENABLE_S 31
+#define PF0_SB_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31)
+#define PF0_SB_HLP_ARQT_PAGE 0x02D004A0 /* Reset Source: CORER */
+#define PF0_SB_HLP_ARQT_PAGE_ARQT_S 0
+#define PF0_SB_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ATQBAH_PAGE 0x02D00120 /* Reset Source: CORER */
+#define PF0_SB_HLP_ATQBAH_PAGE_ATQBAH_S 0
+#define PF0_SB_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_SB_HLP_ATQBAL_PAGE 0x02D00020 /* Reset Source: CORER */
+#define PF0_SB_HLP_ATQBAL_PAGE_ATQBAL_S 6
+#define PF0_SB_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_SB_HLP_ATQH_PAGE 0x02D00320 /* Reset Source: CORER */
+#define PF0_SB_HLP_ATQH_PAGE_ATQH_S 0
+#define PF0_SB_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ATQLEN_PAGE 0x02D00220 /* Reset Source: PFR */
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQLEN_S 0
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQVFE_S 28
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28)
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQOVFL_S 29
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29)
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQCRIT_S 30
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30)
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQENABLE_S 31
+#define PF0_SB_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31)
+#define PF0_SB_HLP_ATQT_PAGE 0x02D00420 /* Reset Source: CORER */
+#define PF0_SB_HLP_ATQT_PAGE_ATQT_S 0
+#define PF0_SB_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0INT_DYN_CTL(_i) (0x03000000 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define PF0INT_DYN_CTL_MAX_INDEX 2047
+#define PF0INT_DYN_CTL_INTENA_S 0
+#define PF0INT_DYN_CTL_INTENA_M BIT(0)
+#define PF0INT_DYN_CTL_CLEARPBA_S 1
+#define PF0INT_DYN_CTL_CLEARPBA_M BIT(1)
+#define PF0INT_DYN_CTL_SWINT_TRIG_S 2
+#define PF0INT_DYN_CTL_SWINT_TRIG_M BIT(2)
+#define PF0INT_DYN_CTL_ITR_INDX_S 3
+#define PF0INT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3)
+#define PF0INT_DYN_CTL_INTERVAL_S 5
+#define PF0INT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5)
+#define PF0INT_DYN_CTL_SW_ITR_INDX_ENA_S 24
+#define PF0INT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24)
+#define PF0INT_DYN_CTL_SW_ITR_INDX_S 25
+#define PF0INT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25)
+#define PF0INT_DYN_CTL_WB_ON_ITR_S 30
+#define PF0INT_DYN_CTL_WB_ON_ITR_M BIT(30)
+#define PF0INT_DYN_CTL_INTENA_MSK_S 31
+#define PF0INT_DYN_CTL_INTENA_MSK_M BIT(31)
+#define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define PF0INT_ITR_0_MAX_INDEX 2047
+#define PF0INT_ITR_0_INTERVAL_S 0
+#define PF0INT_ITR_0_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define PF0INT_ITR_1_MAX_INDEX 2047
+#define PF0INT_ITR_1_INTERVAL_S 0
+#define PF0INT_ITR_1_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define PF0INT_ITR_2_MAX_INDEX 2047
+#define PF0INT_ITR_2_INTERVAL_S 0
+#define PF0INT_ITR_2_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define PF0INT_OICR_CPM_PAGE 0x02D03000 /* Reset Source: CORER */
+#define PF0INT_OICR_CPM_PAGE_INTEVENT_S 0
+#define PF0INT_OICR_CPM_PAGE_INTEVENT_M BIT(0)
+#define PF0INT_OICR_CPM_PAGE_QUEUE_S 1
+#define PF0INT_OICR_CPM_PAGE_QUEUE_M BIT(1)
+#define PF0INT_OICR_CPM_PAGE_RSV1_S 2
+#define PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
+#define PF0INT_OICR_CPM_PAGE_HH_COMP_S 10
+#define PF0INT_OICR_CPM_PAGE_HH_COMP_M BIT(10)
+#define PF0INT_OICR_CPM_PAGE_TSYN_TX_S 11
+#define PF0INT_OICR_CPM_PAGE_TSYN_TX_M BIT(11)
+#define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_S 12
+#define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_M BIT(12)
+#define PF0INT_OICR_CPM_PAGE_TSYN_TGT_S 13
+#define PF0INT_OICR_CPM_PAGE_TSYN_TGT_M BIT(13)
+#define PF0INT_OICR_CPM_PAGE_HLP_RDY_S 14
+#define PF0INT_OICR_CPM_PAGE_HLP_RDY_M BIT(14)
+#define PF0INT_OICR_CPM_PAGE_CPM_RDY_S 15
+#define PF0INT_OICR_CPM_PAGE_CPM_RDY_M BIT(15)
+#define PF0INT_OICR_CPM_PAGE_ECC_ERR_S 16
+#define PF0INT_OICR_CPM_PAGE_ECC_ERR_M BIT(16)
+#define PF0INT_OICR_CPM_PAGE_RSV2_S 17
+#define PF0INT_OICR_CPM_PAGE_RSV2_M MAKEMASK(0x3, 17)
+#define PF0INT_OICR_CPM_PAGE_MAL_DETECT_S 19
+#define PF0INT_OICR_CPM_PAGE_MAL_DETECT_M BIT(19)
+#define PF0INT_OICR_CPM_PAGE_GRST_S 20
+#define PF0INT_OICR_CPM_PAGE_GRST_M BIT(20)
+#define PF0INT_OICR_CPM_PAGE_PCI_EXCEPTION_S 21
+#define PF0INT_OICR_CPM_PAGE_PCI_EXCEPTION_M BIT(21)
+#define PF0INT_OICR_CPM_PAGE_GPIO_S 22
+#define PF0INT_OICR_CPM_PAGE_GPIO_M BIT(22)
+#define PF0INT_OICR_CPM_PAGE_RSV3_S 23
+#define PF0INT_OICR_CPM_PAGE_RSV3_M BIT(23)
+#define PF0INT_OICR_CPM_PAGE_STORM_DETECT_S 24
+#define PF0INT_OICR_CPM_PAGE_STORM_DETECT_M BIT(24)
+#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_S 25
+#define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_M BIT(25)
+#define PF0INT_OICR_CPM_PAGE_HMC_ERR_S 26
+#define PF0INT_OICR_CPM_PAGE_HMC_ERR_M BIT(26)
+#define PF0INT_OICR_CPM_PAGE_PE_PUSH_S 27
+#define PF0INT_OICR_CPM_PAGE_PE_PUSH_M BIT(27)
+#define PF0INT_OICR_CPM_PAGE_PE_CRITERR_S 28
+#define PF0INT_OICR_CPM_PAGE_PE_CRITERR_M BIT(28)
+#define PF0INT_OICR_CPM_PAGE_VFLR_S 29
+#define PF0INT_OICR_CPM_PAGE_VFLR_M BIT(29)
+#define PF0INT_OICR_CPM_PAGE_XLR_HW_DONE_S 30
+#define PF0INT_OICR_CPM_PAGE_XLR_HW_DONE_M BIT(30)
+#define PF0INT_OICR_CPM_PAGE_SWINT_S 31
+#define PF0INT_OICR_CPM_PAGE_SWINT_M BIT(31)
+#define PF0INT_OICR_ENA_CPM_PAGE 0x02D03100 /* Reset Source: CORER */
+#define PF0INT_OICR_ENA_CPM_PAGE_RSV0_S 0
+#define PF0INT_OICR_ENA_CPM_PAGE_RSV0_M BIT(0)
+#define PF0INT_OICR_ENA_CPM_PAGE_INT_ENA_S 1
+#define PF0INT_OICR_ENA_CPM_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1)
+#define PF0INT_OICR_ENA_HLP_PAGE 0x02D01100 /* Reset Source: CORER */
+#define PF0INT_OICR_ENA_HLP_PAGE_RSV0_S 0
+#define PF0INT_OICR_ENA_HLP_PAGE_RSV0_M BIT(0)
+#define PF0INT_OICR_ENA_HLP_PAGE_INT_ENA_S 1
+#define PF0INT_OICR_ENA_HLP_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1)
+#define PF0INT_OICR_ENA_PSM_PAGE 0x02D02100 /* Reset Source: CORER */
+#define PF0INT_OICR_ENA_PSM_PAGE_RSV0_S 0
+#define PF0INT_OICR_ENA_PSM_PAGE_RSV0_M BIT(0)
+#define PF0INT_OICR_ENA_PSM_PAGE_INT_ENA_S 1
+#define PF0INT_OICR_ENA_PSM_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1)
+#define PF0INT_OICR_HLP_PAGE 0x02D01000 /* Reset Source: CORER */
+#define PF0INT_OICR_HLP_PAGE_INTEVENT_S 0
+#define PF0INT_OICR_HLP_PAGE_INTEVENT_M BIT(0)
+#define PF0INT_OICR_HLP_PAGE_QUEUE_S 1
+#define PF0INT_OICR_HLP_PAGE_QUEUE_M BIT(1)
+#define PF0INT_OICR_HLP_PAGE_RSV1_S 2
+#define PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0xFF, 2)
+#define PF0INT_OICR_HLP_PAGE_HH_COMP_S 10
+#define PF0INT_OICR_HLP_PAGE_HH_COMP_M BIT(10)
+#define PF0INT_OICR_HLP_PAGE_TSYN_TX_S 11
+#define PF0INT_OICR_HLP_PAGE_TSYN_TX_M BIT(11)
+#define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_S 12
+#define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_M BIT(12)
+#define PF0INT_OICR_HLP_PAGE_TSYN_TGT_S 13
+#define PF0INT_OICR_HLP_PAGE_TSYN_TGT_M BIT(13)
+#define PF0INT_OICR_HLP_PAGE_HLP_RDY_S 14
+#define PF0INT_OICR_HLP_PAGE_HLP_RDY_M BIT(14)
+#define PF0INT_OICR_HLP_PAGE_CPM_RDY_S 15
+#define PF0INT_OICR_HLP_PAGE_CPM_RDY_M BIT(15)
+#define PF0INT_OICR_HLP_PAGE_ECC_ERR_S 16
+#define PF0INT_OICR_HLP_PAGE_ECC_ERR_M BIT(16)
+#define PF0INT_OICR_HLP_PAGE_RSV2_S 17
+#define PF0INT_OICR_HLP_PAGE_RSV2_M MAKEMASK(0x3, 17)
+#define PF0INT_OICR_HLP_PAGE_MAL_DETECT_S 19
+#define PF0INT_OICR_HLP_PAGE_MAL_DETECT_M BIT(19)
+#define PF0INT_OICR_HLP_PAGE_GRST_S 20
+#define PF0INT_OICR_HLP_PAGE_GRST_M BIT(20)
+#define PF0INT_OICR_HLP_PAGE_PCI_EXCEPTION_S 21
+#define PF0INT_OICR_HLP_PAGE_PCI_EXCEPTION_M BIT(21)
+#define PF0INT_OICR_HLP_PAGE_GPIO_S 22
+#define PF0INT_OICR_HLP_PAGE_GPIO_M BIT(22)
+#define PF0INT_OICR_HLP_PAGE_RSV3_S 23
+#define PF0INT_OICR_HLP_PAGE_RSV3_M BIT(23)
+#define PF0INT_OICR_HLP_PAGE_STORM_DETECT_S 24
+#define PF0INT_OICR_HLP_PAGE_STORM_DETECT_M BIT(24)
+#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_S 25
+#define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_M BIT(25)
+#define PF0INT_OICR_HLP_PAGE_HMC_ERR_S 26
+#define PF0INT_OICR_HLP_PAGE_HMC_ERR_M BIT(26)
+#define PF0INT_OICR_HLP_PAGE_PE_PUSH_S 27
+#define PF0INT_OICR_HLP_PAGE_PE_PUSH_M BIT(27)
+#define PF0INT_OICR_HLP_PAGE_PE_CRITERR_S 28
+#define PF0INT_OICR_HLP_PAGE_PE_CRITERR_M BIT(28)
+#define PF0INT_OICR_HLP_PAGE_VFLR_S 29
+#define PF0INT_OICR_HLP_PAGE_VFLR_M BIT(29)
+#define PF0INT_OICR_HLP_PAGE_XLR_HW_DONE_S 30
+#define PF0INT_OICR_HLP_PAGE_XLR_HW_DONE_M BIT(30)
+#define PF0INT_OICR_HLP_PAGE_SWINT_S 31
+#define PF0INT_OICR_HLP_PAGE_SWINT_M BIT(31)
+#define PF0INT_OICR_PSM_PAGE 0x02D02000 /* Reset Source: CORER */
+#define PF0INT_OICR_PSM_PAGE_INTEVENT_S 0
+#define PF0INT_OICR_PSM_PAGE_INTEVENT_M BIT(0)
+#define PF0INT_OICR_PSM_PAGE_QUEUE_S 1
+#define PF0INT_OICR_PSM_PAGE_QUEUE_M BIT(1)
+#define PF0INT_OICR_PSM_PAGE_RSV1_S 2
+#define PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
+#define PF0INT_OICR_PSM_PAGE_HH_COMP_S 10
+#define PF0INT_OICR_PSM_PAGE_HH_COMP_M BIT(10)
+#define PF0INT_OICR_PSM_PAGE_TSYN_TX_S 11
+#define PF0INT_OICR_PSM_PAGE_TSYN_TX_M BIT(11)
+#define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_S 12
+#define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_M BIT(12)
+#define PF0INT_OICR_PSM_PAGE_TSYN_TGT_S 13
+#define PF0INT_OICR_PSM_PAGE_TSYN_TGT_M BIT(13)
+#define PF0INT_OICR_PSM_PAGE_HLP_RDY_S 14
+#define PF0INT_OICR_PSM_PAGE_HLP_RDY_M BIT(14)
+#define PF0INT_OICR_PSM_PAGE_CPM_RDY_S 15
+#define PF0INT_OICR_PSM_PAGE_CPM_RDY_M BIT(15)
+#define PF0INT_OICR_PSM_PAGE_ECC_ERR_S 16
+#define PF0INT_OICR_PSM_PAGE_ECC_ERR_M BIT(16)
+#define PF0INT_OICR_PSM_PAGE_RSV2_S 17
+#define PF0INT_OICR_PSM_PAGE_RSV2_M MAKEMASK(0x3, 17)
+#define PF0INT_OICR_PSM_PAGE_MAL_DETECT_S 19
+#define PF0INT_OICR_PSM_PAGE_MAL_DETECT_M BIT(19)
+#define PF0INT_OICR_PSM_PAGE_GRST_S 20
+#define PF0INT_OICR_PSM_PAGE_GRST_M BIT(20)
+#define PF0INT_OICR_PSM_PAGE_PCI_EXCEPTION_S 21
+#define PF0INT_OICR_PSM_PAGE_PCI_EXCEPTION_M BIT(21)
+#define PF0INT_OICR_PSM_PAGE_GPIO_S 22
+#define PF0INT_OICR_PSM_PAGE_GPIO_M BIT(22)
+#define PF0INT_OICR_PSM_PAGE_RSV3_S 23
+#define PF0INT_OICR_PSM_PAGE_RSV3_M BIT(23)
+#define PF0INT_OICR_PSM_PAGE_STORM_DETECT_S 24
+#define PF0INT_OICR_PSM_PAGE_STORM_DETECT_M BIT(24)
+#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_S 25
+#define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_M BIT(25)
+#define PF0INT_OICR_PSM_PAGE_HMC_ERR_S 26
+#define PF0INT_OICR_PSM_PAGE_HMC_ERR_M BIT(26)
+#define PF0INT_OICR_PSM_PAGE_PE_PUSH_S 27
+#define PF0INT_OICR_PSM_PAGE_PE_PUSH_M BIT(27)
+#define PF0INT_OICR_PSM_PAGE_PE_CRITERR_S 28
+#define PF0INT_OICR_PSM_PAGE_PE_CRITERR_M BIT(28)
+#define PF0INT_OICR_PSM_PAGE_VFLR_S 29
+#define PF0INT_OICR_PSM_PAGE_VFLR_M BIT(29)
+#define PF0INT_OICR_PSM_PAGE_XLR_HW_DONE_S 30
+#define PF0INT_OICR_PSM_PAGE_XLR_HW_DONE_M BIT(30)
+#define PF0INT_OICR_PSM_PAGE_SWINT_S 31
+#define PF0INT_OICR_PSM_PAGE_SWINT_M BIT(31)
+#define QRX_TAIL_PAGE(_QRX) (0x03800000 + ((_QRX) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define QRX_TAIL_PAGE_MAX_INDEX 2047
+#define QRX_TAIL_PAGE_TAIL_S 0
+#define QRX_TAIL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0)
+#define QTX_COMM_DBELL_PAGE(_DBQM) (0x04000000 + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define QTX_COMM_DBELL_PAGE_MAX_INDEX 16383
+#define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_S 0
+#define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define QTX_COMM_DBLQ_DBELL_PAGE(_DBLQ) (0x02F00000 + ((_DBLQ) * 4096)) /* _i=0...255 */ /* Reset Source: CORER */
+#define QTX_COMM_DBLQ_DBELL_PAGE_MAX_INDEX 255
+#define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_S 0
+#define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0)
+#define VSI_MBX_ARQBAH(_VSI) (0x02000018 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_MBX_ARQBAH_MAX_INDEX 767
+#define VSI_MBX_ARQBAH_ARQBAH_S 0
+#define VSI_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VSI_MBX_ARQBAL(_VSI) (0x02000014 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_MBX_ARQBAL_MAX_INDEX 767
+#define VSI_MBX_ARQBAL_ARQBAL_LSB_S 0
+#define VSI_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VSI_MBX_ARQBAL_ARQBAL_S 6
+#define VSI_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VSI_MBX_ARQH(_VSI) (0x02000020 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_MBX_ARQH_MAX_INDEX 767
+#define VSI_MBX_ARQH_ARQH_S 0
+#define VSI_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define VSI_MBX_ARQLEN(_VSI) (0x0200001C + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: PFR */
+#define VSI_MBX_ARQLEN_MAX_INDEX 767
+#define VSI_MBX_ARQLEN_ARQLEN_S 0
+#define VSI_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VSI_MBX_ARQLEN_ARQVFE_S 28
+#define VSI_MBX_ARQLEN_ARQVFE_M BIT(28)
+#define VSI_MBX_ARQLEN_ARQOVFL_S 29
+#define VSI_MBX_ARQLEN_ARQOVFL_M BIT(29)
+#define VSI_MBX_ARQLEN_ARQCRIT_S 30
+#define VSI_MBX_ARQLEN_ARQCRIT_M BIT(30)
+#define VSI_MBX_ARQLEN_ARQENABLE_S 31
+#define VSI_MBX_ARQLEN_ARQENABLE_M BIT(31)
+#define VSI_MBX_ARQT(_VSI) (0x02000024 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_MBX_ARQT_MAX_INDEX 767
+#define VSI_MBX_ARQT_ARQT_S 0
+#define VSI_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define VSI_MBX_ATQBAH(_VSI) (0x02000004 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_MBX_ATQBAH_MAX_INDEX 767
+#define VSI_MBX_ATQBAH_ATQBAH_S 0
+#define VSI_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VSI_MBX_ATQBAL(_VSI) (0x02000000 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_MBX_ATQBAL_MAX_INDEX 767
+#define VSI_MBX_ATQBAL_ATQBAL_S 6
+#define VSI_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VSI_MBX_ATQH(_VSI) (0x0200000C + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_MBX_ATQH_MAX_INDEX 767
+#define VSI_MBX_ATQH_ATQH_S 0
+#define VSI_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define VSI_MBX_ATQLEN(_VSI) (0x02000008 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: PFR */
+#define VSI_MBX_ATQLEN_MAX_INDEX 767
+#define VSI_MBX_ATQLEN_ATQLEN_S 0
+#define VSI_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VSI_MBX_ATQLEN_ATQVFE_S 28
+#define VSI_MBX_ATQLEN_ATQVFE_M BIT(28)
+#define VSI_MBX_ATQLEN_ATQOVFL_S 29
+#define VSI_MBX_ATQLEN_ATQOVFL_M BIT(29)
+#define VSI_MBX_ATQLEN_ATQCRIT_S 30
+#define VSI_MBX_ATQLEN_ATQCRIT_M BIT(30)
+#define VSI_MBX_ATQLEN_ATQENABLE_S 31
+#define VSI_MBX_ATQLEN_ATQENABLE_M BIT(31)
+#define VSI_MBX_ATQT(_VSI) (0x02000010 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_MBX_ATQT_MAX_INDEX 767
+#define VSI_MBX_ATQT_ATQT_S 0
+#define VSI_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define GL_ACL_ACCESS_CMD 0x00391000 /* Reset Source: CORER */
+#define GL_ACL_ACCESS_CMD_TABLE_ID_S 0
+#define GL_ACL_ACCESS_CMD_TABLE_ID_M MAKEMASK(0xFF, 0)
+#define GL_ACL_ACCESS_CMD_ENTRY_INDEX_S 8
+#define GL_ACL_ACCESS_CMD_ENTRY_INDEX_M MAKEMASK(0xFFF, 8)
+#define GL_ACL_ACCESS_CMD_OPERATION_S 20
+#define GL_ACL_ACCESS_CMD_OPERATION_M BIT(20)
+#define GL_ACL_ACCESS_CMD_OBJ_TYPE_S 24
+#define GL_ACL_ACCESS_CMD_OBJ_TYPE_M MAKEMASK(0xF, 24)
+#define GL_ACL_ACCESS_CMD_EXECUTE_S 31
+#define GL_ACL_ACCESS_CMD_EXECUTE_M BIT(31)
+#define GL_ACL_ACCESS_STATUS 0x00391004 /* Reset Source: CORER */
+#define GL_ACL_ACCESS_STATUS_BUSY_S 0
+#define GL_ACL_ACCESS_STATUS_BUSY_M BIT(0)
+#define GL_ACL_ACCESS_STATUS_DONE_S 1
+#define GL_ACL_ACCESS_STATUS_DONE_M BIT(1)
+#define GL_ACL_ACCESS_STATUS_ERROR_S 2
+#define GL_ACL_ACCESS_STATUS_ERROR_M BIT(2)
+#define GL_ACL_ACCESS_STATUS_OPERATION_S 3
+#define GL_ACL_ACCESS_STATUS_OPERATION_M BIT(3)
+#define GL_ACL_ACCESS_STATUS_ERROR_CODE_S 4
+#define GL_ACL_ACCESS_STATUS_ERROR_CODE_M MAKEMASK(0xF, 4)
+#define GL_ACL_ACCESS_STATUS_TABLE_ID_S 8
+#define GL_ACL_ACCESS_STATUS_TABLE_ID_M MAKEMASK(0xFF, 8)
+#define GL_ACL_ACCESS_STATUS_ENTRY_INDEX_S 16
+#define GL_ACL_ACCESS_STATUS_ENTRY_INDEX_M MAKEMASK(0xFFF, 16)
+#define GL_ACL_ACCESS_STATUS_OBJ_TYPE_S 28
+#define GL_ACL_ACCESS_STATUS_OBJ_TYPE_M MAKEMASK(0xF, 28)
+#define GL_ACL_ACTMEM_ACT(_i) (0x00393824 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GL_ACL_ACTMEM_ACT_MAX_INDEX 1
+#define GL_ACL_ACTMEM_ACT_VALUE_S 0
+#define GL_ACL_ACTMEM_ACT_VALUE_M MAKEMASK(0xFFFF, 0)
+#define GL_ACL_ACTMEM_ACT_MDID_S 20
+#define GL_ACL_ACTMEM_ACT_MDID_M MAKEMASK(0x3F, 20)
+#define GL_ACL_ACTMEM_ACT_PRIORITY_S 28
+#define GL_ACL_ACTMEM_ACT_PRIORITY_M MAKEMASK(0x7, 28)
+#define GL_ACL_CHICKEN_REGISTER 0x00393810 /* Reset Source: CORER */
+#define GL_ACL_CHICKEN_REGISTER_TCAM_DATA_POL_CH_S 0
+#define GL_ACL_CHICKEN_REGISTER_TCAM_DATA_POL_CH_M BIT(0)
+#define GL_ACL_CHICKEN_REGISTER_TCAM_ADDR_POL_CH_S 1
+#define GL_ACL_CHICKEN_REGISTER_TCAM_ADDR_POL_CH_M BIT(1)
+#define GL_ACL_DEFAULT_ACT(_i) (0x00391168 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GL_ACL_DEFAULT_ACT_MAX_INDEX 15
+#define GL_ACL_DEFAULT_ACT_VALUE_S 0
+#define GL_ACL_DEFAULT_ACT_VALUE_M MAKEMASK(0xFFFF, 0)
+#define GL_ACL_DEFAULT_ACT_MDID_S 20
+#define GL_ACL_DEFAULT_ACT_MDID_M MAKEMASK(0x3F, 20)
+#define GL_ACL_DEFAULT_ACT_PRIORITY_S 28
+#define GL_ACL_DEFAULT_ACT_PRIORITY_M MAKEMASK(0x7, 28)
+#define GL_ACL_PROFILE_BWSB_SEL(_i) (0x00391008 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GL_ACL_PROFILE_BWSB_SEL_MAX_INDEX 31
+#define GL_ACL_PROFILE_BWSB_SEL_BSB_SRC_OFF_S 0
+#define GL_ACL_PROFILE_BWSB_SEL_BSB_SRC_OFF_M MAKEMASK(0x3F, 0)
+#define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_S 8
+#define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_M MAKEMASK(0x1F, 8)
+#define GL_ACL_PROFILE_DWSB_SEL(_i) (0x00391088 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GL_ACL_PROFILE_DWSB_SEL_MAX_INDEX 15
+#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_S 0
+#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_M MAKEMASK(0xF, 0)
+#define GL_ACL_PROFILE_PF_CFG(_i) (0x003910C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GL_ACL_PROFILE_PF_CFG_MAX_INDEX 7
+#define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_S 0
+#define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_M MAKEMASK(0x3F, 0)
+#define GL_ACL_PROFILE_RC_CFG(_i) (0x003910E8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GL_ACL_PROFILE_RC_CFG_MAX_INDEX 7
+#define GL_ACL_PROFILE_RC_CFG_LOW_BOUND_S 0
+#define GL_ACL_PROFILE_RC_CFG_LOW_BOUND_M MAKEMASK(0xFFFF, 0)
+#define GL_ACL_PROFILE_RC_CFG_HIGH_BOUND_S 16
+#define GL_ACL_PROFILE_RC_CFG_HIGH_BOUND_M MAKEMASK(0xFFFF, 16)
+#define GL_ACL_PROFILE_RCF_MASK(_i) (0x00391108 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GL_ACL_PROFILE_RCF_MASK_MAX_INDEX 7
+#define GL_ACL_PROFILE_RCF_MASK_MASK_S 0
+#define GL_ACL_PROFILE_RCF_MASK_MASK_M MAKEMASK(0xFFFF, 0)
+#define GL_ACL_SCENARIO_ACT_CFG(_i) (0x003938AC + ((_i) * 4)) /* _i=0...19 */ /* Reset Source: CORER */
+#define GL_ACL_SCENARIO_ACT_CFG_MAX_INDEX 19
+#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_SEL_S 0
+#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_SEL_M MAKEMASK(0xF, 0)
+#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_EN_S 8
+#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_EN_M BIT(8)
+#define GL_ACL_SCENARIO_CFG_H(_i) (0x0039386C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GL_ACL_SCENARIO_CFG_H_MAX_INDEX 15
+#define GL_ACL_SCENARIO_CFG_H_SELECT4_S 0
+#define GL_ACL_SCENARIO_CFG_H_SELECT4_M MAKEMASK(0x1F, 0)
+#define GL_ACL_SCENARIO_CFG_H_CHUNKMASK_S 8
+#define GL_ACL_SCENARIO_CFG_H_CHUNKMASK_M MAKEMASK(0xFF, 8)
+#define GL_ACL_SCENARIO_CFG_H_START_COMPARE_S 24
+#define GL_ACL_SCENARIO_CFG_H_START_COMPARE_M BIT(24)
+#define GL_ACL_SCENARIO_CFG_H_START_SET_S 28
+#define GL_ACL_SCENARIO_CFG_H_START_SET_M BIT(28)
+#define GL_ACL_SCENARIO_CFG_L(_i) (0x0039382C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GL_ACL_SCENARIO_CFG_L_MAX_INDEX 15
+#define GL_ACL_SCENARIO_CFG_L_SELECT0_S 0
+#define GL_ACL_SCENARIO_CFG_L_SELECT0_M MAKEMASK(0x7F, 0)
+#define GL_ACL_SCENARIO_CFG_L_SELECT1_S 8
+#define GL_ACL_SCENARIO_CFG_L_SELECT1_M MAKEMASK(0x7F, 8)
+#define GL_ACL_SCENARIO_CFG_L_SELECT2_S 16
+#define GL_ACL_SCENARIO_CFG_L_SELECT2_M MAKEMASK(0x7F, 16)
+#define GL_ACL_SCENARIO_CFG_L_SELECT3_S 24
+#define GL_ACL_SCENARIO_CFG_L_SELECT3_M MAKEMASK(0x7F, 24)
+#define GL_ACL_TCAM_KEY_H 0x00393818 /* Reset Source: CORER */
+#define GL_ACL_TCAM_KEY_H_GL_ACL_FFU_TCAM_KEY_H_S 0
+#define GL_ACL_TCAM_KEY_H_GL_ACL_FFU_TCAM_KEY_H_M MAKEMASK(0xFF, 0)
+#define GL_ACL_TCAM_KEY_INV_H 0x00393820 /* Reset Source: CORER */
+#define GL_ACL_TCAM_KEY_INV_H_GL_ACL_FFU_TCAM_KEY_INV_H_S 0
+#define GL_ACL_TCAM_KEY_INV_H_GL_ACL_FFU_TCAM_KEY_INV_H_M MAKEMASK(0xFF, 0)
+#define GL_ACL_TCAM_KEY_INV_L 0x0039381C /* Reset Source: CORER */
+#define GL_ACL_TCAM_KEY_INV_L_GL_ACL_FFU_TCAM_KEY_INV_L_S 0
+#define GL_ACL_TCAM_KEY_INV_L_GL_ACL_FFU_TCAM_KEY_INV_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_ACL_TCAM_KEY_L 0x00393814 /* Reset Source: CORER */
+#define GL_ACL_TCAM_KEY_L_GL_ACL_FFU_TCAM_KEY_L_S 0
+#define GL_ACL_TCAM_KEY_L_GL_ACL_FFU_TCAM_KEY_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define VSI_ACL_DEF_SEL(_VSI) (0x00391800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_ACL_DEF_SEL_MAX_INDEX 767
+#define VSI_ACL_DEF_SEL_RX_PROFILE_MISS_SEL_S 0
+#define VSI_ACL_DEF_SEL_RX_PROFILE_MISS_SEL_M MAKEMASK(0x3, 0)
+#define VSI_ACL_DEF_SEL_RX_TABLES_MISS_SEL_S 4
+#define VSI_ACL_DEF_SEL_RX_TABLES_MISS_SEL_M MAKEMASK(0x3, 4)
+#define VSI_ACL_DEF_SEL_TX_PROFILE_MISS_SEL_S 8
+#define VSI_ACL_DEF_SEL_TX_PROFILE_MISS_SEL_M MAKEMASK(0x3, 8)
+#define VSI_ACL_DEF_SEL_TX_TABLES_MISS_SEL_S 12
+#define VSI_ACL_DEF_SEL_TX_TABLES_MISS_SEL_M MAKEMASK(0x3, 12)
+#define GL_SWT_L2TAG0(_i) (0x000492A8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GL_SWT_L2TAG0_MAX_INDEX 7
+#define GL_SWT_L2TAG0_DATA_S 0
+#define GL_SWT_L2TAG0_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_SWT_L2TAG1(_i) (0x000492C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GL_SWT_L2TAG1_MAX_INDEX 7
+#define GL_SWT_L2TAG1_DATA_S 0
+#define GL_SWT_L2TAG1_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_SWT_L2TAGCTRL(_i) (0x001D2660 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GL_SWT_L2TAGCTRL_MAX_INDEX 7
+#define GL_SWT_L2TAGCTRL_LENGTH_S 0
+#define GL_SWT_L2TAGCTRL_LENGTH_M MAKEMASK(0x7F, 0)
+#define GL_SWT_L2TAGCTRL_HAS_UP_S 7
+#define GL_SWT_L2TAGCTRL_HAS_UP_M BIT(7)
+#define GL_SWT_L2TAGCTRL_ISVLAN_S 9
+#define GL_SWT_L2TAGCTRL_ISVLAN_M BIT(9)
+#define GL_SWT_L2TAGCTRL_INNERUP_S 10
+#define GL_SWT_L2TAGCTRL_INNERUP_M BIT(10)
+#define GL_SWT_L2TAGCTRL_OUTERUP_S 11
+#define GL_SWT_L2TAGCTRL_OUTERUP_M BIT(11)
+#define GL_SWT_L2TAGCTRL_LONG_S 12
+#define GL_SWT_L2TAGCTRL_LONG_M BIT(12)
+#define GL_SWT_L2TAGCTRL_ISMPLS_S 13
+#define GL_SWT_L2TAGCTRL_ISMPLS_M BIT(13)
+#define GL_SWT_L2TAGCTRL_ISNSH_S 14
+#define GL_SWT_L2TAGCTRL_ISNSH_M BIT(14)
+#define GL_SWT_L2TAGCTRL_ETHERTYPE_S 16
+#define GL_SWT_L2TAGCTRL_ETHERTYPE_M MAKEMASK(0xFFFF, 16)
+#define GL_SWT_L2TAGRXEB(_i) (0x00052000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GL_SWT_L2TAGRXEB_MAX_INDEX 7
+#define GL_SWT_L2TAGRXEB_OFFSET_S 0
+#define GL_SWT_L2TAGRXEB_OFFSET_M MAKEMASK(0xFF, 0)
+#define GL_SWT_L2TAGRXEB_LENGTH_S 8
+#define GL_SWT_L2TAGRXEB_LENGTH_M MAKEMASK(0x3, 8)
+#define GL_SWT_L2TAGTXIB(_i) (0x000492E8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GL_SWT_L2TAGTXIB_MAX_INDEX 7
+#define GL_SWT_L2TAGTXIB_OFFSET_S 0
+#define GL_SWT_L2TAGTXIB_OFFSET_M MAKEMASK(0xFF, 0)
+#define GL_SWT_L2TAGTXIB_LENGTH_S 8
+#define GL_SWT_L2TAGTXIB_LENGTH_M MAKEMASK(0x3, 8)
+#define GLCM_PE_CACHESIZE 0x005046B4 /* Reset Source: CORER */
+#define GLCM_PE_CACHESIZE_WORD_SIZE_S 0
+#define GLCM_PE_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFFF, 0)
+#define GLCM_PE_CACHESIZE_SETS_S 12
+#define GLCM_PE_CACHESIZE_SETS_M MAKEMASK(0xF, 12)
+#define GLCM_PE_CACHESIZE_WAYS_S 16
+#define GLCM_PE_CACHESIZE_WAYS_M MAKEMASK(0x1FF, 16)
+#define GLCOMM_CQ_CTL(_CQ) (0x000F0000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLCOMM_CQ_CTL_MAX_INDEX 511
+#define GLCOMM_CQ_CTL_COMP_TYPE_S 0
+#define GLCOMM_CQ_CTL_COMP_TYPE_M MAKEMASK(0x7, 0)
+#define GLCOMM_CQ_CTL_CMD_S 4
+#define GLCOMM_CQ_CTL_CMD_M MAKEMASK(0x7, 4)
+#define GLCOMM_CQ_CTL_ID_S 16
+#define GLCOMM_CQ_CTL_ID_M MAKEMASK(0x3FFF, 16)
+#define GLCOMM_MIN_MAX_PKT 0x000FC064 /* Reset Source: CORER */
+#define GLCOMM_MIN_MAX_PKT_MAHDL_S 0
+#define GLCOMM_MIN_MAX_PKT_MAHDL_M MAKEMASK(0x3FFF, 0)
+#define GLCOMM_MIN_MAX_PKT_MIHDL_S 16
+#define GLCOMM_MIN_MAX_PKT_MIHDL_M MAKEMASK(0x3F, 16)
+#define GLCOMM_MIN_MAX_PKT_LSO_COMS_MIHDL_S 22
+#define GLCOMM_MIN_MAX_PKT_LSO_COMS_MIHDL_M MAKEMASK(0x3FF, 22)
+#define GLCOMM_PKT_SHAPER_PROF(_i) (0x002D2DA8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLCOMM_PKT_SHAPER_PROF_MAX_INDEX 7
+#define GLCOMM_PKT_SHAPER_PROF_PKTCNT_S 0
+#define GLCOMM_PKT_SHAPER_PROF_PKTCNT_M MAKEMASK(0x3F, 0)
+#define GLCOMM_QTX_CNTX_CTL 0x002D2DC8 /* Reset Source: CORER */
+#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_S 0
+#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M MAKEMASK(0x3FFF, 0)
+#define GLCOMM_QTX_CNTX_CTL_CMD_S 16
+#define GLCOMM_QTX_CNTX_CTL_CMD_M MAKEMASK(0x7, 16)
+#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_S 19
+#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
+#define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: CORER */
+#define GLCOMM_QTX_CNTX_DATA_MAX_INDEX 9
+#define GLCOMM_QTX_CNTX_DATA_DATA_S 0
+#define GLCOMM_QTX_CNTX_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLCOMM_QTX_CNTX_STAT 0x002D2DCC /* Reset Source: CORER */
+#define GLCOMM_QTX_CNTX_STAT_CMD_IN_PROG_S 0
+#define GLCOMM_QTX_CNTX_STAT_CMD_IN_PROG_M BIT(0)
+#define GLCOMM_QUANTA_PROF(_i) (0x002D2D68 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLCOMM_QUANTA_PROF_MAX_INDEX 15
+#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_S 0
+#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_M MAKEMASK(0x3FFF, 0)
+#define GLCOMM_QUANTA_PROF_MAX_CMD_S 16
+#define GLCOMM_QUANTA_PROF_MAX_CMD_M MAKEMASK(0xFF, 16)
+#define GLCOMM_QUANTA_PROF_MAX_DESC_S 24
+#define GLCOMM_QUANTA_PROF_MAX_DESC_M MAKEMASK(0x3F, 24)
+#define GLLAN_TCLAN_CACHE_CTL 0x000FC0B8 /* Reset Source: CORER */
+#define GLLAN_TCLAN_CACHE_CTL_MIN_FETCH_THRESH_S 0
+#define GLLAN_TCLAN_CACHE_CTL_MIN_FETCH_THRESH_M MAKEMASK(0x3F, 0)
+#define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_S 6
+#define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_M BIT(6)
+#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_S 7
+#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_M MAKEMASK(0x7F, 7)
+#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_S 14
+#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_M MAKEMASK(0xFF, 14)
+#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_S 22
+#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_M MAKEMASK(0x3FF, 22)
+#define GLTCLAN_CQ_CNTX0(_CQ) (0x000F0800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX0_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX0_RING_ADDR_LSB_S 0
+#define GLTCLAN_CQ_CNTX0_RING_ADDR_LSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX1(_CQ) (0x000F1000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX1_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX1_RING_ADDR_MSB_S 0
+#define GLTCLAN_CQ_CNTX1_RING_ADDR_MSB_M MAKEMASK(0x1FFFFFF, 0)
+#define GLTCLAN_CQ_CNTX10(_CQ) (0x000F5800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX10_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX10_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX10_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX11(_CQ) (0x000F6000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX11_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX11_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX11_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX12(_CQ) (0x000F6800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX12_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX12_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX12_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX13(_CQ) (0x000F7000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX13_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX13_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX13_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX14(_CQ) (0x000F7800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX14_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX14_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX14_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX15(_CQ) (0x000F8000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX15_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX15_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX15_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX16(_CQ) (0x000F8800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX16_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX16_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX16_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX17(_CQ) (0x000F9000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX17_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX17_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX17_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX18(_CQ) (0x000F9800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX18_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX18_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX18_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX19(_CQ) (0x000FA000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX19_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX19_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX19_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX2(_CQ) (0x000F1800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX2_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX2_RING_LEN_S 0
+#define GLTCLAN_CQ_CNTX2_RING_LEN_M MAKEMASK(0x3FFFF, 0)
+#define GLTCLAN_CQ_CNTX20(_CQ) (0x000FA800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX20_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX20_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX20_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX21(_CQ) (0x000FB000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX21_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX21_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX21_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX3(_CQ) (0x000F2000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX3_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX3_GENERATION_S 0
+#define GLTCLAN_CQ_CNTX3_GENERATION_M BIT(0)
+#define GLTCLAN_CQ_CNTX3_CQ_WR_PTR_S 1
+#define GLTCLAN_CQ_CNTX3_CQ_WR_PTR_M MAKEMASK(0x3FFFFF, 1)
+#define GLTCLAN_CQ_CNTX4(_CQ) (0x000F2800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX4_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX4_PF_NUM_S 0
+#define GLTCLAN_CQ_CNTX4_PF_NUM_M MAKEMASK(0x7, 0)
+#define GLTCLAN_CQ_CNTX4_VMVF_NUM_S 3
+#define GLTCLAN_CQ_CNTX4_VMVF_NUM_M MAKEMASK(0x3FF, 3)
+#define GLTCLAN_CQ_CNTX4_VMVF_TYPE_S 13
+#define GLTCLAN_CQ_CNTX4_VMVF_TYPE_M MAKEMASK(0x3, 13)
+#define GLTCLAN_CQ_CNTX5(_CQ) (0x000F3000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX5_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX5_TPH_EN_S 0
+#define GLTCLAN_CQ_CNTX5_TPH_EN_M BIT(0)
+#define GLTCLAN_CQ_CNTX5_CPU_ID_S 1
+#define GLTCLAN_CQ_CNTX5_CPU_ID_M MAKEMASK(0xFF, 1)
+#define GLTCLAN_CQ_CNTX5_FLUSH_ON_ITR_DIS_S 9
+#define GLTCLAN_CQ_CNTX5_FLUSH_ON_ITR_DIS_M BIT(9)
+#define GLTCLAN_CQ_CNTX6(_CQ) (0x000F3800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX6_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX6_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX6_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX7(_CQ) (0x000F4000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX7_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX7_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX7_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX8(_CQ) (0x000F4800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX8_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX8_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX8_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCLAN_CQ_CNTX9(_CQ) (0x000F5000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLTCLAN_CQ_CNTX9_MAX_INDEX 511
+#define GLTCLAN_CQ_CNTX9_CQ_CACHLINE_S 0
+#define GLTCLAN_CQ_CNTX9_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
+#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define QTX_COMM_DBELL_MAX_INDEX 16383
+#define QTX_COMM_DBELL_QTX_COMM_DBELL_S 0
+#define QTX_COMM_DBELL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define QTX_COMM_DBLQ_CNTX(_i, _DBLQ) (0x002D0000 + ((_i) * 1024 + (_DBLQ) * 4)) /* _i=0...4, _DBLQ=0...255 */ /* Reset Source: CORER */
+#define QTX_COMM_DBLQ_CNTX_MAX_INDEX 4
+#define QTX_COMM_DBLQ_CNTX_DATA_S 0
+#define QTX_COMM_DBLQ_CNTX_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define QTX_COMM_DBLQ_DBELL(_DBLQ) (0x002D1400 + ((_DBLQ) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define QTX_COMM_DBLQ_DBELL_MAX_INDEX 255
+#define QTX_COMM_DBLQ_DBELL_TAIL_S 0
+#define QTX_COMM_DBLQ_DBELL_TAIL_M MAKEMASK(0x1FFF, 0)
+#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define QTX_COMM_HEAD_MAX_INDEX 16383
+#define QTX_COMM_HEAD_HEAD_S 0
+#define QTX_COMM_HEAD_HEAD_M MAKEMASK(0x1FFF, 0)
+#define QTX_COMM_HEAD_RS_PENDING_S 16
+#define QTX_COMM_HEAD_RS_PENDING_M BIT(16)
+#define GL_FW_TOOL_ARQBAH 0x000801C0 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ARQBAH_ARQBAH_S 0
+#define GL_FW_TOOL_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_FW_TOOL_ARQBAL 0x000800C0 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ARQBAL_ARQBAL_LSB_S 0
+#define GL_FW_TOOL_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define GL_FW_TOOL_ARQBAL_ARQBAL_S 6
+#define GL_FW_TOOL_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define GL_FW_TOOL_ARQH 0x000803C0 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ARQH_ARQH_S 0
+#define GL_FW_TOOL_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define GL_FW_TOOL_ARQLEN 0x000802C0 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ARQLEN_ARQLEN_S 0
+#define GL_FW_TOOL_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define GL_FW_TOOL_ARQLEN_ARQVFE_S 28
+#define GL_FW_TOOL_ARQLEN_ARQVFE_M BIT(28)
+#define GL_FW_TOOL_ARQLEN_ARQOVFL_S 29
+#define GL_FW_TOOL_ARQLEN_ARQOVFL_M BIT(29)
+#define GL_FW_TOOL_ARQLEN_ARQCRIT_S 30
+#define GL_FW_TOOL_ARQLEN_ARQCRIT_M BIT(30)
+#define GL_FW_TOOL_ARQLEN_ARQENABLE_S 31
+#define GL_FW_TOOL_ARQLEN_ARQENABLE_M BIT(31)
+#define GL_FW_TOOL_ARQT 0x000804C0 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ARQT_ARQT_S 0
+#define GL_FW_TOOL_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define GL_FW_TOOL_ATQBAH 0x00080140 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ATQBAH_ATQBAH_S 0
+#define GL_FW_TOOL_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_FW_TOOL_ATQBAL 0x00080040 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ATQBAL_ATQBAL_LSB_S 0
+#define GL_FW_TOOL_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define GL_FW_TOOL_ATQBAL_ATQBAL_S 6
+#define GL_FW_TOOL_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define GL_FW_TOOL_ATQH 0x00080340 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ATQH_ATQH_S 0
+#define GL_FW_TOOL_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define GL_FW_TOOL_ATQLEN 0x00080240 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ATQLEN_ATQLEN_S 0
+#define GL_FW_TOOL_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define GL_FW_TOOL_ATQLEN_ATQVFE_S 28
+#define GL_FW_TOOL_ATQLEN_ATQVFE_M BIT(28)
+#define GL_FW_TOOL_ATQLEN_ATQOVFL_S 29
+#define GL_FW_TOOL_ATQLEN_ATQOVFL_M BIT(29)
+#define GL_FW_TOOL_ATQLEN_ATQCRIT_S 30
+#define GL_FW_TOOL_ATQLEN_ATQCRIT_M BIT(30)
+#define GL_FW_TOOL_ATQLEN_ATQENABLE_S 31
+#define GL_FW_TOOL_ATQLEN_ATQENABLE_M BIT(31)
+#define GL_FW_TOOL_ATQT 0x00080440 /* Reset Source: EMPR */
+#define GL_FW_TOOL_ATQT_ATQT_S 0
+#define GL_FW_TOOL_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define GL_MBX_PASID 0x00231EC0 /* Reset Source: CORER */
+#define GL_MBX_PASID_PASID_MODE_S 0
+#define GL_MBX_PASID_PASID_MODE_M BIT(0)
+#define GL_MBX_PASID_PASID_MODE_VALID_S 1
+#define GL_MBX_PASID_PASID_MODE_VALID_M BIT(1)
+#define PF_FW_ARQBAH 0x00080180 /* Reset Source: EMPR */
+#define PF_FW_ARQBAH_ARQBAH_S 0
+#define PF_FW_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF_FW_ARQBAL 0x00080080 /* Reset Source: EMPR */
+#define PF_FW_ARQBAL_ARQBAL_LSB_S 0
+#define PF_FW_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF_FW_ARQBAL_ARQBAL_S 6
+#define PF_FW_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF_FW_ARQH 0x00080380 /* Reset Source: EMPR */
+#define PF_FW_ARQH_ARQH_S 0
+#define PF_FW_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF_FW_ARQLEN 0x00080280 /* Reset Source: EMPR */
+#define PF_FW_ARQLEN_ARQLEN_S 0
+#define PF_FW_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF_FW_ARQLEN_ARQVFE_S 28
+#define PF_FW_ARQLEN_ARQVFE_M BIT(28)
+#define PF_FW_ARQLEN_ARQOVFL_S 29
+#define PF_FW_ARQLEN_ARQOVFL_M BIT(29)
+#define PF_FW_ARQLEN_ARQCRIT_S 30
+#define PF_FW_ARQLEN_ARQCRIT_M BIT(30)
+#define PF_FW_ARQLEN_ARQENABLE_S 31
+#define PF_FW_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_FW_ARQT 0x00080480 /* Reset Source: EMPR */
+#define PF_FW_ARQT_ARQT_S 0
+#define PF_FW_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF_FW_ATQBAH 0x00080100 /* Reset Source: EMPR */
+#define PF_FW_ATQBAH_ATQBAH_S 0
+#define PF_FW_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF_FW_ATQBAL 0x00080000 /* Reset Source: EMPR */
+#define PF_FW_ATQBAL_ATQBAL_LSB_S 0
+#define PF_FW_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF_FW_ATQBAL_ATQBAL_S 6
+#define PF_FW_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF_FW_ATQH 0x00080300 /* Reset Source: EMPR */
+#define PF_FW_ATQH_ATQH_S 0
+#define PF_FW_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF_FW_ATQLEN 0x00080200 /* Reset Source: EMPR */
+#define PF_FW_ATQLEN_ATQLEN_S 0
+#define PF_FW_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF_FW_ATQLEN_ATQVFE_S 28
+#define PF_FW_ATQLEN_ATQVFE_M BIT(28)
+#define PF_FW_ATQLEN_ATQOVFL_S 29
+#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
+#define PF_FW_ATQLEN_ATQCRIT_S 30
+#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
+#define PF_FW_ATQLEN_ATQENABLE_S 31
+#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_FW_ATQT 0x00080400 /* Reset Source: EMPR */
+#define PF_FW_ATQT_ATQT_S 0
+#define PF_FW_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF_MBX_ARQBAH 0x0022E400 /* Reset Source: CORER */
+#define PF_MBX_ARQBAH_ARQBAH_S 0
+#define PF_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF_MBX_ARQBAL 0x0022E380 /* Reset Source: CORER */
+#define PF_MBX_ARQBAL_ARQBAL_LSB_S 0
+#define PF_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF_MBX_ARQBAL_ARQBAL_S 6
+#define PF_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF_MBX_ARQH 0x0022E500 /* Reset Source: CORER */
+#define PF_MBX_ARQH_ARQH_S 0
+#define PF_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF_MBX_ARQLEN 0x0022E480 /* Reset Source: PFR */
+#define PF_MBX_ARQLEN_ARQLEN_S 0
+#define PF_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF_MBX_ARQLEN_ARQVFE_S 28
+#define PF_MBX_ARQLEN_ARQVFE_M BIT(28)
+#define PF_MBX_ARQLEN_ARQOVFL_S 29
+#define PF_MBX_ARQLEN_ARQOVFL_M BIT(29)
+#define PF_MBX_ARQLEN_ARQCRIT_S 30
+#define PF_MBX_ARQLEN_ARQCRIT_M BIT(30)
+#define PF_MBX_ARQLEN_ARQENABLE_S 31
+#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_MBX_ARQT 0x0022E580 /* Reset Source: CORER */
+#define PF_MBX_ARQT_ARQT_S 0
+#define PF_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF_MBX_ATQBAH 0x0022E180 /* Reset Source: CORER */
+#define PF_MBX_ATQBAH_ATQBAH_S 0
+#define PF_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF_MBX_ATQBAL 0x0022E100 /* Reset Source: CORER */
+#define PF_MBX_ATQBAL_ATQBAL_S 6
+#define PF_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF_MBX_ATQH 0x0022E280 /* Reset Source: CORER */
+#define PF_MBX_ATQH_ATQH_S 0
+#define PF_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF_MBX_ATQLEN 0x0022E200 /* Reset Source: PFR */
+#define PF_MBX_ATQLEN_ATQLEN_S 0
+#define PF_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF_MBX_ATQLEN_ATQVFE_S 28
+#define PF_MBX_ATQLEN_ATQVFE_M BIT(28)
+#define PF_MBX_ATQLEN_ATQOVFL_S 29
+#define PF_MBX_ATQLEN_ATQOVFL_M BIT(29)
+#define PF_MBX_ATQLEN_ATQCRIT_S 30
+#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30)
+#define PF_MBX_ATQLEN_ATQENABLE_S 31
+#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_MBX_ATQT 0x0022E300 /* Reset Source: CORER */
+#define PF_MBX_ATQT_ATQT_S 0
+#define PF_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF_SB_ARQBAH 0x0022FF00 /* Reset Source: CORER */
+#define PF_SB_ARQBAH_ARQBAH_S 0
+#define PF_SB_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF_SB_ARQBAL 0x0022FE80 /* Reset Source: CORER */
+#define PF_SB_ARQBAL_ARQBAL_LSB_S 0
+#define PF_SB_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF_SB_ARQBAL_ARQBAL_S 6
+#define PF_SB_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF_SB_ARQH 0x00230000 /* Reset Source: CORER */
+#define PF_SB_ARQH_ARQH_S 0
+#define PF_SB_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF_SB_ARQLEN 0x0022FF80 /* Reset Source: PFR */
+#define PF_SB_ARQLEN_ARQLEN_S 0
+#define PF_SB_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF_SB_ARQLEN_ARQVFE_S 28
+#define PF_SB_ARQLEN_ARQVFE_M BIT(28)
+#define PF_SB_ARQLEN_ARQOVFL_S 29
+#define PF_SB_ARQLEN_ARQOVFL_M BIT(29)
+#define PF_SB_ARQLEN_ARQCRIT_S 30
+#define PF_SB_ARQLEN_ARQCRIT_M BIT(30)
+#define PF_SB_ARQLEN_ARQENABLE_S 31
+#define PF_SB_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_SB_ARQT 0x00230080 /* Reset Source: CORER */
+#define PF_SB_ARQT_ARQT_S 0
+#define PF_SB_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF_SB_ATQBAH 0x0022FC80 /* Reset Source: CORER */
+#define PF_SB_ATQBAH_ATQBAH_S 0
+#define PF_SB_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF_SB_ATQBAL 0x0022FC00 /* Reset Source: CORER */
+#define PF_SB_ATQBAL_ATQBAL_S 6
+#define PF_SB_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF_SB_ATQH 0x0022FD80 /* Reset Source: CORER */
+#define PF_SB_ATQH_ATQH_S 0
+#define PF_SB_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF_SB_ATQLEN 0x0022FD00 /* Reset Source: PFR */
+#define PF_SB_ATQLEN_ATQLEN_S 0
+#define PF_SB_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF_SB_ATQLEN_ATQVFE_S 28
+#define PF_SB_ATQLEN_ATQVFE_M BIT(28)
+#define PF_SB_ATQLEN_ATQOVFL_S 29
+#define PF_SB_ATQLEN_ATQOVFL_M BIT(29)
+#define PF_SB_ATQLEN_ATQCRIT_S 30
+#define PF_SB_ATQLEN_ATQCRIT_M BIT(30)
+#define PF_SB_ATQLEN_ATQENABLE_S 31
+#define PF_SB_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_SB_ATQT 0x0022FE00 /* Reset Source: CORER */
+#define PF_SB_ATQT_ATQT_S 0
+#define PF_SB_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF_SB_REM_DEV_CTL 0x002300F0 /* Reset Source: CORER */
+#define PF_SB_REM_DEV_CTL_DEST_EN_S 0
+#define PF_SB_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0)
+#define PF0_FW_HLP_ARQBAH 0x000801C8 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQBAH_ARQBAH_S 0
+#define PF0_FW_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_FW_HLP_ARQBAL 0x000800C8 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQBAL_ARQBAL_LSB_S 0
+#define PF0_FW_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_FW_HLP_ARQBAL_ARQBAL_S 6
+#define PF0_FW_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_FW_HLP_ARQH 0x000803C8 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQH_ARQH_S 0
+#define PF0_FW_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ARQLEN 0x000802C8 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQLEN_ARQLEN_S 0
+#define PF0_FW_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ARQLEN_ARQVFE_S 28
+#define PF0_FW_HLP_ARQLEN_ARQVFE_M BIT(28)
+#define PF0_FW_HLP_ARQLEN_ARQOVFL_S 29
+#define PF0_FW_HLP_ARQLEN_ARQOVFL_M BIT(29)
+#define PF0_FW_HLP_ARQLEN_ARQCRIT_S 30
+#define PF0_FW_HLP_ARQLEN_ARQCRIT_M BIT(30)
+#define PF0_FW_HLP_ARQLEN_ARQENABLE_S 31
+#define PF0_FW_HLP_ARQLEN_ARQENABLE_M BIT(31)
+#define PF0_FW_HLP_ARQT 0x000804C8 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ARQT_ARQT_S 0
+#define PF0_FW_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ATQBAH 0x00080148 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQBAH_ATQBAH_S 0
+#define PF0_FW_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_FW_HLP_ATQBAL 0x00080048 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQBAL_ATQBAL_LSB_S 0
+#define PF0_FW_HLP_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_FW_HLP_ATQBAL_ATQBAL_S 6
+#define PF0_FW_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_FW_HLP_ATQH 0x00080348 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQH_ATQH_S 0
+#define PF0_FW_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ATQLEN 0x00080248 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQLEN_ATQLEN_S 0
+#define PF0_FW_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_HLP_ATQLEN_ATQVFE_S 28
+#define PF0_FW_HLP_ATQLEN_ATQVFE_M BIT(28)
+#define PF0_FW_HLP_ATQLEN_ATQOVFL_S 29
+#define PF0_FW_HLP_ATQLEN_ATQOVFL_M BIT(29)
+#define PF0_FW_HLP_ATQLEN_ATQCRIT_S 30
+#define PF0_FW_HLP_ATQLEN_ATQCRIT_M BIT(30)
+#define PF0_FW_HLP_ATQLEN_ATQENABLE_S 31
+#define PF0_FW_HLP_ATQLEN_ATQENABLE_M BIT(31)
+#define PF0_FW_HLP_ATQT 0x00080448 /* Reset Source: EMPR */
+#define PF0_FW_HLP_ATQT_ATQT_S 0
+#define PF0_FW_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ARQBAH 0x000801C4 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQBAH_ARQBAH_S 0
+#define PF0_FW_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_FW_PSM_ARQBAL 0x000800C4 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQBAL_ARQBAL_LSB_S 0
+#define PF0_FW_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_FW_PSM_ARQBAL_ARQBAL_S 6
+#define PF0_FW_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_FW_PSM_ARQH 0x000803C4 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQH_ARQH_S 0
+#define PF0_FW_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ARQLEN 0x000802C4 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQLEN_ARQLEN_S 0
+#define PF0_FW_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ARQLEN_ARQVFE_S 28
+#define PF0_FW_PSM_ARQLEN_ARQVFE_M BIT(28)
+#define PF0_FW_PSM_ARQLEN_ARQOVFL_S 29
+#define PF0_FW_PSM_ARQLEN_ARQOVFL_M BIT(29)
+#define PF0_FW_PSM_ARQLEN_ARQCRIT_S 30
+#define PF0_FW_PSM_ARQLEN_ARQCRIT_M BIT(30)
+#define PF0_FW_PSM_ARQLEN_ARQENABLE_S 31
+#define PF0_FW_PSM_ARQLEN_ARQENABLE_M BIT(31)
+#define PF0_FW_PSM_ARQT 0x000804C4 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ARQT_ARQT_S 0
+#define PF0_FW_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ATQBAH 0x00080144 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQBAH_ATQBAH_S 0
+#define PF0_FW_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_FW_PSM_ATQBAL 0x00080044 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQBAL_ATQBAL_LSB_S 0
+#define PF0_FW_PSM_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_FW_PSM_ATQBAL_ATQBAL_S 6
+#define PF0_FW_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_FW_PSM_ATQH 0x00080344 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQH_ATQH_S 0
+#define PF0_FW_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ATQLEN 0x00080244 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQLEN_ATQLEN_S 0
+#define PF0_FW_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_FW_PSM_ATQLEN_ATQVFE_S 28
+#define PF0_FW_PSM_ATQLEN_ATQVFE_M BIT(28)
+#define PF0_FW_PSM_ATQLEN_ATQOVFL_S 29
+#define PF0_FW_PSM_ATQLEN_ATQOVFL_M BIT(29)
+#define PF0_FW_PSM_ATQLEN_ATQCRIT_S 30
+#define PF0_FW_PSM_ATQLEN_ATQCRIT_M BIT(30)
+#define PF0_FW_PSM_ATQLEN_ATQENABLE_S 31
+#define PF0_FW_PSM_ATQLEN_ATQENABLE_M BIT(31)
+#define PF0_FW_PSM_ATQT 0x00080444 /* Reset Source: EMPR */
+#define PF0_FW_PSM_ATQT_ATQT_S 0
+#define PF0_FW_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ARQBAH 0x0022E5D8 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ARQBAH_ARQBAH_S 0
+#define PF0_MBX_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_CPM_ARQBAL 0x0022E5D4 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ARQBAL_ARQBAL_LSB_S 0
+#define PF0_MBX_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_MBX_CPM_ARQBAL_ARQBAL_S 6
+#define PF0_MBX_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_CPM_ARQH 0x0022E5E0 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ARQH_ARQH_S 0
+#define PF0_MBX_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ARQLEN 0x0022E5DC /* Reset Source: PFR */
+#define PF0_MBX_CPM_ARQLEN_ARQLEN_S 0
+#define PF0_MBX_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ARQLEN_ARQVFE_S 28
+#define PF0_MBX_CPM_ARQLEN_ARQVFE_M BIT(28)
+#define PF0_MBX_CPM_ARQLEN_ARQOVFL_S 29
+#define PF0_MBX_CPM_ARQLEN_ARQOVFL_M BIT(29)
+#define PF0_MBX_CPM_ARQLEN_ARQCRIT_S 30
+#define PF0_MBX_CPM_ARQLEN_ARQCRIT_M BIT(30)
+#define PF0_MBX_CPM_ARQLEN_ARQENABLE_S 31
+#define PF0_MBX_CPM_ARQLEN_ARQENABLE_M BIT(31)
+#define PF0_MBX_CPM_ARQT 0x0022E5E4 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ARQT_ARQT_S 0
+#define PF0_MBX_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ATQBAH 0x0022E5C4 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ATQBAH_ATQBAH_S 0
+#define PF0_MBX_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_CPM_ATQBAL 0x0022E5C0 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ATQBAL_ATQBAL_S 6
+#define PF0_MBX_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_CPM_ATQH 0x0022E5CC /* Reset Source: CORER */
+#define PF0_MBX_CPM_ATQH_ATQH_S 0
+#define PF0_MBX_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ATQLEN 0x0022E5C8 /* Reset Source: PFR */
+#define PF0_MBX_CPM_ATQLEN_ATQLEN_S 0
+#define PF0_MBX_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_CPM_ATQLEN_ATQVFE_S 28
+#define PF0_MBX_CPM_ATQLEN_ATQVFE_M BIT(28)
+#define PF0_MBX_CPM_ATQLEN_ATQOVFL_S 29
+#define PF0_MBX_CPM_ATQLEN_ATQOVFL_M BIT(29)
+#define PF0_MBX_CPM_ATQLEN_ATQCRIT_S 30
+#define PF0_MBX_CPM_ATQLEN_ATQCRIT_M BIT(30)
+#define PF0_MBX_CPM_ATQLEN_ATQENABLE_S 31
+#define PF0_MBX_CPM_ATQLEN_ATQENABLE_M BIT(31)
+#define PF0_MBX_CPM_ATQT 0x0022E5D0 /* Reset Source: CORER */
+#define PF0_MBX_CPM_ATQT_ATQT_S 0
+#define PF0_MBX_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ARQBAH 0x0022E600 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ARQBAH_ARQBAH_S 0
+#define PF0_MBX_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_HLP_ARQBAL 0x0022E5FC /* Reset Source: CORER */
+#define PF0_MBX_HLP_ARQBAL_ARQBAL_LSB_S 0
+#define PF0_MBX_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_MBX_HLP_ARQBAL_ARQBAL_S 6
+#define PF0_MBX_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_HLP_ARQH 0x0022E608 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ARQH_ARQH_S 0
+#define PF0_MBX_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ARQLEN 0x0022E604 /* Reset Source: PFR */
+#define PF0_MBX_HLP_ARQLEN_ARQLEN_S 0
+#define PF0_MBX_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ARQLEN_ARQVFE_S 28
+#define PF0_MBX_HLP_ARQLEN_ARQVFE_M BIT(28)
+#define PF0_MBX_HLP_ARQLEN_ARQOVFL_S 29
+#define PF0_MBX_HLP_ARQLEN_ARQOVFL_M BIT(29)
+#define PF0_MBX_HLP_ARQLEN_ARQCRIT_S 30
+#define PF0_MBX_HLP_ARQLEN_ARQCRIT_M BIT(30)
+#define PF0_MBX_HLP_ARQLEN_ARQENABLE_S 31
+#define PF0_MBX_HLP_ARQLEN_ARQENABLE_M BIT(31)
+#define PF0_MBX_HLP_ARQT 0x0022E60C /* Reset Source: CORER */
+#define PF0_MBX_HLP_ARQT_ARQT_S 0
+#define PF0_MBX_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ATQBAH 0x0022E5EC /* Reset Source: CORER */
+#define PF0_MBX_HLP_ATQBAH_ATQBAH_S 0
+#define PF0_MBX_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_HLP_ATQBAL 0x0022E5E8 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ATQBAL_ATQBAL_S 6
+#define PF0_MBX_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_HLP_ATQH 0x0022E5F4 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ATQH_ATQH_S 0
+#define PF0_MBX_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ATQLEN 0x0022E5F0 /* Reset Source: PFR */
+#define PF0_MBX_HLP_ATQLEN_ATQLEN_S 0
+#define PF0_MBX_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_HLP_ATQLEN_ATQVFE_S 28
+#define PF0_MBX_HLP_ATQLEN_ATQVFE_M BIT(28)
+#define PF0_MBX_HLP_ATQLEN_ATQOVFL_S 29
+#define PF0_MBX_HLP_ATQLEN_ATQOVFL_M BIT(29)
+#define PF0_MBX_HLP_ATQLEN_ATQCRIT_S 30
+#define PF0_MBX_HLP_ATQLEN_ATQCRIT_M BIT(30)
+#define PF0_MBX_HLP_ATQLEN_ATQENABLE_S 31
+#define PF0_MBX_HLP_ATQLEN_ATQENABLE_M BIT(31)
+#define PF0_MBX_HLP_ATQT 0x0022E5F8 /* Reset Source: CORER */
+#define PF0_MBX_HLP_ATQT_ATQT_S 0
+#define PF0_MBX_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ARQBAH 0x0022E628 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ARQBAH_ARQBAH_S 0
+#define PF0_MBX_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_PSM_ARQBAL 0x0022E624 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ARQBAL_ARQBAL_LSB_S 0
+#define PF0_MBX_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_MBX_PSM_ARQBAL_ARQBAL_S 6
+#define PF0_MBX_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_PSM_ARQH 0x0022E630 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ARQH_ARQH_S 0
+#define PF0_MBX_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ARQLEN 0x0022E62C /* Reset Source: PFR */
+#define PF0_MBX_PSM_ARQLEN_ARQLEN_S 0
+#define PF0_MBX_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ARQLEN_ARQVFE_S 28
+#define PF0_MBX_PSM_ARQLEN_ARQVFE_M BIT(28)
+#define PF0_MBX_PSM_ARQLEN_ARQOVFL_S 29
+#define PF0_MBX_PSM_ARQLEN_ARQOVFL_M BIT(29)
+#define PF0_MBX_PSM_ARQLEN_ARQCRIT_S 30
+#define PF0_MBX_PSM_ARQLEN_ARQCRIT_M BIT(30)
+#define PF0_MBX_PSM_ARQLEN_ARQENABLE_S 31
+#define PF0_MBX_PSM_ARQLEN_ARQENABLE_M BIT(31)
+#define PF0_MBX_PSM_ARQT 0x0022E634 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ARQT_ARQT_S 0
+#define PF0_MBX_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ATQBAH 0x0022E614 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ATQBAH_ATQBAH_S 0
+#define PF0_MBX_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_MBX_PSM_ATQBAL 0x0022E610 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ATQBAL_ATQBAL_S 6
+#define PF0_MBX_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_MBX_PSM_ATQH 0x0022E61C /* Reset Source: CORER */
+#define PF0_MBX_PSM_ATQH_ATQH_S 0
+#define PF0_MBX_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ATQLEN 0x0022E618 /* Reset Source: PFR */
+#define PF0_MBX_PSM_ATQLEN_ATQLEN_S 0
+#define PF0_MBX_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_MBX_PSM_ATQLEN_ATQVFE_S 28
+#define PF0_MBX_PSM_ATQLEN_ATQVFE_M BIT(28)
+#define PF0_MBX_PSM_ATQLEN_ATQOVFL_S 29
+#define PF0_MBX_PSM_ATQLEN_ATQOVFL_M BIT(29)
+#define PF0_MBX_PSM_ATQLEN_ATQCRIT_S 30
+#define PF0_MBX_PSM_ATQLEN_ATQCRIT_M BIT(30)
+#define PF0_MBX_PSM_ATQLEN_ATQENABLE_S 31
+#define PF0_MBX_PSM_ATQLEN_ATQENABLE_M BIT(31)
+#define PF0_MBX_PSM_ATQT 0x0022E620 /* Reset Source: CORER */
+#define PF0_MBX_PSM_ATQT_ATQT_S 0
+#define PF0_MBX_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ARQBAH 0x0022E650 /* Reset Source: CORER */
+#define PF0_SB_CPM_ARQBAH_ARQBAH_S 0
+#define PF0_SB_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_SB_CPM_ARQBAL 0x0022E64C /* Reset Source: CORER */
+#define PF0_SB_CPM_ARQBAL_ARQBAL_LSB_S 0
+#define PF0_SB_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_SB_CPM_ARQBAL_ARQBAL_S 6
+#define PF0_SB_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_SB_CPM_ARQH 0x0022E658 /* Reset Source: CORER */
+#define PF0_SB_CPM_ARQH_ARQH_S 0
+#define PF0_SB_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ARQLEN 0x0022E654 /* Reset Source: PFR */
+#define PF0_SB_CPM_ARQLEN_ARQLEN_S 0
+#define PF0_SB_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ARQLEN_ARQVFE_S 28
+#define PF0_SB_CPM_ARQLEN_ARQVFE_M BIT(28)
+#define PF0_SB_CPM_ARQLEN_ARQOVFL_S 29
+#define PF0_SB_CPM_ARQLEN_ARQOVFL_M BIT(29)
+#define PF0_SB_CPM_ARQLEN_ARQCRIT_S 30
+#define PF0_SB_CPM_ARQLEN_ARQCRIT_M BIT(30)
+#define PF0_SB_CPM_ARQLEN_ARQENABLE_S 31
+#define PF0_SB_CPM_ARQLEN_ARQENABLE_M BIT(31)
+#define PF0_SB_CPM_ARQT 0x0022E65C /* Reset Source: CORER */
+#define PF0_SB_CPM_ARQT_ARQT_S 0
+#define PF0_SB_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ATQBAH 0x0022E63C /* Reset Source: CORER */
+#define PF0_SB_CPM_ATQBAH_ATQBAH_S 0
+#define PF0_SB_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_SB_CPM_ATQBAL 0x0022E638 /* Reset Source: CORER */
+#define PF0_SB_CPM_ATQBAL_ATQBAL_S 6
+#define PF0_SB_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_SB_CPM_ATQH 0x0022E644 /* Reset Source: CORER */
+#define PF0_SB_CPM_ATQH_ATQH_S 0
+#define PF0_SB_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ATQLEN 0x0022E640 /* Reset Source: PFR */
+#define PF0_SB_CPM_ATQLEN_ATQLEN_S 0
+#define PF0_SB_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_ATQLEN_ATQVFE_S 28
+#define PF0_SB_CPM_ATQLEN_ATQVFE_M BIT(28)
+#define PF0_SB_CPM_ATQLEN_ATQOVFL_S 29
+#define PF0_SB_CPM_ATQLEN_ATQOVFL_M BIT(29)
+#define PF0_SB_CPM_ATQLEN_ATQCRIT_S 30
+#define PF0_SB_CPM_ATQLEN_ATQCRIT_M BIT(30)
+#define PF0_SB_CPM_ATQLEN_ATQENABLE_S 31
+#define PF0_SB_CPM_ATQLEN_ATQENABLE_M BIT(31)
+#define PF0_SB_CPM_ATQT 0x0022E648 /* Reset Source: CORER */
+#define PF0_SB_CPM_ATQT_ATQT_S 0
+#define PF0_SB_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_CPM_REM_DEV_CTL 0x002300F4 /* Reset Source: CORER */
+#define PF0_SB_CPM_REM_DEV_CTL_DEST_EN_S 0
+#define PF0_SB_CPM_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0)
+#define PF0_SB_HLP_ARQBAH 0x002300D8 /* Reset Source: CORER */
+#define PF0_SB_HLP_ARQBAH_ARQBAH_S 0
+#define PF0_SB_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_SB_HLP_ARQBAL 0x002300D4 /* Reset Source: CORER */
+#define PF0_SB_HLP_ARQBAL_ARQBAL_LSB_S 0
+#define PF0_SB_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define PF0_SB_HLP_ARQBAL_ARQBAL_S 6
+#define PF0_SB_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_SB_HLP_ARQH 0x002300E0 /* Reset Source: CORER */
+#define PF0_SB_HLP_ARQH_ARQH_S 0
+#define PF0_SB_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ARQLEN 0x002300DC /* Reset Source: PFR */
+#define PF0_SB_HLP_ARQLEN_ARQLEN_S 0
+#define PF0_SB_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ARQLEN_ARQVFE_S 28
+#define PF0_SB_HLP_ARQLEN_ARQVFE_M BIT(28)
+#define PF0_SB_HLP_ARQLEN_ARQOVFL_S 29
+#define PF0_SB_HLP_ARQLEN_ARQOVFL_M BIT(29)
+#define PF0_SB_HLP_ARQLEN_ARQCRIT_S 30
+#define PF0_SB_HLP_ARQLEN_ARQCRIT_M BIT(30)
+#define PF0_SB_HLP_ARQLEN_ARQENABLE_S 31
+#define PF0_SB_HLP_ARQLEN_ARQENABLE_M BIT(31)
+#define PF0_SB_HLP_ARQT 0x002300E4 /* Reset Source: CORER */
+#define PF0_SB_HLP_ARQT_ARQT_S 0
+#define PF0_SB_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ATQBAH 0x002300C4 /* Reset Source: CORER */
+#define PF0_SB_HLP_ATQBAH_ATQBAH_S 0
+#define PF0_SB_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PF0_SB_HLP_ATQBAL 0x002300C0 /* Reset Source: CORER */
+#define PF0_SB_HLP_ATQBAL_ATQBAL_S 6
+#define PF0_SB_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define PF0_SB_HLP_ATQH 0x002300CC /* Reset Source: CORER */
+#define PF0_SB_HLP_ATQH_ATQH_S 0
+#define PF0_SB_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ATQLEN 0x002300C8 /* Reset Source: PFR */
+#define PF0_SB_HLP_ATQLEN_ATQLEN_S 0
+#define PF0_SB_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_ATQLEN_ATQVFE_S 28
+#define PF0_SB_HLP_ATQLEN_ATQVFE_M BIT(28)
+#define PF0_SB_HLP_ATQLEN_ATQOVFL_S 29
+#define PF0_SB_HLP_ATQLEN_ATQOVFL_M BIT(29)
+#define PF0_SB_HLP_ATQLEN_ATQCRIT_S 30
+#define PF0_SB_HLP_ATQLEN_ATQCRIT_M BIT(30)
+#define PF0_SB_HLP_ATQLEN_ATQENABLE_S 31
+#define PF0_SB_HLP_ATQLEN_ATQENABLE_M BIT(31)
+#define PF0_SB_HLP_ATQT 0x002300D0 /* Reset Source: CORER */
+#define PF0_SB_HLP_ATQT_ATQT_S 0
+#define PF0_SB_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define PF0_SB_HLP_REM_DEV_CTL 0x002300E8 /* Reset Source: CORER */
+#define PF0_SB_HLP_REM_DEV_CTL_DEST_EN_S 0
+#define PF0_SB_HLP_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0)
+#define SB_REM_DEV_DEST(_i) (0x002300F8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define SB_REM_DEV_DEST_MAX_INDEX 7
+#define SB_REM_DEV_DEST_DEST_S 0
+#define SB_REM_DEV_DEST_DEST_M MAKEMASK(0xF, 0)
+#define SB_REM_DEV_DEST_DEST_VALID_S 31
+#define SB_REM_DEV_DEST_DEST_VALID_M BIT(31)
+#define VF_MBX_ARQBAH(_VF) (0x0022B800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VF_MBX_ARQBAH_MAX_INDEX 255
+#define VF_MBX_ARQBAH_ARQBAH_S 0
+#define VF_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_ARQBAL(_VF) (0x0022B400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VF_MBX_ARQBAL_MAX_INDEX 255
+#define VF_MBX_ARQBAL_ARQBAL_LSB_S 0
+#define VF_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_MBX_ARQBAL_ARQBAL_S 6
+#define VF_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_ARQH(_VF) (0x0022C000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VF_MBX_ARQH_MAX_INDEX 255
+#define VF_MBX_ARQH_ARQH_S 0
+#define VF_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VF_MBX_ARQLEN_MAX_INDEX 255
+#define VF_MBX_ARQLEN_ARQLEN_S 0
+#define VF_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ARQLEN_ARQVFE_S 28
+#define VF_MBX_ARQLEN_ARQVFE_M BIT(28)
+#define VF_MBX_ARQLEN_ARQOVFL_S 29
+#define VF_MBX_ARQLEN_ARQOVFL_M BIT(29)
+#define VF_MBX_ARQLEN_ARQCRIT_S 30
+#define VF_MBX_ARQLEN_ARQCRIT_M BIT(30)
+#define VF_MBX_ARQLEN_ARQENABLE_S 31
+#define VF_MBX_ARQLEN_ARQENABLE_M BIT(31)
+#define VF_MBX_ARQT(_VF) (0x0022C400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VF_MBX_ARQT_MAX_INDEX 255
+#define VF_MBX_ARQT_ARQT_S 0
+#define VF_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ATQBAH(_VF) (0x0022A400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VF_MBX_ATQBAH_MAX_INDEX 255
+#define VF_MBX_ATQBAH_ATQBAH_S 0
+#define VF_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_ATQBAL(_VF) (0x0022A000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VF_MBX_ATQBAL_MAX_INDEX 255
+#define VF_MBX_ATQBAL_ATQBAL_S 6
+#define VF_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_ATQH(_VF) (0x0022AC00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VF_MBX_ATQH_MAX_INDEX 255
+#define VF_MBX_ATQH_ATQH_S 0
+#define VF_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VF_MBX_ATQLEN_MAX_INDEX 255
+#define VF_MBX_ATQLEN_ATQLEN_S 0
+#define VF_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ATQLEN_ATQVFE_S 28
+#define VF_MBX_ATQLEN_ATQVFE_M BIT(28)
+#define VF_MBX_ATQLEN_ATQOVFL_S 29
+#define VF_MBX_ATQLEN_ATQOVFL_M BIT(29)
+#define VF_MBX_ATQLEN_ATQCRIT_S 30
+#define VF_MBX_ATQLEN_ATQCRIT_M BIT(30)
+#define VF_MBX_ATQLEN_ATQENABLE_S 31
+#define VF_MBX_ATQLEN_ATQENABLE_M BIT(31)
+#define VF_MBX_ATQT(_VF) (0x0022B000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VF_MBX_ATQT_MAX_INDEX 255
+#define VF_MBX_ATQT_ATQT_S 0
+#define VF_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ARQBAH(_VF128) (0x0022D400 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_MBX_CPM_ARQBAH_MAX_INDEX 127
+#define VF_MBX_CPM_ARQBAH_ARQBAH_S 0
+#define VF_MBX_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_CPM_ARQBAL(_VF128) (0x0022D200 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_MBX_CPM_ARQBAL_MAX_INDEX 127
+#define VF_MBX_CPM_ARQBAL_ARQBAL_LSB_S 0
+#define VF_MBX_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_MBX_CPM_ARQBAL_ARQBAL_S 6
+#define VF_MBX_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_CPM_ARQH(_VF128) (0x0022D800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_MBX_CPM_ARQH_MAX_INDEX 127
+#define VF_MBX_CPM_ARQH_ARQH_S 0
+#define VF_MBX_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ARQLEN(_VF128) (0x0022D600 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */
+#define VF_MBX_CPM_ARQLEN_MAX_INDEX 127
+#define VF_MBX_CPM_ARQLEN_ARQLEN_S 0
+#define VF_MBX_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ARQLEN_ARQVFE_S 28
+#define VF_MBX_CPM_ARQLEN_ARQVFE_M BIT(28)
+#define VF_MBX_CPM_ARQLEN_ARQOVFL_S 29
+#define VF_MBX_CPM_ARQLEN_ARQOVFL_M BIT(29)
+#define VF_MBX_CPM_ARQLEN_ARQCRIT_S 30
+#define VF_MBX_CPM_ARQLEN_ARQCRIT_M BIT(30)
+#define VF_MBX_CPM_ARQLEN_ARQENABLE_S 31
+#define VF_MBX_CPM_ARQLEN_ARQENABLE_M BIT(31)
+#define VF_MBX_CPM_ARQT(_VF128) (0x0022DA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_MBX_CPM_ARQT_MAX_INDEX 127
+#define VF_MBX_CPM_ARQT_ARQT_S 0
+#define VF_MBX_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ATQBAH(_VF128) (0x0022CA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_MBX_CPM_ATQBAH_MAX_INDEX 127
+#define VF_MBX_CPM_ATQBAH_ATQBAH_S 0
+#define VF_MBX_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_CPM_ATQBAL(_VF128) (0x0022C800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_MBX_CPM_ATQBAL_MAX_INDEX 127
+#define VF_MBX_CPM_ATQBAL_ATQBAL_S 6
+#define VF_MBX_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_CPM_ATQH(_VF128) (0x0022CE00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_MBX_CPM_ATQH_MAX_INDEX 127
+#define VF_MBX_CPM_ATQH_ATQH_S 0
+#define VF_MBX_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ATQLEN(_VF128) (0x0022CC00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */
+#define VF_MBX_CPM_ATQLEN_MAX_INDEX 127
+#define VF_MBX_CPM_ATQLEN_ATQLEN_S 0
+#define VF_MBX_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ATQLEN_ATQVFE_S 28
+#define VF_MBX_CPM_ATQLEN_ATQVFE_M BIT(28)
+#define VF_MBX_CPM_ATQLEN_ATQOVFL_S 29
+#define VF_MBX_CPM_ATQLEN_ATQOVFL_M BIT(29)
+#define VF_MBX_CPM_ATQLEN_ATQCRIT_S 30
+#define VF_MBX_CPM_ATQLEN_ATQCRIT_M BIT(30)
+#define VF_MBX_CPM_ATQLEN_ATQENABLE_S 31
+#define VF_MBX_CPM_ATQLEN_ATQENABLE_M BIT(31)
+#define VF_MBX_CPM_ATQT(_VF128) (0x0022D000 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_MBX_CPM_ATQT_MAX_INDEX 127
+#define VF_MBX_CPM_ATQT_ATQT_S 0
+#define VF_MBX_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ARQBAH(_VF16) (0x0022DD80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_HLP_ARQBAH_MAX_INDEX 15
+#define VF_MBX_HLP_ARQBAH_ARQBAH_S 0
+#define VF_MBX_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_HLP_ARQBAL(_VF16) (0x0022DD40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_HLP_ARQBAL_MAX_INDEX 15
+#define VF_MBX_HLP_ARQBAL_ARQBAL_LSB_S 0
+#define VF_MBX_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_MBX_HLP_ARQBAL_ARQBAL_S 6
+#define VF_MBX_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_HLP_ARQH(_VF16) (0x0022DE00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_HLP_ARQH_MAX_INDEX 15
+#define VF_MBX_HLP_ARQH_ARQH_S 0
+#define VF_MBX_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ARQLEN(_VF16) (0x0022DDC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */
+#define VF_MBX_HLP_ARQLEN_MAX_INDEX 15
+#define VF_MBX_HLP_ARQLEN_ARQLEN_S 0
+#define VF_MBX_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ARQLEN_ARQVFE_S 28
+#define VF_MBX_HLP_ARQLEN_ARQVFE_M BIT(28)
+#define VF_MBX_HLP_ARQLEN_ARQOVFL_S 29
+#define VF_MBX_HLP_ARQLEN_ARQOVFL_M BIT(29)
+#define VF_MBX_HLP_ARQLEN_ARQCRIT_S 30
+#define VF_MBX_HLP_ARQLEN_ARQCRIT_M BIT(30)
+#define VF_MBX_HLP_ARQLEN_ARQENABLE_S 31
+#define VF_MBX_HLP_ARQLEN_ARQENABLE_M BIT(31)
+#define VF_MBX_HLP_ARQT(_VF16) (0x0022DE40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_HLP_ARQT_MAX_INDEX 15
+#define VF_MBX_HLP_ARQT_ARQT_S 0
+#define VF_MBX_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ATQBAH(_VF16) (0x0022DC40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_HLP_ATQBAH_MAX_INDEX 15
+#define VF_MBX_HLP_ATQBAH_ATQBAH_S 0
+#define VF_MBX_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_HLP_ATQBAL(_VF16) (0x0022DC00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_HLP_ATQBAL_MAX_INDEX 15
+#define VF_MBX_HLP_ATQBAL_ATQBAL_S 6
+#define VF_MBX_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_HLP_ATQH(_VF16) (0x0022DCC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_HLP_ATQH_MAX_INDEX 15
+#define VF_MBX_HLP_ATQH_ATQH_S 0
+#define VF_MBX_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ATQLEN(_VF16) (0x0022DC80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */
+#define VF_MBX_HLP_ATQLEN_MAX_INDEX 15
+#define VF_MBX_HLP_ATQLEN_ATQLEN_S 0
+#define VF_MBX_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ATQLEN_ATQVFE_S 28
+#define VF_MBX_HLP_ATQLEN_ATQVFE_M BIT(28)
+#define VF_MBX_HLP_ATQLEN_ATQOVFL_S 29
+#define VF_MBX_HLP_ATQLEN_ATQOVFL_M BIT(29)
+#define VF_MBX_HLP_ATQLEN_ATQCRIT_S 30
+#define VF_MBX_HLP_ATQLEN_ATQCRIT_M BIT(30)
+#define VF_MBX_HLP_ATQLEN_ATQENABLE_S 31
+#define VF_MBX_HLP_ATQLEN_ATQENABLE_M BIT(31)
+#define VF_MBX_HLP_ATQT(_VF16) (0x0022DD00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_HLP_ATQT_MAX_INDEX 15
+#define VF_MBX_HLP_ATQT_ATQT_S 0
+#define VF_MBX_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ARQBAH(_VF16) (0x0022E000 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_PSM_ARQBAH_MAX_INDEX 15
+#define VF_MBX_PSM_ARQBAH_ARQBAH_S 0
+#define VF_MBX_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_PSM_ARQBAL(_VF16) (0x0022DFC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_PSM_ARQBAL_MAX_INDEX 15
+#define VF_MBX_PSM_ARQBAL_ARQBAL_LSB_S 0
+#define VF_MBX_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_MBX_PSM_ARQBAL_ARQBAL_S 6
+#define VF_MBX_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_PSM_ARQH(_VF16) (0x0022E080 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_PSM_ARQH_MAX_INDEX 15
+#define VF_MBX_PSM_ARQH_ARQH_S 0
+#define VF_MBX_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ARQLEN(_VF16) (0x0022E040 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */
+#define VF_MBX_PSM_ARQLEN_MAX_INDEX 15
+#define VF_MBX_PSM_ARQLEN_ARQLEN_S 0
+#define VF_MBX_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ARQLEN_ARQVFE_S 28
+#define VF_MBX_PSM_ARQLEN_ARQVFE_M BIT(28)
+#define VF_MBX_PSM_ARQLEN_ARQOVFL_S 29
+#define VF_MBX_PSM_ARQLEN_ARQOVFL_M BIT(29)
+#define VF_MBX_PSM_ARQLEN_ARQCRIT_S 30
+#define VF_MBX_PSM_ARQLEN_ARQCRIT_M BIT(30)
+#define VF_MBX_PSM_ARQLEN_ARQENABLE_S 31
+#define VF_MBX_PSM_ARQLEN_ARQENABLE_M BIT(31)
+#define VF_MBX_PSM_ARQT(_VF16) (0x0022E0C0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_PSM_ARQT_MAX_INDEX 15
+#define VF_MBX_PSM_ARQT_ARQT_S 0
+#define VF_MBX_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ATQBAH(_VF16) (0x0022DEC0 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_PSM_ATQBAH_MAX_INDEX 15
+#define VF_MBX_PSM_ATQBAH_ATQBAH_S 0
+#define VF_MBX_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_PSM_ATQBAL(_VF16) (0x0022DE80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_PSM_ATQBAL_MAX_INDEX 15
+#define VF_MBX_PSM_ATQBAL_ATQBAL_S 6
+#define VF_MBX_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_PSM_ATQH(_VF16) (0x0022DF40 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_PSM_ATQH_MAX_INDEX 15
+#define VF_MBX_PSM_ATQH_ATQH_S 0
+#define VF_MBX_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ATQLEN(_VF16) (0x0022DF00 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: PFR */
+#define VF_MBX_PSM_ATQLEN_MAX_INDEX 15
+#define VF_MBX_PSM_ATQLEN_ATQLEN_S 0
+#define VF_MBX_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ATQLEN_ATQVFE_S 28
+#define VF_MBX_PSM_ATQLEN_ATQVFE_M BIT(28)
+#define VF_MBX_PSM_ATQLEN_ATQOVFL_S 29
+#define VF_MBX_PSM_ATQLEN_ATQOVFL_M BIT(29)
+#define VF_MBX_PSM_ATQLEN_ATQCRIT_S 30
+#define VF_MBX_PSM_ATQLEN_ATQCRIT_M BIT(30)
+#define VF_MBX_PSM_ATQLEN_ATQENABLE_S 31
+#define VF_MBX_PSM_ATQLEN_ATQENABLE_M BIT(31)
+#define VF_MBX_PSM_ATQT(_VF16) (0x0022DF80 + ((_VF16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VF_MBX_PSM_ATQT_MAX_INDEX 15
+#define VF_MBX_PSM_ATQT_ATQT_S 0
+#define VF_MBX_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ARQBAH(_VF128) (0x0022F400 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_SB_CPM_ARQBAH_MAX_INDEX 127
+#define VF_SB_CPM_ARQBAH_ARQBAH_S 0
+#define VF_SB_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_SB_CPM_ARQBAL(_VF128) (0x0022F200 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_SB_CPM_ARQBAL_MAX_INDEX 127
+#define VF_SB_CPM_ARQBAL_ARQBAL_LSB_S 0
+#define VF_SB_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_SB_CPM_ARQBAL_ARQBAL_S 6
+#define VF_SB_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_SB_CPM_ARQH(_VF128) (0x0022F800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_SB_CPM_ARQH_MAX_INDEX 127
+#define VF_SB_CPM_ARQH_ARQH_S 0
+#define VF_SB_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ARQLEN(_VF128) (0x0022F600 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */
+#define VF_SB_CPM_ARQLEN_MAX_INDEX 127
+#define VF_SB_CPM_ARQLEN_ARQLEN_S 0
+#define VF_SB_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ARQLEN_ARQVFE_S 28
+#define VF_SB_CPM_ARQLEN_ARQVFE_M BIT(28)
+#define VF_SB_CPM_ARQLEN_ARQOVFL_S 29
+#define VF_SB_CPM_ARQLEN_ARQOVFL_M BIT(29)
+#define VF_SB_CPM_ARQLEN_ARQCRIT_S 30
+#define VF_SB_CPM_ARQLEN_ARQCRIT_M BIT(30)
+#define VF_SB_CPM_ARQLEN_ARQENABLE_S 31
+#define VF_SB_CPM_ARQLEN_ARQENABLE_M BIT(31)
+#define VF_SB_CPM_ARQT(_VF128) (0x0022FA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_SB_CPM_ARQT_MAX_INDEX 127
+#define VF_SB_CPM_ARQT_ARQT_S 0
+#define VF_SB_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ATQBAH(_VF128) (0x0022EA00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_SB_CPM_ATQBAH_MAX_INDEX 127
+#define VF_SB_CPM_ATQBAH_ATQBAH_S 0
+#define VF_SB_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_SB_CPM_ATQBAL(_VF128) (0x0022E800 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_SB_CPM_ATQBAL_MAX_INDEX 127
+#define VF_SB_CPM_ATQBAL_ATQBAL_S 6
+#define VF_SB_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_SB_CPM_ATQH(_VF128) (0x0022EE00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_SB_CPM_ATQH_MAX_INDEX 127
+#define VF_SB_CPM_ATQH_ATQH_S 0
+#define VF_SB_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ATQLEN(_VF128) (0x0022EC00 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: PFR */
+#define VF_SB_CPM_ATQLEN_MAX_INDEX 127
+#define VF_SB_CPM_ATQLEN_ATQLEN_S 0
+#define VF_SB_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ATQLEN_ATQVFE_S 28
+#define VF_SB_CPM_ATQLEN_ATQVFE_M BIT(28)
+#define VF_SB_CPM_ATQLEN_ATQOVFL_S 29
+#define VF_SB_CPM_ATQLEN_ATQOVFL_M BIT(29)
+#define VF_SB_CPM_ATQLEN_ATQCRIT_S 30
+#define VF_SB_CPM_ATQLEN_ATQCRIT_M BIT(30)
+#define VF_SB_CPM_ATQLEN_ATQENABLE_S 31
+#define VF_SB_CPM_ATQLEN_ATQENABLE_M BIT(31)
+#define VF_SB_CPM_ATQT(_VF128) (0x0022F000 + ((_VF128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VF_SB_CPM_ATQT_MAX_INDEX 127
+#define VF_SB_CPM_ATQT_ATQT_S 0
+#define VF_SB_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_REM_DEV_CTL 0x002300EC /* Reset Source: CORER */
+#define VF_SB_CPM_REM_DEV_CTL_DEST_EN_S 0
+#define VF_SB_CPM_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0)
+#define VP_MBX_CPM_PF_VF_CTRL(_VP128) (0x00231800 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VP_MBX_CPM_PF_VF_CTRL_MAX_INDEX 127
+#define VP_MBX_CPM_PF_VF_CTRL_QUEUE_EN_S 0
+#define VP_MBX_CPM_PF_VF_CTRL_QUEUE_EN_M BIT(0)
+#define VP_MBX_HLP_PF_VF_CTRL(_VP16) (0x00231A00 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VP_MBX_HLP_PF_VF_CTRL_MAX_INDEX 15
+#define VP_MBX_HLP_PF_VF_CTRL_QUEUE_EN_S 0
+#define VP_MBX_HLP_PF_VF_CTRL_QUEUE_EN_M BIT(0)
+#define VP_MBX_PF_VF_CTRL(_VSI) (0x00230800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VP_MBX_PF_VF_CTRL_MAX_INDEX 767
+#define VP_MBX_PF_VF_CTRL_QUEUE_EN_S 0
+#define VP_MBX_PF_VF_CTRL_QUEUE_EN_M BIT(0)
+#define VP_MBX_PSM_PF_VF_CTRL(_VP16) (0x00231A40 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VP_MBX_PSM_PF_VF_CTRL_MAX_INDEX 15
+#define VP_MBX_PSM_PF_VF_CTRL_QUEUE_EN_S 0
+#define VP_MBX_PSM_PF_VF_CTRL_QUEUE_EN_M BIT(0)
+#define VP_SB_CPM_PF_VF_CTRL(_VP128) (0x00231C00 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VP_SB_CPM_PF_VF_CTRL_MAX_INDEX 127
+#define VP_SB_CPM_PF_VF_CTRL_QUEUE_EN_S 0
+#define VP_SB_CPM_PF_VF_CTRL_QUEUE_EN_M BIT(0)
+#define GL_DCB_TDSCP2TC_BLOCK_DIS 0x00049218 /* Reset Source: CORER */
+#define GL_DCB_TDSCP2TC_BLOCK_DIS_DSCP2TC_BLOCK_DIS_S 0
+#define GL_DCB_TDSCP2TC_BLOCK_DIS_DSCP2TC_BLOCK_DIS_M BIT(0)
+#define GL_DCB_TDSCP2TC_BLOCK_IPV4(_i) (0x00049018 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GL_DCB_TDSCP2TC_BLOCK_IPV4_MAX_INDEX 63
+#define GL_DCB_TDSCP2TC_BLOCK_IPV4_TC_BLOCK_LUT_S 0
+#define GL_DCB_TDSCP2TC_BLOCK_IPV4_TC_BLOCK_LUT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_DCB_TDSCP2TC_BLOCK_IPV6(_i) (0x00049118 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GL_DCB_TDSCP2TC_BLOCK_IPV6_MAX_INDEX 63
+#define GL_DCB_TDSCP2TC_BLOCK_IPV6_TC_BLOCK_LUT_S 0
+#define GL_DCB_TDSCP2TC_BLOCK_IPV6_TC_BLOCK_LUT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_GENC 0x00083044 /* Reset Source: CORER */
+#define GLDCB_GENC_PCIRTT_S 0
+#define GLDCB_GENC_PCIRTT_M MAKEMASK(0xFFFF, 0)
+#define GLDCB_PRS_RETSTCC(_i) (0x002000B0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLDCB_PRS_RETSTCC_MAX_INDEX 31
+#define GLDCB_PRS_RETSTCC_BWSHARE_S 0
+#define GLDCB_PRS_RETSTCC_BWSHARE_M MAKEMASK(0x7F, 0)
+#define GLDCB_PRS_RETSTCC_ETSTC_S 31
+#define GLDCB_PRS_RETSTCC_ETSTC_M BIT(31)
+#define GLDCB_PRS_RSPMC 0x00200160 /* Reset Source: CORER */
+#define GLDCB_PRS_RSPMC_RSPM_S 0
+#define GLDCB_PRS_RSPMC_RSPM_M MAKEMASK(0xFF, 0)
+#define GLDCB_PRS_RSPMC_RPM_MODE_S 8
+#define GLDCB_PRS_RSPMC_RPM_MODE_M MAKEMASK(0x3, 8)
+#define GLDCB_PRS_RSPMC_PRR_MAX_EXP_S 10
+#define GLDCB_PRS_RSPMC_PRR_MAX_EXP_M MAKEMASK(0xF, 10)
+#define GLDCB_PRS_RSPMC_PFCTIMER_S 14
+#define GLDCB_PRS_RSPMC_PFCTIMER_M MAKEMASK(0x3FFF, 14)
+#define GLDCB_PRS_RSPMC_RPM_DIS_S 31
+#define GLDCB_PRS_RSPMC_RPM_DIS_M BIT(31)
+#define GLDCB_RETSTCC(_i) (0x00122140 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLDCB_RETSTCC_MAX_INDEX 31
+#define GLDCB_RETSTCC_BWSHARE_S 0
+#define GLDCB_RETSTCC_BWSHARE_M MAKEMASK(0x7F, 0)
+#define GLDCB_RETSTCC_ETSTC_S 31
+#define GLDCB_RETSTCC_ETSTC_M BIT(31)
+#define GLDCB_RETSTCS(_i) (0x001221C0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLDCB_RETSTCS_MAX_INDEX 31
+#define GLDCB_RETSTCS_CREDITS_S 0
+#define GLDCB_RETSTCS_CREDITS_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_RTC2PFC_RCB 0x00122100 /* Reset Source: CORER */
+#define GLDCB_RTC2PFC_RCB_TC2PFC_S 0
+#define GLDCB_RTC2PFC_RCB_TC2PFC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_SWT_RETSTCC(_i) (0x0020A040 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLDCB_SWT_RETSTCC_MAX_INDEX 31
+#define GLDCB_SWT_RETSTCC_BWSHARE_S 0
+#define GLDCB_SWT_RETSTCC_BWSHARE_M MAKEMASK(0x7F, 0)
+#define GLDCB_SWT_RETSTCC_ETSTC_S 31
+#define GLDCB_SWT_RETSTCC_ETSTC_M BIT(31)
+#define GLDCB_TC2PFC 0x001D2694 /* Reset Source: CORER */
+#define GLDCB_TC2PFC_TC2PFC_S 0
+#define GLDCB_TC2PFC_TC2PFC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_TCB_MNG_SP 0x000AE12C /* Reset Source: CORER */
+#define GLDCB_TCB_MNG_SP_MNG_SP_S 0
+#define GLDCB_TCB_MNG_SP_MNG_SP_M BIT(0)
+#define GLDCB_TCB_TCLL_CFG 0x000AE134 /* Reset Source: CORER */
+#define GLDCB_TCB_TCLL_CFG_LLTC_S 0
+#define GLDCB_TCB_TCLL_CFG_LLTC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_TCB_WB_SP 0x000AE310 /* Reset Source: CORER */
+#define GLDCB_TCB_WB_SP_WB_SP_S 0
+#define GLDCB_TCB_WB_SP_WB_SP_M BIT(0)
+#define GLDCB_TCUPM_IMM_EN 0x000BC824 /* Reset Source: CORER */
+#define GLDCB_TCUPM_IMM_EN_IMM_EN_S 0
+#define GLDCB_TCUPM_IMM_EN_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_TCUPM_LEGACY_TC 0x000BC828 /* Reset Source: CORER */
+#define GLDCB_TCUPM_LEGACY_TC_LEGTC_S 0
+#define GLDCB_TCUPM_LEGACY_TC_LEGTC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_TCUPM_NO_EXCEED_DIS 0x000BC830 /* Reset Source: CORER */
+#define GLDCB_TCUPM_NO_EXCEED_DIS_NON_EXCEED_DIS_S 0
+#define GLDCB_TCUPM_NO_EXCEED_DIS_NON_EXCEED_DIS_M BIT(0)
+#define GLDCB_TCUPM_WB_DIS 0x000BC834 /* Reset Source: CORER */
+#define GLDCB_TCUPM_WB_DIS_PORT_DISABLE_S 0
+#define GLDCB_TCUPM_WB_DIS_PORT_DISABLE_M BIT(0)
+#define GLDCB_TCUPM_WB_DIS_TC_DISABLE_S 1
+#define GLDCB_TCUPM_WB_DIS_TC_DISABLE_M BIT(1)
+#define GLDCB_TFPFCI 0x0009949C /* Reset Source: CORER */
+#define GLDCB_TFPFCI_GLDCB_TFPFCI_S 0
+#define GLDCB_TFPFCI_GLDCB_TFPFCI_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_TLPM_IMM_TCB 0x000A0190 /* Reset Source: CORER */
+#define GLDCB_TLPM_IMM_TCB_IMM_EN_S 0
+#define GLDCB_TLPM_IMM_TCB_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_TLPM_IMM_TCUPM 0x000A018C /* Reset Source: CORER */
+#define GLDCB_TLPM_IMM_TCUPM_IMM_EN_S 0
+#define GLDCB_TLPM_IMM_TCUPM_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_TLPM_PCI_DM 0x000A0180 /* Reset Source: CORER */
+#define GLDCB_TLPM_PCI_DM_MONITOR_S 0
+#define GLDCB_TLPM_PCI_DM_MONITOR_M MAKEMASK(0x7FFFF, 0)
+#define GLDCB_TLPM_PCI_DTHR 0x000A0184 /* Reset Source: CORER */
+#define GLDCB_TLPM_PCI_DTHR_PCI_TDATA_S 0
+#define GLDCB_TLPM_PCI_DTHR_PCI_TDATA_M MAKEMASK(0xFFF, 0)
+#define GLDCB_TPB_IMM_TLPM 0x00099468 /* Reset Source: CORER */
+#define GLDCB_TPB_IMM_TLPM_IMM_EN_S 0
+#define GLDCB_TPB_IMM_TLPM_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_TPB_IMM_TPB 0x0009946C /* Reset Source: CORER */
+#define GLDCB_TPB_IMM_TPB_IMM_EN_S 0
+#define GLDCB_TPB_IMM_TPB_IMM_EN_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_TPB_TCLL_CFG 0x00099464 /* Reset Source: CORER */
+#define GLDCB_TPB_TCLL_CFG_LLTC_S 0
+#define GLDCB_TPB_TCLL_CFG_LLTC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTCB_BULK_DWRR_REG_QUANTA 0x000AE0E0 /* Reset Source: CORER */
+#define GLTCB_BULK_DWRR_REG_QUANTA_QUANTA_S 0
+#define GLTCB_BULK_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define GLTCB_BULK_DWRR_REG_SAT 0x000AE0F0 /* Reset Source: CORER */
+#define GLTCB_BULK_DWRR_REG_SAT_SATURATION_S 0
+#define GLTCB_BULK_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define GLTCB_BULK_DWRR_WB_QUANTA 0x000AE0E4 /* Reset Source: CORER */
+#define GLTCB_BULK_DWRR_WB_QUANTA_QUANTA_S 0
+#define GLTCB_BULK_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define GLTCB_BULK_DWRR_WB_SAT 0x000AE0F4 /* Reset Source: CORER */
+#define GLTCB_BULK_DWRR_WB_SAT_SATURATION_S 0
+#define GLTCB_BULK_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define GLTCB_CREDIT_EXP_CTL 0x000AE120 /* Reset Source: CORER */
+#define GLTCB_CREDIT_EXP_CTL_EN_S 0
+#define GLTCB_CREDIT_EXP_CTL_EN_M BIT(0)
+#define GLTCB_CREDIT_EXP_CTL_MIN_PKT_S 1
+#define GLTCB_CREDIT_EXP_CTL_MIN_PKT_M MAKEMASK(0x1FF, 1)
+#define GLTCB_LL_DWRR_REG_QUANTA 0x000AE0E8 /* Reset Source: CORER */
+#define GLTCB_LL_DWRR_REG_QUANTA_QUANTA_S 0
+#define GLTCB_LL_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define GLTCB_LL_DWRR_REG_SAT 0x000AE0F8 /* Reset Source: CORER */
+#define GLTCB_LL_DWRR_REG_SAT_SATURATION_S 0
+#define GLTCB_LL_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define GLTCB_LL_DWRR_WB_QUANTA 0x000AE0EC /* Reset Source: CORER */
+#define GLTCB_LL_DWRR_WB_QUANTA_QUANTA_S 0
+#define GLTCB_LL_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define GLTCB_LL_DWRR_WB_SAT 0x000AE0FC /* Reset Source: CORER */
+#define GLTCB_LL_DWRR_WB_SAT_SATURATION_S 0
+#define GLTCB_LL_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define GLTCB_WB_RL 0x000AE238 /* Reset Source: CORER */
+#define GLTCB_WB_RL_PERIOD_S 0
+#define GLTCB_WB_RL_PERIOD_M MAKEMASK(0xFFFF, 0)
+#define GLTCB_WB_RL_EN_S 16
+#define GLTCB_WB_RL_EN_M BIT(16)
+#define GLTPB_WB_RL 0x00099460 /* Reset Source: CORER */
+#define GLTPB_WB_RL_PERIOD_S 0
+#define GLTPB_WB_RL_PERIOD_M MAKEMASK(0xFFFF, 0)
+#define GLTPB_WB_RL_EN_S 16
+#define GLTPB_WB_RL_EN_M BIT(16)
+#define PRTDCB_FCCFG 0x001E4640 /* Reset Source: GLOBR */
+#define PRTDCB_FCCFG_TFCE_S 3
+#define PRTDCB_FCCFG_TFCE_M MAKEMASK(0x3, 3)
+#define PRTDCB_FCRTV 0x001E4600 /* Reset Source: GLOBR */
+#define PRTDCB_FCRTV_FC_REFRESH_TH_S 0
+#define PRTDCB_FCRTV_FC_REFRESH_TH_M MAKEMASK(0xFFFF, 0)
+#define PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: GLOBR */
+#define PRTDCB_FCTTVN_MAX_INDEX 3
+#define PRTDCB_FCTTVN_TTV_2N_S 0
+#define PRTDCB_FCTTVN_TTV_2N_M MAKEMASK(0xFFFF, 0)
+#define PRTDCB_FCTTVN_TTV_2N_P1_S 16
+#define PRTDCB_FCTTVN_TTV_2N_P1_M MAKEMASK(0xFFFF, 16)
+#define PRTDCB_GENC 0x00083000 /* Reset Source: CORER */
+#define PRTDCB_GENC_NUMTC_S 2
+#define PRTDCB_GENC_NUMTC_M MAKEMASK(0xF, 2)
+#define PRTDCB_GENC_FCOEUP_S 6
+#define PRTDCB_GENC_FCOEUP_M MAKEMASK(0x7, 6)
+#define PRTDCB_GENC_FCOEUP_VALID_S 9
+#define PRTDCB_GENC_FCOEUP_VALID_M BIT(9)
+#define PRTDCB_GENC_PFCLDA_S 16
+#define PRTDCB_GENC_PFCLDA_M MAKEMASK(0xFFFF, 16)
+#define PRTDCB_GENS 0x00083020 /* Reset Source: CORER */
+#define PRTDCB_GENS_DCBX_STATUS_S 0
+#define PRTDCB_GENS_DCBX_STATUS_M MAKEMASK(0x7, 0)
+#define PRTDCB_PRS_RETSC 0x002001A0 /* Reset Source: CORER */
+#define PRTDCB_PRS_RETSC_ETS_MODE_S 0
+#define PRTDCB_PRS_RETSC_ETS_MODE_M BIT(0)
+#define PRTDCB_PRS_RETSC_NON_ETS_MODE_S 1
+#define PRTDCB_PRS_RETSC_NON_ETS_MODE_M BIT(1)
+#define PRTDCB_PRS_RETSC_ETS_MAX_EXP_S 2
+#define PRTDCB_PRS_RETSC_ETS_MAX_EXP_M MAKEMASK(0xF, 2)
+#define PRTDCB_PRS_RPRRC 0x00200180 /* Reset Source: CORER */
+#define PRTDCB_PRS_RPRRC_BWSHARE_S 0
+#define PRTDCB_PRS_RPRRC_BWSHARE_M MAKEMASK(0x3FF, 0)
+#define PRTDCB_PRS_RPRRC_BWSHARE_DIS_S 31
+#define PRTDCB_PRS_RPRRC_BWSHARE_DIS_M BIT(31)
+#define PRTDCB_RETSC 0x001222A0 /* Reset Source: CORER */
+#define PRTDCB_RETSC_ETS_MODE_S 0
+#define PRTDCB_RETSC_ETS_MODE_M BIT(0)
+#define PRTDCB_RETSC_NON_ETS_MODE_S 1
+#define PRTDCB_RETSC_NON_ETS_MODE_M BIT(1)
+#define PRTDCB_RETSC_ETS_MAX_EXP_S 2
+#define PRTDCB_RETSC_ETS_MAX_EXP_M MAKEMASK(0xF, 2)
+#define PRTDCB_RPRRC 0x001220C0 /* Reset Source: CORER */
+#define PRTDCB_RPRRC_BWSHARE_S 0
+#define PRTDCB_RPRRC_BWSHARE_M MAKEMASK(0x3FF, 0)
+#define PRTDCB_RPRRC_BWSHARE_DIS_S 31
+#define PRTDCB_RPRRC_BWSHARE_DIS_M BIT(31)
+#define PRTDCB_RPRRS 0x001220E0 /* Reset Source: CORER */
+#define PRTDCB_RPRRS_CREDITS_S 0
+#define PRTDCB_RPRRS_CREDITS_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTDCB_RUP_TDPU 0x00040960 /* Reset Source: CORER */
+#define PRTDCB_RUP_TDPU_NOVLANUP_S 0
+#define PRTDCB_RUP_TDPU_NOVLANUP_M MAKEMASK(0x7, 0)
+#define PRTDCB_RUP2TC 0x001D2640 /* Reset Source: CORER */
+#define PRTDCB_RUP2TC_UP0TC_S 0
+#define PRTDCB_RUP2TC_UP0TC_M MAKEMASK(0x7, 0)
+#define PRTDCB_RUP2TC_UP1TC_S 3
+#define PRTDCB_RUP2TC_UP1TC_M MAKEMASK(0x7, 3)
+#define PRTDCB_RUP2TC_UP2TC_S 6
+#define PRTDCB_RUP2TC_UP2TC_M MAKEMASK(0x7, 6)
+#define PRTDCB_RUP2TC_UP3TC_S 9
+#define PRTDCB_RUP2TC_UP3TC_M MAKEMASK(0x7, 9)
+#define PRTDCB_RUP2TC_UP4TC_S 12
+#define PRTDCB_RUP2TC_UP4TC_M MAKEMASK(0x7, 12)
+#define PRTDCB_RUP2TC_UP5TC_S 15
+#define PRTDCB_RUP2TC_UP5TC_M MAKEMASK(0x7, 15)
+#define PRTDCB_RUP2TC_UP6TC_S 18
+#define PRTDCB_RUP2TC_UP6TC_M MAKEMASK(0x7, 18)
+#define PRTDCB_RUP2TC_UP7TC_S 21
+#define PRTDCB_RUP2TC_UP7TC_M MAKEMASK(0x7, 21)
+#define PRTDCB_SWT_RETSC 0x0020A140 /* Reset Source: CORER */
+#define PRTDCB_SWT_RETSC_ETS_MODE_S 0
+#define PRTDCB_SWT_RETSC_ETS_MODE_M BIT(0)
+#define PRTDCB_SWT_RETSC_NON_ETS_MODE_S 1
+#define PRTDCB_SWT_RETSC_NON_ETS_MODE_M BIT(1)
+#define PRTDCB_SWT_RETSC_ETS_MAX_EXP_S 2
+#define PRTDCB_SWT_RETSC_ETS_MAX_EXP_M MAKEMASK(0xF, 2)
+#define PRTDCB_TCB_DWRR_CREDITS 0x000AE000 /* Reset Source: CORER */
+#define PRTDCB_TCB_DWRR_CREDITS_CREDITS_S 0
+#define PRTDCB_TCB_DWRR_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define PRTDCB_TCB_DWRR_QUANTA 0x000AE020 /* Reset Source: CORER */
+#define PRTDCB_TCB_DWRR_QUANTA_QUANTA_S 0
+#define PRTDCB_TCB_DWRR_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define PRTDCB_TCB_DWRR_SAT 0x000AE040 /* Reset Source: CORER */
+#define PRTDCB_TCB_DWRR_SAT_SATURATION_S 0
+#define PRTDCB_TCB_DWRR_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define PRTDCB_TCUPM_NO_EXCEED_DM 0x000BC3C0 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_NO_EXCEED_DM_MONITOR_S 0
+#define PRTDCB_TCUPM_NO_EXCEED_DM_MONITOR_M MAKEMASK(0x7FFFF, 0)
+#define PRTDCB_TCUPM_REG_CM 0x000BC360 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_REG_CM_MONITOR_S 0
+#define PRTDCB_TCUPM_REG_CM_MONITOR_M MAKEMASK(0x7FFF, 0)
+#define PRTDCB_TCUPM_REG_CTHR 0x000BC380 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_H_S 0
+#define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_H_M MAKEMASK(0x7FFF, 0)
+#define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_L_S 15
+#define PRTDCB_TCUPM_REG_CTHR_PORTOFFTH_L_M MAKEMASK(0x7FFF, 15)
+#define PRTDCB_TCUPM_REG_DM 0x000BC3A0 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_REG_DM_MONITOR_S 0
+#define PRTDCB_TCUPM_REG_DM_MONITOR_M MAKEMASK(0x7FFFF, 0)
+#define PRTDCB_TCUPM_REG_DTHR 0x000BC3E0 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_H_S 0
+#define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_H_M MAKEMASK(0xFFF, 0)
+#define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_L_S 12
+#define PRTDCB_TCUPM_REG_DTHR_PORTOFFTH_L_M MAKEMASK(0xFFF, 12)
+#define PRTDCB_TCUPM_REG_PE_HB_DM 0x000BC400 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_REG_PE_HB_DM_MONITOR_S 0
+#define PRTDCB_TCUPM_REG_PE_HB_DM_MONITOR_M MAKEMASK(0xFFF, 0)
+#define PRTDCB_TCUPM_REG_PE_HB_DTHR 0x000BC420 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_H_S 0
+#define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_H_M MAKEMASK(0xFFF, 0)
+#define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_L_S 12
+#define PRTDCB_TCUPM_REG_PE_HB_DTHR_PORTOFFTH_L_M MAKEMASK(0xFFF, 12)
+#define PRTDCB_TCUPM_WAIT_PFC_CM 0x000BC440 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_WAIT_PFC_CM_MONITOR_S 0
+#define PRTDCB_TCUPM_WAIT_PFC_CM_MONITOR_M MAKEMASK(0x7FFF, 0)
+#define PRTDCB_TCUPM_WAIT_PFC_CTHR 0x000BC460 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_WAIT_PFC_CTHR_PORTOFFTH_S 0
+#define PRTDCB_TCUPM_WAIT_PFC_CTHR_PORTOFFTH_M MAKEMASK(0x7FFF, 0)
+#define PRTDCB_TCUPM_WAIT_PFC_DM 0x000BC480 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_WAIT_PFC_DM_MONITOR_S 0
+#define PRTDCB_TCUPM_WAIT_PFC_DM_MONITOR_M MAKEMASK(0x7FFFF, 0)
+#define PRTDCB_TCUPM_WAIT_PFC_DTHR 0x000BC4A0 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_WAIT_PFC_DTHR_PORTOFFTH_S 0
+#define PRTDCB_TCUPM_WAIT_PFC_DTHR_PORTOFFTH_M MAKEMASK(0xFFF, 0)
+#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DM 0x000BC4C0 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DM_MONITOR_S 0
+#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DM_MONITOR_M MAKEMASK(0xFFF, 0)
+#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DTHR 0x000BC4E0 /* Reset Source: CORER */
+#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DTHR_PORTOFFTH_S 0
+#define PRTDCB_TCUPM_WAIT_PFC_PE_HB_DTHR_PORTOFFTH_M MAKEMASK(0xFFF, 0)
+#define PRTDCB_TDPUC 0x00040940 /* Reset Source: CORER */
+#define PRTDCB_TDPUC_MAX_TXFRAME_S 0
+#define PRTDCB_TDPUC_MAX_TXFRAME_M MAKEMASK(0xFFFF, 0)
+#define PRTDCB_TDPUC_MAL_LENGTH_S 16
+#define PRTDCB_TDPUC_MAL_LENGTH_M BIT(16)
+#define PRTDCB_TDPUC_MAL_CMD_S 17
+#define PRTDCB_TDPUC_MAL_CMD_M BIT(17)
+#define PRTDCB_TDPUC_TTL_DROP_S 18
+#define PRTDCB_TDPUC_TTL_DROP_M BIT(18)
+#define PRTDCB_TDPUC_UR_DROP_S 19
+#define PRTDCB_TDPUC_UR_DROP_M BIT(19)
+#define PRTDCB_TDPUC_DUMMY_S 20
+#define PRTDCB_TDPUC_DUMMY_M BIT(20)
+#define PRTDCB_TDPUC_BIG_PKT_SIZE_S 21
+#define PRTDCB_TDPUC_BIG_PKT_SIZE_M BIT(21)
+#define PRTDCB_TDPUC_L2_ACCEPT_FAIL_S 22
+#define PRTDCB_TDPUC_L2_ACCEPT_FAIL_M BIT(22)
+#define PRTDCB_TDPUC_DSCP_CHECK_FAIL_S 23
+#define PRTDCB_TDPUC_DSCP_CHECK_FAIL_M BIT(23)
+#define PRTDCB_TDPUC_RCU_ANTISPOOF_S 24
+#define PRTDCB_TDPUC_RCU_ANTISPOOF_M BIT(24)
+#define PRTDCB_TDPUC_NIC_DSI_S 25
+#define PRTDCB_TDPUC_NIC_DSI_M BIT(25)
+#define PRTDCB_TDPUC_NIC_IPSEC_S 26
+#define PRTDCB_TDPUC_NIC_IPSEC_M BIT(26)
+#define PRTDCB_TDPUC_CLEAR_DROP_S 31
+#define PRTDCB_TDPUC_CLEAR_DROP_M BIT(31)
+#define PRTDCB_TFCS 0x001E4560 /* Reset Source: GLOBR */
+#define PRTDCB_TFCS_TXOFF_S 0
+#define PRTDCB_TFCS_TXOFF_M BIT(0)
+#define PRTDCB_TFCS_TXOFF0_S 8
+#define PRTDCB_TFCS_TXOFF0_M BIT(8)
+#define PRTDCB_TFCS_TXOFF1_S 9
+#define PRTDCB_TFCS_TXOFF1_M BIT(9)
+#define PRTDCB_TFCS_TXOFF2_S 10
+#define PRTDCB_TFCS_TXOFF2_M BIT(10)
+#define PRTDCB_TFCS_TXOFF3_S 11
+#define PRTDCB_TFCS_TXOFF3_M BIT(11)
+#define PRTDCB_TFCS_TXOFF4_S 12
+#define PRTDCB_TFCS_TXOFF4_M BIT(12)
+#define PRTDCB_TFCS_TXOFF5_S 13
+#define PRTDCB_TFCS_TXOFF5_M BIT(13)
+#define PRTDCB_TFCS_TXOFF6_S 14
+#define PRTDCB_TFCS_TXOFF6_M BIT(14)
+#define PRTDCB_TFCS_TXOFF7_S 15
+#define PRTDCB_TFCS_TXOFF7_M BIT(15)
+#define PRTDCB_TLPM_REG_DM 0x000A0000 /* Reset Source: CORER */
+#define PRTDCB_TLPM_REG_DM_MONITOR_S 0
+#define PRTDCB_TLPM_REG_DM_MONITOR_M MAKEMASK(0x7FFFF, 0)
+#define PRTDCB_TLPM_REG_DTHR 0x000A0020 /* Reset Source: CORER */
+#define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_H_S 0
+#define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_H_M MAKEMASK(0xFFF, 0)
+#define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_L_S 12
+#define PRTDCB_TLPM_REG_DTHR_PORTOFFTH_L_M MAKEMASK(0xFFF, 12)
+#define PRTDCB_TLPM_WAIT_PFC_DM 0x000A0040 /* Reset Source: CORER */
+#define PRTDCB_TLPM_WAIT_PFC_DM_MONITOR_S 0
+#define PRTDCB_TLPM_WAIT_PFC_DM_MONITOR_M MAKEMASK(0x7FFFF, 0)
+#define PRTDCB_TLPM_WAIT_PFC_DTHR 0x000A0060 /* Reset Source: CORER */
+#define PRTDCB_TLPM_WAIT_PFC_DTHR_PORTOFFTH_S 0
+#define PRTDCB_TLPM_WAIT_PFC_DTHR_PORTOFFTH_M MAKEMASK(0xFFF, 0)
+#define PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
+#define PRTDCB_TPFCTS_MAX_INDEX 7
+#define PRTDCB_TPFCTS_PFCTIMER_S 0
+#define PRTDCB_TPFCTS_PFCTIMER_M MAKEMASK(0x3FFF, 0)
+#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */
+#define PRTDCB_TUP2TC_UP0TC_S 0
+#define PRTDCB_TUP2TC_UP0TC_M MAKEMASK(0x7, 0)
+#define PRTDCB_TUP2TC_UP1TC_S 3
+#define PRTDCB_TUP2TC_UP1TC_M MAKEMASK(0x7, 3)
+#define PRTDCB_TUP2TC_UP2TC_S 6
+#define PRTDCB_TUP2TC_UP2TC_M MAKEMASK(0x7, 6)
+#define PRTDCB_TUP2TC_UP3TC_S 9
+#define PRTDCB_TUP2TC_UP3TC_M MAKEMASK(0x7, 9)
+#define PRTDCB_TUP2TC_UP4TC_S 12
+#define PRTDCB_TUP2TC_UP4TC_M MAKEMASK(0x7, 12)
+#define PRTDCB_TUP2TC_UP5TC_S 15
+#define PRTDCB_TUP2TC_UP5TC_M MAKEMASK(0x7, 15)
+#define PRTDCB_TUP2TC_UP6TC_S 18
+#define PRTDCB_TUP2TC_UP6TC_M MAKEMASK(0x7, 18)
+#define PRTDCB_TUP2TC_UP7TC_S 21
+#define PRTDCB_TUP2TC_UP7TC_M MAKEMASK(0x7, 21)
+#define PRTDCB_TX_DSCP2UP_CTL 0x00040980 /* Reset Source: CORER */
+#define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_S 0
+#define PRTDCB_TX_DSCP2UP_CTL_DSCP2UP_ENA_M BIT(0)
+#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_S 1
+#define PRTDCB_TX_DSCP2UP_CTL_DSCP_DEFAULT_UP_M MAKEMASK(0x7, 1)
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT(_i) (0x000409A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: CORER */
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_MAX_INDEX 7
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_0_S 0
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_0_M MAKEMASK(0x7, 0)
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_1_S 4
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_1_M MAKEMASK(0x7, 4)
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_2_S 8
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_2_M MAKEMASK(0x7, 8)
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_3_S 12
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_3_M MAKEMASK(0x7, 12)
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_4_S 16
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_4_M MAKEMASK(0x7, 16)
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_5_S 20
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_5_M MAKEMASK(0x7, 20)
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_6_S 24
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_6_M MAKEMASK(0x7, 24)
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_7_S 28
+#define PRTDCB_TX_DSCP2UP_IPV4_LUT_DSCP2UP_LUT_7_M MAKEMASK(0x7, 28)
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT(_i) (0x00040AA0 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: CORER */
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_MAX_INDEX 7
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_0_S 0
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_0_M MAKEMASK(0x7, 0)
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_1_S 4
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_1_M MAKEMASK(0x7, 4)
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_2_S 8
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_2_M MAKEMASK(0x7, 8)
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_3_S 12
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_3_M MAKEMASK(0x7, 12)
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_4_S 16
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_4_M MAKEMASK(0x7, 16)
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_5_S 20
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_5_M MAKEMASK(0x7, 20)
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_6_S 24
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_6_M MAKEMASK(0x7, 24)
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_7_S 28
+#define PRTDCB_TX_DSCP2UP_IPV6_LUT_DSCP2UP_LUT_7_M MAKEMASK(0x7, 28)
+#define PRTTCB_BULK_DWRR_REG_CREDITS 0x000AE060 /* Reset Source: CORER */
+#define PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_S 0
+#define PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define PRTTCB_BULK_DWRR_WB_CREDITS 0x000AE080 /* Reset Source: CORER */
+#define PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_S 0
+#define PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define PRTTCB_CREDIT_EXP 0x000AE100 /* Reset Source: CORER */
+#define PRTTCB_CREDIT_EXP_EXPANSION_S 0
+#define PRTTCB_CREDIT_EXP_EXPANSION_M MAKEMASK(0xFF, 0)
+#define PRTTCB_LL_DWRR_REG_CREDITS 0x000AE0A0 /* Reset Source: CORER */
+#define PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_S 0
+#define PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define PRTTCB_LL_DWRR_WB_CREDITS 0x000AE0C0 /* Reset Source: CORER */
+#define PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0
+#define PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define TCDCB_TCUPM_WAIT_CM(_i) (0x000BC520 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCDCB_TCUPM_WAIT_CM_MAX_INDEX 31
+#define TCDCB_TCUPM_WAIT_CM_MONITOR_S 0
+#define TCDCB_TCUPM_WAIT_CM_MONITOR_M MAKEMASK(0x7FFF, 0)
+#define TCDCB_TCUPM_WAIT_CTHR(_i) (0x000BC5A0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCDCB_TCUPM_WAIT_CTHR_MAX_INDEX 31
+#define TCDCB_TCUPM_WAIT_CTHR_TCOFFTH_S 0
+#define TCDCB_TCUPM_WAIT_CTHR_TCOFFTH_M MAKEMASK(0x7FFF, 0)
+#define TCDCB_TCUPM_WAIT_DM(_i) (0x000BC620 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCDCB_TCUPM_WAIT_DM_MAX_INDEX 31
+#define TCDCB_TCUPM_WAIT_DM_MONITOR_S 0
+#define TCDCB_TCUPM_WAIT_DM_MONITOR_M MAKEMASK(0x7FFFF, 0)
+#define TCDCB_TCUPM_WAIT_DTHR(_i) (0x000BC6A0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCDCB_TCUPM_WAIT_DTHR_MAX_INDEX 31
+#define TCDCB_TCUPM_WAIT_DTHR_TCOFFTH_S 0
+#define TCDCB_TCUPM_WAIT_DTHR_TCOFFTH_M MAKEMASK(0xFFF, 0)
+#define TCDCB_TCUPM_WAIT_PE_HB_DM(_i) (0x000BC720 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCDCB_TCUPM_WAIT_PE_HB_DM_MAX_INDEX 31
+#define TCDCB_TCUPM_WAIT_PE_HB_DM_MONITOR_S 0
+#define TCDCB_TCUPM_WAIT_PE_HB_DM_MONITOR_M MAKEMASK(0xFFF, 0)
+#define TCDCB_TCUPM_WAIT_PE_HB_DTHR(_i) (0x000BC7A0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCDCB_TCUPM_WAIT_PE_HB_DTHR_MAX_INDEX 31
+#define TCDCB_TCUPM_WAIT_PE_HB_DTHR_TCOFFTH_S 0
+#define TCDCB_TCUPM_WAIT_PE_HB_DTHR_TCOFFTH_M MAKEMASK(0xFFF, 0)
+#define TCDCB_TLPM_WAIT_DM(_i) (0x000A0080 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCDCB_TLPM_WAIT_DM_MAX_INDEX 31
+#define TCDCB_TLPM_WAIT_DM_MONITOR_S 0
+#define TCDCB_TLPM_WAIT_DM_MONITOR_M MAKEMASK(0x7FFFF, 0)
+#define TCDCB_TLPM_WAIT_DTHR(_i) (0x000A0100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCDCB_TLPM_WAIT_DTHR_MAX_INDEX 31
+#define TCDCB_TLPM_WAIT_DTHR_TCOFFTH_S 0
+#define TCDCB_TLPM_WAIT_DTHR_TCOFFTH_M MAKEMASK(0xFFF, 0)
+#define TCTCB_WB_RL_TC_CFG(_i) (0x000AE138 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCTCB_WB_RL_TC_CFG_MAX_INDEX 31
+#define TCTCB_WB_RL_TC_CFG_TOKENS_S 0
+#define TCTCB_WB_RL_TC_CFG_TOKENS_M MAKEMASK(0xFFF, 0)
+#define TCTCB_WB_RL_TC_CFG_BURST_SIZE_S 12
+#define TCTCB_WB_RL_TC_CFG_BURST_SIZE_M MAKEMASK(0x3FF, 12)
+#define TCTCB_WB_RL_TC_STAT(_i) (0x000AE1B8 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TCTCB_WB_RL_TC_STAT_MAX_INDEX 31
+#define TCTCB_WB_RL_TC_STAT_BUCKET_S 0
+#define TCTCB_WB_RL_TC_STAT_BUCKET_M MAKEMASK(0x1FFFF, 0)
+#define TPB_BULK_DWRR_REG_QUANTA 0x00099340 /* Reset Source: CORER */
+#define TPB_BULK_DWRR_REG_QUANTA_QUANTA_S 0
+#define TPB_BULK_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define TPB_BULK_DWRR_REG_SAT 0x00099350 /* Reset Source: CORER */
+#define TPB_BULK_DWRR_REG_SAT_SATURATION_S 0
+#define TPB_BULK_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define TPB_BULK_DWRR_WB_QUANTA 0x00099344 /* Reset Source: CORER */
+#define TPB_BULK_DWRR_WB_QUANTA_QUANTA_S 0
+#define TPB_BULK_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define TPB_BULK_DWRR_WB_SAT 0x00099354 /* Reset Source: CORER */
+#define TPB_BULK_DWRR_WB_SAT_SATURATION_S 0
+#define TPB_BULK_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define TPB_GLDCB_TCB_WB_SP 0x0009966C /* Reset Source: CORER */
+#define TPB_GLDCB_TCB_WB_SP_WB_SP_S 0
+#define TPB_GLDCB_TCB_WB_SP_WB_SP_M BIT(0)
+#define TPB_GLTCB_CREDIT_EXP_CTL 0x00099664 /* Reset Source: CORER */
+#define TPB_GLTCB_CREDIT_EXP_CTL_EN_S 0
+#define TPB_GLTCB_CREDIT_EXP_CTL_EN_M BIT(0)
+#define TPB_GLTCB_CREDIT_EXP_CTL_MIN_PKT_S 1
+#define TPB_GLTCB_CREDIT_EXP_CTL_MIN_PKT_M MAKEMASK(0x1FF, 1)
+#define TPB_LL_DWRR_REG_QUANTA 0x00099348 /* Reset Source: CORER */
+#define TPB_LL_DWRR_REG_QUANTA_QUANTA_S 0
+#define TPB_LL_DWRR_REG_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define TPB_LL_DWRR_REG_SAT 0x00099358 /* Reset Source: CORER */
+#define TPB_LL_DWRR_REG_SAT_SATURATION_S 0
+#define TPB_LL_DWRR_REG_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define TPB_LL_DWRR_WB_QUANTA 0x0009934C /* Reset Source: CORER */
+#define TPB_LL_DWRR_WB_QUANTA_QUANTA_S 0
+#define TPB_LL_DWRR_WB_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define TPB_LL_DWRR_WB_SAT 0x0009935C /* Reset Source: CORER */
+#define TPB_LL_DWRR_WB_SAT_SATURATION_S 0
+#define TPB_LL_DWRR_WB_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define TPB_PRTDCB_TCB_DWRR_CREDITS 0x000991C0 /* Reset Source: CORER */
+#define TPB_PRTDCB_TCB_DWRR_CREDITS_CREDITS_S 0
+#define TPB_PRTDCB_TCB_DWRR_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define TPB_PRTDCB_TCB_DWRR_QUANTA 0x00099220 /* Reset Source: CORER */
+#define TPB_PRTDCB_TCB_DWRR_QUANTA_QUANTA_S 0
+#define TPB_PRTDCB_TCB_DWRR_QUANTA_QUANTA_M MAKEMASK(0x7FF, 0)
+#define TPB_PRTDCB_TCB_DWRR_SAT 0x00099260 /* Reset Source: CORER */
+#define TPB_PRTDCB_TCB_DWRR_SAT_SATURATION_S 0
+#define TPB_PRTDCB_TCB_DWRR_SAT_SATURATION_M MAKEMASK(0x1FFFF, 0)
+#define TPB_PRTTCB_BULK_DWRR_REG_CREDITS 0x000992A0 /* Reset Source: CORER */
+#define TPB_PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_S 0
+#define TPB_PRTTCB_BULK_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define TPB_PRTTCB_BULK_DWRR_WB_CREDITS 0x000992C0 /* Reset Source: CORER */
+#define TPB_PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_S 0
+#define TPB_PRTTCB_BULK_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define TPB_PRTTCB_CREDIT_EXP 0x00099644 /* Reset Source: CORER */
+#define TPB_PRTTCB_CREDIT_EXP_EXPANSION_S 0
+#define TPB_PRTTCB_CREDIT_EXP_EXPANSION_M MAKEMASK(0xFF, 0)
+#define TPB_PRTTCB_LL_DWRR_REG_CREDITS 0x00099300 /* Reset Source: CORER */
+#define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_S 0
+#define TPB_PRTTCB_LL_DWRR_REG_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define TPB_PRTTCB_LL_DWRR_WB_CREDITS 0x00099320 /* Reset Source: CORER */
+#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_S 0
+#define TPB_PRTTCB_LL_DWRR_WB_CREDITS_CREDITS_M MAKEMASK(0x3FFFF, 0)
+#define TPB_WB_RL_TC_CFG(_i) (0x00099360 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TPB_WB_RL_TC_CFG_MAX_INDEX 31
+#define TPB_WB_RL_TC_CFG_TOKENS_S 0
+#define TPB_WB_RL_TC_CFG_TOKENS_M MAKEMASK(0xFFF, 0)
+#define TPB_WB_RL_TC_CFG_BURST_SIZE_S 12
+#define TPB_WB_RL_TC_CFG_BURST_SIZE_M MAKEMASK(0x3FF, 12)
+#define TPB_WB_RL_TC_STAT(_i) (0x000993E0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define TPB_WB_RL_TC_STAT_MAX_INDEX 31
+#define TPB_WB_RL_TC_STAT_BUCKET_S 0
+#define TPB_WB_RL_TC_STAT_BUCKET_M MAKEMASK(0x1FFFF, 0)
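+/*
+ * Macros that take an (_i) argument describe register arrays; the inline
+ * "_i=0...N" comment and the matching *_MAX_INDEX define give the valid
+ * index range.  A sketch of walking one such array (rd32() again stands in
+ * for the driver's read accessor):
+ *
+ *	u32 i, stat, bucket;
+ *
+ *	for (i = 0; i <= TPB_WB_RL_TC_STAT_MAX_INDEX; i++) {
+ *		stat = rd32(hw, TPB_WB_RL_TC_STAT(i));
+ *		bucket = (stat & TPB_WB_RL_TC_STAT_BUCKET_M) >>
+ *		    TPB_WB_RL_TC_STAT_BUCKET_S;
+ *	}
+ */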
+#define GL_ACLEXT_CDMD_L1SEL(_i) (0x00210054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_CDMD_L1SEL_MAX_INDEX 2
+#define GL_ACLEXT_CDMD_L1SEL_RX_SEL_S 0
+#define GL_ACLEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define GL_ACLEXT_CDMD_L1SEL_TX_SEL_S 8
+#define GL_ACLEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_S 16
+#define GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_S 24
+#define GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_S 30
+#define GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
+#define GL_ACLEXT_CTLTBL_L2ADDR(_i) (0x00210084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_CTLTBL_L2ADDR_MAX_INDEX 2
+#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_S 0
+#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0)
+#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_S 8
+#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8)
+#define GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_S 31
+#define GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_ACLEXT_CTLTBL_L2DATA(_i) (0x00210090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_CTLTBL_L2DATA_MAX_INDEX 2
+#define GL_ACLEXT_CTLTBL_L2DATA_DATA_S 0
+#define GL_ACLEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_ACLEXT_DFLT_L2PRFL(_i) (0x00210138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_DFLT_L2PRFL_MAX_INDEX 2
+#define GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_S 0
+#define GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
+#define GL_ACLEXT_DFLT_L2PRFL_ACL(_i) (0x00393800 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_DFLT_L2PRFL_ACL_MAX_INDEX 2
+#define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_S 0
+#define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
+#define GL_ACLEXT_FLGS_L1SEL0_1(_i) (0x0021006C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_FLGS_L1SEL0_1_MAX_INDEX 2
+#define GL_ACLEXT_FLGS_L1SEL0_1_FLS0_S 0
+#define GL_ACLEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define GL_ACLEXT_FLGS_L1SEL0_1_FLS1_S 16
+#define GL_ACLEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define GL_ACLEXT_FLGS_L1SEL2_3(_i) (0x00210078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_FLGS_L1SEL2_3_MAX_INDEX 2
+#define GL_ACLEXT_FLGS_L1SEL2_3_FLS2_S 0
+#define GL_ACLEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define GL_ACLEXT_FLGS_L1SEL2_3_FLS3_S 16
+#define GL_ACLEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define GL_ACLEXT_FLGS_L1TBL(_i) (0x00210060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_FLGS_L1TBL_MAX_INDEX 2
+#define GL_ACLEXT_FLGS_L1TBL_LSB_S 0
+#define GL_ACLEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0)
+#define GL_ACLEXT_FLGS_L1TBL_MSB_S 16
+#define GL_ACLEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16)
+#define GL_ACLEXT_FORCE_L1CDID(_i) (0x00210018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2
+#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0
+#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
+#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
+#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
+#define GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_FORCE_PID_MAX_INDEX 2
+#define GL_ACLEXT_FORCE_PID_STATIC_PID_S 0
+#define GL_ACLEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0)
+#define GL_ACLEXT_FORCE_PID_STATIC_PID_EN_S 31
+#define GL_ACLEXT_FORCE_PID_STATIC_PID_EN_M BIT(31)
+#define GL_ACLEXT_K2N_L2ADDR(_i) (0x00210144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_K2N_L2ADDR_MAX_INDEX 2
+#define GL_ACLEXT_K2N_L2ADDR_LINE_IDX_S 0
+#define GL_ACLEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0)
+#define GL_ACLEXT_K2N_L2ADDR_AUTO_INC_S 31
+#define GL_ACLEXT_K2N_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_ACLEXT_K2N_L2DATA(_i) (0x00210150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_K2N_L2DATA_MAX_INDEX 2
+#define GL_ACLEXT_K2N_L2DATA_DATA0_S 0
+#define GL_ACLEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define GL_ACLEXT_K2N_L2DATA_DATA1_S 8
+#define GL_ACLEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define GL_ACLEXT_K2N_L2DATA_DATA2_S 16
+#define GL_ACLEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define GL_ACLEXT_K2N_L2DATA_DATA3_S 24
+#define GL_ACLEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define GL_ACLEXT_L2_PMASK0(_i) (0x002100FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_L2_PMASK0_MAX_INDEX 2
+#define GL_ACLEXT_L2_PMASK0_BITMASK_S 0
+#define GL_ACLEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_ACLEXT_L2_PMASK1(_i) (0x00210108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_L2_PMASK1_MAX_INDEX 2
+#define GL_ACLEXT_L2_PMASK1_BITMASK_S 0
+#define GL_ACLEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0)
+#define GL_ACLEXT_L2_TMASK0(_i) (0x00210498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_L2_TMASK0_MAX_INDEX 2
+#define GL_ACLEXT_L2_TMASK0_BITMASK_S 0
+#define GL_ACLEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_ACLEXT_L2_TMASK1(_i) (0x002104A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_L2_TMASK1_MAX_INDEX 2
+#define GL_ACLEXT_L2_TMASK1_BITMASK_S 0
+#define GL_ACLEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0)
+#define GL_ACLEXT_L2BMP0_3(_i) (0x002100A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_L2BMP0_3_MAX_INDEX 2
+#define GL_ACLEXT_L2BMP0_3_BMP0_S 0
+#define GL_ACLEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0)
+#define GL_ACLEXT_L2BMP0_3_BMP1_S 8
+#define GL_ACLEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8)
+#define GL_ACLEXT_L2BMP0_3_BMP2_S 16
+#define GL_ACLEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16)
+#define GL_ACLEXT_L2BMP0_3_BMP3_S 24
+#define GL_ACLEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24)
+#define GL_ACLEXT_L2BMP4_7(_i) (0x002100B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_L2BMP4_7_MAX_INDEX 2
+#define GL_ACLEXT_L2BMP4_7_BMP4_S 0
+#define GL_ACLEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0)
+#define GL_ACLEXT_L2BMP4_7_BMP5_S 8
+#define GL_ACLEXT_L2BMP4_7_BMP5_M MAKEMASK(0xFF, 8)
+#define GL_ACLEXT_L2BMP4_7_BMP6_S 16
+#define GL_ACLEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16)
+#define GL_ACLEXT_L2BMP4_7_BMP7_S 24
+#define GL_ACLEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24)
+#define GL_ACLEXT_L2PRTMOD(_i) (0x0021009C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_L2PRTMOD_MAX_INDEX 2
+#define GL_ACLEXT_L2PRTMOD_XLT1_S 0
+#define GL_ACLEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0)
+#define GL_ACLEXT_L2PRTMOD_XLT2_S 8
+#define GL_ACLEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8)
+#define GL_ACLEXT_N2N_L2ADDR(_i) (0x0021015C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_N2N_L2ADDR_MAX_INDEX 2
+#define GL_ACLEXT_N2N_L2ADDR_LINE_IDX_S 0
+#define GL_ACLEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0)
+#define GL_ACLEXT_N2N_L2ADDR_AUTO_INC_S 31
+#define GL_ACLEXT_N2N_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_ACLEXT_N2N_L2DATA(_i) (0x00210168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_N2N_L2DATA_MAX_INDEX 2
+#define GL_ACLEXT_N2N_L2DATA_DATA0_S 0
+#define GL_ACLEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define GL_ACLEXT_N2N_L2DATA_DATA1_S 8
+#define GL_ACLEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define GL_ACLEXT_N2N_L2DATA_DATA2_S 16
+#define GL_ACLEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define GL_ACLEXT_N2N_L2DATA_DATA3_S 24
+#define GL_ACLEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define GL_ACLEXT_P2P_L1ADDR(_i) (0x00210024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_P2P_L1ADDR_MAX_INDEX 2
+#define GL_ACLEXT_P2P_L1ADDR_LINE_IDX_S 0
+#define GL_ACLEXT_P2P_L1ADDR_LINE_IDX_M BIT(0)
+#define GL_ACLEXT_P2P_L1ADDR_AUTO_INC_S 31
+#define GL_ACLEXT_P2P_L1ADDR_AUTO_INC_M BIT(31)
+#define GL_ACLEXT_P2P_L1DATA(_i) (0x00210030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_P2P_L1DATA_MAX_INDEX 2
+#define GL_ACLEXT_P2P_L1DATA_DATA_S 0
+#define GL_ACLEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_ACLEXT_PID_L2GKTYPE(_i) (0x002100F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_PID_L2GKTYPE_MAX_INDEX 2
+#define GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_S 0
+#define GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0)
+#define GL_ACLEXT_PLVL_SEL(_i) (0x0021000C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_PLVL_SEL_MAX_INDEX 2
+#define GL_ACLEXT_PLVL_SEL_PLVL_SEL_S 0
+#define GL_ACLEXT_PLVL_SEL_PLVL_SEL_M BIT(0)
+#define GL_ACLEXT_TCAM_L2ADDR(_i) (0x00210114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_TCAM_L2ADDR_MAX_INDEX 2
+#define GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_S 0
+#define GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0)
+#define GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_S 31
+#define GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_ACLEXT_TCAM_L2DATALSB(_i) (0x00210120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_TCAM_L2DATALSB_MAX_INDEX 2
+#define GL_ACLEXT_TCAM_L2DATALSB_DATALSB_S 0
+#define GL_ACLEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_ACLEXT_TCAM_L2DATAMSB(_i) (0x0021012C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_TCAM_L2DATAMSB_MAX_INDEX 2
+#define GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_S 0
+#define GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0)
+#define GL_ACLEXT_XLT0_L1ADDR(_i) (0x0021003C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_XLT0_L1ADDR_MAX_INDEX 2
+#define GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_S 0
+#define GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0)
+#define GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_S 31
+#define GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31)
+#define GL_ACLEXT_XLT0_L1DATA(_i) (0x00210048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_XLT0_L1DATA_MAX_INDEX 2
+#define GL_ACLEXT_XLT0_L1DATA_DATA_S 0
+#define GL_ACLEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_ACLEXT_XLT1_L2ADDR(_i) (0x002100C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_XLT1_L2ADDR_MAX_INDEX 2
+#define GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_S 0
+#define GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0)
+#define GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_S 31
+#define GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_ACLEXT_XLT1_L2DATA(_i) (0x002100CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_XLT1_L2DATA_MAX_INDEX 2
+#define GL_ACLEXT_XLT1_L2DATA_DATA_S 0
+#define GL_ACLEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_ACLEXT_XLT2_L2ADDR(_i) (0x002100D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_XLT2_L2ADDR_MAX_INDEX 2
+#define GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_S 0
+#define GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0)
+#define GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_S 31
+#define GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_ACLEXT_XLT2_L2DATA(_i) (0x002100E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_ACLEXT_XLT2_L2DATA_MAX_INDEX 2
+#define GL_ACLEXT_XLT2_L2DATA_DATA_S 0
+#define GL_ACLEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PREEXT_CDMD_L1SEL(_i) (0x0020F054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_CDMD_L1SEL_MAX_INDEX 2
+#define GL_PREEXT_CDMD_L1SEL_RX_SEL_S 0
+#define GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define GL_PREEXT_CDMD_L1SEL_TX_SEL_S 8
+#define GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_S 16
+#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_S 24
+#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_S 30
+#define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
+#define GL_PREEXT_CTLTBL_L2ADDR(_i) (0x0020F084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_CTLTBL_L2ADDR_MAX_INDEX 2
+#define GL_PREEXT_CTLTBL_L2ADDR_LINE_OFF_S 0
+#define GL_PREEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0)
+#define GL_PREEXT_CTLTBL_L2ADDR_LINE_IDX_S 8
+#define GL_PREEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8)
+#define GL_PREEXT_CTLTBL_L2ADDR_AUTO_INC_S 31
+#define GL_PREEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PREEXT_CTLTBL_L2DATA(_i) (0x0020F090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_CTLTBL_L2DATA_MAX_INDEX 2
+#define GL_PREEXT_CTLTBL_L2DATA_DATA_S 0
+#define GL_PREEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PREEXT_DFLT_L2PRFL(_i) (0x0020F138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_DFLT_L2PRFL_MAX_INDEX 2
+#define GL_PREEXT_DFLT_L2PRFL_DFLT_PRFL_S 0
+#define GL_PREEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
+#define GL_PREEXT_FLGS_L1SEL0_1(_i) (0x0020F06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_FLGS_L1SEL0_1_MAX_INDEX 2
+#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_S 0
+#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_S 16
+#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define GL_PREEXT_FLGS_L1SEL2_3(_i) (0x0020F078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_FLGS_L1SEL2_3_MAX_INDEX 2
+#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_S 0
+#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_S 16
+#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define GL_PREEXT_FLGS_L1TBL(_i) (0x0020F060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_FLGS_L1TBL_MAX_INDEX 2
+#define GL_PREEXT_FLGS_L1TBL_LSB_S 0
+#define GL_PREEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0)
+#define GL_PREEXT_FLGS_L1TBL_MSB_S 16
+#define GL_PREEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16)
+#define GL_PREEXT_FORCE_L1CDID(_i) (0x0020F018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_FORCE_L1CDID_MAX_INDEX 2
+#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_S 0
+#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
+#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
+#define GL_PREEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
+#define GL_PREEXT_FORCE_PID(_i) (0x0020F000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_FORCE_PID_MAX_INDEX 2
+#define GL_PREEXT_FORCE_PID_STATIC_PID_S 0
+#define GL_PREEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0)
+#define GL_PREEXT_FORCE_PID_STATIC_PID_EN_S 31
+#define GL_PREEXT_FORCE_PID_STATIC_PID_EN_M BIT(31)
+#define GL_PREEXT_K2N_L2ADDR(_i) (0x0020F144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_K2N_L2ADDR_MAX_INDEX 2
+#define GL_PREEXT_K2N_L2ADDR_LINE_IDX_S 0
+#define GL_PREEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0)
+#define GL_PREEXT_K2N_L2ADDR_AUTO_INC_S 31
+#define GL_PREEXT_K2N_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PREEXT_K2N_L2DATA(_i) (0x0020F150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_K2N_L2DATA_MAX_INDEX 2
+#define GL_PREEXT_K2N_L2DATA_DATA0_S 0
+#define GL_PREEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define GL_PREEXT_K2N_L2DATA_DATA1_S 8
+#define GL_PREEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define GL_PREEXT_K2N_L2DATA_DATA2_S 16
+#define GL_PREEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define GL_PREEXT_K2N_L2DATA_DATA3_S 24
+#define GL_PREEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_L2_PMASK0_MAX_INDEX 2
+#define GL_PREEXT_L2_PMASK0_BITMASK_S 0
+#define GL_PREEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_L2_PMASK1_MAX_INDEX 2
+#define GL_PREEXT_L2_PMASK1_BITMASK_S 0
+#define GL_PREEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0)
+#define GL_PREEXT_L2_TMASK0(_i) (0x0020F498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_L2_TMASK0_MAX_INDEX 2
+#define GL_PREEXT_L2_TMASK0_BITMASK_S 0
+#define GL_PREEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PREEXT_L2_TMASK1(_i) (0x0020F4A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_L2_TMASK1_MAX_INDEX 2
+#define GL_PREEXT_L2_TMASK1_BITMASK_S 0
+#define GL_PREEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0)
+#define GL_PREEXT_L2BMP0_3(_i) (0x0020F0A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_L2BMP0_3_MAX_INDEX 2
+#define GL_PREEXT_L2BMP0_3_BMP0_S 0
+#define GL_PREEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0)
+#define GL_PREEXT_L2BMP0_3_BMP1_S 8
+#define GL_PREEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8)
+#define GL_PREEXT_L2BMP0_3_BMP2_S 16
+#define GL_PREEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16)
+#define GL_PREEXT_L2BMP0_3_BMP3_S 24
+#define GL_PREEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24)
+#define GL_PREEXT_L2BMP4_7(_i) (0x0020F0B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_L2BMP4_7_MAX_INDEX 2
+#define GL_PREEXT_L2BMP4_7_BMP4_S 0
+#define GL_PREEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0)
+#define GL_PREEXT_L2BMP4_7_BMP5_S 8
+#define GL_PREEXT_L2BMP4_7_BMP5_M MAKEMASK(0xFF, 8)
+#define GL_PREEXT_L2BMP4_7_BMP6_S 16
+#define GL_PREEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16)
+#define GL_PREEXT_L2BMP4_7_BMP7_S 24
+#define GL_PREEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24)
+#define GL_PREEXT_L2PRTMOD(_i) (0x0020F09C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_L2PRTMOD_MAX_INDEX 2
+#define GL_PREEXT_L2PRTMOD_XLT1_S 0
+#define GL_PREEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0)
+#define GL_PREEXT_L2PRTMOD_XLT2_S 8
+#define GL_PREEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8)
+#define GL_PREEXT_N2N_L2ADDR(_i) (0x0020F15C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_N2N_L2ADDR_MAX_INDEX 2
+#define GL_PREEXT_N2N_L2ADDR_LINE_IDX_S 0
+#define GL_PREEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0)
+#define GL_PREEXT_N2N_L2ADDR_AUTO_INC_S 31
+#define GL_PREEXT_N2N_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PREEXT_N2N_L2DATA(_i) (0x0020F168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_N2N_L2DATA_MAX_INDEX 2
+#define GL_PREEXT_N2N_L2DATA_DATA0_S 0
+#define GL_PREEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define GL_PREEXT_N2N_L2DATA_DATA1_S 8
+#define GL_PREEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define GL_PREEXT_N2N_L2DATA_DATA2_S 16
+#define GL_PREEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define GL_PREEXT_N2N_L2DATA_DATA3_S 24
+#define GL_PREEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define GL_PREEXT_P2P_L1ADDR(_i) (0x0020F024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_P2P_L1ADDR_MAX_INDEX 2
+#define GL_PREEXT_P2P_L1ADDR_LINE_IDX_S 0
+#define GL_PREEXT_P2P_L1ADDR_LINE_IDX_M BIT(0)
+#define GL_PREEXT_P2P_L1ADDR_AUTO_INC_S 31
+#define GL_PREEXT_P2P_L1ADDR_AUTO_INC_M BIT(31)
+#define GL_PREEXT_P2P_L1DATA(_i) (0x0020F030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_P2P_L1DATA_MAX_INDEX 2
+#define GL_PREEXT_P2P_L1DATA_DATA_S 0
+#define GL_PREEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PREEXT_PID_L2GKTYPE(_i) (0x0020F0F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_PID_L2GKTYPE_MAX_INDEX 2
+#define GL_PREEXT_PID_L2GKTYPE_PID_GKTYPE_S 0
+#define GL_PREEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0)
+#define GL_PREEXT_PLVL_SEL(_i) (0x0020F00C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_PLVL_SEL_MAX_INDEX 2
+#define GL_PREEXT_PLVL_SEL_PLVL_SEL_S 0
+#define GL_PREEXT_PLVL_SEL_PLVL_SEL_M BIT(0)
+#define GL_PREEXT_TCAM_L2ADDR(_i) (0x0020F114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_TCAM_L2ADDR_MAX_INDEX 2
+#define GL_PREEXT_TCAM_L2ADDR_LINE_IDX_S 0
+#define GL_PREEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0)
+#define GL_PREEXT_TCAM_L2ADDR_AUTO_INC_S 31
+#define GL_PREEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PREEXT_TCAM_L2DATALSB(_i) (0x0020F120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_TCAM_L2DATALSB_MAX_INDEX 2
+#define GL_PREEXT_TCAM_L2DATALSB_DATALSB_S 0
+#define GL_PREEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PREEXT_TCAM_L2DATAMSB(_i) (0x0020F12C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_TCAM_L2DATAMSB_MAX_INDEX 2
+#define GL_PREEXT_TCAM_L2DATAMSB_DATAMSB_S 0
+#define GL_PREEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0)
+#define GL_PREEXT_XLT0_L1ADDR(_i) (0x0020F03C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_XLT0_L1ADDR_MAX_INDEX 2
+#define GL_PREEXT_XLT0_L1ADDR_LINE_IDX_S 0
+#define GL_PREEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0)
+#define GL_PREEXT_XLT0_L1ADDR_AUTO_INC_S 31
+#define GL_PREEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31)
+#define GL_PREEXT_XLT0_L1DATA(_i) (0x0020F048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_XLT0_L1DATA_MAX_INDEX 2
+#define GL_PREEXT_XLT0_L1DATA_DATA_S 0
+#define GL_PREEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PREEXT_XLT1_L2ADDR(_i) (0x0020F0C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_XLT1_L2ADDR_MAX_INDEX 2
+#define GL_PREEXT_XLT1_L2ADDR_LINE_IDX_S 0
+#define GL_PREEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0)
+#define GL_PREEXT_XLT1_L2ADDR_AUTO_INC_S 31
+#define GL_PREEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PREEXT_XLT1_L2DATA(_i) (0x0020F0CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_XLT1_L2DATA_MAX_INDEX 2
+#define GL_PREEXT_XLT1_L2DATA_DATA_S 0
+#define GL_PREEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PREEXT_XLT2_L2ADDR(_i) (0x0020F0D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_XLT2_L2ADDR_MAX_INDEX 2
+#define GL_PREEXT_XLT2_L2ADDR_LINE_IDX_S 0
+#define GL_PREEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0)
+#define GL_PREEXT_XLT2_L2ADDR_AUTO_INC_S 31
+#define GL_PREEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PREEXT_XLT2_L2DATA(_i) (0x0020F0E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PREEXT_XLT2_L2DATA_MAX_INDEX 2
+#define GL_PREEXT_XLT2_L2DATA_DATA_S 0
+#define GL_PREEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_CDMD_L1SEL(_i) (0x0020E054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_CDMD_L1SEL_MAX_INDEX 2
+#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_S 0
+#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define GL_PSTEXT_CDMD_L1SEL_TX_SEL_S 8
+#define GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_S 16
+#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_S 24
+#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_S 30
+#define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
+#define GL_PSTEXT_CTLTBL_L2ADDR(_i) (0x0020E084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_CTLTBL_L2ADDR_MAX_INDEX 2
+#define GL_PSTEXT_CTLTBL_L2ADDR_LINE_OFF_S 0
+#define GL_PSTEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0)
+#define GL_PSTEXT_CTLTBL_L2ADDR_LINE_IDX_S 8
+#define GL_PSTEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8)
+#define GL_PSTEXT_CTLTBL_L2ADDR_AUTO_INC_S 31
+#define GL_PSTEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PSTEXT_CTLTBL_L2DATA(_i) (0x0020E090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_CTLTBL_L2DATA_MAX_INDEX 2
+#define GL_PSTEXT_CTLTBL_L2DATA_DATA_S 0
+#define GL_PSTEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_DFLT_L2PRFL(_i) (0x0020E138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_DFLT_L2PRFL_MAX_INDEX 2
+#define GL_PSTEXT_DFLT_L2PRFL_DFLT_PRFL_S 0
+#define GL_PSTEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
+#define GL_PSTEXT_FL15_BMPLSB(_i) (0x0020E480 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_FL15_BMPLSB_MAX_INDEX 2
+#define GL_PSTEXT_FL15_BMPLSB_BMPLSB_S 0
+#define GL_PSTEXT_FL15_BMPLSB_BMPLSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_FL15_BMPMSB(_i) (0x0020E48C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_FL15_BMPMSB_MAX_INDEX 2
+#define GL_PSTEXT_FL15_BMPMSB_BMPMSB_S 0
+#define GL_PSTEXT_FL15_BMPMSB_BMPMSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_FLGS_L1SEL0_1(_i) (0x0020E06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_FLGS_L1SEL0_1_MAX_INDEX 2
+#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_S 0
+#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_S 16
+#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define GL_PSTEXT_FLGS_L1SEL2_3(_i) (0x0020E078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_FLGS_L1SEL2_3_MAX_INDEX 2
+#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_S 0
+#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_S 16
+#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define GL_PSTEXT_FLGS_L1TBL(_i) (0x0020E060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_FLGS_L1TBL_MAX_INDEX 2
+#define GL_PSTEXT_FLGS_L1TBL_LSB_S 0
+#define GL_PSTEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0)
+#define GL_PSTEXT_FLGS_L1TBL_MSB_S 16
+#define GL_PSTEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16)
+#define GL_PSTEXT_FORCE_L1CDID(_i) (0x0020E018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_FORCE_L1CDID_MAX_INDEX 2
+#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_S 0
+#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
+#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
+#define GL_PSTEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
+#define GL_PSTEXT_FORCE_PID(_i) (0x0020E000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_FORCE_PID_MAX_INDEX 2
+#define GL_PSTEXT_FORCE_PID_STATIC_PID_S 0
+#define GL_PSTEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0)
+#define GL_PSTEXT_FORCE_PID_STATIC_PID_EN_S 31
+#define GL_PSTEXT_FORCE_PID_STATIC_PID_EN_M BIT(31)
+#define GL_PSTEXT_K2N_L2ADDR(_i) (0x0020E144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_K2N_L2ADDR_MAX_INDEX 2
+#define GL_PSTEXT_K2N_L2ADDR_LINE_IDX_S 0
+#define GL_PSTEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0)
+#define GL_PSTEXT_K2N_L2ADDR_AUTO_INC_S 31
+#define GL_PSTEXT_K2N_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PSTEXT_K2N_L2DATA(_i) (0x0020E150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_K2N_L2DATA_MAX_INDEX 2
+#define GL_PSTEXT_K2N_L2DATA_DATA0_S 0
+#define GL_PSTEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define GL_PSTEXT_K2N_L2DATA_DATA1_S 8
+#define GL_PSTEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define GL_PSTEXT_K2N_L2DATA_DATA2_S 16
+#define GL_PSTEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define GL_PSTEXT_K2N_L2DATA_DATA3_S 24
+#define GL_PSTEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define GL_PSTEXT_L2_PMASK0(_i) (0x0020E0FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_L2_PMASK0_MAX_INDEX 2
+#define GL_PSTEXT_L2_PMASK0_BITMASK_S 0
+#define GL_PSTEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_L2_PMASK1(_i) (0x0020E108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_L2_PMASK1_MAX_INDEX 2
+#define GL_PSTEXT_L2_PMASK1_BITMASK_S 0
+#define GL_PSTEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0)
+#define GL_PSTEXT_L2_TMASK0(_i) (0x0020E498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_L2_TMASK0_MAX_INDEX 2
+#define GL_PSTEXT_L2_TMASK0_BITMASK_S 0
+#define GL_PSTEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_L2_TMASK1(_i) (0x0020E4A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_L2_TMASK1_MAX_INDEX 2
+#define GL_PSTEXT_L2_TMASK1_BITMASK_S 0
+#define GL_PSTEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0)
+#define GL_PSTEXT_L2PRTMOD(_i) (0x0020E09C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_L2PRTMOD_MAX_INDEX 2
+#define GL_PSTEXT_L2PRTMOD_XLT1_S 0
+#define GL_PSTEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0)
+#define GL_PSTEXT_L2PRTMOD_XLT2_S 8
+#define GL_PSTEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8)
+#define GL_PSTEXT_N2N_L2ADDR(_i) (0x0020E15C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_N2N_L2ADDR_MAX_INDEX 2
+#define GL_PSTEXT_N2N_L2ADDR_LINE_IDX_S 0
+#define GL_PSTEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0)
+#define GL_PSTEXT_N2N_L2ADDR_AUTO_INC_S 31
+#define GL_PSTEXT_N2N_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PSTEXT_N2N_L2DATA(_i) (0x0020E168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_N2N_L2DATA_MAX_INDEX 2
+#define GL_PSTEXT_N2N_L2DATA_DATA0_S 0
+#define GL_PSTEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define GL_PSTEXT_N2N_L2DATA_DATA1_S 8
+#define GL_PSTEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define GL_PSTEXT_N2N_L2DATA_DATA2_S 16
+#define GL_PSTEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define GL_PSTEXT_N2N_L2DATA_DATA3_S 24
+#define GL_PSTEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define GL_PSTEXT_P2P_L1ADDR(_i) (0x0020E024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_P2P_L1ADDR_MAX_INDEX 2
+#define GL_PSTEXT_P2P_L1ADDR_LINE_IDX_S 0
+#define GL_PSTEXT_P2P_L1ADDR_LINE_IDX_M BIT(0)
+#define GL_PSTEXT_P2P_L1ADDR_AUTO_INC_S 31
+#define GL_PSTEXT_P2P_L1ADDR_AUTO_INC_M BIT(31)
+#define GL_PSTEXT_P2P_L1DATA(_i) (0x0020E030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_P2P_L1DATA_MAX_INDEX 2
+#define GL_PSTEXT_P2P_L1DATA_DATA_S 0
+#define GL_PSTEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_PID_L2GKTYPE(_i) (0x0020E0F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_PID_L2GKTYPE_MAX_INDEX 2
+#define GL_PSTEXT_PID_L2GKTYPE_PID_GKTYPE_S 0
+#define GL_PSTEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0)
+#define GL_PSTEXT_PLVL_SEL(_i) (0x0020E00C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_PLVL_SEL_MAX_INDEX 2
+#define GL_PSTEXT_PLVL_SEL_PLVL_SEL_S 0
+#define GL_PSTEXT_PLVL_SEL_PLVL_SEL_M BIT(0)
+#define GL_PSTEXT_PRFLM_CTRL(_i) (0x0020E474 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_PRFLM_CTRL_MAX_INDEX 2
+#define GL_PSTEXT_PRFLM_CTRL_PRFL_IDX_S 0
+#define GL_PSTEXT_PRFLM_CTRL_PRFL_IDX_M MAKEMASK(0xFF, 0)
+#define GL_PSTEXT_PRFLM_CTRL_RD_REQ_S 30
+#define GL_PSTEXT_PRFLM_CTRL_RD_REQ_M BIT(30)
+#define GL_PSTEXT_PRFLM_CTRL_WR_REQ_S 31
+#define GL_PSTEXT_PRFLM_CTRL_WR_REQ_M BIT(31)
+#define GL_PSTEXT_PRFLM_DATA_0(_i) (0x0020E174 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GL_PSTEXT_PRFLM_DATA_0_MAX_INDEX 63
+#define GL_PSTEXT_PRFLM_DATA_0_PROT_S 0
+#define GL_PSTEXT_PRFLM_DATA_0_PROT_M MAKEMASK(0xFF, 0)
+#define GL_PSTEXT_PRFLM_DATA_0_OFF_S 16
+#define GL_PSTEXT_PRFLM_DATA_0_OFF_M MAKEMASK(0x1FF, 16)
+#define GL_PSTEXT_PRFLM_DATA_1(_i) (0x0020E274 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GL_PSTEXT_PRFLM_DATA_1_MAX_INDEX 63
+#define GL_PSTEXT_PRFLM_DATA_1_PROT_S 0
+#define GL_PSTEXT_PRFLM_DATA_1_PROT_M MAKEMASK(0xFF, 0)
+#define GL_PSTEXT_PRFLM_DATA_1_OFF_S 16
+#define GL_PSTEXT_PRFLM_DATA_1_OFF_M MAKEMASK(0x1FF, 16)
+#define GL_PSTEXT_PRFLM_DATA_2(_i) (0x0020E374 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GL_PSTEXT_PRFLM_DATA_2_MAX_INDEX 63
+#define GL_PSTEXT_PRFLM_DATA_2_PROT_S 0
+#define GL_PSTEXT_PRFLM_DATA_2_PROT_M MAKEMASK(0xFF, 0)
+#define GL_PSTEXT_PRFLM_DATA_2_OFF_S 16
+#define GL_PSTEXT_PRFLM_DATA_2_OFF_M MAKEMASK(0x1FF, 16)
+#define GL_PSTEXT_TCAM_L2ADDR(_i) (0x0020E114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_TCAM_L2ADDR_MAX_INDEX 2
+#define GL_PSTEXT_TCAM_L2ADDR_LINE_IDX_S 0
+#define GL_PSTEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0)
+#define GL_PSTEXT_TCAM_L2ADDR_AUTO_INC_S 31
+#define GL_PSTEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PSTEXT_TCAM_L2DATALSB(_i) (0x0020E120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_TCAM_L2DATALSB_MAX_INDEX 2
+#define GL_PSTEXT_TCAM_L2DATALSB_DATALSB_S 0
+#define GL_PSTEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_TCAM_L2DATAMSB(_i) (0x0020E12C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_TCAM_L2DATAMSB_MAX_INDEX 2
+#define GL_PSTEXT_TCAM_L2DATAMSB_DATAMSB_S 0
+#define GL_PSTEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0)
+#define GL_PSTEXT_XLT0_L1ADDR(_i) (0x0020E03C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_XLT0_L1ADDR_MAX_INDEX 2
+#define GL_PSTEXT_XLT0_L1ADDR_LINE_IDX_S 0
+#define GL_PSTEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0)
+#define GL_PSTEXT_XLT0_L1ADDR_AUTO_INC_S 31
+#define GL_PSTEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31)
+#define GL_PSTEXT_XLT0_L1DATA(_i) (0x0020E048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_XLT0_L1DATA_MAX_INDEX 2
+#define GL_PSTEXT_XLT0_L1DATA_DATA_S 0
+#define GL_PSTEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_XLT1_L2ADDR(_i) (0x0020E0C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_XLT1_L2ADDR_MAX_INDEX 2
+#define GL_PSTEXT_XLT1_L2ADDR_LINE_IDX_S 0
+#define GL_PSTEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0)
+#define GL_PSTEXT_XLT1_L2ADDR_AUTO_INC_S 31
+#define GL_PSTEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PSTEXT_XLT1_L2DATA(_i) (0x0020E0CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_XLT1_L2DATA_MAX_INDEX 2
+#define GL_PSTEXT_XLT1_L2DATA_DATA_S 0
+#define GL_PSTEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PSTEXT_XLT2_L2ADDR(_i) (0x0020E0D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_XLT2_L2ADDR_MAX_INDEX 2
+#define GL_PSTEXT_XLT2_L2ADDR_LINE_IDX_S 0
+#define GL_PSTEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0)
+#define GL_PSTEXT_XLT2_L2ADDR_AUTO_INC_S 31
+#define GL_PSTEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31)
+#define GL_PSTEXT_XLT2_L2DATA(_i) (0x0020E0E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GL_PSTEXT_XLT2_L2DATA_MAX_INDEX 2
+#define GL_PSTEXT_XLT2_L2DATA_DATA_S 0
+#define GL_PSTEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
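+/*
+ * One-bit fields use BIT() for their *_M define, so they can be OR'd in (or
+ * tested) without a shift.  A sketch composing a value that mixes a
+ * multi-bit field with a one-bit flag, assuming this address register is
+ * programmed by software the way its field names suggest (wr32() is a
+ * stand-in for the driver's write accessor, line is caller supplied):
+ *
+ *	u32 addr = ((line << GL_PSTEXT_TCAM_L2ADDR_LINE_IDX_S) &
+ *	    GL_PSTEXT_TCAM_L2ADDR_LINE_IDX_M) |
+ *	    GL_PSTEXT_TCAM_L2ADDR_AUTO_INC_M;
+ *
+ *	wr32(hw, GL_PSTEXT_TCAM_L2ADDR(0), addr);
+ */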
+#define GLFLXP_PTYPE_TRANSLATION(_i) (0x0045C000 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define GLFLXP_PTYPE_TRANSLATION_MAX_INDEX 255
+#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_S 0
+#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_M MAKEMASK(0xFF, 0)
+#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_1_S 8
+#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_1_M MAKEMASK(0xFF, 8)
+#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_2_S 16
+#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_2_M MAKEMASK(0xFF, 16)
+#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_3_S 24
+#define GLFLXP_PTYPE_TRANSLATION_PTYPE_4N_3_M MAKEMASK(0xFF, 24)
+#define GLFLXP_RX_CMD_LX_PROT_IDX(_i) (0x0045C400 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define GLFLXP_RX_CMD_LX_PROT_IDX_MAX_INDEX 255
+#define GLFLXP_RX_CMD_LX_PROT_IDX_INNER_CLOUD_OFFSET_INDEX_S 0
+#define GLFLXP_RX_CMD_LX_PROT_IDX_INNER_CLOUD_OFFSET_INDEX_M MAKEMASK(0x7, 0)
+#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_OFFSET_INDEX_S 4
+#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_OFFSET_INDEX_M MAKEMASK(0x7, 4)
+#define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_S 8
+#define GLFLXP_RX_CMD_LX_PROT_IDX_PAYLOAD_OFFSET_INDEX_M MAKEMASK(0x7, 8)
+#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_S 12
+#define GLFLXP_RX_CMD_LX_PROT_IDX_L3_PROTOCOL_M MAKEMASK(0x3, 12)
+#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_S 14
+#define GLFLXP_RX_CMD_LX_PROT_IDX_L4_PROTOCOL_M MAKEMASK(0x3, 14)
+#define GLFLXP_RX_CMD_PROTIDS(_i, _j) (0x0045A000 + ((_i) * 4 + (_j) * 1024)) /* _i=0...255, _j=0...5 */ /* Reset Source: CORER */
+#define GLFLXP_RX_CMD_PROTIDS_MAX_INDEX 255
+#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_S 0
+#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_M MAKEMASK(0xFF, 0)
+#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_1_S 8
+#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_1_M MAKEMASK(0xFF, 8)
+#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_2_S 16
+#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_2_M MAKEMASK(0xFF, 16)
+#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_3_S 24
+#define GLFLXP_RX_CMD_PROTIDS_PROTID_4N_3_M MAKEMASK(0xFF, 24)
+#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) /* _i=0...63, _j=0...4 */ /* Reset Source: CORER */
+#define GLFLXP_RXDID_FLAGS_MAX_INDEX 63
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M MAKEMASK(0x3F, 0)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M MAKEMASK(0x3F, 8)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M MAKEMASK(0x3F, 16)
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M MAKEMASK(0x3F, 24)
+#define GLFLXP_RXDID_FLAGS1_OVERRIDE(_i) (0x0045D600 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GLFLXP_RXDID_FLAGS1_OVERRIDE_MAX_INDEX 63
+#define GLFLXP_RXDID_FLAGS1_OVERRIDE_FLEXIFLAGS1_OVERRIDE_S 0
+#define GLFLXP_RXDID_FLAGS1_OVERRIDE_FLEXIFLAGS1_OVERRIDE_M MAKEMASK(0xF, 0)
+#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045C800 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GLFLXP_RXDID_FLX_WRD_0_MAX_INDEX 63
+#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define GLFLXP_RXDID_FLX_WRD_0_EXTRACTION_OFFSET_S 8
+#define GLFLXP_RXDID_FLX_WRD_0_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045C900 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GLFLXP_RXDID_FLX_WRD_1_MAX_INDEX 63
+#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define GLFLXP_RXDID_FLX_WRD_1_EXTRACTION_OFFSET_S 8
+#define GLFLXP_RXDID_FLX_WRD_1_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045CA00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GLFLXP_RXDID_FLX_WRD_2_MAX_INDEX 63
+#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define GLFLXP_RXDID_FLX_WRD_2_EXTRACTION_OFFSET_S 8
+#define GLFLXP_RXDID_FLX_WRD_2_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045CB00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GLFLXP_RXDID_FLX_WRD_3_MAX_INDEX 63
+#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define GLFLXP_RXDID_FLX_WRD_3_EXTRACTION_OFFSET_S 8
+#define GLFLXP_RXDID_FLX_WRD_3_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define GLFLXP_RXDID_FLX_WRD_4(_i) (0x0045CC00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GLFLXP_RXDID_FLX_WRD_4_MAX_INDEX 63
+#define GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_4_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define GLFLXP_RXDID_FLX_WRD_4_EXTRACTION_OFFSET_S 8
+#define GLFLXP_RXDID_FLX_WRD_4_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define GLFLXP_RXDID_FLX_WRD_4_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_4_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define GLFLXP_RXDID_FLX_WRD_5(_i) (0x0045CD00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GLFLXP_RXDID_FLX_WRD_5_MAX_INDEX 63
+#define GLFLXP_RXDID_FLX_WRD_5_PROT_MDID_S 0
+#define GLFLXP_RXDID_FLX_WRD_5_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define GLFLXP_RXDID_FLX_WRD_5_EXTRACTION_OFFSET_S 8
+#define GLFLXP_RXDID_FLX_WRD_5_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define GLFLXP_RXDID_FLX_WRD_5_RXDID_OPCODE_S 30
+#define GLFLXP_RXDID_FLX_WRD_5_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define GLFLXP_TX_SCHED_CORRECT(_i, _j) (0x00458000 + ((_i) * 4 + (_j) * 256)) /* _i=0...63, _j=0...31 */ /* Reset Source: CORER */
+#define GLFLXP_TX_SCHED_CORRECT_MAX_INDEX 63
+#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_S 0
+#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_M MAKEMASK(0xFF, 0)
+#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_S 8
+#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_M MAKEMASK(0x1F, 8)
+#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_S 16
+#define GLFLXP_TX_SCHED_CORRECT_PROTD_ID_2N_1_M MAKEMASK(0xFF, 16)
+#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_S 24
+#define GLFLXP_TX_SCHED_CORRECT_RECIPE_2N_1_M MAKEMASK(0x1F, 24)
+#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) /* _QRX=0...2047 */ /* Reset Source: CORER */
+#define QRXFLXP_CNTXT_MAX_INDEX 2047
+#define QRXFLXP_CNTXT_RXDID_IDX_S 0
+#define QRXFLXP_CNTXT_RXDID_IDX_M MAKEMASK(0x3F, 0)
+#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
+#define QRXFLXP_CNTXT_RXDID_PRIO_M MAKEMASK(0x7, 8)
+#define QRXFLXP_CNTXT_TS_S 11
+#define QRXFLXP_CNTXT_TS_M BIT(11)
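+/*
+ * Writes follow the same pattern in reverse: shift each field value into
+ * place, mask it, OR the pieces together and write the result.  A sketch of
+ * selecting a flexible RX descriptor profile for one queue (wr32() is a
+ * hypothetical write accessor; rxdid, prio and queue_index are caller
+ * supplied):
+ *
+ *	u32 regval = ((rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ *	    QRXFLXP_CNTXT_RXDID_IDX_M) |
+ *	    ((prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ *	    QRXFLXP_CNTXT_RXDID_PRIO_M);
+ *
+ *	wr32(hw, QRXFLXP_CNTXT(queue_index), regval);
+ */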
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_FWS0B_S 0
+#define GL_FWSTS_FWS0B_M MAKEMASK(0xFF, 0)
+#define GL_FWSTS_FWROWD_S 8
+#define GL_FWSTS_FWROWD_M BIT(8)
+#define GL_FWSTS_FWRI_S 9
+#define GL_FWSTS_FWRI_M BIT(9)
+#define GL_FWSTS_FWS1B_S 16
+#define GL_FWSTS_FWS1B_M MAKEMASK(0xFF, 16)
+#define GL_TCVMLR_DRAIN_CNTR_CTL 0x000A21E0 /* Reset Source: CORER */
+#define GL_TCVMLR_DRAIN_CNTR_CTL_OP_S 0
+#define GL_TCVMLR_DRAIN_CNTR_CTL_OP_M BIT(0)
+#define GL_TCVMLR_DRAIN_CNTR_CTL_PORT_S 1
+#define GL_TCVMLR_DRAIN_CNTR_CTL_PORT_M MAKEMASK(0x7, 1)
+#define GL_TCVMLR_DRAIN_CNTR_CTL_VALUE_S 4
+#define GL_TCVMLR_DRAIN_CNTR_CTL_VALUE_M MAKEMASK(0x3FFF, 4)
+#define GL_TCVMLR_DRAIN_DONE_DEC 0x000A21A8 /* Reset Source: CORER */
+#define GL_TCVMLR_DRAIN_DONE_DEC_TARGET_S 0
+#define GL_TCVMLR_DRAIN_DONE_DEC_TARGET_M BIT(0)
+#define GL_TCVMLR_DRAIN_DONE_DEC_INDEX_S 1
+#define GL_TCVMLR_DRAIN_DONE_DEC_INDEX_M MAKEMASK(0x1F, 1)
+#define GL_TCVMLR_DRAIN_DONE_DEC_VALUE_S 6
+#define GL_TCVMLR_DRAIN_DONE_DEC_VALUE_M MAKEMASK(0xFF, 6)
+#define GL_TCVMLR_DRAIN_DONE_TCLAN(_i) (0x000A20A8 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GL_TCVMLR_DRAIN_DONE_TCLAN_MAX_INDEX 31
+#define GL_TCVMLR_DRAIN_DONE_TCLAN_COUNT_S 0
+#define GL_TCVMLR_DRAIN_DONE_TCLAN_COUNT_M MAKEMASK(0xFF, 0)
+#define GL_TCVMLR_DRAIN_DONE_TPB(_i) (0x000A2128 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GL_TCVMLR_DRAIN_DONE_TPB_MAX_INDEX 31
+#define GL_TCVMLR_DRAIN_DONE_TPB_COUNT_S 0
+#define GL_TCVMLR_DRAIN_DONE_TPB_COUNT_M MAKEMASK(0xFF, 0)
+#define GL_TCVMLR_DRAIN_MARKER 0x000A2008 /* Reset Source: CORER */
+#define GL_TCVMLR_DRAIN_MARKER_PORT_S 0
+#define GL_TCVMLR_DRAIN_MARKER_PORT_M MAKEMASK(0x7, 0)
+#define GL_TCVMLR_DRAIN_MARKER_TC_S 3
+#define GL_TCVMLR_DRAIN_MARKER_TC_M MAKEMASK(0x1F, 3)
+#define GL_TCVMLR_ERR_STAT 0x000A2024 /* Reset Source: CORER */
+#define GL_TCVMLR_ERR_STAT_ERROR_S 0
+#define GL_TCVMLR_ERR_STAT_ERROR_M BIT(0)
+#define GL_TCVMLR_ERR_STAT_FW_REQ_S 1
+#define GL_TCVMLR_ERR_STAT_FW_REQ_M BIT(1)
+#define GL_TCVMLR_ERR_STAT_STAT_S 2
+#define GL_TCVMLR_ERR_STAT_STAT_M MAKEMASK(0x7, 2)
+#define GL_TCVMLR_ERR_STAT_ENT_TYPE_S 5
+#define GL_TCVMLR_ERR_STAT_ENT_TYPE_M MAKEMASK(0x7, 5)
+#define GL_TCVMLR_ERR_STAT_ENT_ID_S 8
+#define GL_TCVMLR_ERR_STAT_ENT_ID_M MAKEMASK(0x3FFF, 8)
+#define GL_TCVMLR_QCFG 0x000A2010 /* Reset Source: CORER */
+#define GL_TCVMLR_QCFG_QID_S 0
+#define GL_TCVMLR_QCFG_QID_M MAKEMASK(0x3FFF, 0)
+#define GL_TCVMLR_QCFG_OP_S 14
+#define GL_TCVMLR_QCFG_OP_M BIT(14)
+#define GL_TCVMLR_QCFG_PORT_S 15
+#define GL_TCVMLR_QCFG_PORT_M MAKEMASK(0x7, 15)
+#define GL_TCVMLR_QCFG_TC_S 18
+#define GL_TCVMLR_QCFG_TC_M MAKEMASK(0x1F, 18)
+#define GL_TCVMLR_QCFG_RD 0x000A2014 /* Reset Source: CORER */
+#define GL_TCVMLR_QCFG_RD_QID_S 0
+#define GL_TCVMLR_QCFG_RD_QID_M MAKEMASK(0x3FFF, 0)
+#define GL_TCVMLR_QCFG_RD_PORT_S 14
+#define GL_TCVMLR_QCFG_RD_PORT_M MAKEMASK(0x7, 14)
+#define GL_TCVMLR_QCFG_RD_TC_S 17
+#define GL_TCVMLR_QCFG_RD_TC_M MAKEMASK(0x1F, 17)
+#define GL_TCVMLR_QCNTR 0x000A200C /* Reset Source: CORER */
+#define GL_TCVMLR_QCNTR_CNTR_S 0
+#define GL_TCVMLR_QCNTR_CNTR_M MAKEMASK(0x7FFF, 0)
+#define GL_TCVMLR_QCTL 0x000A2004 /* Reset Source: CORER */
+#define GL_TCVMLR_QCTL_QID_S 0
+#define GL_TCVMLR_QCTL_QID_M MAKEMASK(0x3FFF, 0)
+#define GL_TCVMLR_QCTL_OP_S 14
+#define GL_TCVMLR_QCTL_OP_M BIT(14)
+#define GL_TCVMLR_REQ_STAT 0x000A2018 /* Reset Source: CORER */
+#define GL_TCVMLR_REQ_STAT_ENT_TYPE_S 0
+#define GL_TCVMLR_REQ_STAT_ENT_TYPE_M MAKEMASK(0x7, 0)
+#define GL_TCVMLR_REQ_STAT_ENT_ID_S 3
+#define GL_TCVMLR_REQ_STAT_ENT_ID_M MAKEMASK(0x3FFF, 3)
+#define GL_TCVMLR_REQ_STAT_OP_S 17
+#define GL_TCVMLR_REQ_STAT_OP_M BIT(17)
+#define GL_TCVMLR_REQ_STAT_WRITE_STATUS_S 18
+#define GL_TCVMLR_REQ_STAT_WRITE_STATUS_M MAKEMASK(0x7, 18)
+#define GL_TCVMLR_STAT 0x000A201C /* Reset Source: CORER */
+#define GL_TCVMLR_STAT_ENT_TYPE_S 0
+#define GL_TCVMLR_STAT_ENT_TYPE_M MAKEMASK(0x7, 0)
+#define GL_TCVMLR_STAT_ENT_ID_S 3
+#define GL_TCVMLR_STAT_ENT_ID_M MAKEMASK(0x3FFF, 3)
+#define GL_TCVMLR_STAT_STATUS_S 17
+#define GL_TCVMLR_STAT_STATUS_M MAKEMASK(0x7, 17)
+#define GL_XLR_MARKER_TRIG_TCVMLR 0x000A2000 /* Reset Source: CORER */
+#define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_NUM_S 0
+#define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_NUM_M MAKEMASK(0x3FF, 0)
+#define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_TYPE_S 10
+#define GL_XLR_MARKER_TRIG_TCVMLR_VM_VF_TYPE_M MAKEMASK(0x3, 10)
+#define GL_XLR_MARKER_TRIG_TCVMLR_PF_NUM_S 12
+#define GL_XLR_MARKER_TRIG_TCVMLR_PF_NUM_M MAKEMASK(0x7, 12)
+#define GL_XLR_MARKER_TRIG_TCVMLR_PORT_NUM_S 16
+#define GL_XLR_MARKER_TRIG_TCVMLR_PORT_NUM_M MAKEMASK(0x7, 16)
+#define GL_XLR_MARKER_TRIG_VMLR 0x00093804 /* Reset Source: CORER */
+#define GL_XLR_MARKER_TRIG_VMLR_VM_VF_NUM_S 0
+#define GL_XLR_MARKER_TRIG_VMLR_VM_VF_NUM_M MAKEMASK(0x3FF, 0)
+#define GL_XLR_MARKER_TRIG_VMLR_VM_VF_TYPE_S 10
+#define GL_XLR_MARKER_TRIG_VMLR_VM_VF_TYPE_M MAKEMASK(0x3, 10)
+#define GL_XLR_MARKER_TRIG_VMLR_PF_NUM_S 12
+#define GL_XLR_MARKER_TRIG_VMLR_PF_NUM_M MAKEMASK(0x7, 12)
+#define GL_XLR_MARKER_TRIG_VMLR_PORT_NUM_S 16
+#define GL_XLR_MARKER_TRIG_VMLR_PORT_NUM_M MAKEMASK(0x7, 16)
+#define GLGEN_ANA_ABORT_PTYPE 0x0020C21C /* Reset Source: CORER */
+#define GLGEN_ANA_ABORT_PTYPE_ABORT_S 0
+#define GLGEN_ANA_ABORT_PTYPE_ABORT_M MAKEMASK(0x3FF, 0)
+#define GLGEN_ANA_ALU_ACCSS_OUT_OF_PKT 0x0020C208 /* Reset Source: CORER */
+#define GLGEN_ANA_ALU_ACCSS_OUT_OF_PKT_NPC_S 0
+#define GLGEN_ANA_ALU_ACCSS_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0)
+#define GLGEN_ANA_CFG_CTRL 0x0020C104 /* Reset Source: CORER */
+#define GLGEN_ANA_CFG_CTRL_LINE_IDX_S 0
+#define GLGEN_ANA_CFG_CTRL_LINE_IDX_M MAKEMASK(0x3FFFF, 0)
+#define GLGEN_ANA_CFG_CTRL_TABLE_ID_S 18
+#define GLGEN_ANA_CFG_CTRL_TABLE_ID_M MAKEMASK(0xFF, 18)
+#define GLGEN_ANA_CFG_CTRL_RESRVED_S 26
+#define GLGEN_ANA_CFG_CTRL_RESRVED_M MAKEMASK(0x7, 26)
+#define GLGEN_ANA_CFG_CTRL_OPERATION_ID_S 29
+#define GLGEN_ANA_CFG_CTRL_OPERATION_ID_M MAKEMASK(0x7, 29)
+#define GLGEN_ANA_CFG_HTBL_LU_RESULT 0x0020C158 /* Reset Source: CORER */
+#define GLGEN_ANA_CFG_HTBL_LU_RESULT_HIT_S 0
+#define GLGEN_ANA_CFG_HTBL_LU_RESULT_HIT_M BIT(0)
+#define GLGEN_ANA_CFG_HTBL_LU_RESULT_PG_MEM_IDX_S 1
+#define GLGEN_ANA_CFG_HTBL_LU_RESULT_PG_MEM_IDX_M MAKEMASK(0x7, 1)
+#define GLGEN_ANA_CFG_HTBL_LU_RESULT_ADDR_S 4
+#define GLGEN_ANA_CFG_HTBL_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4)
+#define GLGEN_ANA_CFG_LU_KEY(_i) (0x0020C14C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GLGEN_ANA_CFG_LU_KEY_MAX_INDEX 2
+#define GLGEN_ANA_CFG_LU_KEY_LU_KEY_S 0
+#define GLGEN_ANA_CFG_LU_KEY_LU_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_CFG_RDDATA(_i) (0x0020C10C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLGEN_ANA_CFG_RDDATA_MAX_INDEX 15
+#define GLGEN_ANA_CFG_RDDATA_RD_DATA_S 0
+#define GLGEN_ANA_CFG_RDDATA_RD_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT 0x0020C15C /* Reset Source: CORER */
+#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_HIT_S 0
+#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0)
+#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_RSV_S 1
+#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_RSV_M MAKEMASK(0x7, 1)
+#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_ADDR_S 4
+#define GLGEN_ANA_CFG_SPLBUF_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4)
+#define GLGEN_ANA_CFG_WRDATA 0x0020C108 /* Reset Source: CORER */
+#define GLGEN_ANA_CFG_WRDATA_WR_DATA_S 0
+#define GLGEN_ANA_CFG_WRDATA_WR_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_DEF_PTYPE 0x0020C100 /* Reset Source: CORER */
+#define GLGEN_ANA_DEF_PTYPE_DEF_PTYPE_S 0
+#define GLGEN_ANA_DEF_PTYPE_DEF_PTYPE_M MAKEMASK(0x3FF, 0)
+#define GLGEN_ANA_ERR_CTRL 0x0020C220 /* Reset Source: CORER */
+#define GLGEN_ANA_ERR_CTRL_ERR_MASK_EN_S 0
+#define GLGEN_ANA_ERR_CTRL_ERR_MASK_EN_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_FLAG_MAP(_i) (0x0020C000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GLGEN_ANA_FLAG_MAP_MAX_INDEX 63
+#define GLGEN_ANA_FLAG_MAP_FLAG_EN_S 0
+#define GLGEN_ANA_FLAG_MAP_FLAG_EN_M BIT(0)
+#define GLGEN_ANA_FLAG_MAP_EXT_FLAG_ID_S 1
+#define GLGEN_ANA_FLAG_MAP_EXT_FLAG_ID_M MAKEMASK(0x3F, 1)
+#define GLGEN_ANA_INV_NODE_PTYPE 0x0020C210 /* Reset Source: CORER */
+#define GLGEN_ANA_INV_NODE_PTYPE_INV_NODE_PTYPE_S 0
+#define GLGEN_ANA_INV_NODE_PTYPE_INV_NODE_PTYPE_M MAKEMASK(0x7FF, 0)
+#define GLGEN_ANA_INV_PTYPE_MARKER 0x0020C218 /* Reset Source: CORER */
+#define GLGEN_ANA_INV_PTYPE_MARKER_INV_PTYPE_MARKER_S 0
+#define GLGEN_ANA_INV_PTYPE_MARKER_INV_PTYPE_MARKER_M MAKEMASK(0x7F, 0)
+#define GLGEN_ANA_LAST_PROT_ID(_i) (0x0020C1E4 + ((_i) * 4)) /* _i=0...5 */ /* Reset Source: CORER */
+#define GLGEN_ANA_LAST_PROT_ID_MAX_INDEX 5
+#define GLGEN_ANA_LAST_PROT_ID_EN_S 0
+#define GLGEN_ANA_LAST_PROT_ID_EN_M BIT(0)
+#define GLGEN_ANA_LAST_PROT_ID_PROT_ID_S 1
+#define GLGEN_ANA_LAST_PROT_ID_PROT_ID_M MAKEMASK(0xFF, 1)
+#define GLGEN_ANA_NMPG_KEYMASK(_i) (0x0020C1D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLGEN_ANA_NMPG_KEYMASK_MAX_INDEX 3
+#define GLGEN_ANA_NMPG_KEYMASK_HASH_KEY_S 0
+#define GLGEN_ANA_NMPG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_NMPG0_HASHKEY(_i) (0x0020C1B0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLGEN_ANA_NMPG0_HASHKEY_MAX_INDEX 3
+#define GLGEN_ANA_NMPG0_HASHKEY_HASH_KEY_S 0
+#define GLGEN_ANA_NMPG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_NO_HIT_PG_NM_PG 0x0020C204 /* Reset Source: CORER */
+#define GLGEN_ANA_NO_HIT_PG_NM_PG_NPC_S 0
+#define GLGEN_ANA_NO_HIT_PG_NM_PG_NPC_M MAKEMASK(0xFF, 0)
+#define GLGEN_ANA_OUT_OF_PKT 0x0020C200 /* Reset Source: CORER */
+#define GLGEN_ANA_OUT_OF_PKT_NPC_S 0
+#define GLGEN_ANA_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0)
+#define GLGEN_ANA_P2P(_i) (0x0020C160 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLGEN_ANA_P2P_MAX_INDEX 15
+#define GLGEN_ANA_P2P_TARGET_PROF_S 0
+#define GLGEN_ANA_P2P_TARGET_PROF_M MAKEMASK(0xF, 0)
+#define GLGEN_ANA_PG_KEYMASK(_i) (0x0020C1C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLGEN_ANA_PG_KEYMASK_MAX_INDEX 3
+#define GLGEN_ANA_PG_KEYMASK_HASH_KEY_S 0
+#define GLGEN_ANA_PG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_PG0_HASHKEY(_i) (0x0020C1A0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLGEN_ANA_PG0_HASHKEY_MAX_INDEX 3
+#define GLGEN_ANA_PG0_HASHKEY_HASH_KEY_S 0
+#define GLGEN_ANA_PG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_PROFIL_CTRL 0x0020C1FC /* Reset Source: CORER */
+#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDID_S 0
+#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDID_M MAKEMASK(0x1F, 0)
+#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDSTART_S 5
+#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MDSTART_M MAKEMASK(0xF, 5)
+#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_S 9
+#define GLGEN_ANA_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_M MAKEMASK(0x1F, 9)
+#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14
+#define GLGEN_ANA_PROFIL_CTRL_NUM_CTRL_DOMAIN_M MAKEMASK(0x3, 14)
+#define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_S 16
+#define GLGEN_ANA_PROFIL_CTRL_DEF_PROF_ID_M MAKEMASK(0xF, 16)
+#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20
+#define GLGEN_ANA_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20)
+#define GLGEN_ANA_TX_ABORT_PTYPE 0x0020D21C /* Reset Source: CORER */
+#define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_S 0
+#define GLGEN_ANA_TX_ABORT_PTYPE_ABORT_M MAKEMASK(0x3FF, 0)
+#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT 0x0020D208 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_S 0
+#define GLGEN_ANA_TX_ALU_ACCSS_OUT_OF_PKT_NPC_M MAKEMASK(0xFF, 0)
+#define GLGEN_ANA_TX_CFG_CTRL 0x0020D104 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_S 0
+#define GLGEN_ANA_TX_CFG_CTRL_LINE_IDX_M MAKEMASK(0x3FFFF, 0)
+#define GLGEN_ANA_TX_CFG_CTRL_TABLE_ID_S 18
+#define GLGEN_ANA_TX_CFG_CTRL_TABLE_ID_M MAKEMASK(0xFF, 18)
+#define GLGEN_ANA_TX_CFG_CTRL_RESRVED_S 26
+#define GLGEN_ANA_TX_CFG_CTRL_RESRVED_M MAKEMASK(0x7, 26)
+#define GLGEN_ANA_TX_CFG_CTRL_OPERATION_ID_S 29
+#define GLGEN_ANA_TX_CFG_CTRL_OPERATION_ID_M MAKEMASK(0x7, 29)
+#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT 0x0020D158 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_HIT_S 0
+#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_HIT_M BIT(0)
+#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_PG_MEM_IDX_S 1
+#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_PG_MEM_IDX_M MAKEMASK(0x7, 1)
+#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_ADDR_S 4
+#define GLGEN_ANA_TX_CFG_HTBL_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4)
+#define GLGEN_ANA_TX_CFG_LU_KEY(_i) (0x0020D14C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define GLGEN_ANA_TX_CFG_LU_KEY_MAX_INDEX 2
+#define GLGEN_ANA_TX_CFG_LU_KEY_LU_KEY_S 0
+#define GLGEN_ANA_TX_CFG_LU_KEY_LU_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_TX_CFG_RDDATA(_i) (0x0020D10C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLGEN_ANA_TX_CFG_RDDATA_MAX_INDEX 15
+#define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_S 0
+#define GLGEN_ANA_TX_CFG_RDDATA_RD_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT 0x0020D15C /* Reset Source: CORER */
+#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_S 0
+#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_HIT_M BIT(0)
+#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_S 1
+#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_RSV_M MAKEMASK(0x7, 1)
+#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_S 4
+#define GLGEN_ANA_TX_CFG_SPLBUF_LU_RESULT_ADDR_M MAKEMASK(0x1FF, 4)
+#define GLGEN_ANA_TX_CFG_WRDATA 0x0020D108 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_CFG_WRDATA_WR_DATA_S 0
+#define GLGEN_ANA_TX_CFG_WRDATA_WR_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_TX_DEF_PTYPE 0x0020D100 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_DEF_PTYPE_DEF_PTYPE_S 0
+#define GLGEN_ANA_TX_DEF_PTYPE_DEF_PTYPE_M MAKEMASK(0x3FF, 0)
+#define GLGEN_ANA_TX_DFD_PACE_OUT 0x0020D4CC /* Reset Source: CORER */
+#define GLGEN_ANA_TX_DFD_PACE_OUT_PUSH_S 0
+#define GLGEN_ANA_TX_DFD_PACE_OUT_PUSH_M BIT(0)
+#define GLGEN_ANA_TX_ERR_CTRL 0x0020D220 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_ERR_CTRL_ERR_MASK_EN_S 0
+#define GLGEN_ANA_TX_ERR_CTRL_ERR_MASK_EN_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_TX_FLAG_MAP(_i) (0x0020D000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GLGEN_ANA_TX_FLAG_MAP_MAX_INDEX 63
+#define GLGEN_ANA_TX_FLAG_MAP_FLAG_EN_S 0
+#define GLGEN_ANA_TX_FLAG_MAP_FLAG_EN_M BIT(0)
+#define GLGEN_ANA_TX_FLAG_MAP_EXT_FLAG_ID_S 1
+#define GLGEN_ANA_TX_FLAG_MAP_EXT_FLAG_ID_M MAKEMASK(0x3F, 1)
+#define GLGEN_ANA_TX_INV_NODE_PTYPE 0x0020D210 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_INV_NODE_PTYPE_INV_NODE_PTYPE_S 0
+#define GLGEN_ANA_TX_INV_NODE_PTYPE_INV_NODE_PTYPE_M MAKEMASK(0x7FF, 0)
+#define GLGEN_ANA_TX_INV_PROT_ID 0x0020D214 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_INV_PROT_ID_INV_PROT_ID_S 0
+#define GLGEN_ANA_TX_INV_PROT_ID_INV_PROT_ID_M MAKEMASK(0xFF, 0)
+#define GLGEN_ANA_TX_INV_PTYPE_MARKER 0x0020D218 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_INV_PTYPE_MARKER_INV_PTYPE_MARKER_S 0
+#define GLGEN_ANA_TX_INV_PTYPE_MARKER_INV_PTYPE_MARKER_M MAKEMASK(0x7F, 0)
+#define GLGEN_ANA_TX_NMPG_KEYMASK(_i) (0x0020D1D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLGEN_ANA_TX_NMPG_KEYMASK_MAX_INDEX 3
+#define GLGEN_ANA_TX_NMPG_KEYMASK_HASH_KEY_S 0
+#define GLGEN_ANA_TX_NMPG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_TX_NMPG0_HASHKEY(_i) (0x0020D1B0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLGEN_ANA_TX_NMPG0_HASHKEY_MAX_INDEX 3
+#define GLGEN_ANA_TX_NMPG0_HASHKEY_HASH_KEY_S 0
+#define GLGEN_ANA_TX_NMPG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_TX_NO_HIT_PG_NM_PG 0x0020D204 /* Reset Source: CORER */
+#define GLGEN_ANA_TX_NO_HIT_PG_NM_PG_NPC_S 0
+#define GLGEN_ANA_TX_NO_HIT_PG_NM_PG_NPC_M MAKEMASK(0xFF, 0)
+#define GLGEN_ANA_TX_P2P(_i) (0x0020D160 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLGEN_ANA_TX_P2P_MAX_INDEX 15
+#define GLGEN_ANA_TX_P2P_TARGET_PROF_S 0
+#define GLGEN_ANA_TX_P2P_TARGET_PROF_M MAKEMASK(0xF, 0)
+#define GLGEN_ANA_TX_PG_KEYMASK(_i) (0x0020D1C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLGEN_ANA_TX_PG_KEYMASK_MAX_INDEX 3
+#define GLGEN_ANA_TX_PG_KEYMASK_HASH_KEY_S 0
+#define GLGEN_ANA_TX_PG_KEYMASK_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_TX_PG0_HASHKEY(_i) (0x0020D1A0 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLGEN_ANA_TX_PG0_HASHKEY_MAX_INDEX 3
+#define GLGEN_ANA_TX_PG0_HASHKEY_HASH_KEY_S 0
+#define GLGEN_ANA_TX_PG0_HASHKEY_HASH_KEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ANA_TX_PROFIL_CTRL 0x0020D1FC /* Reset Source: CORER */
+#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDID_S 0
+#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDID_M MAKEMASK(0x1F, 0)
+#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDSTART_S 5
+#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MDSTART_M MAKEMASK(0xF, 5)
+#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_S 9
+#define GLGEN_ANA_TX_PROFIL_CTRL_PROFILE_SELECT_MD_LEN_M MAKEMASK(0x1F, 9)
+#define GLGEN_ANA_TX_PROFIL_CTRL_NUM_CTRL_DOMAIN_S 14
+#define GLGEN_ANA_TX_PROFIL_CTRL_NUM_CTRL_DOMAIN_M MAKEMASK(0x3, 14)
+#define GLGEN_ANA_TX_PROFIL_CTRL_DEF_PROF_ID_S 16
+#define GLGEN_ANA_TX_PROFIL_CTRL_DEF_PROF_ID_M MAKEMASK(0xF, 16)
+#define GLGEN_ANA_TX_PROFIL_CTRL_SEL_DEF_PROF_ID_S 20
+#define GLGEN_ANA_TX_PROFIL_CTRL_SEL_DEF_PROF_ID_M BIT(20)
+#define GLGEN_ASSERT_HLP 0x000B81E4 /* Reset Source: POR */
+#define GLGEN_ASSERT_HLP_CORE_ON_RST_S 0
+#define GLGEN_ASSERT_HLP_CORE_ON_RST_M BIT(0)
+#define GLGEN_ASSERT_HLP_FULL_ON_RST_S 1
+#define GLGEN_ASSERT_HLP_FULL_ON_RST_M BIT(1)
+#define GLGEN_CLKSTAT 0x000B8184 /* Reset Source: POR */
+#define GLGEN_CLKSTAT_U_CLK_SPEED_S 0
+#define GLGEN_CLKSTAT_U_CLK_SPEED_M MAKEMASK(0x7, 0)
+#define GLGEN_CLKSTAT_L_CLK_SPEED_S 3
+#define GLGEN_CLKSTAT_L_CLK_SPEED_M MAKEMASK(0x7, 3)
+#define GLGEN_CLKSTAT_PSM_CLK_SPEED_S 6
+#define GLGEN_CLKSTAT_PSM_CLK_SPEED_M MAKEMASK(0x7, 6)
+#define GLGEN_CLKSTAT_RXCTL_CLK_SPEED_S 9
+#define GLGEN_CLKSTAT_RXCTL_CLK_SPEED_M MAKEMASK(0x7, 9)
+#define GLGEN_CLKSTAT_UANA_CLK_SPEED_S 12
+#define GLGEN_CLKSTAT_UANA_CLK_SPEED_M MAKEMASK(0x7, 12)
+#define GLGEN_CLKSTAT_PE_CLK_SPEED_S 18
+#define GLGEN_CLKSTAT_PE_CLK_SPEED_M MAKEMASK(0x7, 18)
+#define GLGEN_CLKSTAT_SRC 0x000B826C /* Reset Source: POR */
+#define GLGEN_CLKSTAT_SRC_U_CLK_SRC_S 0
+#define GLGEN_CLKSTAT_SRC_U_CLK_SRC_M MAKEMASK(0x3, 0)
+#define GLGEN_CLKSTAT_SRC_L_CLK_SRC_S 2
+#define GLGEN_CLKSTAT_SRC_L_CLK_SRC_M MAKEMASK(0x3, 2)
+#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S 4
+#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M MAKEMASK(0x3, 4)
+#define GLGEN_CLKSTAT_SRC_RXCTL_CLK_SRC_S 6
+#define GLGEN_CLKSTAT_SRC_RXCTL_CLK_SRC_M MAKEMASK(0x3, 6)
+#define GLGEN_CLKSTAT_SRC_UANA_CLK_SRC_S 8
+#define GLGEN_CLKSTAT_SRC_UANA_CLK_SRC_M MAKEMASK(0xF, 8)
+#define GLGEN_ECC_ERR_INT_TOG_MASK_H 0x00093A00 /* Reset Source: CORER */
+#define GLGEN_ECC_ERR_INT_TOG_MASK_H_CLIENT_NUM_S 0
+#define GLGEN_ECC_ERR_INT_TOG_MASK_H_CLIENT_NUM_M MAKEMASK(0x7F, 0)
+#define GLGEN_ECC_ERR_INT_TOG_MASK_L 0x000939FC /* Reset Source: CORER */
+#define GLGEN_ECC_ERR_INT_TOG_MASK_L_CLIENT_NUM_S 0
+#define GLGEN_ECC_ERR_INT_TOG_MASK_L_CLIENT_NUM_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_ECC_ERR_RST_MASK_H 0x000939F8 /* Reset Source: CORER */
+#define GLGEN_ECC_ERR_RST_MASK_H_CLIENT_NUM_S 0
+#define GLGEN_ECC_ERR_RST_MASK_H_CLIENT_NUM_M MAKEMASK(0x7F, 0)
+#define GLGEN_ECC_ERR_RST_MASK_L 0x000939F4 /* Reset Source: CORER */
+#define GLGEN_ECC_ERR_RST_MASK_L_CLIENT_NUM_S 0
+#define GLGEN_ECC_ERR_RST_MASK_L_CLIENT_NUM_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_GPIO_CTL(_i) (0x000880C8 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: POR */
+#define GLGEN_GPIO_CTL_MAX_INDEX 6
+#define GLGEN_GPIO_CTL_IN_VALUE_S 0
+#define GLGEN_GPIO_CTL_IN_VALUE_M BIT(0)
+#define GLGEN_GPIO_CTL_IN_TRANSIT_S 1
+#define GLGEN_GPIO_CTL_IN_TRANSIT_M BIT(1)
+#define GLGEN_GPIO_CTL_OUT_VALUE_S 2
+#define GLGEN_GPIO_CTL_OUT_VALUE_M BIT(2)
+#define GLGEN_GPIO_CTL_NO_P_UP_S 3
+#define GLGEN_GPIO_CTL_NO_P_UP_M BIT(3)
+#define GLGEN_GPIO_CTL_PIN_DIR_S 4
+#define GLGEN_GPIO_CTL_PIN_DIR_M BIT(4)
+#define GLGEN_GPIO_CTL_TRI_CTL_S 5
+#define GLGEN_GPIO_CTL_TRI_CTL_M BIT(5)
+#define GLGEN_GPIO_CTL_PIN_FUNC_S 8
+#define GLGEN_GPIO_CTL_PIN_FUNC_M MAKEMASK(0xF, 8)
+#define GLGEN_GPIO_CTL_INT_MODE_S 12
+#define GLGEN_GPIO_CTL_INT_MODE_M MAKEMASK(0x3, 12)
+#define GLGEN_MARKER_COUNT 0x000939E8 /* Reset Source: CORER */
+#define GLGEN_MARKER_COUNT_MARKER_COUNT_S 0
+#define GLGEN_MARKER_COUNT_MARKER_COUNT_M MAKEMASK(0xFF, 0)
+#define GLGEN_MARKER_COUNT_MARKER_COUNT_EN_S 31
+#define GLGEN_MARKER_COUNT_MARKER_COUNT_EN_M BIT(31)
+#define GLGEN_RSTAT 0x000B8188 /* Reset Source: POR */
+#define GLGEN_RSTAT_DEVSTATE_S 0
+#define GLGEN_RSTAT_DEVSTATE_M MAKEMASK(0x3, 0)
+#define GLGEN_RSTAT_RESET_TYPE_S 2
+#define GLGEN_RSTAT_RESET_TYPE_M MAKEMASK(0x3, 2)
+#define GLGEN_RSTAT_CORERCNT_S 4
+#define GLGEN_RSTAT_CORERCNT_M MAKEMASK(0x3, 4)
+#define GLGEN_RSTAT_GLOBRCNT_S 6
+#define GLGEN_RSTAT_GLOBRCNT_M MAKEMASK(0x3, 6)
+#define GLGEN_RSTAT_EMPRCNT_S 8
+#define GLGEN_RSTAT_EMPRCNT_M MAKEMASK(0x3, 8)
+#define GLGEN_RSTAT_TIME_TO_RST_S 10
+#define GLGEN_RSTAT_TIME_TO_RST_M MAKEMASK(0x3F, 10)
+#define GLGEN_RSTAT_RTRIG_FLR_S 16
+#define GLGEN_RSTAT_RTRIG_FLR_M BIT(16)
+#define GLGEN_RSTAT_RTRIG_ECC_S 17
+#define GLGEN_RSTAT_RTRIG_ECC_M BIT(17)
+#define GLGEN_RSTAT_RTRIG_FW_AUX_S 18
+#define GLGEN_RSTAT_RTRIG_FW_AUX_M BIT(18)
+#define GLGEN_RSTCTL 0x000B8180 /* Reset Source: POR */
+#define GLGEN_RSTCTL_GRSTDEL_S 0
+#define GLGEN_RSTCTL_GRSTDEL_M MAKEMASK(0x3F, 0)
+#define GLGEN_RSTCTL_ECC_RST_ENA_S 8
+#define GLGEN_RSTCTL_ECC_RST_ENA_M BIT(8)
+#define GLGEN_RSTCTL_ECC_RT_EN_S 30
+#define GLGEN_RSTCTL_ECC_RT_EN_M BIT(30)
+#define GLGEN_RSTCTL_FLR_RT_EN_S 31
+#define GLGEN_RSTCTL_FLR_RT_EN_M BIT(31)
+#define GLGEN_RTRIG 0x000B8190 /* Reset Source: CORER */
+#define GLGEN_RTRIG_CORER_S 0
+#define GLGEN_RTRIG_CORER_M BIT(0)
+#define GLGEN_RTRIG_GLOBR_S 1
+#define GLGEN_RTRIG_GLOBR_M BIT(1)
+#define GLGEN_RTRIG_EMPFWR_S 2
+#define GLGEN_RTRIG_EMPFWR_M BIT(2)
+#define GLGEN_STAT 0x000B612C /* Reset Source: POR */
+#define GLGEN_STAT_RSVD4FW_S 0
+#define GLGEN_STAT_RSVD4FW_M MAKEMASK(0xFF, 0)
+#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLGEN_VFLRSTAT_MAX_INDEX 7
+#define GLGEN_VFLRSTAT_VFLRS_S 0
+#define GLGEN_VFLRSTAT_VFLRS_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLGEN_XLR_MSK2HLP_RDY 0x000939F0 /* Reset Source: CORER */
+#define GLGEN_XLR_MSK2HLP_RDY_GLGEN_XLR_MSK2HLP_RDY_S 0
+#define GLGEN_XLR_MSK2HLP_RDY_GLGEN_XLR_MSK2HLP_RDY_M BIT(0)
+#define GLGEN_XLR_TRNS_WAIT_COUNT 0x000939EC /* Reset Source: CORER */
+#define GLGEN_XLR_TRNS_WAIT_COUNT_W_BTWN_TRNS_COUNT_S 0
+#define GLGEN_XLR_TRNS_WAIT_COUNT_W_BTWN_TRNS_COUNT_M MAKEMASK(0x1F, 0)
+#define GLGEN_XLR_TRNS_WAIT_COUNT_W_PEND_TRNS_COUNT_S 8
+#define GLGEN_XLR_TRNS_WAIT_COUNT_W_PEND_TRNS_COUNT_M MAKEMASK(0xFF, 8)
+#define GLVFGEN_TIMER 0x000B8214 /* Reset Source: POR */
+#define GLVFGEN_TIMER_GTIME_S 0
+#define GLVFGEN_TIMER_GTIME_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFGEN_CTRL 0x00091000 /* Reset Source: CORER */
+#define PFGEN_CTRL_PFSWR_S 0
+#define PFGEN_CTRL_PFSWR_M BIT(0)
+#define PFGEN_DRUN 0x00091180 /* Reset Source: CORER */
+#define PFGEN_DRUN_DRVUNLD_S 0
+#define PFGEN_DRUN_DRVUNLD_M BIT(0)
+#define PFGEN_PFRSTAT 0x00091080 /* Reset Source: CORER */
+#define PFGEN_PFRSTAT_PFRD_S 0
+#define PFGEN_PFRSTAT_PFRD_M BIT(0)
+#define PFGEN_PORTNUM 0x001D2400 /* Reset Source: CORER */
+#define PFGEN_PORTNUM_PORT_NUM_S 0
+#define PFGEN_PORTNUM_PORT_NUM_M MAKEMASK(0x7, 0)
+#define PFGEN_STATE 0x00088000 /* Reset Source: CORER */
+#define PFGEN_STATE_PFPEEN_S 0
+#define PFGEN_STATE_PFPEEN_M BIT(0)
+#define PFGEN_STATE_RSVD_S 1
+#define PFGEN_STATE_RSVD_M BIT(1)
+#define PFGEN_STATE_PFLINKEN_S 2
+#define PFGEN_STATE_PFLINKEN_M BIT(2)
+#define PFGEN_STATE_PFSCEN_S 3
+#define PFGEN_STATE_PFSCEN_M BIT(3)
+#define PRT_TCVMLR_DRAIN_CNTR 0x000A21C0 /* Reset Source: CORER */
+#define PRT_TCVMLR_DRAIN_CNTR_CNTR_S 0
+#define PRT_TCVMLR_DRAIN_CNTR_CNTR_M MAKEMASK(0x3FFF, 0)
+#define PRTGEN_CNF 0x000B8120 /* Reset Source: POR */
+#define PRTGEN_CNF_PORT_DIS_S 0
+#define PRTGEN_CNF_PORT_DIS_M BIT(0)
+#define PRTGEN_CNF_ALLOW_PORT_DIS_S 1
+#define PRTGEN_CNF_ALLOW_PORT_DIS_M BIT(1)
+#define PRTGEN_CNF_EMP_PORT_DIS_S 2
+#define PRTGEN_CNF_EMP_PORT_DIS_M BIT(2)
+#define PRTGEN_CNF2 0x000B8160 /* Reset Source: POR */
+#define PRTGEN_CNF2_ACTIVATE_PORT_LINK_S 0
+#define PRTGEN_CNF2_ACTIVATE_PORT_LINK_M BIT(0)
+#define PRTGEN_CNF3 0x000B8280 /* Reset Source: POR */
+#define PRTGEN_CNF3_PORT_STAGERING_EN_S 0
+#define PRTGEN_CNF3_PORT_STAGERING_EN_M BIT(0)
+#define PRTGEN_STATUS 0x000B8100 /* Reset Source: POR */
+#define PRTGEN_STATUS_PORT_VALID_S 0
+#define PRTGEN_STATUS_PORT_VALID_M BIT(0)
+#define PRTGEN_STATUS_PORT_ACTIVE_S 1
+#define PRTGEN_STATUS_PORT_ACTIVE_M BIT(1)
+#define VFGEN_RSTAT(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: VFR */
+#define VFGEN_RSTAT_MAX_INDEX 255
+#define VFGEN_RSTAT_VFR_STATE_S 0
+#define VFGEN_RSTAT_VFR_STATE_M MAKEMASK(0x3, 0)
+#define VPGEN_VFRSTAT(_VF) (0x00090800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPGEN_VFRSTAT_MAX_INDEX 255
+#define VPGEN_VFRSTAT_VFRD_S 0
+#define VPGEN_VFRSTAT_VFRD_M BIT(0)
+#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPGEN_VFRTRIG_MAX_INDEX 255
+#define VPGEN_VFRTRIG_VFSWR_S 0
+#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
+#define VSIGEN_RSTAT(_VSI) (0x00092800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSIGEN_RSTAT_MAX_INDEX 767
+#define VSIGEN_RSTAT_VMRD_S 0
+#define VSIGEN_RSTAT_VMRD_M BIT(0)
+#define VSIGEN_RTRIG(_VSI) (0x00091800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSIGEN_RTRIG_MAX_INDEX 767
+#define VSIGEN_RTRIG_VMSWR_S 0
+#define VSIGEN_RTRIG_VMSWR_M BIT(0)
+#define GLHMC_APBVTINUSEBASE(_i) (0x00524A00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_APBVTINUSEBASE_MAX_INDEX 7
+#define GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_S 0
+#define GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_CEQPART(_i) (0x005031C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_CEQPART_MAX_INDEX 7
+#define GLHMC_CEQPART_PMCEQBASE_S 0
+#define GLHMC_CEQPART_PMCEQBASE_M MAKEMASK(0x3FF, 0)
+#define GLHMC_CEQPART_PMCEQSIZE_S 16
+#define GLHMC_CEQPART_PMCEQSIZE_M MAKEMASK(0x3FF, 16)
+#define GLHMC_DBCQMAX 0x005220F0 /* Reset Source: CORER */
+#define GLHMC_DBCQMAX_GLHMC_DBCQMAX_S 0
+#define GLHMC_DBCQMAX_GLHMC_DBCQMAX_M MAKEMASK(0xFFFFF, 0)
+#define GLHMC_DBCQPART(_i) (0x00503180 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_DBCQPART_MAX_INDEX 7
+#define GLHMC_DBCQPART_PMDBCQBASE_S 0
+#define GLHMC_DBCQPART_PMDBCQBASE_M MAKEMASK(0x3FFF, 0)
+#define GLHMC_DBCQPART_PMDBCQSIZE_S 16
+#define GLHMC_DBCQPART_PMDBCQSIZE_M MAKEMASK(0x7FFF, 16)
+#define GLHMC_DBQPMAX 0x005220EC /* Reset Source: CORER */
+#define GLHMC_DBQPMAX_GLHMC_DBQPMAX_S 0
+#define GLHMC_DBQPMAX_GLHMC_DBQPMAX_M MAKEMASK(0x7FFFF, 0)
+#define GLHMC_DBQPPART(_i) (0x005044C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_DBQPPART_MAX_INDEX 7
+#define GLHMC_DBQPPART_PMDBQPBASE_S 0
+#define GLHMC_DBQPPART_PMDBQPBASE_M MAKEMASK(0x3FFF, 0)
+#define GLHMC_DBQPPART_PMDBQPSIZE_S 16
+#define GLHMC_DBQPPART_PMDBQPSIZE_M MAKEMASK(0x7FFF, 16)
+#define GLHMC_FSIAVBASE(_i) (0x00525600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_FSIAVBASE_MAX_INDEX 7
+#define GLHMC_FSIAVBASE_FPMFSIAVBASE_S 0
+#define GLHMC_FSIAVBASE_FPMFSIAVBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_FSIAVCNT(_i) (0x00525700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_FSIAVCNT_MAX_INDEX 7
+#define GLHMC_FSIAVCNT_FPMFSIAVCNT_S 0
+#define GLHMC_FSIAVCNT_FPMFSIAVCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_FSIAVMAX 0x00522068 /* Reset Source: CORER */
+#define GLHMC_FSIAVMAX_PMFSIAVMAX_S 0
+#define GLHMC_FSIAVMAX_PMFSIAVMAX_M MAKEMASK(0x3FFFF, 0)
+#define GLHMC_FSIAVOBJSZ 0x00522064 /* Reset Source: CORER */
+#define GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_S 0
+#define GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_FSIMCBASE(_i) (0x00526000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_FSIMCBASE_MAX_INDEX 7
+#define GLHMC_FSIMCBASE_FPMFSIMCBASE_S 0
+#define GLHMC_FSIMCBASE_FPMFSIMCBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_FSIMCCNT(_i) (0x00526100 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_FSIMCCNT_MAX_INDEX 7
+#define GLHMC_FSIMCCNT_FPMFSIMCSZ_S 0
+#define GLHMC_FSIMCCNT_FPMFSIMCSZ_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_FSIMCMAX 0x00522060 /* Reset Source: CORER */
+#define GLHMC_FSIMCMAX_PMFSIMCMAX_S 0
+#define GLHMC_FSIMCMAX_PMFSIMCMAX_M MAKEMASK(0x3FFF, 0)
+#define GLHMC_FSIMCOBJSZ 0x0052205C /* Reset Source: CORER */
+#define GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_S 0
+#define GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_FWPDINV 0x0052207C /* Reset Source: CORER */
+#define GLHMC_FWPDINV_PMSDIDX_S 0
+#define GLHMC_FWPDINV_PMSDIDX_M MAKEMASK(0xFFF, 0)
+#define GLHMC_FWPDINV_PMSDPARTSEL_S 15
+#define GLHMC_FWPDINV_PMSDPARTSEL_M BIT(15)
+#define GLHMC_FWPDINV_PMPDIDX_S 16
+#define GLHMC_FWPDINV_PMPDIDX_M MAKEMASK(0x1FF, 16)
+#define GLHMC_FWPDINV_FPMAT 0x0010207C /* Reset Source: CORER */
+#define GLHMC_FWPDINV_FPMAT_PMSDIDX_S 0
+#define GLHMC_FWPDINV_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0)
+#define GLHMC_FWPDINV_FPMAT_PMSDPARTSEL_S 15
+#define GLHMC_FWPDINV_FPMAT_PMSDPARTSEL_M BIT(15)
+#define GLHMC_FWPDINV_FPMAT_PMPDIDX_S 16
+#define GLHMC_FWPDINV_FPMAT_PMPDIDX_M MAKEMASK(0x1FF, 16)
+#define GLHMC_FWSDDATAHIGH 0x00522078 /* Reset Source: CORER */
+#define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_S 0
+#define GLHMC_FWSDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_FWSDDATAHIGH_FPMAT 0x00102078 /* Reset Source: CORER */
+#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0
+#define GLHMC_FWSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_FWSDDATALOW 0x00522074 /* Reset Source: CORER */
+#define GLHMC_FWSDDATALOW_PMSDVALID_S 0
+#define GLHMC_FWSDDATALOW_PMSDVALID_M BIT(0)
+#define GLHMC_FWSDDATALOW_PMSDTYPE_S 1
+#define GLHMC_FWSDDATALOW_PMSDTYPE_M BIT(1)
+#define GLHMC_FWSDDATALOW_PMSDBPCOUNT_S 2
+#define GLHMC_FWSDDATALOW_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2)
+#define GLHMC_FWSDDATALOW_PMSDDATALOW_S 12
+#define GLHMC_FWSDDATALOW_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12)
+#define GLHMC_FWSDDATALOW_FPMAT 0x00102074 /* Reset Source: CORER */
+#define GLHMC_FWSDDATALOW_FPMAT_PMSDVALID_S 0
+#define GLHMC_FWSDDATALOW_FPMAT_PMSDVALID_M BIT(0)
+#define GLHMC_FWSDDATALOW_FPMAT_PMSDTYPE_S 1
+#define GLHMC_FWSDDATALOW_FPMAT_PMSDTYPE_M BIT(1)
+#define GLHMC_FWSDDATALOW_FPMAT_PMSDBPCOUNT_S 2
+#define GLHMC_FWSDDATALOW_FPMAT_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2)
+#define GLHMC_FWSDDATALOW_FPMAT_PMSDDATALOW_S 12
+#define GLHMC_FWSDDATALOW_FPMAT_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12)
+#define GLHMC_PEARPBASE(_i) (0x00524800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEARPBASE_MAX_INDEX 7
+#define GLHMC_PEARPBASE_FPMPEARPBASE_S 0
+#define GLHMC_PEARPBASE_FPMPEARPBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEARPCNT(_i) (0x00524900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEARPCNT_MAX_INDEX 7
+#define GLHMC_PEARPCNT_FPMPEARPCNT_S 0
+#define GLHMC_PEARPCNT_FPMPEARPCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEARPMAX 0x00522038 /* Reset Source: CORER */
+#define GLHMC_PEARPMAX_PMPEARPMAX_S 0
+#define GLHMC_PEARPMAX_PMPEARPMAX_M MAKEMASK(0x1FFFF, 0)
+#define GLHMC_PEARPOBJSZ 0x00522034 /* Reset Source: CORER */
+#define GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_S 0
+#define GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_M MAKEMASK(0x7, 0)
+#define GLHMC_PECQBASE(_i) (0x00524200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PECQBASE_MAX_INDEX 7
+#define GLHMC_PECQBASE_FPMPECQBASE_S 0
+#define GLHMC_PECQBASE_FPMPECQBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PECQCNT(_i) (0x00524300 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PECQCNT_MAX_INDEX 7
+#define GLHMC_PECQCNT_FPMPECQCNT_S 0
+#define GLHMC_PECQCNT_FPMPECQCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PECQOBJSZ 0x00522020 /* Reset Source: CORER */
+#define GLHMC_PECQOBJSZ_PMPECQOBJSZ_S 0
+#define GLHMC_PECQOBJSZ_PMPECQOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PEHDRBASE(_i) (0x00526200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEHDRBASE_MAX_INDEX 7
+#define GLHMC_PEHDRBASE_GLHMC_PEHDRBASE_S 0
+#define GLHMC_PEHDRBASE_GLHMC_PEHDRBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PEHDRCNT(_i) (0x00526300 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEHDRCNT_MAX_INDEX 7
+#define GLHMC_PEHDRCNT_GLHMC_PEHDRCNT_S 0
+#define GLHMC_PEHDRCNT_GLHMC_PEHDRCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PEHDRMAX 0x00522008 /* Reset Source: CORER */
+#define GLHMC_PEHDRMAX_PMPEHDRMAX_S 0
+#define GLHMC_PEHDRMAX_PMPEHDRMAX_M MAKEMASK(0x7FFFF, 0)
+#define GLHMC_PEHDRMAX_RSVD_S 19
+#define GLHMC_PEHDRMAX_RSVD_M MAKEMASK(0x1FFF, 19)
+#define GLHMC_PEHDROBJSZ 0x00522004 /* Reset Source: CORER */
+#define GLHMC_PEHDROBJSZ_PMPEHDROBJSZ_S 0
+#define GLHMC_PEHDROBJSZ_PMPEHDROBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PEHDROBJSZ_RSVD_S 4
+#define GLHMC_PEHDROBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4)
+#define GLHMC_PEHTCNT(_i) (0x00524700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEHTCNT_MAX_INDEX 7
+#define GLHMC_PEHTCNT_FPMPEHTCNT_S 0
+#define GLHMC_PEHTCNT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEHTCNT_FPMAT(_i) (0x00104700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEHTCNT_FPMAT_MAX_INDEX 7
+#define GLHMC_PEHTCNT_FPMAT_FPMPEHTCNT_S 0
+#define GLHMC_PEHTCNT_FPMAT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEHTEBASE(_i) (0x00524600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEHTEBASE_MAX_INDEX 7
+#define GLHMC_PEHTEBASE_FPMPEHTEBASE_S 0
+#define GLHMC_PEHTEBASE_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEHTEBASE_FPMAT(_i) (0x00104600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEHTEBASE_FPMAT_MAX_INDEX 7
+#define GLHMC_PEHTEBASE_FPMAT_FPMPEHTEBASE_S 0
+#define GLHMC_PEHTEBASE_FPMAT_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEHTEOBJSZ 0x0052202C /* Reset Source: CORER */
+#define GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_S 0
+#define GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PEHTEOBJSZ_FPMAT 0x0010202C /* Reset Source: CORER */
+#define GLHMC_PEHTEOBJSZ_FPMAT_PMPEHTEOBJSZ_S 0
+#define GLHMC_PEHTEOBJSZ_FPMAT_PMPEHTEOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PEHTMAX 0x00522030 /* Reset Source: CORER */
+#define GLHMC_PEHTMAX_PMPEHTMAX_S 0
+#define GLHMC_PEHTMAX_PMPEHTMAX_M MAKEMASK(0x1FFFFF, 0)
+#define GLHMC_PEHTMAX_FPMAT 0x00102030 /* Reset Source: CORER */
+#define GLHMC_PEHTMAX_FPMAT_PMPEHTMAX_S 0
+#define GLHMC_PEHTMAX_FPMAT_PMPEHTMAX_M MAKEMASK(0x1FFFFF, 0)
+#define GLHMC_PEMDBASE(_i) (0x00526400 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEMDBASE_MAX_INDEX 7
+#define GLHMC_PEMDBASE_GLHMC_PEMDBASE_S 0
+#define GLHMC_PEMDBASE_GLHMC_PEMDBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PEMDCNT(_i) (0x00526500 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEMDCNT_MAX_INDEX 7
+#define GLHMC_PEMDCNT_GLHMC_PEMDCNT_S 0
+#define GLHMC_PEMDCNT_GLHMC_PEMDCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PEMDMAX 0x00522010 /* Reset Source: CORER */
+#define GLHMC_PEMDMAX_PMPEMDMAX_S 0
+#define GLHMC_PEMDMAX_PMPEMDMAX_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEMDMAX_RSVD_S 24
+#define GLHMC_PEMDMAX_RSVD_M MAKEMASK(0xFF, 24)
+#define GLHMC_PEMDOBJSZ 0x0052200C /* Reset Source: CORER */
+#define GLHMC_PEMDOBJSZ_PMPEMDOBJSZ_S 0
+#define GLHMC_PEMDOBJSZ_PMPEMDOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PEMDOBJSZ_RSVD_S 4
+#define GLHMC_PEMDOBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4)
+#define GLHMC_PEMRBASE(_i) (0x00524C00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEMRBASE_MAX_INDEX 7
+#define GLHMC_PEMRBASE_FPMPEMRBASE_S 0
+#define GLHMC_PEMRBASE_FPMPEMRBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEMRCNT(_i) (0x00524D00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEMRCNT_MAX_INDEX 7
+#define GLHMC_PEMRCNT_FPMPEMRSZ_S 0
+#define GLHMC_PEMRCNT_FPMPEMRSZ_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEMRMAX 0x00522040 /* Reset Source: CORER */
+#define GLHMC_PEMRMAX_PMPEMRMAX_S 0
+#define GLHMC_PEMRMAX_PMPEMRMAX_M MAKEMASK(0x7FFFFF, 0)
+#define GLHMC_PEMROBJSZ 0x0052203C /* Reset Source: CORER */
+#define GLHMC_PEMROBJSZ_PMPEMROBJSZ_S 0
+#define GLHMC_PEMROBJSZ_PMPEMROBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PEOOISCBASE(_i) (0x00526600 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEOOISCBASE_MAX_INDEX 7
+#define GLHMC_PEOOISCBASE_GLHMC_PEOOISCBASE_S 0
+#define GLHMC_PEOOISCBASE_GLHMC_PEOOISCBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PEOOISCCNT(_i) (0x00526700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEOOISCCNT_MAX_INDEX 7
+#define GLHMC_PEOOISCCNT_GLHMC_PEOOISCCNT_S 0
+#define GLHMC_PEOOISCCNT_GLHMC_PEOOISCCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PEOOISCFFLBASE(_i) (0x00526C00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEOOISCFFLBASE_MAX_INDEX 7
+#define GLHMC_PEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_S 0
+#define GLHMC_PEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PEOOISCFFLCNT_PMAT(_i) (0x00526D00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEOOISCFFLCNT_PMAT_MAX_INDEX 7
+#define GLHMC_PEOOISCFFLCNT_PMAT_FPMPEOOISCFLCNT_S 0
+#define GLHMC_PEOOISCFFLCNT_PMAT_FPMPEOOISCFLCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEOOISCFFLMAX 0x005220A4 /* Reset Source: CORER */
+#define GLHMC_PEOOISCFFLMAX_PMPEOOISCFFLMAX_S 0
+#define GLHMC_PEOOISCFFLMAX_PMPEOOISCFFLMAX_M MAKEMASK(0x7FFFF, 0)
+#define GLHMC_PEOOISCFFLMAX_RSVD_S 19
+#define GLHMC_PEOOISCFFLMAX_RSVD_M MAKEMASK(0x1FFF, 19)
+#define GLHMC_PEOOISCMAX 0x00522018 /* Reset Source: CORER */
+#define GLHMC_PEOOISCMAX_PMPEOOISCMAX_S 0
+#define GLHMC_PEOOISCMAX_PMPEOOISCMAX_M MAKEMASK(0x7FFFF, 0)
+#define GLHMC_PEOOISCMAX_RSVD_S 19
+#define GLHMC_PEOOISCMAX_RSVD_M MAKEMASK(0x1FFF, 19)
+#define GLHMC_PEOOISCOBJSZ 0x00522014 /* Reset Source: CORER */
+#define GLHMC_PEOOISCOBJSZ_PMPEOOISCOBJSZ_S 0
+#define GLHMC_PEOOISCOBJSZ_PMPEOOISCOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PEOOISCOBJSZ_RSVD_S 4
+#define GLHMC_PEOOISCOBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4)
+#define GLHMC_PEPBLBASE(_i) (0x00525800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEPBLBASE_MAX_INDEX 7
+#define GLHMC_PEPBLBASE_FPMPEPBLBASE_S 0
+#define GLHMC_PEPBLBASE_FPMPEPBLBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEPBLCNT(_i) (0x00525900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEPBLCNT_MAX_INDEX 7
+#define GLHMC_PEPBLCNT_FPMPEPBLCNT_S 0
+#define GLHMC_PEPBLCNT_FPMPEPBLCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEPBLMAX 0x0052206C /* Reset Source: CORER */
+#define GLHMC_PEPBLMAX_PMPEPBLMAX_S 0
+#define GLHMC_PEPBLMAX_PMPEPBLMAX_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEQ1BASE(_i) (0x00525200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEQ1BASE_MAX_INDEX 7
+#define GLHMC_PEQ1BASE_FPMPEQ1BASE_S 0
+#define GLHMC_PEQ1BASE_FPMPEQ1BASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEQ1CNT(_i) (0x00525300 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEQ1CNT_MAX_INDEX 7
+#define GLHMC_PEQ1CNT_FPMPEQ1CNT_S 0
+#define GLHMC_PEQ1CNT_FPMPEQ1CNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEQ1FLBASE(_i) (0x00525400 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEQ1FLBASE_MAX_INDEX 7
+#define GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_S 0
+#define GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEQ1FLMAX 0x00522058 /* Reset Source: CORER */
+#define GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_S 0
+#define GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_M MAKEMASK(0x3FFFFFF, 0)
+#define GLHMC_PEQ1MAX 0x00522054 /* Reset Source: CORER */
+#define GLHMC_PEQ1MAX_PMPEQ1MAX_S 0
+#define GLHMC_PEQ1MAX_PMPEQ1MAX_M MAKEMASK(0xFFFFFFF, 0)
+#define GLHMC_PEQ1OBJSZ 0x00522050 /* Reset Source: CORER */
+#define GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_S 0
+#define GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PEQPBASE(_i) (0x00524000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEQPBASE_MAX_INDEX 7
+#define GLHMC_PEQPBASE_FPMPEQPBASE_S 0
+#define GLHMC_PEQPBASE_FPMPEQPBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEQPCNT(_i) (0x00524100 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEQPCNT_MAX_INDEX 7
+#define GLHMC_PEQPCNT_FPMPEQPCNT_S 0
+#define GLHMC_PEQPCNT_FPMPEQPCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEQPOBJSZ 0x0052201C /* Reset Source: CORER */
+#define GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_S 0
+#define GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PERRFBASE(_i) (0x00526800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PERRFBASE_MAX_INDEX 7
+#define GLHMC_PERRFBASE_GLHMC_PERRFBASE_S 0
+#define GLHMC_PERRFBASE_GLHMC_PERRFBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PERRFCNT(_i) (0x00526900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PERRFCNT_MAX_INDEX 7
+#define GLHMC_PERRFCNT_GLHMC_PERRFCNT_S 0
+#define GLHMC_PERRFCNT_GLHMC_PERRFCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PERRFFLBASE(_i) (0x00526A00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PERRFFLBASE_MAX_INDEX 7
+#define GLHMC_PERRFFLBASE_GLHMC_PERRFFLBASE_S 0
+#define GLHMC_PERRFFLBASE_GLHMC_PERRFFLBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_PERRFFLCNT_PMAT(_i) (0x00526B00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PERRFFLCNT_PMAT_MAX_INDEX 7
+#define GLHMC_PERRFFLCNT_PMAT_FPMPERRFFLCNT_S 0
+#define GLHMC_PERRFFLCNT_PMAT_FPMPERRFFLCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PERRFFLMAX 0x005220A0 /* Reset Source: CORER */
+#define GLHMC_PERRFFLMAX_PMPERRFFLMAX_S 0
+#define GLHMC_PERRFFLMAX_PMPERRFFLMAX_M MAKEMASK(0x3FFFFFF, 0)
+#define GLHMC_PERRFFLMAX_RSVD_S 26
+#define GLHMC_PERRFFLMAX_RSVD_M MAKEMASK(0x3F, 26)
+#define GLHMC_PERRFMAX 0x0052209C /* Reset Source: CORER */
+#define GLHMC_PERRFMAX_PMPERRFMAX_S 0
+#define GLHMC_PERRFMAX_PMPERRFMAX_M MAKEMASK(0xFFFFFFF, 0)
+#define GLHMC_PERRFMAX_RSVD_S 28
+#define GLHMC_PERRFMAX_RSVD_M MAKEMASK(0xF, 28)
+#define GLHMC_PERRFOBJSZ 0x00522098 /* Reset Source: CORER */
+#define GLHMC_PERRFOBJSZ_PMPERRFOBJSZ_S 0
+#define GLHMC_PERRFOBJSZ_PMPERRFOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PERRFOBJSZ_RSVD_S 4
+#define GLHMC_PERRFOBJSZ_RSVD_M MAKEMASK(0xFFFFFFF, 4)
+#define GLHMC_PETIMERBASE(_i) (0x00525A00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PETIMERBASE_MAX_INDEX 7
+#define GLHMC_PETIMERBASE_FPMPETIMERBASE_S 0
+#define GLHMC_PETIMERBASE_FPMPETIMERBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PETIMERCNT(_i) (0x00525B00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PETIMERCNT_MAX_INDEX 7
+#define GLHMC_PETIMERCNT_FPMPETIMERCNT_S 0
+#define GLHMC_PETIMERCNT_FPMPETIMERCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PETIMERMAX 0x00522084 /* Reset Source: CORER */
+#define GLHMC_PETIMERMAX_PMPETIMERMAX_S 0
+#define GLHMC_PETIMERMAX_PMPETIMERMAX_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PETIMEROBJSZ 0x00522080 /* Reset Source: CORER */
+#define GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_S 0
+#define GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PEXFBASE(_i) (0x00524E00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEXFBASE_MAX_INDEX 7
+#define GLHMC_PEXFBASE_FPMPEXFBASE_S 0
+#define GLHMC_PEXFBASE_FPMPEXFBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEXFCNT(_i) (0x00524F00 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEXFCNT_MAX_INDEX 7
+#define GLHMC_PEXFCNT_FPMPEXFCNT_S 0
+#define GLHMC_PEXFCNT_FPMPEXFCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_PEXFFLBASE(_i) (0x00525000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PEXFFLBASE_MAX_INDEX 7
+#define GLHMC_PEXFFLBASE_FPMPEXFFLBASE_S 0
+#define GLHMC_PEXFFLBASE_FPMPEXFFLBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_PEXFFLMAX 0x0052204C /* Reset Source: CORER */
+#define GLHMC_PEXFFLMAX_PMPEXFFLMAX_S 0
+#define GLHMC_PEXFFLMAX_PMPEXFFLMAX_M MAKEMASK(0xFFFFFFF, 0)
+#define GLHMC_PEXFMAX 0x00522048 /* Reset Source: CORER */
+#define GLHMC_PEXFMAX_PMPEXFMAX_S 0
+#define GLHMC_PEXFMAX_PMPEXFMAX_M MAKEMASK(0xFFFFFFF, 0)
+#define GLHMC_PEXFOBJSZ 0x00522044 /* Reset Source: CORER */
+#define GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_S 0
+#define GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_M MAKEMASK(0xF, 0)
+#define GLHMC_PFPESDPART(_i) (0x00520880 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PFPESDPART_MAX_INDEX 7
+#define GLHMC_PFPESDPART_PMSDBASE_S 0
+#define GLHMC_PFPESDPART_PMSDBASE_M MAKEMASK(0xFFF, 0)
+#define GLHMC_PFPESDPART_PMSDSIZE_S 16
+#define GLHMC_PFPESDPART_PMSDSIZE_M MAKEMASK(0x1FFF, 16)
+#define GLHMC_PFPESDPART_FPMAT(_i) (0x00100880 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_PFPESDPART_FPMAT_MAX_INDEX 7
+#define GLHMC_PFPESDPART_FPMAT_PMSDBASE_S 0
+#define GLHMC_PFPESDPART_FPMAT_PMSDBASE_M MAKEMASK(0xFFF, 0)
+#define GLHMC_PFPESDPART_FPMAT_PMSDSIZE_S 16
+#define GLHMC_PFPESDPART_FPMAT_PMSDSIZE_M MAKEMASK(0x1FFF, 16)
+#define GLHMC_SDPART(_i) (0x00520800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_SDPART_MAX_INDEX 7
+#define GLHMC_SDPART_PMSDBASE_S 0
+#define GLHMC_SDPART_PMSDBASE_M MAKEMASK(0xFFF, 0)
+#define GLHMC_SDPART_PMSDSIZE_S 16
+#define GLHMC_SDPART_PMSDSIZE_M MAKEMASK(0x1FFF, 16)
+#define GLHMC_SDPART_FPMAT(_i) (0x00100800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLHMC_SDPART_FPMAT_MAX_INDEX 7
+#define GLHMC_SDPART_FPMAT_PMSDBASE_S 0
+#define GLHMC_SDPART_FPMAT_PMSDBASE_M MAKEMASK(0xFFF, 0)
+#define GLHMC_SDPART_FPMAT_PMSDSIZE_S 16
+#define GLHMC_SDPART_FPMAT_PMSDSIZE_M MAKEMASK(0x1FFF, 16)
+#define GLHMC_VFAPBVTINUSEBASE(_i) (0x0052CA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_S 0
+#define GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFCEQPART(_i) (0x00502F00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFCEQPART_MAX_INDEX 31
+#define GLHMC_VFCEQPART_PMCEQBASE_S 0
+#define GLHMC_VFCEQPART_PMCEQBASE_M MAKEMASK(0x3FF, 0)
+#define GLHMC_VFCEQPART_PMCEQSIZE_S 16
+#define GLHMC_VFCEQPART_PMCEQSIZE_M MAKEMASK(0x3FF, 16)
+#define GLHMC_VFDBCQPART(_i) (0x00502E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFDBCQPART_MAX_INDEX 31
+#define GLHMC_VFDBCQPART_PMDBCQBASE_S 0
+#define GLHMC_VFDBCQPART_PMDBCQBASE_M MAKEMASK(0x3FFF, 0)
+#define GLHMC_VFDBCQPART_PMDBCQSIZE_S 16
+#define GLHMC_VFDBCQPART_PMDBCQSIZE_M MAKEMASK(0x7FFF, 16)
+#define GLHMC_VFDBQPPART(_i) (0x00504520 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFDBQPPART_MAX_INDEX 31
+#define GLHMC_VFDBQPPART_PMDBQPBASE_S 0
+#define GLHMC_VFDBQPPART_PMDBQPBASE_M MAKEMASK(0x3FFF, 0)
+#define GLHMC_VFDBQPPART_PMDBQPSIZE_S 16
+#define GLHMC_VFDBQPPART_PMDBQPSIZE_M MAKEMASK(0x7FFF, 16)
+#define GLHMC_VFFSIAVBASE(_i) (0x0052D600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define GLHMC_VFFSIAVBASE_FPMFSIAVBASE_S 0
+#define GLHMC_VFFSIAVBASE_FPMFSIAVBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFFSIAVCNT(_i) (0x0052D700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define GLHMC_VFFSIAVCNT_FPMFSIAVCNT_S 0
+#define GLHMC_VFFSIAVCNT_FPMFSIAVCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFFSIMCBASE(_i) (0x0052E000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFFSIMCBASE_MAX_INDEX 31
+#define GLHMC_VFFSIMCBASE_FPMFSIMCBASE_S 0
+#define GLHMC_VFFSIMCBASE_FPMFSIMCBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFFSIMCCNT(_i) (0x0052E100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFFSIMCCNT_MAX_INDEX 31
+#define GLHMC_VFFSIMCCNT_FPMFSIMCSZ_S 0
+#define GLHMC_VFFSIMCCNT_FPMFSIMCSZ_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPDINV(_i) (0x00528300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPDINV_MAX_INDEX 31
+#define GLHMC_VFPDINV_PMSDIDX_S 0
+#define GLHMC_VFPDINV_PMSDIDX_M MAKEMASK(0xFFF, 0)
+#define GLHMC_VFPDINV_PMSDPARTSEL_S 15
+#define GLHMC_VFPDINV_PMSDPARTSEL_M BIT(15)
+#define GLHMC_VFPDINV_PMPDIDX_S 16
+#define GLHMC_VFPDINV_PMPDIDX_M MAKEMASK(0x1FF, 16)
+#define GLHMC_VFPDINV_FPMAT(_i) (0x00108300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPDINV_FPMAT_MAX_INDEX 31
+#define GLHMC_VFPDINV_FPMAT_PMSDIDX_S 0
+#define GLHMC_VFPDINV_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0)
+#define GLHMC_VFPDINV_FPMAT_PMSDPARTSEL_S 15
+#define GLHMC_VFPDINV_FPMAT_PMSDPARTSEL_M BIT(15)
+#define GLHMC_VFPDINV_FPMAT_PMPDIDX_S 16
+#define GLHMC_VFPDINV_FPMAT_PMPDIDX_M MAKEMASK(0x1FF, 16)
+#define GLHMC_VFPEARPBASE(_i) (0x0052C800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define GLHMC_VFPEARPBASE_FPMPEARPBASE_S 0
+#define GLHMC_VFPEARPBASE_FPMPEARPBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPEARPCNT(_i) (0x0052C900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define GLHMC_VFPEARPCNT_FPMPEARPCNT_S 0
+#define GLHMC_VFPEARPCNT_FPMPEARPCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPECQBASE(_i) (0x0052C200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPECQBASE_MAX_INDEX 31
+#define GLHMC_VFPECQBASE_FPMPECQBASE_S 0
+#define GLHMC_VFPECQBASE_FPMPECQBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPECQCNT(_i) (0x0052C300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPECQCNT_MAX_INDEX 31
+#define GLHMC_VFPECQCNT_FPMPECQCNT_S 0
+#define GLHMC_VFPECQCNT_FPMPECQCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPEHDRBASE(_i) (0x0052E200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEHDRBASE_MAX_INDEX 31
+#define GLHMC_VFPEHDRBASE_GLHMC_PEHDRBASE_S 0
+#define GLHMC_VFPEHDRBASE_GLHMC_PEHDRBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPEHDRCNT(_i) (0x0052E300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEHDRCNT_MAX_INDEX 31
+#define GLHMC_VFPEHDRCNT_GLHMC_PEHDRCNT_S 0
+#define GLHMC_VFPEHDRCNT_GLHMC_PEHDRCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPEHTCNT(_i) (0x0052C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define GLHMC_VFPEHTCNT_FPMPEHTCNT_S 0
+#define GLHMC_VFPEHTCNT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPEHTCNT_FPMAT(_i) (0x0010C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEHTCNT_FPMAT_MAX_INDEX 31
+#define GLHMC_VFPEHTCNT_FPMAT_FPMPEHTCNT_S 0
+#define GLHMC_VFPEHTCNT_FPMAT_FPMPEHTCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPEHTEBASE(_i) (0x0052C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define GLHMC_VFPEHTEBASE_FPMPEHTEBASE_S 0
+#define GLHMC_VFPEHTEBASE_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPEHTEBASE_FPMAT(_i) (0x0010C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEHTEBASE_FPMAT_MAX_INDEX 31
+#define GLHMC_VFPEHTEBASE_FPMAT_FPMPEHTEBASE_S 0
+#define GLHMC_VFPEHTEBASE_FPMAT_FPMPEHTEBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPEMDBASE(_i) (0x0052E400 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEMDBASE_MAX_INDEX 31
+#define GLHMC_VFPEMDBASE_GLHMC_PEMDBASE_S 0
+#define GLHMC_VFPEMDBASE_GLHMC_PEMDBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPEMDCNT(_i) (0x0052E500 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEMDCNT_MAX_INDEX 31
+#define GLHMC_VFPEMDCNT_GLHMC_PEMDCNT_S 0
+#define GLHMC_VFPEMDCNT_GLHMC_PEMDCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPEMRBASE(_i) (0x0052CC00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define GLHMC_VFPEMRBASE_FPMPEMRBASE_S 0
+#define GLHMC_VFPEMRBASE_FPMPEMRBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPEMRCNT(_i) (0x0052CD00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define GLHMC_VFPEMRCNT_FPMPEMRSZ_S 0
+#define GLHMC_VFPEMRCNT_FPMPEMRSZ_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPEOOISCBASE(_i) (0x0052E600 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEOOISCBASE_MAX_INDEX 31
+#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_S 0
+#define GLHMC_VFPEOOISCBASE_GLHMC_PEOOISCBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPEOOISCCNT(_i) (0x0052E700 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEOOISCCNT_MAX_INDEX 31
+#define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_S 0
+#define GLHMC_VFPEOOISCCNT_GLHMC_PEOOISCCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPEOOISCFFLBASE(_i) (0x0052EC00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEOOISCFFLBASE_MAX_INDEX 31
+#define GLHMC_VFPEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_S 0
+#define GLHMC_VFPEOOISCFFLBASE_GLHMC_PEOOISCFFLBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPEPBLBASE(_i) (0x0052D800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define GLHMC_VFPEPBLBASE_FPMPEPBLBASE_S 0
+#define GLHMC_VFPEPBLBASE_FPMPEPBLBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPEPBLCNT(_i) (0x0052D900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define GLHMC_VFPEPBLCNT_FPMPEPBLCNT_S 0
+#define GLHMC_VFPEPBLCNT_FPMPEPBLCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPEQ1BASE(_i) (0x0052D200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define GLHMC_VFPEQ1BASE_FPMPEQ1BASE_S 0
+#define GLHMC_VFPEQ1BASE_FPMPEQ1BASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPEQ1CNT(_i) (0x0052D300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define GLHMC_VFPEQ1CNT_FPMPEQ1CNT_S 0
+#define GLHMC_VFPEQ1CNT_FPMPEQ1CNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPEQ1FLBASE(_i) (0x0052D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_S 0
+#define GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPEQPBASE(_i) (0x0052C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define GLHMC_VFPEQPBASE_FPMPEQPBASE_S 0
+#define GLHMC_VFPEQPBASE_FPMPEQPBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPEQPCNT(_i) (0x0052C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define GLHMC_VFPEQPCNT_FPMPEQPCNT_S 0
+#define GLHMC_VFPEQPCNT_FPMPEQPCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPERRFBASE(_i) (0x0052E800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPERRFBASE_MAX_INDEX 31
+#define GLHMC_VFPERRFBASE_GLHMC_PERRFBASE_S 0
+#define GLHMC_VFPERRFBASE_GLHMC_PERRFBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPERRFCNT(_i) (0x0052E900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPERRFCNT_MAX_INDEX 31
+#define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_S 0
+#define GLHMC_VFPERRFCNT_GLHMC_PERRFCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPERRFFLBASE(_i) (0x0052EA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPERRFFLBASE_MAX_INDEX 31
+#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_S 0
+#define GLHMC_VFPERRFFLBASE_GLHMC_PERRFFLBASE_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFPETIMERBASE(_i) (0x0052DA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_S 0
+#define GLHMC_VFPETIMERBASE_FPMPETIMERBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPETIMERCNT(_i) (0x0052DB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define GLHMC_VFPETIMERCNT_FPMPETIMERCNT_S 0
+#define GLHMC_VFPETIMERCNT_FPMPETIMERCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPEXFBASE(_i) (0x0052CE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define GLHMC_VFPEXFBASE_FPMPEXFBASE_S 0
+#define GLHMC_VFPEXFBASE_FPMPEXFBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFPEXFCNT(_i) (0x0052CF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define GLHMC_VFPEXFCNT_FPMPEXFCNT_S 0
+#define GLHMC_VFPEXFCNT_FPMPEXFCNT_M MAKEMASK(0x1FFFFFFF, 0)
+#define GLHMC_VFPEXFFLBASE(_i) (0x0052D000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_S 0
+#define GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_M MAKEMASK(0xFFFFFF, 0)
+#define GLHMC_VFSDDATAHIGH(_i) (0x00528200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFSDDATAHIGH_MAX_INDEX 31
+#define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_S 0
+#define GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFSDDATAHIGH_FPMAT(_i) (0x00108200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFSDDATAHIGH_FPMAT_MAX_INDEX 31
+#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0
+#define GLHMC_VFSDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHMC_VFSDDATALOW(_i) (0x00528100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFSDDATALOW_MAX_INDEX 31
+#define GLHMC_VFSDDATALOW_PMSDVALID_S 0
+#define GLHMC_VFSDDATALOW_PMSDVALID_M BIT(0)
+#define GLHMC_VFSDDATALOW_PMSDTYPE_S 1
+#define GLHMC_VFSDDATALOW_PMSDTYPE_M BIT(1)
+#define GLHMC_VFSDDATALOW_PMSDBPCOUNT_S 2
+#define GLHMC_VFSDDATALOW_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2)
+#define GLHMC_VFSDDATALOW_PMSDDATALOW_S 12
+#define GLHMC_VFSDDATALOW_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12)
+#define GLHMC_VFSDDATALOW_FPMAT(_i) (0x00108100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFSDDATALOW_FPMAT_MAX_INDEX 31
+#define GLHMC_VFSDDATALOW_FPMAT_PMSDVALID_S 0
+#define GLHMC_VFSDDATALOW_FPMAT_PMSDVALID_M BIT(0)
+#define GLHMC_VFSDDATALOW_FPMAT_PMSDTYPE_S 1
+#define GLHMC_VFSDDATALOW_FPMAT_PMSDTYPE_M BIT(1)
+#define GLHMC_VFSDDATALOW_FPMAT_PMSDBPCOUNT_S 2
+#define GLHMC_VFSDDATALOW_FPMAT_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2)
+#define GLHMC_VFSDDATALOW_FPMAT_PMSDDATALOW_S 12
+#define GLHMC_VFSDDATALOW_FPMAT_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12)
+#define GLHMC_VFSDPART(_i) (0x00528800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFSDPART_MAX_INDEX 31
+#define GLHMC_VFSDPART_PMSDBASE_S 0
+#define GLHMC_VFSDPART_PMSDBASE_M MAKEMASK(0xFFF, 0)
+#define GLHMC_VFSDPART_PMSDSIZE_S 16
+#define GLHMC_VFSDPART_PMSDSIZE_M MAKEMASK(0x1FFF, 16)
+#define GLHMC_VFSDPART_FPMAT(_i) (0x00108800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLHMC_VFSDPART_FPMAT_MAX_INDEX 31
+#define GLHMC_VFSDPART_FPMAT_PMSDBASE_S 0
+#define GLHMC_VFSDPART_FPMAT_PMSDBASE_M MAKEMASK(0xFFF, 0)
+#define GLHMC_VFSDPART_FPMAT_PMSDSIZE_S 16
+#define GLHMC_VFSDPART_FPMAT_PMSDSIZE_M MAKEMASK(0x1FFF, 16)
+#define GLMDOC_CACHESIZE 0x0051C06C /* Reset Source: CORER */
+#define GLMDOC_CACHESIZE_WORD_SIZE_S 0
+#define GLMDOC_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0)
+#define GLMDOC_CACHESIZE_SETS_S 8
+#define GLMDOC_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8)
+#define GLMDOC_CACHESIZE_WAYS_S 20
+#define GLMDOC_CACHESIZE_WAYS_M MAKEMASK(0xF, 20)
+#define GLPBLOC0_CACHESIZE 0x00518074 /* Reset Source: CORER */
+#define GLPBLOC0_CACHESIZE_WORD_SIZE_S 0
+#define GLPBLOC0_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0)
+#define GLPBLOC0_CACHESIZE_SETS_S 8
+#define GLPBLOC0_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8)
+#define GLPBLOC0_CACHESIZE_WAYS_S 20
+#define GLPBLOC0_CACHESIZE_WAYS_M MAKEMASK(0xF, 20)
+#define GLPBLOC1_CACHESIZE 0x0051A074 /* Reset Source: CORER */
+#define GLPBLOC1_CACHESIZE_WORD_SIZE_S 0
+#define GLPBLOC1_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0)
+#define GLPBLOC1_CACHESIZE_SETS_S 8
+#define GLPBLOC1_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8)
+#define GLPBLOC1_CACHESIZE_WAYS_S 20
+#define GLPBLOC1_CACHESIZE_WAYS_M MAKEMASK(0xF, 20)
+#define GLPDOC_CACHESIZE 0x00530048 /* Reset Source: CORER */
+#define GLPDOC_CACHESIZE_WORD_SIZE_S 0
+#define GLPDOC_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0)
+#define GLPDOC_CACHESIZE_SETS_S 8
+#define GLPDOC_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8)
+#define GLPDOC_CACHESIZE_WAYS_S 20
+#define GLPDOC_CACHESIZE_WAYS_M MAKEMASK(0xF, 20)
+#define GLPDOC_CACHESIZE_FPMAT 0x00110088 /* Reset Source: CORER */
+#define GLPDOC_CACHESIZE_FPMAT_WORD_SIZE_S 0
+#define GLPDOC_CACHESIZE_FPMAT_WORD_SIZE_M MAKEMASK(0xFF, 0)
+#define GLPDOC_CACHESIZE_FPMAT_SETS_S 8
+#define GLPDOC_CACHESIZE_FPMAT_SETS_M MAKEMASK(0xFFF, 8)
+#define GLPDOC_CACHESIZE_FPMAT_WAYS_S 20
+#define GLPDOC_CACHESIZE_FPMAT_WAYS_M MAKEMASK(0xF, 20)
+#define GLPEOC0_CACHESIZE 0x005140A8 /* Reset Source: CORER */
+#define GLPEOC0_CACHESIZE_WORD_SIZE_S 0
+#define GLPEOC0_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0)
+#define GLPEOC0_CACHESIZE_SETS_S 8
+#define GLPEOC0_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8)
+#define GLPEOC0_CACHESIZE_WAYS_S 20
+#define GLPEOC0_CACHESIZE_WAYS_M MAKEMASK(0xF, 20)
+#define GLPEOC1_CACHESIZE 0x005160A8 /* Reset Source: CORER */
+#define GLPEOC1_CACHESIZE_WORD_SIZE_S 0
+#define GLPEOC1_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0)
+#define GLPEOC1_CACHESIZE_SETS_S 8
+#define GLPEOC1_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8)
+#define GLPEOC1_CACHESIZE_WAYS_S 20
+#define GLPEOC1_CACHESIZE_WAYS_M MAKEMASK(0xF, 20)
+#define PFHMC_ERRORDATA 0x00520500 /* Reset Source: PFR */
+#define PFHMC_ERRORDATA_HMC_ERROR_DATA_S 0
+#define PFHMC_ERRORDATA_HMC_ERROR_DATA_M MAKEMASK(0x3FFFFFFF, 0)
+#define PFHMC_ERRORDATA_FPMAT 0x00100500 /* Reset Source: PFR */
+#define PFHMC_ERRORDATA_FPMAT_HMC_ERROR_DATA_S 0
+#define PFHMC_ERRORDATA_FPMAT_HMC_ERROR_DATA_M MAKEMASK(0x3FFFFFFF, 0)
+#define PFHMC_ERRORINFO 0x00520400 /* Reset Source: PFR */
+#define PFHMC_ERRORINFO_PMF_INDEX_S 0
+#define PFHMC_ERRORINFO_PMF_INDEX_M MAKEMASK(0x1F, 0)
+#define PFHMC_ERRORINFO_PMF_ISVF_S 7
+#define PFHMC_ERRORINFO_PMF_ISVF_M BIT(7)
+#define PFHMC_ERRORINFO_HMC_ERROR_TYPE_S 8
+#define PFHMC_ERRORINFO_HMC_ERROR_TYPE_M MAKEMASK(0xF, 8)
+#define PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S 16
+#define PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M MAKEMASK(0x1F, 16)
+#define PFHMC_ERRORINFO_ERROR_DETECTED_S 31
+#define PFHMC_ERRORINFO_ERROR_DETECTED_M BIT(31)
+#define PFHMC_ERRORINFO_FPMAT 0x00100400 /* Reset Source: PFR */
+#define PFHMC_ERRORINFO_FPMAT_PMF_INDEX_S 0
+#define PFHMC_ERRORINFO_FPMAT_PMF_INDEX_M MAKEMASK(0x1F, 0)
+#define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_S 7
+#define PFHMC_ERRORINFO_FPMAT_PMF_ISVF_M BIT(7)
+#define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_S 8
+#define PFHMC_ERRORINFO_FPMAT_HMC_ERROR_TYPE_M MAKEMASK(0xF, 8)
+#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_S 16
+#define PFHMC_ERRORINFO_FPMAT_HMC_OBJECT_TYPE_M MAKEMASK(0x1F, 16)
+#define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_S 31
+#define PFHMC_ERRORINFO_FPMAT_ERROR_DETECTED_M BIT(31)
+#define PFHMC_PDINV 0x00520300 /* Reset Source: PFR */
+#define PFHMC_PDINV_PMSDIDX_S 0
+#define PFHMC_PDINV_PMSDIDX_M MAKEMASK(0xFFF, 0)
+#define PFHMC_PDINV_PMSDPARTSEL_S 15
+#define PFHMC_PDINV_PMSDPARTSEL_M BIT(15)
+#define PFHMC_PDINV_PMPDIDX_S 16
+#define PFHMC_PDINV_PMPDIDX_M MAKEMASK(0x1FF, 16)
+#define PFHMC_PDINV_FPMAT 0x00100300 /* Reset Source: PFR */
+#define PFHMC_PDINV_FPMAT_PMSDIDX_S 0
+#define PFHMC_PDINV_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0)
+#define PFHMC_PDINV_FPMAT_PMSDPARTSEL_S 15
+#define PFHMC_PDINV_FPMAT_PMSDPARTSEL_M BIT(15)
+#define PFHMC_PDINV_FPMAT_PMPDIDX_S 16
+#define PFHMC_PDINV_FPMAT_PMPDIDX_M MAKEMASK(0x1FF, 16)
+#define PFHMC_SDCMD 0x00520000 /* Reset Source: PFR */
+#define PFHMC_SDCMD_PMSDIDX_S 0
+#define PFHMC_SDCMD_PMSDIDX_M MAKEMASK(0xFFF, 0)
+#define PFHMC_SDCMD_PMSDPARTSEL_S 15
+#define PFHMC_SDCMD_PMSDPARTSEL_M BIT(15)
+#define PFHMC_SDCMD_PMSDWR_S 31
+#define PFHMC_SDCMD_PMSDWR_M BIT(31)
+#define PFHMC_SDCMD_FPMAT 0x00100000 /* Reset Source: PFR */
+#define PFHMC_SDCMD_FPMAT_PMSDIDX_S 0
+#define PFHMC_SDCMD_FPMAT_PMSDIDX_M MAKEMASK(0xFFF, 0)
+#define PFHMC_SDCMD_FPMAT_PMSDPARTSEL_S 15
+#define PFHMC_SDCMD_FPMAT_PMSDPARTSEL_M BIT(15)
+#define PFHMC_SDCMD_FPMAT_PMSDWR_S 31
+#define PFHMC_SDCMD_FPMAT_PMSDWR_M BIT(31)
+#define PFHMC_SDDATAHIGH 0x00520200 /* Reset Source: PFR */
+#define PFHMC_SDDATAHIGH_PMSDDATAHIGH_S 0
+#define PFHMC_SDDATAHIGH_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFHMC_SDDATAHIGH_FPMAT 0x00100200 /* Reset Source: PFR */
+#define PFHMC_SDDATAHIGH_FPMAT_PMSDDATAHIGH_S 0
+#define PFHMC_SDDATAHIGH_FPMAT_PMSDDATAHIGH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFHMC_SDDATALOW 0x00520100 /* Reset Source: PFR */
+#define PFHMC_SDDATALOW_PMSDVALID_S 0
+#define PFHMC_SDDATALOW_PMSDVALID_M BIT(0)
+#define PFHMC_SDDATALOW_PMSDTYPE_S 1
+#define PFHMC_SDDATALOW_PMSDTYPE_M BIT(1)
+#define PFHMC_SDDATALOW_PMSDBPCOUNT_S 2
+#define PFHMC_SDDATALOW_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2)
+#define PFHMC_SDDATALOW_PMSDDATALOW_S 12
+#define PFHMC_SDDATALOW_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12)
+#define PFHMC_SDDATALOW_FPMAT 0x00100100 /* Reset Source: PFR */
+#define PFHMC_SDDATALOW_FPMAT_PMSDVALID_S 0
+#define PFHMC_SDDATALOW_FPMAT_PMSDVALID_M BIT(0)
+#define PFHMC_SDDATALOW_FPMAT_PMSDTYPE_S 1
+#define PFHMC_SDDATALOW_FPMAT_PMSDTYPE_M BIT(1)
+#define PFHMC_SDDATALOW_FPMAT_PMSDBPCOUNT_S 2
+#define PFHMC_SDDATALOW_FPMAT_PMSDBPCOUNT_M MAKEMASK(0x3FF, 2)
+#define PFHMC_SDDATALOW_FPMAT_PMSDDATALOW_S 12
+#define PFHMC_SDDATALOW_FPMAT_PMSDDATALOW_M MAKEMASK(0xFFFFF, 12)
+#define GL_DSI_REPC 0x00294208 /* Reset Source: CORER */
+#define GL_DSI_REPC_NO_DESC_CNT_S 0
+#define GL_DSI_REPC_NO_DESC_CNT_M MAKEMASK(0xFFFF, 0)
+#define GL_DSI_REPC_ERROR_CNT_S 16
+#define GL_DSI_REPC_ERROR_CNT_M MAKEMASK(0xFFFF, 16)
+#define GL_MDCK_TDAT_TCLAN 0x000FC0DC /* Reset Source: CORER */
+#define GL_MDCK_TDAT_TCLAN_WRONG_ORDER_FORMAT_DESC_S 0
+#define GL_MDCK_TDAT_TCLAN_WRONG_ORDER_FORMAT_DESC_M BIT(0)
+#define GL_MDCK_TDAT_TCLAN_UR_S 1
+#define GL_MDCK_TDAT_TCLAN_UR_M BIT(1)
+#define GL_MDCK_TDAT_TCLAN_TAIL_DESC_NOT_DDESC_EOP_NOP_S 2
+#define GL_MDCK_TDAT_TCLAN_TAIL_DESC_NOT_DDESC_EOP_NOP_M BIT(2)
+#define GL_MDCK_TDAT_TCLAN_FALSE_SCHEDULING_S 3
+#define GL_MDCK_TDAT_TCLAN_FALSE_SCHEDULING_M BIT(3)
+#define GL_MDCK_TDAT_TCLAN_TAIL_VALUE_BIGGER_THAN_RING_LEN_S 4
+#define GL_MDCK_TDAT_TCLAN_TAIL_VALUE_BIGGER_THAN_RING_LEN_M BIT(4)
+#define GL_MDCK_TDAT_TCLAN_MORE_THAN_8_DCMDS_IN_PKT_S 5
+#define GL_MDCK_TDAT_TCLAN_MORE_THAN_8_DCMDS_IN_PKT_M BIT(5)
+#define GL_MDCK_TDAT_TCLAN_NO_HEAD_UPDATE_IN_QUANTA_S 6
+#define GL_MDCK_TDAT_TCLAN_NO_HEAD_UPDATE_IN_QUANTA_M BIT(6)
+#define GL_MDCK_TDAT_TCLAN_PKT_LEN_NOT_LEGAL_S 7
+#define GL_MDCK_TDAT_TCLAN_PKT_LEN_NOT_LEGAL_M BIT(7)
+#define GL_MDCK_TDAT_TCLAN_TSO_TLEN_NOT_COHERENT_WITH_SUM_BUFS_S 8
+#define GL_MDCK_TDAT_TCLAN_TSO_TLEN_NOT_COHERENT_WITH_SUM_BUFS_M BIT(8)
+#define GL_MDCK_TDAT_TCLAN_TSO_TAIL_REACHED_BEFORE_TLEN_END_S 9
+#define GL_MDCK_TDAT_TCLAN_TSO_TAIL_REACHED_BEFORE_TLEN_END_M BIT(9)
+#define GL_MDCK_TDAT_TCLAN_TSO_MORE_THAN_3_HDRS_S 10
+#define GL_MDCK_TDAT_TCLAN_TSO_MORE_THAN_3_HDRS_M BIT(10)
+#define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_S 11
+#define GL_MDCK_TDAT_TCLAN_TSO_SUM_BUFFS_LT_SUM_HDRS_M BIT(11)
+#define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_S 12
+#define GL_MDCK_TDAT_TCLAN_TSO_ZERO_MSS_TLEN_HDRS_M BIT(12)
+#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_S 13
+#define GL_MDCK_TDAT_TCLAN_TSO_CTX_DESC_IPSEC_M BIT(13)
+#define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_S 14
+#define GL_MDCK_TDAT_TCLAN_SSO_COMS_NOT_WHOLE_PKT_NUM_IN_QUANTA_M BIT(14)
+#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_S 15
+#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_BYTES_EXCEED_PKTLEN_X_64_M BIT(15)
+#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_CMDS_EXCEED_S 16
+#define GL_MDCK_TDAT_TCLAN_COMS_QUANTA_CMDS_EXCEED_M BIT(16)
+#define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_LAST_LSO_QUANTA_S 17
+#define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_LAST_LSO_QUANTA_M BIT(17)
+#define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_TLEN_S 18
+#define GL_MDCK_TDAT_TCLAN_TSO_COMS_TSO_DESCS_TLEN_M BIT(18)
+#define GL_MDCK_TDAT_TCLAN_TSO_COMS_QUANTA_FINISHED_TOO_EARLY_S 19
+#define GL_MDCK_TDAT_TCLAN_TSO_COMS_QUANTA_FINISHED_TOO_EARLY_M BIT(19)
+#define GL_MDCK_TDAT_TCLAN_COMS_NUM_PKTS_IN_QUANTA_S 20
+#define GL_MDCK_TDAT_TCLAN_COMS_NUM_PKTS_IN_QUANTA_M BIT(20)
+#define GLCORE_CLKCTL_H 0x000B81E8 /* Reset Source: POR */
+#define GLCORE_CLKCTL_H_UPPER_CLK_SRC_H_S 0
+#define GLCORE_CLKCTL_H_UPPER_CLK_SRC_H_M MAKEMASK(0x3, 0)
+#define GLCORE_CLKCTL_H_LOWER_CLK_SRC_H_S 2
+#define GLCORE_CLKCTL_H_LOWER_CLK_SRC_H_M MAKEMASK(0x3, 2)
+#define GLCORE_CLKCTL_H_PSM_CLK_SRC_H_S 4
+#define GLCORE_CLKCTL_H_PSM_CLK_SRC_H_M MAKEMASK(0x3, 4)
+#define GLCORE_CLKCTL_H_RXCTL_CLK_SRC_H_S 6
+#define GLCORE_CLKCTL_H_RXCTL_CLK_SRC_H_M MAKEMASK(0x3, 6)
+#define GLCORE_CLKCTL_H_UANA_CLK_SRC_H_S 8
+#define GLCORE_CLKCTL_H_UANA_CLK_SRC_H_M MAKEMASK(0x7, 8)
+#define GLCORE_CLKCTL_L 0x000B8254 /* Reset Source: POR */
+#define GLCORE_CLKCTL_L_UPPER_CLK_SRC_L_S 0
+#define GLCORE_CLKCTL_L_UPPER_CLK_SRC_L_M MAKEMASK(0x3, 0)
+#define GLCORE_CLKCTL_L_LOWER_CLK_SRC_L_S 2
+#define GLCORE_CLKCTL_L_LOWER_CLK_SRC_L_M MAKEMASK(0x3, 2)
+#define GLCORE_CLKCTL_L_PSM_CLK_SRC_L_S 4
+#define GLCORE_CLKCTL_L_PSM_CLK_SRC_L_M MAKEMASK(0x3, 4)
+#define GLCORE_CLKCTL_L_RXCTL_CLK_SRC_L_S 6
+#define GLCORE_CLKCTL_L_RXCTL_CLK_SRC_L_M MAKEMASK(0x3, 6)
+#define GLCORE_CLKCTL_L_UANA_CLK_SRC_L_S 8
+#define GLCORE_CLKCTL_L_UANA_CLK_SRC_L_M MAKEMASK(0x7, 8)
+#define GLCORE_CLKCTL_M 0x000B8258 /* Reset Source: POR */
+#define GLCORE_CLKCTL_M_UPPER_CLK_SRC_M_S 0
+#define GLCORE_CLKCTL_M_UPPER_CLK_SRC_M_M MAKEMASK(0x3, 0)
+#define GLCORE_CLKCTL_M_LOWER_CLK_SRC_M_S 2
+#define GLCORE_CLKCTL_M_LOWER_CLK_SRC_M_M MAKEMASK(0x3, 2)
+#define GLCORE_CLKCTL_M_PSM_CLK_SRC_M_S 4
+#define GLCORE_CLKCTL_M_PSM_CLK_SRC_M_M MAKEMASK(0x3, 4)
+#define GLCORE_CLKCTL_M_RXCTL_CLK_SRC_M_S 6
+#define GLCORE_CLKCTL_M_RXCTL_CLK_SRC_M_M MAKEMASK(0x3, 6)
+#define GLCORE_CLKCTL_M_UANA_CLK_SRC_M_S 8
+#define GLCORE_CLKCTL_M_UANA_CLK_SRC_M_M MAKEMASK(0x7, 8)
+#define GLFOC_CACHESIZE 0x000AA074 /* Reset Source: CORER */
+#define GLFOC_CACHESIZE_WORD_SIZE_S 0
+#define GLFOC_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFF, 0)
+#define GLFOC_CACHESIZE_SETS_S 8
+#define GLFOC_CACHESIZE_SETS_M MAKEMASK(0xFFF, 8)
+#define GLFOC_CACHESIZE_WAYS_S 20
+#define GLFOC_CACHESIZE_WAYS_M MAKEMASK(0xF, 20)
+#define GLMAC_CLKSTAT 0x000B8210 /* Reset Source: POR */
+#define GLMAC_CLKSTAT_P0_CLK_SPEED_S 0
+#define GLMAC_CLKSTAT_P0_CLK_SPEED_M MAKEMASK(0xF, 0)
+#define GLMAC_CLKSTAT_P1_CLK_SPEED_S 4
+#define GLMAC_CLKSTAT_P1_CLK_SPEED_M MAKEMASK(0xF, 4)
+#define GLMAC_CLKSTAT_P2_CLK_SPEED_S 8
+#define GLMAC_CLKSTAT_P2_CLK_SPEED_M MAKEMASK(0xF, 8)
+#define GLMAC_CLKSTAT_P3_CLK_SPEED_S 12
+#define GLMAC_CLKSTAT_P3_CLK_SPEED_M MAKEMASK(0xF, 12)
+#define GLMAC_CLKSTAT_P4_CLK_SPEED_S 16
+#define GLMAC_CLKSTAT_P4_CLK_SPEED_M MAKEMASK(0xF, 16)
+#define GLMAC_CLKSTAT_P5_CLK_SPEED_S 20
+#define GLMAC_CLKSTAT_P5_CLK_SPEED_M MAKEMASK(0xF, 20)
+#define GLMAC_CLKSTAT_P6_CLK_SPEED_S 24
+#define GLMAC_CLKSTAT_P6_CLK_SPEED_M MAKEMASK(0xF, 24)
+#define GLMAC_CLKSTAT_P7_CLK_SPEED_S 28
+#define GLMAC_CLKSTAT_P7_CLK_SPEED_M MAKEMASK(0xF, 28)
+#define GLTPB_100G_MAC_FC_THRESH 0x00099510 /* Reset Source: CORER */
+#define GLTPB_100G_MAC_FC_THRESH_PORT0_FC_THRESH_S 0
+#define GLTPB_100G_MAC_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_S 16
+#define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define GLTPB_100G_RPB_FC_THRESH 0x0009963C /* Reset Source: CORER */
+#define GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_S 0
+#define GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_S 16
+#define GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define GLTPB_PACING_10G 0x000994E4 /* Reset Source: CORER */
+#define GLTPB_PACING_10G_N_S 0
+#define GLTPB_PACING_10G_N_M MAKEMASK(0xFF, 0)
+#define GLTPB_PACING_10G_K_S 8
+#define GLTPB_PACING_10G_K_M MAKEMASK(0xFF, 8)
+#define GLTPB_PACING_10G_S_S 16
+#define GLTPB_PACING_10G_S_M MAKEMASK(0x1FF, 16)
+#define GLTPB_PACING_25G 0x000994E0 /* Reset Source: CORER */
+#define GLTPB_PACING_25G_N_S 0
+#define GLTPB_PACING_25G_N_M MAKEMASK(0xFF, 0)
+#define GLTPB_PACING_25G_K_S 8
+#define GLTPB_PACING_25G_K_M MAKEMASK(0xFF, 8)
+#define GLTPB_PACING_25G_S_S 16
+#define GLTPB_PACING_25G_S_M MAKEMASK(0x1FF, 16)
+#define GLTPB_PORT_PACING_SPEED 0x000994E8 /* Reset Source: CORER */
+#define GLTPB_PORT_PACING_SPEED_PORT0_SPEED_S 0
+#define GLTPB_PORT_PACING_SPEED_PORT0_SPEED_M BIT(0)
+#define GLTPB_PORT_PACING_SPEED_PORT1_SPEED_S 1
+#define GLTPB_PORT_PACING_SPEED_PORT1_SPEED_M BIT(1)
+#define GLTPB_PORT_PACING_SPEED_PORT2_SPEED_S 2
+#define GLTPB_PORT_PACING_SPEED_PORT2_SPEED_M BIT(2)
+#define GLTPB_PORT_PACING_SPEED_PORT3_SPEED_S 3
+#define GLTPB_PORT_PACING_SPEED_PORT3_SPEED_M BIT(3)
+#define GLTPB_PORT_PACING_SPEED_PORT4_SPEED_S 4
+#define GLTPB_PORT_PACING_SPEED_PORT4_SPEED_M BIT(4)
+#define GLTPB_PORT_PACING_SPEED_PORT5_SPEED_S 5
+#define GLTPB_PORT_PACING_SPEED_PORT5_SPEED_M BIT(5)
+#define GLTPB_PORT_PACING_SPEED_PORT6_SPEED_S 6
+#define GLTPB_PORT_PACING_SPEED_PORT6_SPEED_M BIT(6)
+#define GLTPB_PORT_PACING_SPEED_PORT7_SPEED_S 7
+#define GLTPB_PORT_PACING_SPEED_PORT7_SPEED_M BIT(7)
+#define TPB_CFG_SCHEDULED_BC_THRESHOLD 0x00099494 /* Reset Source: CORER */
+#define TPB_CFG_SCHEDULED_BC_THRESHOLD_THRESHOLD_S 0
+#define TPB_CFG_SCHEDULED_BC_THRESHOLD_THRESHOLD_M MAKEMASK(0x7FFF, 0)
+#define GL_UFUSE_SOC 0x000A400C /* Reset Source: POR */
+#define GL_UFUSE_SOC_PORT_MODE_S 0
+#define GL_UFUSE_SOC_PORT_MODE_M MAKEMASK(0x3, 0)
+#define GL_UFUSE_SOC_BANDWIDTH_S 2
+#define GL_UFUSE_SOC_BANDWIDTH_M MAKEMASK(0x3, 2)
+#define GL_UFUSE_SOC_PE_DISABLE_S 4
+#define GL_UFUSE_SOC_PE_DISABLE_M BIT(4)
+#define GL_UFUSE_SOC_SWITCH_MODE_S 5
+#define GL_UFUSE_SOC_SWITCH_MODE_M BIT(5)
+#define GL_UFUSE_SOC_CSR_PROTECTION_ENABLE_S 6
+#define GL_UFUSE_SOC_CSR_PROTECTION_ENABLE_M BIT(6)
+#define GL_UFUSE_SOC_SERIAL_50G_S 7
+#define GL_UFUSE_SOC_SERIAL_50G_M BIT(7)
+#define GL_UFUSE_SOC_NIC_ID_S 8
+#define GL_UFUSE_SOC_NIC_ID_M BIT(8)
+#define GL_UFUSE_SOC_BLOCK_BME_TO_FW_S 9
+#define GL_UFUSE_SOC_BLOCK_BME_TO_FW_M BIT(9)
+#define GL_UFUSE_SOC_SOC_TYPE_S 10
+#define GL_UFUSE_SOC_SOC_TYPE_M BIT(10)
+#define GL_UFUSE_SOC_BTS_MODE_S 11
+#define GL_UFUSE_SOC_BTS_MODE_M BIT(11)
+#define GL_UFUSE_SOC_SPARE_FUSES_S 12
+#define GL_UFUSE_SOC_SPARE_FUSES_M MAKEMASK(0xF, 12)
+#define EMPINT_GPIO_ENA 0x000880C0 /* Reset Source: POR */
+#define EMPINT_GPIO_ENA_GPIO0_ENA_S 0
+#define EMPINT_GPIO_ENA_GPIO0_ENA_M BIT(0)
+#define EMPINT_GPIO_ENA_GPIO1_ENA_S 1
+#define EMPINT_GPIO_ENA_GPIO1_ENA_M BIT(1)
+#define EMPINT_GPIO_ENA_GPIO2_ENA_S 2
+#define EMPINT_GPIO_ENA_GPIO2_ENA_M BIT(2)
+#define EMPINT_GPIO_ENA_GPIO3_ENA_S 3
+#define EMPINT_GPIO_ENA_GPIO3_ENA_M BIT(3)
+#define EMPINT_GPIO_ENA_GPIO4_ENA_S 4
+#define EMPINT_GPIO_ENA_GPIO4_ENA_M BIT(4)
+#define EMPINT_GPIO_ENA_GPIO5_ENA_S 5
+#define EMPINT_GPIO_ENA_GPIO5_ENA_M BIT(5)
+#define EMPINT_GPIO_ENA_GPIO6_ENA_S 6
+#define EMPINT_GPIO_ENA_GPIO6_ENA_M BIT(6)
+#define GLGEN_MAC_LINK_TOPO 0x000B81DC /* Reset Source: GLOBR */
+#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_S 0
+#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M MAKEMASK(0x3, 0)
+#define GLINT_CEQCTL(_INT) (0x0015C000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define GLINT_CEQCTL_MAX_INDEX 2047
+#define GLINT_CEQCTL_MSIX_INDX_S 0
+#define GLINT_CEQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define GLINT_CEQCTL_ITR_INDX_S 11
+#define GLINT_CEQCTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define GLINT_CEQCTL_CAUSE_ENA_S 30
+#define GLINT_CEQCTL_CAUSE_ENA_M BIT(30)
+#define GLINT_CEQCTL_INTEVENT_S 31
+#define GLINT_CEQCTL_INTEVENT_M BIT(31)
+#define GLINT_CTL 0x0016CC54 /* Reset Source: CORER */
+#define GLINT_CTL_DIS_AUTOMASK_S 0
+#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
+#define GLINT_CTL_RSVD_S 1
+#define GLINT_CTL_RSVD_M MAKEMASK(0x7FFF, 1)
+#define GLINT_CTL_ITR_GRAN_200_S 16
+#define GLINT_CTL_ITR_GRAN_200_M MAKEMASK(0xF, 16)
+#define GLINT_CTL_ITR_GRAN_100_S 20
+#define GLINT_CTL_ITR_GRAN_100_M MAKEMASK(0xF, 20)
+#define GLINT_CTL_ITR_GRAN_50_S 24
+#define GLINT_CTL_ITR_GRAN_50_M MAKEMASK(0xF, 24)
+#define GLINT_CTL_ITR_GRAN_25_S 28
+#define GLINT_CTL_ITR_GRAN_25_M MAKEMASK(0xF, 28)
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define GLINT_DYN_CTL_MAX_INDEX 2047
+#define GLINT_DYN_CTL_INTENA_S 0
+#define GLINT_DYN_CTL_INTENA_M BIT(0)
+#define GLINT_DYN_CTL_CLEARPBA_S 1
+#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
+#define GLINT_DYN_CTL_SWINT_TRIG_S 2
+#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
+#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3)
+#define GLINT_DYN_CTL_INTERVAL_S 5
+#define GLINT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5)
+#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
+#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24)
+#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
+#define GLINT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25)
+#define GLINT_DYN_CTL_WB_ON_ITR_S 30
+#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
+#define GLINT_DYN_CTL_INTENA_MSK_S 31
+#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
+#define GLINT_FW_TOOL_CTL 0x0016C840 /* Reset Source: CORER */
+#define GLINT_FW_TOOL_CTL_MSIX_INDX_S 0
+#define GLINT_FW_TOOL_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define GLINT_FW_TOOL_CTL_ITR_INDX_S 11
+#define GLINT_FW_TOOL_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define GLINT_FW_TOOL_CTL_CAUSE_ENA_S 30
+#define GLINT_FW_TOOL_CTL_CAUSE_ENA_M BIT(30)
+#define GLINT_FW_TOOL_CTL_INTEVENT_S 31
+#define GLINT_FW_TOOL_CTL_INTEVENT_M BIT(31)
+#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) /* _i=0...2, _INT=0...2047 */ /* Reset Source: CORER */
+#define GLINT_ITR_MAX_INDEX 2
+#define GLINT_ITR_INTERVAL_S 0
+#define GLINT_ITR_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define GLINT_RATE_MAX_INDEX 2047
+#define GLINT_RATE_INTERVAL_S 0
+#define GLINT_RATE_INTERVAL_M MAKEMASK(0x3F, 0)
+#define GLINT_RATE_INTRL_ENA_S 6
+#define GLINT_RATE_INTRL_ENA_M BIT(6)
+#define GLINT_TSYN_PFMSTR(_i) (0x0016CCC0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLINT_TSYN_PFMSTR_MAX_INDEX 1
+#define GLINT_TSYN_PFMSTR_PF_MASTER_S 0
+#define GLINT_TSYN_PFMSTR_PF_MASTER_M MAKEMASK(0x7, 0)
+#define GLINT_TSYN_PHY 0x0016CC50 /* Reset Source: CORER */
+#define GLINT_TSYN_PHY_PHY_INDX_S 0
+#define GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define GLINT_VECT2FUNC_MAX_INDEX 2047
+#define GLINT_VECT2FUNC_VF_NUM_S 0
+#define GLINT_VECT2FUNC_VF_NUM_M MAKEMASK(0xFF, 0)
+#define GLINT_VECT2FUNC_PF_NUM_S 12
+#define GLINT_VECT2FUNC_PF_NUM_M MAKEMASK(0x7, 12)
+#define GLINT_VECT2FUNC_IS_PF_S 16
+#define GLINT_VECT2FUNC_IS_PF_M BIT(16)
+#define PF0INT_FW_HLP_CTL 0x0016C844 /* Reset Source: CORER */
+#define PF0INT_FW_HLP_CTL_MSIX_INDX_S 0
+#define PF0INT_FW_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_FW_HLP_CTL_ITR_INDX_S 11
+#define PF0INT_FW_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_FW_HLP_CTL_CAUSE_ENA_S 30
+#define PF0INT_FW_HLP_CTL_CAUSE_ENA_M BIT(30)
+#define PF0INT_FW_HLP_CTL_INTEVENT_S 31
+#define PF0INT_FW_HLP_CTL_INTEVENT_M BIT(31)
+#define PF0INT_FW_PSM_CTL 0x0016C848 /* Reset Source: CORER */
+#define PF0INT_FW_PSM_CTL_MSIX_INDX_S 0
+#define PF0INT_FW_PSM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_FW_PSM_CTL_ITR_INDX_S 11
+#define PF0INT_FW_PSM_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_FW_PSM_CTL_CAUSE_ENA_S 30
+#define PF0INT_FW_PSM_CTL_CAUSE_ENA_M BIT(30)
+#define PF0INT_FW_PSM_CTL_INTEVENT_S 31
+#define PF0INT_FW_PSM_CTL_INTEVENT_M BIT(31)
+#define PF0INT_MBX_CPM_CTL 0x0016B2C0 /* Reset Source: CORER */
+#define PF0INT_MBX_CPM_CTL_MSIX_INDX_S 0
+#define PF0INT_MBX_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_MBX_CPM_CTL_ITR_INDX_S 11
+#define PF0INT_MBX_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_MBX_CPM_CTL_CAUSE_ENA_S 30
+#define PF0INT_MBX_CPM_CTL_CAUSE_ENA_M BIT(30)
+#define PF0INT_MBX_CPM_CTL_INTEVENT_S 31
+#define PF0INT_MBX_CPM_CTL_INTEVENT_M BIT(31)
+#define PF0INT_MBX_HLP_CTL 0x0016B2C4 /* Reset Source: CORER */
+#define PF0INT_MBX_HLP_CTL_MSIX_INDX_S 0
+#define PF0INT_MBX_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_MBX_HLP_CTL_ITR_INDX_S 11
+#define PF0INT_MBX_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_MBX_HLP_CTL_CAUSE_ENA_S 30
+#define PF0INT_MBX_HLP_CTL_CAUSE_ENA_M BIT(30)
+#define PF0INT_MBX_HLP_CTL_INTEVENT_S 31
+#define PF0INT_MBX_HLP_CTL_INTEVENT_M BIT(31)
+#define PF0INT_MBX_PSM_CTL 0x0016B2C8 /* Reset Source: CORER */
+#define PF0INT_MBX_PSM_CTL_MSIX_INDX_S 0
+#define PF0INT_MBX_PSM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_MBX_PSM_CTL_ITR_INDX_S 11
+#define PF0INT_MBX_PSM_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_MBX_PSM_CTL_CAUSE_ENA_S 30
+#define PF0INT_MBX_PSM_CTL_CAUSE_ENA_M BIT(30)
+#define PF0INT_MBX_PSM_CTL_INTEVENT_S 31
+#define PF0INT_MBX_PSM_CTL_INTEVENT_M BIT(31)
+#define PF0INT_OICR_CPM 0x0016CC40 /* Reset Source: CORER */
+#define PF0INT_OICR_CPM_INTEVENT_S 0
+#define PF0INT_OICR_CPM_INTEVENT_M BIT(0)
+#define PF0INT_OICR_CPM_QUEUE_S 1
+#define PF0INT_OICR_CPM_QUEUE_M BIT(1)
+#define PF0INT_OICR_CPM_RSV1_S 2
+#define PF0INT_OICR_CPM_RSV1_M MAKEMASK(0xFF, 2)
+#define PF0INT_OICR_CPM_HH_COMP_S 10
+#define PF0INT_OICR_CPM_HH_COMP_M BIT(10)
+#define PF0INT_OICR_CPM_TSYN_TX_S 11
+#define PF0INT_OICR_CPM_TSYN_TX_M BIT(11)
+#define PF0INT_OICR_CPM_TSYN_EVNT_S 12
+#define PF0INT_OICR_CPM_TSYN_EVNT_M BIT(12)
+#define PF0INT_OICR_CPM_TSYN_TGT_S 13
+#define PF0INT_OICR_CPM_TSYN_TGT_M BIT(13)
+#define PF0INT_OICR_CPM_HLP_RDY_S 14
+#define PF0INT_OICR_CPM_HLP_RDY_M BIT(14)
+#define PF0INT_OICR_CPM_CPM_RDY_S 15
+#define PF0INT_OICR_CPM_CPM_RDY_M BIT(15)
+#define PF0INT_OICR_CPM_ECC_ERR_S 16
+#define PF0INT_OICR_CPM_ECC_ERR_M BIT(16)
+#define PF0INT_OICR_CPM_RSV2_S 17
+#define PF0INT_OICR_CPM_RSV2_M MAKEMASK(0x3, 17)
+#define PF0INT_OICR_CPM_MAL_DETECT_S 19
+#define PF0INT_OICR_CPM_MAL_DETECT_M BIT(19)
+#define PF0INT_OICR_CPM_GRST_S 20
+#define PF0INT_OICR_CPM_GRST_M BIT(20)
+#define PF0INT_OICR_CPM_PCI_EXCEPTION_S 21
+#define PF0INT_OICR_CPM_PCI_EXCEPTION_M BIT(21)
+#define PF0INT_OICR_CPM_GPIO_S 22
+#define PF0INT_OICR_CPM_GPIO_M BIT(22)
+#define PF0INT_OICR_CPM_RSV3_S 23
+#define PF0INT_OICR_CPM_RSV3_M BIT(23)
+#define PF0INT_OICR_CPM_STORM_DETECT_S 24
+#define PF0INT_OICR_CPM_STORM_DETECT_M BIT(24)
+#define PF0INT_OICR_CPM_LINK_STAT_CHANGE_S 25
+#define PF0INT_OICR_CPM_LINK_STAT_CHANGE_M BIT(25)
+#define PF0INT_OICR_CPM_HMC_ERR_S 26
+#define PF0INT_OICR_CPM_HMC_ERR_M BIT(26)
+#define PF0INT_OICR_CPM_PE_PUSH_S 27
+#define PF0INT_OICR_CPM_PE_PUSH_M BIT(27)
+#define PF0INT_OICR_CPM_PE_CRITERR_S 28
+#define PF0INT_OICR_CPM_PE_CRITERR_M BIT(28)
+#define PF0INT_OICR_CPM_VFLR_S 29
+#define PF0INT_OICR_CPM_VFLR_M BIT(29)
+#define PF0INT_OICR_CPM_XLR_HW_DONE_S 30
+#define PF0INT_OICR_CPM_XLR_HW_DONE_M BIT(30)
+#define PF0INT_OICR_CPM_SWINT_S 31
+#define PF0INT_OICR_CPM_SWINT_M BIT(31)
+#define PF0INT_OICR_CTL_CPM 0x0016CC48 /* Reset Source: CORER */
+#define PF0INT_OICR_CTL_CPM_MSIX_INDX_S 0
+#define PF0INT_OICR_CTL_CPM_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_OICR_CTL_CPM_ITR_INDX_S 11
+#define PF0INT_OICR_CTL_CPM_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_OICR_CTL_CPM_CAUSE_ENA_S 30
+#define PF0INT_OICR_CTL_CPM_CAUSE_ENA_M BIT(30)
+#define PF0INT_OICR_CTL_CPM_INTEVENT_S 31
+#define PF0INT_OICR_CTL_CPM_INTEVENT_M BIT(31)
+#define PF0INT_OICR_CTL_HLP 0x0016CC5C /* Reset Source: CORER */
+#define PF0INT_OICR_CTL_HLP_MSIX_INDX_S 0
+#define PF0INT_OICR_CTL_HLP_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_OICR_CTL_HLP_ITR_INDX_S 11
+#define PF0INT_OICR_CTL_HLP_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_OICR_CTL_HLP_CAUSE_ENA_S 30
+#define PF0INT_OICR_CTL_HLP_CAUSE_ENA_M BIT(30)
+#define PF0INT_OICR_CTL_HLP_INTEVENT_S 31
+#define PF0INT_OICR_CTL_HLP_INTEVENT_M BIT(31)
+#define PF0INT_OICR_CTL_PSM 0x0016CC64 /* Reset Source: CORER */
+#define PF0INT_OICR_CTL_PSM_MSIX_INDX_S 0
+#define PF0INT_OICR_CTL_PSM_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_OICR_CTL_PSM_ITR_INDX_S 11
+#define PF0INT_OICR_CTL_PSM_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_OICR_CTL_PSM_CAUSE_ENA_S 30
+#define PF0INT_OICR_CTL_PSM_CAUSE_ENA_M BIT(30)
+#define PF0INT_OICR_CTL_PSM_INTEVENT_S 31
+#define PF0INT_OICR_CTL_PSM_INTEVENT_M BIT(31)
+#define PF0INT_OICR_ENA_CPM 0x0016CC60 /* Reset Source: CORER */
+#define PF0INT_OICR_ENA_CPM_RSV0_S 0
+#define PF0INT_OICR_ENA_CPM_RSV0_M BIT(0)
+#define PF0INT_OICR_ENA_CPM_INT_ENA_S 1
+#define PF0INT_OICR_ENA_CPM_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1)
+#define PF0INT_OICR_ENA_HLP 0x0016CC4C /* Reset Source: CORER */
+#define PF0INT_OICR_ENA_HLP_RSV0_S 0
+#define PF0INT_OICR_ENA_HLP_RSV0_M BIT(0)
+#define PF0INT_OICR_ENA_HLP_INT_ENA_S 1
+#define PF0INT_OICR_ENA_HLP_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1)
+#define PF0INT_OICR_ENA_PSM 0x0016CC58 /* Reset Source: CORER */
+#define PF0INT_OICR_ENA_PSM_RSV0_S 0
+#define PF0INT_OICR_ENA_PSM_RSV0_M BIT(0)
+#define PF0INT_OICR_ENA_PSM_INT_ENA_S 1
+#define PF0INT_OICR_ENA_PSM_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1)
+#define PF0INT_OICR_HLP 0x0016CC68 /* Reset Source: CORER */
+#define PF0INT_OICR_HLP_INTEVENT_S 0
+#define PF0INT_OICR_HLP_INTEVENT_M BIT(0)
+#define PF0INT_OICR_HLP_QUEUE_S 1
+#define PF0INT_OICR_HLP_QUEUE_M BIT(1)
+#define PF0INT_OICR_HLP_RSV1_S 2
+#define PF0INT_OICR_HLP_RSV1_M MAKEMASK(0xFF, 2)
+#define PF0INT_OICR_HLP_HH_COMP_S 10
+#define PF0INT_OICR_HLP_HH_COMP_M BIT(10)
+#define PF0INT_OICR_HLP_TSYN_TX_S 11
+#define PF0INT_OICR_HLP_TSYN_TX_M BIT(11)
+#define PF0INT_OICR_HLP_TSYN_EVNT_S 12
+#define PF0INT_OICR_HLP_TSYN_EVNT_M BIT(12)
+#define PF0INT_OICR_HLP_TSYN_TGT_S 13
+#define PF0INT_OICR_HLP_TSYN_TGT_M BIT(13)
+#define PF0INT_OICR_HLP_HLP_RDY_S 14
+#define PF0INT_OICR_HLP_HLP_RDY_M BIT(14)
+#define PF0INT_OICR_HLP_CPM_RDY_S 15
+#define PF0INT_OICR_HLP_CPM_RDY_M BIT(15)
+#define PF0INT_OICR_HLP_ECC_ERR_S 16
+#define PF0INT_OICR_HLP_ECC_ERR_M BIT(16)
+#define PF0INT_OICR_HLP_RSV2_S 17
+#define PF0INT_OICR_HLP_RSV2_M MAKEMASK(0x3, 17)
+#define PF0INT_OICR_HLP_MAL_DETECT_S 19
+#define PF0INT_OICR_HLP_MAL_DETECT_M BIT(19)
+#define PF0INT_OICR_HLP_GRST_S 20
+#define PF0INT_OICR_HLP_GRST_M BIT(20)
+#define PF0INT_OICR_HLP_PCI_EXCEPTION_S 21
+#define PF0INT_OICR_HLP_PCI_EXCEPTION_M BIT(21)
+#define PF0INT_OICR_HLP_GPIO_S 22
+#define PF0INT_OICR_HLP_GPIO_M BIT(22)
+#define PF0INT_OICR_HLP_RSV3_S 23
+#define PF0INT_OICR_HLP_RSV3_M BIT(23)
+#define PF0INT_OICR_HLP_STORM_DETECT_S 24
+#define PF0INT_OICR_HLP_STORM_DETECT_M BIT(24)
+#define PF0INT_OICR_HLP_LINK_STAT_CHANGE_S 25
+#define PF0INT_OICR_HLP_LINK_STAT_CHANGE_M BIT(25)
+#define PF0INT_OICR_HLP_HMC_ERR_S 26
+#define PF0INT_OICR_HLP_HMC_ERR_M BIT(26)
+#define PF0INT_OICR_HLP_PE_PUSH_S 27
+#define PF0INT_OICR_HLP_PE_PUSH_M BIT(27)
+#define PF0INT_OICR_HLP_PE_CRITERR_S 28
+#define PF0INT_OICR_HLP_PE_CRITERR_M BIT(28)
+#define PF0INT_OICR_HLP_VFLR_S 29
+#define PF0INT_OICR_HLP_VFLR_M BIT(29)
+#define PF0INT_OICR_HLP_XLR_HW_DONE_S 30
+#define PF0INT_OICR_HLP_XLR_HW_DONE_M BIT(30)
+#define PF0INT_OICR_HLP_SWINT_S 31
+#define PF0INT_OICR_HLP_SWINT_M BIT(31)
+#define PF0INT_OICR_PSM 0x0016CC44 /* Reset Source: CORER */
+#define PF0INT_OICR_PSM_INTEVENT_S 0
+#define PF0INT_OICR_PSM_INTEVENT_M BIT(0)
+#define PF0INT_OICR_PSM_QUEUE_S 1
+#define PF0INT_OICR_PSM_QUEUE_M BIT(1)
+#define PF0INT_OICR_PSM_RSV1_S 2
+#define PF0INT_OICR_PSM_RSV1_M MAKEMASK(0xFF, 2)
+#define PF0INT_OICR_PSM_HH_COMP_S 10
+#define PF0INT_OICR_PSM_HH_COMP_M BIT(10)
+#define PF0INT_OICR_PSM_TSYN_TX_S 11
+#define PF0INT_OICR_PSM_TSYN_TX_M BIT(11)
+#define PF0INT_OICR_PSM_TSYN_EVNT_S 12
+#define PF0INT_OICR_PSM_TSYN_EVNT_M BIT(12)
+#define PF0INT_OICR_PSM_TSYN_TGT_S 13
+#define PF0INT_OICR_PSM_TSYN_TGT_M BIT(13)
+#define PF0INT_OICR_PSM_HLP_RDY_S 14
+#define PF0INT_OICR_PSM_HLP_RDY_M BIT(14)
+#define PF0INT_OICR_PSM_CPM_RDY_S 15
+#define PF0INT_OICR_PSM_CPM_RDY_M BIT(15)
+#define PF0INT_OICR_PSM_ECC_ERR_S 16
+#define PF0INT_OICR_PSM_ECC_ERR_M BIT(16)
+#define PF0INT_OICR_PSM_RSV2_S 17
+#define PF0INT_OICR_PSM_RSV2_M MAKEMASK(0x3, 17)
+#define PF0INT_OICR_PSM_MAL_DETECT_S 19
+#define PF0INT_OICR_PSM_MAL_DETECT_M BIT(19)
+#define PF0INT_OICR_PSM_GRST_S 20
+#define PF0INT_OICR_PSM_GRST_M BIT(20)
+#define PF0INT_OICR_PSM_PCI_EXCEPTION_S 21
+#define PF0INT_OICR_PSM_PCI_EXCEPTION_M BIT(21)
+#define PF0INT_OICR_PSM_GPIO_S 22
+#define PF0INT_OICR_PSM_GPIO_M BIT(22)
+#define PF0INT_OICR_PSM_RSV3_S 23
+#define PF0INT_OICR_PSM_RSV3_M BIT(23)
+#define PF0INT_OICR_PSM_STORM_DETECT_S 24
+#define PF0INT_OICR_PSM_STORM_DETECT_M BIT(24)
+#define PF0INT_OICR_PSM_LINK_STAT_CHANGE_S 25
+#define PF0INT_OICR_PSM_LINK_STAT_CHANGE_M BIT(25)
+#define PF0INT_OICR_PSM_HMC_ERR_S 26
+#define PF0INT_OICR_PSM_HMC_ERR_M BIT(26)
+#define PF0INT_OICR_PSM_PE_PUSH_S 27
+#define PF0INT_OICR_PSM_PE_PUSH_M BIT(27)
+#define PF0INT_OICR_PSM_PE_CRITERR_S 28
+#define PF0INT_OICR_PSM_PE_CRITERR_M BIT(28)
+#define PF0INT_OICR_PSM_VFLR_S 29
+#define PF0INT_OICR_PSM_VFLR_M BIT(29)
+#define PF0INT_OICR_PSM_XLR_HW_DONE_S 30
+#define PF0INT_OICR_PSM_XLR_HW_DONE_M BIT(30)
+#define PF0INT_OICR_PSM_SWINT_S 31
+#define PF0INT_OICR_PSM_SWINT_M BIT(31)
+#define PF0INT_SB_CPM_CTL 0x0016B2CC /* Reset Source: CORER */
+#define PF0INT_SB_CPM_CTL_MSIX_INDX_S 0
+#define PF0INT_SB_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_SB_CPM_CTL_ITR_INDX_S 11
+#define PF0INT_SB_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_SB_CPM_CTL_CAUSE_ENA_S 30
+#define PF0INT_SB_CPM_CTL_CAUSE_ENA_M BIT(30)
+#define PF0INT_SB_CPM_CTL_INTEVENT_S 31
+#define PF0INT_SB_CPM_CTL_INTEVENT_M BIT(31)
+#define PF0INT_SB_HLP_CTL 0x0016B640 /* Reset Source: CORER */
+#define PF0INT_SB_HLP_CTL_MSIX_INDX_S 0
+#define PF0INT_SB_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PF0INT_SB_HLP_CTL_ITR_INDX_S 11
+#define PF0INT_SB_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PF0INT_SB_HLP_CTL_CAUSE_ENA_S 30
+#define PF0INT_SB_HLP_CTL_CAUSE_ENA_M BIT(30)
+#define PF0INT_SB_HLP_CTL_INTEVENT_S 31
+#define PF0INT_SB_HLP_CTL_INTEVENT_M BIT(31)
+#define PFINT_AEQCTL 0x0016CB00 /* Reset Source: CORER */
+#define PFINT_AEQCTL_MSIX_INDX_S 0
+#define PFINT_AEQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PFINT_AEQCTL_ITR_INDX_S 11
+#define PFINT_AEQCTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PFINT_AEQCTL_CAUSE_ENA_S 30
+#define PFINT_AEQCTL_CAUSE_ENA_M BIT(30)
+#define PFINT_AEQCTL_INTEVENT_S 31
+#define PFINT_AEQCTL_INTEVENT_M BIT(31)
+#define PFINT_ALLOC 0x001D2600 /* Reset Source: CORER */
+#define PFINT_ALLOC_FIRST_S 0
+#define PFINT_ALLOC_FIRST_M MAKEMASK(0x7FF, 0)
+#define PFINT_ALLOC_LAST_S 12
+#define PFINT_ALLOC_LAST_M MAKEMASK(0x7FF, 12)
+#define PFINT_ALLOC_VALID_S 31
+#define PFINT_ALLOC_VALID_M BIT(31)
+#define PFINT_ALLOC_PCI 0x0009D800 /* Reset Source: PCIR */
+#define PFINT_ALLOC_PCI_FIRST_S 0
+#define PFINT_ALLOC_PCI_FIRST_M MAKEMASK(0x7FF, 0)
+#define PFINT_ALLOC_PCI_LAST_S 12
+#define PFINT_ALLOC_PCI_LAST_M MAKEMASK(0x7FF, 12)
+#define PFINT_ALLOC_PCI_VALID_S 31
+#define PFINT_ALLOC_PCI_VALID_M BIT(31)
+#define PFINT_FW_CTL 0x0016C800 /* Reset Source: CORER */
+#define PFINT_FW_CTL_MSIX_INDX_S 0
+#define PFINT_FW_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PFINT_FW_CTL_ITR_INDX_S 11
+#define PFINT_FW_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PFINT_FW_CTL_CAUSE_ENA_S 30
+#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_FW_CTL_INTEVENT_S 31
+#define PFINT_FW_CTL_INTEVENT_M BIT(31)
+#define PFINT_GPIO_ENA 0x00088080 /* Reset Source: CORER */
+#define PFINT_GPIO_ENA_GPIO0_ENA_S 0
+#define PFINT_GPIO_ENA_GPIO0_ENA_M BIT(0)
+#define PFINT_GPIO_ENA_GPIO1_ENA_S 1
+#define PFINT_GPIO_ENA_GPIO1_ENA_M BIT(1)
+#define PFINT_GPIO_ENA_GPIO2_ENA_S 2
+#define PFINT_GPIO_ENA_GPIO2_ENA_M BIT(2)
+#define PFINT_GPIO_ENA_GPIO3_ENA_S 3
+#define PFINT_GPIO_ENA_GPIO3_ENA_M BIT(3)
+#define PFINT_GPIO_ENA_GPIO4_ENA_S 4
+#define PFINT_GPIO_ENA_GPIO4_ENA_M BIT(4)
+#define PFINT_GPIO_ENA_GPIO5_ENA_S 5
+#define PFINT_GPIO_ENA_GPIO5_ENA_M BIT(5)
+#define PFINT_GPIO_ENA_GPIO6_ENA_S 6
+#define PFINT_GPIO_ENA_GPIO6_ENA_M BIT(6)
+#define PFINT_MBX_CTL 0x0016B280 /* Reset Source: CORER */
+#define PFINT_MBX_CTL_MSIX_INDX_S 0
+#define PFINT_MBX_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PFINT_MBX_CTL_ITR_INDX_S 11
+#define PFINT_MBX_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PFINT_MBX_CTL_CAUSE_ENA_S 30
+#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_MBX_CTL_INTEVENT_S 31
+#define PFINT_MBX_CTL_INTEVENT_M BIT(31)
+#define PFINT_OICR 0x0016CA00 /* Reset Source: CORER */
+#define PFINT_OICR_INTEVENT_S 0
+#define PFINT_OICR_INTEVENT_M BIT(0)
+#define PFINT_OICR_QUEUE_S 1
+#define PFINT_OICR_QUEUE_M BIT(1)
+#define PFINT_OICR_RSV1_S 2
+#define PFINT_OICR_RSV1_M MAKEMASK(0xFF, 2)
+#define PFINT_OICR_HH_COMP_S 10
+#define PFINT_OICR_HH_COMP_M BIT(10)
+#define PFINT_OICR_TSYN_TX_S 11
+#define PFINT_OICR_TSYN_TX_M BIT(11)
+#define PFINT_OICR_TSYN_EVNT_S 12
+#define PFINT_OICR_TSYN_EVNT_M BIT(12)
+#define PFINT_OICR_TSYN_TGT_S 13
+#define PFINT_OICR_TSYN_TGT_M BIT(13)
+#define PFINT_OICR_HLP_RDY_S 14
+#define PFINT_OICR_HLP_RDY_M BIT(14)
+#define PFINT_OICR_CPM_RDY_S 15
+#define PFINT_OICR_CPM_RDY_M BIT(15)
+#define PFINT_OICR_ECC_ERR_S 16
+#define PFINT_OICR_ECC_ERR_M BIT(16)
+#define PFINT_OICR_RSV2_S 17
+#define PFINT_OICR_RSV2_M MAKEMASK(0x3, 17)
+#define PFINT_OICR_MAL_DETECT_S 19
+#define PFINT_OICR_MAL_DETECT_M BIT(19)
+#define PFINT_OICR_GRST_S 20
+#define PFINT_OICR_GRST_M BIT(20)
+#define PFINT_OICR_PCI_EXCEPTION_S 21
+#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
+#define PFINT_OICR_GPIO_S 22
+#define PFINT_OICR_GPIO_M BIT(22)
+#define PFINT_OICR_RSV3_S 23
+#define PFINT_OICR_RSV3_M BIT(23)
+#define PFINT_OICR_STORM_DETECT_S 24
+#define PFINT_OICR_STORM_DETECT_M BIT(24)
+#define PFINT_OICR_LINK_STAT_CHANGE_S 25
+#define PFINT_OICR_LINK_STAT_CHANGE_M BIT(25)
+#define PFINT_OICR_HMC_ERR_S 26
+#define PFINT_OICR_HMC_ERR_M BIT(26)
+#define PFINT_OICR_PE_PUSH_S 27
+#define PFINT_OICR_PE_PUSH_M BIT(27)
+#define PFINT_OICR_PE_CRITERR_S 28
+#define PFINT_OICR_PE_CRITERR_M BIT(28)
+#define PFINT_OICR_VFLR_S 29
+#define PFINT_OICR_VFLR_M BIT(29)
+#define PFINT_OICR_XLR_HW_DONE_S 30
+#define PFINT_OICR_XLR_HW_DONE_M BIT(30)
+#define PFINT_OICR_SWINT_S 31
+#define PFINT_OICR_SWINT_M BIT(31)
+#define PFINT_OICR_CTL 0x0016CA80 /* Reset Source: CORER */
+#define PFINT_OICR_CTL_MSIX_INDX_S 0
+#define PFINT_OICR_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PFINT_OICR_CTL_ITR_INDX_S 11
+#define PFINT_OICR_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PFINT_OICR_CTL_CAUSE_ENA_S 30
+#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_OICR_CTL_INTEVENT_S 31
+#define PFINT_OICR_CTL_INTEVENT_M BIT(31)
+#define PFINT_OICR_ENA 0x0016C900 /* Reset Source: CORER */
+#define PFINT_OICR_ENA_RSV0_S 0
+#define PFINT_OICR_ENA_RSV0_M BIT(0)
+#define PFINT_OICR_ENA_INT_ENA_S 1
+#define PFINT_OICR_ENA_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1)
+#define PFINT_SB_CTL 0x0016B600 /* Reset Source: CORER */
+#define PFINT_SB_CTL_MSIX_INDX_S 0
+#define PFINT_SB_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define PFINT_SB_CTL_ITR_INDX_S 11
+#define PFINT_SB_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define PFINT_SB_CTL_CAUSE_ENA_S 30
+#define PFINT_SB_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_SB_CTL_INTEVENT_S 31
+#define PFINT_SB_CTL_INTEVENT_M BIT(31)
+#define PFINT_TSYN_MSK 0x0016C980 /* Reset Source: CORER */
+#define PFINT_TSYN_MSK_PHY_INDX_S 0
+#define PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define QINT_RQCTL_MAX_INDEX 2047
+#define QINT_RQCTL_MSIX_INDX_S 0
+#define QINT_RQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define QINT_RQCTL_ITR_INDX_S 11
+#define QINT_RQCTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define QINT_RQCTL_CAUSE_ENA_S 30
+#define QINT_RQCTL_CAUSE_ENA_M BIT(30)
+#define QINT_RQCTL_INTEVENT_S 31
+#define QINT_RQCTL_INTEVENT_M BIT(31)
+#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define QINT_TQCTL_MAX_INDEX 16383
+#define QINT_TQCTL_MSIX_INDX_S 0
+#define QINT_TQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define QINT_TQCTL_ITR_INDX_S 11
+#define QINT_TQCTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define QINT_TQCTL_CAUSE_ENA_S 30
+#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
+#define QINT_TQCTL_INTEVENT_S 31
+#define QINT_TQCTL_INTEVENT_M BIT(31)
+#define VPINT_AEQCTL(_VF) (0x0016B800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPINT_AEQCTL_MAX_INDEX 255
+#define VPINT_AEQCTL_MSIX_INDX_S 0
+#define VPINT_AEQCTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define VPINT_AEQCTL_ITR_INDX_S 11
+#define VPINT_AEQCTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define VPINT_AEQCTL_CAUSE_ENA_S 30
+#define VPINT_AEQCTL_CAUSE_ENA_M BIT(30)
+#define VPINT_AEQCTL_INTEVENT_S 31
+#define VPINT_AEQCTL_INTEVENT_M BIT(31)
+#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPINT_ALLOC_MAX_INDEX 255
+#define VPINT_ALLOC_FIRST_S 0
+#define VPINT_ALLOC_FIRST_M MAKEMASK(0x7FF, 0)
+#define VPINT_ALLOC_LAST_S 12
+#define VPINT_ALLOC_LAST_M MAKEMASK(0x7FF, 12)
+#define VPINT_ALLOC_VALID_S 31
+#define VPINT_ALLOC_VALID_M BIT(31)
+#define VPINT_ALLOC_PCI(_VF) (0x0009D000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PCIR */
+#define VPINT_ALLOC_PCI_MAX_INDEX 255
+#define VPINT_ALLOC_PCI_FIRST_S 0
+#define VPINT_ALLOC_PCI_FIRST_M MAKEMASK(0x7FF, 0)
+#define VPINT_ALLOC_PCI_LAST_S 12
+#define VPINT_ALLOC_PCI_LAST_M MAKEMASK(0x7FF, 12)
+#define VPINT_ALLOC_PCI_VALID_S 31
+#define VPINT_ALLOC_PCI_VALID_M BIT(31)
+#define VPINT_MBX_CPM_CTL(_VP128) (0x0016B000 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VPINT_MBX_CPM_CTL_MAX_INDEX 127
+#define VPINT_MBX_CPM_CTL_MSIX_INDX_S 0
+#define VPINT_MBX_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define VPINT_MBX_CPM_CTL_ITR_INDX_S 11
+#define VPINT_MBX_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define VPINT_MBX_CPM_CTL_CAUSE_ENA_S 30
+#define VPINT_MBX_CPM_CTL_CAUSE_ENA_M BIT(30)
+#define VPINT_MBX_CPM_CTL_INTEVENT_S 31
+#define VPINT_MBX_CPM_CTL_INTEVENT_M BIT(31)
+#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VPINT_MBX_CTL_MAX_INDEX 767
+#define VPINT_MBX_CTL_MSIX_INDX_S 0
+#define VPINT_MBX_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define VPINT_MBX_CTL_ITR_INDX_S 11
+#define VPINT_MBX_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define VPINT_MBX_CTL_CAUSE_ENA_S 30
+#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
+#define VPINT_MBX_CTL_INTEVENT_S 31
+#define VPINT_MBX_CTL_INTEVENT_M BIT(31)
+#define VPINT_MBX_HLP_CTL(_VP16) (0x0016B200 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VPINT_MBX_HLP_CTL_MAX_INDEX 15
+#define VPINT_MBX_HLP_CTL_MSIX_INDX_S 0
+#define VPINT_MBX_HLP_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define VPINT_MBX_HLP_CTL_ITR_INDX_S 11
+#define VPINT_MBX_HLP_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define VPINT_MBX_HLP_CTL_CAUSE_ENA_S 30
+#define VPINT_MBX_HLP_CTL_CAUSE_ENA_M BIT(30)
+#define VPINT_MBX_HLP_CTL_INTEVENT_S 31
+#define VPINT_MBX_HLP_CTL_INTEVENT_M BIT(31)
+#define VPINT_MBX_PSM_CTL(_VP16) (0x0016B240 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VPINT_MBX_PSM_CTL_MAX_INDEX 15
+#define VPINT_MBX_PSM_CTL_MSIX_INDX_S 0
+#define VPINT_MBX_PSM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define VPINT_MBX_PSM_CTL_ITR_INDX_S 11
+#define VPINT_MBX_PSM_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define VPINT_MBX_PSM_CTL_CAUSE_ENA_S 30
+#define VPINT_MBX_PSM_CTL_CAUSE_ENA_M BIT(30)
+#define VPINT_MBX_PSM_CTL_INTEVENT_S 31
+#define VPINT_MBX_PSM_CTL_INTEVENT_M BIT(31)
+#define VPINT_SB_CPM_CTL(_VP128) (0x0016B400 + ((_VP128) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define VPINT_SB_CPM_CTL_MAX_INDEX 127
+#define VPINT_SB_CPM_CTL_MSIX_INDX_S 0
+#define VPINT_SB_CPM_CTL_MSIX_INDX_M MAKEMASK(0x7FF, 0)
+#define VPINT_SB_CPM_CTL_ITR_INDX_S 11
+#define VPINT_SB_CPM_CTL_ITR_INDX_M MAKEMASK(0x3, 11)
+#define VPINT_SB_CPM_CTL_CAUSE_ENA_S 30
+#define VPINT_SB_CPM_CTL_CAUSE_ENA_M BIT(30)
+#define VPINT_SB_CPM_CTL_INTEVENT_S 31
+#define VPINT_SB_CPM_CTL_INTEVENT_M BIT(31)
+#define GL_HLP_PRT_IPG_PREAMBLE_SIZE(_i) (0x00049240 + ((_i) * 4)) /* _i=0...20 */ /* Reset Source: CORER */
+#define GL_HLP_PRT_IPG_PREAMBLE_SIZE_MAX_INDEX 20
+#define GL_HLP_PRT_IPG_PREAMBLE_SIZE_IPG_PREAMBLE_SIZE_S 0
+#define GL_HLP_PRT_IPG_PREAMBLE_SIZE_IPG_PREAMBLE_SIZE_M MAKEMASK(0xFF, 0)
+#define GL_TDPU_PSM_DEFAULT_RECIPE(_i) (0x00049294 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GL_TDPU_PSM_DEFAULT_RECIPE_MAX_INDEX 3
+#define GL_TDPU_PSM_DEFAULT_RECIPE_ADD_IPG_S 0
+#define GL_TDPU_PSM_DEFAULT_RECIPE_ADD_IPG_M BIT(0)
+#define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_CRC_S 1
+#define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_CRC_M BIT(1)
+#define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_ESP_TRAILER_S 2
+#define GL_TDPU_PSM_DEFAULT_RECIPE_SUB_ESP_TRAILER_M BIT(2)
+#define GL_TDPU_PSM_DEFAULT_RECIPE_INCLUDE_L2_PAD_S 3
+#define GL_TDPU_PSM_DEFAULT_RECIPE_INCLUDE_L2_PAD_M BIT(3)
+#define GL_TDPU_PSM_DEFAULT_RECIPE_DEFAULT_UPDATE_MODE_S 4
+#define GL_TDPU_PSM_DEFAULT_RECIPE_DEFAULT_UPDATE_MODE_M BIT(4)
+#define GLLAN_PF_RECIPE(_i) (0x0029420C + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLLAN_PF_RECIPE_MAX_INDEX 7
+#define GLLAN_PF_RECIPE_RECIPE_S 0
+#define GLLAN_PF_RECIPE_RECIPE_M MAKEMASK(0x3, 0)
+#define GLLAN_RCTL_0 0x002941F8 /* Reset Source: CORER */
+#define GLLAN_RCTL_0_PXE_MODE_S 0
+#define GLLAN_RCTL_0_PXE_MODE_M BIT(0)
+#define GLLAN_RCTL_1 0x002941FC /* Reset Source: CORER */
+#define GLLAN_RCTL_1_RXMAX_EXPANSION_S 12
+#define GLLAN_RCTL_1_RXMAX_EXPANSION_M MAKEMASK(0xF, 12)
+#define GLLAN_RCTL_1_RXDRDCTL_S 17
+#define GLLAN_RCTL_1_RXDRDCTL_M BIT(17)
+#define GLLAN_RCTL_1_RXDESCRDROEN_S 18
+#define GLLAN_RCTL_1_RXDESCRDROEN_M BIT(18)
+#define GLLAN_RCTL_1_RXDATAWRROEN_S 19
+#define GLLAN_RCTL_1_RXDATAWRROEN_M BIT(19)
+#define GLLAN_TSOMSK_F 0x00049308 /* Reset Source: CORER */
+#define GLLAN_TSOMSK_F_TCPMSKF_S 0
+#define GLLAN_TSOMSK_F_TCPMSKF_M MAKEMASK(0xFFF, 0)
+#define GLLAN_TSOMSK_L 0x00049310 /* Reset Source: CORER */
+#define GLLAN_TSOMSK_L_TCPMSKL_S 0
+#define GLLAN_TSOMSK_L_TCPMSKL_M MAKEMASK(0xFFF, 0)
+#define GLLAN_TSOMSK_M 0x0004930C /* Reset Source: CORER */
+#define GLLAN_TSOMSK_M_TCPMSKM_S 0
+#define GLLAN_TSOMSK_M_TCPMSKM_M MAKEMASK(0xFFF, 0)
+#define PFLAN_CP_QALLOC 0x00075700 /* Reset Source: CORER */
+#define PFLAN_CP_QALLOC_FIRSTQ_S 0
+#define PFLAN_CP_QALLOC_FIRSTQ_M MAKEMASK(0x1FF, 0)
+#define PFLAN_CP_QALLOC_LASTQ_S 16
+#define PFLAN_CP_QALLOC_LASTQ_M MAKEMASK(0x1FF, 16)
+#define PFLAN_CP_QALLOC_VALID_S 31
+#define PFLAN_CP_QALLOC_VALID_M BIT(31)
+#define PFLAN_DB_QALLOC 0x00075680 /* Reset Source: CORER */
+#define PFLAN_DB_QALLOC_FIRSTQ_S 0
+#define PFLAN_DB_QALLOC_FIRSTQ_M MAKEMASK(0xFF, 0)
+#define PFLAN_DB_QALLOC_LASTQ_S 16
+#define PFLAN_DB_QALLOC_LASTQ_M MAKEMASK(0xFF, 16)
+#define PFLAN_DB_QALLOC_VALID_S 31
+#define PFLAN_DB_QALLOC_VALID_M BIT(31)
+#define PFLAN_RX_QALLOC 0x001D2500 /* Reset Source: CORER */
+#define PFLAN_RX_QALLOC_FIRSTQ_S 0
+#define PFLAN_RX_QALLOC_FIRSTQ_M MAKEMASK(0x7FF, 0)
+#define PFLAN_RX_QALLOC_LASTQ_S 16
+#define PFLAN_RX_QALLOC_LASTQ_M MAKEMASK(0x7FF, 16)
+#define PFLAN_RX_QALLOC_VALID_S 31
+#define PFLAN_RX_QALLOC_VALID_M BIT(31)
+#define PFLAN_TX_QALLOC 0x001D2580 /* Reset Source: CORER */
+#define PFLAN_TX_QALLOC_FIRSTQ_S 0
+#define PFLAN_TX_QALLOC_FIRSTQ_M MAKEMASK(0x3FFF, 0)
+#define PFLAN_TX_QALLOC_LASTQ_S 16
+#define PFLAN_TX_QALLOC_LASTQ_M MAKEMASK(0x3FFF, 16)
+#define PFLAN_TX_QALLOC_VALID_S 31
+#define PFLAN_TX_QALLOC_VALID_M BIT(31)
+#define PRT_TDPUL2TAGSEN 0x00040BA0 /* Reset Source: CORER */
+#define PRT_TDPUL2TAGSEN_ENABLE_S 0
+#define PRT_TDPUL2TAGSEN_ENABLE_M MAKEMASK(0xFF, 0)
+#define PRT_TDPUL2TAGSEN_NONLAST_TAG_S 8
+#define PRT_TDPUL2TAGSEN_NONLAST_TAG_M MAKEMASK(0xFF, 8)
+#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) /* _i=0...7, _QRX=0...2047 */ /* Reset Source: CORER */
+#define QRX_CONTEXT_MAX_INDEX 7
+#define QRX_CONTEXT_RXQ_CONTEXT_S 0
+#define QRX_CONTEXT_RXQ_CONTEXT_M MAKEMASK(0xFFFFFFFF, 0)
+#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: PFR */
+#define QRX_CTRL_MAX_INDEX 2047
+#define QRX_CTRL_QENA_REQ_S 0
+#define QRX_CTRL_QENA_REQ_M BIT(0)
+#define QRX_CTRL_FAST_QDIS_S 1
+#define QRX_CTRL_FAST_QDIS_M BIT(1)
+#define QRX_CTRL_QENA_STAT_S 2
+#define QRX_CTRL_QENA_STAT_M BIT(2)
+#define QRX_CTRL_CDE_S 3
+#define QRX_CTRL_CDE_M BIT(3)
+#define QRX_CTRL_CDS_S 4
+#define QRX_CTRL_CDS_M BIT(4)
+#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define QRX_ITR_MAX_INDEX 2047
+#define QRX_ITR_NO_EXPR_S 0
+#define QRX_ITR_NO_EXPR_M BIT(0)
+#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define QRX_TAIL_MAX_INDEX 2047
+#define QRX_TAIL_TAIL_S 0
+#define QRX_TAIL_TAIL_M MAKEMASK(0x1FFF, 0)
+#define VPDSI_RX_QTABLE(_i, _VP16) (0x00074C00 + ((_i) * 64 + (_VP16) * 4)) /* _i=0...15, _VP16=0...15 */ /* Reset Source: CORER */
+#define VPDSI_RX_QTABLE_MAX_INDEX 15
+#define VPDSI_RX_QTABLE_PAGE_INDEX0_S 0
+#define VPDSI_RX_QTABLE_PAGE_INDEX0_M MAKEMASK(0x7F, 0)
+#define VPDSI_RX_QTABLE_PAGE_INDEX1_S 8
+#define VPDSI_RX_QTABLE_PAGE_INDEX1_M MAKEMASK(0x7F, 8)
+#define VPDSI_RX_QTABLE_PAGE_INDEX2_S 16
+#define VPDSI_RX_QTABLE_PAGE_INDEX2_M MAKEMASK(0x7F, 16)
+#define VPDSI_RX_QTABLE_PAGE_INDEX3_S 24
+#define VPDSI_RX_QTABLE_PAGE_INDEX3_M MAKEMASK(0x7F, 24)
+#define VPDSI_TX_QTABLE(_i, _VP16) (0x001D2000 + ((_i) * 64 + (_VP16) * 4)) /* _i=0...15, _VP16=0...15 */ /* Reset Source: CORER */
+#define VPDSI_TX_QTABLE_MAX_INDEX 15
+#define VPDSI_TX_QTABLE_PAGE_INDEX0_S 0
+#define VPDSI_TX_QTABLE_PAGE_INDEX0_M MAKEMASK(0x7F, 0)
+#define VPDSI_TX_QTABLE_PAGE_INDEX1_S 8
+#define VPDSI_TX_QTABLE_PAGE_INDEX1_M MAKEMASK(0x7F, 8)
+#define VPDSI_TX_QTABLE_PAGE_INDEX2_S 16
+#define VPDSI_TX_QTABLE_PAGE_INDEX2_M MAKEMASK(0x7F, 16)
+#define VPDSI_TX_QTABLE_PAGE_INDEX3_S 24
+#define VPDSI_TX_QTABLE_PAGE_INDEX3_M MAKEMASK(0x7F, 24)
+#define VPLAN_DB_QTABLE(_i, _VF) (0x00070000 + ((_i) * 2048 + (_VF) * 4)) /* _i=0...3, _VF=0...255 */ /* Reset Source: CORER */
+#define VPLAN_DB_QTABLE_MAX_INDEX 3
+#define VPLAN_DB_QTABLE_QINDEX_S 0
+#define VPLAN_DB_QTABLE_QINDEX_M MAKEMASK(0x1FF, 0)
+#define VPLAN_DSI_VF_MODE(_VP16) (0x002D2C00 + ((_VP16) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define VPLAN_DSI_VF_MODE_MAX_INDEX 15
+#define VPLAN_DSI_VF_MODE_LAN_DSI_VF_MODE_S 0
+#define VPLAN_DSI_VF_MODE_LAN_DSI_VF_MODE_M BIT(0)
+#define VPLAN_RX_QBASE(_VF) (0x00072000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPLAN_RX_QBASE_MAX_INDEX 255
+#define VPLAN_RX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_RX_QBASE_VFFIRSTQ_M MAKEMASK(0x7FF, 0)
+#define VPLAN_RX_QBASE_VFNUMQ_S 16
+#define VPLAN_RX_QBASE_VFNUMQ_M MAKEMASK(0xFF, 16)
+#define VPLAN_RX_QBASE_VFQTABLE_ENA_S 31
+#define VPLAN_RX_QBASE_VFQTABLE_ENA_M BIT(31)
+#define VPLAN_RX_QTABLE(_i, _VF) (0x00060000 + ((_i) * 2048 + (_VF) * 4)) /* _i=0...15, _VF=0...255 */ /* Reset Source: CORER */
+#define VPLAN_RX_QTABLE_MAX_INDEX 15
+#define VPLAN_RX_QTABLE_QINDEX_S 0
+#define VPLAN_RX_QTABLE_QINDEX_M MAKEMASK(0xFFF, 0)
+#define VPLAN_RXQ_MAPENA(_VF) (0x00073000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPLAN_RXQ_MAPENA_MAX_INDEX 255
+#define VPLAN_RXQ_MAPENA_RX_ENA_S 0
+#define VPLAN_RXQ_MAPENA_RX_ENA_M BIT(0)
+#define VPLAN_TX_QBASE(_VF) (0x001D1800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPLAN_TX_QBASE_MAX_INDEX 255
+#define VPLAN_TX_QBASE_VFFIRSTQ_S 0
+#define VPLAN_TX_QBASE_VFFIRSTQ_M MAKEMASK(0x3FFF, 0)
+#define VPLAN_TX_QBASE_VFNUMQ_S 16
+#define VPLAN_TX_QBASE_VFNUMQ_M MAKEMASK(0xFF, 16)
+#define VPLAN_TX_QBASE_VFQTABLE_ENA_S 31
+#define VPLAN_TX_QBASE_VFQTABLE_ENA_M BIT(31)
+#define VPLAN_TX_QTABLE(_i, _VF) (0x001C0000 + ((_i) * 2048 + (_VF) * 4)) /* _i=0...15, _VF=0...255 */ /* Reset Source: CORER */
+#define VPLAN_TX_QTABLE_MAX_INDEX 15
+#define VPLAN_TX_QTABLE_QINDEX_S 0
+#define VPLAN_TX_QTABLE_QINDEX_M MAKEMASK(0x7FFF, 0)
+#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPLAN_TXQ_MAPENA_MAX_INDEX 255
+#define VPLAN_TXQ_MAPENA_TX_ENA_S 0
+#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
+#define VSILAN_QBASE(_VSI) (0x0044C000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define VSILAN_QBASE_MAX_INDEX 767
+#define VSILAN_QBASE_VSIBASE_S 0
+#define VSILAN_QBASE_VSIBASE_M MAKEMASK(0x7FF, 0)
+#define VSILAN_QBASE_VSIQTABLE_ENA_S 11
+#define VSILAN_QBASE_VSIQTABLE_ENA_M BIT(11)
+#define VSILAN_QTABLE(_i, _VSI) (0x00440000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...7, _VSI=0...767 */ /* Reset Source: PFR */
+#define VSILAN_QTABLE_MAX_INDEX 7
+#define VSILAN_QTABLE_QINDEX_0_S 0
+#define VSILAN_QTABLE_QINDEX_0_M MAKEMASK(0x7FF, 0)
+#define VSILAN_QTABLE_QINDEX_1_S 16
+#define VSILAN_QTABLE_QINDEX_1_M MAKEMASK(0x7FF, 16)
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E31C0 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_S 0
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_M BIT(0)
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E34C0 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_S 0
+#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_M BIT(0)
+#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E35C0 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_S 0
+#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_M BIT(0)
+#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0
+#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0)
+#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0
+#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0
+#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S 0
+#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3280 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_S 0
+#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E32A0 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_S 0
+#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_HSEC_CTL_RX_QUANTA_S 0x001E3C40 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_S 0
+#define PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E31A0 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S 0
+#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_S 0
+#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0
+#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E3960 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_S 0
+#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E3980 /* Reset Source: GLOBR */
+#define PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_S 0
+#define PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_LINK_DOWN_COUNTER 0x001E47C0 /* Reset Source: GLOBR */
+#define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_S 0
+#define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E3C60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
+#define PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 7
+#define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_S 0
+#define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E3D60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
+#define PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 7
+#define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_S 0
+#define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTMAC_RX_CNT_MRKR 0x001E48E0 /* Reset Source: GLOBR */
+#define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_S 0
+#define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_RX_PKT_DRP_CNT 0x001E3C20 /* Reset Source: GLOBR */
+#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_S 0
+#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 16
+#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 16)
+#define PRTMAC_TX_CNT_MRKR 0x001E48C0 /* Reset Source: GLOBR */
+#define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_S 0
+#define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_TX_LNK_UP_CNT 0x001E4840 /* Reset Source: GLOBR */
+#define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_S 0
+#define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_M MAKEMASK(0xFFFF, 0)
+#define GL_MDCK_CFG1_TX_PQM 0x002D2DF4 /* Reset Source: CORER */
+#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DATA_LEN_S 0
+#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DATA_LEN_M MAKEMASK(0xFF, 0)
+#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_PKT_CNT_S 8
+#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_PKT_CNT_M MAKEMASK(0x3F, 8)
+#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DESC_CNT_S 16
+#define GL_MDCK_CFG1_TX_PQM_SSO_MAX_DESC_CNT_M MAKEMASK(0x3F, 16)
+#define GL_MDCK_EN_TX_PQM 0x002D2DFC /* Reset Source: CORER */
+#define GL_MDCK_EN_TX_PQM_PCI_DUMMY_COMP_S 0
+#define GL_MDCK_EN_TX_PQM_PCI_DUMMY_COMP_M BIT(0)
+#define GL_MDCK_EN_TX_PQM_PCI_UR_COMP_S 1
+#define GL_MDCK_EN_TX_PQM_PCI_UR_COMP_M BIT(1)
+#define GL_MDCK_EN_TX_PQM_RCV_SH_BE_LSO_S 3
+#define GL_MDCK_EN_TX_PQM_RCV_SH_BE_LSO_M BIT(3)
+#define GL_MDCK_EN_TX_PQM_Q_FL_MNG_EPY_CH_S 4
+#define GL_MDCK_EN_TX_PQM_Q_FL_MNG_EPY_CH_M BIT(4)
+#define GL_MDCK_EN_TX_PQM_Q_EPY_MNG_FL_CH_S 5
+#define GL_MDCK_EN_TX_PQM_Q_EPY_MNG_FL_CH_M BIT(5)
+#define GL_MDCK_EN_TX_PQM_LSO_NUMDESCS_ZERO_S 6
+#define GL_MDCK_EN_TX_PQM_LSO_NUMDESCS_ZERO_M BIT(6)
+#define GL_MDCK_EN_TX_PQM_LSO_LENGTH_ZERO_S 7
+#define GL_MDCK_EN_TX_PQM_LSO_LENGTH_ZERO_M BIT(7)
+#define GL_MDCK_EN_TX_PQM_LSO_MSS_BELOW_MIN_S 8
+#define GL_MDCK_EN_TX_PQM_LSO_MSS_BELOW_MIN_M BIT(8)
+#define GL_MDCK_EN_TX_PQM_LSO_MSS_ABOVE_MAX_S 9
+#define GL_MDCK_EN_TX_PQM_LSO_MSS_ABOVE_MAX_M BIT(9)
+#define GL_MDCK_EN_TX_PQM_LSO_HDR_SIZE_ZERO_S 10
+#define GL_MDCK_EN_TX_PQM_LSO_HDR_SIZE_ZERO_M BIT(10)
+#define GL_MDCK_EN_TX_PQM_RCV_CNT_BE_LSO_S 11
+#define GL_MDCK_EN_TX_PQM_RCV_CNT_BE_LSO_M BIT(11)
+#define GL_MDCK_EN_TX_PQM_SKIP_ONE_QT_ONLY_S 12
+#define GL_MDCK_EN_TX_PQM_SKIP_ONE_QT_ONLY_M BIT(12)
+#define GL_MDCK_EN_TX_PQM_LSO_PKTCNT_ZERO_S 13
+#define GL_MDCK_EN_TX_PQM_LSO_PKTCNT_ZERO_M BIT(13)
+#define GL_MDCK_EN_TX_PQM_SSO_LENGTH_ZERO_S 14
+#define GL_MDCK_EN_TX_PQM_SSO_LENGTH_ZERO_M BIT(14)
+#define GL_MDCK_EN_TX_PQM_SSO_LENGTH_EXCEED_S 15
+#define GL_MDCK_EN_TX_PQM_SSO_LENGTH_EXCEED_M BIT(15)
+#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_ZERO_S 16
+#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_ZERO_M BIT(16)
+#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_S 17
+#define GL_MDCK_EN_TX_PQM_SSO_PKTCNT_EXCEED_M BIT(17)
+#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_S 18
+#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_ZERO_M BIT(18)
+#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_S 19
+#define GL_MDCK_EN_TX_PQM_SSO_NUMDESCS_EXCEED_M BIT(19)
+#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_S 20
+#define GL_MDCK_EN_TX_PQM_TAIL_GT_RING_LENGTH_M BIT(20)
+#define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_S 21
+#define GL_MDCK_EN_TX_PQM_RESERVED_DBL_TYPE_M BIT(21)
+#define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_S 22
+#define GL_MDCK_EN_TX_PQM_ILLEGAL_HEAD_DROP_DBL_M BIT(22)
+#define GL_MDCK_EN_TX_PQM_LSO_OVER_COMMS_Q_S 23
+#define GL_MDCK_EN_TX_PQM_LSO_OVER_COMMS_Q_M BIT(23)
+#define GL_MDCK_EN_TX_PQM_ILLEGAL_VF_QNUM_S 24
+#define GL_MDCK_EN_TX_PQM_ILLEGAL_VF_QNUM_M BIT(24)
+#define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_S 25
+#define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_M BIT(25)
+#define GL_MDCK_EN_TX_PQM_RSVD_S 26
+#define GL_MDCK_EN_TX_PQM_RSVD_M MAKEMASK(0x3F, 26)
+#define GL_MDCK_RX 0x0029422C /* Reset Source: CORER */
+#define GL_MDCK_RX_DESC_ADDR_S 0
+#define GL_MDCK_RX_DESC_ADDR_M BIT(0)
+#define GL_MDCK_TX_TDPU 0x00049348 /* Reset Source: CORER */
+#define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_S 0
+#define GL_MDCK_TX_TDPU_TTL_ERR_ITR_DIS_M BIT(0)
+#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_S 1
+#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
+#define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_S 2
+#define GL_MDCK_TX_TDPU_PCIE_UR_ITR_DIS_M BIT(2)
+#define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_S 3
+#define GL_MDCK_TX_TDPU_MAL_OFFSET_ITR_DIS_M BIT(3)
+#define GL_MDCK_TX_TDPU_MAL_CMD_ITR_DIS_S 4
+#define GL_MDCK_TX_TDPU_MAL_CMD_ITR_DIS_M BIT(4)
+#define GL_MDCK_TX_TDPU_BIG_PKT_SIZE_ITR_DIS_S 5
+#define GL_MDCK_TX_TDPU_BIG_PKT_SIZE_ITR_DIS_M BIT(5)
+#define GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_S 6
+#define GL_MDCK_TX_TDPU_L2_ACCEPT_FAIL_ITR_DIS_M BIT(6)
+#define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_S 7
+#define GL_MDCK_TX_TDPU_NIC_DSI_ITR_DIS_M BIT(7)
+#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_S 8
+#define GL_MDCK_TX_TDPU_MAL_IPSEC_CMD_ITR_DIS_M BIT(8)
+#define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_S 9
+#define GL_MDCK_TX_TDPU_DSCP_CHECK_FAIL_ITR_DIS_M BIT(9)
+#define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_S 10
+#define GL_MDCK_TX_TDPU_NIC_IPSEC_ITR_DIS_M BIT(10)
+#define GL_MDET_RX 0x00294C00 /* Reset Source: CORER */
+#define GL_MDET_RX_QNUM_S 0
+#define GL_MDET_RX_QNUM_M MAKEMASK(0x7FFF, 0)
+#define GL_MDET_RX_VF_NUM_S 15
+#define GL_MDET_RX_VF_NUM_M MAKEMASK(0xFF, 15)
+#define GL_MDET_RX_PF_NUM_S 23
+#define GL_MDET_RX_PF_NUM_M MAKEMASK(0x7, 23)
+#define GL_MDET_RX_MAL_TYPE_S 26
+#define GL_MDET_RX_MAL_TYPE_M MAKEMASK(0x1F, 26)
+#define GL_MDET_RX_VALID_S 31
+#define GL_MDET_RX_VALID_M BIT(31)
+#define GL_MDET_TX_PQM 0x002D2E00 /* Reset Source: CORER */
+#define GL_MDET_TX_PQM_PF_NUM_S 0
+#define GL_MDET_TX_PQM_PF_NUM_M MAKEMASK(0x7, 0)
+#define GL_MDET_TX_PQM_VF_NUM_S 4
+#define GL_MDET_TX_PQM_VF_NUM_M MAKEMASK(0xFF, 4)
+#define GL_MDET_TX_PQM_QNUM_S 12
+#define GL_MDET_TX_PQM_QNUM_M MAKEMASK(0x3FFF, 12)
+#define GL_MDET_TX_PQM_MAL_TYPE_S 26
+#define GL_MDET_TX_PQM_MAL_TYPE_M MAKEMASK(0x1F, 26)
+#define GL_MDET_TX_PQM_VALID_S 31
+#define GL_MDET_TX_PQM_VALID_M BIT(31)
+#define GL_MDET_TX_TCLAN 0x000FC068 /* Reset Source: CORER */
+#define GL_MDET_TX_TCLAN_QNUM_S 0
+#define GL_MDET_TX_TCLAN_QNUM_M MAKEMASK(0x7FFF, 0)
+#define GL_MDET_TX_TCLAN_VF_NUM_S 15
+#define GL_MDET_TX_TCLAN_VF_NUM_M MAKEMASK(0xFF, 15)
+#define GL_MDET_TX_TCLAN_PF_NUM_S 23
+#define GL_MDET_TX_TCLAN_PF_NUM_M MAKEMASK(0x7, 23)
+#define GL_MDET_TX_TCLAN_MAL_TYPE_S 26
+#define GL_MDET_TX_TCLAN_MAL_TYPE_M MAKEMASK(0x1F, 26)
+#define GL_MDET_TX_TCLAN_VALID_S 31
+#define GL_MDET_TX_TCLAN_VALID_M BIT(31)
+#define GL_MDET_TX_TDPU 0x00049350 /* Reset Source: CORER */
+#define GL_MDET_TX_TDPU_QNUM_S 0
+#define GL_MDET_TX_TDPU_QNUM_M MAKEMASK(0x7FFF, 0)
+#define GL_MDET_TX_TDPU_VF_NUM_S 15
+#define GL_MDET_TX_TDPU_VF_NUM_M MAKEMASK(0xFF, 15)
+#define GL_MDET_TX_TDPU_PF_NUM_S 23
+#define GL_MDET_TX_TDPU_PF_NUM_M MAKEMASK(0x7, 23)
+#define GL_MDET_TX_TDPU_MAL_TYPE_S 26
+#define GL_MDET_TX_TDPU_MAL_TYPE_M MAKEMASK(0x1F, 26)
+#define GL_MDET_TX_TDPU_VALID_S 31
+#define GL_MDET_TX_TDPU_VALID_M BIT(31)
+#define GLRLAN_MDET 0x00294200 /* Reset Source: CORER */
+#define GLRLAN_MDET_PCKT_EXTRCT_ERR_S 0
+#define GLRLAN_MDET_PCKT_EXTRCT_ERR_M BIT(0)
+#define PF_MDET_RX 0x00294280 /* Reset Source: CORER */
+#define PF_MDET_RX_VALID_S 0
+#define PF_MDET_RX_VALID_M BIT(0)
+#define PF_MDET_TX_PQM 0x002D2C80 /* Reset Source: CORER */
+#define PF_MDET_TX_PQM_VALID_S 0
+#define PF_MDET_TX_PQM_VALID_M BIT(0)
+#define PF_MDET_TX_TCLAN 0x000FC000 /* Reset Source: CORER */
+#define PF_MDET_TX_TCLAN_VALID_S 0
+#define PF_MDET_TX_TCLAN_VALID_M BIT(0)
+#define PF_MDET_TX_TDPU 0x00040800 /* Reset Source: CORER */
+#define PF_MDET_TX_TDPU_VALID_S 0
+#define PF_MDET_TX_TDPU_VALID_M BIT(0)
+#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VP_MDET_RX_MAX_INDEX 255
+#define VP_MDET_RX_VALID_S 0
+#define VP_MDET_RX_VALID_M BIT(0)
+#define VP_MDET_TX_PQM(_VF) (0x002D2000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VP_MDET_TX_PQM_MAX_INDEX 255
+#define VP_MDET_TX_PQM_VALID_S 0
+#define VP_MDET_TX_PQM_VALID_M BIT(0)
+#define VP_MDET_TX_TCLAN(_VF) (0x000FB800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VP_MDET_TX_TCLAN_MAX_INDEX 255
+#define VP_MDET_TX_TCLAN_VALID_S 0
+#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
+#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VP_MDET_TX_TDPU_MAX_INDEX 255
+#define VP_MDET_TX_TDPU_VALID_S 0
+#define VP_MDET_TX_TDPU_VALID_M BIT(0)
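The _S/_M pairs above follow the usual shift-and-mask idiom for decoding a latched event register. A minimal sketch of how such a value is typically consumed, assuming the BIT()/MAKEMASK() helpers defined earlier in this header and a raw 32-bit read of GL_MDET_TX_TCLAN already in hand (decode_mdet_tx_tclan() below is purely illustrative and not part of this change):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: pull the PF/VF number, event type and queue number out
 * of a GL_MDET_TX_TCLAN value; a real handler would then write the register
 * back to clear the latched event. */
static void
decode_mdet_tx_tclan(uint32_t reg)
{
	if ((reg & GL_MDET_TX_TCLAN_VALID_M) == 0)
		return;
	printf("MDD on TX TCLAN: PF %u VF %u event %u queue %u\n",
	    (unsigned)((reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S),
	    (unsigned)((reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> GL_MDET_TX_TCLAN_VF_NUM_S),
	    (unsigned)((reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> GL_MDET_TX_TCLAN_MAL_TYPE_S),
	    (unsigned)((reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S));
}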
+#define GENERAL_MNG_FW_DBG_CSR(_i) (0x000B6180 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: POR */
+#define GENERAL_MNG_FW_DBG_CSR_MAX_INDEX 9
+#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_S 0
+#define GENERAL_MNG_FW_DBG_CSR_GENERAL_FW_DBG_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
+#define GL_FWRESETCNT_FWRESETCNT_S 0
+#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_MNG_FW_RAM_STAT 0x0008309C /* Reset Source: POR */
+#define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_S 0
+#define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_M BIT(0)
+#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_S 1
+#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_M BIT(1)
+#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define GL_MNG_FWSM_FW_MODES_S 0
+#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
+#define GL_MNG_FWSM_RSV0_S 3
+#define GL_MNG_FWSM_RSV0_M MAKEMASK(0x7F, 3)
+#define GL_MNG_FWSM_EEP_RELOAD_IND_S 10
+#define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10)
+#define GL_MNG_FWSM_RSV1_S 11
+#define GL_MNG_FWSM_RSV1_M MAKEMASK(0xF, 11)
+#define GL_MNG_FWSM_RSV2_S 15
+#define GL_MNG_FWSM_RSV2_M BIT(15)
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_S 16
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_M BIT(16)
+#define GL_MNG_FWSM_POR_AL_FAILURE_S 17
+#define GL_MNG_FWSM_POR_AL_FAILURE_M BIT(17)
+#define GL_MNG_FWSM_RSV3_S 18
+#define GL_MNG_FWSM_RSV3_M BIT(18)
+#define GL_MNG_FWSM_EXT_ERR_IND_S 19
+#define GL_MNG_FWSM_EXT_ERR_IND_M MAKEMASK(0x3F, 19)
+#define GL_MNG_FWSM_RSV4_S 25
+#define GL_MNG_FWSM_RSV4_M BIT(25)
+#define GL_MNG_FWSM_RESERVED_11_S 26
+#define GL_MNG_FWSM_RESERVED_11_M MAKEMASK(0xF, 26)
+#define GL_MNG_FWSM_RSV5_S 30
+#define GL_MNG_FWSM_RSV5_M MAKEMASK(0x3, 30)
+#define GL_MNG_HWARB_CTRL 0x000B6130 /* Reset Source: POR */
+#define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_S 0
+#define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_M BIT(0)
+#define GL_MNG_SHA_EXTEND(_i) (0x00083120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
+#define GL_MNG_SHA_EXTEND_MAX_INDEX 7
+#define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_S 0
+#define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_MNG_SHA_EXTEND_ROM(_i) (0x00083160 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
+#define GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 7
+#define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_S 0
+#define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_MNG_SHA_EXTEND_STATUS 0x00083148 /* Reset Source: EMPR */
+#define GL_MNG_SHA_EXTEND_STATUS_STAGE_S 0
+#define GL_MNG_SHA_EXTEND_STATUS_STAGE_M MAKEMASK(0x7, 0)
+#define GL_MNG_SHA_EXTEND_STATUS_FW_HALTED_S 30
+#define GL_MNG_SHA_EXTEND_STATUS_FW_HALTED_M BIT(30)
+#define GL_MNG_SHA_EXTEND_STATUS_DONE_S 31
+#define GL_MNG_SHA_EXTEND_STATUS_DONE_M BIT(31)
+#define GL_SWT_PRT2MDEF(_i) (0x00216018 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: POR */
+#define GL_SWT_PRT2MDEF_MAX_INDEX 31
+#define GL_SWT_PRT2MDEF_MDEFIDX_S 0
+#define GL_SWT_PRT2MDEF_MDEFIDX_M MAKEMASK(0x7, 0)
+#define GL_SWT_PRT2MDEF_MDEFENA_S 31
+#define GL_SWT_PRT2MDEF_MDEFENA_M BIT(31)
+#define PRT_MNG_MANC 0x00214720 /* Reset Source: POR */
+#define PRT_MNG_MANC_FLOW_CONTROL_DISCARD_S 0
+#define PRT_MNG_MANC_FLOW_CONTROL_DISCARD_M BIT(0)
+#define PRT_MNG_MANC_NCSI_DISCARD_S 1
+#define PRT_MNG_MANC_NCSI_DISCARD_M BIT(1)
+#define PRT_MNG_MANC_RCV_TCO_EN_S 17
+#define PRT_MNG_MANC_RCV_TCO_EN_M BIT(17)
+#define PRT_MNG_MANC_RCV_ALL_S 19
+#define PRT_MNG_MANC_RCV_ALL_M BIT(19)
+#define PRT_MNG_MANC_FIXED_NET_TYPE_S 25
+#define PRT_MNG_MANC_FIXED_NET_TYPE_M BIT(25)
+#define PRT_MNG_MANC_NET_TYPE_S 26
+#define PRT_MNG_MANC_NET_TYPE_M BIT(26)
+#define PRT_MNG_MANC_EN_BMC2OS_S 28
+#define PRT_MNG_MANC_EN_BMC2OS_M BIT(28)
+#define PRT_MNG_MANC_EN_BMC2NET_S 29
+#define PRT_MNG_MANC_EN_BMC2NET_M BIT(29)
+#define PRT_MNG_MAVTV(_i) (0x00214780 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: POR */
+#define PRT_MNG_MAVTV_MAX_INDEX 7
+#define PRT_MNG_MAVTV_VID_S 0
+#define PRT_MNG_MAVTV_VID_M MAKEMASK(0xFFF, 0)
+#define PRT_MNG_MDEF(_i) (0x00214880 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: POR */
+#define PRT_MNG_MDEF_MAX_INDEX 7
+#define PRT_MNG_MDEF_MAC_EXACT_AND_S 0
+#define PRT_MNG_MDEF_MAC_EXACT_AND_M MAKEMASK(0xF, 0)
+#define PRT_MNG_MDEF_BROADCAST_AND_S 4
+#define PRT_MNG_MDEF_BROADCAST_AND_M BIT(4)
+#define PRT_MNG_MDEF_VLAN_AND_S 5
+#define PRT_MNG_MDEF_VLAN_AND_M MAKEMASK(0xFF, 5)
+#define PRT_MNG_MDEF_IPV4_ADDRESS_AND_S 13
+#define PRT_MNG_MDEF_IPV4_ADDRESS_AND_M MAKEMASK(0xF, 13)
+#define PRT_MNG_MDEF_IPV6_ADDRESS_AND_S 17
+#define PRT_MNG_MDEF_IPV6_ADDRESS_AND_M MAKEMASK(0xF, 17)
+#define PRT_MNG_MDEF_MAC_EXACT_OR_S 21
+#define PRT_MNG_MDEF_MAC_EXACT_OR_M MAKEMASK(0xF, 21)
+#define PRT_MNG_MDEF_BROADCAST_OR_S 25
+#define PRT_MNG_MDEF_BROADCAST_OR_M BIT(25)
+#define PRT_MNG_MDEF_MULTICAST_AND_S 26
+#define PRT_MNG_MDEF_MULTICAST_AND_M BIT(26)
+#define PRT_MNG_MDEF_ARP_REQUEST_OR_S 27
+#define PRT_MNG_MDEF_ARP_REQUEST_OR_M BIT(27)
+#define PRT_MNG_MDEF_ARP_RESPONSE_OR_S 28
+#define PRT_MNG_MDEF_ARP_RESPONSE_OR_M BIT(28)
+#define PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_S 29
+#define PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_M BIT(29)
+#define PRT_MNG_MDEF_PORT_0X298_OR_S 30
+#define PRT_MNG_MDEF_PORT_0X298_OR_M BIT(30)
+#define PRT_MNG_MDEF_PORT_0X26F_OR_S 31
+#define PRT_MNG_MDEF_PORT_0X26F_OR_M BIT(31)
+#define PRT_MNG_MDEF_EXT(_i) (0x00214A00 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: POR */
+#define PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_S 0
+#define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_M MAKEMASK(0xF, 0)
+#define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_S 4
+#define PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_M MAKEMASK(0xF, 4)
+#define PRT_MNG_MDEF_EXT_FLEX_PORT_OR_S 8
+#define PRT_MNG_MDEF_EXT_FLEX_PORT_OR_M MAKEMASK(0xFFFF, 8)
+#define PRT_MNG_MDEF_EXT_FLEX_TCO_S 24
+#define PRT_MNG_MDEF_EXT_FLEX_TCO_M BIT(24)
+#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_S 25
+#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_M BIT(25)
+#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_S 26
+#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_M BIT(26)
+#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_S 27
+#define PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_M BIT(27)
+#define PRT_MNG_MDEF_EXT_ICMP_OR_S 28
+#define PRT_MNG_MDEF_EXT_ICMP_OR_M BIT(28)
+#define PRT_MNG_MDEF_EXT_MLD_S 29
+#define PRT_MNG_MDEF_EXT_MLD_M BIT(29)
+#define PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_S 30
+#define PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_M BIT(30)
+#define PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_S 31
+#define PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_M BIT(31)
+#define PRT_MNG_MDEFVSI(_i) (0x00214980 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */
+#define PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define PRT_MNG_MDEFVSI_MDEFVSI_2N_S 0
+#define PRT_MNG_MDEFVSI_MDEFVSI_2N_M MAKEMASK(0xFFFF, 0)
+#define PRT_MNG_MDEFVSI_MDEFVSI_2NP1_S 16
+#define PRT_MNG_MDEFVSI_MDEFVSI_2NP1_M MAKEMASK(0xFFFF, 16)
+#define PRT_MNG_METF(_i) (0x00214120 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */
+#define PRT_MNG_METF_MAX_INDEX 3
+#define PRT_MNG_METF_ETYPE_S 0
+#define PRT_MNG_METF_ETYPE_M MAKEMASK(0xFFFF, 0)
+#define PRT_MNG_METF_POLARITY_S 30
+#define PRT_MNG_METF_POLARITY_M BIT(30)
+#define PRT_MNG_MFUTP(_i) (0x00214320 + ((_i) * 32)) /* _i=0...15 */ /* Reset Source: POR */
+#define PRT_MNG_MFUTP_MAX_INDEX 15
+#define PRT_MNG_MFUTP_MFUTP_N_S 0
+#define PRT_MNG_MFUTP_MFUTP_N_M MAKEMASK(0xFFFF, 0)
+#define PRT_MNG_MFUTP_UDP_S 16
+#define PRT_MNG_MFUTP_UDP_M BIT(16)
+#define PRT_MNG_MFUTP_TCP_S 17
+#define PRT_MNG_MFUTP_TCP_M BIT(17)
+#define PRT_MNG_MFUTP_SOURCE_DESTINATION_S 18
+#define PRT_MNG_MFUTP_SOURCE_DESTINATION_M BIT(18)
+#define PRT_MNG_MIPAF4(_i) (0x002141A0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */
+#define PRT_MNG_MIPAF4_MAX_INDEX 3
+#define PRT_MNG_MIPAF4_MIPAF_S 0
+#define PRT_MNG_MIPAF4_MIPAF_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRT_MNG_MIPAF6(_i) (0x00214520 + ((_i) * 32)) /* _i=0...15 */ /* Reset Source: POR */
+#define PRT_MNG_MIPAF6_MAX_INDEX 15
+#define PRT_MNG_MIPAF6_MIPAF_S 0
+#define PRT_MNG_MIPAF6_MIPAF_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRT_MNG_MMAH(_i) (0x00214220 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */
+#define PRT_MNG_MMAH_MAX_INDEX 3
+#define PRT_MNG_MMAH_MMAH_S 0
+#define PRT_MNG_MMAH_MMAH_M MAKEMASK(0xFFFF, 0)
+#define PRT_MNG_MMAL(_i) (0x002142A0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: POR */
+#define PRT_MNG_MMAL_MAX_INDEX 3
+#define PRT_MNG_MMAL_MMAL_S 0
+#define PRT_MNG_MMAL_MMAL_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRT_MNG_MNGONLY 0x00214740 /* Reset Source: POR */
+#define PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_S 0
+#define PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_M MAKEMASK(0xFF, 0)
+#define PRT_MNG_MSFM 0x00214760 /* Reset Source: POR */
+#define PRT_MNG_MSFM_PORT_26F_UDP_S 0
+#define PRT_MNG_MSFM_PORT_26F_UDP_M BIT(0)
+#define PRT_MNG_MSFM_PORT_26F_TCP_S 1
+#define PRT_MNG_MSFM_PORT_26F_TCP_M BIT(1)
+#define PRT_MNG_MSFM_PORT_298_UDP_S 2
+#define PRT_MNG_MSFM_PORT_298_UDP_M BIT(2)
+#define PRT_MNG_MSFM_PORT_298_TCP_S 3
+#define PRT_MNG_MSFM_PORT_298_TCP_M BIT(3)
+#define PRT_MNG_MSFM_IPV6_0_MASK_S 4
+#define PRT_MNG_MSFM_IPV6_0_MASK_M BIT(4)
+#define PRT_MNG_MSFM_IPV6_1_MASK_S 5
+#define PRT_MNG_MSFM_IPV6_1_MASK_M BIT(5)
+#define PRT_MNG_MSFM_IPV6_2_MASK_S 6
+#define PRT_MNG_MSFM_IPV6_2_MASK_M BIT(6)
+#define PRT_MNG_MSFM_IPV6_3_MASK_S 7
+#define PRT_MNG_MSFM_IPV6_3_MASK_M BIT(7)
+#define MSIX_PBA_PAGE(_i) (0x02E08000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: FLR */
+#define MSIX_PBA_PAGE_MAX_INDEX 63
+#define MSIX_PBA_PAGE_PENBIT_S 0
+#define MSIX_PBA_PAGE_PENBIT_M MAKEMASK(0xFFFFFFFF, 0)
+#define MSIX_PBA1(_i) (0x00008000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: FLR */
+#define MSIX_PBA1_MAX_INDEX 63
+#define MSIX_PBA1_PENBIT_S 0
+#define MSIX_PBA1_PENBIT_M MAKEMASK(0xFFFFFFFF, 0)
+#define MSIX_TADD_PAGE(_i) (0x02E00000 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */
+#define MSIX_TADD_PAGE_MAX_INDEX 2047
+#define MSIX_TADD_PAGE_MSIXTADD10_S 0
+#define MSIX_TADD_PAGE_MSIXTADD10_M MAKEMASK(0x3, 0)
+#define MSIX_TADD_PAGE_MSIXTADD_S 2
+#define MSIX_TADD_PAGE_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2)
+#define MSIX_TADD1(_i) (0x00000000 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */
+#define MSIX_TADD1_MAX_INDEX 2047
+#define MSIX_TADD1_MSIXTADD10_S 0
+#define MSIX_TADD1_MSIXTADD10_M MAKEMASK(0x3, 0)
+#define MSIX_TADD1_MSIXTADD_S 2
+#define MSIX_TADD1_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2)
+#define MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */
+#define MSIX_TMSG_MAX_INDEX 2047
+#define MSIX_TMSG_MSIXTMSG_S 0
+#define MSIX_TMSG_MSIXTMSG_M MAKEMASK(0xFFFFFFFF, 0)
+#define MSIX_TMSG_PAGE(_i) (0x02E00008 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */
+#define MSIX_TMSG_PAGE_MAX_INDEX 2047
+#define MSIX_TMSG_PAGE_MSIXTMSG_S 0
+#define MSIX_TMSG_PAGE_MSIXTMSG_M MAKEMASK(0xFFFFFFFF, 0)
+#define MSIX_TUADD_PAGE(_i) (0x02E00004 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */
+#define MSIX_TUADD_PAGE_MAX_INDEX 2047
+#define MSIX_TUADD_PAGE_MSIXTUADD_S 0
+#define MSIX_TUADD_PAGE_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0)
+#define MSIX_TUADD1(_i) (0x00000004 + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */
+#define MSIX_TUADD1_MAX_INDEX 2047
+#define MSIX_TUADD1_MSIXTUADD_S 0
+#define MSIX_TUADD1_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0)
+#define MSIX_TVCTRL_PAGE(_i) (0x02E0000C + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */
+#define MSIX_TVCTRL_PAGE_MAX_INDEX 2047
+#define MSIX_TVCTRL_PAGE_MASK_S 0
+#define MSIX_TVCTRL_PAGE_MASK_M BIT(0)
+#define MSIX_TVCTRL1(_i) (0x0000000C + ((_i) * 16)) /* _i=0...2047 */ /* Reset Source: FLR */
+#define MSIX_TVCTRL1_MAX_INDEX 2047
+#define MSIX_TVCTRL1_MASK_S 0
+#define MSIX_TVCTRL1_MASK_M BIT(0)
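The MSIX_T* dwords above are laid out at a 16-byte stride per vector, so the (_i) address macros resolve an entry index straight to its BAR-relative offsets. A small, purely illustrative sketch of collecting the offsets for one entry of the non-paged table:

#include <stdint.h>

/* Illustrative only: BAR-relative offsets of the four dwords that make up
 * MSI-X table entry 'i' (non-paged variant). */
struct msix_entry_offs {
	uint32_t tadd;   /* message address, low dword  */
	uint32_t tuadd;  /* message address, high dword */
	uint32_t tmsg;   /* message data                */
	uint32_t tvctrl; /* vector control (mask bit)   */
};

static struct msix_entry_offs
msix_entry_offsets(uint32_t i)
{
	struct msix_entry_offs e = {
		.tadd   = MSIX_TADD1(i),
		.tuadd  = MSIX_TUADD1(i),
		.tmsg   = MSIX_TMSG(i),
		.tvctrl = MSIX_TVCTRL1(i),
	};
	return (e);
}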
+#define GLNVM_AL_DONE_HLP 0x000824C4 /* Reset Source: POR */
+#define GLNVM_AL_DONE_HLP_HLP_CORER_S 0
+#define GLNVM_AL_DONE_HLP_HLP_CORER_M BIT(0)
+#define GLNVM_AL_DONE_HLP_HLP_FULLR_S 1
+#define GLNVM_AL_DONE_HLP_HLP_FULLR_M BIT(1)
+#define GLNVM_ALTIMERS 0x000B6140 /* Reset Source: POR */
+#define GLNVM_ALTIMERS_PCI_ALTIMER_S 0
+#define GLNVM_ALTIMERS_PCI_ALTIMER_M MAKEMASK(0xFFF, 0)
+#define GLNVM_ALTIMERS_GEN_ALTIMER_S 12
+#define GLNVM_ALTIMERS_GEN_ALTIMER_M MAKEMASK(0xFFFFF, 12)
+#define GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define GLNVM_FLA_LOCKED_S 6
+#define GLNVM_FLA_LOCKED_M BIT(6)
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_NVM_PRES_S 0
+#define GLNVM_GENS_NVM_PRES_M BIT(0)
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M MAKEMASK(0x7, 5)
+#define GLNVM_GENS_BANK1VAL_S 8
+#define GLNVM_GENS_BANK1VAL_M BIT(8)
+#define GLNVM_GENS_ALT_PRST_S 23
+#define GLNVM_GENS_ALT_PRST_M BIT(23)
+#define GLNVM_GENS_FL_AUTO_RD_S 25
+#define GLNVM_GENS_FL_AUTO_RD_M BIT(25)
+#define GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset Source: POR */
+#define GLNVM_PROTCSR_MAX_INDEX 59
+#define GLNVM_PROTCSR_ADDR_BLOCK_S 0
+#define GLNVM_PROTCSR_ADDR_BLOCK_M MAKEMASK(0xFFFFFF, 0)
+#define GLNVM_ULD 0x000B6008 /* Reset Source: POR */
+#define GLNVM_ULD_PCIER_DONE_S 0
+#define GLNVM_ULD_PCIER_DONE_M BIT(0)
+#define GLNVM_ULD_PCIER_DONE_1_S 1
+#define GLNVM_ULD_PCIER_DONE_1_M BIT(1)
+#define GLNVM_ULD_CORER_DONE_S 3
+#define GLNVM_ULD_CORER_DONE_M BIT(3)
+#define GLNVM_ULD_GLOBR_DONE_S 4
+#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLNVM_ULD_POR_DONE_S 5
+#define GLNVM_ULD_POR_DONE_M BIT(5)
+#define GLNVM_ULD_POR_DONE_1_S 8
+#define GLNVM_ULD_POR_DONE_1_M BIT(8)
+#define GLNVM_ULD_PCIER_DONE_2_S 9
+#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
+#define GLNVM_ULD_PE_DONE_S 10
+#define GLNVM_ULD_PE_DONE_M BIT(10)
+#define GLNVM_ULD_HLP_CORE_DONE_S 11
+#define GLNVM_ULD_HLP_CORE_DONE_M BIT(11)
+#define GLNVM_ULD_HLP_FULL_DONE_S 12
+#define GLNVM_ULD_HLP_FULL_DONE_M BIT(12)
+#define GLNVM_ULT 0x000B6154 /* Reset Source: POR */
+#define GLNVM_ULT_CONF_PCIR_AE_S 0
+#define GLNVM_ULT_CONF_PCIR_AE_M BIT(0)
+#define GLNVM_ULT_CONF_PCIRTL_AE_S 1
+#define GLNVM_ULT_CONF_PCIRTL_AE_M BIT(1)
+#define GLNVM_ULT_RESERVED_1_S 2
+#define GLNVM_ULT_RESERVED_1_M BIT(2)
+#define GLNVM_ULT_CONF_CORE_AE_S 3
+#define GLNVM_ULT_CONF_CORE_AE_M BIT(3)
+#define GLNVM_ULT_CONF_GLOBAL_AE_S 4
+#define GLNVM_ULT_CONF_GLOBAL_AE_M BIT(4)
+#define GLNVM_ULT_CONF_POR_AE_S 5
+#define GLNVM_ULT_CONF_POR_AE_M BIT(5)
+#define GLNVM_ULT_RESERVED_2_S 6
+#define GLNVM_ULT_RESERVED_2_M BIT(6)
+#define GLNVM_ULT_RESERVED_3_S 7
+#define GLNVM_ULT_RESERVED_3_M BIT(7)
+#define GLNVM_ULT_RESERVED_5_S 8
+#define GLNVM_ULT_RESERVED_5_M BIT(8)
+#define GLNVM_ULT_CONF_PCIALT_AE_S 9
+#define GLNVM_ULT_CONF_PCIALT_AE_M BIT(9)
+#define GLNVM_ULT_CONF_PE_AE_S 10
+#define GLNVM_ULT_CONF_PE_AE_M BIT(10)
+#define GLNVM_ULT_RESERVED_4_S 11
+#define GLNVM_ULT_RESERVED_4_M MAKEMASK(0x1FFFFF, 11)
+#define GL_COTF_MARKER_STATUS 0x00200200 /* Reset Source: CORER */
+#define GL_COTF_MARKER_STATUS_MRKR_BUSY_S 0
+#define GL_COTF_MARKER_STATUS_MRKR_BUSY_M MAKEMASK(0xFF, 0)
+#define GL_COTF_MARKER_TRIG_RCU_PRS(_i) (0x002001D4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GL_COTF_MARKER_TRIG_RCU_PRS_MAX_INDEX 7
+#define GL_COTF_MARKER_TRIG_RCU_PRS_SET_RST_S 0
+#define GL_COTF_MARKER_TRIG_RCU_PRS_SET_RST_M BIT(0)
+#define GL_PRS_MARKER_ERROR 0x00200204 /* Reset Source: CORER */
+#define GL_PRS_MARKER_ERROR_XLR_CFG_ERR_S 0
+#define GL_PRS_MARKER_ERROR_XLR_CFG_ERR_M BIT(0)
+#define GL_PRS_MARKER_ERROR_QH_CFG_ERR_S 1
+#define GL_PRS_MARKER_ERROR_QH_CFG_ERR_M BIT(1)
+#define GL_PRS_MARKER_ERROR_COTF_CFG_ERR_S 2
+#define GL_PRS_MARKER_ERROR_COTF_CFG_ERR_M BIT(2)
+#define GL_PRS_RX_PIPE_INIT0(_i) (0x0020000C + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */
+#define GL_PRS_RX_PIPE_INIT0_MAX_INDEX 6
+#define GL_PRS_RX_PIPE_INIT0_GPCSR_INIT_S 0
+#define GL_PRS_RX_PIPE_INIT0_GPCSR_INIT_M MAKEMASK(0xFFFF, 0)
+#define GL_PRS_RX_PIPE_INIT1 0x00200028 /* Reset Source: CORER */
+#define GL_PRS_RX_PIPE_INIT1_GPCSR_INIT_S 0
+#define GL_PRS_RX_PIPE_INIT1_GPCSR_INIT_M MAKEMASK(0xFFFF, 0)
+#define GL_PRS_RX_PIPE_INIT2 0x0020002C /* Reset Source: CORER */
+#define GL_PRS_RX_PIPE_INIT2_GPCSR_INIT_S 0
+#define GL_PRS_RX_PIPE_INIT2_GPCSR_INIT_M MAKEMASK(0xFFFF, 0)
+#define GL_PRS_RX_SIZE_CTRL 0x00200004 /* Reset Source: CORER */
+#define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_S 0
+#define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_M MAKEMASK(0x3FF, 0)
+#define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_EN_S 15
+#define GL_PRS_RX_SIZE_CTRL_MIN_SIZE_EN_M BIT(15)
+#define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_S 16
+#define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_M MAKEMASK(0x3FF, 16)
+#define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_EN_S 31
+#define GL_PRS_RX_SIZE_CTRL_MAX_SIZE_EN_M BIT(31)
+#define GL_PRS_TX_PIPE_INIT0(_i) (0x00202018 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */
+#define GL_PRS_TX_PIPE_INIT0_MAX_INDEX 6
+#define GL_PRS_TX_PIPE_INIT0_GPCSR_INIT_S 0
+#define GL_PRS_TX_PIPE_INIT0_GPCSR_INIT_M MAKEMASK(0xFFFF, 0)
+#define GL_PRS_TX_PIPE_INIT1 0x00202034 /* Reset Source: CORER */
+#define GL_PRS_TX_PIPE_INIT1_GPCSR_INIT_S 0
+#define GL_PRS_TX_PIPE_INIT1_GPCSR_INIT_M MAKEMASK(0xFFFF, 0)
+#define GL_PRS_TX_PIPE_INIT2 0x00202038 /* Reset Source: CORER */
+#define GL_PRS_TX_PIPE_INIT2_GPCSR_INIT_S 0
+#define GL_PRS_TX_PIPE_INIT2_GPCSR_INIT_M MAKEMASK(0xFFFF, 0)
+#define GL_PRS_TX_SIZE_CTRL 0x00202014 /* Reset Source: CORER */
+#define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_S 0
+#define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_M MAKEMASK(0x3FF, 0)
+#define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_EN_S 15
+#define GL_PRS_TX_SIZE_CTRL_MIN_SIZE_EN_M BIT(15)
+#define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_S 16
+#define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_M MAKEMASK(0x3FF, 16)
+#define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_EN_S 31
+#define GL_PRS_TX_SIZE_CTRL_MAX_SIZE_EN_M BIT(31)
+#define GL_QH_MARKER_STATUS 0x002001FC /* Reset Source: CORER */
+#define GL_QH_MARKER_STATUS_MRKR_BUSY_S 0
+#define GL_QH_MARKER_STATUS_MRKR_BUSY_M MAKEMASK(0xF, 0)
+#define GL_QH_MARKER_TRIG_RCU_PRS(_i) (0x002001C4 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GL_QH_MARKER_TRIG_RCU_PRS_MAX_INDEX 3
+#define GL_QH_MARKER_TRIG_RCU_PRS_QPID_S 0
+#define GL_QH_MARKER_TRIG_RCU_PRS_QPID_M MAKEMASK(0x3FFFF, 0)
+#define GL_QH_MARKER_TRIG_RCU_PRS_PE_TAG_S 18
+#define GL_QH_MARKER_TRIG_RCU_PRS_PE_TAG_M MAKEMASK(0xFF, 18)
+#define GL_QH_MARKER_TRIG_RCU_PRS_PORT_NUM_S 26
+#define GL_QH_MARKER_TRIG_RCU_PRS_PORT_NUM_M MAKEMASK(0x7, 26)
+#define GL_QH_MARKER_TRIG_RCU_PRS_SET_RST_S 31
+#define GL_QH_MARKER_TRIG_RCU_PRS_SET_RST_M BIT(31)
+#define GL_RPRS_ANA_CSR_CTRL 0x00200708 /* Reset Source: CORER */
+#define GL_RPRS_ANA_CSR_CTRL_SELECT_EN_S 0
+#define GL_RPRS_ANA_CSR_CTRL_SELECT_EN_M BIT(0)
+#define GL_RPRS_ANA_CSR_CTRL_SELECTED_ANA_S 1
+#define GL_RPRS_ANA_CSR_CTRL_SELECTED_ANA_M BIT(1)
+#define GL_TPRS_ANA_CSR_CTRL 0x00202100 /* Reset Source: CORER */
+#define GL_TPRS_ANA_CSR_CTRL_SELECT_EN_S 0
+#define GL_TPRS_ANA_CSR_CTRL_SELECT_EN_M BIT(0)
+#define GL_TPRS_ANA_CSR_CTRL_SELECTED_ANA_S 1
+#define GL_TPRS_ANA_CSR_CTRL_SELECTED_ANA_M BIT(1)
+#define GL_TPRS_MNG_PM_THR 0x00202004 /* Reset Source: CORER */
+#define GL_TPRS_MNG_PM_THR_MNG_PM_THR_S 0
+#define GL_TPRS_MNG_PM_THR_MNG_PM_THR_M MAKEMASK(0x3FFF, 0)
+#define GL_TPRS_PM_CNT(_i) (0x00202008 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GL_TPRS_PM_CNT_MAX_INDEX 1
+#define GL_TPRS_PM_CNT_GL_PRS_PM_CNT_S 0
+#define GL_TPRS_PM_CNT_GL_PRS_PM_CNT_M MAKEMASK(0x3FFF, 0)
+#define GL_TPRS_PM_THR 0x00202000 /* Reset Source: CORER */
+#define GL_TPRS_PM_THR_PM_THR_S 0
+#define GL_TPRS_PM_THR_PM_THR_M MAKEMASK(0x3FFF, 0)
+#define GL_XLR_MARKER_LOG_RCU_PRS(_i) (0x00200208 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GL_XLR_MARKER_LOG_RCU_PRS_MAX_INDEX 63
+#define GL_XLR_MARKER_LOG_RCU_PRS_XLR_TRIG_S 0
+#define GL_XLR_MARKER_LOG_RCU_PRS_XLR_TRIG_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_XLR_MARKER_STATUS(_i) (0x002001F4 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GL_XLR_MARKER_STATUS_MAX_INDEX 1
+#define GL_XLR_MARKER_STATUS_MRKR_BUSY_S 0
+#define GL_XLR_MARKER_STATUS_MRKR_BUSY_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_XLR_MARKER_TRIG_PE 0x005008C0 /* Reset Source: CORER */
+#define GL_XLR_MARKER_TRIG_PE_VM_VF_NUM_S 0
+#define GL_XLR_MARKER_TRIG_PE_VM_VF_NUM_M MAKEMASK(0x3FF, 0)
+#define GL_XLR_MARKER_TRIG_PE_VM_VF_TYPE_S 10
+#define GL_XLR_MARKER_TRIG_PE_VM_VF_TYPE_M MAKEMASK(0x3, 10)
+#define GL_XLR_MARKER_TRIG_PE_PF_NUM_S 12
+#define GL_XLR_MARKER_TRIG_PE_PF_NUM_M MAKEMASK(0x7, 12)
+#define GL_XLR_MARKER_TRIG_PE_PORT_NUM_S 16
+#define GL_XLR_MARKER_TRIG_PE_PORT_NUM_M MAKEMASK(0x7, 16)
+#define GL_XLR_MARKER_TRIG_RCU_PRS 0x002001C0 /* Reset Source: CORER */
+#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_S 0
+#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_NUM_M MAKEMASK(0x3FF, 0)
+#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_S 10
+#define GL_XLR_MARKER_TRIG_RCU_PRS_VM_VF_TYPE_M MAKEMASK(0x3, 10)
+#define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_S 12
+#define GL_XLR_MARKER_TRIG_RCU_PRS_PF_NUM_M MAKEMASK(0x7, 12)
+#define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_S 16
+#define GL_XLR_MARKER_TRIG_RCU_PRS_PORT_NUM_M MAKEMASK(0x7, 16)
+#define GL_CLKGATE_EVENTS 0x0009DE70 /* Reset Source: PERST */
+#define GL_CLKGATE_EVENTS_PRIMARY_CLKGATE_EVENTS_S 0
+#define GL_CLKGATE_EVENTS_PRIMARY_CLKGATE_EVENTS_M MAKEMASK(0xFFFF, 0)
+#define GL_CLKGATE_EVENTS_SIDEBAND_CLKGATE_EVENTS_S 16
+#define GL_CLKGATE_EVENTS_SIDEBAND_CLKGATE_EVENTS_M MAKEMASK(0xFFFF, 16)
+#define GLPCI_BYTCTH_NP_C 0x000BFDA8 /* Reset Source: PCIR */
+#define GLPCI_BYTCTH_NP_C_PCI_COUNT_BW_BCT_S 0
+#define GLPCI_BYTCTH_NP_C_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPCI_BYTCTH_P 0x0009E970 /* Reset Source: PCIR */
+#define GLPCI_BYTCTH_P_PCI_COUNT_BW_BCT_S 0
+#define GLPCI_BYTCTH_P_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPCI_BYTCTL_NP_C 0x000BFDAC /* Reset Source: PCIR */
+#define GLPCI_BYTCTL_NP_C_PCI_COUNT_BW_BCT_S 0
+#define GLPCI_BYTCTL_NP_C_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPCI_BYTCTL_P 0x0009E994 /* Reset Source: PCIR */
+#define GLPCI_BYTCTL_P_PCI_COUNT_BW_BCT_S 0
+#define GLPCI_BYTCTL_P_PCI_COUNT_BW_BCT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPCI_CAPCTRL 0x0009DE88 /* Reset Source: PCIR */
+#define GLPCI_CAPCTRL_VPD_EN_S 0
+#define GLPCI_CAPCTRL_VPD_EN_M BIT(0)
+#define GLPCI_CAPSUP 0x0009DE8C /* Reset Source: PCIR */
+#define GLPCI_CAPSUP_PCIE_VER_S 0
+#define GLPCI_CAPSUP_PCIE_VER_M BIT(0)
+#define GLPCI_CAPSUP_RESERVED_2_S 1
+#define GLPCI_CAPSUP_RESERVED_2_M BIT(1)
+#define GLPCI_CAPSUP_LTR_EN_S 2
+#define GLPCI_CAPSUP_LTR_EN_M BIT(2)
+#define GLPCI_CAPSUP_TPH_EN_S 3
+#define GLPCI_CAPSUP_TPH_EN_M BIT(3)
+#define GLPCI_CAPSUP_ARI_EN_S 4
+#define GLPCI_CAPSUP_ARI_EN_M BIT(4)
+#define GLPCI_CAPSUP_IOV_EN_S 5
+#define GLPCI_CAPSUP_IOV_EN_M BIT(5)
+#define GLPCI_CAPSUP_ACS_EN_S 6
+#define GLPCI_CAPSUP_ACS_EN_M BIT(6)
+#define GLPCI_CAPSUP_SEC_EN_S 7
+#define GLPCI_CAPSUP_SEC_EN_M BIT(7)
+#define GLPCI_CAPSUP_PASID_EN_S 8
+#define GLPCI_CAPSUP_PASID_EN_M BIT(8)
+#define GLPCI_CAPSUP_DLFE_EN_S 9
+#define GLPCI_CAPSUP_DLFE_EN_M BIT(9)
+#define GLPCI_CAPSUP_GEN4_EXT_EN_S 10
+#define GLPCI_CAPSUP_GEN4_EXT_EN_M BIT(10)
+#define GLPCI_CAPSUP_GEN4_MARG_EN_S 11
+#define GLPCI_CAPSUP_GEN4_MARG_EN_M BIT(11)
+#define GLPCI_CAPSUP_ECRC_GEN_EN_S 16
+#define GLPCI_CAPSUP_ECRC_GEN_EN_M BIT(16)
+#define GLPCI_CAPSUP_ECRC_CHK_EN_S 17
+#define GLPCI_CAPSUP_ECRC_CHK_EN_M BIT(17)
+#define GLPCI_CAPSUP_IDO_EN_S 18
+#define GLPCI_CAPSUP_IDO_EN_M BIT(18)
+#define GLPCI_CAPSUP_MSI_MASK_S 19
+#define GLPCI_CAPSUP_MSI_MASK_M BIT(19)
+#define GLPCI_CAPSUP_CSR_CONF_EN_S 20
+#define GLPCI_CAPSUP_CSR_CONF_EN_M BIT(20)
+#define GLPCI_CAPSUP_WAKUP_EN_S 21
+#define GLPCI_CAPSUP_WAKUP_EN_M BIT(21)
+#define GLPCI_CAPSUP_LOAD_SUBSYS_ID_S 30
+#define GLPCI_CAPSUP_LOAD_SUBSYS_ID_M BIT(30)
+#define GLPCI_CAPSUP_LOAD_DEV_ID_S 31
+#define GLPCI_CAPSUP_LOAD_DEV_ID_M BIT(31)
+#define GLPCI_CNF 0x0009DEA0 /* Reset Source: POR */
+#define GLPCI_CNF_FLEX10_S 1
+#define GLPCI_CNF_FLEX10_M BIT(1)
+#define GLPCI_CNF_WAKE_PIN_EN_S 2
+#define GLPCI_CNF_WAKE_PIN_EN_M BIT(2)
+#define GLPCI_CNF_MSIX_ECC_BLOCK_DISABLE_S 3
+#define GLPCI_CNF_MSIX_ECC_BLOCK_DISABLE_M BIT(3)
+#define GLPCI_CNF2 0x000BE004 /* Reset Source: PCIR */
+#define GLPCI_CNF2_RO_DIS_S 0
+#define GLPCI_CNF2_RO_DIS_M BIT(0)
+#define GLPCI_CNF2_CACHELINE_SIZE_S 1
+#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
+#define GLPCI_DREVID 0x0009E9AC /* Reset Source: PCIR */
+#define GLPCI_DREVID_DEFAULT_REVID_S 0
+#define GLPCI_DREVID_DEFAULT_REVID_M MAKEMASK(0xFF, 0)
+#define GLPCI_GSCL_1_NP_C 0x000BFDA4 /* Reset Source: PCIR */
+#define GLPCI_GSCL_1_NP_C_RT_MODE_S 8
+#define GLPCI_GSCL_1_NP_C_RT_MODE_M BIT(8)
+#define GLPCI_GSCL_1_NP_C_RT_EVENT_S 9
+#define GLPCI_GSCL_1_NP_C_RT_EVENT_M MAKEMASK(0x1F, 9)
+#define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EN_S 14
+#define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EN_M BIT(14)
+#define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EV_S 15
+#define GLPCI_GSCL_1_NP_C_PCI_COUNT_BW_EV_M MAKEMASK(0x1F, 15)
+#define GLPCI_GSCL_1_NP_C_GIO_COUNT_RESET_S 29
+#define GLPCI_GSCL_1_NP_C_GIO_COUNT_RESET_M BIT(29)
+#define GLPCI_GSCL_1_NP_C_GIO_COUNT_STOP_S 30
+#define GLPCI_GSCL_1_NP_C_GIO_COUNT_STOP_M BIT(30)
+#define GLPCI_GSCL_1_NP_C_GIO_COUNT_START_S 31
+#define GLPCI_GSCL_1_NP_C_GIO_COUNT_START_M BIT(31)
+#define GLPCI_GSCL_1_P 0x0009E9B4 /* Reset Source: PCIR */
+#define GLPCI_GSCL_1_P_GIO_COUNT_EN_0_S 0
+#define GLPCI_GSCL_1_P_GIO_COUNT_EN_0_M BIT(0)
+#define GLPCI_GSCL_1_P_GIO_COUNT_EN_1_S 1
+#define GLPCI_GSCL_1_P_GIO_COUNT_EN_1_M BIT(1)
+#define GLPCI_GSCL_1_P_GIO_COUNT_EN_2_S 2
+#define GLPCI_GSCL_1_P_GIO_COUNT_EN_2_M BIT(2)
+#define GLPCI_GSCL_1_P_GIO_COUNT_EN_3_S 3
+#define GLPCI_GSCL_1_P_GIO_COUNT_EN_3_M BIT(3)
+#define GLPCI_GSCL_1_P_LBC_ENABLE_0_S 4
+#define GLPCI_GSCL_1_P_LBC_ENABLE_0_M BIT(4)
+#define GLPCI_GSCL_1_P_LBC_ENABLE_1_S 5
+#define GLPCI_GSCL_1_P_LBC_ENABLE_1_M BIT(5)
+#define GLPCI_GSCL_1_P_LBC_ENABLE_2_S 6
+#define GLPCI_GSCL_1_P_LBC_ENABLE_2_M BIT(6)
+#define GLPCI_GSCL_1_P_LBC_ENABLE_3_S 7
+#define GLPCI_GSCL_1_P_LBC_ENABLE_3_M BIT(7)
+#define GLPCI_GSCL_1_P_PCI_COUNT_BW_EN_S 14
+#define GLPCI_GSCL_1_P_PCI_COUNT_BW_EN_M BIT(14)
+#define GLPCI_GSCL_1_P_GIO_64_BIT_EN_S 28
+#define GLPCI_GSCL_1_P_GIO_64_BIT_EN_M BIT(28)
+#define GLPCI_GSCL_1_P_GIO_COUNT_RESET_S 29
+#define GLPCI_GSCL_1_P_GIO_COUNT_RESET_M BIT(29)
+#define GLPCI_GSCL_1_P_GIO_COUNT_STOP_S 30
+#define GLPCI_GSCL_1_P_GIO_COUNT_STOP_M BIT(30)
+#define GLPCI_GSCL_1_P_GIO_COUNT_START_S 31
+#define GLPCI_GSCL_1_P_GIO_COUNT_START_M BIT(31)
+#define GLPCI_GSCL_2 0x0009E998 /* Reset Source: PCIR */
+#define GLPCI_GSCL_2_GIO_EVENT_NUM_0_S 0
+#define GLPCI_GSCL_2_GIO_EVENT_NUM_0_M MAKEMASK(0xFF, 0)
+#define GLPCI_GSCL_2_GIO_EVENT_NUM_1_S 8
+#define GLPCI_GSCL_2_GIO_EVENT_NUM_1_M MAKEMASK(0xFF, 8)
+#define GLPCI_GSCL_2_GIO_EVENT_NUM_2_S 16
+#define GLPCI_GSCL_2_GIO_EVENT_NUM_2_M MAKEMASK(0xFF, 16)
+#define GLPCI_GSCL_2_GIO_EVENT_NUM_3_S 24
+#define GLPCI_GSCL_2_GIO_EVENT_NUM_3_M MAKEMASK(0xFF, 24)
+#define GLPCI_GSCL_5_8(_i) (0x0009E954 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: PCIR */
+#define GLPCI_GSCL_5_8_MAX_INDEX 3
+#define GLPCI_GSCL_5_8_LBC_THRESHOLD_N_S 0
+#define GLPCI_GSCL_5_8_LBC_THRESHOLD_N_M MAKEMASK(0xFFFF, 0)
+#define GLPCI_GSCL_5_8_LBC_TIMER_N_S 16
+#define GLPCI_GSCL_5_8_LBC_TIMER_N_M MAKEMASK(0xFFFF, 16)
+#define GLPCI_GSCN_0_3(_i) (0x0009E99C + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: PCIR */
+#define GLPCI_GSCN_0_3_MAX_INDEX 3
+#define GLPCI_GSCN_0_3_EVENT_COUNTER_S 0
+#define GLPCI_GSCN_0_3_EVENT_COUNTER_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPCI_LATCT_NP_C 0x000BFDA0 /* Reset Source: PCIR */
+#define GLPCI_LATCT_NP_C_PCI_LATENCY_COUNT_S 0
+#define GLPCI_LATCT_NP_C_PCI_LATENCY_COUNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPCI_LBARCTRL 0x0009DE74 /* Reset Source: POR */
+#define GLPCI_LBARCTRL_PREFBAR_S 0
+#define GLPCI_LBARCTRL_PREFBAR_M BIT(0)
+#define GLPCI_LBARCTRL_BAR32_S 1
+#define GLPCI_LBARCTRL_BAR32_M BIT(1)
+#define GLPCI_LBARCTRL_PAGES_SPACE_EN_PF_S 2
+#define GLPCI_LBARCTRL_PAGES_SPACE_EN_PF_M BIT(2)
+#define GLPCI_LBARCTRL_FLASH_EXPOSE_S 3
+#define GLPCI_LBARCTRL_FLASH_EXPOSE_M BIT(3)
+#define GLPCI_LBARCTRL_PE_DB_SIZE_S 4
+#define GLPCI_LBARCTRL_PE_DB_SIZE_M MAKEMASK(0x3, 4)
+#define GLPCI_LBARCTRL_PAGES_SPACE_EN_VF_S 9
+#define GLPCI_LBARCTRL_PAGES_SPACE_EN_VF_M BIT(9)
+#define GLPCI_LBARCTRL_EXROM_SIZE_S 11
+#define GLPCI_LBARCTRL_EXROM_SIZE_M MAKEMASK(0x7, 11)
+#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_S 14
+#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_M MAKEMASK(0x3, 14)
+#define GLPCI_LINKCAP 0x0009DE90 /* Reset Source: PCIR */
+#define GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_S 0
+#define GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_M MAKEMASK(0x3F, 0)
+#define GLPCI_LINKCAP_MAX_LINK_WIDTH_S 9
+#define GLPCI_LINKCAP_MAX_LINK_WIDTH_M MAKEMASK(0xF, 9)
+#define GLPCI_NPQ_CFG 0x000BFD80 /* Reset Source: PCIR */
+#define GLPCI_NPQ_CFG_EXTEND_TO_S 0
+#define GLPCI_NPQ_CFG_EXTEND_TO_M BIT(0)
+#define GLPCI_NPQ_CFG_SMALL_TO_S 1
+#define GLPCI_NPQ_CFG_SMALL_TO_M BIT(1)
+#define GLPCI_NPQ_CFG_WEIGHT_AVG_S 2
+#define GLPCI_NPQ_CFG_WEIGHT_AVG_M MAKEMASK(0xF, 2)
+#define GLPCI_NPQ_CFG_NPQ_SPARE_S 6
+#define GLPCI_NPQ_CFG_NPQ_SPARE_M MAKEMASK(0x3FF, 6)
+#define GLPCI_NPQ_CFG_NPQ_ERR_STAT_S 16
+#define GLPCI_NPQ_CFG_NPQ_ERR_STAT_M MAKEMASK(0xF, 16)
+#define GLPCI_PKTCT_NP_C 0x000BFD9C /* Reset Source: PCIR */
+#define GLPCI_PKTCT_NP_C_PCI_COUNT_BW_PCT_S 0
+#define GLPCI_PKTCT_NP_C_PCI_COUNT_BW_PCT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPCI_PKTCT_P 0x0009E9B0 /* Reset Source: PCIR */
+#define GLPCI_PKTCT_P_PCI_COUNT_BW_PCT_S 0
+#define GLPCI_PKTCT_P_PCI_COUNT_BW_PCT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPCI_PMSUP 0x0009DE94 /* Reset Source: PCIR */
+#define GLPCI_PMSUP_RESERVED_0_S 0
+#define GLPCI_PMSUP_RESERVED_0_M MAKEMASK(0x3, 0)
+#define GLPCI_PMSUP_RESERVED_1_S 2
+#define GLPCI_PMSUP_RESERVED_1_M MAKEMASK(0x7, 2)
+#define GLPCI_PMSUP_RESERVED_2_S 5
+#define GLPCI_PMSUP_RESERVED_2_M MAKEMASK(0x7, 5)
+#define GLPCI_PMSUP_L0S_ACC_LAT_S 8
+#define GLPCI_PMSUP_L0S_ACC_LAT_M MAKEMASK(0x7, 8)
+#define GLPCI_PMSUP_L1_ACC_LAT_S 11
+#define GLPCI_PMSUP_L1_ACC_LAT_M MAKEMASK(0x7, 11)
+#define GLPCI_PMSUP_RESERVED_3_S 14
+#define GLPCI_PMSUP_RESERVED_3_M BIT(14)
+#define GLPCI_PMSUP_OBFF_SUP_S 15
+#define GLPCI_PMSUP_OBFF_SUP_M MAKEMASK(0x3, 15)
+#define GLPCI_PUSH_PE_IF_TO_STATUS 0x0009DF44 /* Reset Source: PCIR */
+#define GLPCI_PUSH_PE_IF_TO_STATUS_GLPCI_PUSH_PE_IF_TO_STATUS_S 0
+#define GLPCI_PUSH_PE_IF_TO_STATUS_GLPCI_PUSH_PE_IF_TO_STATUS_M BIT(0)
+#define GLPCI_PWRDATA 0x0009DE7C /* Reset Source: PCIR */
+#define GLPCI_PWRDATA_D0_POWER_S 0
+#define GLPCI_PWRDATA_D0_POWER_M MAKEMASK(0xFF, 0)
+#define GLPCI_PWRDATA_COMM_POWER_S 8
+#define GLPCI_PWRDATA_COMM_POWER_M MAKEMASK(0xFF, 8)
+#define GLPCI_PWRDATA_D3_POWER_S 16
+#define GLPCI_PWRDATA_D3_POWER_M MAKEMASK(0xFF, 16)
+#define GLPCI_PWRDATA_DATA_SCALE_S 24
+#define GLPCI_PWRDATA_DATA_SCALE_M MAKEMASK(0x3, 24)
+#define GLPCI_REVID 0x0009DE98 /* Reset Source: PCIR */
+#define GLPCI_REVID_NVM_REVID_S 0
+#define GLPCI_REVID_NVM_REVID_M MAKEMASK(0xFF, 0)
+#define GLPCI_SERH 0x0009DE84 /* Reset Source: PCIR */
+#define GLPCI_SERH_SER_NUM_H_S 0
+#define GLPCI_SERH_SER_NUM_H_M MAKEMASK(0xFFFF, 0)
+#define GLPCI_SERL 0x0009DE80 /* Reset Source: PCIR */
+#define GLPCI_SERL_SER_NUM_L_S 0
+#define GLPCI_SERL_SER_NUM_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPCI_SUBVENID 0x0009DEE8 /* Reset Source: PCIR */
+#define GLPCI_SUBVENID_SUB_VEN_ID_S 0
+#define GLPCI_SUBVENID_SUB_VEN_ID_M MAKEMASK(0xFFFF, 0)
+#define GLPCI_UPADD 0x000BE0D4 /* Reset Source: PCIR */
+#define GLPCI_UPADD_ADDRESS_S 1
+#define GLPCI_UPADD_ADDRESS_M MAKEMASK(0x7FFFFFFF, 1)
+#define GLPCI_VENDORID 0x0009DEC8 /* Reset Source: PCIR */
+#define GLPCI_VENDORID_VENDORID_S 0
+#define GLPCI_VENDORID_VENDORID_M MAKEMASK(0xFFFF, 0)
+#define GLPCI_VFSUP 0x0009DE9C /* Reset Source: PCIR */
+#define GLPCI_VFSUP_VF_PREFETCH_S 0
+#define GLPCI_VFSUP_VF_PREFETCH_M BIT(0)
+#define GLPCI_VFSUP_VR_BAR_TYPE_S 1
+#define GLPCI_VFSUP_VR_BAR_TYPE_M BIT(1)
+#define GLPCI_WATMK_CLNT_PIPEMON 0x000BFD90 /* Reset Source: PCIR */
+#define GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_S 0
+#define GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_M MAKEMASK(0xFFFF, 0)
+#define PF_FUNC_RID 0x0009E880 /* Reset Source: PCIR */
+#define PF_FUNC_RID_FUNCTION_NUMBER_S 0
+#define PF_FUNC_RID_FUNCTION_NUMBER_M MAKEMASK(0x7, 0)
+#define PF_FUNC_RID_DEVICE_NUMBER_S 3
+#define PF_FUNC_RID_DEVICE_NUMBER_M MAKEMASK(0x1F, 3)
+#define PF_FUNC_RID_BUS_NUMBER_S 8
+#define PF_FUNC_RID_BUS_NUMBER_M MAKEMASK(0xFF, 8)
+#define PF_PCI_CIAA 0x0009E580 /* Reset Source: FLR */
+#define PF_PCI_CIAA_ADDRESS_S 0
+#define PF_PCI_CIAA_ADDRESS_M MAKEMASK(0xFFF, 0)
+#define PF_PCI_CIAA_VF_NUM_S 12
+#define PF_PCI_CIAA_VF_NUM_M MAKEMASK(0xFF, 12)
+#define PF_PCI_CIAD 0x0009E500 /* Reset Source: FLR */
+#define PF_PCI_CIAD_DATA_S 0
+#define PF_PCI_CIAD_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFPCI_CLASS 0x0009DB00 /* Reset Source: PCIR */
+#define PFPCI_CLASS_STORAGE_CLASS_S 0
+#define PFPCI_CLASS_STORAGE_CLASS_M BIT(0)
+#define PFPCI_CLASS_PF_IS_LAN_S 2
+#define PFPCI_CLASS_PF_IS_LAN_M BIT(2)
+#define PFPCI_CNF 0x0009DF00 /* Reset Source: PCIR */
+#define PFPCI_CNF_MSI_EN_S 2
+#define PFPCI_CNF_MSI_EN_M BIT(2)
+#define PFPCI_CNF_EXROM_DIS_S 3
+#define PFPCI_CNF_EXROM_DIS_M BIT(3)
+#define PFPCI_CNF_IO_BAR_S 4
+#define PFPCI_CNF_IO_BAR_M BIT(4)
+#define PFPCI_CNF_INT_PIN_S 5
+#define PFPCI_CNF_INT_PIN_M MAKEMASK(0x3, 5)
+#define PFPCI_DEVID 0x0009DE00 /* Reset Source: PCIR */
+#define PFPCI_DEVID_PF_DEV_ID_S 0
+#define PFPCI_DEVID_PF_DEV_ID_M MAKEMASK(0xFFFF, 0)
+#define PFPCI_DEVID_VF_DEV_ID_S 16
+#define PFPCI_DEVID_VF_DEV_ID_M MAKEMASK(0xFFFF, 16)
+#define PFPCI_FACTPS 0x0009E900 /* Reset Source: FLR */
+#define PFPCI_FACTPS_FUNC_POWER_STATE_S 0
+#define PFPCI_FACTPS_FUNC_POWER_STATE_M MAKEMASK(0x3, 0)
+#define PFPCI_FACTPS_FUNC_AUX_EN_S 3
+#define PFPCI_FACTPS_FUNC_AUX_EN_M BIT(3)
+#define PFPCI_FUNC 0x0009D980 /* Reset Source: POR */
+#define PFPCI_FUNC_FUNC_DIS_S 0
+#define PFPCI_FUNC_FUNC_DIS_M BIT(0)
+#define PFPCI_FUNC_ALLOW_FUNC_DIS_S 1
+#define PFPCI_FUNC_ALLOW_FUNC_DIS_M BIT(1)
+#define PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_S 2
+#define PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_M BIT(2)
+#define PFPCI_PF_FLUSH_DONE 0x0009E400 /* Reset Source: PCIR */
+#define PFPCI_PF_FLUSH_DONE_FLUSH_DONE_S 0
+#define PFPCI_PF_FLUSH_DONE_FLUSH_DONE_M BIT(0)
+#define PFPCI_PM 0x0009DA80 /* Reset Source: POR */
+#define PFPCI_PM_PME_EN_S 0
+#define PFPCI_PM_PME_EN_M BIT(0)
+#define PFPCI_STATUS1 0x0009DA00 /* Reset Source: POR */
+#define PFPCI_STATUS1_FUNC_VALID_S 0
+#define PFPCI_STATUS1_FUNC_VALID_M BIT(0)
+#define PFPCI_SUBSYSID 0x0009D880 /* Reset Source: PCIR */
+#define PFPCI_SUBSYSID_PF_SUBSYS_ID_S 0
+#define PFPCI_SUBSYSID_PF_SUBSYS_ID_M MAKEMASK(0xFFFF, 0)
+#define PFPCI_SUBSYSID_VF_SUBSYS_ID_S 16
+#define PFPCI_SUBSYSID_VF_SUBSYS_ID_M MAKEMASK(0xFFFF, 16)
+#define PFPCI_VF_FLUSH_DONE(_VF) (0x0009E000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PCIR */
+#define PFPCI_VF_FLUSH_DONE_MAX_INDEX 255
+#define PFPCI_VF_FLUSH_DONE_FLUSH_DONE_S 0
+#define PFPCI_VF_FLUSH_DONE_FLUSH_DONE_M BIT(0)
+#define PFPCI_VM_FLUSH_DONE 0x0009E480 /* Reset Source: PCIR */
+#define PFPCI_VM_FLUSH_DONE_FLUSH_DONE_S 0
+#define PFPCI_VM_FLUSH_DONE_FLUSH_DONE_M BIT(0)
+#define PFPCI_VMINDEX 0x0009E600 /* Reset Source: PCIR */
+#define PFPCI_VMINDEX_VMINDEX_S 0
+#define PFPCI_VMINDEX_VMINDEX_M MAKEMASK(0x3FF, 0)
+#define PFPCI_VMPEND 0x0009E800 /* Reset Source: PCIR */
+#define PFPCI_VMPEND_PENDING_S 0
+#define PFPCI_VMPEND_PENDING_M BIT(0)
+#define PQ_FIFO_STATUS 0x0009DF40 /* Reset Source: PCIR */
+#define PQ_FIFO_STATUS_PQ_FIFO_COUNT_S 0
+#define PQ_FIFO_STATUS_PQ_FIFO_COUNT_M MAKEMASK(0x7FFFFFFF, 0)
+#define PQ_FIFO_STATUS_PQ_FIFO_EMPTY_S 31
+#define PQ_FIFO_STATUS_PQ_FIFO_EMPTY_M BIT(31)
+#define GLPE_CPUSTATUS0 0x0050BA5C /* Reset Source: CORER */
+#define GLPE_CPUSTATUS0_PECPUSTATUS0_S 0
+#define GLPE_CPUSTATUS0_PECPUSTATUS0_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPE_CPUSTATUS1 0x0050BA60 /* Reset Source: CORER */
+#define GLPE_CPUSTATUS1_PECPUSTATUS1_S 0
+#define GLPE_CPUSTATUS1_PECPUSTATUS1_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPE_CPUSTATUS2 0x0050BA64 /* Reset Source: CORER */
+#define GLPE_CPUSTATUS2_PECPUSTATUS2_S 0
+#define GLPE_CPUSTATUS2_PECPUSTATUS2_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPE_MDQ_BASE(_i) (0x00536000 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLPE_MDQ_BASE_MAX_INDEX 511
+#define GLPE_MDQ_BASE_MDOC_INDEX_S 0
+#define GLPE_MDQ_BASE_MDOC_INDEX_M MAKEMASK(0xFFFFFFF, 0)
+#define GLPE_MDQ_PTR(_i) (0x00537000 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLPE_MDQ_PTR_MAX_INDEX 511
+#define GLPE_MDQ_PTR_MDQ_HEAD_S 0
+#define GLPE_MDQ_PTR_MDQ_HEAD_M MAKEMASK(0x3FFF, 0)
+#define GLPE_MDQ_PTR_MDQ_TAIL_S 16
+#define GLPE_MDQ_PTR_MDQ_TAIL_M MAKEMASK(0x3FFF, 16)
+#define GLPE_MDQ_SIZE(_i) (0x00536800 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLPE_MDQ_SIZE_MAX_INDEX 511
+#define GLPE_MDQ_SIZE_MDQ_SIZE_S 0
+#define GLPE_MDQ_SIZE_MDQ_SIZE_M MAKEMASK(0x3FFF, 0)
+#define GLPE_PEPM_CTRL 0x0050C000 /* Reset Source: PERST */
+#define GLPE_PEPM_CTRL_PEPM_ENABLE_S 0
+#define GLPE_PEPM_CTRL_PEPM_ENABLE_M BIT(0)
+#define GLPE_PEPM_CTRL_PEPM_HALT_S 8
+#define GLPE_PEPM_CTRL_PEPM_HALT_M BIT(8)
+#define GLPE_PEPM_CTRL_PEPM_PUSH_MARGIN_S 16
+#define GLPE_PEPM_CTRL_PEPM_PUSH_MARGIN_M MAKEMASK(0xFF, 16)
+#define GLPE_PEPM_DEALLOC 0x0050C004 /* Reset Source: PERST */
+#define GLPE_PEPM_DEALLOC_MDQ_CREDITS_S 0
+#define GLPE_PEPM_DEALLOC_MDQ_CREDITS_M MAKEMASK(0x3FFF, 0)
+#define GLPE_PEPM_DEALLOC_PSQ_CREDITS_S 14
+#define GLPE_PEPM_DEALLOC_PSQ_CREDITS_M MAKEMASK(0x1F, 14)
+#define GLPE_PEPM_DEALLOC_PQID_S 19
+#define GLPE_PEPM_DEALLOC_PQID_M MAKEMASK(0x1FF, 19)
+#define GLPE_PEPM_DEALLOC_PORT_S 28
+#define GLPE_PEPM_DEALLOC_PORT_M MAKEMASK(0x7, 28)
+#define GLPE_PEPM_DEALLOC_DEALLOC_RDY_S 31
+#define GLPE_PEPM_DEALLOC_DEALLOC_RDY_M BIT(31)
+#define GLPE_PEPM_PSQ_COUNT 0x0050C020 /* Reset Source: PERST */
+#define GLPE_PEPM_PSQ_COUNT_PEPM_PSQ_COUNT_S 0
+#define GLPE_PEPM_PSQ_COUNT_PEPM_PSQ_COUNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_PEPM_THRESH(_i) (0x0050C840 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: PERST */
+#define GLPE_PEPM_THRESH_MAX_INDEX 511
+#define GLPE_PEPM_THRESH_PEPM_PSQ_THRESH_S 0
+#define GLPE_PEPM_THRESH_PEPM_PSQ_THRESH_M MAKEMASK(0x1F, 0)
+#define GLPE_PEPM_THRESH_PEPM_MDQ_THRESH_S 16
+#define GLPE_PEPM_THRESH_PEPM_MDQ_THRESH_M MAKEMASK(0x3FFF, 16)
+#define GLPE_PFAEQEDROPCNT(_i) (0x00503240 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPE_PFAEQEDROPCNT_MAX_INDEX 7
+#define GLPE_PFAEQEDROPCNT_AEQEDROPCNT_S 0
+#define GLPE_PFAEQEDROPCNT_AEQEDROPCNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_PFCEQEDROPCNT(_i) (0x00503220 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPE_PFCEQEDROPCNT_MAX_INDEX 7
+#define GLPE_PFCEQEDROPCNT_CEQEDROPCNT_S 0
+#define GLPE_PFCEQEDROPCNT_CEQEDROPCNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_PFCQEDROPCNT(_i) (0x00503200 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPE_PFCQEDROPCNT_MAX_INDEX 7
+#define GLPE_PFCQEDROPCNT_CQEDROPCNT_S 0
+#define GLPE_PFCQEDROPCNT_CQEDROPCNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_PFFLMOOISCALLOCERR(_i) (0x0050B960 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPE_PFFLMOOISCALLOCERR_MAX_INDEX 7
+#define GLPE_PFFLMOOISCALLOCERR_ERROR_COUNT_S 0
+#define GLPE_PFFLMOOISCALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_PFFLMQ1ALLOCERR(_i) (0x0050B920 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPE_PFFLMQ1ALLOCERR_MAX_INDEX 7
+#define GLPE_PFFLMQ1ALLOCERR_ERROR_COUNT_S 0
+#define GLPE_PFFLMQ1ALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_PFFLMRRFALLOCERR(_i) (0x0050B940 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPE_PFFLMRRFALLOCERR_MAX_INDEX 7
+#define GLPE_PFFLMRRFALLOCERR_ERROR_COUNT_S 0
+#define GLPE_PFFLMRRFALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_PFFLMXMITALLOCERR(_i) (0x0050B900 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPE_PFFLMXMITALLOCERR_MAX_INDEX 7
+#define GLPE_PFFLMXMITALLOCERR_ERROR_COUNT_S 0
+#define GLPE_PFFLMXMITALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_PFTCPNOW50USCNT(_i) (0x0050B8C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPE_PFTCPNOW50USCNT_MAX_INDEX 7
+#define GLPE_PFTCPNOW50USCNT_CNT_S 0
+#define GLPE_PFTCPNOW50USCNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPE_PUSH_PEPM 0x0053241C /* Reset Source: CORER */
+#define GLPE_PUSH_PEPM_MDQ_CREDITS_S 0
+#define GLPE_PUSH_PEPM_MDQ_CREDITS_M MAKEMASK(0xFF, 0)
+#define GLPE_VFAEQEDROPCNT(_i) (0x00503100 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define GLPE_VFAEQEDROPCNT_AEQEDROPCNT_S 0
+#define GLPE_VFAEQEDROPCNT_AEQEDROPCNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_VFCEQEDROPCNT(_i) (0x00503080 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define GLPE_VFCEQEDROPCNT_CEQEDROPCNT_S 0
+#define GLPE_VFCEQEDROPCNT_CEQEDROPCNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_VFCQEDROPCNT(_i) (0x00503000 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define GLPE_VFCQEDROPCNT_CQEDROPCNT_S 0
+#define GLPE_VFCQEDROPCNT_CQEDROPCNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_VFFLMOOISCALLOCERR(_i) (0x0050B580 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLPE_VFFLMOOISCALLOCERR_MAX_INDEX 31
+#define GLPE_VFFLMOOISCALLOCERR_ERROR_COUNT_S 0
+#define GLPE_VFFLMOOISCALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_VFFLMQ1ALLOCERR(_i) (0x0050B480 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_S 0
+#define GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_VFFLMRRFALLOCERR(_i) (0x0050B500 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLPE_VFFLMRRFALLOCERR_MAX_INDEX 31
+#define GLPE_VFFLMRRFALLOCERR_ERROR_COUNT_S 0
+#define GLPE_VFFLMRRFALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_VFFLMXMITALLOCERR(_i) (0x0050B400 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_S 0
+#define GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_M MAKEMASK(0xFFFF, 0)
+#define GLPE_VFTCPNOW50USCNT(_i) (0x0050B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: PE_CORER */
+#define GLPE_VFTCPNOW50USCNT_MAX_INDEX 31
+#define GLPE_VFTCPNOW50USCNT_CNT_S 0
+#define GLPE_VFTCPNOW50USCNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFPE_AEQALLOC 0x00502D00 /* Reset Source: PFR */
+#define PFPE_AEQALLOC_AECOUNT_S 0
+#define PFPE_AEQALLOC_AECOUNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFPE_CCQPHIGH 0x0050A100 /* Reset Source: PFR */
+#define PFPE_CCQPHIGH_PECCQPHIGH_S 0
+#define PFPE_CCQPHIGH_PECCQPHIGH_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFPE_CCQPLOW 0x0050A080 /* Reset Source: PFR */
+#define PFPE_CCQPLOW_PECCQPLOW_S 0
+#define PFPE_CCQPLOW_PECCQPLOW_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFPE_CCQPSTATUS 0x0050A000 /* Reset Source: PFR */
+#define PFPE_CCQPSTATUS_CCQP_DONE_S 0
+#define PFPE_CCQPSTATUS_CCQP_DONE_M BIT(0)
+#define PFPE_CCQPSTATUS_HMC_PROFILE_S 4
+#define PFPE_CCQPSTATUS_HMC_PROFILE_M MAKEMASK(0x7, 4)
+#define PFPE_CCQPSTATUS_RDMA_EN_VFS_S 16
+#define PFPE_CCQPSTATUS_RDMA_EN_VFS_M MAKEMASK(0x3F, 16)
+#define PFPE_CCQPSTATUS_CCQP_ERR_S 31
+#define PFPE_CCQPSTATUS_CCQP_ERR_M BIT(31)
+#define PFPE_CQACK 0x00502C80 /* Reset Source: PFR */
+#define PFPE_CQACK_PECQID_S 0
+#define PFPE_CQACK_PECQID_M MAKEMASK(0x7FFFF, 0)
+#define PFPE_CQARM 0x00502C00 /* Reset Source: PFR */
+#define PFPE_CQARM_PECQID_S 0
+#define PFPE_CQARM_PECQID_M MAKEMASK(0x7FFFF, 0)
+#define PFPE_CQPDB 0x00500800 /* Reset Source: PFR */
+#define PFPE_CQPDB_WQHEAD_S 0
+#define PFPE_CQPDB_WQHEAD_M MAKEMASK(0x7FF, 0)
+#define PFPE_CQPERRCODES 0x0050A200 /* Reset Source: PFR */
+#define PFPE_CQPERRCODES_CQP_MINOR_CODE_S 0
+#define PFPE_CQPERRCODES_CQP_MINOR_CODE_M MAKEMASK(0xFFFF, 0)
+#define PFPE_CQPERRCODES_CQP_MAJOR_CODE_S 16
+#define PFPE_CQPERRCODES_CQP_MAJOR_CODE_M MAKEMASK(0xFFFF, 16)
+#define PFPE_CQPTAIL 0x00500880 /* Reset Source: PFR */
+#define PFPE_CQPTAIL_WQTAIL_S 0
+#define PFPE_CQPTAIL_WQTAIL_M MAKEMASK(0x7FF, 0)
+#define PFPE_CQPTAIL_CQP_OP_ERR_S 31
+#define PFPE_CQPTAIL_CQP_OP_ERR_M BIT(31)
+#define PFPE_IPCONFIG0 0x0050A180 /* Reset Source: PFR */
+#define PFPE_IPCONFIG0_PEIPID_S 0
+#define PFPE_IPCONFIG0_PEIPID_M MAKEMASK(0xFFFF, 0)
+#define PFPE_IPCONFIG0_USEENTIREIDRANGE_S 16
+#define PFPE_IPCONFIG0_USEENTIREIDRANGE_M BIT(16)
+#define PFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_S 17
+#define PFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_M BIT(17)
+#define PFPE_MRTEIDXMASK 0x0050A300 /* Reset Source: PFR */
+#define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0
+#define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
+#define PFPE_RCVUNEXPECTEDERROR 0x0050A380 /* Reset Source: PFR */
+#define PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
+#define PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
+#define PFPE_TCPNOWTIMER 0x0050A280 /* Reset Source: PFR */
+#define PFPE_TCPNOWTIMER_TCP_NOW_S 0
+#define PFPE_TCPNOWTIMER_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFPE_WQEALLOC 0x00504400 /* Reset Source: PFR */
+#define PFPE_WQEALLOC_PEQPID_S 0
+#define PFPE_WQEALLOC_PEQPID_M MAKEMASK(0x3FFFF, 0)
+#define PFPE_WQEALLOC_WQE_DESC_INDEX_S 20
+#define PFPE_WQEALLOC_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20)
+#define PRT_PEPM_COUNT(_i) (0x0050C040 + ((_i) * 4)) /* _i=0...511 */ /* Reset Source: PERST */
+#define PRT_PEPM_COUNT_MAX_INDEX 511
+#define PRT_PEPM_COUNT_PEPM_PSQ_COUNT_S 0
+#define PRT_PEPM_COUNT_PEPM_PSQ_COUNT_M MAKEMASK(0x1F, 0)
+#define PRT_PEPM_COUNT_PEPM_MDQ_COUNT_S 16
+#define PRT_PEPM_COUNT_PEPM_MDQ_COUNT_M MAKEMASK(0x3FFF, 16)
+#define VFPE_AEQALLOC(_VF) (0x00502800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_AEQALLOC_MAX_INDEX 255
+#define VFPE_AEQALLOC_AECOUNT_S 0
+#define VFPE_AEQALLOC_AECOUNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFPE_CCQPHIGH(_VF) (0x00508800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_CCQPHIGH_MAX_INDEX 255
+#define VFPE_CCQPHIGH_PECCQPHIGH_S 0
+#define VFPE_CCQPHIGH_PECCQPHIGH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFPE_CCQPLOW(_VF) (0x00508400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_CCQPLOW_MAX_INDEX 255
+#define VFPE_CCQPLOW_PECCQPLOW_S 0
+#define VFPE_CCQPLOW_PECCQPLOW_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFPE_CCQPSTATUS(_VF) (0x00508000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_CCQPSTATUS_MAX_INDEX 255
+#define VFPE_CCQPSTATUS_CCQP_DONE_S 0
+#define VFPE_CCQPSTATUS_CCQP_DONE_M BIT(0)
+#define VFPE_CCQPSTATUS_HMC_PROFILE_S 4
+#define VFPE_CCQPSTATUS_HMC_PROFILE_M MAKEMASK(0x7, 4)
+#define VFPE_CCQPSTATUS_RDMA_EN_VFS_S 16
+#define VFPE_CCQPSTATUS_RDMA_EN_VFS_M MAKEMASK(0x3F, 16)
+#define VFPE_CCQPSTATUS_CCQP_ERR_S 31
+#define VFPE_CCQPSTATUS_CCQP_ERR_M BIT(31)
+#define VFPE_CQACK(_VF) (0x00502400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_CQACK_MAX_INDEX 255
+#define VFPE_CQACK_PECQID_S 0
+#define VFPE_CQACK_PECQID_M MAKEMASK(0x7FFFF, 0)
+#define VFPE_CQARM(_VF) (0x00502000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_CQARM_MAX_INDEX 255
+#define VFPE_CQARM_PECQID_S 0
+#define VFPE_CQARM_PECQID_M MAKEMASK(0x7FFFF, 0)
+#define VFPE_CQPDB(_VF) (0x00500000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_CQPDB_MAX_INDEX 255
+#define VFPE_CQPDB_WQHEAD_S 0
+#define VFPE_CQPDB_WQHEAD_M MAKEMASK(0x7FF, 0)
+#define VFPE_CQPERRCODES(_VF) (0x00509000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_CQPERRCODES_MAX_INDEX 255
+#define VFPE_CQPERRCODES_CQP_MINOR_CODE_S 0
+#define VFPE_CQPERRCODES_CQP_MINOR_CODE_M MAKEMASK(0xFFFF, 0)
+#define VFPE_CQPERRCODES_CQP_MAJOR_CODE_S 16
+#define VFPE_CQPERRCODES_CQP_MAJOR_CODE_M MAKEMASK(0xFFFF, 16)
+#define VFPE_CQPTAIL(_VF) (0x00500400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_CQPTAIL_MAX_INDEX 255
+#define VFPE_CQPTAIL_WQTAIL_S 0
+#define VFPE_CQPTAIL_WQTAIL_M MAKEMASK(0x7FF, 0)
+#define VFPE_CQPTAIL_CQP_OP_ERR_S 31
+#define VFPE_CQPTAIL_CQP_OP_ERR_M BIT(31)
+#define VFPE_IPCONFIG0(_VF) (0x00508C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_IPCONFIG0_MAX_INDEX 255
+#define VFPE_IPCONFIG0_PEIPID_S 0
+#define VFPE_IPCONFIG0_PEIPID_M MAKEMASK(0xFFFF, 0)
+#define VFPE_IPCONFIG0_USEENTIREIDRANGE_S 16
+#define VFPE_IPCONFIG0_USEENTIREIDRANGE_M BIT(16)
+#define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_S 17
+#define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_M BIT(17)
+#define VFPE_RCVUNEXPECTEDERROR(_VF) (0x00509C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 255
+#define VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
+#define VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
+#define VFPE_TCPNOWTIMER(_VF) (0x00509400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_TCPNOWTIMER_MAX_INDEX 255
+#define VFPE_TCPNOWTIMER_TCP_NOW_S 0
+#define VFPE_TCPNOWTIMER_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFPE_WQEALLOC(_VF) (0x00504000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_WQEALLOC_MAX_INDEX 255
+#define VFPE_WQEALLOC_PEQPID_S 0
+#define VFPE_WQEALLOC_PEQPID_M MAKEMASK(0x3FFFF, 0)
+#define VFPE_WQEALLOC_WQE_DESC_INDEX_S 20
+#define VFPE_WQEALLOC_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20)
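PFPE_WQEALLOC and its per-VF counterpart VFPE_WQEALLOC(_VF) share one field layout: the QP id occupies bits 0..17 and the WQE descriptor index bits 20..31. A minimal sketch of packing a doorbell value and selecting the register offset with the macros above (illustrative only; how the value is then written out is up to the driver's MMIO helpers):

#include <stdint.h>

/* Illustrative only: compose a WQE-allocate doorbell value, and pick the
 * register offset for either the PF itself or one of its VFs. */
static uint32_t
wqealloc_value(uint32_t qpid, uint32_t desc_idx)
{
	return (((qpid << PFPE_WQEALLOC_PEQPID_S) & PFPE_WQEALLOC_PEQPID_M) |
	    ((desc_idx << PFPE_WQEALLOC_WQE_DESC_INDEX_S) &
	     PFPE_WQEALLOC_WQE_DESC_INDEX_M));
}

static uint32_t
wqealloc_offset(int vf)	/* vf < 0 selects the PF's own register */
{
	return (vf < 0 ? PFPE_WQEALLOC : VFPE_WQEALLOC(vf));
}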
+#define GLPES_PFIP4RXDISCARD(_i) (0x00541400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXDISCARD_MAX_INDEX 127
+#define GLPES_PFIP4RXDISCARD_IP4RXDISCARD_S 0
+#define GLPES_PFIP4RXDISCARD_IP4RXDISCARD_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4RXFRAGSHI(_i) (0x00541C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXFRAGSHI_MAX_INDEX 127
+#define GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_S 0
+#define GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4RXFRAGSLO(_i) (0x00541C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXFRAGSLO_MAX_INDEX 127
+#define GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_S 0
+#define GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4RXMCOCTSHI(_i) (0x00542404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 127
+#define GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_S 0
+#define GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4RXMCOCTSLO(_i) (0x00542400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 127
+#define GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_S 0
+#define GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4RXMCPKTSHI(_i) (0x00542C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 127
+#define GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_S 0
+#define GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4RXMCPKTSLO(_i) (0x00542C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 127
+#define GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_S 0
+#define GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4RXOCTSHI(_i) (0x00540404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXOCTSHI_MAX_INDEX 127
+#define GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_S 0
+#define GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4RXOCTSLO(_i) (0x00540400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXOCTSLO_MAX_INDEX 127
+#define GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_S 0
+#define GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4RXPKTSHI(_i) (0x00540C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXPKTSHI_MAX_INDEX 127
+#define GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_S 0
+#define GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4RXPKTSLO(_i) (0x00540C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXPKTSLO_MAX_INDEX 127
+#define GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_S 0
+#define GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4RXTRUNC(_i) (0x00541800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4RXTRUNC_MAX_INDEX 127
+#define GLPES_PFIP4RXTRUNC_IP4RXTRUNC_S 0
+#define GLPES_PFIP4RXTRUNC_IP4RXTRUNC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4TXFRAGSHI(_i) (0x00547404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXFRAGSHI_MAX_INDEX 127
+#define GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_S 0
+#define GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4TXFRAGSLO(_i) (0x00547400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXFRAGSLO_MAX_INDEX 127
+#define GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_S 0
+#define GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4TXMCOCTSHI(_i) (0x00547C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 127
+#define GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_S 0
+#define GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4TXMCOCTSLO(_i) (0x00547C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 127
+#define GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_S 0
+#define GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4TXMCPKTSHI(_i) (0x00548404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 127
+#define GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_S 0
+#define GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4TXMCPKTSLO(_i) (0x00548400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 127
+#define GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_S 0
+#define GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4TXNOROUTE(_i) (0x0054B400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXNOROUTE_MAX_INDEX 127
+#define GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_S 0
+#define GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_PFIP4TXOCTSHI(_i) (0x00546404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXOCTSHI_MAX_INDEX 127
+#define GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_S 0
+#define GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4TXOCTSLO(_i) (0x00546400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXOCTSLO_MAX_INDEX 127
+#define GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_S 0
+#define GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP4TXPKTSHI(_i) (0x00546C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXPKTSHI_MAX_INDEX 127
+#define GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_S 0
+#define GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP4TXPKTSLO(_i) (0x00546C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP4TXPKTSLO_MAX_INDEX 127
+#define GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_S 0
+#define GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6RXDISCARD(_i) (0x00544400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXDISCARD_MAX_INDEX 127
+#define GLPES_PFIP6RXDISCARD_IP6RXDISCARD_S 0
+#define GLPES_PFIP6RXDISCARD_IP6RXDISCARD_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6RXFRAGSHI(_i) (0x00544C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXFRAGSHI_MAX_INDEX 127
+#define GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_S 0
+#define GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6RXFRAGSLO(_i) (0x00544C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXFRAGSLO_MAX_INDEX 127
+#define GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_S 0
+#define GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6RXMCOCTSHI(_i) (0x00545404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 127
+#define GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_S 0
+#define GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6RXMCOCTSLO(_i) (0x00545400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 127
+#define GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_S 0
+#define GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6RXMCPKTSHI(_i) (0x00545C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 127
+#define GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_S 0
+#define GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6RXMCPKTSLO(_i) (0x00545C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 127
+#define GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_S 0
+#define GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6RXOCTSHI(_i) (0x00543404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXOCTSHI_MAX_INDEX 127
+#define GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_S 0
+#define GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6RXOCTSLO(_i) (0x00543400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXOCTSLO_MAX_INDEX 127
+#define GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_S 0
+#define GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6RXPKTSHI(_i) (0x00543C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXPKTSHI_MAX_INDEX 127
+#define GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_S 0
+#define GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6RXPKTSLO(_i) (0x00543C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXPKTSLO_MAX_INDEX 127
+#define GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_S 0
+#define GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6RXTRUNC(_i) (0x00544800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6RXTRUNC_MAX_INDEX 127
+#define GLPES_PFIP6RXTRUNC_IP6RXTRUNC_S 0
+#define GLPES_PFIP6RXTRUNC_IP6RXTRUNC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6TXFRAGSHI(_i) (0x00549C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXFRAGSHI_MAX_INDEX 127
+#define GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_S 0
+#define GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6TXFRAGSLO(_i) (0x00549C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXFRAGSLO_MAX_INDEX 127
+#define GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_S 0
+#define GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6TXMCOCTSHI(_i) (0x0054A404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 127
+#define GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_S 0
+#define GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6TXMCOCTSLO(_i) (0x0054A400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 127
+#define GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_S 0
+#define GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6TXMCPKTSHI(_i) (0x0054AC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 127
+#define GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_S 0
+#define GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6TXMCPKTSLO(_i) (0x0054AC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 127
+#define GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_S 0
+#define GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6TXNOROUTE(_i) (0x0054B800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXNOROUTE_MAX_INDEX 127
+#define GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_S 0
+#define GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_PFIP6TXOCTSHI(_i) (0x00548C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXOCTSHI_MAX_INDEX 127
+#define GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_S 0
+#define GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6TXOCTSLO(_i) (0x00548C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXOCTSLO_MAX_INDEX 127
+#define GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_S 0
+#define GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFIP6TXPKTSHI(_i) (0x00549404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXPKTSHI_MAX_INDEX 127
+#define GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_S 0
+#define GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFIP6TXPKTSLO(_i) (0x00549400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFIP6TXPKTSLO_MAX_INDEX 127
+#define GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_S 0
+#define GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFRDMARXRDSHI(_i) (0x0054EC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMARXRDSHI_MAX_INDEX 127
+#define GLPES_PFRDMARXRDSHI_RDMARXRDSHI_S 0
+#define GLPES_PFRDMARXRDSHI_RDMARXRDSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFRDMARXRDSLO(_i) (0x0054EC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMARXRDSLO_MAX_INDEX 127
+#define GLPES_PFRDMARXRDSLO_RDMARXRDSLO_S 0
+#define GLPES_PFRDMARXRDSLO_RDMARXRDSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFRDMARXSNDSHI(_i) (0x0054F404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMARXSNDSHI_MAX_INDEX 127
+#define GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_S 0
+#define GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFRDMARXSNDSLO(_i) (0x0054F400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMARXSNDSLO_MAX_INDEX 127
+#define GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_S 0
+#define GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFRDMARXWRSHI(_i) (0x0054E404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMARXWRSHI_MAX_INDEX 127
+#define GLPES_PFRDMARXWRSHI_RDMARXWRSHI_S 0
+#define GLPES_PFRDMARXWRSHI_RDMARXWRSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFRDMARXWRSLO(_i) (0x0054E400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMARXWRSLO_MAX_INDEX 127
+#define GLPES_PFRDMARXWRSLO_RDMARXWRSLO_S 0
+#define GLPES_PFRDMARXWRSLO_RDMARXWRSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFRDMATXRDSHI(_i) (0x00550404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMATXRDSHI_MAX_INDEX 127
+#define GLPES_PFRDMATXRDSHI_RDMARXRDSHI_S 0
+#define GLPES_PFRDMATXRDSHI_RDMARXRDSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFRDMATXRDSLO(_i) (0x00550400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMATXRDSLO_MAX_INDEX 127
+#define GLPES_PFRDMATXRDSLO_RDMARXRDSLO_S 0
+#define GLPES_PFRDMATXRDSLO_RDMARXRDSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFRDMATXSNDSHI(_i) (0x00550C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMATXSNDSHI_MAX_INDEX 127
+#define GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_S 0
+#define GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFRDMATXSNDSLO(_i) (0x00550C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMATXSNDSLO_MAX_INDEX 127
+#define GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_S 0
+#define GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFRDMATXWRSHI(_i) (0x0054FC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMATXWRSHI_MAX_INDEX 127
+#define GLPES_PFRDMATXWRSHI_RDMARXWRSHI_S 0
+#define GLPES_PFRDMATXWRSHI_RDMARXWRSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFRDMATXWRSLO(_i) (0x0054FC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMATXWRSLO_MAX_INDEX 127
+#define GLPES_PFRDMATXWRSLO_RDMARXWRSLO_S 0
+#define GLPES_PFRDMATXWRSLO_RDMARXWRSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFRDMAVBNDHI(_i) (0x00551404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMAVBNDHI_MAX_INDEX 127
+#define GLPES_PFRDMAVBNDHI_RDMAVBNDHI_S 0
+#define GLPES_PFRDMAVBNDHI_RDMAVBNDHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFRDMAVBNDLO(_i) (0x00551400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMAVBNDLO_MAX_INDEX 127
+#define GLPES_PFRDMAVBNDLO_RDMAVBNDLO_S 0
+#define GLPES_PFRDMAVBNDLO_RDMAVBNDLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFRDMAVINVHI(_i) (0x00551C04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMAVINVHI_MAX_INDEX 127
+#define GLPES_PFRDMAVINVHI_RDMAVINVHI_S 0
+#define GLPES_PFRDMAVINVHI_RDMAVINVHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFRDMAVINVLO(_i) (0x00551C00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRDMAVINVLO_MAX_INDEX 127
+#define GLPES_PFRDMAVINVLO_RDMAVINVLO_S 0
+#define GLPES_PFRDMAVINVLO_RDMAVINVLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFRXVLANERR(_i) (0x00540000 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFRXVLANERR_MAX_INDEX 127
+#define GLPES_PFRXVLANERR_RXVLANERR_S 0
+#define GLPES_PFRXVLANERR_RXVLANERR_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_PFTCPRTXSEG(_i) (0x00552400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFTCPRTXSEG_MAX_INDEX 127
+#define GLPES_PFTCPRTXSEG_TCPRTXSEG_S 0
+#define GLPES_PFTCPRTXSEG_TCPRTXSEG_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFTCPRXOPTERR(_i) (0x0054C400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFTCPRXOPTERR_MAX_INDEX 127
+#define GLPES_PFTCPRXOPTERR_TCPRXOPTERR_S 0
+#define GLPES_PFTCPRXOPTERR_TCPRXOPTERR_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_PFTCPRXPROTOERR(_i) (0x0054C800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFTCPRXPROTOERR_MAX_INDEX 127
+#define GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_S 0
+#define GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_PFTCPRXSEGSHI(_i) (0x0054BC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFTCPRXSEGSHI_MAX_INDEX 127
+#define GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_S 0
+#define GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFTCPRXSEGSLO(_i) (0x0054BC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFTCPRXSEGSLO_MAX_INDEX 127
+#define GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_S 0
+#define GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFTCPTXSEGHI(_i) (0x0054CC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFTCPTXSEGHI_MAX_INDEX 127
+#define GLPES_PFTCPTXSEGHI_TCPTXSEGHI_S 0
+#define GLPES_PFTCPTXSEGHI_TCPTXSEGHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFTCPTXSEGLO(_i) (0x0054CC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFTCPTXSEGLO_MAX_INDEX 127
+#define GLPES_PFTCPTXSEGLO_TCPTXSEGLO_S 0
+#define GLPES_PFTCPTXSEGLO_TCPTXSEGLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFUDPRXPKTSHI(_i) (0x0054D404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFUDPRXPKTSHI_MAX_INDEX 127
+#define GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_S 0
+#define GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFUDPRXPKTSLO(_i) (0x0054D400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFUDPRXPKTSLO_MAX_INDEX 127
+#define GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_S 0
+#define GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_PFUDPTXPKTSHI(_i) (0x0054DC04 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFUDPTXPKTSHI_MAX_INDEX 127
+#define GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_S 0
+#define GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_M MAKEMASK(0xFFFF, 0)
+#define GLPES_PFUDPTXPKTSLO(_i) (0x0054DC00 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLPES_PFUDPTXPKTSLO_MAX_INDEX 127
+#define GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_S 0
+#define GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_RDMARXMULTFPDUSHI 0x0055E00C /* Reset Source: CORER */
+#define GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_S 0
+#define GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_RDMARXMULTFPDUSLO 0x0055E008 /* Reset Source: CORER */
+#define GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_S 0
+#define GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_RDMARXOOODDPHI 0x0055E014 /* Reset Source: CORER */
+#define GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_S 0
+#define GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_RDMARXOOODDPLO 0x0055E010 /* Reset Source: CORER */
+#define GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_S 0
+#define GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_RDMARXOOONOMARK 0x0055E004 /* Reset Source: CORER */
+#define GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_S 0
+#define GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_RDMARXUNALIGN 0x0055E000 /* Reset Source: CORER */
+#define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_S 0
+#define GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_TCPRXFOURHOLEHI 0x0055E03C /* Reset Source: CORER */
+#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_S 0
+#define GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_TCPRXFOURHOLELO 0x0055E038 /* Reset Source: CORER */
+#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_S 0
+#define GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_TCPRXONEHOLEHI 0x0055E024 /* Reset Source: CORER */
+#define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_S 0
+#define GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_TCPRXONEHOLELO 0x0055E020 /* Reset Source: CORER */
+#define GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_S 0
+#define GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_TCPRXPUREACKHI 0x0055E01C /* Reset Source: CORER */
+#define GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_S 0
+#define GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_TCPRXPUREACKSLO 0x0055E018 /* Reset Source: CORER */
+#define GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_S 0
+#define GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_TCPRXTHREEHOLEHI 0x0055E034 /* Reset Source: CORER */
+#define GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_S 0
+#define GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_TCPRXTHREEHOLELO 0x0055E030 /* Reset Source: CORER */
+#define GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_S 0
+#define GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_TCPRXTWOHOLEHI 0x0055E02C /* Reset Source: CORER */
+#define GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_S 0
+#define GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_TCPRXTWOHOLELO 0x0055E028 /* Reset Source: CORER */
+#define GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_S 0
+#define GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_TCPTXRETRANSFASTHI 0x0055E044 /* Reset Source: CORER */
+#define GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_S 0
+#define GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_TCPTXRETRANSFASTLO 0x0055E040 /* Reset Source: CORER */
+#define GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_S 0
+#define GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_TCPTXTOUTSFASTHI 0x0055E04C /* Reset Source: CORER */
+#define GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_S 0
+#define GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_TCPTXTOUTSFASTLO 0x0055E048 /* Reset Source: CORER */
+#define GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_S 0
+#define GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPES_TCPTXTOUTSHI 0x0055E054 /* Reset Source: CORER */
+#define GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_S 0
+#define GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_M MAKEMASK(0xFFFFFF, 0)
+#define GLPES_TCPTXTOUTSLO 0x0055E050 /* Reset Source: CORER */
+#define GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_S 0
+#define GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_PWR_MODE_CTL 0x000B820C /* Reset Source: POR */
+#define GL_PWR_MODE_CTL_SWITCH_PWR_MODE_EN_S 0
+#define GL_PWR_MODE_CTL_SWITCH_PWR_MODE_EN_M BIT(0)
+#define GL_PWR_MODE_CTL_NIC_PWR_MODE_EN_S 1
+#define GL_PWR_MODE_CTL_NIC_PWR_MODE_EN_M BIT(1)
+#define GL_PWR_MODE_CTL_S5_PWR_MODE_EN_S 2
+#define GL_PWR_MODE_CTL_S5_PWR_MODE_EN_M BIT(2)
+#define GL_PWR_MODE_CTL_CAR_MAX_SW_CONFIG_S 3
+#define GL_PWR_MODE_CTL_CAR_MAX_SW_CONFIG_M MAKEMASK(0x3, 3)
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
+#define GL_PWR_MODE_CTL_CAR_MAX_BW_M MAKEMASK(0x3, 30)
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT 0x000B825C /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PECLK_S 0
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PECLK_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UCLK_S 3
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UCLK_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_LCLK_S 6
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_LCLK_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PSM_S 9
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_PSM_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_RXCTL_S 12
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_RXCTL_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UANA_S 15
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_UANA_M MAKEMASK(0x7, 15)
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_S5_S 18
+#define GL_PWR_MODE_DIVIDE_CTRL_H_DEFAULT_DEFAULT_DIV_VAL_S5_M MAKEMASK(0x7, 18)
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT 0x000B8218 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PECLK_S 0
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PECLK_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UCLK_S 3
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UCLK_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_LCLK_S 6
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_LCLK_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PSM_S 9
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_PSM_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_RXCTL_S 12
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_RXCTL_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UANA_S 15
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_UANA_M MAKEMASK(0x7, 15)
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_S5_S 18
+#define GL_PWR_MODE_DIVIDE_CTRL_L_DEFAULT_DEFAULT_DIV_VAL_S5_M MAKEMASK(0x7, 18)
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT 0x000B8260 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PECLK_S 0
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PECLK_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UCLK_S 3
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UCLK_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_LCLK_S 6
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_LCLK_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PSM_S 9
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_PSM_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_RXCTL_S 12
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_RXCTL_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UANA_S 15
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_UANA_M MAKEMASK(0x7, 15)
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_S5_S 18
+#define GL_PWR_MODE_DIVIDE_CTRL_M_DEFAULT_DEFAULT_DIV_VAL_S5_M MAKEMASK(0x7, 18)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK 0x000B8200 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_50G_H_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_25G_H_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_10G_H_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_4G_H_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_A50G_H_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_LCLK_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK 0x000B81F0 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_50G_H_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_25G_H_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_10G_H_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_4G_H_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_A50G_H_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PECLK_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM 0x000B81FC /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_50G_H_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_25G_H_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_10G_H_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_4G_H_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_A50G_H_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_PSM_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL 0x000B81F8 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_50G_H_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_25G_H_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_10G_H_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_4G_H_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_A50G_H_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_RXCTL_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA 0x000B8208 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_50G_H_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_25G_H_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_10G_H_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_4G_H_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_A50G_H_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UANA_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK 0x000B81F4 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_50G_H_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_25G_H_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_10G_H_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_4G_H_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_A50G_H_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_H_UCLK_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK 0x000B8244 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_50G_L_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_25G_L_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_10G_L_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_4G_L_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_A50G_L_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_LCLK_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK 0x000B8220 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_50G_L_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_25G_L_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_10G_L_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_4G_L_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_A50G_L_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PECLK_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM 0x000B8240 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_50G_L_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_25G_L_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_10G_L_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_4G_L_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_A50G_L_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_PSM_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL 0x000B823C /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_50G_L_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_25G_L_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_10G_L_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_4G_L_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_A50G_L_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_RXCTL_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA 0x000B8248 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_50G_L_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_25G_L_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_10G_L_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_4G_L_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_A50G_L_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UANA_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK 0x000B8238 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_50G_L_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_25G_L_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_10G_L_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_4G_L_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_A50G_L_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_L_UCLK_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK 0x000B8230 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_50G_M_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_25G_M_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_10G_M_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_4G_M_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_A50G_M_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_LCLK_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK 0x000B821C /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_50G_M_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_25G_M_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_10G_M_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_4G_M_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_A50G_M_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PECLK_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM 0x000B822C /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_50G_M_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_25G_M_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_10G_M_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_4G_M_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_A50G_M_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_PSM_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL 0x000B8228 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_50G_M_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_25G_M_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_10G_M_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_4G_M_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_A50G_M_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_RXCTL_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA 0x000B8234 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_50G_M_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_25G_M_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_10G_M_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_4G_M_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_A50G_M_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UANA_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK 0x000B8224 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_50G_M_S 0
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_25G_M_S 3
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_10G_M_S 6
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_4G_M_S 9
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_A50G_M_S 12
+#define GL_PWR_MODE_DIVIDE_S0_CTRL_M_UCLK_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL 0x000B81EC /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_50G_H_S 0
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_50G_H_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_25G_H_S 3
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_25G_H_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_10G_H_S 6
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_10G_H_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_4G_H_S 9
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_4G_H_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_A50G_H_S 12
+#define GL_PWR_MODE_DIVIDE_S5_H_CTRL_DIV_VAL_TBW_A50G_H_M MAKEMASK(0xF, 12)
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL 0x000B824C /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_50G_L_S 0
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_50G_L_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_25G_L_S 3
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_25G_L_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_10G_L_S 6
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_10G_L_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_4G_L_S 9
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_4G_L_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_A50G_L_S 12
+#define GL_PWR_MODE_DIVIDE_S5_L_CTRL_DIV_VAL_TBW_A50G_L_M MAKEMASK(0x7, 12)
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL 0x000B8250 /* Reset Source: POR */
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_50G_M_S 0
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_50G_M_M MAKEMASK(0x7, 0)
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_25G_M_S 3
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_25G_M_M MAKEMASK(0x7, 3)
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_10G_M_S 6
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_10G_M_M MAKEMASK(0x7, 6)
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_4G_M_S 9
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_4G_M_M MAKEMASK(0x7, 9)
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_A50G_M_S 12
+#define GL_PWR_MODE_DIVIDE_S5_M_CTRL_DIV_VAL_TBW_A50G_M_M MAKEMASK(0x7, 12)
+#define GL_S5_PWR_MODE_EXIT_CTL 0x000B8270 /* Reset Source: POR */
+#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_AUTO_EXIT_S 0
+#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_AUTO_EXIT_M BIT(0)
+#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_FW_EXIT_S 1
+#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_FW_EXIT_M BIT(1)
+#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_PRST_FLOWS_ON_CORER_S 3
+#define GL_S5_PWR_MODE_EXIT_CTL_S5_PWR_MODE_PRST_FLOWS_ON_CORER_M BIT(3)
+#define GLGEN_PME_TO 0x000B81BC /* Reset Source: POR */
+#define GLGEN_PME_TO_PME_TO_FOR_PE_S 0
+#define GLGEN_PME_TO_PME_TO_FOR_PE_M BIT(0)
+#define PRTPM_EEE_STAT 0x001E4320 /* Reset Source: GLOBR */
+#define PRTPM_EEE_STAT_EEE_NEG_S 29
+#define PRTPM_EEE_STAT_EEE_NEG_M BIT(29)
+#define PRTPM_EEE_STAT_RX_LPI_STATUS_S 30
+#define PRTPM_EEE_STAT_RX_LPI_STATUS_M BIT(30)
+#define PRTPM_EEE_STAT_TX_LPI_STATUS_S 31
+#define PRTPM_EEE_STAT_TX_LPI_STATUS_M BIT(31)
+#define PRTPM_EEEC 0x001E4380 /* Reset Source: GLOBR */
+#define PRTPM_EEEC_TW_WAKE_MIN_S 16
+#define PRTPM_EEEC_TW_WAKE_MIN_M MAKEMASK(0x3F, 16)
+#define PRTPM_EEEC_TX_LU_LPI_DLY_S 24
+#define PRTPM_EEEC_TX_LU_LPI_DLY_M MAKEMASK(0x3, 24)
+#define PRTPM_EEEC_TEEE_DLY_S 26
+#define PRTPM_EEEC_TEEE_DLY_M MAKEMASK(0x3F, 26)
+#define PRTPM_EEEFWD 0x001E4400 /* Reset Source: GLOBR */
+#define PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_S 31
+#define PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_M BIT(31)
+#define PRTPM_EEER 0x001E4360 /* Reset Source: GLOBR */
+#define PRTPM_EEER_TW_SYSTEM_S 0
+#define PRTPM_EEER_TW_SYSTEM_M MAKEMASK(0xFFFF, 0)
+#define PRTPM_EEER_TX_LPI_EN_S 16
+#define PRTPM_EEER_TX_LPI_EN_M BIT(16)
+#define PRTPM_EEETXC 0x001E43E0 /* Reset Source: GLOBR */
+#define PRTPM_EEETXC_TW_PHY_S 0
+#define PRTPM_EEETXC_TW_PHY_M MAKEMASK(0xFFFF, 0)
+#define PRTPM_RLPIC 0x001E43A0 /* Reset Source: GLOBR */
+#define PRTPM_RLPIC_ERLPIC_S 0
+#define PRTPM_RLPIC_ERLPIC_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTPM_TLPIC 0x001E43C0 /* Reset Source: GLOBR */
+#define PRTPM_TLPIC_ETLPIC_S 0
+#define PRTPM_TLPIC_ETLPIC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLRPB_DHW(_i) (0x000AC000 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLRPB_DHW_MAX_INDEX 15
+#define GLRPB_DHW_DHW_TCN_S 0
+#define GLRPB_DHW_DHW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_DLW(_i) (0x000AC044 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLRPB_DLW_MAX_INDEX 15
+#define GLRPB_DLW_DLW_TCN_S 0
+#define GLRPB_DLW_DLW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_DPS(_i) (0x000AC084 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLRPB_DPS_MAX_INDEX 15
+#define GLRPB_DPS_DPS_TCN_S 0
+#define GLRPB_DPS_DPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_DSI_EN 0x000AC324 /* Reset Source: CORER */
+#define GLRPB_DSI_EN_DSI_EN_S 0
+#define GLRPB_DSI_EN_DSI_EN_M BIT(0)
+#define GLRPB_DSI_EN_DSI_L2_MAC_ERR_DROP_EN_S 1
+#define GLRPB_DSI_EN_DSI_L2_MAC_ERR_DROP_EN_M BIT(1)
+#define GLRPB_SHW(_i) (0x000AC120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRPB_SHW_MAX_INDEX 7
+#define GLRPB_SHW_SHW_S 0
+#define GLRPB_SHW_SHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_SLW(_i) (0x000AC140 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRPB_SLW_MAX_INDEX 7
+#define GLRPB_SLW_SLW_S 0
+#define GLRPB_SLW_SLW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_SPS(_i) (0x000AC0C4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRPB_SPS_MAX_INDEX 7
+#define GLRPB_SPS_SPS_TCN_S 0
+#define GLRPB_SPS_SPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_TC_CFG(_i) (0x000AC2A4 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLRPB_TC_CFG_MAX_INDEX 31
+#define GLRPB_TC_CFG_D_POOL_S 0
+#define GLRPB_TC_CFG_D_POOL_M MAKEMASK(0xFFFF, 0)
+#define GLRPB_TC_CFG_S_POOL_S 16
+#define GLRPB_TC_CFG_S_POOL_M MAKEMASK(0xFFFF, 16)
+#define GLRPB_TCHW(_i) (0x000AC330 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLRPB_TCHW_MAX_INDEX 31
+#define GLRPB_TCHW_TCHW_S 0
+#define GLRPB_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_TCLW(_i) (0x000AC3B0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLRPB_TCLW_MAX_INDEX 31
+#define GLRPB_TCLW_TCLW_S 0
+#define GLRPB_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0)
+#define GLQF_APBVT(_i) (0x00450000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define GLQF_APBVT_MAX_INDEX 2047
+#define GLQF_APBVT_APBVT_S 0
+#define GLQF_APBVT_APBVT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLQF_FD_CLSN_0 0x00460028 /* Reset Source: CORER */
+#define GLQF_FD_CLSN_0_HITSBCNT_S 0
+#define GLQF_FD_CLSN_0_HITSBCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLQF_FD_CLSN1 0x00460030 /* Reset Source: CORER */
+#define GLQF_FD_CLSN1_HITLBCNT_S 0
+#define GLQF_FD_CLSN1_HITLBCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLQF_FD_CNT 0x00460018 /* Reset Source: CORER */
+#define GLQF_FD_CNT_FD_GCNT_S 0
+#define GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define GLQF_FD_CNT_FD_BCNT_S 16
+#define GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define GLQF_FD_CTL 0x00460000 /* Reset Source: CORER */
+#define GLQF_FD_CTL_FDLONG_S 0
+#define GLQF_FD_CTL_FDLONG_M MAKEMASK(0xF, 0)
+#define GLQF_FD_CTL_HASH_REPORT_S 4
+#define GLQF_FD_CTL_HASH_REPORT_M BIT(4)
+#define GLQF_FD_CTL_FLT_ADDR_REPORT_S 5
+#define GLQF_FD_CTL_FLT_ADDR_REPORT_M BIT(5)
+#define GLQF_FD_SIZE 0x00460010 /* Reset Source: CORER */
+#define GLQF_FD_SIZE_FD_GSIZE_S 0
+#define GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define GLQF_FD_SIZE_FD_BSIZE_S 16
+#define GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define GLQF_FDCNT_0 0x00460020 /* Reset Source: CORER */
+#define GLQF_FDCNT_0_BUCKETCNT_S 0
+#define GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0x7FFF, 0)
+#define GLQF_FDCNT_0_CNT_NOT_VLD_S 31
+#define GLQF_FDCNT_0_CNT_NOT_VLD_M BIT(31)
+#define GLQF_FDEVICTENA(_i) (0x00452000 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLQF_FDEVICTENA_MAX_INDEX 3
+#define GLQF_FDEVICTENA_FDEVICTENA_S 0
+#define GLQF_FDEVICTENA_FDEVICTENA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */
+#define GLQF_FDINSET_MAX_INDEX 127
+#define GLQF_FDINSET_FV_WORD_INDX0_S 0
+#define GLQF_FDINSET_FV_WORD_INDX0_M MAKEMASK(0x1F, 0)
+#define GLQF_FDINSET_FV_WORD_VAL0_S 7
+#define GLQF_FDINSET_FV_WORD_VAL0_M BIT(7)
+#define GLQF_FDINSET_FV_WORD_INDX1_S 8
+#define GLQF_FDINSET_FV_WORD_INDX1_M MAKEMASK(0x1F, 8)
+#define GLQF_FDINSET_FV_WORD_VAL1_S 15
+#define GLQF_FDINSET_FV_WORD_VAL1_M BIT(15)
+#define GLQF_FDINSET_FV_WORD_INDX2_S 16
+#define GLQF_FDINSET_FV_WORD_INDX2_M MAKEMASK(0x1F, 16)
+#define GLQF_FDINSET_FV_WORD_VAL2_S 23
+#define GLQF_FDINSET_FV_WORD_VAL2_M BIT(23)
+#define GLQF_FDINSET_FV_WORD_INDX3_S 24
+#define GLQF_FDINSET_FV_WORD_INDX3_M MAKEMASK(0x1F, 24)
+#define GLQF_FDINSET_FV_WORD_VAL3_S 31
+#define GLQF_FDINSET_FV_WORD_VAL3_M BIT(31)
+#define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLQF_FDMASK_MAX_INDEX 31
+#define GLQF_FDMASK_MSK_INDEX_S 0
+#define GLQF_FDMASK_MSK_INDEX_M MAKEMASK(0x1F, 0)
+#define GLQF_FDMASK_MASK_S 16
+#define GLQF_FDMASK_MASK_M MAKEMASK(0xFFFF, 16)
+#define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLQF_FDMASK_SEL_MAX_INDEX 127
+#define GLQF_FDMASK_SEL_MASK_SEL_S 0
+#define GLQF_FDMASK_SEL_MASK_SEL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */
+#define GLQF_FDSWAP_MAX_INDEX 127
+#define GLQF_FDSWAP_FV_WORD_INDX0_S 0
+#define GLQF_FDSWAP_FV_WORD_INDX0_M MAKEMASK(0x1F, 0)
+#define GLQF_FDSWAP_FV_WORD_VAL0_S 7
+#define GLQF_FDSWAP_FV_WORD_VAL0_M BIT(7)
+#define GLQF_FDSWAP_FV_WORD_INDX1_S 8
+#define GLQF_FDSWAP_FV_WORD_INDX1_M MAKEMASK(0x1F, 8)
+#define GLQF_FDSWAP_FV_WORD_VAL1_S 15
+#define GLQF_FDSWAP_FV_WORD_VAL1_M BIT(15)
+#define GLQF_FDSWAP_FV_WORD_INDX2_S 16
+#define GLQF_FDSWAP_FV_WORD_INDX2_M MAKEMASK(0x1F, 16)
+#define GLQF_FDSWAP_FV_WORD_VAL2_S 23
+#define GLQF_FDSWAP_FV_WORD_VAL2_M BIT(23)
+#define GLQF_FDSWAP_FV_WORD_INDX3_S 24
+#define GLQF_FDSWAP_FV_WORD_INDX3_M MAKEMASK(0x1F, 24)
+#define GLQF_FDSWAP_FV_WORD_VAL3_S 31
+#define GLQF_FDSWAP_FV_WORD_VAL3_M BIT(31)
+#define GLQF_HINSET(_i, _j) (0x0040E000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */
+#define GLQF_HINSET_MAX_INDEX 127
+#define GLQF_HINSET_FV_WORD_INDX0_S 0
+#define GLQF_HINSET_FV_WORD_INDX0_M MAKEMASK(0x1F, 0)
+#define GLQF_HINSET_FV_WORD_VAL0_S 7
+#define GLQF_HINSET_FV_WORD_VAL0_M BIT(7)
+#define GLQF_HINSET_FV_WORD_INDX1_S 8
+#define GLQF_HINSET_FV_WORD_INDX1_M MAKEMASK(0x1F, 8)
+#define GLQF_HINSET_FV_WORD_VAL1_S 15
+#define GLQF_HINSET_FV_WORD_VAL1_M BIT(15)
+#define GLQF_HINSET_FV_WORD_INDX2_S 16
+#define GLQF_HINSET_FV_WORD_INDX2_M MAKEMASK(0x1F, 16)
+#define GLQF_HINSET_FV_WORD_VAL2_S 23
+#define GLQF_HINSET_FV_WORD_VAL2_M BIT(23)
+#define GLQF_HINSET_FV_WORD_INDX3_S 24
+#define GLQF_HINSET_FV_WORD_INDX3_M MAKEMASK(0x1F, 24)
+#define GLQF_HINSET_FV_WORD_VAL3_S 31
+#define GLQF_HINSET_FV_WORD_VAL3_M BIT(31)
+#define GLQF_HKEY(_i) (0x00456000 + ((_i) * 4)) /* _i=0...12 */ /* Reset Source: CORER */
+#define GLQF_HKEY_MAX_INDEX 12
+#define GLQF_HKEY_KEY_0_S 0
+#define GLQF_HKEY_KEY_0_M MAKEMASK(0xFF, 0)
+#define GLQF_HKEY_KEY_1_S 8
+#define GLQF_HKEY_KEY_1_M MAKEMASK(0xFF, 8)
+#define GLQF_HKEY_KEY_2_S 16
+#define GLQF_HKEY_KEY_2_M MAKEMASK(0xFF, 16)
+#define GLQF_HKEY_KEY_3_S 24
+#define GLQF_HKEY_KEY_3_M MAKEMASK(0xFF, 24)
+#define GLQF_HLUT(_i, _j) (0x00438000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...15 */ /* Reset Source: CORER */
+#define GLQF_HLUT_MAX_INDEX 127
+#define GLQF_HLUT_LUT0_S 0
+#define GLQF_HLUT_LUT0_M MAKEMASK(0x3F, 0)
+#define GLQF_HLUT_LUT1_S 8
+#define GLQF_HLUT_LUT1_M MAKEMASK(0x3F, 8)
+#define GLQF_HLUT_LUT2_S 16
+#define GLQF_HLUT_LUT2_M MAKEMASK(0x3F, 16)
+#define GLQF_HLUT_LUT3_S 24
+#define GLQF_HLUT_LUT3_M MAKEMASK(0x3F, 24)
+#define GLQF_HLUT_SIZE(_i) (0x00455400 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLQF_HLUT_SIZE_MAX_INDEX 15
+#define GLQF_HLUT_SIZE_HSIZE_S 0
+#define GLQF_HLUT_SIZE_HSIZE_M BIT(0)
+#define GLQF_HMASK(_i) (0x0040FC00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLQF_HMASK_MAX_INDEX 31
+#define GLQF_HMASK_MSK_INDEX_S 0
+#define GLQF_HMASK_MSK_INDEX_M MAKEMASK(0x1F, 0)
+#define GLQF_HMASK_MASK_S 16
+#define GLQF_HMASK_MASK_M MAKEMASK(0xFFFF, 16)
+#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GLQF_HMASK_SEL_MAX_INDEX 127
+#define GLQF_HMASK_SEL_MASK_SEL_S 0
+#define GLQF_HMASK_SEL_MASK_SEL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLQF_HSYMM(_i, _j) (0x0040F000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...5 */ /* Reset Source: CORER */
+#define GLQF_HSYMM_MAX_INDEX 127
+#define GLQF_HSYMM_FV_SYMM_INDX0_S 0
+#define GLQF_HSYMM_FV_SYMM_INDX0_M MAKEMASK(0x1F, 0)
+#define GLQF_HSYMM_SYMM0_ENA_S 7
+#define GLQF_HSYMM_SYMM0_ENA_M BIT(7)
+#define GLQF_HSYMM_FV_SYMM_INDX1_S 8
+#define GLQF_HSYMM_FV_SYMM_INDX1_M MAKEMASK(0x1F, 8)
+#define GLQF_HSYMM_SYMM1_ENA_S 15
+#define GLQF_HSYMM_SYMM1_ENA_M BIT(15)
+#define GLQF_HSYMM_FV_SYMM_INDX2_S 16
+#define GLQF_HSYMM_FV_SYMM_INDX2_M MAKEMASK(0x1F, 16)
+#define GLQF_HSYMM_SYMM2_ENA_S 23
+#define GLQF_HSYMM_SYMM2_ENA_M BIT(23)
+#define GLQF_HSYMM_FV_SYMM_INDX3_S 24
+#define GLQF_HSYMM_FV_SYMM_INDX3_M MAKEMASK(0x1F, 24)
+#define GLQF_HSYMM_SYMM3_ENA_S 31
+#define GLQF_HSYMM_SYMM3_ENA_M BIT(31)
+#define GLQF_PE_APBVT_CNT 0x00455500 /* Reset Source: CORER */
+#define GLQF_PE_APBVT_CNT_APBVT_LAN_S 0
+#define GLQF_PE_APBVT_CNT_APBVT_LAN_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLQF_PE_CMD 0x00471080 /* Reset Source: CORER */
+#define GLQF_PE_CMD_ADDREM_STS_S 0
+#define GLQF_PE_CMD_ADDREM_STS_M MAKEMASK(0xFFFFFF, 0)
+#define GLQF_PE_CMD_ADDREM_ID_S 28
+#define GLQF_PE_CMD_ADDREM_ID_M MAKEMASK(0xF, 28)
+#define GLQF_PE_CTL 0x004710C0 /* Reset Source: CORER */
+#define GLQF_PE_CTL_PELONG_S 0
+#define GLQF_PE_CTL_PELONG_M MAKEMASK(0xF, 0)
+#define GLQF_PE_CTL2(_i) (0x00455200 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLQF_PE_CTL2_MAX_INDEX 31
+#define GLQF_PE_CTL2_TO_QH_S 0
+#define GLQF_PE_CTL2_TO_QH_M MAKEMASK(0x3, 0)
+#define GLQF_PE_CTL2_APBVT_ENA_S 2
+#define GLQF_PE_CTL2_APBVT_ENA_M BIT(2)
+#define GLQF_PE_FVE 0x0020E514 /* Reset Source: CORER */
+#define GLQF_PE_FVE_W_ENA_S 0
+#define GLQF_PE_FVE_W_ENA_M MAKEMASK(0xFFFFFF, 0)
+#define GLQF_PE_OSR_STS 0x00471040 /* Reset Source: CORER */
+#define GLQF_PE_OSR_STS_QH_SRCH_MAXOSR_S 0
+#define GLQF_PE_OSR_STS_QH_SRCH_MAXOSR_M MAKEMASK(0x3FF, 0)
+#define GLQF_PE_OSR_STS_QH_CMD_MAXOSR_S 16
+#define GLQF_PE_OSR_STS_QH_CMD_MAXOSR_M MAKEMASK(0x3FF, 16)
+#define GLQF_PEINSET(_i, _j) (0x00415000 + ((_i) * 4 + (_j) * 128)) /* _i=0...31, _j=0...5 */ /* Reset Source: CORER */
+#define GLQF_PEINSET_MAX_INDEX 31
+#define GLQF_PEINSET_FV_WORD_INDX0_S 0
+#define GLQF_PEINSET_FV_WORD_INDX0_M MAKEMASK(0x1F, 0)
+#define GLQF_PEINSET_FV_WORD_VAL0_S 7
+#define GLQF_PEINSET_FV_WORD_VAL0_M BIT(7)
+#define GLQF_PEINSET_FV_WORD_INDX1_S 8
+#define GLQF_PEINSET_FV_WORD_INDX1_M MAKEMASK(0x1F, 8)
+#define GLQF_PEINSET_FV_WORD_VAL1_S 15
+#define GLQF_PEINSET_FV_WORD_VAL1_M BIT(15)
+#define GLQF_PEINSET_FV_WORD_INDX2_S 16
+#define GLQF_PEINSET_FV_WORD_INDX2_M MAKEMASK(0x1F, 16)
+#define GLQF_PEINSET_FV_WORD_VAL2_S 23
+#define GLQF_PEINSET_FV_WORD_VAL2_M BIT(23)
+#define GLQF_PEINSET_FV_WORD_INDX3_S 24
+#define GLQF_PEINSET_FV_WORD_INDX3_M MAKEMASK(0x1F, 24)
+#define GLQF_PEINSET_FV_WORD_VAL3_S 31
+#define GLQF_PEINSET_FV_WORD_VAL3_M BIT(31)
+#define GLQF_PEMASK(_i) (0x00415400 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLQF_PEMASK_MAX_INDEX 15
+#define GLQF_PEMASK_MSK_INDEX_S 0
+#define GLQF_PEMASK_MSK_INDEX_M MAKEMASK(0x1F, 0)
+#define GLQF_PEMASK_MASK_S 16
+#define GLQF_PEMASK_MASK_M MAKEMASK(0xFFFF, 16)
+#define GLQF_PEMASK_SEL(_i) (0x00415500 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLQF_PEMASK_SEL_MAX_INDEX 31
+#define GLQF_PEMASK_SEL_MASK_SEL_S 0
+#define GLQF_PEMASK_SEL_MASK_SEL_M MAKEMASK(0xFFFF, 0)
+#define GLQF_PETABLE_CLR(_i) (0x000AA078 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLQF_PETABLE_CLR_MAX_INDEX 1
+#define GLQF_PETABLE_CLR_VM_VF_NUM_S 0
+#define GLQF_PETABLE_CLR_VM_VF_NUM_M MAKEMASK(0x3FF, 0)
+#define GLQF_PETABLE_CLR_VM_VF_TYPE_S 10
+#define GLQF_PETABLE_CLR_VM_VF_TYPE_M MAKEMASK(0x3, 10)
+#define GLQF_PETABLE_CLR_PF_NUM_S 12
+#define GLQF_PETABLE_CLR_PF_NUM_M MAKEMASK(0x7, 12)
+#define GLQF_PETABLE_CLR_PE_BUSY_S 16
+#define GLQF_PETABLE_CLR_PE_BUSY_M BIT(16)
+#define GLQF_PETABLE_CLR_PE_CLEAR_S 17
+#define GLQF_PETABLE_CLR_PE_CLEAR_M BIT(17)
+#define GLQF_PROF2TC(_i, _j) (0x0044D000 + ((_i) * 4 + (_j) * 512)) /* _i=0...127, _j=0...3 */ /* Reset Source: CORER */
+#define GLQF_PROF2TC_MAX_INDEX 127
+#define GLQF_PROF2TC_OVERRIDE_ENA_0_S 0
+#define GLQF_PROF2TC_OVERRIDE_ENA_0_M BIT(0)
+#define GLQF_PROF2TC_REGION_0_S 1
+#define GLQF_PROF2TC_REGION_0_M MAKEMASK(0x7, 1)
+#define GLQF_PROF2TC_OVERRIDE_ENA_1_S 4
+#define GLQF_PROF2TC_OVERRIDE_ENA_1_M BIT(4)
+#define GLQF_PROF2TC_REGION_1_S 5
+#define GLQF_PROF2TC_REGION_1_M MAKEMASK(0x7, 5)
+#define GLQF_PROF2TC_OVERRIDE_ENA_2_S 8
+#define GLQF_PROF2TC_OVERRIDE_ENA_2_M BIT(8)
+#define GLQF_PROF2TC_REGION_2_S 9
+#define GLQF_PROF2TC_REGION_2_M MAKEMASK(0x7, 9)
+#define GLQF_PROF2TC_OVERRIDE_ENA_3_S 12
+#define GLQF_PROF2TC_OVERRIDE_ENA_3_M BIT(12)
+#define GLQF_PROF2TC_REGION_3_S 13
+#define GLQF_PROF2TC_REGION_3_M MAKEMASK(0x7, 13)
+#define GLQF_PROF2TC_OVERRIDE_ENA_4_S 16
+#define GLQF_PROF2TC_OVERRIDE_ENA_4_M BIT(16)
+#define GLQF_PROF2TC_REGION_4_S 17
+#define GLQF_PROF2TC_REGION_4_M MAKEMASK(0x7, 17)
+#define GLQF_PROF2TC_OVERRIDE_ENA_5_S 20
+#define GLQF_PROF2TC_OVERRIDE_ENA_5_M BIT(20)
+#define GLQF_PROF2TC_REGION_5_S 21
+#define GLQF_PROF2TC_REGION_5_M MAKEMASK(0x7, 21)
+#define GLQF_PROF2TC_OVERRIDE_ENA_6_S 24
+#define GLQF_PROF2TC_OVERRIDE_ENA_6_M BIT(24)
+#define GLQF_PROF2TC_REGION_6_S 25
+#define GLQF_PROF2TC_REGION_6_M MAKEMASK(0x7, 25)
+#define GLQF_PROF2TC_OVERRIDE_ENA_7_S 28
+#define GLQF_PROF2TC_OVERRIDE_ENA_7_M BIT(28)
+#define GLQF_PROF2TC_REGION_7_S 29
+#define GLQF_PROF2TC_REGION_7_M MAKEMASK(0x7, 29)
+#define PFQF_FD_CNT 0x00460180 /* Reset Source: CORER */
+#define PFQF_FD_CNT_FD_GCNT_S 0
+#define PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define PFQF_FD_CNT_FD_BCNT_S 16
+#define PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define PFQF_FD_ENA 0x0043A000 /* Reset Source: CORER */
+#define PFQF_FD_ENA_FD_ENA_S 0
+#define PFQF_FD_ENA_FD_ENA_M BIT(0)
+#define PFQF_FD_SIZE 0x00460100 /* Reset Source: CORER */
+#define PFQF_FD_SIZE_FD_GSIZE_S 0
+#define PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define PFQF_FD_SIZE_FD_BSIZE_S 16
+#define PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define PFQF_FD_SUBTRACT 0x00460200 /* Reset Source: CORER */
+#define PFQF_FD_SUBTRACT_FD_GCNT_S 0
+#define PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define PFQF_FD_SUBTRACT_FD_BCNT_S 16
+#define PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define PFQF_HLUT(_i) (0x00430000 + ((_i) * 64)) /* _i=0...511 */ /* Reset Source: CORER */
+#define PFQF_HLUT_MAX_INDEX 511
+#define PFQF_HLUT_LUT0_S 0
+#define PFQF_HLUT_LUT0_M MAKEMASK(0xFF, 0)
+#define PFQF_HLUT_LUT1_S 8
+#define PFQF_HLUT_LUT1_M MAKEMASK(0xFF, 8)
+#define PFQF_HLUT_LUT2_S 16
+#define PFQF_HLUT_LUT2_M MAKEMASK(0xFF, 16)
+#define PFQF_HLUT_LUT3_S 24
+#define PFQF_HLUT_LUT3_M MAKEMASK(0xFF, 24)
+#define PFQF_HLUT_SIZE 0x00455480 /* Reset Source: CORER */
+#define PFQF_HLUT_SIZE_HSIZE_S 0
+#define PFQF_HLUT_SIZE_HSIZE_M MAKEMASK(0x3, 0)
+#define PFQF_PE_CLSN0 0x00470480 /* Reset Source: CORER */
+#define PFQF_PE_CLSN0_HITSBCNT_S 0
+#define PFQF_PE_CLSN0_HITSBCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFQF_PE_CLSN1 0x00470500 /* Reset Source: CORER */
+#define PFQF_PE_CLSN1_HITLBCNT_S 0
+#define PFQF_PE_CLSN1_HITLBCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFQF_PE_CTL1 0x00470000 /* Reset Source: CORER */
+#define PFQF_PE_CTL1_PEHSIZE_S 0
+#define PFQF_PE_CTL1_PEHSIZE_M MAKEMASK(0xF, 0)
+#define PFQF_PE_CTL2 0x00470040 /* Reset Source: CORER */
+#define PFQF_PE_CTL2_PEDSIZE_S 0
+#define PFQF_PE_CTL2_PEDSIZE_M MAKEMASK(0xF, 0)
+#define PFQF_PE_FILTERING_ENA 0x0043A080 /* Reset Source: CORER */
+#define PFQF_PE_FILTERING_ENA_PE_ENA_S 0
+#define PFQF_PE_FILTERING_ENA_PE_ENA_M BIT(0)
+#define PFQF_PE_FLHD 0x00470100 /* Reset Source: CORER */
+#define PFQF_PE_FLHD_FLHD_S 0
+#define PFQF_PE_FLHD_FLHD_M MAKEMASK(0xFFFFFF, 0)
+#define PFQF_PE_ST_CTL 0x00470400 /* Reset Source: CORER */
+#define PFQF_PE_ST_CTL_PF_CNT_EN_S 0
+#define PFQF_PE_ST_CTL_PF_CNT_EN_M BIT(0)
+#define PFQF_PE_ST_CTL_VFS_CNT_EN_S 1
+#define PFQF_PE_ST_CTL_VFS_CNT_EN_M BIT(1)
+#define PFQF_PE_ST_CTL_VF_CNT_EN_S 2
+#define PFQF_PE_ST_CTL_VF_CNT_EN_M BIT(2)
+#define PFQF_PE_ST_CTL_VF_NUM_S 16
+#define PFQF_PE_ST_CTL_VF_NUM_M MAKEMASK(0xFF, 16)
+#define PFQF_PE_TC_CTL 0x00452080 /* Reset Source: CORER */
+#define PFQF_PE_TC_CTL_TC_EN_PF_S 0
+#define PFQF_PE_TC_CTL_TC_EN_PF_M MAKEMASK(0xFF, 0)
+#define PFQF_PE_TC_CTL_TC_EN_VF_S 16
+#define PFQF_PE_TC_CTL_TC_EN_VF_M MAKEMASK(0xFF, 16)
+#define PFQF_PECNT_0 0x00470200 /* Reset Source: CORER */
+#define PFQF_PECNT_0_BUCKETCNT_S 0
+#define PFQF_PECNT_0_BUCKETCNT_M MAKEMASK(0x3FFFF, 0)
+#define PFQF_PECNT_1 0x00470300 /* Reset Source: CORER */
+#define PFQF_PECNT_1_FLTCNT_S 0
+#define PFQF_PECNT_1_FLTCNT_M MAKEMASK(0x3FFFF, 0)
+#define VPQF_PE_CTL1(_VF) (0x00474000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPQF_PE_CTL1_MAX_INDEX 255
+#define VPQF_PE_CTL1_PEHSIZE_S 0
+#define VPQF_PE_CTL1_PEHSIZE_M MAKEMASK(0xF, 0)
+#define VPQF_PE_CTL2(_VF) (0x00474800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPQF_PE_CTL2_MAX_INDEX 255
+#define VPQF_PE_CTL2_PEDSIZE_S 0
+#define VPQF_PE_CTL2_PEDSIZE_M MAKEMASK(0xF, 0)
+#define VPQF_PE_FILTERING_ENA(_VF) (0x00455800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPQF_PE_FILTERING_ENA_MAX_INDEX 255
+#define VPQF_PE_FILTERING_ENA_PE_ENA_S 0
+#define VPQF_PE_FILTERING_ENA_PE_ENA_M BIT(0)
+#define VPQF_PE_FLHD(_VF) (0x00472000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPQF_PE_FLHD_MAX_INDEX 255
+#define VPQF_PE_FLHD_FLHD_S 0
+#define VPQF_PE_FLHD_FLHD_M MAKEMASK(0xFFFFFF, 0)
+#define VPQF_PECNT_0(_VF) (0x00472800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPQF_PECNT_0_MAX_INDEX 255
+#define VPQF_PECNT_0_BUCKETCNT_S 0
+#define VPQF_PECNT_0_BUCKETCNT_M MAKEMASK(0x3FFFF, 0)
+#define VPQF_PECNT_1(_VF) (0x00473000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VPQF_PECNT_1_MAX_INDEX 255
+#define VPQF_PECNT_1_FLTCNT_S 0
+#define VPQF_PECNT_1_FLTCNT_M MAKEMASK(0x3FFFF, 0)
+#define GLDCB_RMPMC 0x001223C8 /* Reset Source: CORER */
+#define GLDCB_RMPMC_RSPM_S 0
+#define GLDCB_RMPMC_RSPM_M MAKEMASK(0x3F, 0)
+#define GLDCB_RMPMC_MIQ_NODROP_MODE_S 6
+#define GLDCB_RMPMC_MIQ_NODROP_MODE_M MAKEMASK(0x1F, 6)
+#define GLDCB_RMPMC_RPM_DIS_S 31
+#define GLDCB_RMPMC_RPM_DIS_M BIT(31)
+#define GLDCB_RMPMS 0x001223CC /* Reset Source: CORER */
+#define GLDCB_RMPMS_RMPM_S 0
+#define GLDCB_RMPMS_RMPM_M MAKEMASK(0xFFFF, 0)
+#define GLDCB_RPCC 0x00122260 /* Reset Source: CORER */
+#define GLDCB_RPCC_EN_S 0
+#define GLDCB_RPCC_EN_M BIT(0)
+#define GLDCB_RPCC_SCL_FACT_S 4
+#define GLDCB_RPCC_SCL_FACT_M MAKEMASK(0x1F, 4)
+#define GLDCB_RPCC_THRSH_S 16
+#define GLDCB_RPCC_THRSH_M MAKEMASK(0xFFF, 16)
+#define GLDCB_RSPMC 0x001223C4 /* Reset Source: CORER */
+#define GLDCB_RSPMC_RSPM_S 0
+#define GLDCB_RSPMC_RSPM_M MAKEMASK(0xFF, 0)
+#define GLDCB_RSPMC_RPM_MODE_S 8
+#define GLDCB_RSPMC_RPM_MODE_M MAKEMASK(0x3, 8)
+#define GLDCB_RSPMC_PRR_MAX_EXP_S 10
+#define GLDCB_RSPMC_PRR_MAX_EXP_M MAKEMASK(0xF, 10)
+#define GLDCB_RSPMC_PFCTIMER_S 14
+#define GLDCB_RSPMC_PFCTIMER_M MAKEMASK(0x3FFF, 14)
+#define GLDCB_RSPMC_RPM_DIS_S 31
+#define GLDCB_RSPMC_RPM_DIS_M BIT(31)
+#define GLDCB_RSPMS 0x001223C0 /* Reset Source: CORER */
+#define GLDCB_RSPMS_RSPM_S 0
+#define GLDCB_RSPMS_RSPM_M MAKEMASK(0x3FFFF, 0)
+#define GLDCB_RTCTI 0x001223D0 /* Reset Source: CORER */
+#define GLDCB_RTCTI_PFCTIMEOUT_TC_S 0
+#define GLDCB_RTCTI_PFCTIMEOUT_TC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLDCB_RTCTQ(_i) (0x001222C0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLDCB_RTCTQ_MAX_INDEX 31
+#define GLDCB_RTCTQ_RXQNUM_S 0
+#define GLDCB_RTCTQ_RXQNUM_M MAKEMASK(0x7FF, 0)
+#define GLDCB_RTCTQ_IS_PF_Q_S 16
+#define GLDCB_RTCTQ_IS_PF_Q_M BIT(16)
+#define GLDCB_RTCTS(_i) (0x00122340 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLDCB_RTCTS_MAX_INDEX 31
+#define GLDCB_RTCTS_PFCTIMER_S 0
+#define GLDCB_RTCTS_PFCTIMER_M MAKEMASK(0x3FFF, 0)
+#define GLRCB_CFG_COTF_CNT(_i) (0x001223D4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRCB_CFG_COTF_CNT_MAX_INDEX 7
+#define GLRCB_CFG_COTF_CNT_MRKR_COTF_CNT_S 0
+#define GLRCB_CFG_COTF_CNT_MRKR_COTF_CNT_M MAKEMASK(0x3F, 0)
+#define GLRCB_CFG_COTF_ST 0x001223F4 /* Reset Source: CORER */
+#define GLRCB_CFG_COTF_ST_MRKR_COTF_ST_S 0
+#define GLRCB_CFG_COTF_ST_MRKR_COTF_ST_M MAKEMASK(0xFF, 0)
+#define GLRPRS_PMCFG_DHW(_i) (0x00200388 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_DHW_MAX_INDEX 15
+#define GLRPRS_PMCFG_DHW_DHW_S 0
+#define GLRPRS_PMCFG_DHW_DHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_DLW(_i) (0x002003C8 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_DLW_MAX_INDEX 15
+#define GLRPRS_PMCFG_DLW_DLW_S 0
+#define GLRPRS_PMCFG_DLW_DLW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_DPS(_i) (0x00200308 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_DPS_MAX_INDEX 15
+#define GLRPRS_PMCFG_DPS_DPS_S 0
+#define GLRPRS_PMCFG_DPS_DPS_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_SHW(_i) (0x00200448 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_SHW_MAX_INDEX 7
+#define GLRPRS_PMCFG_SHW_SHW_S 0
+#define GLRPRS_PMCFG_SHW_SHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_SLW(_i) (0x00200468 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_SLW_MAX_INDEX 7
+#define GLRPRS_PMCFG_SLW_SLW_S 0
+#define GLRPRS_PMCFG_SLW_SLW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_SPS(_i) (0x00200408 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_SPS_MAX_INDEX 7
+#define GLRPRS_PMCFG_SPS_SPS_S 0
+#define GLRPRS_PMCFG_SPS_SPS_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_TC_CFG(_i) (0x00200488 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_TC_CFG_MAX_INDEX 31
+#define GLRPRS_PMCFG_TC_CFG_D_POOL_S 0
+#define GLRPRS_PMCFG_TC_CFG_D_POOL_M MAKEMASK(0xF, 0)
+#define GLRPRS_PMCFG_TC_CFG_S_POOL_S 16
+#define GLRPRS_PMCFG_TC_CFG_S_POOL_M MAKEMASK(0x7, 16)
+#define GLRPRS_PMCFG_TCHW(_i) (0x00200588 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_TCHW_MAX_INDEX 31
+#define GLRPRS_PMCFG_TCHW_TCHW_S 0
+#define GLRPRS_PMCFG_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPRS_PMCFG_TCLW(_i) (0x00200608 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLRPRS_PMCFG_TCLW_MAX_INDEX 31
+#define GLRPRS_PMCFG_TCLW_TCLW_S 0
+#define GLRPRS_PMCFG_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0)
+#define GLSWT_PMCFG_TC_CFG(_i) (0x00204900 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSWT_PMCFG_TC_CFG_MAX_INDEX 31
+#define GLSWT_PMCFG_TC_CFG_D_POOL_S 0
+#define GLSWT_PMCFG_TC_CFG_D_POOL_M MAKEMASK(0xF, 0)
+#define GLSWT_PMCFG_TC_CFG_S_POOL_S 16
+#define GLSWT_PMCFG_TC_CFG_S_POOL_M MAKEMASK(0x7, 16)
+#define PRTDCB_RLANPMS 0x00122280 /* Reset Source: CORER */
+#define PRTDCB_RLANPMS_LANRPPM_S 0
+#define PRTDCB_RLANPMS_LANRPPM_M MAKEMASK(0x3FFFF, 0)
+#define PRTDCB_RPPMC 0x00122240 /* Reset Source: CORER */
+#define PRTDCB_RPPMC_LANRPPM_S 0
+#define PRTDCB_RPPMC_LANRPPM_M MAKEMASK(0xFF, 0)
+#define PRTDCB_RPPMC_RDMARPPM_S 8
+#define PRTDCB_RPPMC_RDMARPPM_M MAKEMASK(0xFF, 8)
+#define PRTDCB_RRDMAPMS 0x00122120 /* Reset Source: CORER */
+#define PRTDCB_RRDMAPMS_RDMARPPM_S 0
+#define PRTDCB_RRDMAPMS_RDMARPPM_M MAKEMASK(0x3FFFF, 0)
+#define GL_STAT_SWR_BPCH(_i) (0x00347804 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_BPCH_MAX_INDEX 127
+#define GL_STAT_SWR_BPCH_VLBPCH_S 0
+#define GL_STAT_SWR_BPCH_VLBPCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_BPCL(_i) (0x00347800 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_BPCL_MAX_INDEX 127
+#define GL_STAT_SWR_BPCL_VLBPCL_S 0
+#define GL_STAT_SWR_BPCL_VLBPCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_STAT_SWR_GORCH(_i) (0x00342004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_GORCH_MAX_INDEX 127
+#define GL_STAT_SWR_GORCH_VLBCH_S 0
+#define GL_STAT_SWR_GORCH_VLBCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_GORCL(_i) (0x00342000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_GORCL_MAX_INDEX 127
+#define GL_STAT_SWR_GORCL_VLBCL_S 0
+#define GL_STAT_SWR_GORCL_VLBCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_STAT_SWR_GOTCH(_i) (0x00304004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_GOTCH_MAX_INDEX 127
+#define GL_STAT_SWR_GOTCH_VLBCH_S 0
+#define GL_STAT_SWR_GOTCH_VLBCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_GOTCL(_i) (0x00304000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_GOTCL_MAX_INDEX 127
+#define GL_STAT_SWR_GOTCL_VLBCL_S 0
+#define GL_STAT_SWR_GOTCL_VLBCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_STAT_SWR_MPCH(_i) (0x00347404 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_MPCH_MAX_INDEX 127
+#define GL_STAT_SWR_MPCH_VLMPCH_S 0
+#define GL_STAT_SWR_MPCH_VLMPCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_MPCL(_i) (0x00347400 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_MPCL_MAX_INDEX 127
+#define GL_STAT_SWR_MPCL_VLMPCL_S 0
+#define GL_STAT_SWR_MPCL_VLMPCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_STAT_SWR_UPCH(_i) (0x00347004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_UPCH_MAX_INDEX 127
+#define GL_STAT_SWR_UPCH_VLUPCH_S 0
+#define GL_STAT_SWR_UPCH_VLUPCH_M MAKEMASK(0xFF, 0)
+#define GL_STAT_SWR_UPCL(_i) (0x00347000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define GL_STAT_SWR_UPCL_MAX_INDEX 127
+#define GL_STAT_SWR_UPCL_VLUPCL_S 0
+#define GL_STAT_SWR_UPCL_VLUPCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_AORCL(_i) (0x003812C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_AORCL_MAX_INDEX 7
+#define GLPRT_AORCL_AORCL_S 0
+#define GLPRT_AORCL_AORCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_BPRCH_MAX_INDEX 7
+#define GLPRT_BPRCH_UPRCH_S 0
+#define GLPRT_BPRCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_BPRCL_MAX_INDEX 7
+#define GLPRT_BPRCL_UPRCH_S 0
+#define GLPRT_BPRCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_BPTCH_MAX_INDEX 7
+#define GLPRT_BPTCH_UPRCH_S 0
+#define GLPRT_BPTCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_BPTCL_MAX_INDEX 7
+#define GLPRT_BPTCL_UPRCH_S 0
+#define GLPRT_BPTCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_CRCERRS_MAX_INDEX 7
+#define GLPRT_CRCERRS_CRCERRS_S 0
+#define GLPRT_CRCERRS_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_CRCERRS_H(_i) (0x00380104 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_CRCERRS_H_MAX_INDEX 7
+#define GLPRT_CRCERRS_H_CRCERRS_S 0
+#define GLPRT_CRCERRS_H_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_GORCH_MAX_INDEX 7
+#define GLPRT_GORCH_GORCH_S 0
+#define GLPRT_GORCH_GORCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_GORCL_MAX_INDEX 7
+#define GLPRT_GORCL_GORCL_S 0
+#define GLPRT_GORCL_GORCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_GOTCH_MAX_INDEX 7
+#define GLPRT_GOTCH_GOTCH_S 0
+#define GLPRT_GOTCH_GOTCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_GOTCL_MAX_INDEX 7
+#define GLPRT_GOTCL_GOTCL_S 0
+#define GLPRT_GOTCL_GOTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_ILLERRC_MAX_INDEX 7
+#define GLPRT_ILLERRC_ILLERRC_S 0
+#define GLPRT_ILLERRC_ILLERRC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_ILLERRC_H(_i) (0x003801C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_ILLERRC_H_MAX_INDEX 7
+#define GLPRT_ILLERRC_H_ILLERRC_S 0
+#define GLPRT_ILLERRC_H_ILLERRC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXOFFRXC_MAX_INDEX 7
+#define GLPRT_LXOFFRXC_LXOFFRXCNT_S 0
+#define GLPRT_LXOFFRXC_LXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXOFFRXC_H(_i) (0x003802C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXOFFRXC_H_MAX_INDEX 7
+#define GLPRT_LXOFFRXC_H_LXOFFRXCNT_S 0
+#define GLPRT_LXOFFRXC_H_LXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXOFFTXC_MAX_INDEX 7
+#define GLPRT_LXOFFTXC_LXOFFTXC_S 0
+#define GLPRT_LXOFFTXC_LXOFFTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXOFFTXC_H(_i) (0x00381184 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXOFFTXC_H_MAX_INDEX 7
+#define GLPRT_LXOFFTXC_H_LXOFFTXC_S 0
+#define GLPRT_LXOFFTXC_H_LXOFFTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXONRXC_MAX_INDEX 7
+#define GLPRT_LXONRXC_LXONRXCNT_S 0
+#define GLPRT_LXONRXC_LXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXONRXC_H(_i) (0x00380284 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXONRXC_H_MAX_INDEX 7
+#define GLPRT_LXONRXC_H_LXONRXCNT_S 0
+#define GLPRT_LXONRXC_H_LXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXONTXC_MAX_INDEX 7
+#define GLPRT_LXONTXC_LXONTXC_S 0
+#define GLPRT_LXONTXC_LXONTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_LXONTXC_H(_i) (0x00381144 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_LXONTXC_H_MAX_INDEX 7
+#define GLPRT_LXONTXC_H_LXONTXC_S 0
+#define GLPRT_LXONTXC_H_LXONTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_MLFC_MAX_INDEX 7
+#define GLPRT_MLFC_MLFC_S 0
+#define GLPRT_MLFC_MLFC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_MLFC_H(_i) (0x00380044 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_MLFC_H_MAX_INDEX 7
+#define GLPRT_MLFC_H_MLFC_S 0
+#define GLPRT_MLFC_H_MLFC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_MPRCH_MAX_INDEX 7
+#define GLPRT_MPRCH_MPRCH_S 0
+#define GLPRT_MPRCH_MPRCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_MPRCL_MAX_INDEX 7
+#define GLPRT_MPRCL_MPRCL_S 0
+#define GLPRT_MPRCL_MPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_MPTCH_MAX_INDEX 7
+#define GLPRT_MPTCH_MPTCH_S 0
+#define GLPRT_MPTCH_MPTCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_MPTCL_MAX_INDEX 7
+#define GLPRT_MPTCL_MPTCL_S 0
+#define GLPRT_MPTCL_MPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_MRFC_MAX_INDEX 7
+#define GLPRT_MRFC_MRFC_S 0
+#define GLPRT_MRFC_MRFC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_MRFC_H(_i) (0x00380084 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_MRFC_H_MAX_INDEX 7
+#define GLPRT_MRFC_H_MRFC_S 0
+#define GLPRT_MRFC_H_MRFC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC1023H_MAX_INDEX 7
+#define GLPRT_PRC1023H_PRC1023H_S 0
+#define GLPRT_PRC1023H_PRC1023H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC1023L_MAX_INDEX 7
+#define GLPRT_PRC1023L_PRC1023L_S 0
+#define GLPRT_PRC1023L_PRC1023L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC127H_MAX_INDEX 7
+#define GLPRT_PRC127H_PRC127H_S 0
+#define GLPRT_PRC127H_PRC127H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC127L_MAX_INDEX 7
+#define GLPRT_PRC127L_PRC127L_S 0
+#define GLPRT_PRC127L_PRC127L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC1522H_MAX_INDEX 7
+#define GLPRT_PRC1522H_PRC1522H_S 0
+#define GLPRT_PRC1522H_PRC1522H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC1522L_MAX_INDEX 7
+#define GLPRT_PRC1522L_PRC1522L_S 0
+#define GLPRT_PRC1522L_PRC1522L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC255H_MAX_INDEX 7
+#define GLPRT_PRC255H_PRTPRC255H_S 0
+#define GLPRT_PRC255H_PRTPRC255H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC255L_MAX_INDEX 7
+#define GLPRT_PRC255L_PRC255L_S 0
+#define GLPRT_PRC255L_PRC255L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC511H_MAX_INDEX 7
+#define GLPRT_PRC511H_PRC511H_S 0
+#define GLPRT_PRC511H_PRC511H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC511L_MAX_INDEX 7
+#define GLPRT_PRC511L_PRC511L_S 0
+#define GLPRT_PRC511L_PRC511L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC64H_MAX_INDEX 7
+#define GLPRT_PRC64H_PRC64H_S 0
+#define GLPRT_PRC64H_PRC64H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC64L_MAX_INDEX 7
+#define GLPRT_PRC64L_PRC64L_S 0
+#define GLPRT_PRC64L_PRC64L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC9522H_MAX_INDEX 7
+#define GLPRT_PRC9522H_PRC1522H_S 0
+#define GLPRT_PRC9522H_PRC1522H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PRC9522L_MAX_INDEX 7
+#define GLPRT_PRC9522L_PRC1522L_S 0
+#define GLPRT_PRC9522L_PRC1522L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC1023H_MAX_INDEX 7
+#define GLPRT_PTC1023H_PTC1023H_S 0
+#define GLPRT_PTC1023H_PTC1023H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC1023L_MAX_INDEX 7
+#define GLPRT_PTC1023L_PTC1023L_S 0
+#define GLPRT_PTC1023L_PTC1023L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC127H_MAX_INDEX 7
+#define GLPRT_PTC127H_PTC127H_S 0
+#define GLPRT_PTC127H_PTC127H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC127L_MAX_INDEX 7
+#define GLPRT_PTC127L_PTC127L_S 0
+#define GLPRT_PTC127L_PTC127L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC1522H_MAX_INDEX 7
+#define GLPRT_PTC1522H_PTC1522H_S 0
+#define GLPRT_PTC1522H_PTC1522H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC1522L_MAX_INDEX 7
+#define GLPRT_PTC1522L_PTC1522L_S 0
+#define GLPRT_PTC1522L_PTC1522L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC255H_MAX_INDEX 7
+#define GLPRT_PTC255H_PTC255H_S 0
+#define GLPRT_PTC255H_PTC255H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC255L_MAX_INDEX 7
+#define GLPRT_PTC255L_PTC255L_S 0
+#define GLPRT_PTC255L_PTC255L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC511H_MAX_INDEX 7
+#define GLPRT_PTC511H_PTC511H_S 0
+#define GLPRT_PTC511H_PTC511H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC511L_MAX_INDEX 7
+#define GLPRT_PTC511L_PTC511L_S 0
+#define GLPRT_PTC511L_PTC511L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC64H_MAX_INDEX 7
+#define GLPRT_PTC64H_PTC64H_S 0
+#define GLPRT_PTC64H_PTC64H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC64L_MAX_INDEX 7
+#define GLPRT_PTC64L_PTC64L_S 0
+#define GLPRT_PTC64L_PTC64L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC9522H_MAX_INDEX 7
+#define GLPRT_PTC9522H_PTC9522H_S 0
+#define GLPRT_PTC9522H_PTC9522H_M MAKEMASK(0xFF, 0)
+#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PTC9522L_MAX_INDEX 7
+#define GLPRT_PTC9522L_PTC9522L_S 0
+#define GLPRT_PTC9522L_PTC9522L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PXOFFRXC_MAX_INDEX 7
+#define GLPRT_PXOFFRXC_PRPXOFFRXCNT_S 0
+#define GLPRT_PXOFFRXC_PRPXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PXOFFRXC_H(_i, _j) (0x00380504 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PXOFFRXC_H_MAX_INDEX 7
+#define GLPRT_PXOFFRXC_H_PRPXOFFRXCNT_S 0
+#define GLPRT_PXOFFRXC_H_PRPXOFFRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PXOFFTXC_MAX_INDEX 7
+#define GLPRT_PXOFFTXC_PRPXOFFTXCNT_S 0
+#define GLPRT_PXOFFTXC_PRPXOFFTXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PXOFFTXC_H(_i, _j) (0x00380F44 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PXOFFTXC_H_MAX_INDEX 7
+#define GLPRT_PXOFFTXC_H_PRPXOFFTXCNT_S 0
+#define GLPRT_PXOFFTXC_H_PRPXOFFTXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PXONRXC(_i, _j) (0x00380300 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PXONRXC_MAX_INDEX 7
+#define GLPRT_PXONRXC_PRPXONRXCNT_S 0
+#define GLPRT_PXONRXC_PRPXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PXONRXC_H(_i, _j) (0x00380304 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PXONRXC_H_MAX_INDEX 7
+#define GLPRT_PXONRXC_H_PRPXONRXCNT_S 0
+#define GLPRT_PXONRXC_H_PRPXONRXCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PXONTXC(_i, _j) (0x00380D40 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PXONTXC_MAX_INDEX 7
+#define GLPRT_PXONTXC_PRPXONTXC_S 0
+#define GLPRT_PXONTXC_PRPXONTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_PXONTXC_H(_i, _j) (0x00380D44 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_PXONTXC_H_MAX_INDEX 7
+#define GLPRT_PXONTXC_H_PRPXONTXC_S 0
+#define GLPRT_PXONTXC_H_PRPXONTXC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RFC_MAX_INDEX 7
+#define GLPRT_RFC_RFC_S 0
+#define GLPRT_RFC_RFC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RFC_H(_i) (0x00380AC4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RFC_H_MAX_INDEX 7
+#define GLPRT_RFC_H_RFC_S 0
+#define GLPRT_RFC_H_RFC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RJC_MAX_INDEX 7
+#define GLPRT_RJC_RJC_S 0
+#define GLPRT_RJC_RJC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RJC_H(_i) (0x00380B04 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RJC_H_MAX_INDEX 7
+#define GLPRT_RJC_H_RJC_S 0
+#define GLPRT_RJC_H_RJC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RLEC_MAX_INDEX 7
+#define GLPRT_RLEC_RLEC_S 0
+#define GLPRT_RLEC_RLEC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RLEC_H(_i) (0x00380144 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RLEC_H_MAX_INDEX 7
+#define GLPRT_RLEC_H_RLEC_S 0
+#define GLPRT_RLEC_H_RLEC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_ROC_MAX_INDEX 7
+#define GLPRT_ROC_ROC_S 0
+#define GLPRT_ROC_ROC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_ROC_H(_i) (0x00380244 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_ROC_H_MAX_INDEX 7
+#define GLPRT_ROC_H_ROC_S 0
+#define GLPRT_ROC_H_ROC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RUC_MAX_INDEX 7
+#define GLPRT_RUC_RUC_S 0
+#define GLPRT_RUC_RUC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RUC_H(_i) (0x00380204 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RUC_H_MAX_INDEX 7
+#define GLPRT_RUC_H_RUC_S 0
+#define GLPRT_RUC_H_RUC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RXON2OFFCNT_MAX_INDEX 7
+#define GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_S 0
+#define GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_RXON2OFFCNT_H(_i, _j) (0x00380704 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...7 */ /* Reset Source: CORER */
+#define GLPRT_RXON2OFFCNT_H_MAX_INDEX 7
+#define GLPRT_RXON2OFFCNT_H_PRRXON2OFFCNT_S 0
+#define GLPRT_RXON2OFFCNT_H_PRRXON2OFFCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_STDC(_i) (0x00340000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_STDC_MAX_INDEX 7
+#define GLPRT_STDC_STDC_S 0
+#define GLPRT_STDC_STDC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_TDOLD_MAX_INDEX 7
+#define GLPRT_TDOLD_GLPRT_TDOLD_S 0
+#define GLPRT_TDOLD_GLPRT_TDOLD_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_TDOLD_H(_i) (0x00381284 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_TDOLD_H_MAX_INDEX 7
+#define GLPRT_TDOLD_H_GLPRT_TDOLD_S 0
+#define GLPRT_TDOLD_H_GLPRT_TDOLD_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_UPRCH_MAX_INDEX 7
+#define GLPRT_UPRCH_UPRCH_S 0
+#define GLPRT_UPRCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_UPRCL_MAX_INDEX 7
+#define GLPRT_UPRCL_UPRCL_S 0
+#define GLPRT_UPRCL_UPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_UPTCH_MAX_INDEX 7
+#define GLPRT_UPTCH_UPTCH_S 0
+#define GLPRT_UPTCH_UPTCH_M MAKEMASK(0xFF, 0)
+#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
+#define GLPRT_UPTCL_MAX_INDEX 7
+#define GLPRT_UPTCL_VUPTCH_S 0
+#define GLPRT_UPTCL_VUPTCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSTAT_ACL_CNT_0_H(_i) (0x00388004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLSTAT_ACL_CNT_0_H_MAX_INDEX 511
+#define GLSTAT_ACL_CNT_0_H_CNT_MSB_S 0
+#define GLSTAT_ACL_CNT_0_H_CNT_MSB_M MAKEMASK(0xFF, 0)
+#define GLSTAT_ACL_CNT_0_L(_i) (0x00388000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLSTAT_ACL_CNT_0_L_MAX_INDEX 511
+#define GLSTAT_ACL_CNT_0_L_CNT_LSB_S 0
+#define GLSTAT_ACL_CNT_0_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSTAT_ACL_CNT_1_H(_i) (0x00389004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLSTAT_ACL_CNT_1_H_MAX_INDEX 511
+#define GLSTAT_ACL_CNT_1_H_CNT_MSB_S 0
+#define GLSTAT_ACL_CNT_1_H_CNT_MSB_M MAKEMASK(0xFF, 0)
+#define GLSTAT_ACL_CNT_1_L(_i) (0x00389000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLSTAT_ACL_CNT_1_L_MAX_INDEX 511
+#define GLSTAT_ACL_CNT_1_L_CNT_LSB_S 0
+#define GLSTAT_ACL_CNT_1_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSTAT_ACL_CNT_2_H(_i) (0x0038A004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLSTAT_ACL_CNT_2_H_MAX_INDEX 511
+#define GLSTAT_ACL_CNT_2_H_CNT_MSB_S 0
+#define GLSTAT_ACL_CNT_2_H_CNT_MSB_M MAKEMASK(0xFF, 0)
+#define GLSTAT_ACL_CNT_2_L(_i) (0x0038A000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLSTAT_ACL_CNT_2_L_MAX_INDEX 511
+#define GLSTAT_ACL_CNT_2_L_CNT_LSB_S 0
+#define GLSTAT_ACL_CNT_2_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSTAT_ACL_CNT_3_H(_i) (0x0038B004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLSTAT_ACL_CNT_3_H_MAX_INDEX 511
+#define GLSTAT_ACL_CNT_3_H_CNT_MSB_S 0
+#define GLSTAT_ACL_CNT_3_H_CNT_MSB_M MAKEMASK(0xFF, 0)
+#define GLSTAT_ACL_CNT_3_L(_i) (0x0038B000 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
+#define GLSTAT_ACL_CNT_3_L_MAX_INDEX 511
+#define GLSTAT_ACL_CNT_3_L_CNT_LSB_S 0
+#define GLSTAT_ACL_CNT_3_L_CNT_LSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSTAT_FD_CNT0H(_i) (0x003A0004 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */
+#define GLSTAT_FD_CNT0H_MAX_INDEX 4095
+#define GLSTAT_FD_CNT0H_FD0_CNT_H_S 0
+#define GLSTAT_FD_CNT0H_FD0_CNT_H_M MAKEMASK(0xFF, 0)
+#define GLSTAT_FD_CNT0L(_i) (0x003A0000 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */
+#define GLSTAT_FD_CNT0L_MAX_INDEX 4095
+#define GLSTAT_FD_CNT0L_FD0_CNT_L_S 0
+#define GLSTAT_FD_CNT0L_FD0_CNT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSTAT_FD_CNT1H(_i) (0x003A8004 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */
+#define GLSTAT_FD_CNT1H_MAX_INDEX 4095
+#define GLSTAT_FD_CNT1H_FD0_CNT_H_S 0
+#define GLSTAT_FD_CNT1H_FD0_CNT_H_M MAKEMASK(0xFF, 0)
+#define GLSTAT_FD_CNT1L(_i) (0x003A8000 + ((_i) * 8)) /* _i=0...4095 */ /* Reset Source: CORER */
+#define GLSTAT_FD_CNT1L_MAX_INDEX 4095
+#define GLSTAT_FD_CNT1L_FD0_CNT_L_S 0
+#define GLSTAT_FD_CNT1L_FD0_CNT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSW_BPRCH(_i) (0x00346204 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_BPRCH_MAX_INDEX 31
+#define GLSW_BPRCH_BPRCH_S 0
+#define GLSW_BPRCH_BPRCH_M MAKEMASK(0xFF, 0)
+#define GLSW_BPRCL(_i) (0x00346200 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_BPRCL_MAX_INDEX 31
+#define GLSW_BPRCL_BPRCL_S 0
+#define GLSW_BPRCL_BPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSW_BPTCH(_i) (0x00310204 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_BPTCH_MAX_INDEX 31
+#define GLSW_BPTCH_BPTCH_S 0
+#define GLSW_BPTCH_BPTCH_M MAKEMASK(0xFF, 0)
+#define GLSW_BPTCL(_i) (0x00310200 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_BPTCL_MAX_INDEX 31
+#define GLSW_BPTCL_BPTCL_S 0
+#define GLSW_BPTCL_BPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSW_GORCH(_i) (0x00341004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_GORCH_MAX_INDEX 31
+#define GLSW_GORCH_GORCH_S 0
+#define GLSW_GORCH_GORCH_M MAKEMASK(0xFF, 0)
+#define GLSW_GORCL(_i) (0x00341000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_GORCL_MAX_INDEX 31
+#define GLSW_GORCL_GORCL_S 0
+#define GLSW_GORCL_GORCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSW_GOTCH(_i) (0x00302004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_GOTCH_MAX_INDEX 31
+#define GLSW_GOTCH_GOTCH_S 0
+#define GLSW_GOTCH_GOTCH_M MAKEMASK(0xFF, 0)
+#define GLSW_GOTCL(_i) (0x00302000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_GOTCL_MAX_INDEX 31
+#define GLSW_GOTCL_GOTCL_S 0
+#define GLSW_GOTCL_GOTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSW_MPRCH(_i) (0x00346104 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_MPRCH_MAX_INDEX 31
+#define GLSW_MPRCH_MPRCH_S 0
+#define GLSW_MPRCH_MPRCH_M MAKEMASK(0xFF, 0)
+#define GLSW_MPRCL(_i) (0x00346100 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_MPRCL_MAX_INDEX 31
+#define GLSW_MPRCL_MPRCL_S 0
+#define GLSW_MPRCL_MPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSW_MPTCH(_i) (0x00310104 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_MPTCH_MAX_INDEX 31
+#define GLSW_MPTCH_MPTCH_S 0
+#define GLSW_MPTCH_MPTCH_M MAKEMASK(0xFF, 0)
+#define GLSW_MPTCL(_i) (0x00310100 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_MPTCL_MAX_INDEX 31
+#define GLSW_MPTCL_MPTCL_S 0
+#define GLSW_MPTCL_MPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSW_UPRCH(_i) (0x00346004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_UPRCH_MAX_INDEX 31
+#define GLSW_UPRCH_UPRCH_S 0
+#define GLSW_UPRCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define GLSW_UPRCL(_i) (0x00346000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_UPRCL_MAX_INDEX 31
+#define GLSW_UPRCL_UPRCL_S 0
+#define GLSW_UPRCL_UPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSW_UPTCH(_i) (0x00310004 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_UPTCH_MAX_INDEX 31
+#define GLSW_UPTCH_UPTCH_S 0
+#define GLSW_UPTCH_UPTCH_M MAKEMASK(0xFF, 0)
+#define GLSW_UPTCL(_i) (0x00310000 + ((_i) * 8)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GLSW_UPTCL_MAX_INDEX 31
+#define GLSW_UPTCL_UPTCL_S 0
+#define GLSW_UPTCL_UPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSWID_RUPP(_i) (0x00345000 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define GLSWID_RUPP_MAX_INDEX 255
+#define GLSWID_RUPP_RUPP_S 0
+#define GLSWID_RUPP_RUPP_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_BPRCH_MAX_INDEX 767
+#define GLV_BPRCH_BPRCH_S 0
+#define GLV_BPRCH_BPRCH_M MAKEMASK(0xFF, 0)
+#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_BPRCL_MAX_INDEX 767
+#define GLV_BPRCL_BPRCL_S 0
+#define GLV_BPRCL_BPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_BPTCH_MAX_INDEX 767
+#define GLV_BPTCH_BPTCH_S 0
+#define GLV_BPTCH_BPTCH_M MAKEMASK(0xFF, 0)
+#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_BPTCL_MAX_INDEX 767
+#define GLV_BPTCL_BPTCL_S 0
+#define GLV_BPTCL_BPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_GORCH_MAX_INDEX 767
+#define GLV_GORCH_GORCH_S 0
+#define GLV_GORCH_GORCH_M MAKEMASK(0xFF, 0)
+#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_GORCL_MAX_INDEX 767
+#define GLV_GORCL_GORCL_S 0
+#define GLV_GORCL_GORCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_GOTCH_MAX_INDEX 767
+#define GLV_GOTCH_GOTCH_S 0
+#define GLV_GOTCH_GOTCH_M MAKEMASK(0xFF, 0)
+#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_GOTCL_MAX_INDEX 767
+#define GLV_GOTCL_GOTCL_S 0
+#define GLV_GOTCL_GOTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_MPRCH_MAX_INDEX 767
+#define GLV_MPRCH_MPRCH_S 0
+#define GLV_MPRCH_MPRCH_M MAKEMASK(0xFF, 0)
+#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_MPRCL_MAX_INDEX 767
+#define GLV_MPRCL_MPRCL_S 0
+#define GLV_MPRCL_MPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_MPTCH_MAX_INDEX 767
+#define GLV_MPTCH_MPTCH_S 0
+#define GLV_MPTCH_MPTCH_M MAKEMASK(0xFF, 0)
+#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_MPTCL_MAX_INDEX 767
+#define GLV_MPTCL_MPTCL_S 0
+#define GLV_MPTCL_MPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_RDPC_MAX_INDEX 767
+#define GLV_RDPC_RDPC_S 0
+#define GLV_RDPC_RDPC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_REPC(_i) (0x00295804 + ((_i) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_REPC_MAX_INDEX 767
+#define GLV_REPC_NO_DESC_CNT_S 0
+#define GLV_REPC_NO_DESC_CNT_M MAKEMASK(0xFFFF, 0)
+#define GLV_REPC_ERROR_CNT_S 16
+#define GLV_REPC_ERROR_CNT_M MAKEMASK(0xFFFF, 16)
+#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_TEPC_MAX_INDEX 767
+#define GLV_TEPC_TEPC_S 0
+#define GLV_TEPC_TEPC_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_UPRCH_MAX_INDEX 767
+#define GLV_UPRCH_UPRCH_S 0
+#define GLV_UPRCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_UPRCL_MAX_INDEX 767
+#define GLV_UPRCL_UPRCL_S 0
+#define GLV_UPRCL_UPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_UPTCH_MAX_INDEX 767
+#define GLV_UPTCH_GLVUPTCH_S 0
+#define GLV_UPTCH_GLVUPTCH_M MAKEMASK(0xFF, 0)
+#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) /* _i=0...767 */ /* Reset Source: CORER */
+#define GLV_UPTCL_MAX_INDEX 767
+#define GLV_UPTCL_UPTCL_S 0
+#define GLV_UPTCL_UPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLVEBUP_RBCH(_i, _j) (0x00343004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */
+#define GLVEBUP_RBCH_MAX_INDEX 7
+#define GLVEBUP_RBCH_UPBCH_S 0
+#define GLVEBUP_RBCH_UPBCH_M MAKEMASK(0xFF, 0)
+#define GLVEBUP_RBCL(_i, _j) (0x00343000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */
+#define GLVEBUP_RBCL_MAX_INDEX 7
+#define GLVEBUP_RBCL_UPBCL_S 0
+#define GLVEBUP_RBCL_UPBCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLVEBUP_RPCH(_i, _j) (0x00344004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */
+#define GLVEBUP_RPCH_MAX_INDEX 7
+#define GLVEBUP_RPCH_UPPCH_S 0
+#define GLVEBUP_RPCH_UPPCH_M MAKEMASK(0xFF, 0)
+#define GLVEBUP_RPCL(_i, _j) (0x00344000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */
+#define GLVEBUP_RPCL_MAX_INDEX 7
+#define GLVEBUP_RPCL_UPPCL_S 0
+#define GLVEBUP_RPCL_UPPCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLVEBUP_TBCH(_i, _j) (0x00306004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */
+#define GLVEBUP_TBCH_MAX_INDEX 7
+#define GLVEBUP_TBCH_UPBCH_S 0
+#define GLVEBUP_TBCH_UPBCH_M MAKEMASK(0xFF, 0)
+#define GLVEBUP_TBCL(_i, _j) (0x00306000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */
+#define GLVEBUP_TBCL_MAX_INDEX 7
+#define GLVEBUP_TBCL_UPBCL_S 0
+#define GLVEBUP_TBCL_UPBCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLVEBUP_TPCH(_i, _j) (0x00308004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */
+#define GLVEBUP_TPCH_MAX_INDEX 7
+#define GLVEBUP_TPCH_UPPCH_S 0
+#define GLVEBUP_TPCH_UPPCH_M MAKEMASK(0xFF, 0)
+#define GLVEBUP_TPCL(_i, _j) (0x00308000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...31 */ /* Reset Source: CORER */
+#define GLVEBUP_TPCL_MAX_INDEX 7
+#define GLVEBUP_TPCL_UPPCL_S 0
+#define GLVEBUP_TPCL_UPPCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTRPB_LDPC 0x000AC280 /* Reset Source: CORER */
+#define PRTRPB_LDPC_CRCERRS_S 0
+#define PRTRPB_LDPC_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTRPB_RDPC 0x000AC260 /* Reset Source: CORER */
+#define PRTRPB_RDPC_CRCERRS_S 0
+#define PRTRPB_RDPC_CRCERRS_M MAKEMASK(0xFFFFFFFF, 0)
+#define PRTTPB_STAT_TC_BYTES_SENTL(_i) (0x00098200 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define PRTTPB_STAT_TC_BYTES_SENTL_MAX_INDEX 63
+#define PRTTPB_STAT_TC_BYTES_SENTL_TCCNT_S 0
+#define PRTTPB_STAT_TC_BYTES_SENTL_TCCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define TPB_PRTTPB_STAT_PKT_SENT(_i) (0x00099470 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define TPB_PRTTPB_STAT_PKT_SENT_MAX_INDEX 7
+#define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_S 0
+#define TPB_PRTTPB_STAT_PKT_SENT_PKTCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define TPB_PRTTPB_STAT_TC_BYTES_SENT(_i) (0x00099094 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define TPB_PRTTPB_STAT_TC_BYTES_SENT_MAX_INDEX 63
+#define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_S 0
+#define TPB_PRTTPB_STAT_TC_BYTES_SENT_TCCNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define EMP_SWT_PRUNIND 0x00204020 /* Reset Source: CORER */
+#define EMP_SWT_PRUNIND_OPCODE_S 0
+#define EMP_SWT_PRUNIND_OPCODE_M MAKEMASK(0xF, 0)
+#define EMP_SWT_PRUNIND_LIST_INDEX_NUM_S 4
+#define EMP_SWT_PRUNIND_LIST_INDEX_NUM_M MAKEMASK(0x3FF, 4)
+#define EMP_SWT_PRUNIND_VSI_NUM_S 16
+#define EMP_SWT_PRUNIND_VSI_NUM_M MAKEMASK(0x3FF, 16)
+#define EMP_SWT_PRUNIND_BIT_VALUE_S 31
+#define EMP_SWT_PRUNIND_BIT_VALUE_M BIT(31)
+#define EMP_SWT_REPIND 0x0020401C /* Reset Source: CORER */
+#define EMP_SWT_REPIND_OPCODE_S 0
+#define EMP_SWT_REPIND_OPCODE_M MAKEMASK(0xF, 0)
+#define EMP_SWT_REPIND_LIST_INDEX_NUMBER_S 4
+#define EMP_SWT_REPIND_LIST_INDEX_NUMBER_M MAKEMASK(0x3FF, 4)
+#define EMP_SWT_REPIND_VSI_NUM_S 16
+#define EMP_SWT_REPIND_VSI_NUM_M MAKEMASK(0x3FF, 16)
+#define EMP_SWT_REPIND_BIT_VALUE_S 31
+#define EMP_SWT_REPIND_BIT_VALUE_M BIT(31)
+#define GL_OVERRIDEC 0x002040A4 /* Reset Source: CORER */
+#define GL_OVERRIDEC_OVERRIDE_ATTEMPTC_S 0
+#define GL_OVERRIDEC_OVERRIDE_ATTEMPTC_M MAKEMASK(0xFFFF, 0)
+#define GL_OVERRIDEC_LAST_VSI_S 16
+#define GL_OVERRIDEC_LAST_VSI_M MAKEMASK(0x3FF, 16)
+#define GL_PLG_AVG_CALC_CFG 0x0020A5AC /* Reset Source: CORER */
+#define GL_PLG_AVG_CALC_CFG_CYCLE_LEN_S 0
+#define GL_PLG_AVG_CALC_CFG_CYCLE_LEN_M MAKEMASK(0x7FFFFFFF, 0)
+#define GL_PLG_AVG_CALC_CFG_MODE_S 31
+#define GL_PLG_AVG_CALC_CFG_MODE_M BIT(31)
+#define GL_PLG_AVG_CALC_ST 0x0020A5B0 /* Reset Source: CORER */
+#define GL_PLG_AVG_CALC_ST_IN_DATA_S 0
+#define GL_PLG_AVG_CALC_ST_IN_DATA_M MAKEMASK(0x7FFF, 0)
+#define GL_PLG_AVG_CALC_ST_OUT_DATA_S 16
+#define GL_PLG_AVG_CALC_ST_OUT_DATA_M MAKEMASK(0x7FFF, 16)
+#define GL_PLG_AVG_CALC_ST_VALID_S 31
+#define GL_PLG_AVG_CALC_ST_VALID_M BIT(31)
+#define GL_PRE_CFG_CMD 0x00214090 /* Reset Source: CORER */
+#define GL_PRE_CFG_CMD_ADDR_S 0
+#define GL_PRE_CFG_CMD_ADDR_M MAKEMASK(0x1FFF, 0)
+#define GL_PRE_CFG_CMD_TBLIDX_S 16
+#define GL_PRE_CFG_CMD_TBLIDX_M MAKEMASK(0x7, 16)
+#define GL_PRE_CFG_CMD_CMD_S 29
+#define GL_PRE_CFG_CMD_CMD_M BIT(29)
+#define GL_PRE_CFG_CMD_DONE_S 31
+#define GL_PRE_CFG_CMD_DONE_M BIT(31)
+#define GL_PRE_CFG_DATA(_i) (0x00214074 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */
+#define GL_PRE_CFG_DATA_MAX_INDEX 6
+#define GL_PRE_CFG_DATA_GL_PRE_RCP_DATA_S 0
+#define GL_PRE_CFG_DATA_GL_PRE_RCP_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_SWT_FUNCFILT 0x001D2698 /* Reset Source: CORER */
+#define GL_SWT_FUNCFILT_FUNCFILT_S 0
+#define GL_SWT_FUNCFILT_FUNCFILT_M BIT(0)
+#define GL_SWT_FW_STS(_i) (0x00216000 + ((_i) * 4)) /* _i=0...5 */ /* Reset Source: CORER */
+#define GL_SWT_FW_STS_MAX_INDEX 5
+#define GL_SWT_FW_STS_GL_SWT_FW_STS_S 0
+#define GL_SWT_FW_STS_GL_SWT_FW_STS_M MAKEMASK(0xFFFFFFFF, 0)
+#define GL_SWT_LAT_DOUBLE 0x00204004 /* Reset Source: CORER */
+#define GL_SWT_LAT_DOUBLE_BASE_S 0
+#define GL_SWT_LAT_DOUBLE_BASE_M MAKEMASK(0x7FF, 0)
+#define GL_SWT_LAT_DOUBLE_SIZE_S 16
+#define GL_SWT_LAT_DOUBLE_SIZE_M MAKEMASK(0x7FF, 16)
+#define GL_SWT_LAT_QUAD 0x00204008 /* Reset Source: CORER */
+#define GL_SWT_LAT_QUAD_BASE_S 0
+#define GL_SWT_LAT_QUAD_BASE_M MAKEMASK(0x7FF, 0)
+#define GL_SWT_LAT_QUAD_SIZE_S 16
+#define GL_SWT_LAT_QUAD_SIZE_M MAKEMASK(0x7FF, 16)
+#define GL_SWT_LAT_SINGLE 0x00204000 /* Reset Source: CORER */
+#define GL_SWT_LAT_SINGLE_BASE_S 0
+#define GL_SWT_LAT_SINGLE_BASE_M MAKEMASK(0x7FF, 0)
+#define GL_SWT_LAT_SINGLE_SIZE_S 16
+#define GL_SWT_LAT_SINGLE_SIZE_M MAKEMASK(0x7FF, 16)
+#define GL_SWT_MD_PRI 0x002040AC /* Reset Source: CORER */
+#define GL_SWT_MD_PRI_VSI_PRI_S 0
+#define GL_SWT_MD_PRI_VSI_PRI_M MAKEMASK(0x7, 0)
+#define GL_SWT_MD_PRI_LB_PRI_S 4
+#define GL_SWT_MD_PRI_LB_PRI_M MAKEMASK(0x7, 4)
+#define GL_SWT_MD_PRI_LAN_EN_PRI_S 8
+#define GL_SWT_MD_PRI_LAN_EN_PRI_M MAKEMASK(0x7, 8)
+#define GL_SWT_MD_PRI_QH_PRI_S 12
+#define GL_SWT_MD_PRI_QH_PRI_M MAKEMASK(0x7, 12)
+#define GL_SWT_MD_PRI_QL_PRI_S 16
+#define GL_SWT_MD_PRI_QL_PRI_M MAKEMASK(0x7, 16)
+#define GL_SWT_MIRTARVSI(_i) (0x00204500 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define GL_SWT_MIRTARVSI_MAX_INDEX 63
+#define GL_SWT_MIRTARVSI_VFVMNUMBER_S 0
+#define GL_SWT_MIRTARVSI_VFVMNUMBER_M MAKEMASK(0x3FF, 0)
+#define GL_SWT_MIRTARVSI_FUNCTIONTYPE_S 10
+#define GL_SWT_MIRTARVSI_FUNCTIONTYPE_M MAKEMASK(0x3, 10)
+#define GL_SWT_MIRTARVSI_PFNUMBER_S 12
+#define GL_SWT_MIRTARVSI_PFNUMBER_M MAKEMASK(0x7, 12)
+#define GL_SWT_MIRTARVSI_TARGETVSI_S 20
+#define GL_SWT_MIRTARVSI_TARGETVSI_M MAKEMASK(0x3FF, 20)
+#define GL_SWT_MIRTARVSI_RULEENABLE_S 31
+#define GL_SWT_MIRTARVSI_RULEENABLE_M BIT(31)
+#define GL_SWT_SWIDFVIDX 0x00214114 /* Reset Source: CORER */
+#define GL_SWT_SWIDFVIDX_SWIDFVIDX_S 0
+#define GL_SWT_SWIDFVIDX_SWIDFVIDX_M MAKEMASK(0x3F, 0)
+#define GL_SWT_SWIDFVIDX_PORT_TYPE_S 31
+#define GL_SWT_SWIDFVIDX_PORT_TYPE_M BIT(31)
+#define GL_VP_SWITCHID(_i) (0x00214094 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define GL_VP_SWITCHID_MAX_INDEX 31
+#define GL_VP_SWITCHID_SWITCHID_S 0
+#define GL_VP_SWITCHID_SWITCHID_M MAKEMASK(0xFF, 0)
+#define GLSWID_STAT_BLOCK(_i) (0x0020A1A4 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define GLSWID_STAT_BLOCK_MAX_INDEX 255
+#define GLSWID_STAT_BLOCK_VEBID_S 0
+#define GLSWID_STAT_BLOCK_VEBID_M MAKEMASK(0x1F, 0)
+#define GLSWID_STAT_BLOCK_VEBID_VALID_S 31
+#define GLSWID_STAT_BLOCK_VEBID_VALID_M BIT(31)
+#define GLSWT_ACT_RESP_0 0x0020A5A4 /* Reset Source: CORER */
+#define GLSWT_ACT_RESP_0_GLSWT_ACT_RESP_S 0
+#define GLSWT_ACT_RESP_0_GLSWT_ACT_RESP_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSWT_ACT_RESP_1 0x0020A5A8 /* Reset Source: CORER */
+#define GLSWT_ACT_RESP_1_GLSWT_ACT_RESP_S 0
+#define GLSWT_ACT_RESP_1_GLSWT_ACT_RESP_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLSWT_ARB_MODE 0x0020A674 /* Reset Source: CORER */
+#define GLSWT_ARB_MODE_FLU_PRI_SHM_S 0
+#define GLSWT_ARB_MODE_FLU_PRI_SHM_M BIT(0)
+#define GLSWT_ARB_MODE_TX_RX_FWD_PRI_S 1
+#define GLSWT_ARB_MODE_TX_RX_FWD_PRI_M BIT(1)
+#define PRT_SBPVSI 0x00204120 /* Reset Source: CORER */
+#define PRT_SBPVSI_BAD_FRAMES_VSI_S 0
+#define PRT_SBPVSI_BAD_FRAMES_VSI_M MAKEMASK(0x3FF, 0)
+#define PRT_SBPVSI_SBP_S 31
+#define PRT_SBPVSI_SBP_M BIT(31)
+#define PRT_SCSTS 0x00204140 /* Reset Source: CORER */
+#define PRT_SCSTS_BSCA_S 0
+#define PRT_SCSTS_BSCA_M BIT(0)
+#define PRT_SCSTS_BSCAP_S 1
+#define PRT_SCSTS_BSCAP_M BIT(1)
+#define PRT_SCSTS_MSCA_S 2
+#define PRT_SCSTS_MSCA_M BIT(2)
+#define PRT_SCSTS_MSCAP_S 3
+#define PRT_SCSTS_MSCAP_M BIT(3)
+#define PRT_SWT_BSCCNT 0x00204160 /* Reset Source: CORER */
+#define PRT_SWT_BSCCNT_CCOUNT_S 0
+#define PRT_SWT_BSCCNT_CCOUNT_M MAKEMASK(0x1FFFFFF, 0)
+#define PRT_SWT_BSCTRH 0x00204180 /* Reset Source: CORER */
+#define PRT_SWT_BSCTRH_UTRESH_S 0
+#define PRT_SWT_BSCTRH_UTRESH_M MAKEMASK(0x7FFFF, 0)
+#define PRT_SWT_MIREG 0x002042A0 /* Reset Source: CORER */
+#define PRT_SWT_MIREG_MIRRULE_S 0
+#define PRT_SWT_MIREG_MIRRULE_M MAKEMASK(0x3F, 0)
+#define PRT_SWT_MIREG_MIRENA_S 7
+#define PRT_SWT_MIREG_MIRENA_M BIT(7)
+#define PRT_SWT_MIRIG 0x00204280 /* Reset Source: CORER */
+#define PRT_SWT_MIRIG_MIRRULE_S 0
+#define PRT_SWT_MIRIG_MIRRULE_M MAKEMASK(0x3F, 0)
+#define PRT_SWT_MIRIG_MIRENA_S 7
+#define PRT_SWT_MIRIG_MIRENA_M BIT(7)
+#define PRT_SWT_MSCCNT 0x00204100 /* Reset Source: CORER */
+#define PRT_SWT_MSCCNT_CCOUNT_S 0
+#define PRT_SWT_MSCCNT_CCOUNT_M MAKEMASK(0x1FFFFFF, 0)
+#define PRT_SWT_MSCTRH 0x002041C0 /* Reset Source: CORER */
+#define PRT_SWT_MSCTRH_UTRESH_S 0
+#define PRT_SWT_MSCTRH_UTRESH_M MAKEMASK(0x7FFFF, 0)
+#define PRT_SWT_SCBI 0x002041E0 /* Reset Source: CORER */
+#define PRT_SWT_SCBI_BI_S 0
+#define PRT_SWT_SCBI_BI_M MAKEMASK(0x1FFFFFF, 0)
+#define PRT_SWT_SCCRL 0x00204200 /* Reset Source: CORER */
+#define PRT_SWT_SCCRL_MDIPW_S 0
+#define PRT_SWT_SCCRL_MDIPW_M BIT(0)
+#define PRT_SWT_SCCRL_MDICW_S 1
+#define PRT_SWT_SCCRL_MDICW_M BIT(1)
+#define PRT_SWT_SCCRL_BDIPW_S 2
+#define PRT_SWT_SCCRL_BDIPW_M BIT(2)
+#define PRT_SWT_SCCRL_BDICW_S 3
+#define PRT_SWT_SCCRL_BDICW_M BIT(3)
+#define PRT_SWT_SCCRL_INTERVAL_S 8
+#define PRT_SWT_SCCRL_INTERVAL_M MAKEMASK(0xFFFFF, 8)
+#define PRT_TCTUPR(_i) (0x00040840 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define PRT_TCTUPR_MAX_INDEX 31
+#define PRT_TCTUPR_UP0_S 0
+#define PRT_TCTUPR_UP0_M MAKEMASK(0x7, 0)
+#define PRT_TCTUPR_UP1_S 4
+#define PRT_TCTUPR_UP1_M MAKEMASK(0x7, 4)
+#define PRT_TCTUPR_UP2_S 8
+#define PRT_TCTUPR_UP2_M MAKEMASK(0x7, 8)
+#define PRT_TCTUPR_UP3_S 12
+#define PRT_TCTUPR_UP3_M MAKEMASK(0x7, 12)
+#define PRT_TCTUPR_UP4_S 16
+#define PRT_TCTUPR_UP4_M MAKEMASK(0x7, 16)
+#define PRT_TCTUPR_UP5_S 20
+#define PRT_TCTUPR_UP5_M MAKEMASK(0x7, 20)
+#define PRT_TCTUPR_UP6_S 24
+#define PRT_TCTUPR_UP6_M MAKEMASK(0x7, 24)
+#define PRT_TCTUPR_UP7_S 28
+#define PRT_TCTUPR_UP7_M MAKEMASK(0x7, 28)
+#define GLHH_ART_CTL 0x000A41D4 /* Reset Source: POR */
+#define GLHH_ART_CTL_ACTIVE_S 0
+#define GLHH_ART_CTL_ACTIVE_M BIT(0)
+#define GLHH_ART_CTL_TIME_OUT1_S 1
+#define GLHH_ART_CTL_TIME_OUT1_M BIT(1)
+#define GLHH_ART_CTL_TIME_OUT2_S 2
+#define GLHH_ART_CTL_TIME_OUT2_M BIT(2)
+#define GLHH_ART_CTL_RESET_HH_S 31
+#define GLHH_ART_CTL_RESET_HH_M BIT(31)
+#define GLHH_ART_DATA 0x000A41E0 /* Reset Source: POR */
+#define GLHH_ART_DATA_AGENT_TYPE_S 0
+#define GLHH_ART_DATA_AGENT_TYPE_M MAKEMASK(0x7, 0)
+#define GLHH_ART_DATA_SYNC_TYPE_S 3
+#define GLHH_ART_DATA_SYNC_TYPE_M BIT(3)
+#define GLHH_ART_DATA_MAX_DELAY_S 4
+#define GLHH_ART_DATA_MAX_DELAY_M MAKEMASK(0xF, 4)
+#define GLHH_ART_DATA_TIME_BASE_S 8
+#define GLHH_ART_DATA_TIME_BASE_M MAKEMASK(0xF, 8)
+#define GLHH_ART_DATA_RSV_DATA_S 12
+#define GLHH_ART_DATA_RSV_DATA_M MAKEMASK(0xFFFFF, 12)
+#define GLHH_ART_TIME_H 0x000A41D8 /* Reset Source: POR */
+#define GLHH_ART_TIME_H_ART_TIME_H_S 0
+#define GLHH_ART_TIME_H_ART_TIME_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLHH_ART_TIME_L 0x000A41DC /* Reset Source: POR */
+#define GLHH_ART_TIME_L_ART_TIME_L_S 0
+#define GLHH_ART_TIME_L_ART_TIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_AUX_IN_0(_i) (0x000889D8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_AUX_IN_0_MAX_INDEX 1
+#define GLTSYN_AUX_IN_0_EVNTLVL_S 0
+#define GLTSYN_AUX_IN_0_EVNTLVL_M MAKEMASK(0x3, 0)
+#define GLTSYN_AUX_IN_0_INT_ENA_S 4
+#define GLTSYN_AUX_IN_0_INT_ENA_M BIT(4)
+#define GLTSYN_AUX_IN_1(_i) (0x000889E0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_AUX_IN_1_MAX_INDEX 1
+#define GLTSYN_AUX_IN_1_EVNTLVL_S 0
+#define GLTSYN_AUX_IN_1_EVNTLVL_M MAKEMASK(0x3, 0)
+#define GLTSYN_AUX_IN_1_INT_ENA_S 4
+#define GLTSYN_AUX_IN_1_INT_ENA_M BIT(4)
+#define GLTSYN_AUX_IN_2(_i) (0x000889E8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_AUX_IN_2_MAX_INDEX 1
+#define GLTSYN_AUX_IN_2_EVNTLVL_S 0
+#define GLTSYN_AUX_IN_2_EVNTLVL_M MAKEMASK(0x3, 0)
+#define GLTSYN_AUX_IN_2_INT_ENA_S 4
+#define GLTSYN_AUX_IN_2_INT_ENA_M BIT(4)
+#define GLTSYN_AUX_OUT_0(_i) (0x00088998 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_AUX_OUT_0_MAX_INDEX 1
+#define GLTSYN_AUX_OUT_0_OUT_ENA_S 0
+#define GLTSYN_AUX_OUT_0_OUT_ENA_M BIT(0)
+#define GLTSYN_AUX_OUT_0_OUTMOD_S 1
+#define GLTSYN_AUX_OUT_0_OUTMOD_M MAKEMASK(0x3, 1)
+#define GLTSYN_AUX_OUT_0_OUTLVL_S 3
+#define GLTSYN_AUX_OUT_0_OUTLVL_M BIT(3)
+#define GLTSYN_AUX_OUT_0_INT_ENA_S 4
+#define GLTSYN_AUX_OUT_0_INT_ENA_M BIT(4)
+#define GLTSYN_AUX_OUT_0_PULSEW_S 8
+#define GLTSYN_AUX_OUT_0_PULSEW_M MAKEMASK(0xF, 8)
+#define GLTSYN_AUX_OUT_1(_i) (0x000889A0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_AUX_OUT_1_MAX_INDEX 1
+#define GLTSYN_AUX_OUT_1_OUT_ENA_S 0
+#define GLTSYN_AUX_OUT_1_OUT_ENA_M BIT(0)
+#define GLTSYN_AUX_OUT_1_OUTMOD_S 1
+#define GLTSYN_AUX_OUT_1_OUTMOD_M MAKEMASK(0x3, 1)
+#define GLTSYN_AUX_OUT_1_OUTLVL_S 3
+#define GLTSYN_AUX_OUT_1_OUTLVL_M BIT(3)
+#define GLTSYN_AUX_OUT_1_INT_ENA_S 4
+#define GLTSYN_AUX_OUT_1_INT_ENA_M BIT(4)
+#define GLTSYN_AUX_OUT_1_PULSEW_S 8
+#define GLTSYN_AUX_OUT_1_PULSEW_M MAKEMASK(0xF, 8)
+#define GLTSYN_AUX_OUT_2(_i) (0x000889A8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_AUX_OUT_2_MAX_INDEX 1
+#define GLTSYN_AUX_OUT_2_OUT_ENA_S 0
+#define GLTSYN_AUX_OUT_2_OUT_ENA_M BIT(0)
+#define GLTSYN_AUX_OUT_2_OUTMOD_S 1
+#define GLTSYN_AUX_OUT_2_OUTMOD_M MAKEMASK(0x3, 1)
+#define GLTSYN_AUX_OUT_2_OUTLVL_S 3
+#define GLTSYN_AUX_OUT_2_OUTLVL_M BIT(3)
+#define GLTSYN_AUX_OUT_2_INT_ENA_S 4
+#define GLTSYN_AUX_OUT_2_INT_ENA_M BIT(4)
+#define GLTSYN_AUX_OUT_2_PULSEW_S 8
+#define GLTSYN_AUX_OUT_2_PULSEW_M MAKEMASK(0xF, 8)
+#define GLTSYN_AUX_OUT_3(_i) (0x000889B0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_AUX_OUT_3_MAX_INDEX 1
+#define GLTSYN_AUX_OUT_3_OUT_ENA_S 0
+#define GLTSYN_AUX_OUT_3_OUT_ENA_M BIT(0)
+#define GLTSYN_AUX_OUT_3_OUTMOD_S 1
+#define GLTSYN_AUX_OUT_3_OUTMOD_M MAKEMASK(0x3, 1)
+#define GLTSYN_AUX_OUT_3_OUTLVL_S 3
+#define GLTSYN_AUX_OUT_3_OUTLVL_M BIT(3)
+#define GLTSYN_AUX_OUT_3_INT_ENA_S 4
+#define GLTSYN_AUX_OUT_3_INT_ENA_M BIT(4)
+#define GLTSYN_AUX_OUT_3_PULSEW_S 8
+#define GLTSYN_AUX_OUT_3_PULSEW_M MAKEMASK(0xF, 8)
+#define GLTSYN_CLKO_0(_i) (0x000889B8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_CLKO_0_MAX_INDEX 1
+#define GLTSYN_CLKO_0_TSYNCLKO_S 0
+#define GLTSYN_CLKO_0_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_CLKO_1(_i) (0x000889C0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_CLKO_1_MAX_INDEX 1
+#define GLTSYN_CLKO_1_TSYNCLKO_S 0
+#define GLTSYN_CLKO_1_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_CLKO_2(_i) (0x000889C8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_CLKO_2_MAX_INDEX 1
+#define GLTSYN_CLKO_2_TSYNCLKO_S 0
+#define GLTSYN_CLKO_2_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_CLKO_3(_i) (0x000889D0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_CLKO_3_MAX_INDEX 1
+#define GLTSYN_CLKO_3_TSYNCLKO_S 0
+#define GLTSYN_CLKO_3_TSYNCLKO_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_CMD 0x00088810 /* Reset Source: CORER */
+#define GLTSYN_CMD_CMD_S 0
+#define GLTSYN_CMD_CMD_M MAKEMASK(0xFF, 0)
+#define GLTSYN_CMD_SEL_MASTER_S 8
+#define GLTSYN_CMD_SEL_MASTER_M BIT(8)
+#define GLTSYN_CMD_SYNC 0x00088814 /* Reset Source: CORER */
+#define GLTSYN_CMD_SYNC_SYNC_S 0
+#define GLTSYN_CMD_SYNC_SYNC_M MAKEMASK(0x3, 0)
+#define GLTSYN_ENA(_i) (0x00088808 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_ENA_MAX_INDEX 1
+#define GLTSYN_ENA_TSYN_ENA_S 0
+#define GLTSYN_ENA_TSYN_ENA_M BIT(0)
+#define GLTSYN_EVNT_H_0(_i) (0x00088970 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_EVNT_H_0_MAX_INDEX 1
+#define GLTSYN_EVNT_H_0_TSYNEVNT_H_S 0
+#define GLTSYN_EVNT_H_0_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_EVNT_H_1(_i) (0x00088980 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_EVNT_H_1_MAX_INDEX 1
+#define GLTSYN_EVNT_H_1_TSYNEVNT_H_S 0
+#define GLTSYN_EVNT_H_1_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_EVNT_H_2(_i) (0x00088990 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_EVNT_H_2_MAX_INDEX 1
+#define GLTSYN_EVNT_H_2_TSYNEVNT_H_S 0
+#define GLTSYN_EVNT_H_2_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_EVNT_L_0(_i) (0x00088968 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_EVNT_L_0_MAX_INDEX 1
+#define GLTSYN_EVNT_L_0_TSYNEVNT_L_S 0
+#define GLTSYN_EVNT_L_0_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_EVNT_L_1(_i) (0x00088978 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_EVNT_L_1_MAX_INDEX 1
+#define GLTSYN_EVNT_L_1_TSYNEVNT_L_S 0
+#define GLTSYN_EVNT_L_1_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_EVNT_L_2(_i) (0x00088988 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_EVNT_L_2_MAX_INDEX 1
+#define GLTSYN_EVNT_L_2_TSYNEVNT_L_S 0
+#define GLTSYN_EVNT_L_2_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_HHTIME_H(_i) (0x00088900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_HHTIME_H_MAX_INDEX 1
+#define GLTSYN_HHTIME_H_TSYNEVNT_H_S 0
+#define GLTSYN_HHTIME_H_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_HHTIME_L(_i) (0x000888F8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_HHTIME_L_MAX_INDEX 1
+#define GLTSYN_HHTIME_L_TSYNEVNT_L_S 0
+#define GLTSYN_HHTIME_L_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_INCVAL_H_MAX_INDEX 1
+#define GLTSYN_INCVAL_H_INCVAL_H_S 0
+#define GLTSYN_INCVAL_H_INCVAL_H_M MAKEMASK(0xFF, 0)
+#define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_INCVAL_L_MAX_INDEX 1
+#define GLTSYN_INCVAL_L_INCVAL_L_S 0
+#define GLTSYN_INCVAL_L_INCVAL_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_SHADJ_H_MAX_INDEX 1
+#define GLTSYN_SHADJ_H_ADJUST_H_S 0
+#define GLTSYN_SHADJ_H_ADJUST_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_SHADJ_L(_i) (0x00088908 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_SHADJ_L_MAX_INDEX 1
+#define GLTSYN_SHADJ_L_ADJUST_L_S 0
+#define GLTSYN_SHADJ_L_ADJUST_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_SHTIME_0(_i) (0x000888E0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_SHTIME_0_MAX_INDEX 1
+#define GLTSYN_SHTIME_0_TSYNTIME_0_S 0
+#define GLTSYN_SHTIME_0_TSYNTIME_0_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_SHTIME_H(_i) (0x000888F0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_SHTIME_H_MAX_INDEX 1
+#define GLTSYN_SHTIME_H_TSYNTIME_H_S 0
+#define GLTSYN_SHTIME_H_TSYNTIME_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_SHTIME_L(_i) (0x000888E8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_SHTIME_L_MAX_INDEX 1
+#define GLTSYN_SHTIME_L_TSYNTIME_L_S 0
+#define GLTSYN_SHTIME_L_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_STAT(_i) (0x000888C0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_STAT_MAX_INDEX 1
+#define GLTSYN_STAT_EVENT0_S 0
+#define GLTSYN_STAT_EVENT0_M BIT(0)
+#define GLTSYN_STAT_EVENT1_S 1
+#define GLTSYN_STAT_EVENT1_M BIT(1)
+#define GLTSYN_STAT_EVENT2_S 2
+#define GLTSYN_STAT_EVENT2_M BIT(2)
+#define GLTSYN_STAT_TGT0_S 4
+#define GLTSYN_STAT_TGT0_M BIT(4)
+#define GLTSYN_STAT_TGT1_S 5
+#define GLTSYN_STAT_TGT1_M BIT(5)
+#define GLTSYN_STAT_TGT2_S 6
+#define GLTSYN_STAT_TGT2_M BIT(6)
+#define GLTSYN_STAT_TGT3_S 7
+#define GLTSYN_STAT_TGT3_M BIT(7)
+#define GLTSYN_SYNC_DLAY 0x00088818 /* Reset Source: CORER */
+#define GLTSYN_SYNC_DLAY_SYNC_DELAY_S 0
+#define GLTSYN_SYNC_DLAY_SYNC_DELAY_M MAKEMASK(0x1F, 0)
+#define GLTSYN_TGT_H_0(_i) (0x00088930 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TGT_H_0_MAX_INDEX 1
+#define GLTSYN_TGT_H_0_TSYNTGTT_H_S 0
+#define GLTSYN_TGT_H_0_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TGT_H_1(_i) (0x00088940 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TGT_H_1_MAX_INDEX 1
+#define GLTSYN_TGT_H_1_TSYNTGTT_H_S 0
+#define GLTSYN_TGT_H_1_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TGT_H_2(_i) (0x00088950 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TGT_H_2_MAX_INDEX 1
+#define GLTSYN_TGT_H_2_TSYNTGTT_H_S 0
+#define GLTSYN_TGT_H_2_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TGT_H_3(_i) (0x00088960 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TGT_H_3_MAX_INDEX 1
+#define GLTSYN_TGT_H_3_TSYNTGTT_H_S 0
+#define GLTSYN_TGT_H_3_TSYNTGTT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TGT_L_0_MAX_INDEX 1
+#define GLTSYN_TGT_L_0_TSYNTGTT_L_S 0
+#define GLTSYN_TGT_L_0_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TGT_L_1(_i) (0x00088938 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TGT_L_1_MAX_INDEX 1
+#define GLTSYN_TGT_L_1_TSYNTGTT_L_S 0
+#define GLTSYN_TGT_L_1_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TGT_L_2(_i) (0x00088948 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TGT_L_2_MAX_INDEX 1
+#define GLTSYN_TGT_L_2_TSYNTGTT_L_S 0
+#define GLTSYN_TGT_L_2_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TGT_L_3(_i) (0x00088958 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TGT_L_3_MAX_INDEX 1
+#define GLTSYN_TGT_L_3_TSYNTGTT_L_S 0
+#define GLTSYN_TGT_L_3_TSYNTGTT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TIME_0(_i) (0x000888C8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TIME_0_MAX_INDEX 1
+#define GLTSYN_TIME_0_TSYNTIME_0_S 0
+#define GLTSYN_TIME_0_TSYNTIME_0_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TIME_H_MAX_INDEX 1
+#define GLTSYN_TIME_H_TSYNTIME_H_S 0
+#define GLTSYN_TIME_H_TSYNTIME_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define GLTSYN_TIME_L_MAX_INDEX 1
+#define GLTSYN_TIME_L_TSYNTIME_L_S 0
+#define GLTSYN_TIME_L_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define PFHH_SEM 0x000A4200 /* Reset Source: PFR */
+#define PFHH_SEM_BUSY_S 0
+#define PFHH_SEM_BUSY_M BIT(0)
+#define PFHH_SEM_PF_OWNER_S 4
+#define PFHH_SEM_PF_OWNER_M MAKEMASK(0x7, 4)
+#define PFTSYN_SEM 0x00088880 /* Reset Source: PFR */
+#define PFTSYN_SEM_BUSY_S 0
+#define PFTSYN_SEM_BUSY_M BIT(0)
+#define PFTSYN_SEM_PF_OWNER_S 4
+#define PFTSYN_SEM_PF_OWNER_M MAKEMASK(0x7, 4)
+#define GLPE_TSCD_FLR(_i) (0x0051E24C + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define GLPE_TSCD_FLR_MAX_INDEX 3
+#define GLPE_TSCD_FLR_DRAIN_VCTR_ID_S 0
+#define GLPE_TSCD_FLR_DRAIN_VCTR_ID_M MAKEMASK(0x3, 0)
+#define GLPE_TSCD_FLR_PORT_S 2
+#define GLPE_TSCD_FLR_PORT_M MAKEMASK(0x7, 2)
+#define GLPE_TSCD_FLR_PF_NUM_S 5
+#define GLPE_TSCD_FLR_PF_NUM_M MAKEMASK(0x7, 5)
+#define GLPE_TSCD_FLR_VM_VF_TYPE_S 8
+#define GLPE_TSCD_FLR_VM_VF_TYPE_M MAKEMASK(0x3, 8)
+#define GLPE_TSCD_FLR_VM_VF_NUM_S 16
+#define GLPE_TSCD_FLR_VM_VF_NUM_M MAKEMASK(0x3FF, 16)
+#define GLPE_TSCD_FLR_VLD_S 31
+#define GLPE_TSCD_FLR_VLD_M BIT(31)
+#define GLPE_TSCD_PEPM 0x0051E228 /* Reset Source: CORER */
+#define GLPE_TSCD_PEPM_MDQ_CREDITS_S 0
+#define GLPE_TSCD_PEPM_MDQ_CREDITS_M MAKEMASK(0xFF, 0)
+#define PF_VIRT_VSTATUS 0x0009E680 /* Reset Source: PFR */
+#define PF_VIRT_VSTATUS_NUM_VFS_S 0
+#define PF_VIRT_VSTATUS_NUM_VFS_M MAKEMASK(0xFF, 0)
+#define PF_VIRT_VSTATUS_TOTAL_VFS_S 8
+#define PF_VIRT_VSTATUS_TOTAL_VFS_M MAKEMASK(0xFF, 8)
+#define PF_VIRT_VSTATUS_IOV_ACTIVE_S 16
+#define PF_VIRT_VSTATUS_IOV_ACTIVE_M BIT(16)
+#define PF_VT_PFALLOC 0x001D2480 /* Reset Source: CORER */
+#define PF_VT_PFALLOC_FIRSTVF_S 0
+#define PF_VT_PFALLOC_FIRSTVF_M MAKEMASK(0xFF, 0)
+#define PF_VT_PFALLOC_LASTVF_S 8
+#define PF_VT_PFALLOC_LASTVF_M MAKEMASK(0xFF, 8)
+#define PF_VT_PFALLOC_VALID_S 31
+#define PF_VT_PFALLOC_VALID_M BIT(31)
+#define PF_VT_PFALLOC_HIF 0x0009DD80 /* Reset Source: PCIR */
+#define PF_VT_PFALLOC_HIF_FIRSTVF_S 0
+#define PF_VT_PFALLOC_HIF_FIRSTVF_M MAKEMASK(0xFF, 0)
+#define PF_VT_PFALLOC_HIF_LASTVF_S 8
+#define PF_VT_PFALLOC_HIF_LASTVF_M MAKEMASK(0xFF, 8)
+#define PF_VT_PFALLOC_HIF_VALID_S 31
+#define PF_VT_PFALLOC_HIF_VALID_M BIT(31)
+#define PF_VT_PFALLOC_PCIE 0x000BE080 /* Reset Source: PCIR */
+#define PF_VT_PFALLOC_PCIE_FIRSTVF_S 0
+#define PF_VT_PFALLOC_PCIE_FIRSTVF_M MAKEMASK(0xFF, 0)
+#define PF_VT_PFALLOC_PCIE_LASTVF_S 8
+#define PF_VT_PFALLOC_PCIE_LASTVF_M MAKEMASK(0xFF, 8)
+#define PF_VT_PFALLOC_PCIE_VALID_S 31
+#define PF_VT_PFALLOC_PCIE_VALID_M BIT(31)
+#define VSI_L2TAGSTXVALID(_VSI) (0x00046000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_L2TAGSTXVALID_MAX_INDEX 767
+#define VSI_L2TAGSTXVALID_L2TAG1INSERTID_S 0
+#define VSI_L2TAGSTXVALID_L2TAG1INSERTID_M MAKEMASK(0x7, 0)
+#define VSI_L2TAGSTXVALID_L2TAG1INSERTID_VALID_S 3
+#define VSI_L2TAGSTXVALID_L2TAG1INSERTID_VALID_M BIT(3)
+#define VSI_L2TAGSTXVALID_L2TAG2INSERTID_S 4
+#define VSI_L2TAGSTXVALID_L2TAG2INSERTID_M MAKEMASK(0x7, 4)
+#define VSI_L2TAGSTXVALID_L2TAG2INSERTID_VALID_S 7
+#define VSI_L2TAGSTXVALID_L2TAG2INSERTID_VALID_M BIT(7)
+#define VSI_L2TAGSTXVALID_TIR0INSERTID_S 16
+#define VSI_L2TAGSTXVALID_TIR0INSERTID_M MAKEMASK(0x7, 16)
+#define VSI_L2TAGSTXVALID_TIR0_INSERT_S 19
+#define VSI_L2TAGSTXVALID_TIR0_INSERT_M BIT(19)
+#define VSI_L2TAGSTXVALID_TIR1INSERTID_S 20
+#define VSI_L2TAGSTXVALID_TIR1INSERTID_M MAKEMASK(0x7, 20)
+#define VSI_L2TAGSTXVALID_TIR1_INSERT_S 23
+#define VSI_L2TAGSTXVALID_TIR1_INSERT_M BIT(23)
+#define VSI_L2TAGSTXVALID_TIR2INSERTID_S 24
+#define VSI_L2TAGSTXVALID_TIR2INSERTID_M MAKEMASK(0x7, 24)
+#define VSI_L2TAGSTXVALID_TIR2_INSERT_S 27
+#define VSI_L2TAGSTXVALID_TIR2_INSERT_M BIT(27)
+#define VSI_PASID(_VSI) (0x0009C000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define VSI_PASID_MAX_INDEX 767
+#define VSI_PASID_PASID_S 0
+#define VSI_PASID_PASID_M MAKEMASK(0xFFFFF, 0)
+#define VSI_PASID_EN_S 31
+#define VSI_PASID_EN_M BIT(31)
+#define VSI_RUPR(_VSI) (0x00050000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_RUPR_MAX_INDEX 767
+#define VSI_RUPR_UP0_S 0
+#define VSI_RUPR_UP0_M MAKEMASK(0x7, 0)
+#define VSI_RUPR_UP1_S 3
+#define VSI_RUPR_UP1_M MAKEMASK(0x7, 3)
+#define VSI_RUPR_UP2_S 6
+#define VSI_RUPR_UP2_M MAKEMASK(0x7, 6)
+#define VSI_RUPR_UP3_S 9
+#define VSI_RUPR_UP3_M MAKEMASK(0x7, 9)
+#define VSI_RUPR_UP4_S 12
+#define VSI_RUPR_UP4_M MAKEMASK(0x7, 12)
+#define VSI_RUPR_UP5_S 15
+#define VSI_RUPR_UP5_M MAKEMASK(0x7, 15)
+#define VSI_RUPR_UP6_S 18
+#define VSI_RUPR_UP6_M MAKEMASK(0x7, 18)
+#define VSI_RUPR_UP7_S 21
+#define VSI_RUPR_UP7_M MAKEMASK(0x7, 21)
+#define VSI_RXSWCTRL(_VSI) (0x00205000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_RXSWCTRL_MAX_INDEX 767
+#define VSI_RXSWCTRL_MACVSIPRUNEENABLE_S 8
+#define VSI_RXSWCTRL_MACVSIPRUNEENABLE_M BIT(8)
+#define VSI_RXSWCTRL_PRUNEENABLE_S 9
+#define VSI_RXSWCTRL_PRUNEENABLE_M MAKEMASK(0xF, 9)
+#define VSI_RXSWCTRL_SRCPRUNEENABLE_S 13
+#define VSI_RXSWCTRL_SRCPRUNEENABLE_M BIT(13)
+#define VSI_SRCSWCTRL(_VSI) (0x00209000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_SRCSWCTRL_MAX_INDEX 767
+#define VSI_SRCSWCTRL_ALLOWDESTOVERRIDE_S 0
+#define VSI_SRCSWCTRL_ALLOWDESTOVERRIDE_M BIT(0)
+#define VSI_SRCSWCTRL_ALLOWLOOPBACK_S 1
+#define VSI_SRCSWCTRL_ALLOWLOOPBACK_M BIT(1)
+#define VSI_SRCSWCTRL_LANENABLE_S 2
+#define VSI_SRCSWCTRL_LANENABLE_M BIT(2)
+#define VSI_SRCSWCTRL_MACAS_S 3
+#define VSI_SRCSWCTRL_MACAS_M BIT(3)
+#define VSI_SRCSWCTRL_PRUNEENABLE_S 4
+#define VSI_SRCSWCTRL_PRUNEENABLE_M MAKEMASK(0xF, 4)
+#define VSI_SWITCHID(_VSI) (0x00215000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_SWITCHID_MAX_INDEX 767
+#define VSI_SWITCHID_SWITCHID_S 0
+#define VSI_SWITCHID_SWITCHID_M MAKEMASK(0xFF, 0)
+#define VSI_SWT_MIREG(_VSI) (0x00207000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_SWT_MIREG_MAX_INDEX 767
+#define VSI_SWT_MIREG_MIRRULE_S 0
+#define VSI_SWT_MIREG_MIRRULE_M MAKEMASK(0x3F, 0)
+#define VSI_SWT_MIREG_MIRENA_S 7
+#define VSI_SWT_MIREG_MIRENA_M BIT(7)
+#define VSI_SWT_MIRIG(_VSI) (0x00208000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_SWT_MIRIG_MAX_INDEX 767
+#define VSI_SWT_MIRIG_MIRRULE_S 0
+#define VSI_SWT_MIRIG_MIRRULE_M MAKEMASK(0x3F, 0)
+#define VSI_SWT_MIRIG_MIRENA_S 7
+#define VSI_SWT_MIRIG_MIRENA_M BIT(7)
+#define VSI_TAIR(_VSI) (0x00044000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define VSI_TAIR_MAX_INDEX 767
+#define VSI_TAIR_PORT_TAG_ID_S 0
+#define VSI_TAIR_PORT_TAG_ID_M MAKEMASK(0xFFFF, 0)
+#define VSI_TAR(_VSI) (0x00045000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_TAR_MAX_INDEX 767
+#define VSI_TAR_ACCEPTTAGGED_S 0
+#define VSI_TAR_ACCEPTTAGGED_M MAKEMASK(0x3FF, 0)
+#define VSI_TAR_ACCEPTUNTAGGED_S 16
+#define VSI_TAR_ACCEPTUNTAGGED_M MAKEMASK(0x3FF, 16)
+#define VSI_TIR_0(_VSI) (0x00041000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_TIR_0_MAX_INDEX 767
+#define VSI_TIR_0_PORT_TAG_ID_S 0
+#define VSI_TIR_0_PORT_TAG_ID_M MAKEMASK(0xFFFF, 0)
+#define VSI_TIR_1(_VSI) (0x00042000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_TIR_1_MAX_INDEX 767
+#define VSI_TIR_1_PORT_TAG_ID_S 0
+#define VSI_TIR_1_PORT_TAG_ID_M MAKEMASK(0xFFFFFFFF, 0)
+#define VSI_TIR_2(_VSI) (0x00043000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_TIR_2_MAX_INDEX 767
+#define VSI_TIR_2_PORT_TAG_ID_S 0
+#define VSI_TIR_2_PORT_TAG_ID_M MAKEMASK(0xFFFF, 0)
+#define VSI_TSR(_VSI) (0x00051000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_TSR_MAX_INDEX 767
+#define VSI_TSR_STRIPTAG_S 0
+#define VSI_TSR_STRIPTAG_M MAKEMASK(0x3FF, 0)
+#define VSI_TSR_SHOWTAG_S 10
+#define VSI_TSR_SHOWTAG_M MAKEMASK(0x3FF, 10)
+#define VSI_TSR_SHOWPRIONLY_S 20
+#define VSI_TSR_SHOWPRIONLY_M MAKEMASK(0x3FF, 20)
+#define VSI_TUPIOM(_VSI) (0x00048000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_TUPIOM_MAX_INDEX 767
+#define VSI_TUPIOM_UP0_S 0
+#define VSI_TUPIOM_UP0_M MAKEMASK(0x7, 0)
+#define VSI_TUPIOM_UP1_S 3
+#define VSI_TUPIOM_UP1_M MAKEMASK(0x7, 3)
+#define VSI_TUPIOM_UP2_S 6
+#define VSI_TUPIOM_UP2_M MAKEMASK(0x7, 6)
+#define VSI_TUPIOM_UP3_S 9
+#define VSI_TUPIOM_UP3_M MAKEMASK(0x7, 9)
+#define VSI_TUPIOM_UP4_S 12
+#define VSI_TUPIOM_UP4_M MAKEMASK(0x7, 12)
+#define VSI_TUPIOM_UP5_S 15
+#define VSI_TUPIOM_UP5_M MAKEMASK(0x7, 15)
+#define VSI_TUPIOM_UP6_S 18
+#define VSI_TUPIOM_UP6_M MAKEMASK(0x7, 18)
+#define VSI_TUPIOM_UP7_S 21
+#define VSI_TUPIOM_UP7_M MAKEMASK(0x7, 21)
+#define VSI_TUPR(_VSI) (0x00047000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSI_TUPR_MAX_INDEX 767
+#define VSI_TUPR_UP0_S 0
+#define VSI_TUPR_UP0_M MAKEMASK(0x7, 0)
+#define VSI_TUPR_UP1_S 3
+#define VSI_TUPR_UP1_M MAKEMASK(0x7, 3)
+#define VSI_TUPR_UP2_S 6
+#define VSI_TUPR_UP2_M MAKEMASK(0x7, 6)
+#define VSI_TUPR_UP3_S 9
+#define VSI_TUPR_UP3_M MAKEMASK(0x7, 9)
+#define VSI_TUPR_UP4_S 12
+#define VSI_TUPR_UP4_M MAKEMASK(0x7, 12)
+#define VSI_TUPR_UP5_S 15
+#define VSI_TUPR_UP5_M MAKEMASK(0x7, 15)
+#define VSI_TUPR_UP6_S 18
+#define VSI_TUPR_UP6_M MAKEMASK(0x7, 18)
+#define VSI_TUPR_UP7_S 21
+#define VSI_TUPR_UP7_M MAKEMASK(0x7, 21)
+#define VSI_VSI2F(_VSI) (0x001D0000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define VSI_VSI2F_MAX_INDEX 767
+#define VSI_VSI2F_VFVMNUMBER_S 0
+#define VSI_VSI2F_VFVMNUMBER_M MAKEMASK(0x3FF, 0)
+#define VSI_VSI2F_FUNCTIONTYPE_S 10
+#define VSI_VSI2F_FUNCTIONTYPE_M MAKEMASK(0x3, 10)
+#define VSI_VSI2F_PFNUMBER_S 12
+#define VSI_VSI2F_PFNUMBER_M MAKEMASK(0x7, 12)
+#define VSI_VSI2F_BUFFERNUMBER_S 16
+#define VSI_VSI2F_BUFFERNUMBER_M MAKEMASK(0x7, 16)
+#define VSI_VSI2F_VSI_NUMBER_S 20
+#define VSI_VSI2F_VSI_NUMBER_M MAKEMASK(0x3FF, 20)
+#define VSI_VSI2F_VSI_ENABLE_S 31
+#define VSI_VSI2F_VSI_ENABLE_M BIT(31)
+#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define VSIQF_FD_CNT_MAX_INDEX 767
+#define VSIQF_FD_CNT_FD_GCNT_S 0
+#define VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0x3FFF, 0)
+#define VSIQF_FD_CNT_FD_BCNT_S 16
+#define VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0x3FFF, 16)
+#define VSIQF_FD_CTL1(_VSI) (0x00411000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSIQF_FD_CTL1_MAX_INDEX 767
+#define VSIQF_FD_CTL1_FLT_ENA_S 0
+#define VSIQF_FD_CTL1_FLT_ENA_M BIT(0)
+#define VSIQF_FD_CTL1_CFG_ENA_S 1
+#define VSIQF_FD_CTL1_CFG_ENA_M BIT(1)
+#define VSIQF_FD_CTL1_EVICT_ENA_S 2
+#define VSIQF_FD_CTL1_EVICT_ENA_M BIT(2)
+#define VSIQF_FD_DFLT(_VSI) (0x00457000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSIQF_FD_DFLT_MAX_INDEX 767
+#define VSIQF_FD_DFLT_DEFLT_QINDX_S 0
+#define VSIQF_FD_DFLT_DEFLT_QINDX_M MAKEMASK(0x7FF, 0)
+#define VSIQF_FD_DFLT_DEFLT_TOQUEUE_S 12
+#define VSIQF_FD_DFLT_DEFLT_TOQUEUE_M MAKEMASK(0x7, 12)
+#define VSIQF_FD_DFLT_COMP_QINDX_S 16
+#define VSIQF_FD_DFLT_COMP_QINDX_M MAKEMASK(0x7FF, 16)
+#define VSIQF_FD_DFLT_DEFLT_QINDX_PRIO_S 28
+#define VSIQF_FD_DFLT_DEFLT_QINDX_PRIO_M MAKEMASK(0x7, 28)
+#define VSIQF_FD_DFLT_DEFLT_DROP_S 31
+#define VSIQF_FD_DFLT_DEFLT_DROP_M BIT(31)
+#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSIQF_FD_SIZE_MAX_INDEX 767
+#define VSIQF_FD_SIZE_FD_GSIZE_S 0
+#define VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x3FFF, 0)
+#define VSIQF_FD_SIZE_FD_BSIZE_S 16
+#define VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x3FFF, 16)
+#define VSIQF_HASH_CTL(_VSI) (0x0040D000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSIQF_HASH_CTL_MAX_INDEX 767
+#define VSIQF_HASH_CTL_HASH_LUT_SEL_S 0
+#define VSIQF_HASH_CTL_HASH_LUT_SEL_M MAKEMASK(0x3, 0)
+#define VSIQF_HASH_CTL_GLOB_LUT_S 2
+#define VSIQF_HASH_CTL_GLOB_LUT_M MAKEMASK(0xF, 2)
+#define VSIQF_HASH_CTL_HASH_SCHEME_S 6
+#define VSIQF_HASH_CTL_HASH_SCHEME_M MAKEMASK(0x3, 6)
+#define VSIQF_HASH_CTL_TC_OVER_SEL_S 8
+#define VSIQF_HASH_CTL_TC_OVER_SEL_M MAKEMASK(0x1F, 8)
+#define VSIQF_HASH_CTL_TC_OVER_ENA_S 15
+#define VSIQF_HASH_CTL_TC_OVER_ENA_M BIT(15)
+#define VSIQF_HKEY(_i, _VSI) (0x00400000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...12, _VSI=0...767 */ /* Reset Source: PFR */
+#define VSIQF_HKEY_MAX_INDEX 12
+#define VSIQF_HKEY_KEY_0_S 0
+#define VSIQF_HKEY_KEY_0_M MAKEMASK(0xFF, 0)
+#define VSIQF_HKEY_KEY_1_S 8
+#define VSIQF_HKEY_KEY_1_M MAKEMASK(0xFF, 8)
+#define VSIQF_HKEY_KEY_2_S 16
+#define VSIQF_HKEY_KEY_2_M MAKEMASK(0xFF, 16)
+#define VSIQF_HKEY_KEY_3_S 24
+#define VSIQF_HKEY_KEY_3_M MAKEMASK(0xFF, 24)
+#define VSIQF_HLUT(_i, _VSI) (0x00420000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...15, _VSI=0...767 */ /* Reset Source: PFR */
+#define VSIQF_HLUT_MAX_INDEX 15
+#define VSIQF_HLUT_LUT0_S 0
+#define VSIQF_HLUT_LUT0_M MAKEMASK(0xF, 0)
+#define VSIQF_HLUT_LUT1_S 8
+#define VSIQF_HLUT_LUT1_M MAKEMASK(0xF, 8)
+#define VSIQF_HLUT_LUT2_S 16
+#define VSIQF_HLUT_LUT2_M MAKEMASK(0xF, 16)
+#define VSIQF_HLUT_LUT3_S 24
+#define VSIQF_HLUT_LUT3_M MAKEMASK(0xF, 24)
+#define VSIQF_PE_CTL1(_VSI) (0x00414000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define VSIQF_PE_CTL1_MAX_INDEX 767
+#define VSIQF_PE_CTL1_PE_FLTENA_S 0
+#define VSIQF_PE_CTL1_PE_FLTENA_M BIT(0)
+#define VSIQF_TC_REGION(_i, _VSI) (0x00448000 + ((_i) * 4096 + (_VSI) * 4)) /* _i=0...3, _VSI=0...767 */ /* Reset Source: CORER */
+#define VSIQF_TC_REGION_MAX_INDEX 3
+#define VSIQF_TC_REGION_TC_BASE0_S 0
+#define VSIQF_TC_REGION_TC_BASE0_M MAKEMASK(0x7FF, 0)
+#define VSIQF_TC_REGION_TC_SIZE0_S 11
+#define VSIQF_TC_REGION_TC_SIZE0_M MAKEMASK(0xF, 11)
+#define VSIQF_TC_REGION_TC_BASE1_S 16
+#define VSIQF_TC_REGION_TC_BASE1_M MAKEMASK(0x7FF, 16)
+#define VSIQF_TC_REGION_TC_SIZE1_S 27
+#define VSIQF_TC_REGION_TC_SIZE1_M MAKEMASK(0xF, 27)
+#define GLPM_WUMC 0x0009DEE4 /* Reset Source: POR */
+#define GLPM_WUMC_MNG_WU_PF_S 16
+#define GLPM_WUMC_MNG_WU_PF_M MAKEMASK(0xFF, 16)
+#define PFPM_APM 0x000B8080 /* Reset Source: POR */
+#define PFPM_APM_APME_S 0
+#define PFPM_APM_APME_M BIT(0)
+#define PFPM_WUC 0x0009DC80 /* Reset Source: POR */
+#define PFPM_WUC_EN_APM_D0_S 5
+#define PFPM_WUC_EN_APM_D0_M BIT(5)
+#define PFPM_WUFC 0x0009DC00 /* Reset Source: POR */
+#define PFPM_WUFC_LNKC_S 0
+#define PFPM_WUFC_LNKC_M BIT(0)
+#define PFPM_WUFC_MAG_S 1
+#define PFPM_WUFC_MAG_M BIT(1)
+#define PFPM_WUFC_MNG_S 3
+#define PFPM_WUFC_MNG_M BIT(3)
+#define PFPM_WUFC_FLX0_ACT_S 4
+#define PFPM_WUFC_FLX0_ACT_M BIT(4)
+#define PFPM_WUFC_FLX1_ACT_S 5
+#define PFPM_WUFC_FLX1_ACT_M BIT(5)
+#define PFPM_WUFC_FLX2_ACT_S 6
+#define PFPM_WUFC_FLX2_ACT_M BIT(6)
+#define PFPM_WUFC_FLX3_ACT_S 7
+#define PFPM_WUFC_FLX3_ACT_M BIT(7)
+#define PFPM_WUFC_FLX4_ACT_S 8
+#define PFPM_WUFC_FLX4_ACT_M BIT(8)
+#define PFPM_WUFC_FLX5_ACT_S 9
+#define PFPM_WUFC_FLX5_ACT_M BIT(9)
+#define PFPM_WUFC_FLX6_ACT_S 10
+#define PFPM_WUFC_FLX6_ACT_M BIT(10)
+#define PFPM_WUFC_FLX7_ACT_S 11
+#define PFPM_WUFC_FLX7_ACT_M BIT(11)
+#define PFPM_WUFC_FLX0_S 16
+#define PFPM_WUFC_FLX0_M BIT(16)
+#define PFPM_WUFC_FLX1_S 17
+#define PFPM_WUFC_FLX1_M BIT(17)
+#define PFPM_WUFC_FLX2_S 18
+#define PFPM_WUFC_FLX2_M BIT(18)
+#define PFPM_WUFC_FLX3_S 19
+#define PFPM_WUFC_FLX3_M BIT(19)
+#define PFPM_WUFC_FLX4_S 20
+#define PFPM_WUFC_FLX4_M BIT(20)
+#define PFPM_WUFC_FLX5_S 21
+#define PFPM_WUFC_FLX5_M BIT(21)
+#define PFPM_WUFC_FLX6_S 22
+#define PFPM_WUFC_FLX6_M BIT(22)
+#define PFPM_WUFC_FLX7_S 23
+#define PFPM_WUFC_FLX7_M BIT(23)
+#define PFPM_WUFC_FW_RST_WK_S 31
+#define PFPM_WUFC_FW_RST_WK_M BIT(31)
+#define PFPM_WUS 0x0009DB80 /* Reset Source: POR */
+#define PFPM_WUS_LNKC_S 0
+#define PFPM_WUS_LNKC_M BIT(0)
+#define PFPM_WUS_MAG_S 1
+#define PFPM_WUS_MAG_M BIT(1)
+#define PFPM_WUS_PME_STATUS_S 2
+#define PFPM_WUS_PME_STATUS_M BIT(2)
+#define PFPM_WUS_MNG_S 3
+#define PFPM_WUS_MNG_M BIT(3)
+#define PFPM_WUS_FLX0_S 16
+#define PFPM_WUS_FLX0_M BIT(16)
+#define PFPM_WUS_FLX1_S 17
+#define PFPM_WUS_FLX1_M BIT(17)
+#define PFPM_WUS_FLX2_S 18
+#define PFPM_WUS_FLX2_M BIT(18)
+#define PFPM_WUS_FLX3_S 19
+#define PFPM_WUS_FLX3_M BIT(19)
+#define PFPM_WUS_FLX4_S 20
+#define PFPM_WUS_FLX4_M BIT(20)
+#define PFPM_WUS_FLX5_S 21
+#define PFPM_WUS_FLX5_M BIT(21)
+#define PFPM_WUS_FLX6_S 22
+#define PFPM_WUS_FLX6_M BIT(22)
+#define PFPM_WUS_FLX7_S 23
+#define PFPM_WUS_FLX7_M BIT(23)
+#define PFPM_WUS_FW_RST_WK_S 31
+#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define PRTPM_SAH(_i) (0x001E3BA0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define PRTPM_SAH_MAX_INDEX 3
+#define PRTPM_SAH_PFPM_SAH_S 0
+#define PRTPM_SAH_PFPM_SAH_M MAKEMASK(0xFFFF, 0)
+#define PRTPM_SAH_PF_NUM_S 26
+#define PRTPM_SAH_PF_NUM_M MAKEMASK(0xF, 26)
+#define PRTPM_SAH_MC_MAG_EN_S 30
+#define PRTPM_SAH_MC_MAG_EN_M BIT(30)
+#define PRTPM_SAH_AV_S 31
+#define PRTPM_SAH_AV_M BIT(31)
+#define PRTPM_SAL(_i) (0x001E3B20 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define PRTPM_SAL_MAX_INDEX 3
+#define PRTPM_SAL_PFPM_SAL_S 0
+#define PRTPM_SAL_PFPM_SAL_M MAKEMASK(0xFFFFFFFF, 0)
+#define GLPE_CQM_FUNC_INVALIDATE 0x00503300 /* Reset Source: CORER */
+#define GLPE_CQM_FUNC_INVALIDATE_PF_NUM_S 0
+#define GLPE_CQM_FUNC_INVALIDATE_PF_NUM_M MAKEMASK(0x7, 0)
+#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_NUM_S 3
+#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_NUM_M MAKEMASK(0x3FF, 3)
+#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_TYPE_S 13
+#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_TYPE_M MAKEMASK(0x3, 13)
+#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_S 31
+#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_M BIT(31)
+#define VFPE_MRTEIDXMASK 0x00009000 /* Reset Source: PFR */
+#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0
+#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
+#define GLTSYN_HH_DLAY 0x0008881C /* Reset Source: CORER */
+#define GLTSYN_HH_DLAY_SYNC_DELAY_S 0
+#define GLTSYN_HH_DLAY_SYNC_DELAY_M MAKEMASK(0xF, 0)
+#define VF_MBX_ARQBAH1 0x00006000 /* Reset Source: CORER */
+#define VF_MBX_ARQBAH1_ARQBAH_S 0
+#define VF_MBX_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_ARQBAL1 0x00006C00 /* Reset Source: CORER */
+#define VF_MBX_ARQBAL1_ARQBAL_LSB_S 0
+#define VF_MBX_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_MBX_ARQBAL1_ARQBAL_S 6
+#define VF_MBX_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_ARQH1 0x00007400 /* Reset Source: CORER */
+#define VF_MBX_ARQH1_ARQH_S 0
+#define VF_MBX_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ARQLEN1 0x00008000 /* Reset Source: PFR */
+#define VF_MBX_ARQLEN1_ARQLEN_S 0
+#define VF_MBX_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ARQLEN1_ARQVFE_S 28
+#define VF_MBX_ARQLEN1_ARQVFE_M BIT(28)
+#define VF_MBX_ARQLEN1_ARQOVFL_S 29
+#define VF_MBX_ARQLEN1_ARQOVFL_M BIT(29)
+#define VF_MBX_ARQLEN1_ARQCRIT_S 30
+#define VF_MBX_ARQLEN1_ARQCRIT_M BIT(30)
+#define VF_MBX_ARQLEN1_ARQENABLE_S 31
+#define VF_MBX_ARQLEN1_ARQENABLE_M BIT(31)
+#define VF_MBX_ARQT1 0x00007000 /* Reset Source: CORER */
+#define VF_MBX_ARQT1_ARQT_S 0
+#define VF_MBX_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ATQBAH1 0x00007800 /* Reset Source: CORER */
+#define VF_MBX_ATQBAH1_ATQBAH_S 0
+#define VF_MBX_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_ATQBAL1 0x00007C00 /* Reset Source: CORER */
+#define VF_MBX_ATQBAL1_ATQBAL_S 6
+#define VF_MBX_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_ATQH1 0x00006400 /* Reset Source: CORER */
+#define VF_MBX_ATQH1_ATQH_S 0
+#define VF_MBX_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ATQLEN1 0x00006800 /* Reset Source: PFR */
+#define VF_MBX_ATQLEN1_ATQLEN_S 0
+#define VF_MBX_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_ATQLEN1_ATQVFE_S 28
+#define VF_MBX_ATQLEN1_ATQVFE_M BIT(28)
+#define VF_MBX_ATQLEN1_ATQOVFL_S 29
+#define VF_MBX_ATQLEN1_ATQOVFL_M BIT(29)
+#define VF_MBX_ATQLEN1_ATQCRIT_S 30
+#define VF_MBX_ATQLEN1_ATQCRIT_M BIT(30)
+#define VF_MBX_ATQLEN1_ATQENABLE_S 31
+#define VF_MBX_ATQLEN1_ATQENABLE_M BIT(31)
+#define VF_MBX_ATQT1 0x00008400 /* Reset Source: CORER */
+#define VF_MBX_ATQT1_ATQT_S 0
+#define VF_MBX_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
+#define PFPCI_VF_FLUSH_DONE1 0x0000E400 /* Reset Source: PCIR */
+#define PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_S 0
+#define PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_M BIT(0)
+#define VFGEN_RSTAT1 0x00008800 /* Reset Source: VFR */
+#define VFGEN_RSTAT1_VFR_STATE_S 0
+#define VFGEN_RSTAT1_VFR_STATE_M MAKEMASK(0x3, 0)
+#define VFINT_DYN_CTL0 0x00005C00 /* Reset Source: CORER */
+#define VFINT_DYN_CTL0_INTENA_S 0
+#define VFINT_DYN_CTL0_INTENA_M BIT(0)
+#define VFINT_DYN_CTL0_CLEARPBA_S 1
+#define VFINT_DYN_CTL0_CLEARPBA_M BIT(1)
+#define VFINT_DYN_CTL0_SWINT_TRIG_S 2
+#define VFINT_DYN_CTL0_SWINT_TRIG_M BIT(2)
+#define VFINT_DYN_CTL0_ITR_INDX_S 3
+#define VFINT_DYN_CTL0_ITR_INDX_M MAKEMASK(0x3, 3)
+#define VFINT_DYN_CTL0_INTERVAL_S 5
+#define VFINT_DYN_CTL0_INTERVAL_M MAKEMASK(0xFFF, 5)
+#define VFINT_DYN_CTL0_SW_ITR_INDX_ENA_S 24
+#define VFINT_DYN_CTL0_SW_ITR_INDX_ENA_M BIT(24)
+#define VFINT_DYN_CTL0_SW_ITR_INDX_S 25
+#define VFINT_DYN_CTL0_SW_ITR_INDX_M MAKEMASK(0x3, 25)
+#define VFINT_DYN_CTL0_WB_ON_ITR_S 30
+#define VFINT_DYN_CTL0_WB_ON_ITR_M BIT(30)
+#define VFINT_DYN_CTL0_INTENA_MSK_S 31
+#define VFINT_DYN_CTL0_INTENA_MSK_M BIT(31)
+#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define VFINT_DYN_CTLN_MAX_INDEX 63
+#define VFINT_DYN_CTLN_INTENA_S 0
+#define VFINT_DYN_CTLN_INTENA_M BIT(0)
+#define VFINT_DYN_CTLN_CLEARPBA_S 1
+#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define VFINT_DYN_CTLN_SWINT_TRIG_S 2
+#define VFINT_DYN_CTLN_SWINT_TRIG_M BIT(2)
+#define VFINT_DYN_CTLN_ITR_INDX_S 3
+#define VFINT_DYN_CTLN_ITR_INDX_M MAKEMASK(0x3, 3)
+#define VFINT_DYN_CTLN_INTERVAL_S 5
+#define VFINT_DYN_CTLN_INTERVAL_M MAKEMASK(0xFFF, 5)
+#define VFINT_DYN_CTLN_SW_ITR_INDX_ENA_S 24
+#define VFINT_DYN_CTLN_SW_ITR_INDX_ENA_M BIT(24)
+#define VFINT_DYN_CTLN_SW_ITR_INDX_S 25
+#define VFINT_DYN_CTLN_SW_ITR_INDX_M MAKEMASK(0x3, 25)
+#define VFINT_DYN_CTLN_WB_ON_ITR_S 30
+#define VFINT_DYN_CTLN_WB_ON_ITR_M BIT(30)
+#define VFINT_DYN_CTLN_INTENA_MSK_S 31
+#define VFINT_DYN_CTLN_INTENA_MSK_M BIT(31)
+#define VFINT_ITR0(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define VFINT_ITR0_MAX_INDEX 2
+#define VFINT_ITR0_INTERVAL_S 0
+#define VFINT_ITR0_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 12)) /* _i=0...2, _j=0...63 */ /* Reset Source: CORER */
+#define VFINT_ITRN_MAX_INDEX 2
+#define VFINT_ITRN_INTERVAL_S 0
+#define VFINT_ITRN_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define QRX_TAIL1(_QRX) (0x00002000 + ((_QRX) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define QRX_TAIL1_MAX_INDEX 255
+#define QRX_TAIL1_TAIL_S 0
+#define QRX_TAIL1_TAIL_M MAKEMASK(0x1FFF, 0)
+#define QTX_TAIL(_DBQM) (0x00000000 + ((_DBQM) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define QTX_TAIL_MAX_INDEX 255
+#define QTX_TAIL_QTX_COMM_DBELL_S 0
+#define QTX_TAIL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_CPM_ARQBAH1 0x0000F060 /* Reset Source: CORER */
+#define VF_MBX_CPM_ARQBAH1_ARQBAH_S 0
+#define VF_MBX_CPM_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_CPM_ARQBAL1 0x0000F050 /* Reset Source: CORER */
+#define VF_MBX_CPM_ARQBAL1_ARQBAL_LSB_S 0
+#define VF_MBX_CPM_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_MBX_CPM_ARQBAL1_ARQBAL_S 6
+#define VF_MBX_CPM_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_CPM_ARQH1 0x0000F080 /* Reset Source: CORER */
+#define VF_MBX_CPM_ARQH1_ARQH_S 0
+#define VF_MBX_CPM_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ARQLEN1 0x0000F070 /* Reset Source: PFR */
+#define VF_MBX_CPM_ARQLEN1_ARQLEN_S 0
+#define VF_MBX_CPM_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ARQLEN1_ARQVFE_S 28
+#define VF_MBX_CPM_ARQLEN1_ARQVFE_M BIT(28)
+#define VF_MBX_CPM_ARQLEN1_ARQOVFL_S 29
+#define VF_MBX_CPM_ARQLEN1_ARQOVFL_M BIT(29)
+#define VF_MBX_CPM_ARQLEN1_ARQCRIT_S 30
+#define VF_MBX_CPM_ARQLEN1_ARQCRIT_M BIT(30)
+#define VF_MBX_CPM_ARQLEN1_ARQENABLE_S 31
+#define VF_MBX_CPM_ARQLEN1_ARQENABLE_M BIT(31)
+#define VF_MBX_CPM_ARQT1 0x0000F090 /* Reset Source: CORER */
+#define VF_MBX_CPM_ARQT1_ARQT_S 0
+#define VF_MBX_CPM_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ATQBAH1 0x0000F010 /* Reset Source: CORER */
+#define VF_MBX_CPM_ATQBAH1_ATQBAH_S 0
+#define VF_MBX_CPM_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_CPM_ATQBAL1 0x0000F000 /* Reset Source: CORER */
+#define VF_MBX_CPM_ATQBAL1_ATQBAL_S 6
+#define VF_MBX_CPM_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_CPM_ATQH1 0x0000F030 /* Reset Source: CORER */
+#define VF_MBX_CPM_ATQH1_ATQH_S 0
+#define VF_MBX_CPM_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ATQLEN1 0x0000F020 /* Reset Source: PFR */
+#define VF_MBX_CPM_ATQLEN1_ATQLEN_S 0
+#define VF_MBX_CPM_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_CPM_ATQLEN1_ATQVFE_S 28
+#define VF_MBX_CPM_ATQLEN1_ATQVFE_M BIT(28)
+#define VF_MBX_CPM_ATQLEN1_ATQOVFL_S 29
+#define VF_MBX_CPM_ATQLEN1_ATQOVFL_M BIT(29)
+#define VF_MBX_CPM_ATQLEN1_ATQCRIT_S 30
+#define VF_MBX_CPM_ATQLEN1_ATQCRIT_M BIT(30)
+#define VF_MBX_CPM_ATQLEN1_ATQENABLE_S 31
+#define VF_MBX_CPM_ATQLEN1_ATQENABLE_M BIT(31)
+#define VF_MBX_CPM_ATQT1 0x0000F040 /* Reset Source: CORER */
+#define VF_MBX_CPM_ATQT1_ATQT_S 0
+#define VF_MBX_CPM_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ARQBAH1 0x00020060 /* Reset Source: CORER */
+#define VF_MBX_HLP_ARQBAH1_ARQBAH_S 0
+#define VF_MBX_HLP_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_HLP_ARQBAL1 0x00020050 /* Reset Source: CORER */
+#define VF_MBX_HLP_ARQBAL1_ARQBAL_LSB_S 0
+#define VF_MBX_HLP_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_MBX_HLP_ARQBAL1_ARQBAL_S 6
+#define VF_MBX_HLP_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_HLP_ARQH1 0x00020080 /* Reset Source: CORER */
+#define VF_MBX_HLP_ARQH1_ARQH_S 0
+#define VF_MBX_HLP_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ARQLEN1 0x00020070 /* Reset Source: PFR */
+#define VF_MBX_HLP_ARQLEN1_ARQLEN_S 0
+#define VF_MBX_HLP_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ARQLEN1_ARQVFE_S 28
+#define VF_MBX_HLP_ARQLEN1_ARQVFE_M BIT(28)
+#define VF_MBX_HLP_ARQLEN1_ARQOVFL_S 29
+#define VF_MBX_HLP_ARQLEN1_ARQOVFL_M BIT(29)
+#define VF_MBX_HLP_ARQLEN1_ARQCRIT_S 30
+#define VF_MBX_HLP_ARQLEN1_ARQCRIT_M BIT(30)
+#define VF_MBX_HLP_ARQLEN1_ARQENABLE_S 31
+#define VF_MBX_HLP_ARQLEN1_ARQENABLE_M BIT(31)
+#define VF_MBX_HLP_ARQT1 0x00020090 /* Reset Source: CORER */
+#define VF_MBX_HLP_ARQT1_ARQT_S 0
+#define VF_MBX_HLP_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ATQBAH1 0x00020010 /* Reset Source: CORER */
+#define VF_MBX_HLP_ATQBAH1_ATQBAH_S 0
+#define VF_MBX_HLP_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_HLP_ATQBAL1 0x00020000 /* Reset Source: CORER */
+#define VF_MBX_HLP_ATQBAL1_ATQBAL_S 6
+#define VF_MBX_HLP_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_HLP_ATQH1 0x00020030 /* Reset Source: CORER */
+#define VF_MBX_HLP_ATQH1_ATQH_S 0
+#define VF_MBX_HLP_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ATQLEN1 0x00020020 /* Reset Source: PFR */
+#define VF_MBX_HLP_ATQLEN1_ATQLEN_S 0
+#define VF_MBX_HLP_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_HLP_ATQLEN1_ATQVFE_S 28
+#define VF_MBX_HLP_ATQLEN1_ATQVFE_M BIT(28)
+#define VF_MBX_HLP_ATQLEN1_ATQOVFL_S 29
+#define VF_MBX_HLP_ATQLEN1_ATQOVFL_M BIT(29)
+#define VF_MBX_HLP_ATQLEN1_ATQCRIT_S 30
+#define VF_MBX_HLP_ATQLEN1_ATQCRIT_M BIT(30)
+#define VF_MBX_HLP_ATQLEN1_ATQENABLE_S 31
+#define VF_MBX_HLP_ATQLEN1_ATQENABLE_M BIT(31)
+#define VF_MBX_HLP_ATQT1 0x00020040 /* Reset Source: CORER */
+#define VF_MBX_HLP_ATQT1_ATQT_S 0
+#define VF_MBX_HLP_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ARQBAH1 0x00021060 /* Reset Source: CORER */
+#define VF_MBX_PSM_ARQBAH1_ARQBAH_S 0
+#define VF_MBX_PSM_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_PSM_ARQBAL1 0x00021050 /* Reset Source: CORER */
+#define VF_MBX_PSM_ARQBAL1_ARQBAL_LSB_S 0
+#define VF_MBX_PSM_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_MBX_PSM_ARQBAL1_ARQBAL_S 6
+#define VF_MBX_PSM_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_PSM_ARQH1 0x00021080 /* Reset Source: CORER */
+#define VF_MBX_PSM_ARQH1_ARQH_S 0
+#define VF_MBX_PSM_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ARQLEN1 0x00021070 /* Reset Source: PFR */
+#define VF_MBX_PSM_ARQLEN1_ARQLEN_S 0
+#define VF_MBX_PSM_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ARQLEN1_ARQVFE_S 28
+#define VF_MBX_PSM_ARQLEN1_ARQVFE_M BIT(28)
+#define VF_MBX_PSM_ARQLEN1_ARQOVFL_S 29
+#define VF_MBX_PSM_ARQLEN1_ARQOVFL_M BIT(29)
+#define VF_MBX_PSM_ARQLEN1_ARQCRIT_S 30
+#define VF_MBX_PSM_ARQLEN1_ARQCRIT_M BIT(30)
+#define VF_MBX_PSM_ARQLEN1_ARQENABLE_S 31
+#define VF_MBX_PSM_ARQLEN1_ARQENABLE_M BIT(31)
+#define VF_MBX_PSM_ARQT1 0x00021090 /* Reset Source: CORER */
+#define VF_MBX_PSM_ARQT1_ARQT_S 0
+#define VF_MBX_PSM_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ATQBAH1 0x00021010 /* Reset Source: CORER */
+#define VF_MBX_PSM_ATQBAH1_ATQBAH_S 0
+#define VF_MBX_PSM_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_MBX_PSM_ATQBAL1 0x00021000 /* Reset Source: CORER */
+#define VF_MBX_PSM_ATQBAL1_ATQBAL_S 6
+#define VF_MBX_PSM_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_MBX_PSM_ATQH1 0x00021030 /* Reset Source: CORER */
+#define VF_MBX_PSM_ATQH1_ATQH_S 0
+#define VF_MBX_PSM_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ATQLEN1 0x00021020 /* Reset Source: PFR */
+#define VF_MBX_PSM_ATQLEN1_ATQLEN_S 0
+#define VF_MBX_PSM_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_MBX_PSM_ATQLEN1_ATQVFE_S 28
+#define VF_MBX_PSM_ATQLEN1_ATQVFE_M BIT(28)
+#define VF_MBX_PSM_ATQLEN1_ATQOVFL_S 29
+#define VF_MBX_PSM_ATQLEN1_ATQOVFL_M BIT(29)
+#define VF_MBX_PSM_ATQLEN1_ATQCRIT_S 30
+#define VF_MBX_PSM_ATQLEN1_ATQCRIT_M BIT(30)
+#define VF_MBX_PSM_ATQLEN1_ATQENABLE_S 31
+#define VF_MBX_PSM_ATQLEN1_ATQENABLE_M BIT(31)
+#define VF_MBX_PSM_ATQT1 0x00021040 /* Reset Source: CORER */
+#define VF_MBX_PSM_ATQT1_ATQT_S 0
+#define VF_MBX_PSM_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ARQBAH1 0x0000F160 /* Reset Source: CORER */
+#define VF_SB_CPM_ARQBAH1_ARQBAH_S 0
+#define VF_SB_CPM_ARQBAH1_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_SB_CPM_ARQBAL1 0x0000F150 /* Reset Source: CORER */
+#define VF_SB_CPM_ARQBAL1_ARQBAL_LSB_S 0
+#define VF_SB_CPM_ARQBAL1_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
+#define VF_SB_CPM_ARQBAL1_ARQBAL_S 6
+#define VF_SB_CPM_ARQBAL1_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_SB_CPM_ARQH1 0x0000F180 /* Reset Source: CORER */
+#define VF_SB_CPM_ARQH1_ARQH_S 0
+#define VF_SB_CPM_ARQH1_ARQH_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ARQLEN1 0x0000F170 /* Reset Source: PFR */
+#define VF_SB_CPM_ARQLEN1_ARQLEN_S 0
+#define VF_SB_CPM_ARQLEN1_ARQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ARQLEN1_ARQVFE_S 28
+#define VF_SB_CPM_ARQLEN1_ARQVFE_M BIT(28)
+#define VF_SB_CPM_ARQLEN1_ARQOVFL_S 29
+#define VF_SB_CPM_ARQLEN1_ARQOVFL_M BIT(29)
+#define VF_SB_CPM_ARQLEN1_ARQCRIT_S 30
+#define VF_SB_CPM_ARQLEN1_ARQCRIT_M BIT(30)
+#define VF_SB_CPM_ARQLEN1_ARQENABLE_S 31
+#define VF_SB_CPM_ARQLEN1_ARQENABLE_M BIT(31)
+#define VF_SB_CPM_ARQT1 0x0000F190 /* Reset Source: CORER */
+#define VF_SB_CPM_ARQT1_ARQT_S 0
+#define VF_SB_CPM_ARQT1_ARQT_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ATQBAH1 0x0000F110 /* Reset Source: CORER */
+#define VF_SB_CPM_ATQBAH1_ATQBAH_S 0
+#define VF_SB_CPM_ATQBAH1_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VF_SB_CPM_ATQBAL1 0x0000F100 /* Reset Source: CORER */
+#define VF_SB_CPM_ATQBAL1_ATQBAL_S 6
+#define VF_SB_CPM_ATQBAL1_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
+#define VF_SB_CPM_ATQH1 0x0000F130 /* Reset Source: CORER */
+#define VF_SB_CPM_ATQH1_ATQH_S 0
+#define VF_SB_CPM_ATQH1_ATQH_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ATQLEN1 0x0000F120 /* Reset Source: PFR */
+#define VF_SB_CPM_ATQLEN1_ATQLEN_S 0
+#define VF_SB_CPM_ATQLEN1_ATQLEN_M MAKEMASK(0x3FF, 0)
+#define VF_SB_CPM_ATQLEN1_ATQVFE_S 28
+#define VF_SB_CPM_ATQLEN1_ATQVFE_M BIT(28)
+#define VF_SB_CPM_ATQLEN1_ATQOVFL_S 29
+#define VF_SB_CPM_ATQLEN1_ATQOVFL_M BIT(29)
+#define VF_SB_CPM_ATQLEN1_ATQCRIT_S 30
+#define VF_SB_CPM_ATQLEN1_ATQCRIT_M BIT(30)
+#define VF_SB_CPM_ATQLEN1_ATQENABLE_S 31
+#define VF_SB_CPM_ATQLEN1_ATQENABLE_M BIT(31)
+#define VF_SB_CPM_ATQT1 0x0000F140 /* Reset Source: CORER */
+#define VF_SB_CPM_ATQT1_ATQT_S 0
+#define VF_SB_CPM_ATQT1_ATQT_M MAKEMASK(0x3FF, 0)
+#define VFINT_DYN_CTL(_i) (0x00023000 + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */
+#define VFINT_DYN_CTL_MAX_INDEX 7
+#define VFINT_DYN_CTL_INTENA_S 0
+#define VFINT_DYN_CTL_INTENA_M BIT(0)
+#define VFINT_DYN_CTL_CLEARPBA_S 1
+#define VFINT_DYN_CTL_CLEARPBA_M BIT(1)
+#define VFINT_DYN_CTL_SWINT_TRIG_S 2
+#define VFINT_DYN_CTL_SWINT_TRIG_M BIT(2)
+#define VFINT_DYN_CTL_ITR_INDX_S 3
+#define VFINT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3)
+#define VFINT_DYN_CTL_INTERVAL_S 5
+#define VFINT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5)
+#define VFINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
+#define VFINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24)
+#define VFINT_DYN_CTL_SW_ITR_INDX_S 25
+#define VFINT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25)
+#define VFINT_DYN_CTL_WB_ON_ITR_S 30
+#define VFINT_DYN_CTL_WB_ON_ITR_M BIT(30)
+#define VFINT_DYN_CTL_INTENA_MSK_S 31
+#define VFINT_DYN_CTL_INTENA_MSK_M BIT(31)
+#define VFINT_ITR_0(_i) (0x00023004 + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */
+#define VFINT_ITR_0_MAX_INDEX 7
+#define VFINT_ITR_0_INTERVAL_S 0
+#define VFINT_ITR_0_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define VFINT_ITR_1(_i) (0x00023008 + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */
+#define VFINT_ITR_1_MAX_INDEX 7
+#define VFINT_ITR_1_INTERVAL_S 0
+#define VFINT_ITR_1_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define VFINT_ITR_2(_i) (0x0002300C + ((_i) * 4096)) /* _i=0...7 */ /* Reset Source: CORER */
+#define VFINT_ITR_2_MAX_INDEX 7
+#define VFINT_ITR_2_INTERVAL_S 0
+#define VFINT_ITR_2_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define VFQRX_TAIL(_QRX) (0x0002E000 + ((_QRX) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VFQRX_TAIL_MAX_INDEX 255
+#define VFQRX_TAIL_TAIL_S 0
+#define VFQRX_TAIL_TAIL_M MAKEMASK(0x1FFF, 0)
+#define VFQTX_COMM_DBELL(_DBQM) (0x00030000 + ((_DBQM) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define VFQTX_COMM_DBELL_MAX_INDEX 255
+#define VFQTX_COMM_DBELL_QTX_COMM_DBELL_S 0
+#define VFQTX_COMM_DBELL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFQTX_COMM_DBLQ_DBELL(_DBLQ) (0x00022000 + ((_DBLQ) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
+#define VFQTX_COMM_DBLQ_DBELL_MAX_INDEX 3
+#define VFQTX_COMM_DBLQ_DBELL_TAIL_S 0
+#define VFQTX_COMM_DBLQ_DBELL_TAIL_M MAKEMASK(0x1FFF, 0)
+#define MSIX_TMSG1(_i) (0x00000008 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */
+#define MSIX_TMSG1_MAX_INDEX 64
+#define MSIX_TMSG1_MSIXTMSG_S 0
+#define MSIX_TMSG1_MSIXTMSG_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFPE_AEQALLOC1 0x0000A400 /* Reset Source: VFR */
+#define VFPE_AEQALLOC1_AECOUNT_S 0
+#define VFPE_AEQALLOC1_AECOUNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFPE_CCQPHIGH1 0x00009800 /* Reset Source: VFR */
+#define VFPE_CCQPHIGH1_PECCQPHIGH_S 0
+#define VFPE_CCQPHIGH1_PECCQPHIGH_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFPE_CCQPLOW1 0x0000AC00 /* Reset Source: VFR */
+#define VFPE_CCQPLOW1_PECCQPLOW_S 0
+#define VFPE_CCQPLOW1_PECCQPLOW_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFPE_CCQPSTATUS1 0x0000B800 /* Reset Source: VFR */
+#define VFPE_CCQPSTATUS1_CCQP_DONE_S 0
+#define VFPE_CCQPSTATUS1_CCQP_DONE_M BIT(0)
+#define VFPE_CCQPSTATUS1_HMC_PROFILE_S 4
+#define VFPE_CCQPSTATUS1_HMC_PROFILE_M MAKEMASK(0x7, 4)
+#define VFPE_CCQPSTATUS1_RDMA_EN_VFS_S 16
+#define VFPE_CCQPSTATUS1_RDMA_EN_VFS_M MAKEMASK(0x3F, 16)
+#define VFPE_CCQPSTATUS1_CCQP_ERR_S 31
+#define VFPE_CCQPSTATUS1_CCQP_ERR_M BIT(31)
+#define VFPE_CQACK1 0x0000B000 /* Reset Source: VFR */
+#define VFPE_CQACK1_PECQID_S 0
+#define VFPE_CQACK1_PECQID_M MAKEMASK(0x7FFFF, 0)
+#define VFPE_CQARM1 0x0000B400 /* Reset Source: VFR */
+#define VFPE_CQARM1_PECQID_S 0
+#define VFPE_CQARM1_PECQID_M MAKEMASK(0x7FFFF, 0)
+#define VFPE_CQPDB1 0x0000BC00 /* Reset Source: VFR */
+#define VFPE_CQPDB1_WQHEAD_S 0
+#define VFPE_CQPDB1_WQHEAD_M MAKEMASK(0x7FF, 0)
+#define VFPE_CQPERRCODES1 0x00009C00 /* Reset Source: VFR */
+#define VFPE_CQPERRCODES1_CQP_MINOR_CODE_S 0
+#define VFPE_CQPERRCODES1_CQP_MINOR_CODE_M MAKEMASK(0xFFFF, 0)
+#define VFPE_CQPERRCODES1_CQP_MAJOR_CODE_S 16
+#define VFPE_CQPERRCODES1_CQP_MAJOR_CODE_M MAKEMASK(0xFFFF, 16)
+#define VFPE_CQPTAIL1 0x0000A000 /* Reset Source: VFR */
+#define VFPE_CQPTAIL1_WQTAIL_S 0
+#define VFPE_CQPTAIL1_WQTAIL_M MAKEMASK(0x7FF, 0)
+#define VFPE_CQPTAIL1_CQP_OP_ERR_S 31
+#define VFPE_CQPTAIL1_CQP_OP_ERR_M BIT(31)
+#define VFPE_IPCONFIG01 0x00008C00 /* Reset Source: VFR */
+#define VFPE_IPCONFIG01_PEIPID_S 0
+#define VFPE_IPCONFIG01_PEIPID_M MAKEMASK(0xFFFF, 0)
+#define VFPE_IPCONFIG01_USEENTIREIDRANGE_S 16
+#define VFPE_IPCONFIG01_USEENTIREIDRANGE_M BIT(16)
+#define VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_S 17
+#define VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_M BIT(17)
+#define VFPE_MRTEIDXMASK1(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define VFPE_MRTEIDXMASK1_MAX_INDEX 255
+#define VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_S 0
+#define VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
+#define VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset Source: VFR */
+#define VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_S 0
+#define VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
+#define VFPE_TCPNOWTIMER1 0x0000A800 /* Reset Source: VFR */
+#define VFPE_TCPNOWTIMER1_TCP_NOW_S 0
+#define VFPE_TCPNOWTIMER1_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0)
+#define VFPE_WQEALLOC1 0x0000C000 /* Reset Source: VFR */
+#define VFPE_WQEALLOC1_PEQPID_S 0
+#define VFPE_WQEALLOC1_PEQPID_M MAKEMASK(0x3FFFF, 0)
+#define VFPE_WQEALLOC1_WQE_DESC_INDEX_S 20
+#define VFPE_WQEALLOC1_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20)
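+
+/*
+ * Usage sketch (illustrative only, not part of the generated register map):
+ * each register field above is described by a shift macro (_S) and a mask
+ * macro (_M) built from BIT()/MAKEMASK(). Given a 32-bit value "val" read
+ * from a register, fields are typically extracted as follows:
+ *
+ *	busy  = (val & PFTSYN_SEM_BUSY_M) >> PFTSYN_SEM_BUSY_S;
+ *	owner = (val & PFTSYN_SEM_PF_OWNER_M) >> PFTSYN_SEM_PF_OWNER_S;
+ */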
+
+#endif
Index: sys/dev/ice/ice_iflib.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_iflib.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_iflib.h
+ * @brief main header for the iflib driver implementation
+ *
+ * Contains the definitions for various structures used by the iflib driver
+ * implementation, including the Tx and Rx queue structures and the ice_softc
+ * structure.
+ */
+
+#ifndef _ICE_IFLIB_H_
+#define _ICE_IFLIB_H_
+
+/* include kernel options first */
+#include "ice_opts.h"
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+#include <net/iflib.h>
+#include "ifdi_if.h"
+
+#include "ice_lib.h"
+#include "ice_osdep.h"
+#include "ice_resmgr.h"
+#include "ice_type.h"
+#include "ice_features.h"
+
+/**
+ * ASSERT_CTX_LOCKED - Assert that the iflib context lock is held
+ * @sc: ice softc pointer
+ *
+ * Macro to trigger an assertion if the iflib context lock is not
+ * currently held.
+ */
+#define ASSERT_CTX_LOCKED(sc) sx_assert((sc)->iflib_ctx_lock, SA_XLOCKED)
+
+/**
+ * IFLIB_CTX_LOCK - lock the iflib context lock
+ * @sc: ice softc pointer
+ *
+ * Macro used to lock the iflib context lock.
+ */
+#define IFLIB_CTX_LOCK(sc) sx_xlock((sc)->iflib_ctx_lock)
+
+/**
+ * IFLIB_CTX_UNLOCK - unlock the iflib context lock
+ * @sc: ice softc pointer
+ *
+ * Macro used to unlock the iflib context lock.
+ */
+#define IFLIB_CTX_UNLOCK(sc) sx_xunlock((sc)->iflib_ctx_lock)
+
+/**
+ * ASSERT_CFG_LOCKED - Assert that a configuration lock is held
+ * @sc: ice softc pointer
+ *
+ * Macro used by ice_lib.c to verify that certain functions are called while
+ * holding a configuration lock. For the iflib implementation, this will be
+ * the iflib context lock.
+ */
+#define ASSERT_CFG_LOCKED(sc) ASSERT_CTX_LOCKED(sc)
+
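+/*
+ * Usage sketch (illustrative only): configuration paths in the iflib driver
+ * take the context lock around calls into ice_lib.c, which in turn asserts
+ * it via ASSERT_CFG_LOCKED():
+ *
+ *	IFLIB_CTX_LOCK(sc);
+ *	error = some_ice_lib_config_function(sc);
+ *	IFLIB_CTX_UNLOCK(sc);
+ *
+ * "some_ice_lib_config_function" is a hypothetical placeholder, not an
+ * actual driver routine.
+ */
+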
+/**
+ * ICE_IFLIB_MAX_DESC_COUNT - Maximum ring size for iflib
+ *
+ * The iflib stack currently requires that the ring size, or number of
+ * descriptors, be a power of 2. The ice hardware is limited to a maximum of
+ * 8160 descriptors, which is not quite 2^13. Limit the maximum ring size for
+ * iflib to just 2^12 (4096).
+ */
+#define ICE_IFLIB_MAX_DESC_COUNT 4096
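+
+/*
+ * Illustrative check (an assumption, not part of this interface): a caller
+ * validating a requested ring size against this limit could use powerof2()
+ * from sys/param.h, e.g.:
+ *
+ *	if (!powerof2(ndesc) || ndesc > ICE_IFLIB_MAX_DESC_COUNT)
+ *		return (EINVAL);
+ */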
+
+/**
+ * @struct ice_irq_vector
+ * @brief Driver irq vector structure
+ *
+ * ice_lib.c requires the following parameters:
+ * @me: the vector number
+ *
+ * Other parameters may be iflib driver specific
+ *
+ * The iflib driver uses a single hardware interrupt per Rx queue, and uses
+ * software interrupts for the Tx queues.
+ */
+struct ice_irq_vector {
+ u32 me;
+
+ struct if_irq irq;
+};
+
+/**
+ * @struct ice_tx_queue
+ * @brief Driver Tx queue structure
+ *
+ * ice_lib.c requires the following parameters:
+ * @vsi: backpointer to the VSI structure
+ * @me: this queue's index into the queue array
+ * @irqv: always NULL for iflib
+ * @desc_count: the number of descriptors
+ * @tx_paddr: the physical address for this queue
+ * @q_teid: the Tx queue TEID returned from firmware
+ * @stats: queue statistics
+ *
+ * Other parameters may be iflib driver specific
+ */
+struct ice_tx_queue {
+ struct ice_vsi *vsi;
+ struct ice_tx_desc *tx_base;
+ bus_addr_t tx_paddr;
+ struct tx_stats stats;
+ u64 tso;
+ u16 desc_count;
+ u32 tail;
+ struct ice_irq_vector *irqv;
+ u32 q_teid;
+ u32 me;
+
+ /* descriptor writeback status */
+ qidx_t *tx_rsq;
+ qidx_t tx_rs_cidx;
+ qidx_t tx_rs_pidx;
+ qidx_t tx_cidx_processed;
+};
+
+/**
+ * @struct ice_rx_queue
+ * @brief Driver Rx queue structure
+ *
+ * ice_lib.c requires the following parameters:
+ * @vsi: backpointer to the VSI structure
+ * @me: this queue's index into the queue array
+ * @irqv: pointer to vector structure associated with this queue
+ * @desc_count: the number of descriptors
+ * @rx_paddr: the physical address for this queue
+ * @tail: the tail register address for this queue
+ * @stats: queue statistics
+ *
+ * Other parameters may be iflib driver specific
+ */
+struct ice_rx_queue {
+ struct ice_vsi *vsi;
+ union ice_32b_rx_flex_desc *rx_base;
+ bus_addr_t rx_paddr;
+ struct rx_stats stats;
+ u16 desc_count;
+ u32 tail;
+ struct ice_irq_vector *irqv;
+ u32 me;
+
+ struct if_irq que_irq;
+};
+
+/**
+ * @struct ice_softc
+ * @brief main structure representing one device
+ *
+ * ice_lib.c requires the following parameters:
+ * @all_vsi: the array of all allocated VSIs
+ * @debug_sysctls: sysctl node for debug sysctls
+ * @dev: device_t pointer
+ * @feat_en: bitmap of enabled driver features
+ * @hw: embedded ice_hw structure
+ * @ifp: pointer to the ifnet structure
+ * @link_up: boolean indicating if link is up
+ * @num_available_vsi: size of the VSI array
+ * @pf_vsi: embedded VSI structure for the main PF VSI
+ * @rx_qmgr: queue manager for Rx queues
+ * @soft_stats: software statistics for this device
+ * @state: driver state flags
+ * @stats: hardware statistics for this device
+ * @tx_qmgr: queue manager for Tx queues
+ * @vsi_sysctls: sysctl node for all VSI sysctls
+ * @enable_tx_fc_filter: boolean indicating if the Tx FC filter is enabled
+ * @enable_tx_lldp_filter: boolean indicating if the Tx LLDP filter is enabled
+ * @rebuild_ticks: indicates when a post-reset rebuild started
+ * @imgr: resource manager for interrupt allocations
+ * @pf_imap: interrupt mapping for PF LAN interrupts
+ * @lan_vectors: # of vectors used by LAN driver (length of pf_imap)
+ * @ldo_tlv: LAN Default Override settings from NVM
+ *
+ * ice_iov.c requires the following parameters (when PCI_IOV is defined):
+ * @vfs: array of VF context structures
+ * @num_vfs: number of VFs to use for SR-IOV
+ *
+ * The main representation for a single OS device, used to represent a single
+ * physical function.
+ */
+struct ice_softc {
+ struct ice_hw hw;
+ struct ice_vsi pf_vsi; /* Main PF VSI */
+
+ char admin_mtx_name[16]; /* name of the admin mutex */
+ struct mtx admin_mtx; /* mutex to protect the admin timer */
+ struct callout admin_timer; /* timer to trigger admin task */
+
+ struct ice_vsi **all_vsi; /* Array of VSI pointers */
+ u16 num_available_vsi; /* Size of VSI array */
+
+ struct sysctl_oid *vsi_sysctls; /* Sysctl node for VSI sysctls */
+ struct sysctl_oid *debug_sysctls; /* Sysctl node for debug sysctls */
+
+ device_t dev;
+ if_ctx_t ctx;
+ if_shared_ctx_t sctx;
+ if_softc_ctx_t scctx;
+ struct ifmedia *media;
+ struct ifnet *ifp;
+
+ /* device statistics */
+ struct ice_pf_hw_stats stats;
+ struct ice_pf_sw_stats soft_stats;
+
+ /* Tx/Rx queue managers */
+ struct ice_resmgr tx_qmgr;
+ struct ice_resmgr rx_qmgr;
+
+ /* Interrupt allocation manager */
+ struct ice_resmgr imgr;
+ u16 *pf_imap;
+ int lan_vectors;
+
+ /* iflib Tx/Rx queue count sysctl values */
+ int ifc_sysctl_ntxqs;
+ int ifc_sysctl_nrxqs;
+
+ /* IRQ Vector data */
+ struct resource *msix_table;
+ int num_irq_vectors;
+ struct ice_irq_vector *irqvs;
+
+ /* BAR info */
+ struct ice_bar_info bar0;
+
+ /* link status */
+ bool link_up;
+
+ /* Ethertype filters enabled */
+ bool enable_tx_fc_filter;
+ bool enable_tx_lldp_filter;
+
+ int rebuild_ticks;
+
+ /* driver state flags, only access using atomic functions */
+ u32 state;
+
+ /* NVM link override settings */
+ struct ice_link_default_override_tlv ldo_tlv;
+
+ struct sx *iflib_ctx_lock;
+
+ /* Tri-state feature flags (capable/enabled) */
+ ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT);
+ ice_declare_bitmap(feat_en, ICE_FEATURE_COUNT);
+
+};
+
+#endif /* _ICE_IFLIB_H_ */
Index: sys/dev/ice/ice_iflib_recovery_txrx.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_iflib_recovery_txrx.c
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_iflib_recovery_txrx.c
+ * @brief iflib Tx/Rx ops for recovery mode
+ *
+ * Contains the if_txrx structure of operations used when the driver detects
+ * that the firmware is in recovery mode. These ops essentially do nothing and
+ * exist to prevent any chance that the stack could attempt to transmit or
+ * receive when the device is in firmware recovery mode.
+ */
+
+#include "ice_iflib.h"
+
+/*
+ * iflib txrx methods used when in recovery mode
+ */
+static int ice_recovery_txd_encap(void *arg, if_pkt_info_t pi);
+static int ice_recovery_rxd_pkt_get(void *arg, if_rxd_info_t ri);
+static void ice_recovery_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
+static int ice_recovery_txd_credits_update(void *arg, uint16_t txqid, bool clear);
+static int ice_recovery_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
+static void ice_recovery_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
+static void ice_recovery_rxd_refill(void *arg, if_rxd_update_t iru);
+
+/**
+ * @var ice_recovery_txrx
+ * @brief Tx/Rx operations for recovery mode
+ *
+ * Similar to ice_txrx, but contains pointers to functions which are no-ops.
+ * Used when the driver is in firmware recovery mode to prevent any attempt to
+ * transmit or receive packets while the hardware is not initialized.
+ */
+struct if_txrx ice_recovery_txrx = {
+ .ift_txd_encap = ice_recovery_txd_encap,
+ .ift_txd_flush = ice_recovery_txd_flush,
+ .ift_txd_credits_update = ice_recovery_txd_credits_update,
+ .ift_rxd_available = ice_recovery_rxd_available,
+ .ift_rxd_pkt_get = ice_recovery_rxd_pkt_get,
+ .ift_rxd_refill = ice_recovery_rxd_refill,
+ .ift_rxd_flush = ice_recovery_rxd_flush,
+};
+
+/**
+ * ice_recovery_txd_encap - prepare Tx descriptors for a packet
+ * @arg: the iflib softc structure pointer
+ * @pi: packet info
+ *
+ * Since the Tx queues are not initialized during recovery mode, this function
+ * does nothing.
+ *
+ * @returns ENOSYS
+ */
+static int
+ice_recovery_txd_encap(void __unused *arg, if_pkt_info_t __unused pi)
+{
+ return (ENOSYS);
+}
+
+/**
+ * ice_recovery_txd_flush - Flush Tx descriptors to hardware
+ * @arg: device specific softc pointer
+ * @txqid: the Tx queue to flush
+ * @pidx: descriptor index to advance tail to
+ *
+ * Since the Tx queues are not initialized during recovery mode, this function
+ * does nothing.
+ */
+static void
+ice_recovery_txd_flush(void __unused *arg, uint16_t __unused txqid,
+ qidx_t __unused pidx)
+{
+ ;
+}
+
+/**
+ * ice_recovery_txd_credits_update - cleanup Tx descriptors
+ * @arg: device private softc
+ * @txqid: the Tx queue to update
+ * @clear: if false, only report, do not actually clean
+ *
+ * Since the Tx queues are not initialized during recovery mode, this function
+ * always reports that no descriptors are ready.
+ *
+ * @returns 0
+ */
+static int
+ice_recovery_txd_credits_update(void __unused *arg, uint16_t __unused txqid,
+ bool __unused clear)
+{
+ return (0);
+}
+
+/**
+ * ice_recovery_rxd_available - Return number of available Rx packets
+ * @arg: device private softc
+ * @rxqid: the Rx queue id
+ * @pidx: descriptor start point
+ * @budget: maximum Rx budget
+ *
+ * Since the Rx queues are not initialized during recovery mode, this function
+ * always reports that no packets are ready.
+ *
+ * @returns 0
+ */
+static int
+ice_recovery_rxd_available(void __unused *arg, uint16_t __unused rxqid,
+ qidx_t __unused pidx, qidx_t __unused budget)
+{
+ return (0);
+}
+
+/**
+ * ice_recovery_rxd_pkt_get - Called by iflib to send data to upper layer
+ * @arg: device specific softc
+ * @ri: receive packet info
+ *
+ * Since the Rx queues are not initialized during recovery mode, this function
+ * always returns an error indicating that nothing could be done.
+ *
+ * @returns ENOSYS
+ */
+static int
+ice_recovery_rxd_pkt_get(void __unused *arg, if_rxd_info_t __unused ri)
+{
+ return (ENOSYS);
+}
+
+/**
+ * ice_recovery_rxd_refill - Prepare Rx descriptors for re-use by hardware
+ * @arg: device specific softc structure
+ * @iru: the Rx descriptor update structure
+ *
+ * Since the Rx queues are not initialized during recovery mode, this function
+ * does nothing.
+ */
+static void
+ice_recovery_rxd_refill(void __unused *arg, if_rxd_update_t __unused iru)
+{
+ ;
+}
+
+/**
+ * ice_recovery_rxd_flush - Flush Rx descriptors to hardware
+ * @arg: device specific softc pointer
+ * @rxqid: the Rx queue to flush
+ * @flidx: unused parameter
+ * @pidx: descriptor index to advance tail to
+ *
+ * Since the Rx queues are not initialized during recovery mode, this function
+ * does nothing.
+ */
+static void
+ice_recovery_rxd_flush(void __unused *arg, uint16_t __unused rxqid,
+ uint8_t flidx __unused, qidx_t __unused pidx)
+{
+ ;
+}
Index: sys/dev/ice/ice_iflib_sysctls.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_iflib_sysctls.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_iflib_sysctls.h
+ * @brief iflib-specific driver-wide sysctls
+ *
+ * Contains driver-wide sysctls related to the iflib networking stack.
+ */
+#ifndef _ICE_IFLIB_SYSCTLS_H_
+#define _ICE_IFLIB_SYSCTLS_H_
+
+/* include sysctls that are generic and not related to the iflib stack */
+#include "ice_common_sysctls.h"
+
+#endif /* _ICE_IFLIB_SYSCTLS_H_ */
Index: sys/dev/ice/ice_iflib_txrx.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_iflib_txrx.c
@@ -0,0 +1,401 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_iflib_txrx.c
+ * @brief iflib Tx/Rx hotpath
+ *
+ * Main location for the iflib Tx/Rx hotpath implementation.
+ *
+ * Contains the implementation for the iflib function callbacks and the
+ * if_txrx ops structure.
+ */
+
+#include "ice_iflib.h"
+
+/* Tx/Rx hotpath utility functions */
+#include "ice_common_txrx.h"
+
+/*
+ * iflib txrx method declarations
+ */
+static int ice_ift_txd_encap(void *arg, if_pkt_info_t pi);
+static int ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri);
+static void ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
+static int ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear);
+static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget);
+static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx);
+static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru);
+
+/* Macro to help extract the NIC-mode flexible Rx descriptor fields from the
+ * advanced 32-byte Rx descriptors.
+ */
+#define RX_FLEX_NIC(desc, field) \
+ (((struct ice_32b_rx_flex_desc_nic *)desc)->field)
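+
+/*
+ * Example use of RX_FLEX_NIC (illustrative sketch; "rxd" is a hypothetical
+ * pointer to a received flex descriptor):
+ *
+ *	hash = le32toh(RX_FLEX_NIC(&rxd->wb, rss_hash));
+ *
+ * This mirrors how ice_ift_rxd_pkt_get() below extracts the RSS hash from
+ * the NIC profile writeback layout.
+ */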
+
+/**
+ * @var ice_txrx
+ * @brief Tx/Rx operations for the iflib stack
+ *
+ * Structure defining the Tx and Rx related operations that iflib can request
+ * the driver to perform. These are the main entry points for the hot path of
+ * the transmit and receive paths in the iflib driver.
+ */
+struct if_txrx ice_txrx = {
+ .ift_txd_encap = ice_ift_txd_encap,
+ .ift_txd_flush = ice_ift_txd_flush,
+ .ift_txd_credits_update = ice_ift_txd_credits_update,
+ .ift_rxd_available = ice_ift_rxd_available,
+ .ift_rxd_pkt_get = ice_ift_rxd_pkt_get,
+ .ift_rxd_refill = ice_ift_rxd_refill,
+ .ift_rxd_flush = ice_ift_rxd_flush,
+};
+
+/**
+ * ice_ift_txd_encap - prepare Tx descriptors for a packet
+ * @arg: the iflib softc structure pointer
+ * @pi: packet info
+ *
+ * Prepares and encapsulates the given packet into Tx descriptors, in
+ * preparation for sending to the transmit engine. Sets the necessary context
+ * descriptors for TSO and other offloads, and prepares the last descriptor
+ * for the writeback status.
+ *
+ * @returns 0 on success, non-zero error code on failure.
+ */
+static int
+ice_ift_txd_encap(void *arg, if_pkt_info_t pi)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[pi->ipi_qsidx];
+ int nsegs = pi->ipi_nsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ struct ice_tx_desc *txd = NULL;
+ int i, j, mask, pidx_last;
+ u32 cmd, off;
+
+ cmd = off = 0;
+ i = pi->ipi_pidx;
+
+ /* Set up the TSO/CSUM offload */
+ if (pi->ipi_csum_flags & ICE_CSUM_OFFLOAD) {
+ /* Set up the TSO context descriptor if required */
+ if (pi->ipi_csum_flags & CSUM_TSO) {
+ if (ice_tso_detect_sparse(pi))
+ return (EFBIG);
+ i = ice_tso_setup(txq, pi);
+ }
+ ice_tx_setup_offload(txq, pi, &cmd, &off);
+ }
+ if (pi->ipi_mflags & M_VLANTAG)
+ cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+
+ mask = txq->desc_count - 1;
+ for (j = 0; j < nsegs; j++) {
+ bus_size_t seglen;
+
+ txd = &txq->tx_base[i];
+ seglen = segs[j].ds_len;
+
+ txd->buf_addr = htole64(segs[j].ds_addr);
+ txd->cmd_type_offset_bsz =
+ htole64(ICE_TX_DESC_DTYPE_DATA
+ | ((u64)cmd << ICE_TXD_QW1_CMD_S)
+ | ((u64)off << ICE_TXD_QW1_OFFSET_S)
+ | ((u64)seglen << ICE_TXD_QW1_TX_BUF_SZ_S)
+ | ((u64)htole16(pi->ipi_vtag) << ICE_TXD_QW1_L2TAG1_S));
+
+ txq->stats.tx_bytes += seglen;
+ pidx_last = i;
+ i = (i+1) & mask;
+ }
+
+ /* Set the last descriptor for report */
+#define ICE_TXD_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
+ txd->cmd_type_offset_bsz |=
+ htole64(((u64)ICE_TXD_CMD << ICE_TXD_QW1_CMD_S));
+
+ /* Add to report status array */
+ txq->tx_rsq[txq->tx_rs_pidx] = pidx_last;
+ txq->tx_rs_pidx = (txq->tx_rs_pidx+1) & mask;
+ MPASS(txq->tx_rs_pidx != txq->tx_rs_cidx);
+
+ pi->ipi_new_pidx = i;
+
+ ++txq->stats.tx_packets;
+ return (0);
+}
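+
+/*
+ * Note on the ring index math above (descriptive sketch): desc_count is
+ * assumed to be a power of two, so "mask = desc_count - 1" lets
+ * "i = (i + 1) & mask" wrap the descriptor index without a modulo, while
+ * pidx_last remembers the final data descriptor so it can be recorded in
+ * tx_rsq[] for later writeback processing.
+ */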
+
+/**
+ * ice_ift_txd_flush - Flush Tx descriptors to hardware
+ * @arg: device specific softc pointer
+ * @txqid: the Tx queue to flush
+ * @pidx: descriptor index to advance tail to
+ *
+ * Advance the Transmit Descriptor Tail (TDT). This indicates to hardware that
+ * frames are available for transmit.
+ */
+static void
+ice_ift_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
+ struct ice_hw *hw = &sc->hw;
+
+ wr32(hw, txq->tail, pidx);
+}
+
+/**
+ * ice_ift_txd_credits_update - cleanup Tx descriptors
+ * @arg: device private softc
+ * @txqid: the Tx queue to update
+ * @clear: if false, only report, do not actually clean
+ *
+ * If clear is false, iflib is asking if we *could* clean up any Tx
+ * descriptors.
+ *
+ * If clear is true, iflib is requesting to cleanup and reclaim used Tx
+ * descriptors.
+ */
+static int
+ice_ift_txd_credits_update(void *arg, uint16_t txqid, bool clear)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_tx_queue *txq = &sc->pf_vsi.tx_queues[txqid];
+
+ qidx_t processed = 0;
+ qidx_t cur, prev, ntxd, rs_cidx;
+ int32_t delta;
+ bool is_done;
+
+ rs_cidx = txq->tx_rs_cidx;
+ if (rs_cidx == txq->tx_rs_pidx)
+ return (0);
+ cur = txq->tx_rsq[rs_cidx];
+ MPASS(cur != QIDX_INVALID);
+ is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
+
+ if (!is_done)
+ return (0);
+ else if (clear == false)
+ return (1);
+
+ prev = txq->tx_cidx_processed;
+ ntxd = txq->desc_count;
+ do {
+ MPASS(prev != cur);
+ delta = (int32_t)cur - (int32_t)prev;
+ if (delta < 0)
+ delta += ntxd;
+ MPASS(delta > 0);
+ processed += delta;
+ prev = cur;
+ rs_cidx = (rs_cidx + 1) & (ntxd-1);
+ if (rs_cidx == txq->tx_rs_pidx)
+ break;
+ cur = txq->tx_rsq[rs_cidx];
+ MPASS(cur != QIDX_INVALID);
+ is_done = ice_is_tx_desc_done(&txq->tx_base[cur]);
+ } while (is_done);
+
+ txq->tx_rs_cidx = rs_cidx;
+ txq->tx_cidx_processed = prev;
+
+ return (processed);
+}
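+
+/*
+ * Worked example for the delta computation above (illustrative): with
+ * ntxd = 1024, prev = 1020 and cur = 4, "cur - prev" is -1016; adding
+ * ntxd yields a delta of 8 descriptors reclaimed across the ring wrap.
+ */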
+
+/**
+ * ice_ift_rxd_available - Return number of available Rx packets
+ * @arg: device private softc
+ * @rxqid: the Rx queue id
+ * @pidx: descriptor start point
+ * @budget: maximum Rx budget
+ *
+ * Determines how many Rx packets are available on the queue, up to a maximum
+ * of the given budget.
+ */
+static int
+ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
+ union ice_32b_rx_flex_desc *rxd;
+ uint16_t status0;
+ int cnt, i, nrxd;
+
+ nrxd = rxq->desc_count;
+
+ for (cnt = 0, i = pidx; cnt < nrxd - 1 && cnt < budget;) {
+ rxd = &rxq->rx_base[i];
+ status0 = le16toh(rxd->wb.status_error0);
+
+ if ((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) == 0)
+ break;
+ if (++i == nrxd)
+ i = 0;
+ if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S))
+ cnt++;
+ }
+
+ return (cnt);
+}
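+
+/*
+ * Note (descriptive): the loop above advances while the DD bit is set but
+ * only increments cnt on EOF descriptors, so the value returned to iflib
+ * counts complete packets rather than raw descriptors.
+ */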
+
+/**
+ * ice_ift_rxd_pkt_get - Called by iflib to send data to upper layer
+ * @arg: device specific softc
+ * @ri: receive packet info
+ *
+ * This function is called by iflib in ithread context to obtain data which
+ * has been DMA'ed into host memory.
+ * @returns zero on success, and an error code on failure.
+ */
+static int
+ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx];
+ union ice_32b_rx_flex_desc *cur;
+ u16 status0, plen, vtag, ptype;
+ bool eop;
+ size_t cidx;
+ int i;
+
+ cidx = ri->iri_cidx;
+ i = 0;
+ do {
+ /* 5 descriptor receive limit */
+ MPASS(i < ICE_MAX_RX_SEGS);
+
+ cur = &rxq->rx_base[cidx];
+ status0 = le16toh(cur->wb.status_error0);
+ plen = le16toh(cur->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+ ptype = le16toh(cur->wb.ptype_flex_flags0) &
+ ICE_RX_FLEX_DESC_PTYPE_M;
+
+ /* we should never be called without a valid descriptor */
+ MPASS((status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) != 0);
+
+ ri->iri_len += plen;
+
+ cur->wb.status_error0 = 0;
+ eop = (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));
+ if (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S))
+ vtag = le16toh(cur->wb.l2tag1);
+ else
+ vtag = 0;
+
+ /*
+ * Make sure packets with bad L2 values are discarded.
+ * NOTE: Only the EOP descriptor has valid error results.
+ */
+ if (eop && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S))) {
+ rxq->stats.desc_errs++;
+ return (EBADMSG);
+ }
+ ri->iri_frags[i].irf_flid = 0;
+ ri->iri_frags[i].irf_idx = cidx;
+ ri->iri_frags[i].irf_len = plen;
+ if (++cidx == rxq->desc_count)
+ cidx = 0;
+ i++;
+ } while (!eop);
+
+ /* capture soft statistics for this Rx queue */
+ rxq->stats.rx_packets++;
+ rxq->stats.rx_bytes += ri->iri_len;
+
+ if ((iflib_get_ifp(sc->ctx)->if_capenable & IFCAP_RXCSUM) != 0)
+ ice_rx_checksum(rxq, &ri->iri_csum_flags,
+ &ri->iri_csum_data, status0, ptype);
+ ri->iri_flowid = le32toh(RX_FLEX_NIC(&cur->wb, rss_hash));
+ ri->iri_rsstype = ice_ptype_to_hash(ptype);
+ ri->iri_vtag = vtag;
+ ri->iri_nfrags = i;
+ if (vtag)
+ ri->iri_flags |= M_VLANTAG;
+ return (0);
+}
+
+/**
+ * ice_ift_rxd_refill - Prepare Rx descriptors for re-use by hardware
+ * @arg: device specific softc structure
+ * @iru: the Rx descriptor update structure
+ *
+ * Update the Rx descriptor indices for a given queue, assigning new physical
+ * addresses to the descriptors, preparing them for re-use by the hardware.
+ */
+static void
+ice_ift_rxd_refill(void *arg, if_rxd_update_t iru)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_rx_queue *rxq;
+ uint32_t next_pidx;
+ int i;
+ uint64_t *paddrs;
+ uint32_t pidx;
+ uint16_t qsidx, count;
+
+ paddrs = iru->iru_paddrs;
+ pidx = iru->iru_pidx;
+ qsidx = iru->iru_qsidx;
+ count = iru->iru_count;
+
+ rxq = &(sc->pf_vsi.rx_queues[qsidx]);
+
+ for (i = 0, next_pidx = pidx; i < count; i++) {
+ rxq->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]);
+ if (++next_pidx == (uint32_t)rxq->desc_count)
+ next_pidx = 0;
+ }
+}
+
+/**
+ * ice_ift_rxd_flush - Flush Rx descriptors to hardware
+ * @arg: device specific softc pointer
+ * @rxqid: the Rx queue to flush
+ * @flidx: unused parameter
+ * @pidx: descriptor index to advance tail to
+ *
+ * Advance the Receive Descriptor Tail (RDT). This indicates to hardware that
+ * software is done with the descriptor and it can be recycled.
+ */
+static void
+ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx __unused,
+ qidx_t pidx)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[rxqid];
+ struct ice_hw *hw = &sc->hw;
+
+ wr32(hw, rxq->tail, pidx);
+}
Index: sys/dev/ice/ice_lan_tx_rx.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_lan_tx_rx.h
@@ -0,0 +1,2355 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_LAN_TX_RX_H_
+#define _ICE_LAN_TX_RX_H_
+#include "ice_osdep.h"
+
+/* Rx Descriptors */
+union ice_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ __le16 mirroring_status;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow Director filter ID */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/PTYPE/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union ice_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ __le16 mirroring_status;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow Director filter ID */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/PTYPE/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ __le32 reserved;
+ __le32 fd_id;
+ } qword3;
+ } wb; /* writeback */
+};
+
+struct ice_fltr_desc {
+ __le64 qidx_compq_space_stat;
+ __le64 dtype_cmd_vsi_fdid;
+};
+
+#define ICE_FXD_FLTR_QW0_QINDEX_S 0
+#define ICE_FXD_FLTR_QW0_QINDEX_M (0x7FFULL << ICE_FXD_FLTR_QW0_QINDEX_S)
+#define ICE_FXD_FLTR_QW0_COMP_Q_S 11
+#define ICE_FXD_FLTR_QW0_COMP_Q_M BIT_ULL(ICE_FXD_FLTR_QW0_COMP_Q_S)
+#define ICE_FXD_FLTR_QW0_COMP_Q_ZERO 0x0ULL
+#define ICE_FXD_FLTR_QW0_COMP_Q_QINDX 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_S 12
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_M \
+ (0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S)
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_NONE 0x0ULL
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL
+#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW 0x2ULL
+
+#define ICE_FXD_FLTR_QW0_FD_SPACE_S 14
+#define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S)
+#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR 0x0ULL
+#define ICE_FXD_FLTR_QW0_FD_SPACE_BEST_EFFORT 0x1ULL
+#define ICE_FXD_FLTR_QW0_FD_SPACE_GUAR_BEST 0x2ULL
+#define ICE_FXD_FLTR_QW0_FD_SPACE_BEST_GUAR 0x3ULL
+
+#define ICE_FXD_FLTR_QW0_STAT_CNT_S 16
+#define ICE_FXD_FLTR_QW0_STAT_CNT_M \
+ (0x1FFFULL << ICE_FXD_FLTR_QW0_STAT_CNT_S)
+#define ICE_FXD_FLTR_QW0_STAT_ENA_S 29
+#define ICE_FXD_FLTR_QW0_STAT_ENA_M (0x3ULL << ICE_FXD_FLTR_QW0_STAT_ENA_S)
+#define ICE_FXD_FLTR_QW0_STAT_ENA_NONE 0x0ULL
+#define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS 0x1ULL
+#define ICE_FXD_FLTR_QW0_STAT_ENA_BYTES 0x2ULL
+#define ICE_FXD_FLTR_QW0_STAT_ENA_PKTS_BYTES 0x3ULL
+
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_S 31
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_M BIT_ULL(ICE_FXD_FLTR_QW0_EVICT_ENA_S)
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE 0x0ULL
+#define ICE_FXD_FLTR_QW0_EVICT_ENA_TRUE 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_TO_Q_S 32
+#define ICE_FXD_FLTR_QW0_TO_Q_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_S)
+#define ICE_FXD_FLTR_QW0_TO_Q_EQUALS_QINDEX 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_S 35
+#define ICE_FXD_FLTR_QW0_TO_Q_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_TO_Q_PRI_S)
+#define ICE_FXD_FLTR_QW0_TO_Q_PRIO1 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_S 38
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_M \
+ (0x3ULL << ICE_FXD_FLTR_QW0_DPU_RECIPE_S)
+#define ICE_FXD_FLTR_QW0_DPU_RECIPE_DFLT 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_DROP_S 40
+#define ICE_FXD_FLTR_QW0_DROP_M BIT_ULL(ICE_FXD_FLTR_QW0_DROP_S)
+#define ICE_FXD_FLTR_QW0_DROP_NO 0x0ULL
+#define ICE_FXD_FLTR_QW0_DROP_YES 0x1ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_S 41
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_M (0x7ULL << ICE_FXD_FLTR_QW0_FLEX_PRI_S)
+#define ICE_FXD_FLTR_QW0_FLEX_PRI_NONE 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_MDID_S 44
+#define ICE_FXD_FLTR_QW0_FLEX_MDID_M (0xFULL << ICE_FXD_FLTR_QW0_FLEX_MDID_S)
+#define ICE_FXD_FLTR_QW0_FLEX_MDID0 0x0ULL
+
+#define ICE_FXD_FLTR_QW0_FLEX_VAL_S 48
+#define ICE_FXD_FLTR_QW0_FLEX_VAL_M \
+ (0xFFFFULL << ICE_FXD_FLTR_QW0_FLEX_VAL_S)
+#define ICE_FXD_FLTR_QW0_FLEX_VAL0 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_DTYPE_S 0
+#define ICE_FXD_FLTR_QW1_DTYPE_M (0xFULL << ICE_FXD_FLTR_QW1_DTYPE_S)
+#define ICE_FXD_FLTR_QW1_PCMD_S 4
+#define ICE_FXD_FLTR_QW1_PCMD_M BIT_ULL(ICE_FXD_FLTR_QW1_PCMD_S)
+#define ICE_FXD_FLTR_QW1_PCMD_ADD 0x0ULL
+#define ICE_FXD_FLTR_QW1_PCMD_REMOVE 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_PROF_PRI_S 5
+#define ICE_FXD_FLTR_QW1_PROF_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_PROF_PRI_S)
+#define ICE_FXD_FLTR_QW1_PROF_PRIO_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_PROF_S 8
+#define ICE_FXD_FLTR_QW1_PROF_M (0x3FULL << ICE_FXD_FLTR_QW1_PROF_S)
+#define ICE_FXD_FLTR_QW1_PROF_ZERO 0x0ULL
+
+#define ICE_FXD_FLTR_QW1_FD_VSI_S 14
+#define ICE_FXD_FLTR_QW1_FD_VSI_M (0x3FFULL << ICE_FXD_FLTR_QW1_FD_VSI_S)
+#define ICE_FXD_FLTR_QW1_SWAP_S 24
+#define ICE_FXD_FLTR_QW1_SWAP_M BIT_ULL(ICE_FXD_FLTR_QW1_SWAP_S)
+#define ICE_FXD_FLTR_QW1_SWAP_NOT_SET 0x0ULL
+#define ICE_FXD_FLTR_QW1_SWAP_SET 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
+#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
+#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28
+#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)
+#define ICE_FXD_FLTR_QW1_FDID_MDID_FD 0x05ULL
+
+#define ICE_FXD_FLTR_QW1_FDID_S 32
+#define ICE_FXD_FLTR_QW1_FDID_M \
+ (0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S)
+#define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL
+
+enum ice_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_DESC_STATUS_DD_S = 0,
+ ICE_RX_DESC_STATUS_EOF_S = 1,
+ ICE_RX_DESC_STATUS_L2TAG1P_S = 2,
+ ICE_RX_DESC_STATUS_L3L4P_S = 3,
+ ICE_RX_DESC_STATUS_CRCP_S = 4,
+ ICE_RX_DESC_STATUS_TSYNINDX_S = 5,
+ ICE_RX_DESC_STATUS_TSYNVALID_S = 7,
+ ICE_RX_DESC_STATUS_EXT_UDP_0_S = 8,
+ ICE_RX_DESC_STATUS_UMBCAST_S = 9,
+ ICE_RX_DESC_STATUS_FLM_S = 11,
+ ICE_RX_DESC_STATUS_FLTSTAT_S = 12,
+ ICE_RX_DESC_STATUS_LPBK_S = 14,
+ ICE_RX_DESC_STATUS_IPV6EXADD_S = 15,
+ ICE_RX_DESC_STATUS_RESERVED2_S = 16,
+ ICE_RX_DESC_STATUS_INT_UDP_0_S = 18,
+ ICE_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define ICE_RXD_QW1_STATUS_S 0
+#define ICE_RXD_QW1_STATUS_M ((BIT(ICE_RX_DESC_STATUS_LAST) - 1) << \
+ ICE_RXD_QW1_STATUS_S)
+
+#define ICE_RXD_QW1_STATUS_TSYNINDX_S ICE_RX_DESC_STATUS_TSYNINDX_S
+#define ICE_RXD_QW1_STATUS_TSYNINDX_M (0x3UL << ICE_RXD_QW1_STATUS_TSYNINDX_S)
+
+#define ICE_RXD_QW1_STATUS_TSYNVALID_S ICE_RX_DESC_STATUS_TSYNVALID_S
+#define ICE_RXD_QW1_STATUS_TSYNVALID_M BIT_ULL(ICE_RXD_QW1_STATUS_TSYNVALID_S)
+
+enum ice_rx_desc_fltstat_values {
+ ICE_RX_DESC_FLTSTAT_NO_DATA = 0,
+ ICE_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16-byte desc? FD_ID : RSV */
+ ICE_RX_DESC_FLTSTAT_RSV = 2,
+ ICE_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define ICE_RXD_QW1_ERROR_S 19
+#define ICE_RXD_QW1_ERROR_M (0xFFUL << ICE_RXD_QW1_ERROR_S)
+
+enum ice_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_DESC_ERROR_RXE_S = 0,
+ ICE_RX_DESC_ERROR_RECIPE_S = 1,
+ ICE_RX_DESC_ERROR_HBO_S = 2,
+ ICE_RX_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */
+ ICE_RX_DESC_ERROR_IPE_S = 3,
+ ICE_RX_DESC_ERROR_L4E_S = 4,
+ ICE_RX_DESC_ERROR_EIPE_S = 5,
+ ICE_RX_DESC_ERROR_OVERSIZE_S = 6,
+ ICE_RX_DESC_ERROR_PPRS_S = 7
+};
+
+enum ice_rx_desc_error_l3l4e_masks {
+ ICE_RX_DESC_ERROR_L3L4E_NONE = 0,
+ ICE_RX_DESC_ERROR_L3L4E_PROT = 1,
+};
+
+#define ICE_RXD_QW1_PTYPE_S 30
+#define ICE_RXD_QW1_PTYPE_M (0xFFULL << ICE_RXD_QW1_PTYPE_S)
+
+/* Packet type non-ip values */
+enum ice_rx_l2_ptype {
+ ICE_RX_PTYPE_L2_RESERVED = 0,
+ ICE_RX_PTYPE_L2_MAC_PAY2 = 1,
+ ICE_RX_PTYPE_L2_FIP_PAY2 = 3,
+ ICE_RX_PTYPE_L2_OUI_PAY2 = 4,
+ ICE_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ ICE_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ ICE_RX_PTYPE_L2_ECP_PAY2 = 7,
+ ICE_RX_PTYPE_L2_EVB_PAY2 = 8,
+ ICE_RX_PTYPE_L2_QCN_PAY2 = 9,
+ ICE_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ ICE_RX_PTYPE_L2_ARP = 11,
+};
+
+struct ice_rx_ptype_decoded {
+ u32 ptype:10;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:2;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum ice_rx_ptype_outer_ip {
+ ICE_RX_PTYPE_OUTER_L2 = 0,
+ ICE_RX_PTYPE_OUTER_IP = 1,
+};
+
+enum ice_rx_ptype_outer_ip_ver {
+ ICE_RX_PTYPE_OUTER_NONE = 0,
+ ICE_RX_PTYPE_OUTER_IPV4 = 1,
+ ICE_RX_PTYPE_OUTER_IPV6 = 2,
+};
+
+enum ice_rx_ptype_outer_fragmented {
+ ICE_RX_PTYPE_NOT_FRAG = 0,
+ ICE_RX_PTYPE_FRAG = 1,
+};
+
+enum ice_rx_ptype_tunnel_type {
+ ICE_RX_PTYPE_TUNNEL_NONE = 0,
+ ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
+ ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum ice_rx_ptype_tunnel_end_prot {
+ ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
+ ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum ice_rx_ptype_inner_prot {
+ ICE_RX_PTYPE_INNER_PROT_NONE = 0,
+ ICE_RX_PTYPE_INNER_PROT_UDP = 1,
+ ICE_RX_PTYPE_INNER_PROT_TCP = 2,
+ ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
+ ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
+};
+
+enum ice_rx_ptype_payload_layer {
+ ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define ICE_RXD_QW1_LEN_PBUF_S 38
+#define ICE_RXD_QW1_LEN_PBUF_M (0x3FFFULL << ICE_RXD_QW1_LEN_PBUF_S)
+
+#define ICE_RXD_QW1_LEN_HBUF_S 52
+#define ICE_RXD_QW1_LEN_HBUF_M (0x7FFULL << ICE_RXD_QW1_LEN_HBUF_S)
+
+#define ICE_RXD_QW1_LEN_SPH_S 63
+#define ICE_RXD_QW1_LEN_SPH_M BIT_ULL(ICE_RXD_QW1_LEN_SPH_S)
+
+enum ice_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_DESC_EXT_STATUS_L2TAG2P_S = 0,
+ ICE_RX_DESC_EXT_STATUS_L2TAG3P_S = 1,
+ ICE_RX_DESC_EXT_STATUS_FLEXBL_S = 2,
+ ICE_RX_DESC_EXT_STATUS_FLEXBH_S = 4,
+ ICE_RX_DESC_EXT_STATUS_FDLONGB_S = 9,
+ ICE_RX_DESC_EXT_STATUS_PELONGB_S = 11,
+};
+
+enum ice_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_DESC_PE_STATUS_QPID_S = 0, /* 18 BITS */
+ ICE_RX_DESC_PE_STATUS_L4PORT_S = 0, /* 16 BITS */
+ ICE_RX_DESC_PE_STATUS_IPINDEX_S = 16, /* 8 BITS */
+ ICE_RX_DESC_PE_STATUS_QPIDHIT_S = 24,
+ ICE_RX_DESC_PE_STATUS_APBVTHIT_S = 25,
+ ICE_RX_DESC_PE_STATUS_PORTV_S = 26,
+ ICE_RX_DESC_PE_STATUS_URG_S = 27,
+ ICE_RX_DESC_PE_STATUS_IPFRAG_S = 28,
+ ICE_RX_DESC_PE_STATUS_IPOPT_S = 29
+};
+
+#define ICE_RX_PROG_STATUS_DESC_LEN_S 38
+#define ICE_RX_PROG_STATUS_DESC_LEN 0x2000000
+
+#define ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S 2
+#define ICE_RX_PROG_STATUS_DESC_QW1_PROGID_M \
+ (0x7UL << ICE_RX_PROG_STATUS_DESC_QW1_PROGID_S)
+
+#define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S 19
+#define ICE_RX_PROG_STATUS_DESC_QW1_ERROR_M \
+ (0x3FUL << ICE_RX_PROG_STATUS_DESC_QW1_ERROR_S)
+
+enum ice_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_PROG_STATUS_DESC_DD_S = 0,
+ ICE_RX_PROG_STATUS_DESC_PROG_ID_S = 2 /* 3 BITS */
+};
+
+enum ice_rx_prog_status_desc_prog_id_masks {
+ ICE_RX_PROG_STATUS_DESC_FD_FLTR_STATUS = 1,
+};
+
+enum ice_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_PROG_STATUS_DESC_FD_TBL_FULL_S = 0,
+ ICE_RX_PROG_STATUS_DESC_NO_FD_ENTRY_S = 1,
+};
+
+/* Rx Flex Descriptors
+ * These descriptors are used instead of the legacy version descriptors when
+ * ice_rlan_ctx.adv_desc is set
+ */
+
+union ice_32b_rx_flex_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ /* Qword 0 */
+ u8 rxdid; /* descriptor builder profile ID */
+ u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+ __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+ __le16 pkt_len; /* [15:14] are reserved */
+ __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+ /* sph=[11:11] */
+ /* ff1/ext=[15:12] */
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 flex_meta0;
+ __le16 flex_meta1;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flex_flags2;
+ u8 time_stamp_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 flex_meta2;
+ __le16 flex_meta3;
+ union {
+ struct {
+ __le16 flex_meta4;
+ __le16 flex_meta5;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+ } wb; /* writeback */
+};
+
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile ID 2
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: reserved, VLAN ID taken from L2Tag
+ */
+struct ice_32b_rx_flex_desc_nic {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le32 flow_id;
+ union {
+ struct {
+ __le16 rsvd;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
+
+/* Rx Flex Descriptor Switch Profile
+ * RxDID Profile ID 3
+ * Flex-field 0: Source VSI
+ */
+struct ice_32b_rx_flex_desc_sw {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 src_vsi; /* [10:15] are reserved */
+ __le16 flex_md1_rsvd;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flex_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le32 rsvd; /* flex words 2-3 are reserved */
+ __le32 ts_high;
+};
+
+/* Rx Flex Descriptor NIC VEB Profile
+ * RxDID Profile ID 4
+ * Flex-field 0: Destination VSI
+ */
+struct ice_32b_rx_flex_desc_nic_veb_dbg {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 dst_vsi; /* [0:12]: destination VSI */
+ /* 13: VSI valid bit */
+ /* [14:15] are reserved */
+ __le16 flex_field_1;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flex_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le32 rsvd; /* flex words 2-3 are reserved */
+ __le32 ts_high;
+};
+
+/* Rx Flex Descriptor NIC ACL Profile
+ * RxDID Profile ID 5
+ * Flex-field 0: ACL Counter 0
+ * Flex-field 1: ACL Counter 1
+ * Flex-field 2: ACL Counter 2
+ */
+struct ice_32b_rx_flex_desc_nic_acl_dbg {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le16 acl_ctr0;
+ __le16 acl_ctr1;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flex_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 acl_ctr2;
+ __le16 rsvd; /* flex words 2-3 are reserved */
+ __le32 ts_high;
+};
+
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile ID 6
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Source VSI
+ * Flex-field 4: reserved, VLAN ID taken from L2Tag
+ */
+struct ice_32b_rx_flex_desc_nic_2 {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 flow_id;
+ __le16 src_vsi;
+ union {
+ struct {
+ __le16 rsvd;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
+
+/* Receive Flex Descriptor profile IDs: there are a total of 64 profiles;
+ * profile IDs 0/1 are legacy and profiles 2-63 are flex profiles that can
+ * be programmed with specific metadata (profile 7 is reserved for HW)
+ */
+enum ice_rxdid {
+ ICE_RXDID_LEGACY_0 = 0,
+ ICE_RXDID_LEGACY_1 = 1,
+ ICE_RXDID_FLEX_NIC = 2,
+ ICE_RXDID_FLEX_NIC_2 = 6,
+ ICE_RXDID_HW = 7,
+ ICE_RXDID_LAST = 63,
+};
+
+/* Receive Flex descriptor Dword Index */
+enum ice_flex_word {
+ ICE_RX_FLEX_DWORD_0 = 0,
+ ICE_RX_FLEX_DWORD_1,
+ ICE_RX_FLEX_DWORD_2,
+ ICE_RX_FLEX_DWORD_3,
+ ICE_RX_FLEX_DWORD_4,
+ ICE_RX_FLEX_DWORD_5
+};
+
+/* Receive Flex Descriptor Rx opcode values */
+enum ice_flex_opcode {
+ ICE_RX_OPC_DEBUG = 0,
+ ICE_RX_OPC_MDID,
+ ICE_RX_OPC_EXTRACT,
+ ICE_RX_OPC_PROTID
+};
+
+/* Receive Descriptor MDID values that access packet flags */
+enum ice_flex_mdid_pkt_flags {
+ ICE_RX_MDID_PKT_FLAGS_15_0 = 20,
+ ICE_RX_MDID_PKT_FLAGS_31_16,
+ ICE_RX_MDID_PKT_FLAGS_47_32,
+ ICE_RX_MDID_PKT_FLAGS_63_48,
+};
+
+/* Generic descriptor MDID values */
+enum ice_flex_mdid {
+ ICE_MDID_GENERIC_WORD_0,
+ ICE_MDID_GENERIC_WORD_1,
+ ICE_MDID_GENERIC_WORD_2,
+ ICE_MDID_GENERIC_WORD_3,
+ ICE_MDID_GENERIC_WORD_4,
+ ICE_MDID_FLOW_ID_LOWER,
+ ICE_MDID_FLOW_ID_HIGH,
+ ICE_MDID_RX_DESCR_PROF_IDX,
+ ICE_MDID_RX_PKT_DROP,
+ ICE_MDID_RX_DST_Q = 12,
+ ICE_MDID_RX_DST_VSI,
+ ICE_MDID_SRC_VSI = 19,
+ ICE_MDID_ACL_NOP = 55,
+ /* Entry 56 */
+ ICE_MDID_RX_HASH_LOW,
+ ICE_MDID_ACL_CNTR_PKT = ICE_MDID_RX_HASH_LOW,
+ /* Entry 57 */
+ ICE_MDID_RX_HASH_HIGH,
+ ICE_MDID_ACL_CNTR_BYTES = ICE_MDID_RX_HASH_HIGH,
+ ICE_MDID_ACL_CNTR_PKT_BYTES
+};
+
+/* for ice_32byte_rx_flex_desc.mir_id_umb_cast member */
+#define ICE_RX_FLEX_DESC_MIRROR_M (0x3F) /* 6-bits */
+
+/* Rx/Tx Flag64 packet flag bits */
+enum ice_flg64_bits {
+ ICE_FLG_PKT_DSI = 0,
+ /* A 1 in this bit position indicates an Rx packet */
+ ICE_FLG_PKT_DIR = 4,
+ ICE_FLG_EVLAN_x8100 = 14,
+ ICE_FLG_EVLAN_x9100,
+ ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_MAC = 22,
+ ICE_FLG_TNL_VLAN,
+ ICE_FLG_PKT_FRG,
+ ICE_FLG_FIN = 32,
+ ICE_FLG_SYN,
+ ICE_FLG_RST,
+ ICE_FLG_TNL0 = 38,
+ ICE_FLG_TNL1,
+ ICE_FLG_TNL2,
+ ICE_FLG_UDP_GRE,
+ ICE_FLG_RSVD = 63
+};
+
+enum ice_rx_flex_desc_umb_cast_bits { /* field is 2 bits long */
+ ICE_RX_FLEX_DESC_UMB_CAST_S = 6,
+ ICE_RX_FLEX_DESC_UMB_CAST_LAST /* this entry must be last!!! */
+};
+
+enum ice_umbcast_dest_addr_types {
+ ICE_DEST_UNICAST = 0,
+ ICE_DEST_MULTICAST,
+ ICE_DEST_BROADCAST,
+ ICE_DEST_MIRRORED,
+};
+
+/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
+#define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */
+
+enum ice_rx_flex_desc_flexi_flags0_bits { /* field is 6 bits long */
+ ICE_RX_FLEX_DESC_FLEXI_FLAGS0_S = 10,
+ ICE_RX_FLEX_DESC_FLEXI_FLAGS0_LAST /* this entry must be last!!! */
+};
+
+/* for ice_32byte_rx_flex_desc.pkt_length member */
+#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
+
+/* for ice_32byte_rx_flex_desc.header_length_sph_flexi_flags1 member */
+#define ICE_RX_FLEX_DESC_HEADER_LEN_M (0x7FF) /* 11-bits */
+
+enum ice_rx_flex_desc_sph_bits { /* field is 1 bit long */
+ ICE_RX_FLEX_DESC_SPH_S = 11,
+ ICE_RX_FLEX_DESC_SPH_LAST /* this entry must be last!!! */
+};
+
+enum ice_rx_flex_desc_flexi_flags1_bits { /* field is 4 bits long */
+ ICE_RX_FLEX_DESC_FLEXI_FLAGS1_S = 12,
+ ICE_RX_FLEX_DESC_FLEXI_FLAGS1_LAST /* this entry must be last!!! */
+};
+
+enum ice_rx_flex_desc_ext_status_bits { /* field is 4 bits long */
+ ICE_RX_FLEX_DESC_EXT_STATUS_EXT_UDP_S = 12,
+ ICE_RX_FLEX_DESC_EXT_STATUS_INT_UDP_S = 13,
+ ICE_RX_FLEX_DESC_EXT_STATUS_RECIPE_S = 14,
+ ICE_RX_FLEX_DESC_EXT_STATUS_OVERSIZE_S = 15,
+ ICE_RX_FLEX_DESC_EXT_STATUS_LAST /* entry must be last!!! */
+};
+
+enum ice_rx_flex_desc_status_error_0_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
+ ICE_RX_FLEX_DESC_STATUS0_EOF_S,
+ ICE_RX_FLEX_DESC_STATUS0_HBO_S,
+ ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
+ ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
+ ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
+ ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
+ ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
+ ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
+ ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
+ ICE_RX_FLEX_DESC_STATUS0_RXE_S,
+ ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
+ ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
+ ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
+ ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
+ ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
+ ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
+};
+
+enum ice_rx_flex_desc_status_error_1_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
+ ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ ICE_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+ /* [10:6] reserved */
+ ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
+ ICE_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
+ ICE_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
+ ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
+ ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
+ ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
+enum ice_rx_flex_desc_exstat_bits {
+ /* Note: These are predefined bit offsets */
+ ICE_RX_FLEX_DESC_EXSTAT_EXTUDP_S = 0,
+ ICE_RX_FLEX_DESC_EXSTAT_INTUDP_S = 1,
+ ICE_RX_FLEX_DESC_EXSTAT_RECIPE_S = 2,
+ ICE_RX_FLEX_DESC_EXSTAT_OVERSIZE_S = 3,
+};
+
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
+#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
+#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
+
+/* RLAN Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If the width of a variable is not set to the correct size,
+ * we could end up shifting bits off the top of the variable when the
+ * variable sits at the top of a byte and crosses over into the next byte.
+ */
+struct ice_rlan_ctx {
+ u16 head;
+ u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
+ u64 base;
+ u16 qlen;
+#define ICE_RLAN_CTX_DBUF_S 7
+ u16 dbuf; /* bigger than needed, see above for reason */
+#define ICE_RLAN_CTX_HBUF_S 6
+ u16 hbuf; /* bigger than needed, see above for reason */
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u32 rxmax; /* bigger than needed, see above for reason */
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
+};
+
+struct ice_ctx_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
+#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
+ .offset = offsetof(struct _struct, _ele), \
+ .size_of = FIELD_SIZEOF(struct _struct, _ele), \
+ .width = _width, \
+ .lsb = _lsb, \
+}
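+
+/*
+ * Example use of ICE_CTX_STORE (illustrative): an info table entry such as
+ *
+ *	ICE_CTX_STORE(ice_tx_cmpltnq, txq_id, 14, 0)
+ *
+ * expands to { .offset = offsetof(struct ice_tx_cmpltnq, txq_id),
+ * .size_of = FIELD_SIZEOF(struct ice_tx_cmpltnq, txq_id), .width = 14,
+ * .lsb = 0 }; see ice_tx_cmpltnq_info[] later in this file.
+ */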
+
+/* for hsplit_0 field of Rx RLAN context */
+enum ice_rlan_ctx_rx_hsplit_0 {
+ ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 = 1,
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_IP = 2,
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* for hsplit_1 field of Rx RLAN context */
+enum ice_rlan_ctx_rx_hsplit_1 {
+ ICE_RLAN_RX_HSPLIT_1_NO_SPLIT = 0,
+ ICE_RLAN_RX_HSPLIT_1_SPLIT_L2 = 1,
+ ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
+};
+
+/* Tx Descriptor */
+struct ice_tx_desc {
+ __le64 buf_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define ICE_TXD_QW1_DTYPE_S 0
+#define ICE_TXD_QW1_DTYPE_M (0xFUL << ICE_TXD_QW1_DTYPE_S)
+
+enum ice_tx_desc_dtype_value {
+ ICE_TX_DESC_DTYPE_DATA = 0x0,
+ ICE_TX_DESC_DTYPE_CTX = 0x1,
+ ICE_TX_DESC_DTYPE_IPSEC = 0x3,
+ ICE_TX_DESC_DTYPE_FLTR_PROG = 0x8,
+ ICE_TX_DESC_DTYPE_HLP_META = 0x9,
+ /* DESC_DONE - HW has completed write-back of descriptor */
+ ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
+};
+
+#define ICE_TXD_QW1_CMD_S 4
+#define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S)
+
+enum ice_tx_desc_cmd_bits {
+ ICE_TX_DESC_CMD_EOP = 0x0001,
+ ICE_TX_DESC_CMD_RS = 0x0002,
+ ICE_TX_DESC_CMD_RSVD = 0x0004,
+ ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ ICE_TX_DESC_CMD_DUMMY = 0x0010,
+ ICE_TX_DESC_CMD_IIPT_NONIP = 0x0000,
+ ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020,
+ ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040,
+ ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060,
+ ICE_TX_DESC_CMD_RSVD2 = 0x0080,
+ ICE_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000,
+ ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100,
+ ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200,
+ ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300,
+ ICE_TX_DESC_CMD_RE = 0x0400,
+ ICE_TX_DESC_CMD_RSVD3 = 0x0800,
+};
+
+#define ICE_TXD_QW1_OFFSET_S 16
+#define ICE_TXD_QW1_OFFSET_M (0x3FFFFULL << ICE_TXD_QW1_OFFSET_S)
+
+enum ice_tx_desc_len_fields {
+ /* Note: These are predefined bit offsets */
+ ICE_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */
+ ICE_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */
+ ICE_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */
+};
+
+#define ICE_TXD_QW1_MACLEN_M (0x7FUL << ICE_TX_DESC_LEN_MACLEN_S)
+#define ICE_TXD_QW1_IPLEN_M (0x7FUL << ICE_TX_DESC_LEN_IPLEN_S)
+#define ICE_TXD_QW1_L4LEN_M (0xFUL << ICE_TX_DESC_LEN_L4_LEN_S)
+
+/* Tx descriptor field limits in bytes */
+#define ICE_TXD_MACLEN_MAX ((ICE_TXD_QW1_MACLEN_M >> \
+ ICE_TX_DESC_LEN_MACLEN_S) * ICE_BYTES_PER_WORD)
+#define ICE_TXD_IPLEN_MAX ((ICE_TXD_QW1_IPLEN_M >> \
+ ICE_TX_DESC_LEN_IPLEN_S) * ICE_BYTES_PER_DWORD)
+#define ICE_TXD_L4LEN_MAX ((ICE_TXD_QW1_L4LEN_M >> \
+ ICE_TX_DESC_LEN_L4_LEN_S) * ICE_BYTES_PER_DWORD)
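+
+/*
+ * Worked values (illustrative, assuming ICE_BYTES_PER_WORD is 2 and
+ * ICE_BYTES_PER_DWORD is 4): MACLEN is a 7-bit field counted in words,
+ * so at most 127 * 2 = 254 bytes; IPLEN is 7 bits counted in dwords,
+ * 127 * 4 = 508 bytes; L4LEN is 4 bits counted in dwords, 15 * 4 = 60 bytes.
+ */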
+
+#define ICE_TXD_QW1_TX_BUF_SZ_S 34
+#define ICE_TXD_QW1_TX_BUF_SZ_M (0x3FFFULL << ICE_TXD_QW1_TX_BUF_SZ_S)
+
+#define ICE_TXD_QW1_L2TAG1_S 48
+#define ICE_TXD_QW1_L2TAG1_M (0xFFFFULL << ICE_TXD_QW1_L2TAG1_S)
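+
+/*
+ * Illustrative composition of a data descriptor's second quadword from the
+ * fields above ("cmd", "off", "len" and "vtag" are placeholder values; this
+ * mirrors ice_ift_txd_encap() in ice_iflib_txrx.c):
+ *
+ *	qw1 = ICE_TX_DESC_DTYPE_DATA
+ *	    | ((u64)cmd << ICE_TXD_QW1_CMD_S)
+ *	    | ((u64)off << ICE_TXD_QW1_OFFSET_S)
+ *	    | ((u64)len << ICE_TXD_QW1_TX_BUF_SZ_S)
+ *	    | ((u64)vtag << ICE_TXD_QW1_L2TAG1_S);
+ */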
+
+/* Context descriptors */
+struct ice_tx_ctx_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 qw1;
+};
+
+#define ICE_TXD_CTX_QW1_DTYPE_S 0
+#define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S)
+
+#define ICE_TXD_CTX_QW1_CMD_S 4
+#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
+
+#define ICE_TXD_CTX_QW1_IPSEC_S 11
+#define ICE_TXD_CTX_QW1_IPSEC_M (0x7FUL << ICE_TXD_CTX_QW1_IPSEC_S)
+
+#define ICE_TXD_CTX_QW1_TSO_LEN_S 30
+#define ICE_TXD_CTX_QW1_TSO_LEN_M \
+ (0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
+
+#define ICE_TXD_CTX_QW1_TSYN_S ICE_TXD_CTX_QW1_TSO_LEN_S
+#define ICE_TXD_CTX_QW1_TSYN_M ICE_TXD_CTX_QW1_TSO_LEN_M
+
+#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_QW1_MSS_M (0x3FFFULL << ICE_TXD_CTX_QW1_MSS_S)
+#define ICE_TXD_CTX_MIN_MSS 64
+#define ICE_TXD_CTX_MAX_MSS 9668
+
+#define ICE_TXD_CTX_QW1_VSI_S 50
+#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
+
+enum ice_tx_ctx_desc_cmd_bits {
+ ICE_TX_CTX_DESC_TSO = 0x01,
+ ICE_TX_CTX_DESC_TSYN = 0x02,
+ ICE_TX_CTX_DESC_IL2TAG2 = 0x04,
+ ICE_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ ICE_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ ICE_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ ICE_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ ICE_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ ICE_TX_CTX_DESC_RESERVED = 0x40
+};
+
+enum ice_tx_ctx_desc_eipt_offload {
+ ICE_TX_CTX_EIPT_NONE = 0x0,
+ ICE_TX_CTX_EIPT_IPV6 = 0x1,
+ ICE_TX_CTX_EIPT_IPV4_NO_CSUM = 0x2,
+ ICE_TX_CTX_EIPT_IPV4 = 0x3
+};
+
+#define ICE_TXD_CTX_QW0_EIPT_S 0
+#define ICE_TXD_CTX_QW0_EIPT_M (0x3ULL << ICE_TXD_CTX_QW0_EIPT_S)
+
+#define ICE_TXD_CTX_QW0_EIPLEN_S 2
+#define ICE_TXD_CTX_QW0_EIPLEN_M (0x7FUL << ICE_TXD_CTX_QW0_EIPLEN_S)
+
+#define ICE_TXD_CTX_QW0_L4TUNT_S 9
+#define ICE_TXD_CTX_QW0_L4TUNT_M (0x3ULL << ICE_TXD_CTX_QW0_L4TUNT_S)
+
+#define ICE_TXD_CTX_UDP_TUNNELING BIT_ULL(ICE_TXD_CTX_QW0_L4TUNT_S)
+#define ICE_TXD_CTX_GRE_TUNNELING (0x2ULL << ICE_TXD_CTX_QW0_L4TUNT_S)
+
+#define ICE_TXD_CTX_QW0_EIP_NOINC_S 11
+#define ICE_TXD_CTX_QW0_EIP_NOINC_M BIT_ULL(ICE_TXD_CTX_QW0_EIP_NOINC_S)
+
+#define ICE_TXD_CTX_EIP_NOINC_IPID_CONST ICE_TXD_CTX_QW0_EIP_NOINC_M
+
+#define ICE_TXD_CTX_QW0_NATLEN_S 12
+#define ICE_TXD_CTX_QW0_NATLEN_M (0X7FULL << ICE_TXD_CTX_QW0_NATLEN_S)
+
+#define ICE_TXD_CTX_QW0_DECTTL_S 19
+#define ICE_TXD_CTX_QW0_DECTTL_M (0xFULL << ICE_TXD_CTX_QW0_DECTTL_S)
+
+#define ICE_TXD_CTX_QW0_L4T_CS_S 23
+#define ICE_TXD_CTX_QW0_L4T_CS_M BIT_ULL(ICE_TXD_CTX_QW0_L4T_CS_S)
+
+#define ICE_LAN_TXQ_MAX_QGRPS 127
+#define ICE_LAN_TXQ_MAX_QDIS 1023
+
+/* Tx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If the width of a variable is not set to the correct size,
+ * we could end up shifting bits off the top of the variable when the
+ * variable sits at the top of a byte and crosses over into the next byte.
+ */
+struct ice_tlan_ctx {
+#define ICE_TLAN_CTX_BASE_S 7
+ u64 base; /* base is defined in 128-byte units */
+ u8 port_num;
+ u16 cgd_num; /* bigger than needed, see above for reason */
+ u8 pf_num;
+ u16 vmvf_num;
+ u8 vmvf_type;
+#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
+#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
+#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
+ u16 src_vsi;
+ u8 tsyn_ena;
+ u8 internal_usage_flag;
+ u8 alt_vlan;
+ u16 cpuid; /* bigger than needed, see above for reason */
+ u8 wb_mode;
+ u8 tphrd_desc;
+ u8 tphrd;
+ u8 tphwr_desc;
+ u16 cmpq_id;
+ u16 qnum_in_func;
+ u8 itr_notification_mode;
+ u8 adjust_prof_id;
+ u32 qlen; /* bigger than needed, see above for reason */
+ u8 quanta_prof_idx;
+ u8 tso_ena;
+ u16 tso_qnum;
+ u8 legacy_int;
+ u8 drop_ena;
+ u8 cache_prof_idx;
+ u8 pkt_shaper_prof_idx;
+ u8 int_q_state; /* width not needed - internal do not write */
+};
+
+/* LAN Tx Completion Queue data */
+#pragma pack(1)
+struct ice_tx_cmpltnq {
+ u16 txq_id;
+ u8 generation;
+ u16 tx_head;
+ u8 cmpl_type;
+};
+#pragma pack()
+
+/* FIXME: move to a .c file that references this variable */
+/* LAN Tx Completion Queue data info */
+static const struct ice_ctx_ele ice_tx_cmpltnq_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_tx_cmpltnq, txq_id, 14, 0),
+ ICE_CTX_STORE(ice_tx_cmpltnq, generation, 1, 15),
+ ICE_CTX_STORE(ice_tx_cmpltnq, tx_head, 13, 16),
+ ICE_CTX_STORE(ice_tx_cmpltnq, cmpl_type, 3, 29),
+ { 0 }
+};
+
+/* LAN Tx Completion Queue Context */
+#pragma pack(1)
+struct ice_tx_cmpltnq_ctx {
+ u64 base;
+ u32 q_len;
+#define ICE_TX_CMPLTNQ_CTX_Q_LEN_S 4
+ u8 generation;
+ u32 wrt_ptr;
+ u8 pf_num;
+ u16 vmvf_num;
+ u8 vmvf_type;
+ u8 tph_desc_wr;
+ u8 cpuid;
+ u32 cmpltn_cache[16];
+};
+#pragma pack()
+
+/* LAN Tx Doorbell Descriptor Format */
+struct ice_tx_drbell_fmt {
+ u16 txq_id;
+ u8 dd;
+ u8 rs;
+ u32 db;
+};
+
+/* FIXME: move to a .c file that references this variable */
+/* LAN Tx Doorbell Descriptor format info */
+static const struct ice_ctx_ele ice_tx_drbell_fmt_info[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_tx_drbell_fmt, txq_id, 14, 0),
+ ICE_CTX_STORE(ice_tx_drbell_fmt, dd, 1, 14),
+ ICE_CTX_STORE(ice_tx_drbell_fmt, rs, 1, 15),
+ ICE_CTX_STORE(ice_tx_drbell_fmt, db, 32, 32),
+ { 0 }
+};
+
+/* LAN Tx Doorbell Queue Context */
+#pragma pack(1)
+struct ice_tx_drbell_q_ctx {
+ u64 base;
+ u16 ring_len;
+ u8 pf_num;
+ u16 vf_num;
+ u8 vmvf_type;
+ u8 cpuid;
+ u8 tph_desc_rd;
+ u8 tph_desc_wr;
+ u8 db_q_en;
+ u16 rd_head;
+ u16 rd_tail;
+};
+#pragma pack()
+
+/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT ice_ptype_lkup[ptype].known
+ * THEN
+ * Packet is unknown
+ * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP
+ * Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ * Use the enum ice_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
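+
+/*
+ * Minimal C sketch of the work flow above (illustrative only; the helper
+ * names are hypothetical):
+ *
+ *	struct ice_rx_ptype_decoded decoded = ice_ptype_lkup[ptype];
+ *
+ *	if (!decoded.known)
+ *		handle_unknown_packet();
+ *	else if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP)
+ *		inspect_tunnel(decoded.tunnel_type, decoded.inner_prot);
+ *	else
+ *		decode_l2((enum ice_rx_l2_ptype)ptype);
+ */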
+
+/* macro to make the table lines short */
+#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+ { PTYPE, \
+ 1, \
+ ICE_RX_PTYPE_OUTER_##OUTER_IP, \
+ ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+ ICE_RX_PTYPE_##OUTER_FRAG, \
+ ICE_RX_PTYPE_TUNNEL_##T, \
+ ICE_RX_PTYPE_TUNNEL_END_##TE, \
+ ICE_RX_PTYPE_##TEF, \
+ ICE_RX_PTYPE_INNER_PROT_##I, \
+ ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define ICE_PTT_UNUSED_ENTRY(PTYPE) { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but are terse */
+#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
+#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = {
+ /* L2 Packet types */
+ ICE_PTT_UNUSED_ENTRY(0),
+ ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+ ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(3),
+ ICE_PTT_UNUSED_ENTRY(4),
+ ICE_PTT_UNUSED_ENTRY(5),
+ ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(8),
+ ICE_PTT_UNUSED_ENTRY(9),
+ ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+ ICE_PTT_UNUSED_ENTRY(12),
+ ICE_PTT_UNUSED_ENTRY(13),
+ ICE_PTT_UNUSED_ENTRY(14),
+ ICE_PTT_UNUSED_ENTRY(15),
+ ICE_PTT_UNUSED_ENTRY(16),
+ ICE_PTT_UNUSED_ENTRY(17),
+ ICE_PTT_UNUSED_ENTRY(18),
+ ICE_PTT_UNUSED_ENTRY(19),
+ ICE_PTT_UNUSED_ENTRY(20),
+ ICE_PTT_UNUSED_ENTRY(21),
+
+ /* Non Tunneled IPv4 */
+ ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(25),
+ ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv4 */
+ ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(32),
+ ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> IPv6 */
+ ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(39),
+ ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT */
+ ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> IPv4 */
+ ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(47),
+ ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> IPv6 */
+ ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(54),
+ ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC */
+ ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+ ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(62),
+ ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+ ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(69),
+ ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv4 --> GRE/NAT --> MAC/VLAN */
+ ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+	/* IPv4 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(77),
+ ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(84),
+ ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* Non Tunneled IPv6 */
+ ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+ ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ ICE_PTT_UNUSED_ENTRY(91),
+ ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
+ ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+ ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv4 */
+ ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(98),
+ ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> IPv6 */
+ ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(105),
+ ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT */
+ ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> IPv4 */
+ ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(113),
+ ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> IPv6 */
+ ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(120),
+ ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC */
+ ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+ ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(128),
+ ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+ ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(135),
+ ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN */
+ ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+ ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+ ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+ ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(143),
+ ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
+ ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+ ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+ /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+ ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+ ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+ ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
+ ICE_PTT_UNUSED_ENTRY(150),
+ ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
+ ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+ ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+ /* unused entries */
+ ICE_PTT_UNUSED_ENTRY(154),
+ ICE_PTT_UNUSED_ENTRY(155),
+ ICE_PTT_UNUSED_ENTRY(156),
+ ICE_PTT_UNUSED_ENTRY(157),
+ ICE_PTT_UNUSED_ENTRY(158),
+ ICE_PTT_UNUSED_ENTRY(159),
+
+ ICE_PTT_UNUSED_ENTRY(160),
+ ICE_PTT_UNUSED_ENTRY(161),
+ ICE_PTT_UNUSED_ENTRY(162),
+ ICE_PTT_UNUSED_ENTRY(163),
+ ICE_PTT_UNUSED_ENTRY(164),
+ ICE_PTT_UNUSED_ENTRY(165),
+ ICE_PTT_UNUSED_ENTRY(166),
+ ICE_PTT_UNUSED_ENTRY(167),
+ ICE_PTT_UNUSED_ENTRY(168),
+ ICE_PTT_UNUSED_ENTRY(169),
+
+ ICE_PTT_UNUSED_ENTRY(170),
+ ICE_PTT_UNUSED_ENTRY(171),
+ ICE_PTT_UNUSED_ENTRY(172),
+ ICE_PTT_UNUSED_ENTRY(173),
+ ICE_PTT_UNUSED_ENTRY(174),
+ ICE_PTT_UNUSED_ENTRY(175),
+ ICE_PTT_UNUSED_ENTRY(176),
+ ICE_PTT_UNUSED_ENTRY(177),
+ ICE_PTT_UNUSED_ENTRY(178),
+ ICE_PTT_UNUSED_ENTRY(179),
+
+ ICE_PTT_UNUSED_ENTRY(180),
+ ICE_PTT_UNUSED_ENTRY(181),
+ ICE_PTT_UNUSED_ENTRY(182),
+ ICE_PTT_UNUSED_ENTRY(183),
+ ICE_PTT_UNUSED_ENTRY(184),
+ ICE_PTT_UNUSED_ENTRY(185),
+ ICE_PTT_UNUSED_ENTRY(186),
+ ICE_PTT_UNUSED_ENTRY(187),
+ ICE_PTT_UNUSED_ENTRY(188),
+ ICE_PTT_UNUSED_ENTRY(189),
+
+ ICE_PTT_UNUSED_ENTRY(190),
+ ICE_PTT_UNUSED_ENTRY(191),
+ ICE_PTT_UNUSED_ENTRY(192),
+ ICE_PTT_UNUSED_ENTRY(193),
+ ICE_PTT_UNUSED_ENTRY(194),
+ ICE_PTT_UNUSED_ENTRY(195),
+ ICE_PTT_UNUSED_ENTRY(196),
+ ICE_PTT_UNUSED_ENTRY(197),
+ ICE_PTT_UNUSED_ENTRY(198),
+ ICE_PTT_UNUSED_ENTRY(199),
+
+ ICE_PTT_UNUSED_ENTRY(200),
+ ICE_PTT_UNUSED_ENTRY(201),
+ ICE_PTT_UNUSED_ENTRY(202),
+ ICE_PTT_UNUSED_ENTRY(203),
+ ICE_PTT_UNUSED_ENTRY(204),
+ ICE_PTT_UNUSED_ENTRY(205),
+ ICE_PTT_UNUSED_ENTRY(206),
+ ICE_PTT_UNUSED_ENTRY(207),
+ ICE_PTT_UNUSED_ENTRY(208),
+ ICE_PTT_UNUSED_ENTRY(209),
+
+ ICE_PTT_UNUSED_ENTRY(210),
+ ICE_PTT_UNUSED_ENTRY(211),
+ ICE_PTT_UNUSED_ENTRY(212),
+ ICE_PTT_UNUSED_ENTRY(213),
+ ICE_PTT_UNUSED_ENTRY(214),
+ ICE_PTT_UNUSED_ENTRY(215),
+ ICE_PTT_UNUSED_ENTRY(216),
+ ICE_PTT_UNUSED_ENTRY(217),
+ ICE_PTT_UNUSED_ENTRY(218),
+ ICE_PTT_UNUSED_ENTRY(219),
+
+ ICE_PTT_UNUSED_ENTRY(220),
+ ICE_PTT_UNUSED_ENTRY(221),
+ ICE_PTT_UNUSED_ENTRY(222),
+ ICE_PTT_UNUSED_ENTRY(223),
+ ICE_PTT_UNUSED_ENTRY(224),
+ ICE_PTT_UNUSED_ENTRY(225),
+ ICE_PTT_UNUSED_ENTRY(226),
+ ICE_PTT_UNUSED_ENTRY(227),
+ ICE_PTT_UNUSED_ENTRY(228),
+ ICE_PTT_UNUSED_ENTRY(229),
+
+ ICE_PTT_UNUSED_ENTRY(230),
+ ICE_PTT_UNUSED_ENTRY(231),
+ ICE_PTT_UNUSED_ENTRY(232),
+ ICE_PTT_UNUSED_ENTRY(233),
+ ICE_PTT_UNUSED_ENTRY(234),
+ ICE_PTT_UNUSED_ENTRY(235),
+ ICE_PTT_UNUSED_ENTRY(236),
+ ICE_PTT_UNUSED_ENTRY(237),
+ ICE_PTT_UNUSED_ENTRY(238),
+ ICE_PTT_UNUSED_ENTRY(239),
+
+ ICE_PTT_UNUSED_ENTRY(240),
+ ICE_PTT_UNUSED_ENTRY(241),
+ ICE_PTT_UNUSED_ENTRY(242),
+ ICE_PTT_UNUSED_ENTRY(243),
+ ICE_PTT_UNUSED_ENTRY(244),
+ ICE_PTT_UNUSED_ENTRY(245),
+ ICE_PTT_UNUSED_ENTRY(246),
+ ICE_PTT_UNUSED_ENTRY(247),
+ ICE_PTT_UNUSED_ENTRY(248),
+ ICE_PTT_UNUSED_ENTRY(249),
+
+ ICE_PTT_UNUSED_ENTRY(250),
+ ICE_PTT_UNUSED_ENTRY(251),
+ ICE_PTT_UNUSED_ENTRY(252),
+ ICE_PTT_UNUSED_ENTRY(253),
+ ICE_PTT_UNUSED_ENTRY(254),
+ ICE_PTT_UNUSED_ENTRY(255),
+ ICE_PTT_UNUSED_ENTRY(256),
+ ICE_PTT_UNUSED_ENTRY(257),
+ ICE_PTT_UNUSED_ENTRY(258),
+ ICE_PTT_UNUSED_ENTRY(259),
+
+ ICE_PTT_UNUSED_ENTRY(260),
+ ICE_PTT_UNUSED_ENTRY(261),
+ ICE_PTT_UNUSED_ENTRY(262),
+ ICE_PTT_UNUSED_ENTRY(263),
+ ICE_PTT_UNUSED_ENTRY(264),
+ ICE_PTT_UNUSED_ENTRY(265),
+ ICE_PTT_UNUSED_ENTRY(266),
+ ICE_PTT_UNUSED_ENTRY(267),
+ ICE_PTT_UNUSED_ENTRY(268),
+ ICE_PTT_UNUSED_ENTRY(269),
+
+ ICE_PTT_UNUSED_ENTRY(270),
+ ICE_PTT_UNUSED_ENTRY(271),
+ ICE_PTT_UNUSED_ENTRY(272),
+ ICE_PTT_UNUSED_ENTRY(273),
+ ICE_PTT_UNUSED_ENTRY(274),
+ ICE_PTT_UNUSED_ENTRY(275),
+ ICE_PTT_UNUSED_ENTRY(276),
+ ICE_PTT_UNUSED_ENTRY(277),
+ ICE_PTT_UNUSED_ENTRY(278),
+ ICE_PTT_UNUSED_ENTRY(279),
+
+ ICE_PTT_UNUSED_ENTRY(280),
+ ICE_PTT_UNUSED_ENTRY(281),
+ ICE_PTT_UNUSED_ENTRY(282),
+ ICE_PTT_UNUSED_ENTRY(283),
+ ICE_PTT_UNUSED_ENTRY(284),
+ ICE_PTT_UNUSED_ENTRY(285),
+ ICE_PTT_UNUSED_ENTRY(286),
+ ICE_PTT_UNUSED_ENTRY(287),
+ ICE_PTT_UNUSED_ENTRY(288),
+ ICE_PTT_UNUSED_ENTRY(289),
+
+ ICE_PTT_UNUSED_ENTRY(290),
+ ICE_PTT_UNUSED_ENTRY(291),
+ ICE_PTT_UNUSED_ENTRY(292),
+ ICE_PTT_UNUSED_ENTRY(293),
+ ICE_PTT_UNUSED_ENTRY(294),
+ ICE_PTT_UNUSED_ENTRY(295),
+ ICE_PTT_UNUSED_ENTRY(296),
+ ICE_PTT_UNUSED_ENTRY(297),
+ ICE_PTT_UNUSED_ENTRY(298),
+ ICE_PTT_UNUSED_ENTRY(299),
+
+ ICE_PTT_UNUSED_ENTRY(300),
+ ICE_PTT_UNUSED_ENTRY(301),
+ ICE_PTT_UNUSED_ENTRY(302),
+ ICE_PTT_UNUSED_ENTRY(303),
+ ICE_PTT_UNUSED_ENTRY(304),
+ ICE_PTT_UNUSED_ENTRY(305),
+ ICE_PTT_UNUSED_ENTRY(306),
+ ICE_PTT_UNUSED_ENTRY(307),
+ ICE_PTT_UNUSED_ENTRY(308),
+ ICE_PTT_UNUSED_ENTRY(309),
+
+ ICE_PTT_UNUSED_ENTRY(310),
+ ICE_PTT_UNUSED_ENTRY(311),
+ ICE_PTT_UNUSED_ENTRY(312),
+ ICE_PTT_UNUSED_ENTRY(313),
+ ICE_PTT_UNUSED_ENTRY(314),
+ ICE_PTT_UNUSED_ENTRY(315),
+ ICE_PTT_UNUSED_ENTRY(316),
+ ICE_PTT_UNUSED_ENTRY(317),
+ ICE_PTT_UNUSED_ENTRY(318),
+ ICE_PTT_UNUSED_ENTRY(319),
+
+ ICE_PTT_UNUSED_ENTRY(320),
+ ICE_PTT_UNUSED_ENTRY(321),
+ ICE_PTT_UNUSED_ENTRY(322),
+ ICE_PTT_UNUSED_ENTRY(323),
+ ICE_PTT_UNUSED_ENTRY(324),
+ ICE_PTT_UNUSED_ENTRY(325),
+ ICE_PTT_UNUSED_ENTRY(326),
+ ICE_PTT_UNUSED_ENTRY(327),
+ ICE_PTT_UNUSED_ENTRY(328),
+ ICE_PTT_UNUSED_ENTRY(329),
+
+ ICE_PTT_UNUSED_ENTRY(330),
+ ICE_PTT_UNUSED_ENTRY(331),
+ ICE_PTT_UNUSED_ENTRY(332),
+ ICE_PTT_UNUSED_ENTRY(333),
+ ICE_PTT_UNUSED_ENTRY(334),
+ ICE_PTT_UNUSED_ENTRY(335),
+ ICE_PTT_UNUSED_ENTRY(336),
+ ICE_PTT_UNUSED_ENTRY(337),
+ ICE_PTT_UNUSED_ENTRY(338),
+ ICE_PTT_UNUSED_ENTRY(339),
+
+ ICE_PTT_UNUSED_ENTRY(340),
+ ICE_PTT_UNUSED_ENTRY(341),
+ ICE_PTT_UNUSED_ENTRY(342),
+ ICE_PTT_UNUSED_ENTRY(343),
+ ICE_PTT_UNUSED_ENTRY(344),
+ ICE_PTT_UNUSED_ENTRY(345),
+ ICE_PTT_UNUSED_ENTRY(346),
+ ICE_PTT_UNUSED_ENTRY(347),
+ ICE_PTT_UNUSED_ENTRY(348),
+ ICE_PTT_UNUSED_ENTRY(349),
+
+ ICE_PTT_UNUSED_ENTRY(350),
+ ICE_PTT_UNUSED_ENTRY(351),
+ ICE_PTT_UNUSED_ENTRY(352),
+ ICE_PTT_UNUSED_ENTRY(353),
+ ICE_PTT_UNUSED_ENTRY(354),
+ ICE_PTT_UNUSED_ENTRY(355),
+ ICE_PTT_UNUSED_ENTRY(356),
+ ICE_PTT_UNUSED_ENTRY(357),
+ ICE_PTT_UNUSED_ENTRY(358),
+ ICE_PTT_UNUSED_ENTRY(359),
+
+ ICE_PTT_UNUSED_ENTRY(360),
+ ICE_PTT_UNUSED_ENTRY(361),
+ ICE_PTT_UNUSED_ENTRY(362),
+ ICE_PTT_UNUSED_ENTRY(363),
+ ICE_PTT_UNUSED_ENTRY(364),
+ ICE_PTT_UNUSED_ENTRY(365),
+ ICE_PTT_UNUSED_ENTRY(366),
+ ICE_PTT_UNUSED_ENTRY(367),
+ ICE_PTT_UNUSED_ENTRY(368),
+ ICE_PTT_UNUSED_ENTRY(369),
+
+ ICE_PTT_UNUSED_ENTRY(370),
+ ICE_PTT_UNUSED_ENTRY(371),
+ ICE_PTT_UNUSED_ENTRY(372),
+ ICE_PTT_UNUSED_ENTRY(373),
+ ICE_PTT_UNUSED_ENTRY(374),
+ ICE_PTT_UNUSED_ENTRY(375),
+ ICE_PTT_UNUSED_ENTRY(376),
+ ICE_PTT_UNUSED_ENTRY(377),
+ ICE_PTT_UNUSED_ENTRY(378),
+ ICE_PTT_UNUSED_ENTRY(379),
+
+ ICE_PTT_UNUSED_ENTRY(380),
+ ICE_PTT_UNUSED_ENTRY(381),
+ ICE_PTT_UNUSED_ENTRY(382),
+ ICE_PTT_UNUSED_ENTRY(383),
+ ICE_PTT_UNUSED_ENTRY(384),
+ ICE_PTT_UNUSED_ENTRY(385),
+ ICE_PTT_UNUSED_ENTRY(386),
+ ICE_PTT_UNUSED_ENTRY(387),
+ ICE_PTT_UNUSED_ENTRY(388),
+ ICE_PTT_UNUSED_ENTRY(389),
+
+ ICE_PTT_UNUSED_ENTRY(390),
+ ICE_PTT_UNUSED_ENTRY(391),
+ ICE_PTT_UNUSED_ENTRY(392),
+ ICE_PTT_UNUSED_ENTRY(393),
+ ICE_PTT_UNUSED_ENTRY(394),
+ ICE_PTT_UNUSED_ENTRY(395),
+ ICE_PTT_UNUSED_ENTRY(396),
+ ICE_PTT_UNUSED_ENTRY(397),
+ ICE_PTT_UNUSED_ENTRY(398),
+ ICE_PTT_UNUSED_ENTRY(399),
+
+ ICE_PTT_UNUSED_ENTRY(400),
+ ICE_PTT_UNUSED_ENTRY(401),
+ ICE_PTT_UNUSED_ENTRY(402),
+ ICE_PTT_UNUSED_ENTRY(403),
+ ICE_PTT_UNUSED_ENTRY(404),
+ ICE_PTT_UNUSED_ENTRY(405),
+ ICE_PTT_UNUSED_ENTRY(406),
+ ICE_PTT_UNUSED_ENTRY(407),
+ ICE_PTT_UNUSED_ENTRY(408),
+ ICE_PTT_UNUSED_ENTRY(409),
+
+ ICE_PTT_UNUSED_ENTRY(410),
+ ICE_PTT_UNUSED_ENTRY(411),
+ ICE_PTT_UNUSED_ENTRY(412),
+ ICE_PTT_UNUSED_ENTRY(413),
+ ICE_PTT_UNUSED_ENTRY(414),
+ ICE_PTT_UNUSED_ENTRY(415),
+ ICE_PTT_UNUSED_ENTRY(416),
+ ICE_PTT_UNUSED_ENTRY(417),
+ ICE_PTT_UNUSED_ENTRY(418),
+ ICE_PTT_UNUSED_ENTRY(419),
+
+ ICE_PTT_UNUSED_ENTRY(420),
+ ICE_PTT_UNUSED_ENTRY(421),
+ ICE_PTT_UNUSED_ENTRY(422),
+ ICE_PTT_UNUSED_ENTRY(423),
+ ICE_PTT_UNUSED_ENTRY(424),
+ ICE_PTT_UNUSED_ENTRY(425),
+ ICE_PTT_UNUSED_ENTRY(426),
+ ICE_PTT_UNUSED_ENTRY(427),
+ ICE_PTT_UNUSED_ENTRY(428),
+ ICE_PTT_UNUSED_ENTRY(429),
+
+ ICE_PTT_UNUSED_ENTRY(430),
+ ICE_PTT_UNUSED_ENTRY(431),
+ ICE_PTT_UNUSED_ENTRY(432),
+ ICE_PTT_UNUSED_ENTRY(433),
+ ICE_PTT_UNUSED_ENTRY(434),
+ ICE_PTT_UNUSED_ENTRY(435),
+ ICE_PTT_UNUSED_ENTRY(436),
+ ICE_PTT_UNUSED_ENTRY(437),
+ ICE_PTT_UNUSED_ENTRY(438),
+ ICE_PTT_UNUSED_ENTRY(439),
+
+ ICE_PTT_UNUSED_ENTRY(440),
+ ICE_PTT_UNUSED_ENTRY(441),
+ ICE_PTT_UNUSED_ENTRY(442),
+ ICE_PTT_UNUSED_ENTRY(443),
+ ICE_PTT_UNUSED_ENTRY(444),
+ ICE_PTT_UNUSED_ENTRY(445),
+ ICE_PTT_UNUSED_ENTRY(446),
+ ICE_PTT_UNUSED_ENTRY(447),
+ ICE_PTT_UNUSED_ENTRY(448),
+ ICE_PTT_UNUSED_ENTRY(449),
+
+ ICE_PTT_UNUSED_ENTRY(450),
+ ICE_PTT_UNUSED_ENTRY(451),
+ ICE_PTT_UNUSED_ENTRY(452),
+ ICE_PTT_UNUSED_ENTRY(453),
+ ICE_PTT_UNUSED_ENTRY(454),
+ ICE_PTT_UNUSED_ENTRY(455),
+ ICE_PTT_UNUSED_ENTRY(456),
+ ICE_PTT_UNUSED_ENTRY(457),
+ ICE_PTT_UNUSED_ENTRY(458),
+ ICE_PTT_UNUSED_ENTRY(459),
+
+ ICE_PTT_UNUSED_ENTRY(460),
+ ICE_PTT_UNUSED_ENTRY(461),
+ ICE_PTT_UNUSED_ENTRY(462),
+ ICE_PTT_UNUSED_ENTRY(463),
+ ICE_PTT_UNUSED_ENTRY(464),
+ ICE_PTT_UNUSED_ENTRY(465),
+ ICE_PTT_UNUSED_ENTRY(466),
+ ICE_PTT_UNUSED_ENTRY(467),
+ ICE_PTT_UNUSED_ENTRY(468),
+ ICE_PTT_UNUSED_ENTRY(469),
+
+ ICE_PTT_UNUSED_ENTRY(470),
+ ICE_PTT_UNUSED_ENTRY(471),
+ ICE_PTT_UNUSED_ENTRY(472),
+ ICE_PTT_UNUSED_ENTRY(473),
+ ICE_PTT_UNUSED_ENTRY(474),
+ ICE_PTT_UNUSED_ENTRY(475),
+ ICE_PTT_UNUSED_ENTRY(476),
+ ICE_PTT_UNUSED_ENTRY(477),
+ ICE_PTT_UNUSED_ENTRY(478),
+ ICE_PTT_UNUSED_ENTRY(479),
+
+ ICE_PTT_UNUSED_ENTRY(480),
+ ICE_PTT_UNUSED_ENTRY(481),
+ ICE_PTT_UNUSED_ENTRY(482),
+ ICE_PTT_UNUSED_ENTRY(483),
+ ICE_PTT_UNUSED_ENTRY(484),
+ ICE_PTT_UNUSED_ENTRY(485),
+ ICE_PTT_UNUSED_ENTRY(486),
+ ICE_PTT_UNUSED_ENTRY(487),
+ ICE_PTT_UNUSED_ENTRY(488),
+ ICE_PTT_UNUSED_ENTRY(489),
+
+ ICE_PTT_UNUSED_ENTRY(490),
+ ICE_PTT_UNUSED_ENTRY(491),
+ ICE_PTT_UNUSED_ENTRY(492),
+ ICE_PTT_UNUSED_ENTRY(493),
+ ICE_PTT_UNUSED_ENTRY(494),
+ ICE_PTT_UNUSED_ENTRY(495),
+ ICE_PTT_UNUSED_ENTRY(496),
+ ICE_PTT_UNUSED_ENTRY(497),
+ ICE_PTT_UNUSED_ENTRY(498),
+ ICE_PTT_UNUSED_ENTRY(499),
+
+ ICE_PTT_UNUSED_ENTRY(500),
+ ICE_PTT_UNUSED_ENTRY(501),
+ ICE_PTT_UNUSED_ENTRY(502),
+ ICE_PTT_UNUSED_ENTRY(503),
+ ICE_PTT_UNUSED_ENTRY(504),
+ ICE_PTT_UNUSED_ENTRY(505),
+ ICE_PTT_UNUSED_ENTRY(506),
+ ICE_PTT_UNUSED_ENTRY(507),
+ ICE_PTT_UNUSED_ENTRY(508),
+ ICE_PTT_UNUSED_ENTRY(509),
+
+ ICE_PTT_UNUSED_ENTRY(510),
+ ICE_PTT_UNUSED_ENTRY(511),
+ ICE_PTT_UNUSED_ENTRY(512),
+ ICE_PTT_UNUSED_ENTRY(513),
+ ICE_PTT_UNUSED_ENTRY(514),
+ ICE_PTT_UNUSED_ENTRY(515),
+ ICE_PTT_UNUSED_ENTRY(516),
+ ICE_PTT_UNUSED_ENTRY(517),
+ ICE_PTT_UNUSED_ENTRY(518),
+ ICE_PTT_UNUSED_ENTRY(519),
+
+ ICE_PTT_UNUSED_ENTRY(520),
+ ICE_PTT_UNUSED_ENTRY(521),
+ ICE_PTT_UNUSED_ENTRY(522),
+ ICE_PTT_UNUSED_ENTRY(523),
+ ICE_PTT_UNUSED_ENTRY(524),
+ ICE_PTT_UNUSED_ENTRY(525),
+ ICE_PTT_UNUSED_ENTRY(526),
+ ICE_PTT_UNUSED_ENTRY(527),
+ ICE_PTT_UNUSED_ENTRY(528),
+ ICE_PTT_UNUSED_ENTRY(529),
+
+ ICE_PTT_UNUSED_ENTRY(530),
+ ICE_PTT_UNUSED_ENTRY(531),
+ ICE_PTT_UNUSED_ENTRY(532),
+ ICE_PTT_UNUSED_ENTRY(533),
+ ICE_PTT_UNUSED_ENTRY(534),
+ ICE_PTT_UNUSED_ENTRY(535),
+ ICE_PTT_UNUSED_ENTRY(536),
+ ICE_PTT_UNUSED_ENTRY(537),
+ ICE_PTT_UNUSED_ENTRY(538),
+ ICE_PTT_UNUSED_ENTRY(539),
+
+ ICE_PTT_UNUSED_ENTRY(540),
+ ICE_PTT_UNUSED_ENTRY(541),
+ ICE_PTT_UNUSED_ENTRY(542),
+ ICE_PTT_UNUSED_ENTRY(543),
+ ICE_PTT_UNUSED_ENTRY(544),
+ ICE_PTT_UNUSED_ENTRY(545),
+ ICE_PTT_UNUSED_ENTRY(546),
+ ICE_PTT_UNUSED_ENTRY(547),
+ ICE_PTT_UNUSED_ENTRY(548),
+ ICE_PTT_UNUSED_ENTRY(549),
+
+ ICE_PTT_UNUSED_ENTRY(550),
+ ICE_PTT_UNUSED_ENTRY(551),
+ ICE_PTT_UNUSED_ENTRY(552),
+ ICE_PTT_UNUSED_ENTRY(553),
+ ICE_PTT_UNUSED_ENTRY(554),
+ ICE_PTT_UNUSED_ENTRY(555),
+ ICE_PTT_UNUSED_ENTRY(556),
+ ICE_PTT_UNUSED_ENTRY(557),
+ ICE_PTT_UNUSED_ENTRY(558),
+ ICE_PTT_UNUSED_ENTRY(559),
+
+ ICE_PTT_UNUSED_ENTRY(560),
+ ICE_PTT_UNUSED_ENTRY(561),
+ ICE_PTT_UNUSED_ENTRY(562),
+ ICE_PTT_UNUSED_ENTRY(563),
+ ICE_PTT_UNUSED_ENTRY(564),
+ ICE_PTT_UNUSED_ENTRY(565),
+ ICE_PTT_UNUSED_ENTRY(566),
+ ICE_PTT_UNUSED_ENTRY(567),
+ ICE_PTT_UNUSED_ENTRY(568),
+ ICE_PTT_UNUSED_ENTRY(569),
+
+ ICE_PTT_UNUSED_ENTRY(570),
+ ICE_PTT_UNUSED_ENTRY(571),
+ ICE_PTT_UNUSED_ENTRY(572),
+ ICE_PTT_UNUSED_ENTRY(573),
+ ICE_PTT_UNUSED_ENTRY(574),
+ ICE_PTT_UNUSED_ENTRY(575),
+ ICE_PTT_UNUSED_ENTRY(576),
+ ICE_PTT_UNUSED_ENTRY(577),
+ ICE_PTT_UNUSED_ENTRY(578),
+ ICE_PTT_UNUSED_ENTRY(579),
+
+ ICE_PTT_UNUSED_ENTRY(580),
+ ICE_PTT_UNUSED_ENTRY(581),
+ ICE_PTT_UNUSED_ENTRY(582),
+ ICE_PTT_UNUSED_ENTRY(583),
+ ICE_PTT_UNUSED_ENTRY(584),
+ ICE_PTT_UNUSED_ENTRY(585),
+ ICE_PTT_UNUSED_ENTRY(586),
+ ICE_PTT_UNUSED_ENTRY(587),
+ ICE_PTT_UNUSED_ENTRY(588),
+ ICE_PTT_UNUSED_ENTRY(589),
+
+ ICE_PTT_UNUSED_ENTRY(590),
+ ICE_PTT_UNUSED_ENTRY(591),
+ ICE_PTT_UNUSED_ENTRY(592),
+ ICE_PTT_UNUSED_ENTRY(593),
+ ICE_PTT_UNUSED_ENTRY(594),
+ ICE_PTT_UNUSED_ENTRY(595),
+ ICE_PTT_UNUSED_ENTRY(596),
+ ICE_PTT_UNUSED_ENTRY(597),
+ ICE_PTT_UNUSED_ENTRY(598),
+ ICE_PTT_UNUSED_ENTRY(599),
+
+ ICE_PTT_UNUSED_ENTRY(600),
+ ICE_PTT_UNUSED_ENTRY(601),
+ ICE_PTT_UNUSED_ENTRY(602),
+ ICE_PTT_UNUSED_ENTRY(603),
+ ICE_PTT_UNUSED_ENTRY(604),
+ ICE_PTT_UNUSED_ENTRY(605),
+ ICE_PTT_UNUSED_ENTRY(606),
+ ICE_PTT_UNUSED_ENTRY(607),
+ ICE_PTT_UNUSED_ENTRY(608),
+ ICE_PTT_UNUSED_ENTRY(609),
+
+ ICE_PTT_UNUSED_ENTRY(610),
+ ICE_PTT_UNUSED_ENTRY(611),
+ ICE_PTT_UNUSED_ENTRY(612),
+ ICE_PTT_UNUSED_ENTRY(613),
+ ICE_PTT_UNUSED_ENTRY(614),
+ ICE_PTT_UNUSED_ENTRY(615),
+ ICE_PTT_UNUSED_ENTRY(616),
+ ICE_PTT_UNUSED_ENTRY(617),
+ ICE_PTT_UNUSED_ENTRY(618),
+ ICE_PTT_UNUSED_ENTRY(619),
+
+ ICE_PTT_UNUSED_ENTRY(620),
+ ICE_PTT_UNUSED_ENTRY(621),
+ ICE_PTT_UNUSED_ENTRY(622),
+ ICE_PTT_UNUSED_ENTRY(623),
+ ICE_PTT_UNUSED_ENTRY(624),
+ ICE_PTT_UNUSED_ENTRY(625),
+ ICE_PTT_UNUSED_ENTRY(626),
+ ICE_PTT_UNUSED_ENTRY(627),
+ ICE_PTT_UNUSED_ENTRY(628),
+ ICE_PTT_UNUSED_ENTRY(629),
+
+ ICE_PTT_UNUSED_ENTRY(630),
+ ICE_PTT_UNUSED_ENTRY(631),
+ ICE_PTT_UNUSED_ENTRY(632),
+ ICE_PTT_UNUSED_ENTRY(633),
+ ICE_PTT_UNUSED_ENTRY(634),
+ ICE_PTT_UNUSED_ENTRY(635),
+ ICE_PTT_UNUSED_ENTRY(636),
+ ICE_PTT_UNUSED_ENTRY(637),
+ ICE_PTT_UNUSED_ENTRY(638),
+ ICE_PTT_UNUSED_ENTRY(639),
+
+ ICE_PTT_UNUSED_ENTRY(640),
+ ICE_PTT_UNUSED_ENTRY(641),
+ ICE_PTT_UNUSED_ENTRY(642),
+ ICE_PTT_UNUSED_ENTRY(643),
+ ICE_PTT_UNUSED_ENTRY(644),
+ ICE_PTT_UNUSED_ENTRY(645),
+ ICE_PTT_UNUSED_ENTRY(646),
+ ICE_PTT_UNUSED_ENTRY(647),
+ ICE_PTT_UNUSED_ENTRY(648),
+ ICE_PTT_UNUSED_ENTRY(649),
+
+ ICE_PTT_UNUSED_ENTRY(650),
+ ICE_PTT_UNUSED_ENTRY(651),
+ ICE_PTT_UNUSED_ENTRY(652),
+ ICE_PTT_UNUSED_ENTRY(653),
+ ICE_PTT_UNUSED_ENTRY(654),
+ ICE_PTT_UNUSED_ENTRY(655),
+ ICE_PTT_UNUSED_ENTRY(656),
+ ICE_PTT_UNUSED_ENTRY(657),
+ ICE_PTT_UNUSED_ENTRY(658),
+ ICE_PTT_UNUSED_ENTRY(659),
+
+ ICE_PTT_UNUSED_ENTRY(660),
+ ICE_PTT_UNUSED_ENTRY(661),
+ ICE_PTT_UNUSED_ENTRY(662),
+ ICE_PTT_UNUSED_ENTRY(663),
+ ICE_PTT_UNUSED_ENTRY(664),
+ ICE_PTT_UNUSED_ENTRY(665),
+ ICE_PTT_UNUSED_ENTRY(666),
+ ICE_PTT_UNUSED_ENTRY(667),
+ ICE_PTT_UNUSED_ENTRY(668),
+ ICE_PTT_UNUSED_ENTRY(669),
+
+ ICE_PTT_UNUSED_ENTRY(670),
+ ICE_PTT_UNUSED_ENTRY(671),
+ ICE_PTT_UNUSED_ENTRY(672),
+ ICE_PTT_UNUSED_ENTRY(673),
+ ICE_PTT_UNUSED_ENTRY(674),
+ ICE_PTT_UNUSED_ENTRY(675),
+ ICE_PTT_UNUSED_ENTRY(676),
+ ICE_PTT_UNUSED_ENTRY(677),
+ ICE_PTT_UNUSED_ENTRY(678),
+ ICE_PTT_UNUSED_ENTRY(679),
+
+ ICE_PTT_UNUSED_ENTRY(680),
+ ICE_PTT_UNUSED_ENTRY(681),
+ ICE_PTT_UNUSED_ENTRY(682),
+ ICE_PTT_UNUSED_ENTRY(683),
+ ICE_PTT_UNUSED_ENTRY(684),
+ ICE_PTT_UNUSED_ENTRY(685),
+ ICE_PTT_UNUSED_ENTRY(686),
+ ICE_PTT_UNUSED_ENTRY(687),
+ ICE_PTT_UNUSED_ENTRY(688),
+ ICE_PTT_UNUSED_ENTRY(689),
+
+ ICE_PTT_UNUSED_ENTRY(690),
+ ICE_PTT_UNUSED_ENTRY(691),
+ ICE_PTT_UNUSED_ENTRY(692),
+ ICE_PTT_UNUSED_ENTRY(693),
+ ICE_PTT_UNUSED_ENTRY(694),
+ ICE_PTT_UNUSED_ENTRY(695),
+ ICE_PTT_UNUSED_ENTRY(696),
+ ICE_PTT_UNUSED_ENTRY(697),
+ ICE_PTT_UNUSED_ENTRY(698),
+ ICE_PTT_UNUSED_ENTRY(699),
+
+ ICE_PTT_UNUSED_ENTRY(700),
+ ICE_PTT_UNUSED_ENTRY(701),
+ ICE_PTT_UNUSED_ENTRY(702),
+ ICE_PTT_UNUSED_ENTRY(703),
+ ICE_PTT_UNUSED_ENTRY(704),
+ ICE_PTT_UNUSED_ENTRY(705),
+ ICE_PTT_UNUSED_ENTRY(706),
+ ICE_PTT_UNUSED_ENTRY(707),
+ ICE_PTT_UNUSED_ENTRY(708),
+ ICE_PTT_UNUSED_ENTRY(709),
+
+ ICE_PTT_UNUSED_ENTRY(710),
+ ICE_PTT_UNUSED_ENTRY(711),
+ ICE_PTT_UNUSED_ENTRY(712),
+ ICE_PTT_UNUSED_ENTRY(713),
+ ICE_PTT_UNUSED_ENTRY(714),
+ ICE_PTT_UNUSED_ENTRY(715),
+ ICE_PTT_UNUSED_ENTRY(716),
+ ICE_PTT_UNUSED_ENTRY(717),
+ ICE_PTT_UNUSED_ENTRY(718),
+ ICE_PTT_UNUSED_ENTRY(719),
+
+ ICE_PTT_UNUSED_ENTRY(720),
+ ICE_PTT_UNUSED_ENTRY(721),
+ ICE_PTT_UNUSED_ENTRY(722),
+ ICE_PTT_UNUSED_ENTRY(723),
+ ICE_PTT_UNUSED_ENTRY(724),
+ ICE_PTT_UNUSED_ENTRY(725),
+ ICE_PTT_UNUSED_ENTRY(726),
+ ICE_PTT_UNUSED_ENTRY(727),
+ ICE_PTT_UNUSED_ENTRY(728),
+ ICE_PTT_UNUSED_ENTRY(729),
+
+ ICE_PTT_UNUSED_ENTRY(730),
+ ICE_PTT_UNUSED_ENTRY(731),
+ ICE_PTT_UNUSED_ENTRY(732),
+ ICE_PTT_UNUSED_ENTRY(733),
+ ICE_PTT_UNUSED_ENTRY(734),
+ ICE_PTT_UNUSED_ENTRY(735),
+ ICE_PTT_UNUSED_ENTRY(736),
+ ICE_PTT_UNUSED_ENTRY(737),
+ ICE_PTT_UNUSED_ENTRY(738),
+ ICE_PTT_UNUSED_ENTRY(739),
+
+ ICE_PTT_UNUSED_ENTRY(740),
+ ICE_PTT_UNUSED_ENTRY(741),
+ ICE_PTT_UNUSED_ENTRY(742),
+ ICE_PTT_UNUSED_ENTRY(743),
+ ICE_PTT_UNUSED_ENTRY(744),
+ ICE_PTT_UNUSED_ENTRY(745),
+ ICE_PTT_UNUSED_ENTRY(746),
+ ICE_PTT_UNUSED_ENTRY(747),
+ ICE_PTT_UNUSED_ENTRY(748),
+ ICE_PTT_UNUSED_ENTRY(749),
+
+ ICE_PTT_UNUSED_ENTRY(750),
+ ICE_PTT_UNUSED_ENTRY(751),
+ ICE_PTT_UNUSED_ENTRY(752),
+ ICE_PTT_UNUSED_ENTRY(753),
+ ICE_PTT_UNUSED_ENTRY(754),
+ ICE_PTT_UNUSED_ENTRY(755),
+ ICE_PTT_UNUSED_ENTRY(756),
+ ICE_PTT_UNUSED_ENTRY(757),
+ ICE_PTT_UNUSED_ENTRY(758),
+ ICE_PTT_UNUSED_ENTRY(759),
+
+ ICE_PTT_UNUSED_ENTRY(760),
+ ICE_PTT_UNUSED_ENTRY(761),
+ ICE_PTT_UNUSED_ENTRY(762),
+ ICE_PTT_UNUSED_ENTRY(763),
+ ICE_PTT_UNUSED_ENTRY(764),
+ ICE_PTT_UNUSED_ENTRY(765),
+ ICE_PTT_UNUSED_ENTRY(766),
+ ICE_PTT_UNUSED_ENTRY(767),
+ ICE_PTT_UNUSED_ENTRY(768),
+ ICE_PTT_UNUSED_ENTRY(769),
+
+ ICE_PTT_UNUSED_ENTRY(770),
+ ICE_PTT_UNUSED_ENTRY(771),
+ ICE_PTT_UNUSED_ENTRY(772),
+ ICE_PTT_UNUSED_ENTRY(773),
+ ICE_PTT_UNUSED_ENTRY(774),
+ ICE_PTT_UNUSED_ENTRY(775),
+ ICE_PTT_UNUSED_ENTRY(776),
+ ICE_PTT_UNUSED_ENTRY(777),
+ ICE_PTT_UNUSED_ENTRY(778),
+ ICE_PTT_UNUSED_ENTRY(779),
+
+ ICE_PTT_UNUSED_ENTRY(780),
+ ICE_PTT_UNUSED_ENTRY(781),
+ ICE_PTT_UNUSED_ENTRY(782),
+ ICE_PTT_UNUSED_ENTRY(783),
+ ICE_PTT_UNUSED_ENTRY(784),
+ ICE_PTT_UNUSED_ENTRY(785),
+ ICE_PTT_UNUSED_ENTRY(786),
+ ICE_PTT_UNUSED_ENTRY(787),
+ ICE_PTT_UNUSED_ENTRY(788),
+ ICE_PTT_UNUSED_ENTRY(789),
+
+ ICE_PTT_UNUSED_ENTRY(790),
+ ICE_PTT_UNUSED_ENTRY(791),
+ ICE_PTT_UNUSED_ENTRY(792),
+ ICE_PTT_UNUSED_ENTRY(793),
+ ICE_PTT_UNUSED_ENTRY(794),
+ ICE_PTT_UNUSED_ENTRY(795),
+ ICE_PTT_UNUSED_ENTRY(796),
+ ICE_PTT_UNUSED_ENTRY(797),
+ ICE_PTT_UNUSED_ENTRY(798),
+ ICE_PTT_UNUSED_ENTRY(799),
+
+ ICE_PTT_UNUSED_ENTRY(800),
+ ICE_PTT_UNUSED_ENTRY(801),
+ ICE_PTT_UNUSED_ENTRY(802),
+ ICE_PTT_UNUSED_ENTRY(803),
+ ICE_PTT_UNUSED_ENTRY(804),
+ ICE_PTT_UNUSED_ENTRY(805),
+ ICE_PTT_UNUSED_ENTRY(806),
+ ICE_PTT_UNUSED_ENTRY(807),
+ ICE_PTT_UNUSED_ENTRY(808),
+ ICE_PTT_UNUSED_ENTRY(809),
+
+ ICE_PTT_UNUSED_ENTRY(810),
+ ICE_PTT_UNUSED_ENTRY(811),
+ ICE_PTT_UNUSED_ENTRY(812),
+ ICE_PTT_UNUSED_ENTRY(813),
+ ICE_PTT_UNUSED_ENTRY(814),
+ ICE_PTT_UNUSED_ENTRY(815),
+ ICE_PTT_UNUSED_ENTRY(816),
+ ICE_PTT_UNUSED_ENTRY(817),
+ ICE_PTT_UNUSED_ENTRY(818),
+ ICE_PTT_UNUSED_ENTRY(819),
+
+ ICE_PTT_UNUSED_ENTRY(820),
+ ICE_PTT_UNUSED_ENTRY(821),
+ ICE_PTT_UNUSED_ENTRY(822),
+ ICE_PTT_UNUSED_ENTRY(823),
+ ICE_PTT_UNUSED_ENTRY(824),
+ ICE_PTT_UNUSED_ENTRY(825),
+ ICE_PTT_UNUSED_ENTRY(826),
+ ICE_PTT_UNUSED_ENTRY(827),
+ ICE_PTT_UNUSED_ENTRY(828),
+ ICE_PTT_UNUSED_ENTRY(829),
+
+ ICE_PTT_UNUSED_ENTRY(830),
+ ICE_PTT_UNUSED_ENTRY(831),
+ ICE_PTT_UNUSED_ENTRY(832),
+ ICE_PTT_UNUSED_ENTRY(833),
+ ICE_PTT_UNUSED_ENTRY(834),
+ ICE_PTT_UNUSED_ENTRY(835),
+ ICE_PTT_UNUSED_ENTRY(836),
+ ICE_PTT_UNUSED_ENTRY(837),
+ ICE_PTT_UNUSED_ENTRY(838),
+ ICE_PTT_UNUSED_ENTRY(839),
+
+ ICE_PTT_UNUSED_ENTRY(840),
+ ICE_PTT_UNUSED_ENTRY(841),
+ ICE_PTT_UNUSED_ENTRY(842),
+ ICE_PTT_UNUSED_ENTRY(843),
+ ICE_PTT_UNUSED_ENTRY(844),
+ ICE_PTT_UNUSED_ENTRY(845),
+ ICE_PTT_UNUSED_ENTRY(846),
+ ICE_PTT_UNUSED_ENTRY(847),
+ ICE_PTT_UNUSED_ENTRY(848),
+ ICE_PTT_UNUSED_ENTRY(849),
+
+ ICE_PTT_UNUSED_ENTRY(850),
+ ICE_PTT_UNUSED_ENTRY(851),
+ ICE_PTT_UNUSED_ENTRY(852),
+ ICE_PTT_UNUSED_ENTRY(853),
+ ICE_PTT_UNUSED_ENTRY(854),
+ ICE_PTT_UNUSED_ENTRY(855),
+ ICE_PTT_UNUSED_ENTRY(856),
+ ICE_PTT_UNUSED_ENTRY(857),
+ ICE_PTT_UNUSED_ENTRY(858),
+ ICE_PTT_UNUSED_ENTRY(859),
+
+ ICE_PTT_UNUSED_ENTRY(860),
+ ICE_PTT_UNUSED_ENTRY(861),
+ ICE_PTT_UNUSED_ENTRY(862),
+ ICE_PTT_UNUSED_ENTRY(863),
+ ICE_PTT_UNUSED_ENTRY(864),
+ ICE_PTT_UNUSED_ENTRY(865),
+ ICE_PTT_UNUSED_ENTRY(866),
+ ICE_PTT_UNUSED_ENTRY(867),
+ ICE_PTT_UNUSED_ENTRY(868),
+ ICE_PTT_UNUSED_ENTRY(869),
+
+ ICE_PTT_UNUSED_ENTRY(870),
+ ICE_PTT_UNUSED_ENTRY(871),
+ ICE_PTT_UNUSED_ENTRY(872),
+ ICE_PTT_UNUSED_ENTRY(873),
+ ICE_PTT_UNUSED_ENTRY(874),
+ ICE_PTT_UNUSED_ENTRY(875),
+ ICE_PTT_UNUSED_ENTRY(876),
+ ICE_PTT_UNUSED_ENTRY(877),
+ ICE_PTT_UNUSED_ENTRY(878),
+ ICE_PTT_UNUSED_ENTRY(879),
+
+ ICE_PTT_UNUSED_ENTRY(880),
+ ICE_PTT_UNUSED_ENTRY(881),
+ ICE_PTT_UNUSED_ENTRY(882),
+ ICE_PTT_UNUSED_ENTRY(883),
+ ICE_PTT_UNUSED_ENTRY(884),
+ ICE_PTT_UNUSED_ENTRY(885),
+ ICE_PTT_UNUSED_ENTRY(886),
+ ICE_PTT_UNUSED_ENTRY(887),
+ ICE_PTT_UNUSED_ENTRY(888),
+ ICE_PTT_UNUSED_ENTRY(889),
+
+ ICE_PTT_UNUSED_ENTRY(890),
+ ICE_PTT_UNUSED_ENTRY(891),
+ ICE_PTT_UNUSED_ENTRY(892),
+ ICE_PTT_UNUSED_ENTRY(893),
+ ICE_PTT_UNUSED_ENTRY(894),
+ ICE_PTT_UNUSED_ENTRY(895),
+ ICE_PTT_UNUSED_ENTRY(896),
+ ICE_PTT_UNUSED_ENTRY(897),
+ ICE_PTT_UNUSED_ENTRY(898),
+ ICE_PTT_UNUSED_ENTRY(899),
+
+ ICE_PTT_UNUSED_ENTRY(900),
+ ICE_PTT_UNUSED_ENTRY(901),
+ ICE_PTT_UNUSED_ENTRY(902),
+ ICE_PTT_UNUSED_ENTRY(903),
+ ICE_PTT_UNUSED_ENTRY(904),
+ ICE_PTT_UNUSED_ENTRY(905),
+ ICE_PTT_UNUSED_ENTRY(906),
+ ICE_PTT_UNUSED_ENTRY(907),
+ ICE_PTT_UNUSED_ENTRY(908),
+ ICE_PTT_UNUSED_ENTRY(909),
+
+ ICE_PTT_UNUSED_ENTRY(910),
+ ICE_PTT_UNUSED_ENTRY(911),
+ ICE_PTT_UNUSED_ENTRY(912),
+ ICE_PTT_UNUSED_ENTRY(913),
+ ICE_PTT_UNUSED_ENTRY(914),
+ ICE_PTT_UNUSED_ENTRY(915),
+ ICE_PTT_UNUSED_ENTRY(916),
+ ICE_PTT_UNUSED_ENTRY(917),
+ ICE_PTT_UNUSED_ENTRY(918),
+ ICE_PTT_UNUSED_ENTRY(919),
+
+ ICE_PTT_UNUSED_ENTRY(920),
+ ICE_PTT_UNUSED_ENTRY(921),
+ ICE_PTT_UNUSED_ENTRY(922),
+ ICE_PTT_UNUSED_ENTRY(923),
+ ICE_PTT_UNUSED_ENTRY(924),
+ ICE_PTT_UNUSED_ENTRY(925),
+ ICE_PTT_UNUSED_ENTRY(926),
+ ICE_PTT_UNUSED_ENTRY(927),
+ ICE_PTT_UNUSED_ENTRY(928),
+ ICE_PTT_UNUSED_ENTRY(929),
+
+ ICE_PTT_UNUSED_ENTRY(930),
+ ICE_PTT_UNUSED_ENTRY(931),
+ ICE_PTT_UNUSED_ENTRY(932),
+ ICE_PTT_UNUSED_ENTRY(933),
+ ICE_PTT_UNUSED_ENTRY(934),
+ ICE_PTT_UNUSED_ENTRY(935),
+ ICE_PTT_UNUSED_ENTRY(936),
+ ICE_PTT_UNUSED_ENTRY(937),
+ ICE_PTT_UNUSED_ENTRY(938),
+ ICE_PTT_UNUSED_ENTRY(939),
+
+ ICE_PTT_UNUSED_ENTRY(940),
+ ICE_PTT_UNUSED_ENTRY(941),
+ ICE_PTT_UNUSED_ENTRY(942),
+ ICE_PTT_UNUSED_ENTRY(943),
+ ICE_PTT_UNUSED_ENTRY(944),
+ ICE_PTT_UNUSED_ENTRY(945),
+ ICE_PTT_UNUSED_ENTRY(946),
+ ICE_PTT_UNUSED_ENTRY(947),
+ ICE_PTT_UNUSED_ENTRY(948),
+ ICE_PTT_UNUSED_ENTRY(949),
+
+ ICE_PTT_UNUSED_ENTRY(950),
+ ICE_PTT_UNUSED_ENTRY(951),
+ ICE_PTT_UNUSED_ENTRY(952),
+ ICE_PTT_UNUSED_ENTRY(953),
+ ICE_PTT_UNUSED_ENTRY(954),
+ ICE_PTT_UNUSED_ENTRY(955),
+ ICE_PTT_UNUSED_ENTRY(956),
+ ICE_PTT_UNUSED_ENTRY(957),
+ ICE_PTT_UNUSED_ENTRY(958),
+ ICE_PTT_UNUSED_ENTRY(959),
+
+ ICE_PTT_UNUSED_ENTRY(960),
+ ICE_PTT_UNUSED_ENTRY(961),
+ ICE_PTT_UNUSED_ENTRY(962),
+ ICE_PTT_UNUSED_ENTRY(963),
+ ICE_PTT_UNUSED_ENTRY(964),
+ ICE_PTT_UNUSED_ENTRY(965),
+ ICE_PTT_UNUSED_ENTRY(966),
+ ICE_PTT_UNUSED_ENTRY(967),
+ ICE_PTT_UNUSED_ENTRY(968),
+ ICE_PTT_UNUSED_ENTRY(969),
+
+ ICE_PTT_UNUSED_ENTRY(970),
+ ICE_PTT_UNUSED_ENTRY(971),
+ ICE_PTT_UNUSED_ENTRY(972),
+ ICE_PTT_UNUSED_ENTRY(973),
+ ICE_PTT_UNUSED_ENTRY(974),
+ ICE_PTT_UNUSED_ENTRY(975),
+ ICE_PTT_UNUSED_ENTRY(976),
+ ICE_PTT_UNUSED_ENTRY(977),
+ ICE_PTT_UNUSED_ENTRY(978),
+ ICE_PTT_UNUSED_ENTRY(979),
+
+ ICE_PTT_UNUSED_ENTRY(980),
+ ICE_PTT_UNUSED_ENTRY(981),
+ ICE_PTT_UNUSED_ENTRY(982),
+ ICE_PTT_UNUSED_ENTRY(983),
+ ICE_PTT_UNUSED_ENTRY(984),
+ ICE_PTT_UNUSED_ENTRY(985),
+ ICE_PTT_UNUSED_ENTRY(986),
+ ICE_PTT_UNUSED_ENTRY(987),
+ ICE_PTT_UNUSED_ENTRY(988),
+ ICE_PTT_UNUSED_ENTRY(989),
+
+ ICE_PTT_UNUSED_ENTRY(990),
+ ICE_PTT_UNUSED_ENTRY(991),
+ ICE_PTT_UNUSED_ENTRY(992),
+ ICE_PTT_UNUSED_ENTRY(993),
+ ICE_PTT_UNUSED_ENTRY(994),
+ ICE_PTT_UNUSED_ENTRY(995),
+ ICE_PTT_UNUSED_ENTRY(996),
+ ICE_PTT_UNUSED_ENTRY(997),
+ ICE_PTT_UNUSED_ENTRY(998),
+ ICE_PTT_UNUSED_ENTRY(999),
+
+ ICE_PTT_UNUSED_ENTRY(1000),
+ ICE_PTT_UNUSED_ENTRY(1001),
+ ICE_PTT_UNUSED_ENTRY(1002),
+ ICE_PTT_UNUSED_ENTRY(1003),
+ ICE_PTT_UNUSED_ENTRY(1004),
+ ICE_PTT_UNUSED_ENTRY(1005),
+ ICE_PTT_UNUSED_ENTRY(1006),
+ ICE_PTT_UNUSED_ENTRY(1007),
+ ICE_PTT_UNUSED_ENTRY(1008),
+ ICE_PTT_UNUSED_ENTRY(1009),
+
+ ICE_PTT_UNUSED_ENTRY(1010),
+ ICE_PTT_UNUSED_ENTRY(1011),
+ ICE_PTT_UNUSED_ENTRY(1012),
+ ICE_PTT_UNUSED_ENTRY(1013),
+ ICE_PTT_UNUSED_ENTRY(1014),
+ ICE_PTT_UNUSED_ENTRY(1015),
+ ICE_PTT_UNUSED_ENTRY(1016),
+ ICE_PTT_UNUSED_ENTRY(1017),
+ ICE_PTT_UNUSED_ENTRY(1018),
+ ICE_PTT_UNUSED_ENTRY(1019),
+
+ ICE_PTT_UNUSED_ENTRY(1020),
+ ICE_PTT_UNUSED_ENTRY(1021),
+ ICE_PTT_UNUSED_ENTRY(1022),
+ ICE_PTT_UNUSED_ENTRY(1023),
+};
+
+static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
+{
+ return ice_ptype_lkup[ptype];
+}
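Editor's note: a minimal, hypothetical sketch of how a caller could consume this table follows. It is illustrative only, not the driver's actual Rx path; the decoded field names (known, outer_ip, outer_ip_ver, inner_prot) and the ICE_RX_PTYPE_* constants are assumed from the ICE_PTT() decode macros defined earlier in this header.

    /*
     * Hypothetical helper: report whether a hardware-reported ptype is a
     * non-tunneled IPv4/TCP packet (for example ptype 26 in the table above).
     */
    static inline bool
    ice_example_ptype_is_ipv4_tcp(u16 ptype)
    {
        struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);

        if (!decoded.known)
            return (false);

        return (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP &&
            decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4 &&
            decoded.inner_prot == ICE_RX_PTYPE_INNER_PROT_TCP);
    }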
+
+#define ICE_LINK_SPEED_UNKNOWN 0
+#define ICE_LINK_SPEED_10MBPS 10
+#define ICE_LINK_SPEED_100MBPS 100
+#define ICE_LINK_SPEED_1000MBPS 1000
+#define ICE_LINK_SPEED_2500MBPS 2500
+#define ICE_LINK_SPEED_5000MBPS 5000
+#define ICE_LINK_SPEED_10000MBPS 10000
+#define ICE_LINK_SPEED_20000MBPS 20000
+#define ICE_LINK_SPEED_25000MBPS 25000
+#define ICE_LINK_SPEED_40000MBPS 40000
+#define ICE_LINK_SPEED_50000MBPS 50000
+#define ICE_LINK_SPEED_100000MBPS 100000
+
+#endif /* _ICE_LAN_TX_RX_H_ */
Index: sys/dev/ice/ice_lib.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_lib.h
@@ -0,0 +1,811 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_lib.h
+ * @brief header for generic device and sysctl functions
+ *
+ * Contains definitions and function declarations for the ice_lib.c file. It
+ * does not depend on the iflib networking stack.
+ */
+
+#ifndef _ICE_LIB_H_
+#define _ICE_LIB_H_
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/syslog.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <sys/bitstring.h>
+
+#include "ice_dcb.h"
+#include "ice_type.h"
+#include "ice_common.h"
+#include "ice_flow.h"
+#include "ice_sched.h"
+#include "ice_resmgr.h"
+
+#include "ice_rss.h"
+
+/* Hide debug sysctls unless INVARIANTS is enabled */
+#ifdef INVARIANTS
+#define ICE_CTLFLAG_DEBUG 0
+#else
+#define ICE_CTLFLAG_DEBUG CTLFLAG_SKIP
+#endif
+
+/**
+ * for_each_set_bit - For loop over each set bit in a bit string
+ * @bit: storage for the bit index
+ * @data: address of data block to loop over
+ * @nbits: maximum number of bits to loop over
+ *
+ * macro to create a for loop over a bit string, which runs the body once for
+ * each bit that is set in the string. The bit variable will be set to the
+ * index of each set bit in the string, with zero representing the first bit.
+ */
+#define for_each_set_bit(bit, data, nbits) \
+ for (bit_ffs((bitstr_t *)(data), (nbits), &(bit)); \
+ (bit) != -1; \
+ bit_ffs_at((bitstr_t *)(data), (bit) + 1, (nbits), &(bit)))
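Editor's note: a minimal usage sketch of the macro above, assuming a caller with a small statically sized bit string. bit_decl(), bit_nclear() and bit_set() come from <sys/bitstring.h>, which this header already includes; the function name is hypothetical.

    /* Hypothetical example: visit each set bit of a 16-bit wide bit string. */
    static void
    ice_example_walk_bits(void)
    {
        bitstr_t bit_decl(mask, 16);
        int bit;

        bit_nclear(mask, 0, 15);        /* start with every bit cleared */
        bit_set(mask, 2);
        bit_set(mask, 9);

        for_each_set_bit(bit, mask, 16)
            printf("bit %d is set\n", bit);     /* visits 2, then 9 */
    }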
+
+/**
+ * @var broadcastaddr
+ * @brief broadcast MAC address
+ *
+ * constant defining the broadcast MAC address, used for programming the
+ * broadcast address as a MAC filter for the PF VSI.
+ */
+static const u8 broadcastaddr[ETHER_ADDR_LEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+
+MALLOC_DECLARE(M_ICE);
+
+extern const char ice_driver_version[];
+extern const uint8_t ice_major_version;
+extern const uint8_t ice_minor_version;
+extern const uint8_t ice_patch_version;
+extern const uint8_t ice_rc_version;
+
+/* global sysctl indicating whether the Tx FC filter should be enabled */
+extern bool ice_enable_tx_fc_filter;
+
+/* global sysctl indicating whether the Tx LLDP filter should be enabled */
+extern bool ice_enable_tx_lldp_filter;
+
+/**
+ * @struct ice_bar_info
+ * @brief PCI BAR mapping information
+ *
+ * Contains data about a PCI BAR that the driver has mapped for use.
+ */
+struct ice_bar_info {
+ struct resource *res;
+ bus_space_tag_t tag;
+ bus_space_handle_t handle;
+ bus_size_t size;
+ int rid;
+};
+
+/* Alignment for queues */
+#define DBA_ALIGN 128
+
+/* Maximum TSO size is (256K)-1 */
+#define ICE_TSO_SIZE ((256*1024) - 1)
+
+/* Minimum size for TSO MSS */
+#define ICE_MIN_TSO_MSS 64
+
+#define ICE_MAX_TX_SEGS 8
+#define ICE_MAX_TSO_SEGS 128
+
+#define ICE_MAX_DMA_SEG_SIZE ((16*1024) - 1)
+
+#define ICE_MAX_RX_SEGS 5
+
+#define ICE_MAX_TSO_HDR_SEGS 3
+
+#define ICE_MSIX_BAR 3
+
+#define ICE_DEFAULT_DESC_COUNT 1024
+#define ICE_MAX_DESC_COUNT 8160
+#define ICE_MIN_DESC_COUNT 64
+#define ICE_DESC_COUNT_INCR 32
+
+/* List of hardware offloads we support */
+#define ICE_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP_SCTP | \
+ CSUM_IP6_TCP| CSUM_IP6_UDP | CSUM_IP6_SCTP | \
+ CSUM_IP_TSO | CSUM_IP6_TSO)
+
+/* Macros to decide what kind of hardware offload to enable */
+#define ICE_CSUM_TCP (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP)
+#define ICE_CSUM_UDP (CSUM_IP_UDP|CSUM_IP6_UDP)
+#define ICE_CSUM_SCTP (CSUM_IP_SCTP|CSUM_IP6_SCTP)
+#define ICE_CSUM_IP (CSUM_IP|CSUM_IP_TSO)
+
+/* List of known RX CSUM offload flags */
+#define ICE_RX_CSUM_FLAGS (CSUM_L3_CALC | CSUM_L3_VALID | CSUM_L4_CALC | \
+ CSUM_L4_VALID | CSUM_L5_CALC | CSUM_L5_VALID | \
+ CSUM_COALESCED)
+
+/* List of interface capabilities supported by ice hardware */
+#define ICE_FULL_CAPS \
+ (IFCAP_TSO4 | IFCAP_TSO6 | \
+ IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \
+ IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \
+ IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \
+	 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \
+ IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO)
+
+/* Safe mode disables support for hardware checksums and TSO */
+#define ICE_SAFE_CAPS \
+ (ICE_FULL_CAPS & ~(IFCAP_HWCSUM | IFCAP_TSO | \
+ IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM))
+
+#define ICE_CAPS(sc) \
+ (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE) ? ICE_SAFE_CAPS : ICE_FULL_CAPS)
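Editor's note: a hedged illustration of how the mask above might be consumed; this is not the driver's actual initialization code. The function name is hypothetical and the softc/ifnet plumbing is an assumption, but if_setcapabilitiesbit() is the standard ifnet accessor.

    /* Hypothetical sketch: advertise full or safe-mode capabilities. */
    static void
    ice_example_set_caps(struct ice_softc *sc, struct ifnet *ifp)
    {
        if_setcapabilitiesbit(ifp, ICE_CAPS(sc), 0);
    }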
+
+/**
+ * ICE_NVM_ACCESS
+ * @brief Private ioctl command number for NVM access ioctls
+ *
+ * The ioctl command number used by NVM update for accessing the driver for
+ * NVM access commands.
+ */
+#define ICE_NVM_ACCESS \
+ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)
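Editor's note: for reference (this arithmetic is not part of the diff), the nested shifts above evaluate to a single constant:

    'E'    (0x45)  << 4 = 0x450;    + '1' (0x31) = 0x481
    0x481          << 4 = 0x4810;   + 'K' (0x4B) = 0x485B
    0x485B         << 4 = 0x485B0;  + 'G' (0x47) = 0x485F7
    0x485F7        << 4 = 0x485F70; | 5          = 0x485F75

so ICE_NVM_ACCESS equals 0x485F75, which is presumably the command number a userspace NVM update tool supplies with the private driver ioctl request (the handler declared in ice_lib.h takes a struct ifdrv).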
+
+#define ICE_AQ_LEN 512
+#define ICE_MBXQ_LEN 512
+#define ICE_SBQ_LEN 512
+
+#define ICE_CTRLQ_WORK_LIMIT 256
+
+#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
+
+/* wait up to 50 microseconds for queue state change */
+#define ICE_Q_WAIT_RETRY_LIMIT 5
+
+#define ICE_UP_TABLE_TRANSLATE(val, i) \
+ (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
+ ICE_AQ_VSI_UP_TABLE_UP##i##_M)
+
+/*
+ * For now, set this to the hardware maximum. Each function gets a smaller
+ * number assigned to it in hw->func_caps.guar_num_vsi, though there
+ * appears to be no guarantee that is the maximum number that a function
+ * can use.
+ */
+#define ICE_MAX_VSI_AVAILABLE 768
+
+/* Maximum size of a single frame (for Tx and Rx) */
+#define ICE_MAX_FRAME_SIZE ICE_AQ_SET_MAC_FRAME_SIZE_MAX
+
+/* Maximum MTU size */
+#define ICE_MAX_MTU (ICE_MAX_FRAME_SIZE - \
+ ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)
+
+/*
+ * Hardware requires that TSO packets have a segment size of at least 64
+ * bytes. To avoid sending bad frames to the hardware, the driver forces the
+ * MSS of all TSO packets to be at least 64 bytes.
+ *
+ * However, if the MTU is reduced below a certain size, the forced minimum
+ * MSS can result in transmitting segmented frames with a packet size larger
+ * than the MTU.
+ *
+ * Avoid this by preventing the MTU from being lowered below this limit.
+ * Alternative solutions require changing the TCP stack to disable offloading
+ * the segmentation when the requested segment size goes below 64 bytes.
+ */
+#define ICE_MIN_MTU 112
+
+#define ICE_DEFAULT_VF_QUEUES 4
+
+/**
+ * @enum ice_dyn_idx_t
+ * @brief Dynamic Control ITR indexes
+ *
+ * This enum matches hardware bits and is meant to be used by DYN_CTLN
+ * registers and QINT registers, or more generally anywhere the hardware
+ * manual mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
+ * register; instead it is a special value meaning "don't update" ITR0/1/2.
+ */
+enum ice_dyn_idx_t {
+ ICE_IDX_ITR0 = 0,
+ ICE_IDX_ITR1 = 1,
+ ICE_IDX_ITR2 = 2,
+ ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
+};
+
+/* By convention, ITR0 is used for RX and ITR1 is used for TX */
+#define ICE_RX_ITR ICE_IDX_ITR0
+#define ICE_TX_ITR ICE_IDX_ITR1
+
+#define ICE_ITR_MAX 8160
+
+/* Define the default Tx and Rx ITR as 50us (translates to ~20k int/sec max) */
+#define ICE_DFLT_TX_ITR 50
+#define ICE_DFLT_RX_ITR 50
+
+/**
+ * ice_itr_to_reg - Convert an ITR setting into its register equivalent
+ * @hw: The device HW structure
+ * @itr_setting: the ITR setting to convert
+ *
+ * Based on the hardware ITR granularity, convert an ITR setting into the
+ * correct value to prepare programming to the HW.
+ */
+static inline u16 ice_itr_to_reg(struct ice_hw *hw, u16 itr_setting)
+{
+ return itr_setting / hw->itr_gran;
+}
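Editor's note: a short worked example. The 2 microsecond granularity is an assumption; hw->itr_gran is populated by shared code that is not part of this hunk.

    /*
     * Example: with hw->itr_gran == 2 (2 usec per register unit), the default
     * 50 usec setting from ICE_DFLT_TX_ITR / ICE_DFLT_RX_ITR would be
     * programmed as 50 / 2 = 25.
     */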
+
+/**
+ * @enum ice_rx_dtype
+ * @brief DTYPE header split options
+ *
+ * This enum matches the Rx context bits to define whether header split is
+ * enabled or not.
+ */
+enum ice_rx_dtype {
+ ICE_RX_DTYPE_NO_SPLIT = 0,
+ ICE_RX_DTYPE_HEADER_SPLIT = 1,
+ ICE_RX_DTYPE_SPLIT_ALWAYS = 2,
+};
+
+/* Strings used for displaying FEC mode
+ *
+ * Use ice_fec_str() to get these unless these need to be embedded in a
+ * string constant.
+ */
+#define ICE_FEC_STRING_AUTO "Auto"
+#define ICE_FEC_STRING_RS "RS-FEC"
+#define ICE_FEC_STRING_BASER "FC-FEC/BASE-R"
+#define ICE_FEC_STRING_NONE "None"
+
+/* Strings used for displaying Flow Control mode
+ *
+ * Use ice_fc_str() to get these unless these need to be embedded in a
+ * string constant.
+ */
+#define ICE_FC_STRING_FULL "Full"
+#define ICE_FC_STRING_TX "Tx"
+#define ICE_FC_STRING_RX "Rx"
+#define ICE_FC_STRING_NONE "None"
+
+/*
+ * The number of times the ice_handle_i2c_req function will retry reading
+ * I2C data via the Admin Queue before returning EBUSY.
+ */
+#define ICE_I2C_MAX_RETRIES 10
+
+/*
+ * The Start LLDP Agent AQ command will fail if it's sent too soon after
+ * the LLDP agent is stopped. The period between the stop and start
+ * commands must currently be at least 2 seconds.
+ */
+#define ICE_START_LLDP_RETRY_WAIT (2 * hz)
+
+/*
+ * The ice_(set|clear)_vsi_promisc() function expects a mask of promiscuous
+ * modes to operate on. This mask is the default one for the driver, where
+ * promiscuous is enabled/disabled for all types of non-VLAN-tagged/VLAN 0
+ * traffic.
+ */
+#define ICE_VSI_PROMISC_MASK (ICE_PROMISC_UCAST_TX | \
+ ICE_PROMISC_UCAST_RX | \
+ ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_MCAST_RX)
+
+struct ice_softc;
+
+/**
+ * @enum ice_rx_cso_stat
+ * @brief software Rx checksum offload statistics
+ *
+ * Enumeration of possible checksum offload statistics captured by software
+ * during the Rx path.
+ */
+enum ice_rx_cso_stat {
+ ICE_CSO_STAT_RX_IP4_ERR,
+ ICE_CSO_STAT_RX_IP6_ERR,
+ ICE_CSO_STAT_RX_L3_ERR,
+ ICE_CSO_STAT_RX_TCP_ERR,
+ ICE_CSO_STAT_RX_UDP_ERR,
+ ICE_CSO_STAT_RX_SCTP_ERR,
+ ICE_CSO_STAT_RX_L4_ERR,
+ ICE_CSO_STAT_RX_COUNT
+};
+
+/**
+ * @enum ice_tx_cso_stat
+ * @brief software Tx checksum offload statistics
+ *
+ * Enumeration of possible checksum offload statistics captured by software
+ * during the Tx path.
+ */
+enum ice_tx_cso_stat {
+ ICE_CSO_STAT_TX_TCP,
+ ICE_CSO_STAT_TX_UDP,
+ ICE_CSO_STAT_TX_SCTP,
+ ICE_CSO_STAT_TX_IP4,
+ ICE_CSO_STAT_TX_IP6,
+ ICE_CSO_STAT_TX_L3_ERR,
+ ICE_CSO_STAT_TX_L4_ERR,
+ ICE_CSO_STAT_TX_COUNT
+};
+
+/**
+ * @struct tx_stats
+ * @brief software Tx statistics
+ *
+ * Contains software counted Tx statistics for a single queue
+ */
+struct tx_stats {
+ /* Soft Stats */
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 mss_too_small;
+ u64 cso[ICE_CSO_STAT_TX_COUNT];
+};
+
+/**
+ * @struct rx_stats
+ * @brief software Rx statistics
+ *
+ * Contains software counted Rx statistics for a single queue
+ */
+struct rx_stats {
+ /* Soft Stats */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 desc_errs;
+ u64 cso[ICE_CSO_STAT_RX_COUNT];
+};
+
+/**
+ * @struct ice_vsi_hw_stats
+ * @brief hardware statistics for a VSI
+ *
+ * Stores statistics that are generated by hardware for a VSI.
+ */
+struct ice_vsi_hw_stats {
+ struct ice_eth_stats prev;
+ struct ice_eth_stats cur;
+ bool offsets_loaded;
+};
+
+/**
+ * @struct ice_pf_hw_stats
+ * @brief hardware statistics for a PF
+ *
+ * Stores statistics that are generated by hardware for each PF.
+ */
+struct ice_pf_hw_stats {
+ struct ice_hw_port_stats prev;
+ struct ice_hw_port_stats cur;
+ bool offsets_loaded;
+};
+
+/**
+ * @struct ice_pf_sw_stats
+ * @brief software statistics for a PF
+ *
+ * Contains software generated statistics relevant to a PF.
+ */
+struct ice_pf_sw_stats {
+ /* # of reset events handled, by type */
+ u32 corer_count;
+ u32 globr_count;
+ u32 empr_count;
+ u32 pfr_count;
+
+ /* # of detected MDD events for Tx and Rx */
+ u32 tx_mdd_count;
+ u32 rx_mdd_count;
+};
+
+/**
+ * @struct ice_vsi
+ * @brief VSI structure
+ *
+ * Contains data relevant to a single VSI
+ */
+struct ice_vsi {
+ /* back pointer to the softc */
+ struct ice_softc *sc;
+
+ bool dynamic; /* if true, dynamically allocated */
+
+ enum ice_vsi_type type; /* type of this VSI */
+ u16 idx; /* software index to sc->all_vsi[] */
+
+ u16 *tx_qmap; /* Tx VSI to PF queue mapping */
+ u16 *rx_qmap; /* Rx VSI to PF queue mapping */
+
+ bitstr_t *vmap; /* Vector(s) assigned to VSI */
+
+ enum ice_resmgr_alloc_type qmap_type;
+
+ struct ice_tx_queue *tx_queues; /* Tx queue array */
+ struct ice_rx_queue *rx_queues; /* Rx queue array */
+
+ int num_tx_queues;
+ int num_rx_queues;
+ int num_vectors;
+
+ int16_t rx_itr;
+ int16_t tx_itr;
+
+ /* RSS configuration */
+ u16 rss_table_size; /* HW RSS table size */
+ u8 rss_lut_type; /* Used to configure Get/Set RSS LUT AQ call */
+
+ int max_frame_size;
+ u16 mbuf_sz;
+
+ struct ice_aqc_vsi_props info;
+
+ /* context for per-VSI sysctls */
+ struct sysctl_ctx_list ctx;
+ struct sysctl_oid *vsi_node;
+
+ /* context for per-txq sysctls */
+ struct sysctl_ctx_list txqs_ctx;
+ struct sysctl_oid *txqs_node;
+
+ /* context for per-rxq sysctls */
+ struct sysctl_ctx_list rxqs_ctx;
+ struct sysctl_oid *rxqs_node;
+
+ /* VSI-level stats */
+ struct ice_vsi_hw_stats hw_stats;
+};
+
+/**
+ * @enum ice_state
+ * @brief Driver state flags
+ *
+ * Used to indicate the status of various driver events. Intended to be
+ * modified only using atomic operations, so that we can use it even in places
+ * which aren't locked.
+ */
+enum ice_state {
+ ICE_STATE_CONTROLQ_EVENT_PENDING,
+ ICE_STATE_VFLR_PENDING,
+ ICE_STATE_MDD_PENDING,
+ ICE_STATE_RESET_OICR_RECV,
+ ICE_STATE_RESET_PFR_REQ,
+ ICE_STATE_PREPARED_FOR_RESET,
+ ICE_STATE_RESET_FAILED,
+ ICE_STATE_DRIVER_INITIALIZED,
+ ICE_STATE_NO_MEDIA,
+ ICE_STATE_RECOVERY_MODE,
+ ICE_STATE_ROLLBACK_MODE,
+ ICE_STATE_LINK_STATUS_REPORTED,
+ ICE_STATE_DETACHING,
+ ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING,
+ /* This entry must be last */
+ ICE_STATE_LAST,
+};
+
+/* Functions for setting and checking driver state. Note the functions take
+ * bit positions, not bitmasks. The atomic_testandset_32 and
+ * atomic_testandclear_32 operations require bit positions, while the
+ * atomic_set_32 and atomic_clear_32 require bitmasks. This can easily lead to
+ * programming errors, so we provide wrapper functions to avoid them.
+ */
+
+/**
+ * ice_set_state - Set the specified state
+ * @s: the state bitmap
+ * @bit: the state to set
+ *
+ * Atomically update the state bitmap with the specified bit set.
+ */
+static inline void
+ice_set_state(volatile u32 *s, enum ice_state bit)
+{
+ /* atomic_set_32 expects a bitmask */
+ atomic_set_32(s, BIT(bit));
+}
+
+/**
+ * ice_clear_state - Clear the specified state
+ * @s: the state bitmap
+ * @bit: the state to clear
+ *
+ * Atomically update the state bitmap with the specified bit cleared.
+ */
+static inline void
+ice_clear_state(volatile u32 *s, enum ice_state bit)
+{
+ /* atomic_clear_32 expects a bitmask */
+ atomic_clear_32(s, BIT(bit));
+}
+
+/**
+ * ice_testandset_state - Test and set the specified state
+ * @s: the state bitmap
+ * @bit: the bit to test
+ *
+ * Atomically update the state bitmap, setting the specified bit. Returns the
+ * previous value of the bit.
+ */
+static inline u32
+ice_testandset_state(volatile u32 *s, enum ice_state bit)
+{
+ /* atomic_testandset_32 expects a bit position */
+ return atomic_testandset_32(s, bit);
+}
+
+/**
+ * ice_testandclear_state - Test and clear the specified state
+ * @s: the state bitmap
+ * @bit: the bit to test
+ *
+ * Atomically update the state bitmap, clearing the specified bit. Returns the
+ * previous value of the bit.
+ */
+static inline u32
+ice_testandclear_state(volatile u32 *s, enum ice_state bit)
+{
+ /* atomic_testandclear_32 expects a bit position */
+ return atomic_testandclear_32(s, bit);
+}
+
+/**
+ * ice_test_state - Test the specified state
+ * @s: the state bitmap
+ * @bit: the bit to test
+ *
+ * Return true if the state is set, false otherwise. Use this only if the flow
+ * does not need to update the state. If you must update the state as well,
+ * prefer ice_testandset_state or ice_testandclear_state.
+ */
+static inline u32
+ice_test_state(volatile u32 *s, enum ice_state bit)
+{
+ return (*s & BIT(bit)) ? true : false;
+}
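Editor's note: to make the intended pattern concrete, here is a hedged sketch of how a deferred task might consume the helpers above. The function is hypothetical, and the location of the state bitmap (a volatile u32 in the softc) is an assumption, since the softc is defined elsewhere.

    /* Hypothetical admin-task fragment using the state helpers above. */
    static void
    ice_example_admin_task(volatile u32 *state)
    {
        if (ice_testandclear_state(state, ICE_STATE_CONTROLQ_EVENT_PENDING)) {
            /* handle the control queue exactly once per posted event */
        }

        if (ice_test_state(state, ICE_STATE_DETACHING))
            return;     /* read-only check; no state update required */
    }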
+
+/**
+ * @struct ice_str_buf
+ * @brief static length buffer for string returning
+ *
+ * Structure containing a fixed size string buffer, used to implement
+ * numeric->string conversion functions that may want to return non-constant
+ * strings.
+ *
+ * This allows returning a fixed size string that is generated by a conversion
+ * function, and then copied to the location where it is used, without
+ * needing an explicit local variable passed by reference.
+ */
+struct ice_str_buf {
+ char str[ICE_STR_BUF_LEN];
+};
+
+struct ice_str_buf _ice_aq_str(enum ice_aq_err aq_err);
+struct ice_str_buf _ice_status_str(enum ice_status status);
+struct ice_str_buf _ice_err_str(int err);
+struct ice_str_buf _ice_fltr_flag_str(u16 flag);
+struct ice_str_buf _ice_mdd_tx_tclan_str(u8 event);
+struct ice_str_buf _ice_mdd_tx_pqm_str(u8 event);
+struct ice_str_buf _ice_mdd_rx_str(u8 event);
+struct ice_str_buf _ice_fw_lldp_status(u32 lldp_status);
+
+#define ice_aq_str(err) _ice_aq_str(err).str
+#define ice_status_str(err) _ice_status_str(err).str
+#define ice_err_str(err) _ice_err_str(err).str
+#define ice_fltr_flag_str(flag) _ice_fltr_flag_str(flag).str
+
+#define ice_mdd_tx_tclan_str(event) _ice_mdd_tx_tclan_str(event).str
+#define ice_mdd_tx_pqm_str(event) _ice_mdd_tx_pqm_str(event).str
+#define ice_mdd_rx_str(event) _ice_mdd_rx_str(event).str
+
+#define ice_fw_lldp_status(lldp_status) _ice_fw_lldp_status(lldp_status).str
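Editor's note: the wrappers above let callers log numeric codes as strings without managing a local buffer. A small hedged sketch follows; the function name and error variables are hypothetical, while device_printf() and the string macros are as declared here.

    /* Hypothetical logging helper built on the string wrappers above. */
    static void
    ice_example_log_aq_error(device_t dev, enum ice_status status,
        enum ice_aq_err aq_err)
    {
        /*
         * The struct-by-value return keeps the temporary buffer alive for
         * the whole call expression, so passing the .str member directly
         * into a printf-style call is safe.
         */
        device_printf(dev, "AQ command failed: %s (aq err: %s)\n",
            ice_status_str(status), ice_aq_str(aq_err));
    }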
+
+/**
+ * ice_enable_intr - Enable interrupts for given vector
+ * @hw: the device private HW structure
+ * @vector: the interrupt index in PF space
+ *
+ * In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
+ */
+static inline void
+ice_enable_intr(struct ice_hw *hw, int vector)
+{
+ u32 dyn_ctl;
+
+ /* Use ITR_NONE so that ITR configuration is not changed. */
+ dyn_ctl = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
+ (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+ wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
+}
+
+/**
+ * ice_disable_intr - Disable interrupts for given vector
+ * @hw: the device private HW structure
+ * @vector: the interrupt index in PF space
+ *
+ * In MSI or Legacy interrupt mode, interrupt 0 is the only valid index.
+ */
+static inline void
+ice_disable_intr(struct ice_hw *hw, int vector)
+{
+ u32 dyn_ctl;
+
+ /* Use ITR_NONE so that ITR configuration is not changed. */
+ dyn_ctl = ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S;
+ wr32(hw, GLINT_DYN_CTL(vector), dyn_ctl);
+}
+
+/**
+ * ice_is_tx_desc_done - determine if a Tx descriptor is done
+ * @txd: the Tx descriptor to check
+ *
+ * Returns true if hardware is done with a Tx descriptor and software is
+ * capable of re-using it.
+ */
+static inline bool
+ice_is_tx_desc_done(struct ice_tx_desc *txd)
+{
+ return (((txd->cmd_type_offset_bsz & ICE_TXD_QW1_DTYPE_M)
+ >> ICE_TXD_QW1_DTYPE_S) == ICE_TX_DESC_DTYPE_DESC_DONE);
+}
+
+/**
+ * ice_get_pf_id - Get the PF id from the hardware registers
+ * @hw: the ice hardware structure
+ *
+ * Reads the PF_FUNC_RID register and extracts the function number from it.
+ * Intended to be used in cases where hw->pf_id hasn't yet been assigned by
+ * ice_init_hw.
+ *
+ * @pre this function should be called only after PCI register access has been
+ * setup, and prior to ice_init_hw. After hardware has been initialized, the
+ * cached hw->pf_id value can be used.
+ */
+static inline u8
+ice_get_pf_id(struct ice_hw *hw)
+{
+ return (u8)((rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNCTION_NUMBER_M) >>
+ PF_FUNC_RID_FUNCTION_NUMBER_S);
+}
+
+/* Details of how to re-initialize depend on the networking stack */
+void ice_request_stack_reinit(struct ice_softc *sc);
+
+/* Details of how to check if the network stack is detaching us */
+bool ice_driver_is_detaching(struct ice_softc *sc);
+
+int ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending);
+int ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num);
+void ice_free_bar(device_t dev, struct ice_bar_info *bar);
+void ice_set_ctrlq_len(struct ice_hw *hw);
+void ice_release_vsi(struct ice_vsi *vsi);
+struct ice_vsi *ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type);
+int ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
+ const int max_rx_queues);
+void ice_free_vsi_qmaps(struct ice_vsi *vsi);
+int ice_initialize_vsi(struct ice_vsi *vsi);
+void ice_deinit_vsi(struct ice_vsi *vsi);
+uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi);
+int ice_get_phy_type_low(uint64_t phy_type_low);
+int ice_get_phy_type_high(uint64_t phy_type_high);
+enum ice_status ice_add_media_types(struct ice_softc *sc, struct ifmedia *media);
+void ice_configure_rxq_interrupts(struct ice_vsi *vsi);
+void ice_configure_txq_interrupts(struct ice_vsi *vsi);
+void ice_flush_rxq_interrupts(struct ice_vsi *vsi);
+void ice_flush_txq_interrupts(struct ice_vsi *vsi);
+int ice_cfg_vsi_for_tx(struct ice_vsi *vsi);
+int ice_cfg_vsi_for_rx(struct ice_vsi *vsi);
+int ice_control_rx_queues(struct ice_vsi *vsi, bool enable);
+int ice_cfg_pf_default_mac_filters(struct ice_softc *sc);
+int ice_rm_pf_default_mac_filters(struct ice_softc *sc);
+void ice_print_nvm_version(struct ice_softc *sc);
+void ice_update_vsi_hw_stats(struct ice_vsi *vsi);
+void ice_reset_vsi_stats(struct ice_vsi *vsi);
+void ice_update_pf_stats(struct ice_softc *sc);
+void ice_reset_pf_stats(struct ice_softc *sc);
+void ice_add_device_sysctls(struct ice_softc *sc);
+void ice_log_hmc_error(struct ice_hw *hw, device_t dev);
+void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *parent,
+ struct ice_eth_stats *stats);
+void ice_add_vsi_sysctls(struct ice_vsi *vsi);
+void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *parent,
+ struct ice_hw_port_stats *stats);
+void ice_configure_misc_interrupts(struct ice_softc *sc);
+int ice_sync_multicast_filters(struct ice_softc *sc);
+enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
+enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
+void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent);
+void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi);
+void ice_add_device_tunables(struct ice_softc *sc);
+int ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr);
+int ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr);
+int ice_vsi_disable_tx(struct ice_vsi *vsi);
+void ice_vsi_add_txqs_ctx(struct ice_vsi *vsi);
+void ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi);
+void ice_vsi_del_txqs_ctx(struct ice_vsi *vsi);
+void ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi);
+void ice_add_txq_sysctls(struct ice_tx_queue *txq);
+void ice_add_rxq_sysctls(struct ice_rx_queue *rxq);
+int ice_config_rss(struct ice_vsi *vsi);
+void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc);
+void ice_load_pkg_file(struct ice_softc *sc);
+void ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status);
+uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter);
+void ice_save_pci_info(struct ice_hw *hw, device_t dev);
+int ice_replay_all_vsi_cfg(struct ice_softc *sc);
+void ice_link_up_msg(struct ice_softc *sc);
+int ice_update_laa_mac(struct ice_softc *sc);
+void ice_get_and_print_bus_info(struct ice_softc *sc);
+const char *ice_fec_str(enum ice_fec_mode mode);
+const char *ice_fc_str(enum ice_fc_mode mode);
+const char *ice_fwd_act_str(enum ice_sw_fwd_act_type action);
+const char * ice_state_to_str(enum ice_state state);
+int ice_init_link_events(struct ice_softc *sc);
+void ice_configure_rx_itr(struct ice_vsi *vsi);
+void ice_configure_tx_itr(struct ice_vsi *vsi);
+void ice_setup_pf_vsi(struct ice_softc *sc);
+void ice_handle_mdd_event(struct ice_softc *sc);
+void ice_init_dcb_setup(struct ice_softc *sc);
+int ice_send_version(struct ice_softc *sc);
+int ice_cfg_pf_ethertype_filters(struct ice_softc *sc);
+void ice_init_link_configuration(struct ice_softc *sc);
+void ice_init_saved_phy_cfg(struct ice_softc *sc);
+void ice_apply_saved_phy_cfg(struct ice_softc *sc);
+void ice_set_link_management_mode(struct ice_softc *sc);
+int ice_module_event_handler(module_t mod, int what, void *arg);
+int ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd);
+int ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req);
+int ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length);
+int ice_alloc_intr_tracking(struct ice_softc *sc);
+void ice_free_intr_tracking(struct ice_softc *sc);
+
+#endif /* _ICE_LIB_H_ */
Index: sys/dev/ice/ice_lib.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_lib.c
@@ -0,0 +1,8000 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_lib.c
+ * @brief Generic device setup and sysctl functions
+ *
+ * Library of generic device functions not specific to the networking stack.
+ *
+ * This includes hardware initialization functions, as well as handlers for
+ * many of the device sysctls used to probe driver status or tune specific
+ * behaviors.
+ */
+
+#include "ice_lib.h"
+#include "ice_iflib.h"
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <machine/resource.h>
+#include <net/if_dl.h>
+#include <sys/firmware.h>
+#include <sys/priv.h>
+
+/**
+ * @var M_ICE
+ * @brief main ice driver allocation type
+ *
+ * malloc(9) allocation type used by the majority of memory allocations in the
+ * ice driver.
+ */
+MALLOC_DEFINE(M_ICE, "ice", "Intel(R) 100Gb Network Driver lib allocations");
+
+/*
+ * Helper function prototypes
+ */
+static int ice_get_next_vsi(struct ice_vsi **all_vsi, int size);
+static void ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx);
+static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type);
+static int ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx);
+static int ice_setup_tx_ctx(struct ice_tx_queue *txq,
+ struct ice_tlan_ctx *tlan_ctx, u16 pf_q);
+static int ice_setup_rx_ctx(struct ice_rx_queue *rxq);
+static int ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg);
+static void ice_free_fltr_list(struct ice_list_head *list);
+static int ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list,
+ const u8 *addr, enum ice_sw_fwd_act_type action);
+static void ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname,
+ struct ice_ctl_q_info *cq);
+static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_info *e);
+static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname,
+ struct ice_rq_event_info *event);
+static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf);
+static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf);
+static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf);
+static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info);
+static u_int ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl, u_int errors);
+static void ice_add_debug_tunables(struct ice_softc *sc);
+static void ice_add_debug_sysctls(struct ice_softc *sc);
+static void ice_vsi_set_rss_params(struct ice_vsi *vsi);
+static void ice_get_default_rss_key(u8 *seed);
+static int ice_set_rss_key(struct ice_vsi *vsi);
+static int ice_set_rss_lut(struct ice_vsi *vsi);
+static void ice_set_rss_flow_flds(struct ice_vsi *vsi);
+static void ice_clean_vsi_rss_cfg(struct ice_vsi *vsi);
+static const char *ice_aq_speed_to_str(struct ice_port_info *pi);
+static const char *ice_requested_fec_mode(struct ice_port_info *pi);
+static const char *ice_negotiated_fec_mode(struct ice_port_info *pi);
+static const char *ice_autoneg_mode(struct ice_port_info *pi);
+static const char *ice_flowcontrol_mode(struct ice_port_info *pi);
+static void ice_print_bus_link_data(device_t dev, struct ice_hw *hw);
+static void ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status);
+static uint8_t ice_pcie_bandwidth_check(struct ice_softc *sc);
+static uint64_t ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed);
+static int ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width);
+static uint64_t ice_phy_types_to_max_rate(struct ice_port_info *pi);
+static void ice_add_sysctls_sw_stats(struct ice_vsi *vsi,
+ struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *parent);
+static void ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi,
+ enum ice_vsi_type type, int idx,
+ bool dynamic);
+static void ice_handle_mib_change_event(struct ice_softc *sc,
+ struct ice_rq_event_info *event);
+static void
+ice_handle_lan_overflow_event(struct ice_softc *sc,
+ struct ice_rq_event_info *event);
+static int ice_add_ethertype_to_list(struct ice_vsi *vsi,
+ struct ice_list_head *list,
+ u16 ethertype, u16 direction,
+ enum ice_sw_fwd_act_type action);
+static void ice_add_rx_lldp_filter(struct ice_softc *sc);
+static void ice_del_rx_lldp_filter(struct ice_softc *sc);
+static u16 ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low,
+ u64 phy_type_high);
+static void
+ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *pcaps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
+static void
+ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *pcaps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
+static void
+ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *pcaps,
+ struct ice_aqc_set_phy_cfg_data *cfg);
+static void
+ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_set_phy_cfg_data *cfg);
+static void
+ice_print_ldo_tlv(struct ice_softc *sc,
+ struct ice_link_default_override_tlv *tlv);
+static void
+ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
+ u64 *phy_type_high);
+static int
+ice_intersect_media_types_with_caps(struct ice_softc *sc, u64 *phy_type_low,
+ u64 *phy_type_high);
+static int
+ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low,
+ u64 *phy_type_high);
+static void
+ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high);
+static enum ice_status
+ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high);
+
+static int ice_module_init(void);
+static int ice_module_exit(void);
+
+/*
+ * package version comparison functions
+ */
+static bool pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name);
+static int pkg_ver_compatible(struct ice_pkg_ver *pkg_ver);
+
+/*
+ * dynamic sysctl handlers
+ */
+static int ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS);
+static int __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS,
+ bool is_phy_type_high);
+static int ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode);
+static int ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS);
+
+/**
+ * ice_map_bar - Map PCIe BAR memory
+ * @dev: the PCIe device
+ * @bar: the BAR info structure
+ * @bar_num: PCIe BAR number
+ *
+ * Maps the specified PCIe BAR. Stores the mapping data in struct
+ * ice_bar_info.
+ */
+int
+ice_map_bar(device_t dev, struct ice_bar_info *bar, int bar_num)
+{
+ if (bar->res != NULL) {
+ device_printf(dev, "PCI BAR%d already mapped\n", bar_num);
+ return (EDOOFUS);
+ }
+
+ bar->rid = PCIR_BAR(bar_num);
+ bar->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar->rid,
+ RF_ACTIVE);
+ if (!bar->res) {
+ device_printf(dev, "PCI BAR%d mapping failed\n", bar_num);
+ return (ENXIO);
+ }
+
+ bar->tag = rman_get_bustag(bar->res);
+ bar->handle = rman_get_bushandle(bar->res);
+ bar->size = rman_get_size(bar->res);
+
+ return (0);
+}
+
+/**
+ * ice_free_bar - Free PCIe BAR memory
+ * @dev: the PCIe device
+ * @bar: the BAR info structure
+ *
+ * Frees the specified PCIe BAR, releasing its resources.
+ */
+void
+ice_free_bar(device_t dev, struct ice_bar_info *bar)
+{
+ if (bar->res != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY, bar->rid, bar->res);
+ bar->res = NULL;
+}
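+
+/*
+ * Usage sketch (illustrative only, not part of the driver code paths): an
+ * attach routine would be expected to map BAR 0 once and release it again on
+ * teardown or on any later attach failure. The flow below is an assumption
+ * about how a caller uses these helpers, not the actual attach code.
+ *
+ * struct ice_bar_info bar = { .res = NULL };
+ * int err;
+ *
+ * err = ice_map_bar(dev, &bar, 0);
+ * if (err != 0)
+ * return (err);
+ * ... access registers through bar.tag / bar.handle ...
+ * ice_free_bar(dev, &bar);
+ */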
+
+/**
+ * ice_set_ctrlq_len - Configure ctrlq lengths for a device
+ * @hw: the device hardware structure
+ *
+ * Configures the control queues for the given device, setting up the
+ * specified lengths, prior to initializing hardware.
+ */
+void
+ice_set_ctrlq_len(struct ice_hw *hw)
+{
+ hw->adminq.num_rq_entries = ICE_AQ_LEN;
+ hw->adminq.num_sq_entries = ICE_AQ_LEN;
+ hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
+ hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
+
+ hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+}
+
+/**
+ * ice_get_next_vsi - Get the next available VSI slot
+ * @all_vsi: the VSI list
+ * @size: the size of the VSI list
+ *
+ * Returns the index to the first available VSI slot. Will return size (one
+ * past the last index) if there are no slots available.
+ */
+static int
+ice_get_next_vsi(struct ice_vsi **all_vsi, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (all_vsi[i] == NULL)
+ return i;
+ }
+
+ return size;
+}
+
+/**
+ * ice_setup_vsi_common - Common VSI setup for both dynamic and static VSIs
+ * @sc: the device private softc structure
+ * @vsi: the VSI to setup
+ * @type: the VSI type of the new VSI
+ * @idx: the index in the all_vsi array to use
+ * @dynamic: whether this VSI memory was dynamically allocated
+ *
+ * Perform setup for a VSI that is common to both dynamically allocated VSIs
+ * and the static PF VSI which is embedded in the softc structure.
+ */
+static void
+ice_setup_vsi_common(struct ice_softc *sc, struct ice_vsi *vsi,
+ enum ice_vsi_type type, int idx, bool dynamic)
+{
+ /* Store important values in VSI struct */
+ vsi->type = type;
+ vsi->sc = sc;
+ vsi->idx = idx;
+ sc->all_vsi[idx] = vsi;
+ vsi->dynamic = dynamic;
+
+ /* Setup the VSI tunables now */
+ ice_add_vsi_tunables(vsi, sc->vsi_sysctls);
+}
+
+/**
+ * ice_alloc_vsi - Allocate a dynamic VSI
+ * @sc: device softc structure
+ * @type: VSI type
+ *
+ * Allocates a new dynamic VSI structure and inserts it into the VSI list.
+ */
+struct ice_vsi *
+ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type)
+{
+ struct ice_vsi *vsi;
+ int idx;
+
+ /* Find an open index for a new VSI to be allocated. If the returned
+ * index is >= the num_available_vsi then it means no slot is
+ * available.
+ */
+ idx = ice_get_next_vsi(sc->all_vsi, sc->num_available_vsi);
+ if (idx >= sc->num_available_vsi) {
+ device_printf(sc->dev, "No available VSI slots\n");
+ return NULL;
+ }
+
+ vsi = (struct ice_vsi *)malloc(sizeof(*vsi), M_ICE, M_WAITOK|M_ZERO);
+ if (!vsi) {
+ device_printf(sc->dev, "Unable to allocate VSI memory\n");
+ return NULL;
+ }
+
+ ice_setup_vsi_common(sc, vsi, type, idx, true);
+
+ return vsi;
+}
+
+/**
+ * ice_setup_pf_vsi - Setup the PF VSI
+ * @sc: the device private softc
+ *
+ * Setup the PF VSI structure which is embedded as sc->pf_vsi in the device
+ * private softc. Unlike other VSIs, the PF VSI memory is allocated as part of
+ * the softc memory, instead of being dynamically allocated at creation.
+ */
+void
+ice_setup_pf_vsi(struct ice_softc *sc)
+{
+ ice_setup_vsi_common(sc, &sc->pf_vsi, ICE_VSI_PF, 0, false);
+}
+
+/**
+ * ice_alloc_vsi_qmap - Allocate queue mapping arrays for a VSI
+ * @vsi: VSI structure
+ * @max_tx_queues: Number of transmit queues to identify
+ * @max_rx_queues: Number of receive queues to identify
+ *
+ * Allocates a max_[t|r]x_queues array of words for the VSI, where each
+ * word contains the index of the queue it represents. Here, all words are
+ * initialized to ICE_INVALID_RES_IDX, indicating that the queues for this
+ * VSI have not yet been assigned an index and are thus not ready for use.
+ *
+ * Returns an error code on failure.
+ */
+int
+ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
+ const int max_rx_queues)
+{
+ struct ice_softc *sc = vsi->sc;
+ int i;
+
+ MPASS(max_tx_queues > 0);
+ MPASS(max_rx_queues > 0);
+
+ /* Allocate Tx queue mapping memory */
+ if (!(vsi->tx_qmap =
+ (u16 *) malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK))) {
+ device_printf(sc->dev, "Unable to allocate Tx qmap memory\n");
+ return (ENOMEM);
+ }
+
+ /* Allocate Rx queue mapping memory */
+ if (!(vsi->rx_qmap =
+ (u16 *) malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK))) {
+ device_printf(sc->dev, "Unable to allocate Rx qmap memory\n");
+ goto free_tx_qmap;
+ }
+
+ /* Mark every queue map as invalid to start with */
+ for (i = 0; i < max_tx_queues; i++) {
+ vsi->tx_qmap[i] = ICE_INVALID_RES_IDX;
+ }
+ for (i = 0; i < max_rx_queues; i++) {
+ vsi->rx_qmap[i] = ICE_INVALID_RES_IDX;
+ }
+
+ return 0;
+
+free_tx_qmap:
+ free(vsi->tx_qmap, M_ICE);
+ vsi->tx_qmap = NULL;
+
+ return (ENOMEM);
+}
+
+/**
+ * ice_free_vsi_qmaps - Free the PF qmaps associated with a VSI
+ * @vsi: the VSI private structure
+ *
+ * Frees the PF qmaps associated with the given VSI. Generally this will be
+ * called by ice_release_vsi, but may need to be called during attach cleanup,
+ * depending on when the qmaps were allocated.
+ */
+void
+ice_free_vsi_qmaps(struct ice_vsi *vsi)
+{
+ struct ice_softc *sc = vsi->sc;
+
+ if (vsi->tx_qmap) {
+ ice_resmgr_release_map(&sc->tx_qmgr, vsi->tx_qmap,
+ vsi->num_tx_queues);
+ free(vsi->tx_qmap, M_ICE);
+ vsi->tx_qmap = NULL;
+ }
+
+ if (vsi->rx_qmap) {
+ ice_resmgr_release_map(&sc->rx_qmgr, vsi->rx_qmap,
+ vsi->num_rx_queues);
+ free(vsi->rx_qmap, M_ICE);
+ vsi->rx_qmap = NULL;
+ }
+}
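+
+/*
+ * Lifecycle sketch (illustrative, with hypothetical queue counts): a VSI is
+ * expected to have its qmaps sized before real queue indices are handed out
+ * by the sc->tx_qmgr / sc->rx_qmgr resource managers; ice_free_vsi_qmaps()
+ * later releases both the managed indices and the arrays themselves.
+ *
+ * if (ice_alloc_vsi_qmap(vsi, 64, 64) != 0)
+ * return (ENOMEM);
+ * ... assign real indices from the Tx/Rx queue managers ...
+ * ice_free_vsi_qmaps(vsi);
+ */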
+
+/**
+ * ice_set_default_vsi_ctx - Setup default VSI context parameters
+ * @ctx: the VSI context to initialize
+ *
+ * Initialize and prepare a default VSI context for configuring a new VSI.
+ */
+static void
+ice_set_default_vsi_ctx(struct ice_vsi_ctx *ctx)
+{
+ u32 table = 0;
+
+ memset(&ctx->info, 0, sizeof(ctx->info));
+ /* VSI will be allocated from shared pool */
+ ctx->alloc_from_pool = true;
+ /* Enable source pruning by default */
+ ctx->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
+ /* Traffic from VSI can be sent to LAN */
+ ctx->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
+ /* Allow all packets untagged/tagged */
+ ctx->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+ ICE_AQ_VSI_VLAN_MODE_M) >>
+ ICE_AQ_VSI_VLAN_MODE_S);
+ /* Show VLAN/UP from packets in Rx descriptors */
+ ctx->info.vlan_flags |= ((ICE_AQ_VSI_VLAN_EMOD_STR_BOTH &
+ ICE_AQ_VSI_VLAN_EMOD_M) >>
+ ICE_AQ_VSI_VLAN_EMOD_S);
+ /* Have 1:1 UP mapping for both ingress/egress tables */
+ table |= ICE_UP_TABLE_TRANSLATE(0, 0);
+ table |= ICE_UP_TABLE_TRANSLATE(1, 1);
+ table |= ICE_UP_TABLE_TRANSLATE(2, 2);
+ table |= ICE_UP_TABLE_TRANSLATE(3, 3);
+ table |= ICE_UP_TABLE_TRANSLATE(4, 4);
+ table |= ICE_UP_TABLE_TRANSLATE(5, 5);
+ table |= ICE_UP_TABLE_TRANSLATE(6, 6);
+ table |= ICE_UP_TABLE_TRANSLATE(7, 7);
+ ctx->info.ingress_table = CPU_TO_LE32(table);
+ ctx->info.egress_table = CPU_TO_LE32(table);
+ /* Have 1:1 UP mapping for outer to inner UP table */
+ ctx->info.outer_up_table = CPU_TO_LE32(table);
+ /* No Outer tag support, so outer_tag_flags remains zero */
+}
+
+/**
+ * ice_set_rss_vsi_ctx - Setup VSI context parameters for RSS
+ * @ctx: the VSI context to configure
+ * @type: the VSI type
+ *
+ * Configures the VSI context for RSS, based on the VSI type.
+ */
+static void
+ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctx, enum ice_vsi_type type)
+{
+ u8 lut_type, hash_type;
+
+ switch (type) {
+ case ICE_VSI_PF:
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ break;
+ case ICE_VSI_VF:
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+ break;
+ default:
+ /* Other VSI types do not support RSS */
+ return;
+ }
+
+ ctx->info.q_opt_rss = (((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+ ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_M));
+}
+
+/**
+ * ice_setup_vsi_qmap - Setup the queue mapping for a VSI
+ * @vsi: the VSI to configure
+ * @ctx: the VSI context to configure
+ *
+ * Configures the context for the given VSI, setting up how the firmware
+ * should map the queues for this VSI.
+ */
+static int
+ice_setup_vsi_qmap(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
+{
+ int pow = 0;
+ u16 qmap;
+
+ MPASS(vsi->rx_qmap != NULL);
+
+ /* TODO:
+ * Handle multiple Traffic Classes
+ * Handle scattered queues (for VFs)
+ */
+ if (vsi->qmap_type != ICE_RESMGR_ALLOC_CONTIGUOUS)
+ return (EOPNOTSUPP);
+
+ ctx->info.mapping_flags |= CPU_TO_LE16(ICE_AQ_VSI_Q_MAP_CONTIG);
+
+ ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]);
+ ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues);
+
+ /* Calculate the next power-of-2 of number of queues */
+ if (vsi->num_rx_queues)
+ pow = flsl(vsi->num_rx_queues - 1);
+
+ /* Assign all the queues to traffic class zero */
+ qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M;
+ ctx->info.tc_mapping[0] = CPU_TO_LE16(qmap);
+
+ return 0;
+}
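+
+/*
+ * Worked example for the queue-count encoding above (illustrative): with 12
+ * Rx queues, flsl(12 - 1) = flsl(0xb) = 4, so traffic class zero is sized
+ * for 2^4 = 16 queues, the smallest power of two at or above the real count.
+ */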
+
+/**
+ * ice_initialize_vsi - Initialize a VSI for use
+ * @vsi: the vsi to initialize
+ *
+ * Initialize a VSI over the adminq and prepare it for operation.
+ */
+int
+ice_initialize_vsi(struct ice_vsi *vsi)
+{
+ struct ice_vsi_ctx ctx = { 0 };
+ struct ice_hw *hw = &vsi->sc->hw;
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ enum ice_status status;
+ int err;
+
+ /* For now, we only have code supporting PF VSIs */
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ ctx.flags = ICE_AQ_VSI_TYPE_PF;
+ break;
+ default:
+ return (ENODEV);
+ }
+
+ ice_set_default_vsi_ctx(&ctx);
+ ice_set_rss_vsi_ctx(&ctx, vsi->type);
+
+ /* XXX: VSIs of other types may need different port info? */
+ ctx.info.sw_id = hw->port_info->sw_id;
+
+ /* Set some RSS parameters based on the VSI type */
+ ice_vsi_set_rss_params(vsi);
+
+ /* Initialize the Rx queue mapping for this VSI */
+ err = ice_setup_vsi_qmap(vsi, &ctx);
+ if (err) {
+ return err;
+ }
+
+ /* (Re-)add VSI to HW VSI handle list */
+ status = ice_add_vsi(hw, vsi->idx, &ctx, NULL);
+ if (status != 0) {
+ device_printf(vsi->sc->dev,
+ "Add VSI AQ call failed, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+ vsi->info = ctx.info;
+
+ /* TODO: DCB traffic class support? */
+ max_txqs[0] = vsi->num_tx_queues;
+
+ status = ice_cfg_vsi_lan(hw->port_info, vsi->idx,
+ ICE_DFLT_TRAFFIC_CLASS, max_txqs);
+ if (status) {
+ device_printf(vsi->sc->dev,
+ "Failed VSI lan queue config, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ ice_deinit_vsi(vsi);
+ return (ENODEV);
+ }
+
+ /* Reset VSI stats */
+ ice_reset_vsi_stats(vsi);
+
+ return 0;
+}
+
+/**
+ * ice_deinit_vsi - Tell firmware to release resources for a VSI
+ * @vsi: the VSI to release
+ *
+ * Helper function which requests the firmware to release the hardware
+ * resources associated with a given VSI.
+ */
+void
+ice_deinit_vsi(struct ice_vsi *vsi)
+{
+ struct ice_vsi_ctx ctx = { 0 };
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+
+ /* Assert that the VSI pointer matches in the list */
+ MPASS(vsi == sc->all_vsi[vsi->idx]);
+
+ ctx.info = vsi->info;
+
+ status = ice_rm_vsi_lan_cfg(hw->port_info, vsi->idx);
+ if (status) {
+ /*
+ * This should only fail if the VSI handle is invalid, or if
+ * any of the nodes have leaf nodes which are still in use.
+ */
+ device_printf(sc->dev,
+ "Unable to remove scheduler nodes for VSI %d, err %s\n",
+ vsi->idx, ice_status_str(status));
+ }
+
+ /* Tell firmware to release the VSI resources */
+ status = ice_free_vsi(hw, vsi->idx, &ctx, false, NULL);
+ if (status != 0) {
+ device_printf(sc->dev,
+ "Free VSI %u AQ call failed, err %s aq_err %s\n",
+ vsi->idx, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+}
+
+/**
+ * ice_release_vsi - Release resources associated with a VSI
+ * @vsi: the VSI to release
+ *
+ * Release software and firmware resources associated with a VSI. Release the
+ * queue managers associated with this VSI. Also free the VSI structure memory
+ * if the VSI was allocated dynamically using ice_alloc_vsi().
+ */
+void
+ice_release_vsi(struct ice_vsi *vsi)
+{
+ struct ice_softc *sc = vsi->sc;
+ int idx = vsi->idx;
+
+ /* Assert that the VSI pointer matches in the list */
+ MPASS(vsi == sc->all_vsi[idx]);
+
+ /* Cleanup RSS configuration */
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS))
+ ice_clean_vsi_rss_cfg(vsi);
+
+ ice_del_vsi_sysctl_ctx(vsi);
+
+ ice_deinit_vsi(vsi);
+
+ ice_free_vsi_qmaps(vsi);
+
+ if (vsi->dynamic) {
+ free(sc->all_vsi[idx], M_ICE);
+ }
+
+ sc->all_vsi[idx] = NULL;
+}
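+
+/*
+ * VSI lifecycle sketch (illustrative only; error handling and the queue and
+ * interrupt setup steps in between are omitted): a dynamically allocated VSI
+ * is expected to pass roughly through the calls below.
+ *
+ * struct ice_vsi *vsi = ice_alloc_vsi(sc, ICE_VSI_PF);
+ * if (vsi == NULL)
+ * return (ENOMEM);
+ * ... allocate qmaps, set up queues, ice_initialize_vsi(vsi) ...
+ * ice_release_vsi(vsi);
+ */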
+
+/**
+ * ice_aq_speed_to_rate - Convert AdminQ speed enum to baudrate
+ * @pi: port info data
+ *
+ * Returns the baudrate value for the current link speed of a given port.
+ */
+uint64_t
+ice_aq_speed_to_rate(struct ice_port_info *pi)
+{
+ switch (pi->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ return IF_Gbps(100);
+ case ICE_AQ_LINK_SPEED_50GB:
+ return IF_Gbps(50);
+ case ICE_AQ_LINK_SPEED_40GB:
+ return IF_Gbps(40);
+ case ICE_AQ_LINK_SPEED_25GB:
+ return IF_Gbps(25);
+ case ICE_AQ_LINK_SPEED_10GB:
+ return IF_Gbps(10);
+ case ICE_AQ_LINK_SPEED_5GB:
+ return IF_Gbps(5);
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return IF_Mbps(2500);
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return IF_Mbps(1000);
+ case ICE_AQ_LINK_SPEED_100MB:
+ return IF_Mbps(100);
+ case ICE_AQ_LINK_SPEED_10MB:
+ return IF_Mbps(10);
+ case ICE_AQ_LINK_SPEED_UNKNOWN:
+ default:
+ /* return 0 if we don't know the link speed */
+ return 0;
+ }
+}
+
+/**
+ * ice_aq_speed_to_str - Convert AdminQ speed enum to string representation
+ * @pi: port info data
+ *
+ * Returns the string representation of the current link speed for a given
+ * port.
+ */
+static const char *
+ice_aq_speed_to_str(struct ice_port_info *pi)
+{
+ switch (pi->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ return "100 Gbps";
+ case ICE_AQ_LINK_SPEED_50GB:
+ return "50 Gbps";
+ case ICE_AQ_LINK_SPEED_40GB:
+ return "40 Gbps";
+ case ICE_AQ_LINK_SPEED_25GB:
+ return "25 Gbps";
+ case ICE_AQ_LINK_SPEED_20GB:
+ return "20 Gbps";
+ case ICE_AQ_LINK_SPEED_10GB:
+ return "10 Gbps";
+ case ICE_AQ_LINK_SPEED_5GB:
+ return "5 Gbps";
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return "2.5 Gbps";
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return "1 Gbps";
+ case ICE_AQ_LINK_SPEED_100MB:
+ return "100 Mbps";
+ case ICE_AQ_LINK_SPEED_10MB:
+ return "10 Mbps";
+ case ICE_AQ_LINK_SPEED_UNKNOWN:
+ default:
+ return "Unknown speed";
+ }
+}
+
+/**
+ * ice_get_phy_type_low - Get media associated with phy_type_low
+ * @phy_type_low: the low 64bits of phy_type from the AdminQ
+ *
+ * Given the lower 64 bits of the phy_type from the hardware, return the
+ * associated ifm_active bit. Return IFM_UNKNOWN when phy_type_low is unknown.
+ * Note that only one of ice_get_phy_type_low or ice_get_phy_type_high should
+ * be called. If phy_type_low is zero, call ice_get_phy_type_high.
+ */
+int
+ice_get_phy_type_low(uint64_t phy_type_low)
+{
+ switch (phy_type_low) {
+ case ICE_PHY_TYPE_LOW_100BASE_TX:
+ return IFM_100_TX;
+ case ICE_PHY_TYPE_LOW_100M_SGMII:
+ return IFM_100_SGMII;
+ case ICE_PHY_TYPE_LOW_1000BASE_T:
+ return IFM_1000_T;
+ case ICE_PHY_TYPE_LOW_1000BASE_SX:
+ return IFM_1000_SX;
+ case ICE_PHY_TYPE_LOW_1000BASE_LX:
+ return IFM_1000_LX;
+ case ICE_PHY_TYPE_LOW_1000BASE_KX:
+ return IFM_1000_KX;
+ case ICE_PHY_TYPE_LOW_1G_SGMII:
+ return IFM_1000_SGMII;
+ case ICE_PHY_TYPE_LOW_2500BASE_T:
+ return IFM_2500_T;
+ case ICE_PHY_TYPE_LOW_2500BASE_X:
+ return IFM_2500_X;
+ case ICE_PHY_TYPE_LOW_2500BASE_KX:
+ return IFM_2500_KX;
+ case ICE_PHY_TYPE_LOW_5GBASE_T:
+ return IFM_5000_T;
+ case ICE_PHY_TYPE_LOW_5GBASE_KR:
+ return IFM_5000_KR;
+ case ICE_PHY_TYPE_LOW_10GBASE_T:
+ return IFM_10G_T;
+ case ICE_PHY_TYPE_LOW_10G_SFI_DA:
+ return IFM_10G_TWINAX;
+ case ICE_PHY_TYPE_LOW_10GBASE_SR:
+ return IFM_10G_SR;
+ case ICE_PHY_TYPE_LOW_10GBASE_LR:
+ return IFM_10G_LR;
+ case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ return IFM_10G_KR;
+ case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ return IFM_10G_AOC;
+ case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
+ return IFM_10G_SFI;
+ case ICE_PHY_TYPE_LOW_25GBASE_T:
+ return IFM_25G_T;
+ case ICE_PHY_TYPE_LOW_25GBASE_CR:
+ return IFM_25G_CR;
+ case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
+ return IFM_25G_CR_S;
+ case ICE_PHY_TYPE_LOW_25GBASE_CR1:
+ return IFM_25G_CR1;
+ case ICE_PHY_TYPE_LOW_25GBASE_SR:
+ return IFM_25G_SR;
+ case ICE_PHY_TYPE_LOW_25GBASE_LR:
+ return IFM_25G_LR;
+ case ICE_PHY_TYPE_LOW_25GBASE_KR:
+ return IFM_25G_KR;
+ case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
+ return IFM_25G_KR_S;
+ case ICE_PHY_TYPE_LOW_25GBASE_KR1:
+ return IFM_25G_KR1;
+ case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ return IFM_25G_AOC;
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+ return IFM_25G_AUI;
+ case ICE_PHY_TYPE_LOW_40GBASE_CR4:
+ return IFM_40G_CR4;
+ case ICE_PHY_TYPE_LOW_40GBASE_SR4:
+ return IFM_40G_SR4;
+ case ICE_PHY_TYPE_LOW_40GBASE_LR4:
+ return IFM_40G_LR4;
+ case ICE_PHY_TYPE_LOW_40GBASE_KR4:
+ return IFM_40G_KR4;
+ case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+ return IFM_40G_XLAUI_AC;
+ case ICE_PHY_TYPE_LOW_40G_XLAUI:
+ return IFM_40G_XLAUI;
+ case ICE_PHY_TYPE_LOW_50GBASE_CR2:
+ return IFM_50G_CR2;
+ case ICE_PHY_TYPE_LOW_50GBASE_SR2:
+ return IFM_50G_SR2;
+ case ICE_PHY_TYPE_LOW_50GBASE_LR2:
+ return IFM_50G_LR2;
+ case ICE_PHY_TYPE_LOW_50GBASE_KR2:
+ return IFM_50G_KR2;
+ case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+ return IFM_50G_LAUI2_AC;
+ case ICE_PHY_TYPE_LOW_50G_LAUI2:
+ return IFM_50G_LAUI2;
+ case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+ return IFM_50G_AUI2_AC;
+ case ICE_PHY_TYPE_LOW_50G_AUI2:
+ return IFM_50G_AUI2;
+ case ICE_PHY_TYPE_LOW_50GBASE_CP:
+ return IFM_50G_CP;
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ return IFM_50G_SR;
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ return IFM_50G_FR;
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ return IFM_50G_LR;
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ return IFM_50G_KR_PAM4;
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ return IFM_50G_AUI1_AC;
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ return IFM_50G_AUI1;
+ case ICE_PHY_TYPE_LOW_100GBASE_CR4:
+ return IFM_100G_CR4;
+ case ICE_PHY_TYPE_LOW_100GBASE_SR4:
+ return IFM_100G_SR4;
+ case ICE_PHY_TYPE_LOW_100GBASE_LR4:
+ return IFM_100G_LR4;
+ case ICE_PHY_TYPE_LOW_100GBASE_KR4:
+ return IFM_100G_KR4;
+ case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+ return IFM_100G_CAUI4_AC;
+ case ICE_PHY_TYPE_LOW_100G_CAUI4:
+ return IFM_100G_CAUI4;
+ case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
+ return IFM_100G_AUI4_AC;
+ case ICE_PHY_TYPE_LOW_100G_AUI4:
+ return IFM_100G_AUI4;
+ case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
+ return IFM_100G_CR_PAM4;
+ case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
+ return IFM_100G_KR_PAM4;
+ case ICE_PHY_TYPE_LOW_100GBASE_CP2:
+ return IFM_100G_CP2;
+ case ICE_PHY_TYPE_LOW_100GBASE_SR2:
+ return IFM_100G_SR2;
+ case ICE_PHY_TYPE_LOW_100GBASE_DR:
+ return IFM_100G_DR;
+ default:
+ return IFM_UNKNOWN;
+ }
+}
+
+/**
+ * ice_get_phy_type_high - Get media associated with phy_type_high
+ * @phy_type_high: the upper 64bits of phy_type from the AdminQ
+ *
+ * Given the upper 64 bits of the phy_type from the hardware, return the
+ * associated ifm_active bit. Return IFM_UNKNOWN on an unknown value. Note
+ * that only one of ice_get_phy_type_low or ice_get_phy_type_high should be
+ * called. If phy_type_high is zero, call ice_get_phy_type_low.
+ */
+int
+ice_get_phy_type_high(uint64_t phy_type_high)
+{
+ switch (phy_type_high) {
+ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
+ return IFM_100G_KR2_PAM4;
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+ return IFM_100G_CAUI2_AC;
+ case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+ return IFM_100G_CAUI2;
+ case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+ return IFM_100G_AUI2_AC;
+ case ICE_PHY_TYPE_HIGH_100G_AUI2:
+ return IFM_100G_AUI2;
+ default:
+ return IFM_UNKNOWN;
+ }
+}
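+
+/*
+ * Decode sketch (illustrative): per the notes above, a caller that holds a
+ * single phy_type bit decodes it with exactly one of the two helpers,
+ * preferring the low word when it is non-zero.
+ *
+ * int ifm_type;
+ *
+ * if (phy_type_low != 0)
+ * ifm_type = ice_get_phy_type_low(phy_type_low);
+ * else
+ * ifm_type = ice_get_phy_type_high(phy_type_high);
+ */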
+
+/**
+ * ice_phy_types_to_max_rate - Returns port's max supported baudrate
+ * @pi: port info struct
+ *
+ * ice_aq_get_phy_caps() with the ICE_AQC_REPORT_TOPO_CAP parameter must have
+ * been called before this function for it to work.
+ */
+static uint64_t
+ice_phy_types_to_max_rate(struct ice_port_info *pi)
+{
+ uint64_t phy_low = pi->phy.phy_type_low;
+ uint64_t phy_high = pi->phy.phy_type_high;
+ uint64_t max_rate = 0;
+ int bit;
+
+ /*
+ * These are based on the indices used in the BIT() macros for
+ * ICE_PHY_TYPE_LOW_*
+ */
+ static const uint64_t phy_rates[] = {
+ IF_Mbps(100),
+ IF_Mbps(100),
+ IF_Gbps(1ULL),
+ IF_Gbps(1ULL),
+ IF_Gbps(1ULL),
+ IF_Gbps(1ULL),
+ IF_Gbps(1ULL),
+ IF_Mbps(2500ULL),
+ IF_Mbps(2500ULL),
+ IF_Mbps(2500ULL),
+ IF_Gbps(5ULL),
+ IF_Gbps(5ULL),
+ IF_Gbps(10ULL),
+ IF_Gbps(10ULL),
+ IF_Gbps(10ULL),
+ IF_Gbps(10ULL),
+ IF_Gbps(10ULL),
+ IF_Gbps(10ULL),
+ IF_Gbps(10ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(25ULL),
+ IF_Gbps(40ULL),
+ IF_Gbps(40ULL),
+ IF_Gbps(40ULL),
+ IF_Gbps(40ULL),
+ IF_Gbps(40ULL),
+ IF_Gbps(40ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(50ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ /* These rates are for ICE_PHY_TYPE_HIGH_* */
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL),
+ IF_Gbps(100ULL)
+ };
+
+ /* coverity[address_of] */
+ for_each_set_bit(bit, &phy_high, 64)
+ if ((bit + 64) < (int)ARRAY_SIZE(phy_rates))
+ max_rate = uqmax(max_rate, phy_rates[(bit + 64)]);
+
+ /* coverity[address_of] */
+ for_each_set_bit(bit, &phy_low, 64)
+ max_rate = uqmax(max_rate, phy_rates[bit]);
+
+ return (max_rate);
+}
+
+/* The if_media type is split over the original 5 bit media variant field,
+ * along with extended types using up extra bits in the options section.
+ * We want to convert this split number into a bitmap index, so we reverse the
+ * calculation of IFM_X here.
+ */
+#define IFM_IDX(x) (((x) & IFM_TMASK) | \
+ (((x) & IFM_ETH_XTYPE) >> IFM_ETH_XSHIFT))
+
+/**
+ * ice_add_media_types - Add supported media types to the media structure
+ * @sc: ice private softc structure
+ * @media: ifmedia structure to setup
+ *
+ * Looks up the supported phy types, and initializes the various media types
+ * available.
+ *
+ * @pre this function must be protected from being called while another thread
+ * is accessing the ifmedia types.
+ */
+enum ice_status
+ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
+{
+ enum ice_status status;
+ uint64_t phy_low, phy_high;
+ int bit;
+
+ ASSERT_CFG_LOCKED(sc);
+
+ /* the maximum possible media type index is 511. We probably don't
+ * need most of this space, but this ensures future compatibility when
+ * additional media types are used.
+ */
+ ice_declare_bitmap(already_added, 511);
+
+ /* Remove all previous media types */
+ ifmedia_removeall(media);
+
+ status = ice_get_phy_types(sc, &phy_low, &phy_high);
+ if (status != ICE_SUCCESS) {
+ /* Function already prints appropriate error
+ * message
+ */
+ return (status);
+ }
+
+ /* make sure the added bitmap is zero'd */
+ memset(already_added, 0, sizeof(already_added));
+
+ /* coverity[address_of] */
+ for_each_set_bit(bit, &phy_low, 64) {
+ uint64_t type = BIT_ULL(bit);
+ int ostype;
+
+ /* get the OS media type */
+ ostype = ice_get_phy_type_low(type);
+
+ /* don't bother adding the unknown type */
+ if (ostype == IFM_UNKNOWN)
+ continue;
+
+ /* only add each media type to the list once */
+ if (ice_is_bit_set(already_added, IFM_IDX(ostype)))
+ continue;
+
+ ifmedia_add(media, IFM_ETHER | ostype, 0, NULL);
+ ice_set_bit(IFM_IDX(ostype), already_added);
+ }
+
+ /* coverity[address_of] */
+ for_each_set_bit(bit, &phy_high, 64) {
+ uint64_t type = BIT_ULL(bit);
+ int ostype;
+
+ /* get the OS media type */
+ ostype = ice_get_phy_type_high(type);
+
+ /* don't bother adding the unknown type */
+ if (ostype == IFM_UNKNOWN)
+ continue;
+
+ /* only add each media type to the list once */
+ if (ice_is_bit_set(already_added, IFM_IDX(ostype)))
+ continue;
+
+ ifmedia_add(media, IFM_ETHER | ostype, 0, NULL);
+ ice_set_bit(IFM_IDX(ostype), already_added);
+ }
+
+ /* Use autoselect media by default */
+ ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(media, IFM_ETHER | IFM_AUTO);
+
+ return (ICE_SUCCESS);
+}
+
+/**
+ * ice_configure_rxq_interrupts - Configure HW Rx queues for MSI-X interrupts
+ * @vsi: the VSI to configure
+ *
+ * Called when setting up MSI-X interrupts to configure the Rx hardware queues.
+ */
+void
+ice_configure_rxq_interrupts(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ int i;
+
+ for (i = 0; i < vsi->num_rx_queues; i++) {
+ struct ice_rx_queue *rxq = &vsi->rx_queues[i];
+ u32 val;
+
+ val = (QINT_RQCTL_CAUSE_ENA_M |
+ (ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) |
+ (rxq->irqv->me << QINT_RQCTL_MSIX_INDX_S));
+ wr32(hw, QINT_RQCTL(vsi->rx_qmap[rxq->me]), val);
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_configure_txq_interrupts - Configure HW Tx queues for MSI-X interrupts
+ * @vsi: the VSI to configure
+ *
+ * Called when setting up MSI-X interrupts to configure the Tx hardware queues.
+ */
+void
+ice_configure_txq_interrupts(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ int i;
+
+ for (i = 0; i < vsi->num_tx_queues; i++) {
+ struct ice_tx_queue *txq = &vsi->tx_queues[i];
+ u32 val;
+
+ val = (QINT_TQCTL_CAUSE_ENA_M |
+ (ICE_TX_ITR << QINT_TQCTL_ITR_INDX_S) |
+ (txq->irqv->me << QINT_TQCTL_MSIX_INDX_S));
+ wr32(hw, QINT_TQCTL(vsi->tx_qmap[txq->me]), val);
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_flush_rxq_interrupts - Unconfigure Hw Rx queues MSI-X interrupt cause
+ * @vsi: the VSI to configure
+ *
+ * Unset the CAUSE_ENA flag of the RQCTL register for each queue, then trigger
+ * a software interrupt on that cause. This is required as part of the Rx
+ * queue disable logic to dissociate the Rx queue from the interrupt.
+ *
+ * Note: this function must be called prior to disabling Rx queues with
+ * ice_control_rx_queues, otherwise the Rx queue may not be disabled properly.
+ */
+void
+ice_flush_rxq_interrupts(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ int i;
+
+ for (i = 0; i < vsi->num_rx_queues; i++) {
+ struct ice_rx_queue *rxq = &vsi->rx_queues[i];
+ u32 reg, val;
+
+ /* Clear the CAUSE_ENA flag */
+ reg = vsi->rx_qmap[rxq->me];
+ val = rd32(hw, QINT_RQCTL(reg));
+ val &= ~QINT_RQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_RQCTL(reg), val);
+
+ ice_flush(hw);
+
+ /* Trigger a software interrupt to complete interrupt
+ * dissociation.
+ */
+ wr32(hw, GLINT_DYN_CTL(rxq->irqv->me),
+ GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
+ }
+}
+
+/**
+ * ice_flush_txq_interrupts - Unconfigure Hw Tx queues MSI-X interrupt cause
+ * @vsi: the VSI to configure
+ *
+ * Unset the CAUSE_ENA flag of the TQCTL register for each queue, then trigger
+ * a software interrupt on that cause. This is required as part of the Tx
+ * queue disable logic to dissociate the Tx queue from the interrupt.
+ *
+ * Note: this function must be called prior to ice_vsi_disable_tx, otherwise
+ * the Tx queue disable may not complete properly.
+ */
+void
+ice_flush_txq_interrupts(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ int i;
+
+ for (i = 0; i < vsi->num_tx_queues; i++) {
+ struct ice_tx_queue *txq = &vsi->tx_queues[i];
+ u32 reg, val;
+
+ /* Clear the CAUSE_ENA flag */
+ reg = vsi->tx_qmap[txq->me];
+ val = rd32(hw, QINT_TQCTL(reg));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(reg), val);
+
+ ice_flush(hw);
+
+ /* Trigger a software interrupt to complete interrupt
+ * dissociation.
+ */
+ wr32(hw, GLINT_DYN_CTL(txq->irqv->me),
+ GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
+ }
+}
+
+/**
+ * ice_configure_rx_itr - Configure the Rx ITR settings for this VSI
+ * @vsi: the VSI to configure
+ *
+ * Program the hardware ITR registers with the settings for this VSI.
+ */
+void
+ice_configure_rx_itr(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ int i;
+
+ /* TODO: Handle per-queue/per-vector ITR? */
+
+ for (i = 0; i < vsi->num_rx_queues; i++) {
+ struct ice_rx_queue *rxq = &vsi->rx_queues[i];
+
+ wr32(hw, GLINT_ITR(ICE_RX_ITR, rxq->irqv->me),
+ ice_itr_to_reg(hw, vsi->rx_itr));
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_configure_tx_itr - Configure the Tx ITR settings for this VSI
+ * @vsi: the VSI to configure
+ *
+ * Program the hardware ITR registers with the settings for this VSI.
+ */
+void
+ice_configure_tx_itr(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ int i;
+
+ /* TODO: Handle per-queue/per-vector ITR? */
+
+ for (i = 0; i < vsi->num_tx_queues; i++) {
+ struct ice_tx_queue *txq = &vsi->tx_queues[i];
+
+ wr32(hw, GLINT_ITR(ICE_TX_ITR, txq->irqv->me),
+ ice_itr_to_reg(hw, vsi->tx_itr));
+ }
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_setup_tx_ctx - Setup an ice_tlan_ctx structure for a queue
+ * @txq: the Tx queue to configure
+ * @tlan_ctx: the Tx LAN queue context structure to initialize
+ * @pf_q: real queue number
+ */
+static int
+ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = txq->vsi;
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw *hw = &sc->hw;
+
+ tlan_ctx->port_num = hw->port_info->lport;
+
+ /* number of descriptors in the queue */
+ tlan_ctx->qlen = txq->desc_count;
+
+ /* set the transmit queue base address, defined in 128 byte units */
+ tlan_ctx->base = txq->tx_paddr >> 7;
+
+ tlan_ctx->pf_num = hw->pf_id;
+
+ /* For now, we only have code supporting PF VSIs */
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ break;
+ default:
+ return (ENODEV);
+ }
+
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ /* Enable TSO */
+ tlan_ctx->tso_ena = 1;
+ tlan_ctx->internal_usage_flag = 1;
+
+ tlan_ctx->tso_qnum = pf_q;
+
+ /*
+ * Stick with the older legacy Tx queue interface, instead of the new
+ * advanced queue interface.
+ */
+ tlan_ctx->legacy_int = 1;
+
+ /* Descriptor WB mode */
+ tlan_ctx->wb_mode = 0;
+
+ return (0);
+}
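+
+/*
+ * Worked example for the base address encoding above (illustrative): a Tx
+ * ring at physical address 0x10000 is programmed as base = 0x10000 >> 7 =
+ * 0x200, i.e. 512 units of 128 bytes.
+ */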
+
+/**
+ * ice_cfg_vsi_for_tx - Configure the hardware for Tx
+ * @vsi: the VSI to configure
+ *
+ * Configure the device Tx queues through firmware AdminQ commands. After
+ * this, Tx queues will be ready for transmit.
+ */
+int
+ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
+{
+ struct ice_aqc_add_tx_qgrp qg = { 0 };
+ struct ice_hw *hw = &vsi->sc->hw;
+ device_t dev = vsi->sc->dev;
+ enum ice_status status;
+ int i, err;
+ u16 pf_q;
+
+ qg.num_txqs = 1;
+
+ for (i = 0; i < vsi->num_tx_queues; i++) {
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+ struct ice_tx_queue *txq = &vsi->tx_queues[i];
+
+ pf_q = vsi->tx_qmap[txq->me];
+ qg.txqs[0].txq_id = htole16(pf_q);
+
+ err = ice_setup_tx_ctx(txq, &tlan_ctx, pf_q);
+ if (err)
+ return err;
+
+ ice_set_ctx((u8 *)&tlan_ctx, qg.txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ status = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0,
+ i, 1, &qg, sizeof(qg), NULL);
+ if (status) {
+ device_printf(dev,
+ "Failed to set LAN Tx queue context, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ return (ENODEV);
+ }
+
+ /* Keep track of the Tx queue TEID */
+ if (pf_q == le16toh(qg.txqs[0].txq_id))
+ txq->q_teid = le32toh(qg.txqs[0].q_teid);
+ }
+
+ return (0);
+}
+
+/**
+ * ice_setup_rx_ctx - Setup an Rx context structure for a receive queue
+ * @rxq: the receive queue to program
+ *
+ * Setup an Rx queue context structure and program it into the hardware
+ * registers. This is a necessary step for enabling the Rx queue.
+ *
+ * @pre the VSI associated with this queue must have initialized mbuf_sz
+ */
+static int
+ice_setup_rx_ctx(struct ice_rx_queue *rxq)
+{
+ struct ice_rlan_ctx rlan_ctx = {0};
+ struct ice_vsi *vsi = rxq->vsi;
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+ u32 rxdid = ICE_RXDID_FLEX_NIC;
+ u32 regval;
+ u16 pf_q;
+
+ pf_q = vsi->rx_qmap[rxq->me];
+
+ /* set the receive queue base address, defined in 128 byte units */
+ rlan_ctx.base = rxq->rx_paddr >> 7;
+
+ rlan_ctx.qlen = rxq->desc_count;
+
+ rlan_ctx.dbuf = vsi->mbuf_sz >> ICE_RLAN_CTX_DBUF_S;
+
+ /* use 32 byte descriptors */
+ rlan_ctx.dsize = 1;
+
+ /* Strip the Ethernet CRC bytes before the packet is posted to the
+ * host memory.
+ */
+ rlan_ctx.crcstrip = 1;
+
+ rlan_ctx.l2tsel = 1;
+
+ /* don't do header splitting */
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+ /* strip VLAN from inner headers */
+ rlan_ctx.showiv = 1;
+
+ rlan_ctx.rxmax = min(vsi->max_frame_size,
+ ICE_MAX_RX_SEGS * vsi->mbuf_sz);
+
+ rlan_ctx.lrxqthresh = 1;
+
+ if (vsi->type != ICE_VSI_VF) {
+ regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+ regval &= ~QRXFLXP_CNTXT_RXDID_IDX_M;
+ regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+ QRXFLXP_CNTXT_RXDID_IDX_M;
+
+ regval &= ~QRXFLXP_CNTXT_RXDID_PRIO_M;
+ regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+ QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+ wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+ }
+
+ status = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+ if (status) {
+ device_printf(sc->dev,
+ "Failed to set LAN Rx queue context, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ wr32(hw, rxq->tail, 0);
+
+ return 0;
+}
+
+/**
+ * ice_cfg_vsi_for_rx - Configure the hardware for Rx
+ * @vsi: the VSI to configure
+ *
+ * Prepare an Rx context descriptor and configure the device to receive
+ * traffic.
+ *
+ * @pre the VSI must have initialized mbuf_sz
+ */
+int
+ice_cfg_vsi_for_rx(struct ice_vsi *vsi)
+{
+ int i, err;
+
+ for (i = 0; i < vsi->num_rx_queues; i++) {
+ MPASS(vsi->mbuf_sz > 0);
+ err = ice_setup_rx_ctx(&vsi->rx_queues[i]);
+ if (err)
+ return err;
+ }
+
+ return (0);
+}
+
+/**
+ * ice_is_rxq_ready - Check if an Rx queue is ready
+ * @hw: ice hw structure
+ * @pf_q: absolute PF queue index to check
+ * @reg: on successful return, contains qrx_ctrl contents
+ *
+ * Reads the QRX_CTRL register and verifies if the queue is in a consistent
+ * state. That is, QENA_REQ matches QENA_STAT. Used to check before making
+ * a request to change the queue, as well as to verify the request has
+ * finished. The queue should change status within a few microseconds, so we
+ * use a small delay while polling the register.
+ *
+ * Returns an error code if the queue does not update after a few retries.
+ */
+static int
+ice_is_rxq_ready(struct ice_hw *hw, int pf_q, u32 *reg)
+{
+ u32 qrx_ctrl, qena_req, qena_stat;
+ int i;
+
+ for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+ qrx_ctrl = rd32(hw, QRX_CTRL(pf_q));
+ qena_req = (qrx_ctrl >> QRX_CTRL_QENA_REQ_S) & 1;
+ qena_stat = (qrx_ctrl >> QRX_CTRL_QENA_STAT_S) & 1;
+
+ /* if the request and status bits equal, then the queue is
+ * fully disabled or enabled.
+ */
+ if (qena_req == qena_stat) {
+ *reg = qrx_ctrl;
+ return (0);
+ }
+
+ /* wait a few microseconds before we check again */
+ DELAY(10);
+ }
+
+ return (ETIMEDOUT);
+}
+
+/**
+ * ice_control_rx_queues - Configure hardware to start or stop the Rx queues
+ * @vsi: VSI to enable/disable queues
+ * @enable: true to enable queues, false to disable
+ *
+ * Control the Rx queues through the QRX_CTRL register, enabling or disabling
+ * them. Wait for the appropriate time to ensure that the queues have actually
+ * reached the expected state.
+ */
+int
+ice_control_rx_queues(struct ice_vsi *vsi, bool enable)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ device_t dev = vsi->sc->dev;
+ u32 qrx_ctrl = 0;
+ int i, err;
+
+ /* TODO: amortize waits by changing all queues up front and then
+ * checking their status afterwards. This will become more necessary
+ * when we have a large number of queues.
+ */
+ for (i = 0; i < vsi->num_rx_queues; i++) {
+ struct ice_rx_queue *rxq = &vsi->rx_queues[i];
+ int pf_q = vsi->rx_qmap[rxq->me];
+
+ err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl);
+ if (err) {
+ device_printf(dev,
+ "Rx queue %d is not ready\n",
+ pf_q);
+ return err;
+ }
+
+ /* Skip if the queue is already in correct state */
+ if (enable == !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M))
+ continue;
+
+ if (enable)
+ qrx_ctrl |= QRX_CTRL_QENA_REQ_M;
+ else
+ qrx_ctrl &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), qrx_ctrl);
+
+ /* wait for the queue to finalize the request */
+ err = ice_is_rxq_ready(hw, pf_q, &qrx_ctrl);
+ if (err) {
+ device_printf(dev,
+ "Rx queue %d %sable timeout\n",
+ pf_q, (enable ? "en" : "dis"));
+ return err;
+ }
+
+ /* this should never happen */
+ if (enable != !!(qrx_ctrl & QRX_CTRL_QENA_STAT_M)) {
+ device_printf(dev,
+ "Rx queue %d invalid state\n",
+ pf_q);
+ return (EDOOFUS);
+ }
+ }
+
+ return (0);
+}
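+
+/*
+ * Ordering sketch (illustrative): as noted in ice_flush_rxq_interrupts(),
+ * the interrupt cause must be dissociated before the Rx queues are disabled,
+ * so a stop path is expected to look roughly like this.
+ *
+ * ice_flush_rxq_interrupts(vsi);
+ * err = ice_control_rx_queues(vsi, false);
+ */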
+
+/**
+ * ice_add_mac_to_list - Add MAC filter to a MAC filter list
+ * @vsi: the VSI to forward to
+ * @list: list which contains MAC filter entries
+ * @addr: the MAC address to be added
+ * @action: filter action to perform on match
+ *
+ * Adds a MAC address filter to the list which will be forwarded to firmware
+ * to add a series of MAC address filters.
+ *
+ * Returns 0 on success, and an error code on failure.
+ *
+ */
+static int
+ice_add_mac_to_list(struct ice_vsi *vsi, struct ice_list_head *list,
+ const u8 *addr, enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_list_entry *entry;
+
+ entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO);
+ if (!entry)
+ return (ENOMEM);
+
+ entry->fltr_info.flag = ICE_FLTR_TX;
+ entry->fltr_info.src_id = ICE_SRC_ID_VSI;
+ entry->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
+ entry->fltr_info.fltr_act = action;
+ entry->fltr_info.vsi_handle = vsi->idx;
+ bcopy(addr, entry->fltr_info.l_data.mac.mac_addr, ETHER_ADDR_LEN);
+
+ LIST_ADD(&entry->list_entry, list);
+
+ return 0;
+}
+
+/**
+ * ice_free_fltr_list - Free memory associated with a MAC address list
+ * @list: the list to free
+ *
+ * Free the memory of each entry associated with the list.
+ */
+static void
+ice_free_fltr_list(struct ice_list_head *list)
+{
+ struct ice_fltr_list_entry *e, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(e, tmp, list, ice_fltr_list_entry, list_entry) {
+ LIST_DEL(&e->list_entry);
+ free(e, M_ICE);
+ }
+}
+
+/**
+ * ice_add_vsi_mac_filter - Add a MAC address filter for a VSI
+ * @vsi: the VSI to add the filter for
+ * @addr: MAC address to add a filter for
+ *
+ * Add a MAC address filter for a given VSI. This is a wrapper around
+ * ice_add_mac to simplify the interface. First, it only accepts a single
+ * address, so we don't have to mess around with the list setup in other
+ * functions. Second, it ignores the ICE_ERR_ALREADY_EXIST error, so that
+ * callers don't need to worry about attempting to add the same filter twice.
+ */
+int
+ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
+{
+ struct ice_list_head mac_addr_list;
+ struct ice_hw *hw = &vsi->sc->hw;
+ device_t dev = vsi->sc->dev;
+ enum ice_status status;
+ int err = 0;
+
+ INIT_LIST_HEAD(&mac_addr_list);
+
+ err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI);
+ if (err)
+ goto free_mac_list;
+
+ status = ice_add_mac(hw, &mac_addr_list);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ ; /* Don't complain if we try to add a filter that already exists */
+ } else if (status) {
+ device_printf(dev,
+ "Failed to add a filter for MAC %6D, err %s aq_err %s\n",
+ addr, ":",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ err = (EIO);
+ }
+
+free_mac_list:
+ ice_free_fltr_list(&mac_addr_list);
+ return err;
+}
+
+/**
+ * ice_cfg_pf_default_mac_filters - Setup default unicast and broadcast addrs
+ * @sc: device softc structure
+ *
+ * Program the default unicast and broadcast filters for the PF VSI.
+ */
+int
+ice_cfg_pf_default_mac_filters(struct ice_softc *sc)
+{
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_hw *hw = &sc->hw;
+ int err;
+
+ /* Add the LAN MAC address */
+ err = ice_add_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr);
+ if (err)
+ return err;
+
+ /* Add the broadcast address */
+ err = ice_add_vsi_mac_filter(vsi, broadcastaddr);
+ if (err)
+ return err;
+
+ return (0);
+}
+
+/**
+ * ice_remove_vsi_mac_filter - Remove a MAC address filter for a VSI
+ * @vsi: the VSI to add the filter for
+ * @addr: MAC address to remove a filter for
+ *
+ * Remove a MAC address filter from a given VSI. This is a wrapper around
+ * ice_remove_mac to simplify the interface. First, it only accepts a single
+ * address, so we don't have to mess around with the list setup in other
+ * functions. Second, it ignores the ICE_ERR_DOES_NOT_EXIST error, so that
+ * callers don't need to worry about attempting to remove filters which
+ * haven't yet been added.
+ */
+int
+ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
+{
+ struct ice_list_head mac_addr_list;
+ struct ice_hw *hw = &vsi->sc->hw;
+ device_t dev = vsi->sc->dev;
+ enum ice_status status;
+ int err = 0;
+
+ INIT_LIST_HEAD(&mac_addr_list);
+
+ err = ice_add_mac_to_list(vsi, &mac_addr_list, addr, ICE_FWD_TO_VSI);
+ if (err)
+ goto free_mac_list;
+
+ status = ice_remove_mac(hw, &mac_addr_list);
+ if (status == ICE_ERR_DOES_NOT_EXIST) {
+ ; /* Don't complain if we try to remove a filter that doesn't exist */
+ } else if (status) {
+ device_printf(dev,
+ "Failed to remove a filter for MAC %6D, err %s aq_err %s\n",
+ addr, ":",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ err = (EIO);
+ }
+
+free_mac_list:
+ ice_free_fltr_list(&mac_addr_list);
+ return err;
+}
+
+/**
+ * ice_rm_pf_default_mac_filters - Remove default unicast and broadcast addrs
+ * @sc: device softc structure
+ *
+ * Remove the default unicast and broadcast filters from the PF VSI.
+ */
+int
+ice_rm_pf_default_mac_filters(struct ice_softc *sc)
+{
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_hw *hw = &sc->hw;
+ int err;
+
+ /* Remove the LAN MAC address */
+ err = ice_remove_vsi_mac_filter(vsi, hw->port_info->mac.lan_addr);
+ if (err)
+ return err;
+
+ /* Remove the broadcast address */
+ err = ice_remove_vsi_mac_filter(vsi, broadcastaddr);
+ if (err)
+ return (EIO);
+
+ return (0);
+}
+
+/**
+ * ice_check_ctrlq_errors - Check for and report controlq errors
+ * @sc: device private structure
+ * @qname: name of the controlq
+ * @cq: the controlq to check
+ *
+ * Check and report controlq errors. Currently all we do is report them to the
+ * kernel message log, but we might want to improve this in the future, such
+ * as to keep track of statistics.
+ */
+static void
+ice_check_ctrlq_errors(struct ice_softc *sc, const char *qname,
+ struct ice_ctl_q_info *cq)
+{
+ struct ice_hw *hw = &sc->hw;
+ u32 val;
+
+ /* Check for error indications. Note that all the controlqs use the
+ * same register layout, so we use the PF_FW_AxQLEN defines only.
+ */
+ val = rd32(hw, cq->rq.len);
+ if (val & (PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
+ PF_FW_ARQLEN_ARQCRIT_M)) {
+ if (val & PF_FW_ARQLEN_ARQVFE_M)
+ device_printf(sc->dev,
+ "%s Receive Queue VF Error detected\n", qname);
+ if (val & PF_FW_ARQLEN_ARQOVFL_M)
+ device_printf(sc->dev,
+ "%s Receive Queue Overflow Error detected\n",
+ qname);
+ if (val & PF_FW_ARQLEN_ARQCRIT_M)
+ device_printf(sc->dev,
+ "%s Receive Queue Critical Error detected\n",
+ qname);
+ val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
+ PF_FW_ARQLEN_ARQCRIT_M);
+ wr32(hw, cq->rq.len, val);
+ }
+
+ val = rd32(hw, cq->sq.len);
+ if (val & (PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
+ PF_FW_ATQLEN_ATQCRIT_M)) {
+ if (val & PF_FW_ATQLEN_ATQVFE_M)
+ device_printf(sc->dev,
+ "%s Send Queue VF Error detected\n", qname);
+ if (val & PF_FW_ATQLEN_ATQOVFL_M)
+ device_printf(sc->dev,
+ "%s Send Queue Overflow Error detected\n",
+ qname);
+ if (val & PF_FW_ATQLEN_ATQCRIT_M)
+ device_printf(sc->dev,
+ "%s Send Queue Critical Error detected\n",
+ qname);
+ val &= ~(PF_FW_ATQLEN_ATQVFE_M | PF_FW_ATQLEN_ATQOVFL_M |
+ PF_FW_ATQLEN_ATQCRIT_M);
+ wr32(hw, cq->sq.len, val);
+ }
+}
+
+/**
+ * ice_process_link_event - Process a link event indication from firmware
+ * @sc: device softc structure
+ * @e: the received event data
+ *
+ * Gets the current link status from hardware, and may print a message if an
+ * unqualified module is detected.
+ */
+static void
+ice_process_link_event(struct ice_softc *sc,
+ struct ice_rq_event_info __invariant_only *e)
+{
+ struct ice_port_info *pi = sc->hw.port_info;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+
+ /* Sanity check that the data length matches */
+ MPASS(le16toh(e->desc.datalen) == sizeof(struct ice_aqc_get_link_status_data));
+
+ /*
+ * Even though the adapter gets link status information inside the
+ * event, it needs to send a Get Link Status AQ command in order
+ * to re-enable link events.
+ */
+ pi->phy.get_link_info = true;
+ ice_get_link_status(pi, &sc->link_up);
+
+ if (pi->phy.link_info.topo_media_conflict &
+ (ICE_AQ_LINK_TOPO_CONFLICT | ICE_AQ_LINK_MEDIA_CONFLICT |
+ ICE_AQ_LINK_TOPO_CORRUPT))
+ device_printf(dev,
+ "Possible mis-configuration of the Ethernet port detected; please use the Intel (R) Ethernet Port Configuration Tool utility to address the issue.\n");
+
+ if ((pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) &&
+ !(pi->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
+ !(pi->phy.link_info.an_info & ICE_AQ_QUALIFIED_MODULE))
+ device_printf(dev,
+ "Link is disabled on this device because an unsupported module type was detected! Refer to the Intel (R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+
+ if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
+ if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) {
+ status = ice_aq_set_link_restart_an(pi, false, NULL);
+ if (status != ICE_SUCCESS)
+ device_printf(dev,
+ "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+ }
+ /* ICE_STATE_NO_MEDIA is cleared when polling task detects media */
+
+ /* Indicate that link status must be reported again */
+ ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);
+
+ /* OS link info is updated elsewhere */
+}
+
+/**
+ * ice_process_ctrlq_event - Respond to a controlq event
+ * @sc: device private structure
+ * @qname: the name for this controlq
+ * @event: the event to process
+ *
+ * Perform actions in response to various controlq event notifications.
+ */
+static void
+ice_process_ctrlq_event(struct ice_softc *sc, const char *qname,
+ struct ice_rq_event_info *event)
+{
+ u16 opcode;
+
+ opcode = le16toh(event->desc.opcode);
+
+ switch (opcode) {
+ case ice_aqc_opc_get_link_status:
+ ice_process_link_event(sc, event);
+ break;
+ case ice_mbx_opc_send_msg_to_pf:
+ /* TODO: handle IOV event */
+ break;
+ case ice_aqc_opc_lldp_set_mib_change:
+ ice_handle_mib_change_event(sc, event);
+ break;
+ case ice_aqc_opc_event_lan_overflow:
+ ice_handle_lan_overflow_event(sc, event);
+ break;
+ default:
+ device_printf(sc->dev,
+ "%s Receive Queue unhandled event 0x%04x ignored\n",
+ qname, opcode);
+ }
+}
+
+/**
+ * ice_process_ctrlq - helper function to process controlq rings
+ * @sc: device private structure
+ * @q_type: specific control queue type
+ * @pending: return parameter to track remaining events
+ *
+ * Process controlq events for a given control queue type. Returns zero on
+ * success, and an error code on failure. If successful, pending is the number
+ * of events remaining in the queue.
+ */
+int
+ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending)
+{
+ struct ice_rq_event_info event = { { 0 } };
+ struct ice_hw *hw = &sc->hw;
+ struct ice_ctl_q_info *cq;
+ enum ice_status status;
+ const char *qname;
+ int loop = 0;
+
+ switch (q_type) {
+ case ICE_CTL_Q_ADMIN:
+ cq = &hw->adminq;
+ qname = "Admin";
+ break;
+ case ICE_CTL_Q_MAILBOX:
+ cq = &hw->mailboxq;
+ qname = "Mailbox";
+ break;
+ default:
+ device_printf(sc->dev,
+ "Unknown control queue type 0x%x\n",
+ q_type);
+ return 0;
+ }
+
+ ice_check_ctrlq_errors(sc, qname, cq);
+
+ /*
+ * Control queue processing happens during the admin task which may be
+ * holding a non-sleepable lock, so we *must* use M_NOWAIT here.
+ */
+ event.buf_len = cq->rq_buf_size;
+ event.msg_buf = (u8 *)malloc(event.buf_len, M_ICE, M_ZERO | M_NOWAIT);
+ if (!event.msg_buf) {
+ device_printf(sc->dev,
+ "Unable to allocate memory for %s Receive Queue event\n",
+ qname);
+ return (ENOMEM);
+ }
+
+ do {
+ status = ice_clean_rq_elem(hw, cq, &event, pending);
+ if (status == ICE_ERR_AQ_NO_WORK)
+ break;
+ if (status) {
+ if (q_type == ICE_CTL_Q_ADMIN)
+ device_printf(sc->dev,
+ "%s Receive Queue event error %s aq_err %s\n",
+ qname, ice_status_str(status),
+ ice_aq_str(cq->rq_last_status));
+ else
+ device_printf(sc->dev,
+ "%s Receive Queue event error %s cq_err %d\n",
+ qname, ice_status_str(status), cq->rq_last_status);
+ free(event.msg_buf, M_ICE);
+ return (EIO);
+ }
+ /* XXX should we separate this handler by controlq type? */
+ ice_process_ctrlq_event(sc, qname, &event);
+ } while (*pending && (++loop < ICE_CTRLQ_WORK_LIMIT));
+
+ free(event.msg_buf, M_ICE);
+
+ return 0;
+}
+
+/**
+ * pkg_ver_empty - Check if a package version is empty
+ * @pkg_ver: the package version to check
+ * @pkg_name: the package name to check
+ *
+ * Checks if the package version structure is empty. We consider a package
+ * version as empty if all of the version numbers are zero and the name string
+ * is empty as well.
+ *
+ * This is used to check if the package version was initialized by the driver,
+ * as we do not expect an actual DDP package file to have a zero'd version and
+ * name.
+ *
+ * @returns true if the package version is empty, or false otherwise.
+ */
+static bool
+pkg_ver_empty(struct ice_pkg_ver *pkg_ver, u8 *pkg_name)
+{
+ return (pkg_name[0] == '\0' &&
+ pkg_ver->major == 0 &&
+ pkg_ver->minor == 0 &&
+ pkg_ver->update == 0 &&
+ pkg_ver->draft == 0);
+}
+
+/**
+ * pkg_ver_compatible - Check if the package version is compatible
+ * @pkg_ver: the package version to check
+ *
+ * Compares the package version number to the driver's expected major/minor
+ * version. Returns an integer indicating whether the version is older, newer,
+ * or compatible with the driver.
+ *
+ * @returns 0 if the package version is compatible, -1 if the package version
+ * is older, and 1 if the package version is newer than the driver version.
+ */
+static int
+pkg_ver_compatible(struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ)
+ return (1); /* newer */
+ else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
+ (pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
+ return (1); /* newer */
+ else if ((pkg_ver->major == ICE_PKG_SUPP_VER_MAJ) &&
+ (pkg_ver->minor == ICE_PKG_SUPP_VER_MNR))
+ return (0); /* compatible */
+ else
+ return (-1); /* older */
+}
+
+/**
+ * ice_os_pkg_version_str - Format OS package version info into a sbuf
+ * @hw: device hw structure
+ * @buf: string buffer to store name/version string
+ *
+ * Formats the name and version of the OS DDP package as found in the ice_ddp
+ * module into a string.
+ *
+ * @remark This will almost always be the same as the active package, but
+ * could be different in some cases. Use ice_active_pkg_version_str to get the
+ * version of the active DDP package.
+ */
+static void
+ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf)
+{
+ char name_buf[ICE_PKG_NAME_SIZE];
+
+ /* If the OS DDP package info is empty, use "None" */
+ if (pkg_ver_empty(&hw->pkg_ver, hw->pkg_name)) {
+ sbuf_printf(buf, "None");
+ return;
+ }
+
+ /*
+ * This should already be null-terminated, but since this is a raw
+ * value from an external source, strlcpy() into a new buffer to
+ * make sure.
+ */
+ bzero(name_buf, sizeof(name_buf));
+ strlcpy(name_buf, (char *)hw->pkg_name, ICE_PKG_NAME_SIZE);
+
+ sbuf_printf(buf, "%s version %u.%u.%u.%u",
+ name_buf,
+ hw->pkg_ver.major,
+ hw->pkg_ver.minor,
+ hw->pkg_ver.update,
+ hw->pkg_ver.draft);
+}
+
+/**
+ * ice_active_pkg_version_str - Format active package version info into a sbuf
+ * @hw: device hw structure
+ * @buf: string buffer to store name/version string
+ *
+ * Formats the name and version of the active DDP package info into a string
+ * buffer for use.
+ */
+static void
+ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf)
+{
+ char name_buf[ICE_PKG_NAME_SIZE];
+
+ /* If the active DDP package info is empty, use "None" */
+ if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) {
+ sbuf_printf(buf, "None");
+ return;
+ }
+
+ /*
+ * This should already be null-terminated, but since this is a raw
+ * value from an external source, strlcpy() into a new buffer to
+ * make sure.
+ */
+ bzero(name_buf, sizeof(name_buf));
+ strlcpy(name_buf, (char *)hw->active_pkg_name, ICE_PKG_NAME_SIZE);
+
+ sbuf_printf(buf, "%s version %u.%u.%u.%u",
+ name_buf,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
+
+ if (hw->active_track_id != 0)
+ sbuf_printf(buf, ", track id 0x%08x", hw->active_track_id);
+}
+
+/**
+ * ice_nvm_version_str - Format the NVM version information into a sbuf
+ * @hw: device hw structure
+ * @buf: string buffer to store version string
+ *
+ * Formats the NVM information including firmware version, API version, NVM
+ * version, the EETRACK id, and OEM specific version information into a string
+ * buffer.
+ */
+static void
+ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf)
+{
+ struct ice_nvm_info *nvm = &hw->nvm;
+ struct ice_orom_info *orom = &nvm->orom;
+ struct ice_netlist_ver_info *netlist_ver = &hw->netlist_ver;
+
+ /* Note that the netlist versions are stored in packed Binary Coded
+ * Decimal format. The use of '%x' will correctly display these as
+ * decimal numbers. This works because every 4 bits will be displayed
+ * as a hexadecimal digit, and the BCD format will only use the values
+ * 0-9.
+ */
+ sbuf_printf(buf,
+ "fw %u.%u.%u api %u.%u nvm %x.%02x etid %08x netlist %x.%x.%x-%x.%x.%x.%04x oem %u.%u.%u",
+ hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
+ hw->api_maj_ver, hw->api_min_ver,
+ nvm->major_ver, nvm->minor_ver, nvm->eetrack,
+ netlist_ver->major, netlist_ver->minor,
+ netlist_ver->type >> 16, netlist_ver->type & 0xFFFF,
+ netlist_ver->rev, netlist_ver->cust_ver, netlist_ver->hash,
+ orom->major, orom->build, orom->patch);
+}
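+
+/*
+ * For illustration only, the string built above ends up looking roughly like
+ * the following (all values are made up, not from any real device), showing
+ * how the format specifiers line up:
+ *
+ *   fw 5.0.5 api 1.7 nvm 2.15 etid 80001234 netlist 1.1.2000-1.0.0.1a2b oem 1.5.1
+ */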
+
+/**
+ * ice_print_nvm_version - Print the NVM info to the kernel message log
+ * @sc: the device softc structure
+ *
+ * Format and print an NVM version string using ice_nvm_version_str().
+ */
+void
+ice_print_nvm_version(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ struct sbuf *sbuf;
+
+ sbuf = sbuf_new_auto();
+ ice_nvm_version_str(hw, sbuf);
+ sbuf_finish(sbuf);
+ device_printf(dev, "%s\n", sbuf_data(sbuf));
+ sbuf_delete(sbuf);
+}
+
+/**
+ * ice_update_vsi_hw_stats - Update VSI-specific ethernet statistics counters
+ * @vsi: the VSI to be updated
+ *
+ * Reads hardware stats and updates the ice_vsi_hw_stats tracking structure with
+ * the updated values.
+ */
+void
+ice_update_vsi_hw_stats(struct ice_vsi *vsi)
+{
+ struct ice_eth_stats *prev_es, *cur_es;
+ struct ice_hw *hw = &vsi->sc->hw;
+ u16 vsi_num;
+
+ if (!ice_is_vsi_valid(hw, vsi->idx))
+ return;
+
+ vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); /* HW absolute index of a VSI */
+ prev_es = &vsi->hw_stats.prev;
+ cur_es = &vsi->hw_stats.cur;
+
+#define ICE_VSI_STAT40(name, location) \
+ ice_stat_update40(hw, name ## L(vsi_num), \
+ vsi->hw_stats.offsets_loaded, \
+ &prev_es->location, &cur_es->location)
+
+#define ICE_VSI_STAT32(name, location) \
+ ice_stat_update32(hw, name(vsi_num), \
+ vsi->hw_stats.offsets_loaded, \
+ &prev_es->location, &cur_es->location)
+
+ ICE_VSI_STAT40(GLV_GORC, rx_bytes);
+ ICE_VSI_STAT40(GLV_UPRC, rx_unicast);
+ ICE_VSI_STAT40(GLV_MPRC, rx_multicast);
+ ICE_VSI_STAT40(GLV_BPRC, rx_broadcast);
+ ICE_VSI_STAT32(GLV_RDPC, rx_discards);
+ ICE_VSI_STAT40(GLV_GOTC, tx_bytes);
+ ICE_VSI_STAT40(GLV_UPTC, tx_unicast);
+ ICE_VSI_STAT40(GLV_MPTC, tx_multicast);
+ ICE_VSI_STAT40(GLV_BPTC, tx_broadcast);
+ ICE_VSI_STAT32(GLV_TEPC, tx_errors);
+
+ ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded,
+ cur_es);
+
+#undef ICE_VSI_STAT40
+#undef ICE_VSI_STAT32
+
+ vsi->hw_stats.offsets_loaded = true;
+}
+
+/**
+ * ice_reset_vsi_stats - Reset VSI statistics counters
+ * @vsi: VSI structure
+ *
+ * Resets the software tracking counters for the VSI statistics, and indicates
+ * that the offsets haven't been loaded. This is intended to be called
+ * post-reset so that VSI statistics count from zero again.
+ */
+void
+ice_reset_vsi_stats(struct ice_vsi *vsi)
+{
+ /* Reset HW stats */
+ memset(&vsi->hw_stats.prev, 0, sizeof(vsi->hw_stats.prev));
+ memset(&vsi->hw_stats.cur, 0, sizeof(vsi->hw_stats.cur));
+ vsi->hw_stats.offsets_loaded = false;
+}
+
+/**
+ * ice_update_pf_stats - Update port stats counters
+ * @sc: device private softc structure
+ *
+ * Reads hardware statistics registers and updates the software tracking
+ * structure with new values.
+ */
+void
+ice_update_pf_stats(struct ice_softc *sc)
+{
+ struct ice_hw_port_stats *prev_ps, *cur_ps;
+ struct ice_hw *hw = &sc->hw;
+ u8 lport;
+
+ MPASS(hw->port_info);
+
+ prev_ps = &sc->stats.prev;
+ cur_ps = &sc->stats.cur;
+ lport = hw->port_info->lport;
+
+#define ICE_PF_STAT40(name, location) \
+ ice_stat_update40(hw, name ## L(lport), \
+ sc->stats.offsets_loaded, \
+ &prev_ps->location, &cur_ps->location)
+
+#define ICE_PF_STAT32(name, location) \
+ ice_stat_update32(hw, name(lport), \
+ sc->stats.offsets_loaded, \
+ &prev_ps->location, &cur_ps->location)
+
+ ICE_PF_STAT40(GLPRT_GORC, eth.rx_bytes);
+ ICE_PF_STAT40(GLPRT_UPRC, eth.rx_unicast);
+ ICE_PF_STAT40(GLPRT_MPRC, eth.rx_multicast);
+ ICE_PF_STAT40(GLPRT_BPRC, eth.rx_broadcast);
+ ICE_PF_STAT40(GLPRT_GOTC, eth.tx_bytes);
+ ICE_PF_STAT40(GLPRT_UPTC, eth.tx_unicast);
+ ICE_PF_STAT40(GLPRT_MPTC, eth.tx_multicast);
+ ICE_PF_STAT40(GLPRT_BPTC, eth.tx_broadcast);
+
+ ICE_PF_STAT32(GLPRT_TDOLD, tx_dropped_link_down);
+ ICE_PF_STAT40(GLPRT_PRC64, rx_size_64);
+ ICE_PF_STAT40(GLPRT_PRC127, rx_size_127);
+ ICE_PF_STAT40(GLPRT_PRC255, rx_size_255);
+ ICE_PF_STAT40(GLPRT_PRC511, rx_size_511);
+ ICE_PF_STAT40(GLPRT_PRC1023, rx_size_1023);
+ ICE_PF_STAT40(GLPRT_PRC1522, rx_size_1522);
+ ICE_PF_STAT40(GLPRT_PRC9522, rx_size_big);
+ ICE_PF_STAT40(GLPRT_PTC64, tx_size_64);
+ ICE_PF_STAT40(GLPRT_PTC127, tx_size_127);
+ ICE_PF_STAT40(GLPRT_PTC255, tx_size_255);
+ ICE_PF_STAT40(GLPRT_PTC511, tx_size_511);
+ ICE_PF_STAT40(GLPRT_PTC1023, tx_size_1023);
+ ICE_PF_STAT40(GLPRT_PTC1522, tx_size_1522);
+ ICE_PF_STAT40(GLPRT_PTC9522, tx_size_big);
+
+ ICE_PF_STAT32(GLPRT_LXONRXC, link_xon_rx);
+ ICE_PF_STAT32(GLPRT_LXOFFRXC, link_xoff_rx);
+ ICE_PF_STAT32(GLPRT_LXONTXC, link_xon_tx);
+ ICE_PF_STAT32(GLPRT_LXOFFTXC, link_xoff_tx);
+ ICE_PF_STAT32(GLPRT_CRCERRS, crc_errors);
+ ICE_PF_STAT32(GLPRT_ILLERRC, illegal_bytes);
+ ICE_PF_STAT32(GLPRT_MLFC, mac_local_faults);
+ ICE_PF_STAT32(GLPRT_MRFC, mac_remote_faults);
+ ICE_PF_STAT32(GLPRT_RLEC, rx_len_errors);
+ ICE_PF_STAT32(GLPRT_RUC, rx_undersize);
+ ICE_PF_STAT32(GLPRT_RFC, rx_fragments);
+ ICE_PF_STAT32(GLPRT_ROC, rx_oversize);
+ ICE_PF_STAT32(GLPRT_RJC, rx_jabber);
+
+#undef ICE_PF_STAT40
+#undef ICE_PF_STAT32
+
+ sc->stats.offsets_loaded = true;
+}
+
+/**
+ * ice_reset_pf_stats - Reset port stats counters
+ * @sc: Device private softc structure
+ *
+ * Reset software tracking values for statistics to zero, and indicate that
+ * offsets haven't been loaded. Intended to be called after a device reset so
+ * that statistics count from zero again.
+ */
+void
+ice_reset_pf_stats(struct ice_softc *sc)
+{
+ memset(&sc->stats.prev, 0, sizeof(sc->stats.prev));
+ memset(&sc->stats.cur, 0, sizeof(sc->stats.cur));
+ sc->stats.offsets_loaded = false;
+}
+
+/**
+ * ice_sysctl_show_fw - sysctl callback to show firmware information
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for the fw_version sysctl, to display the current firmware
+ * information found at hardware init time.
+ */
+static int
+ice_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ struct sbuf *sbuf;
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ ice_nvm_version_str(hw, sbuf);
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_pba_number - sysctl callback to show PBA number
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for the pba_number sysctl, used to read the Product Board Assembly
+ * number for this device.
+ */
+static int
+ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ u8 pba_string[32] = "";
+ enum ice_status status;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ status = ice_read_pba_string(hw, pba_string, sizeof(pba_string));
+ if (status) {
+ device_printf(dev,
+ "%s: failed to read PBA string from NVM; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ return sysctl_handle_string(oidp, pba_string, sizeof(pba_string), req);
+}
+
+/**
+ * ice_sysctl_pkg_version - sysctl to show the active package version info
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for the ddp_version sysctl, to display the active DDP package name
+ * and version information.
+ */
+static int
+ice_sysctl_pkg_version(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ struct sbuf *sbuf;
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ ice_active_pkg_version_str(hw, sbuf);
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_os_pkg_version - sysctl to show the OS package version info
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback used to display the OS DDP package name and version info found in
+ * the ice_ddp module.
+ */
+static int
+ice_sysctl_os_pkg_version(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ struct sbuf *sbuf;
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ ice_os_pkg_version_str(hw, sbuf);
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_current_speed - sysctl callback to show current link speed
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for the current_speed sysctl, to display the string representing
+ * the current link speed.
+ */
+static int
+ice_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ struct sbuf *sbuf;
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 10, req);
+ sbuf_printf(sbuf, "%s", ice_aq_speed_to_str(hw->port_info));
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * @var phy_link_speeds
+ * @brief PHY link speed conversion array
+ *
+ * Array of link speeds to convert ICE_PHY_TYPE_LOW and ICE_PHY_TYPE_HIGH into
+ * link speeds used by the link speed sysctls.
+ *
+ * @remark these are based on the indices used in the BIT() macros for the
+ * ICE_PHY_TYPE_LOW_* and ICE_PHY_TYPE_HIGH_* definitions.
+ */
+static const uint16_t phy_link_speeds[] = {
+ ICE_AQ_LINK_SPEED_100MB,
+ ICE_AQ_LINK_SPEED_100MB,
+ ICE_AQ_LINK_SPEED_1000MB,
+ ICE_AQ_LINK_SPEED_1000MB,
+ ICE_AQ_LINK_SPEED_1000MB,
+ ICE_AQ_LINK_SPEED_1000MB,
+ ICE_AQ_LINK_SPEED_1000MB,
+ ICE_AQ_LINK_SPEED_2500MB,
+ ICE_AQ_LINK_SPEED_2500MB,
+ ICE_AQ_LINK_SPEED_2500MB,
+ ICE_AQ_LINK_SPEED_5GB,
+ ICE_AQ_LINK_SPEED_5GB,
+ ICE_AQ_LINK_SPEED_10GB,
+ ICE_AQ_LINK_SPEED_10GB,
+ ICE_AQ_LINK_SPEED_10GB,
+ ICE_AQ_LINK_SPEED_10GB,
+ ICE_AQ_LINK_SPEED_10GB,
+ ICE_AQ_LINK_SPEED_10GB,
+ ICE_AQ_LINK_SPEED_10GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_25GB,
+ ICE_AQ_LINK_SPEED_40GB,
+ ICE_AQ_LINK_SPEED_40GB,
+ ICE_AQ_LINK_SPEED_40GB,
+ ICE_AQ_LINK_SPEED_40GB,
+ ICE_AQ_LINK_SPEED_40GB,
+ ICE_AQ_LINK_SPEED_40GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_50GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ /* These rates are for ICE_PHY_TYPE_HIGH_* */
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_100GB
+};
+
+#define ICE_SYSCTL_HELP_ADVERTISE_SPEED \
+"\nControl advertised link speed." \
+"\nFlags:" \
+"\n\t 0x0 - Auto" \
+"\n\t 0x1 - 10 Mb" \
+"\n\t 0x2 - 100 Mb" \
+"\n\t 0x4 - 1G" \
+"\n\t 0x8 - 2.5G" \
+"\n\t 0x10 - 5G" \
+"\n\t 0x20 - 10G" \
+"\n\t 0x40 - 20G" \
+"\n\t 0x80 - 25G" \
+"\n\t 0x100 - 40G" \
+"\n\t 0x200 - 50G" \
+"\n\t 0x400 - 100G" \
+"\n\t0x8000 - Unknown" \
+"\n\t" \
+"\nUse \"sysctl -x\" to view flags properly."
+
+#define ICE_PHYS_100MB \
+ (ICE_PHY_TYPE_LOW_100BASE_TX | \
+ ICE_PHY_TYPE_LOW_100M_SGMII)
+#define ICE_PHYS_1000MB \
+ (ICE_PHY_TYPE_LOW_1000BASE_T | \
+ ICE_PHY_TYPE_LOW_1000BASE_SX | \
+ ICE_PHY_TYPE_LOW_1000BASE_LX | \
+ ICE_PHY_TYPE_LOW_1000BASE_KX | \
+ ICE_PHY_TYPE_LOW_1G_SGMII)
+#define ICE_PHYS_2500MB \
+ (ICE_PHY_TYPE_LOW_2500BASE_T | \
+ ICE_PHY_TYPE_LOW_2500BASE_X | \
+ ICE_PHY_TYPE_LOW_2500BASE_KX)
+#define ICE_PHYS_5GB \
+ (ICE_PHY_TYPE_LOW_5GBASE_T | \
+ ICE_PHY_TYPE_LOW_5GBASE_KR)
+#define ICE_PHYS_10GB \
+ (ICE_PHY_TYPE_LOW_10GBASE_T | \
+ ICE_PHY_TYPE_LOW_10G_SFI_DA | \
+ ICE_PHY_TYPE_LOW_10GBASE_SR | \
+ ICE_PHY_TYPE_LOW_10GBASE_LR | \
+ ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
+ ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_10G_SFI_C2C)
+#define ICE_PHYS_25GB \
+ (ICE_PHY_TYPE_LOW_25GBASE_T | \
+ ICE_PHY_TYPE_LOW_25GBASE_CR | \
+ ICE_PHY_TYPE_LOW_25GBASE_CR_S | \
+ ICE_PHY_TYPE_LOW_25GBASE_CR1 | \
+ ICE_PHY_TYPE_LOW_25GBASE_SR | \
+ ICE_PHY_TYPE_LOW_25GBASE_LR | \
+ ICE_PHY_TYPE_LOW_25GBASE_KR | \
+ ICE_PHY_TYPE_LOW_25GBASE_KR_S | \
+ ICE_PHY_TYPE_LOW_25GBASE_KR1 | \
+ ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_25G_AUI_C2C)
+#define ICE_PHYS_40GB \
+ (ICE_PHY_TYPE_LOW_40GBASE_CR4 | \
+ ICE_PHY_TYPE_LOW_40GBASE_SR4 | \
+ ICE_PHY_TYPE_LOW_40GBASE_LR4 | \
+ ICE_PHY_TYPE_LOW_40GBASE_KR4 | \
+ ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_40G_XLAUI)
+#define ICE_PHYS_50GB \
+ (ICE_PHY_TYPE_LOW_50GBASE_CR2 | \
+ ICE_PHY_TYPE_LOW_50GBASE_SR2 | \
+ ICE_PHY_TYPE_LOW_50GBASE_LR2 | \
+ ICE_PHY_TYPE_LOW_50GBASE_KR2 | \
+ ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_50G_LAUI2 | \
+ ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_50G_AUI2 | \
+ ICE_PHY_TYPE_LOW_50GBASE_CP | \
+ ICE_PHY_TYPE_LOW_50GBASE_SR | \
+ ICE_PHY_TYPE_LOW_50GBASE_FR | \
+ ICE_PHY_TYPE_LOW_50GBASE_LR | \
+ ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 | \
+ ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_50G_AUI1)
+#define ICE_PHYS_100GB_LOW \
+ (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
+ ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_100G_CAUI4 | \
+ ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \
+ ICE_PHY_TYPE_LOW_100G_AUI4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \
+ ICE_PHY_TYPE_LOW_100GBASE_CP2 | \
+ ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
+ ICE_PHY_TYPE_LOW_100GBASE_DR)
+#define ICE_PHYS_100GB_HIGH \
+ (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
+ ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
+ ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_100G_AUI2)
+
+/**
+ * ice_aq_phy_types_to_sysctl_speeds - Convert the PHY Types to speeds
+ * @phy_type_low: lower 64-bit PHY Type bitmask
+ * @phy_type_high: upper 64-bit PHY Type bitmask
+ *
+ * Convert the PHY Type fields from Get PHY Abilities and Set PHY Config into
+ * link speed flags. If phy_type_high has an unknown PHY type, then the return
+ * value will include the "ICE_AQ_LINK_SPEED_UNKNOWN" flag as well.
+ */
+static u16
+ice_aq_phy_types_to_sysctl_speeds(u64 phy_type_low, u64 phy_type_high)
+{
+ u16 sysctl_speeds = 0;
+ int bit;
+
+ /* coverity[address_of] */
+ for_each_set_bit(bit, &phy_type_low, 64)
+ sysctl_speeds |= phy_link_speeds[bit];
+
+ /* coverity[address_of] */
+ for_each_set_bit(bit, &phy_type_high, 64) {
+ if ((bit + 64) < (int)ARRAY_SIZE(phy_link_speeds))
+ sysctl_speeds |= phy_link_speeds[bit + 64];
+ else
+ sysctl_speeds |= ICE_AQ_LINK_SPEED_UNKNOWN;
+ }
+
+ return (sysctl_speeds);
+}
+
+/**
+ * ice_sysctl_speeds_to_aq_phy_types - Convert sysctl speed flags to AQ PHY flags
+ * @sysctl_speeds: 16-bit sysctl speeds or AQ_LINK_SPEED flags
+ * @phy_type_low: output parameter for lower AQ PHY flags
+ * @phy_type_high: output parameter for higher AQ PHY flags
+ *
+ * Converts the given link speed flags into AQ PHY type flag sets appropriate
+ * for use in a Set PHY Config command.
+ */
+static void
+ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
+ u64 *phy_type_high)
+{
+ *phy_type_low = 0, *phy_type_high = 0;
+
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_100MB)
+ *phy_type_low |= ICE_PHYS_100MB;
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_1000MB)
+ *phy_type_low |= ICE_PHYS_1000MB;
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_2500MB)
+ *phy_type_low |= ICE_PHYS_2500MB;
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_5GB)
+ *phy_type_low |= ICE_PHYS_5GB;
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_10GB)
+ *phy_type_low |= ICE_PHYS_10GB;
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_25GB)
+ *phy_type_low |= ICE_PHYS_25GB;
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_40GB)
+ *phy_type_low |= ICE_PHYS_40GB;
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_50GB)
+ *phy_type_low |= ICE_PHYS_50GB;
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_100GB) {
+ *phy_type_low |= ICE_PHYS_100GB_LOW;
+ *phy_type_high |= ICE_PHYS_100GB_HIGH;
+ }
+}
+
+/**
+ * ice_intersect_media_types_with_caps - Restrict input AQ PHY flags
+ * @sc: driver private structure
+ * @phy_type_low: input/output flag set for low PHY types
+ * @phy_type_high: input/output flag set for high PHY types
+ *
+ * Intersects the input PHY flags with PHY flags retrieved from the adapter to
+ * ensure the flags are compatible.
+ *
+ * @returns 0 on success, EIO if an AQ command fails, or EINVAL if input PHY
+ * types have no intersection with TOPO_CAPS and the adapter is in non-lenient
+ * mode
+ */
+static int
+ice_intersect_media_types_with_caps(struct ice_softc *sc, u64 *phy_type_low,
+ u64 *phy_type_high)
+{
+ device_t dev = sc->dev;
+ enum ice_status status;
+
+ u64 new_phy_low, new_phy_high;
+
+ status = ice_get_phy_types(sc, &new_phy_low, &new_phy_high);
+ if (status != ICE_SUCCESS) {
+ /* Function already prints appropriate error message */
+ return (EIO);
+ }
+
+ ice_apply_supported_speed_filter(&new_phy_low, &new_phy_high);
+
+ new_phy_low &= *phy_type_low;
+ new_phy_high &= *phy_type_high;
+
+ if (new_phy_low == 0 && new_phy_high == 0) {
+ device_printf(dev,
+ "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
+ return (EINVAL);
+ }
+
+ /* Overwrite input phy_type values and return */
+ *phy_type_low = new_phy_low;
+ *phy_type_high = new_phy_high;
+
+ return (0);
+}
+
+/**
+ * ice_get_auto_speeds - Get PHY type flags for "auto" speed
+ * @sc: driver private structure
+ * @phy_type_low: output low PHY type flags
+ * @phy_type_high: output high PHY type flags
+ *
+ * Retrieves a suitable set of PHY type flags to use for an "auto" speed
+ * setting by either using the NVM default overrides for speed, or retrieving
+ * a default from the adapter using Get PHY capabilities in TOPO_CAPS mode.
+ *
+ * @returns 0 on success or EIO on AQ command failure
+ */
+static int
+ice_get_auto_speeds(struct ice_softc *sc, u64 *phy_type_low,
+ u64 *phy_type_high)
+{
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ struct ice_hw *hw = &sc->hw;
+ struct ice_port_info *pi = hw->port_info;
+ device_t dev = sc->dev;
+ enum ice_status status;
+
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE)) {
+ /* copy over speed settings from LDO TLV */
+ *phy_type_low = CPU_TO_LE64(sc->ldo_tlv.phy_type_low);
+ *phy_type_high = CPU_TO_LE64(sc->ldo_tlv.phy_type_high);
+ } else {
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ *phy_type_low = le64toh(pcaps.phy_type_low);
+ *phy_type_high = le64toh(pcaps.phy_type_high);
+ }
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_advertise_speed - Display/change link speeds advertised by port
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays the currently advertised speeds
+ * On write: Sets the device's advertised speeds
+ * Valid input flags: see ICE_SYSCTL_HELP_ADVERTISE_SPEED
+ */
+static int
+ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_hw *hw = &sc->hw;
+ struct ice_port_info *pi = hw->port_info;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ u64 phy_low, phy_high;
+ u16 sysctl_speeds = 0;
+ int error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* Get the current speeds from the adapter's "active" configuration. */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps (SW_CFG) failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ phy_low = le64toh(pcaps.phy_type_low);
+ phy_high = le64toh(pcaps.phy_type_high);
+ sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high);
+
+ error = sysctl_handle_16(oidp, &sysctl_speeds, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (sysctl_speeds > 0x7FF) {
+ device_printf(dev,
+ "%s: \"%u\" is outside of the range of acceptable values.\n",
+ __func__, sysctl_speeds);
+ return (EINVAL);
+ }
+
+ /* 0 is treated as "Auto"; the driver will handle selecting the correct speeds,
+ * or apply an override if one is specified in the NVM.
+ */
+ if (sysctl_speeds == 0) {
+ error = ice_get_auto_speeds(sc, &phy_low, &phy_high);
+ if (error)
+ /* Function already prints appropriate error message */
+ return (error);
+ } else {
+ ice_sysctl_speeds_to_aq_phy_types(sysctl_speeds, &phy_low, &phy_high);
+ error = ice_intersect_media_types_with_caps(sc, &phy_low, &phy_high);
+ if (error)
+ /* Function already prints appropriate error message */
+ return (error);
+ }
+ sysctl_speeds = ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high);
+
+ /* Cache new user setting for speeds */
+ pi->phy.curr_user_speed_req = sysctl_speeds;
+
+ /* Setup new PHY config with new input PHY types */
+ ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
+
+ cfg.phy_type_low = phy_low;
+ cfg.phy_type_high = phy_high;
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
+ if (status != ICE_SUCCESS) {
+ /* Don't indicate failure if there's no media in the port -- the sysctl
+ * handler has saved the value and will apply it when media is inserted.
+ */
+ if (status == ICE_ERR_AQ_ERROR &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) {
+ device_printf(dev,
+ "%s: Setting will be applied when media is inserted\n", __func__);
+ return (0);
+ } else {
+ device_printf(dev,
+ "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+ }
+
+ return (0);
+}
+
+#define ICE_SYSCTL_HELP_FEC_CONFIG \
+"\nDisplay or set the port's requested FEC mode." \
+"\n\tauto - " ICE_FEC_STRING_AUTO \
+"\n\tfc - " ICE_FEC_STRING_BASER \
+"\n\trs - " ICE_FEC_STRING_RS \
+"\n\tnone - " ICE_FEC_STRING_NONE \
+"\nEither of the left or right strings above can be used to set the requested mode."
+
+/**
+ * ice_sysctl_fec_config - Display/change the configured FEC mode
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays the configured FEC mode
+ * On write: Sets the device's FEC mode to the input string, if it's valid.
+ * Valid input strings: see ICE_SYSCTL_HELP_FEC_CONFIG
+ */
+static int
+ice_sysctl_fec_config(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_port_info *pi = sc->hw.port_info;
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_hw *hw = &sc->hw;
+ enum ice_fec_mode new_mode;
+ enum ice_status status;
+ device_t dev = sc->dev;
+ char req_fec[32];
+ int error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ bzero(req_fec, sizeof(req_fec));
+ strlcpy(req_fec, ice_requested_fec_mode(pi), sizeof(req_fec));
+
+ error = sysctl_handle_string(oidp, req_fec, sizeof(req_fec), req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (strcmp(req_fec, "auto") == 0 ||
+ strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) {
+ new_mode = ICE_FEC_AUTO;
+ } else if (strcmp(req_fec, "fc") == 0 ||
+ strcmp(req_fec, ice_fec_str(ICE_FEC_BASER)) == 0) {
+ new_mode = ICE_FEC_BASER;
+ } else if (strcmp(req_fec, "rs") == 0 ||
+ strcmp(req_fec, ice_fec_str(ICE_FEC_RS)) == 0) {
+ new_mode = ICE_FEC_RS;
+ } else if (strcmp(req_fec, "none") == 0 ||
+ strcmp(req_fec, ice_fec_str(ICE_FEC_NONE)) == 0) {
+ new_mode = ICE_FEC_NONE;
+ } else {
+ device_printf(dev,
+ "%s: \"%s\" is not a valid FEC mode\n",
+ __func__, req_fec);
+ return (EINVAL);
+ }
+
+ /* Cache user FEC mode for later link ups */
+ pi->phy.curr_user_fec_req = new_mode;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps failed (SW_CFG); status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
+
+ /* Get link_fec_opt/AUTO_FEC mode from TOPO caps for base for new FEC mode */
+ memset(&pcaps, 0, sizeof(pcaps));
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps failed (TOPO_CAP); status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ /* Configure new FEC options using TOPO caps */
+ cfg.link_fec_opt = pcaps.link_fec_options;
+ cfg.caps &= ~ICE_AQ_PHY_ENA_AUTO_FEC;
+ if (pcaps.caps & ICE_AQC_PHY_EN_AUTO_FEC)
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_FEC;
+
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_DEFAULT_OVERRIDE) &&
+ new_mode == ICE_FEC_AUTO) {
+ /* copy over FEC settings from LDO TLV */
+ cfg.link_fec_opt = sc->ldo_tlv.fec_options;
+ } else {
+ ice_cfg_phy_fec(pi, &cfg, new_mode);
+
+ /* Check if the new mode is valid, and exit with an error if not */
+ if (cfg.link_fec_opt &&
+ !(cfg.link_fec_opt & pcaps.link_fec_options)) {
+ device_printf(dev,
+ "%s: The requested FEC mode, %s, is not supported by current media\n",
+ __func__, ice_fec_str(new_mode));
+ return (ENOTSUP);
+ }
+ }
+
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
+ if (status != ICE_SUCCESS) {
+ /* Don't indicate failure if there's no media in the port -- the sysctl
+ * handler has saved the value and will apply it when media is inserted.
+ */
+ if (status == ICE_ERR_AQ_ERROR &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) {
+ device_printf(dev,
+ "%s: Setting will be applied when media is inserted\n", __func__);
+ return (0);
+ } else {
+ device_printf(dev,
+ "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+ }
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_negotiated_fec - Display the negotiated FEC mode on the link
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays the negotiated FEC mode, in a string
+ */
+static int
+ice_sysctl_negotiated_fec(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ char neg_fec[32];
+ int error;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* Copy const string into a buffer to drop const qualifier */
+ bzero(neg_fec, sizeof(neg_fec));
+ strlcpy(neg_fec, ice_negotiated_fec_mode(hw->port_info), sizeof(neg_fec));
+
+ error = sysctl_handle_string(oidp, neg_fec, 0, req);
+ if (req->newptr != NULL)
+ return (EPERM);
+
+ return (error);
+}
+
+#define ICE_SYSCTL_HELP_FC_CONFIG \
+"\nDisplay or set the port's advertised flow control mode.\n" \
+"\t0 - " ICE_FC_STRING_NONE \
+"\n\t1 - " ICE_FC_STRING_RX \
+"\n\t2 - " ICE_FC_STRING_TX \
+"\n\t3 - " ICE_FC_STRING_FULL \
+"\nEither the numbers or the strings above can be used to set the advertised mode."
+
+/**
+ * ice_sysctl_fc_config - Display/change the advertised flow control mode
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays the configured flow control mode
+ * On write: Sets the device's flow control mode to the input, if it's valid.
+ * Valid input strings: see ICE_SYSCTL_HELP_FC_CONFIG
+ */
+static int
+ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_port_info *pi = sc->hw.port_info;
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ enum ice_fc_mode old_mode, new_mode;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ int error = 0, fc_num;
+ bool mode_set = false;
+ struct sbuf buf;
+ char *fc_str_end;
+ char fc_str[32];
+ u8 aq_failures;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ /* Convert HW response format to SW enum value */
+ if ((pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
+ (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE))
+ old_mode = ICE_FC_FULL;
+ else if (pcaps.caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
+ old_mode = ICE_FC_TX_PAUSE;
+ else if (pcaps.caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+ old_mode = ICE_FC_RX_PAUSE;
+ else
+ old_mode = ICE_FC_NONE;
+
+ /* Create "old" string for output */
+ bzero(fc_str, sizeof(fc_str));
+ sbuf_new_for_sysctl(&buf, fc_str, sizeof(fc_str), req);
+ sbuf_printf(&buf, "%d<%s>", old_mode, ice_fc_str(old_mode));
+ sbuf_finish(&buf);
+ sbuf_delete(&buf);
+
+ error = sysctl_handle_string(oidp, fc_str, sizeof(fc_str), req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ /* Try to parse input as a string, first */
+ if (strcasecmp(ice_fc_str(ICE_FC_FULL), fc_str) == 0) {
+ new_mode = ICE_FC_FULL;
+ mode_set = true;
+ }
+ else if (strcasecmp(ice_fc_str(ICE_FC_TX_PAUSE), fc_str) == 0) {
+ new_mode = ICE_FC_TX_PAUSE;
+ mode_set = true;
+ }
+ else if (strcasecmp(ice_fc_str(ICE_FC_RX_PAUSE), fc_str) == 0) {
+ new_mode = ICE_FC_RX_PAUSE;
+ mode_set = true;
+ }
+ else if (strcasecmp(ice_fc_str(ICE_FC_NONE), fc_str) == 0) {
+ new_mode = ICE_FC_NONE;
+ mode_set = true;
+ }
+
+ /*
+ * Then check if it's an integer, for compatibility with the method
+ * used in older drivers.
+ */
+ if (!mode_set) {
+ fc_num = strtol(fc_str, &fc_str_end, 0);
+ if (fc_str_end == fc_str)
+ fc_num = -1;
+ switch (fc_num) {
+ case 3:
+ new_mode = ICE_FC_FULL;
+ break;
+ case 2:
+ new_mode = ICE_FC_TX_PAUSE;
+ break;
+ case 1:
+ new_mode = ICE_FC_RX_PAUSE;
+ break;
+ case 0:
+ new_mode = ICE_FC_NONE;
+ break;
+ default:
+ device_printf(dev,
+ "%s: \"%s\" is not a valid flow control mode\n",
+ __func__, fc_str);
+ return (EINVAL);
+ }
+ }
+
+ /* Finally, set the flow control mode in FW */
+ hw->port_info->fc.req_mode = new_mode;
+ status = ice_set_fc(pi, &aq_failures, true);
+ if (status != ICE_SUCCESS) {
+ /* Don't indicate failure if there's no media in the port -- the sysctl
+ * handler has saved the value and will apply it when media is inserted.
+ */
+ if (aq_failures == ICE_SET_FC_AQ_FAIL_SET &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) {
+ device_printf(dev,
+ "%s: Setting will be applied when media is inserted\n", __func__);
+ return (0);
+ } else {
+ device_printf(dev,
+ "%s: ice_set_fc AQ failure = %d\n", __func__, aq_failures);
+ return (EIO);
+ }
+ }
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_negotiated_fc - Display currently negotiated FC mode
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays the currently negotiated flow control settings.
+ *
+ * If link is not established, this will report ICE_FC_NONE, as no flow
+ * control is negotiated while link is down.
+ */
+static int
+ice_sysctl_negotiated_fc(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_port_info *pi = sc->hw.port_info;
+ const char *negotiated_fc;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ negotiated_fc = ice_flowcontrol_mode(pi);
+
+ return sysctl_handle_string(oidp, __DECONST(char *, negotiated_fc), 0, req);
+}
+
+/**
+ * __ice_sysctl_phy_type_handler - Display/change supported PHY types/speeds
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ * @is_phy_type_high: if true, handle the high PHY type instead of the low PHY type
+ *
+ * Private handler for phy_type_high and phy_type_low sysctls.
+ */
+static int
+__ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ uint64_t types;
+ int error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_SW_CFG,
+ &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ if (is_phy_type_high)
+ types = pcaps.phy_type_high;
+ else
+ types = pcaps.phy_type_low;
+
+ error = sysctl_handle_64(oidp, &types, sizeof(types), req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ ice_copy_phy_caps_to_cfg(hw->port_info, &pcaps, &cfg);
+
+ if (is_phy_type_high)
+ cfg.phy_type_high = types & hw->port_info->phy.phy_type_high;
+ else
+ cfg.phy_type_low = types & hw->port_info->phy.phy_type_low;
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ice_aq_set_phy_cfg(hw, hw->port_info, &cfg, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_phy_type_low - Display/change supported lower PHY types/speeds
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays the currently supported lower PHY types
+ * On write: Sets the device's supported low PHY types
+ */
+static int
+ice_sysctl_phy_type_low(SYSCTL_HANDLER_ARGS)
+{
+ return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, false);
+}
+
+/**
+ * ice_sysctl_phy_type_high - Display/change supported higher PHY types/speeds
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays the currently supported higher PHY types
+ * On write: Sets the device's supported high PHY types
+ */
+static int
+ice_sysctl_phy_type_high(SYSCTL_HANDLER_ARGS)
+{
+ return __ice_sysctl_phy_type_handler(oidp, arg1, arg2, req, true);
+}
+
+/**
+ * ice_sysctl_phy_caps - Display response from Get PHY abilities
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ * @report_mode: the mode to report
+ *
+ * On read: Display the response from Get PHY abilities with the given report
+ * mode.
+ */
+static int
+ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ struct ice_hw *hw = &sc->hw;
+ struct ice_port_info *pi = hw->port_info;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ int error;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ error = priv_check(curthread, PRIV_DRIVER);
+ if (error)
+ return (error);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ status = ice_aq_get_phy_caps(pi, true, report_mode, &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ error = sysctl_handle_opaque(oidp, &pcaps, sizeof(pcaps), req);
+ if (req->newptr != NULL)
+ return (EPERM);
+
+ return (error);
+}
+
+/**
+ * ice_sysctl_phy_sw_caps - Display response from Get PHY abilities
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Display the response from Get PHY abilities reporting the last
+ * software configuration.
+ */
+static int
+ice_sysctl_phy_sw_caps(SYSCTL_HANDLER_ARGS)
+{
+ return ice_sysctl_phy_caps(oidp, arg1, arg2, req,
+ ICE_AQC_REPORT_SW_CFG);
+}
+
+/**
+ * ice_sysctl_phy_nvm_caps - Display response from Get PHY abilities
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Display the response from Get PHY abilities reporting the NVM
+ * configuration.
+ */
+static int
+ice_sysctl_phy_nvm_caps(SYSCTL_HANDLER_ARGS)
+{
+ return ice_sysctl_phy_caps(oidp, arg1, arg2, req,
+ ICE_AQC_REPORT_NVM_CAP);
+}
+
+/**
+ * ice_sysctl_phy_topo_caps - Display response from Get PHY abilities
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Display the response from Get PHY abilities reporting the
+ * topology configuration.
+ */
+static int
+ice_sysctl_phy_topo_caps(SYSCTL_HANDLER_ARGS)
+{
+ return ice_sysctl_phy_caps(oidp, arg1, arg2, req,
+ ICE_AQC_REPORT_TOPO_CAP);
+}
+
+/**
+ * ice_sysctl_phy_link_status - Display response from Get Link Status
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Display the response from firmware for the Get Link Status
+ * request.
+ */
+static int
+ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_aqc_get_link_status_data link_data = { 0 };
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_aqc_get_link_status *resp;
+ struct ice_aq_desc desc;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ int error;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ /*
+ * Ensure that only contexts with driver privilege are allowed to
+ * access this information
+ */
+ error = priv_check(curthread, PRIV_DRIVER);
+ if (error)
+ return (error);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
+ resp = &desc.params.get_link_status;
+ resp->lport_num = pi->lport;
+
+ status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_send_cmd failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ error = sysctl_handle_opaque(oidp, &link_data, sizeof(link_data), req);
+ if (req->newptr != NULL)
+ return (EPERM);
+
+ return (error);
+}
+
+/**
+ * ice_sysctl_fw_cur_lldp_persist_status - Display current FW LLDP status
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private softc structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays current persistent LLDP status.
+ */
+static int
+ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ struct sbuf *sbuf;
+ u32 lldp_state;
+
+ UNREFERENCED_PARAMETER(arg2);
+ UNREFERENCED_PARAMETER(oidp);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ status = ice_get_cur_lldp_persist_status(hw, &lldp_state);
+ if (status) {
+ device_printf(dev,
+ "Could not acquire current LLDP persistence status, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state));
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_fw_dflt_lldp_persist_status - Display default FW LLDP status
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private softc structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays default persistent LLDP status.
+ */
+static int
+ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ struct sbuf *sbuf;
+ u32 lldp_state;
+
+ UNREFERENCED_PARAMETER(arg2);
+ UNREFERENCED_PARAMETER(oidp);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ status = ice_get_dflt_lldp_persist_status(hw, &lldp_state);
+ if (status) {
+ device_printf(dev,
+ "Could not acquire default LLDP persistence status, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ sbuf_printf(sbuf, "%s", ice_fw_lldp_status(lldp_state));
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+#define ICE_SYSCTL_HELP_FW_LLDP_AGENT \
+"\nDisplay or change FW LLDP agent state:" \
+"\n\t0 - disabled" \
+"\n\t1 - enabled"
+
+/**
+ * ice_sysctl_fw_lldp_agent - Display or change the FW LLDP agent status
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private softc structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays whether the FW LLDP agent is running
+ * On write: Persistently enables or disables the FW LLDP agent
+ */
+static int
+ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ int error = 0;
+ u32 old_state;
+ u8 fw_lldp_enabled;
+ bool retried_start_lldp = false;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ status = ice_get_cur_lldp_persist_status(hw, &old_state);
+ if (status) {
+ device_printf(dev,
+ "Could not acquire current LLDP persistence status, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ if (old_state > ICE_LLDP_ADMINSTATUS_ENA_RXTX) {
+ status = ice_get_dflt_lldp_persist_status(hw, &old_state);
+ if (status) {
+ device_printf(dev,
+ "Could not acquire default LLDP persistence status, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+ }
+ if (old_state == 0)
+ fw_lldp_enabled = false;
+ else
+ fw_lldp_enabled = true;
+
+ error = sysctl_handle_bool(oidp, &fw_lldp_enabled, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (old_state == 0 && fw_lldp_enabled == false)
+ return (0);
+
+ if (old_state != 0 && fw_lldp_enabled == true)
+ return (0);
+
+ if (fw_lldp_enabled == false) {
+ status = ice_aq_stop_lldp(hw, true, true, NULL);
+ /* EPERM is returned if the LLDP agent is already shutdown */
+ if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM) {
+ device_printf(dev,
+ "%s: ice_aq_stop_lldp failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+ ice_aq_set_dcb_parameters(hw, true, NULL);
+ hw->port_info->is_sw_lldp = true;
+ ice_add_rx_lldp_filter(sc);
+ } else {
+retry_start_lldp:
+ status = ice_aq_start_lldp(hw, true, NULL);
+ if (status) {
+ switch (hw->adminq.sq_last_status) {
+ /* EEXIST is returned if the LLDP agent is already started */
+ case ICE_AQ_RC_EEXIST:
+ break;
+ case ICE_AQ_RC_EAGAIN:
+ /* Retry command after a 2 second wait */
+ if (retried_start_lldp == false) {
+ retried_start_lldp = true;
+ pause("slldp", ICE_START_LLDP_RETRY_WAIT);
+ goto retry_start_lldp;
+ }
+ /* Fallthrough */
+ default:
+ device_printf(dev,
+ "%s: ice_aq_start_lldp failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+ }
+ hw->port_info->is_sw_lldp = false;
+ ice_del_rx_lldp_filter(sc);
+ }
+
+ return (error);
+}
+
+/**
+ * ice_add_device_sysctls - add device specific dynamic sysctls
+ * @sc: device private structure
+ *
+ * Add per-device dynamic sysctls which show device configuration or enable
+ * configuring device functionality. For tunable values which can be set prior
+ * to load, see ice_add_device_tunables.
+ *
+ * This function depends on the sysctl layout setup by ice_add_device_tunables,
+ * and likely should be called near the end of the attach process.
+ */
+void
+ice_add_device_sysctls(struct ice_softc *sc)
+{
+ struct sysctl_oid *hw_node;
+ device_t dev = sc->dev;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *ctx_list =
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_show_fw, "A", "Firmware version");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "pba_number", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_pba_number, "A", "Product Board Assembly Number");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "ddp_version", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_pkg_version, "A", "Active DDP package name and version");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_current_speed, "A", "Current Port Link Speed");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "requested_fec", CTLTYPE_STRING | CTLFLAG_RW,
+ sc, 0, ice_sysctl_fec_config, "A", ICE_SYSCTL_HELP_FEC_CONFIG);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "negotiated_fec", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_negotiated_fec, "A", "Current Negotiated FEC mode");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fc", CTLTYPE_STRING | CTLFLAG_RW,
+ sc, 0, ice_sysctl_fc_config, "A", ICE_SYSCTL_HELP_FC_CONFIG);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "advertise_speed", CTLTYPE_U16 | CTLFLAG_RW,
+ sc, 0, ice_sysctl_advertise_speed, "SU", ICE_SYSCTL_HELP_ADVERTISE_SPEED);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fw_lldp_agent", CTLTYPE_U8 | CTLFLAG_RWTUN,
+ sc, 0, ice_sysctl_fw_lldp_agent, "CU", ICE_SYSCTL_HELP_FW_LLDP_AGENT);
+
+	/* Differentiate software and hardware statistics by keeping hw stats
+ * in their own node. This isn't in ice_add_device_tunables, because
+ * we won't have any CTLFLAG_TUN sysctls under this node.
+ */
+ hw_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "hw", CTLFLAG_RD,
+ NULL, "Port Hardware Statistics");
+
+ ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur);
+
+ /* Add the main PF VSI stats now. Other VSIs will add their own stats
+ * during creation
+ */
+ ice_add_vsi_sysctls(&sc->pf_vsi);
+
+ /* Add sysctls related to debugging the device driver. This includes
+ * sysctls which display additional internal driver state for use in
+ * understanding what is happening within the driver.
+ */
+ ice_add_debug_sysctls(sc);
+}
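+
+/*
+ * Note (illustrative only): the sysctls added above hang off the device's
+ * sysctl tree, so with FreeBSD's usual dev.<driver>.<unit> naming they
+ * would typically appear as dev.ice.0.fw_version, dev.ice.0.ddp_version
+ * and dev.ice.0.hw.mac.*; the unit number depends on probe order.
+ */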
+
+/**
+ * @enum hmc_error_type
+ * @brief enumeration of HMC errors
+ *
+ * Enumeration defining the possible HMC errors that might occur.
+ */
+enum hmc_error_type {
+ HMC_ERR_PMF_INVALID = 0,
+ HMC_ERR_VF_IDX_INVALID = 1,
+ HMC_ERR_VF_PARENT_PF_INVALID = 2,
+ /* 3 is reserved */
+ HMC_ERR_INDEX_TOO_BIG = 4,
+ HMC_ERR_ADDRESS_TOO_LARGE = 5,
+ HMC_ERR_SEGMENT_DESC_INVALID = 6,
+ HMC_ERR_SEGMENT_DESC_TOO_SMALL = 7,
+ HMC_ERR_PAGE_DESC_INVALID = 8,
+ HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION = 9,
+ /* 10 is reserved */
+ HMC_ERR_INVALID_OBJECT_TYPE = 11,
+ /* 12 is reserved */
+};
+
+/**
+ * ice_log_hmc_error - Log an HMC error message
+ * @hw: device hw structure
+ * @dev: the device to pass to device_printf()
+ *
+ * Log a message when an HMC error interrupt is triggered.
+ */
+void
+ice_log_hmc_error(struct ice_hw *hw, device_t dev)
+{
+ u32 info, data;
+ u8 index, errtype, objtype;
+ bool isvf;
+
+ info = rd32(hw, PFHMC_ERRORINFO);
+ data = rd32(hw, PFHMC_ERRORDATA);
+
+ index = (u8)(info & PFHMC_ERRORINFO_PMF_INDEX_M);
+ errtype = (u8)((info & PFHMC_ERRORINFO_HMC_ERROR_TYPE_M) >>
+ PFHMC_ERRORINFO_HMC_ERROR_TYPE_S);
+ objtype = (u8)((info & PFHMC_ERRORINFO_HMC_OBJECT_TYPE_M) >>
+ PFHMC_ERRORINFO_HMC_OBJECT_TYPE_S);
+
+ isvf = info & PFHMC_ERRORINFO_PMF_ISVF_M;
+
+ device_printf(dev, "%s HMC Error detected on PMF index %d:\n",
+ isvf ? "VF" : "PF", index);
+
+ device_printf(dev, "error type %d, object type %d, data 0x%08x\n",
+ errtype, objtype, data);
+
+ switch (errtype) {
+ case HMC_ERR_PMF_INVALID:
+ device_printf(dev, "Private Memory Function is not valid\n");
+ break;
+ case HMC_ERR_VF_IDX_INVALID:
+ device_printf(dev, "Invalid Private Memory Function index for PE enabled VF\n");
+ break;
+ case HMC_ERR_VF_PARENT_PF_INVALID:
+ device_printf(dev, "Invalid parent PF for PE enabled VF\n");
+ break;
+ case HMC_ERR_INDEX_TOO_BIG:
+ device_printf(dev, "Object index too big\n");
+ break;
+ case HMC_ERR_ADDRESS_TOO_LARGE:
+ device_printf(dev, "Address extends beyond segment descriptor limit\n");
+ break;
+ case HMC_ERR_SEGMENT_DESC_INVALID:
+ device_printf(dev, "Segment descriptor is invalid\n");
+ break;
+ case HMC_ERR_SEGMENT_DESC_TOO_SMALL:
+ device_printf(dev, "Segment descriptor is too small\n");
+ break;
+ case HMC_ERR_PAGE_DESC_INVALID:
+ device_printf(dev, "Page descriptor is invalid\n");
+ break;
+ case HMC_ERR_UNSUPPORTED_REQUEST_COMPLETION:
+ device_printf(dev, "Unsupported Request completion received from PCIe\n");
+ break;
+ case HMC_ERR_INVALID_OBJECT_TYPE:
+ device_printf(dev, "Invalid object type\n");
+ break;
+ default:
+ device_printf(dev, "Unknown HMC error\n");
+ }
+
+ /* Clear the error indication */
+ wr32(hw, PFHMC_ERRORINFO, 0);
+}
+
+/**
+ * @struct ice_sysctl_info
+ * @brief sysctl information
+ *
+ * Structure used to simplify the process of defining the many similar
+ * statistics sysctls.
+ */
+struct ice_sysctl_info {
+ u64 *stat;
+ const char *name;
+ const char *description;
+};
+
+/**
+ * ice_add_sysctls_eth_stats - Add sysctls for ethernet statistics
+ * @ctx: sysctl ctx to use
+ * @parent: the parent node to add sysctls under
+ * @stats: the ethernet stats structure to source values from
+ *
+ * Adds statistics sysctls for the ethernet statistics of the MAC or a VSI.
+ * Will add them under the parent node specified.
+ *
+ * Note that rx_discards and tx_errors are only meaningful for VSIs and not
+ * the global MAC/PF statistics, so they are not included here.
+ */
+void
+ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *parent,
+ struct ice_eth_stats *stats)
+{
+ const struct ice_sysctl_info ctls[] = {
+ /* Rx Stats */
+ { &stats->rx_bytes, "good_octets_rcvd", "Good Octets Received" },
+ { &stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received" },
+ { &stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received" },
+ { &stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received" },
+ /* Tx Stats */
+ { &stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted" },
+ { &stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted" },
+ { &stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted" },
+ { &stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted" },
+ /* End */
+ { 0, 0, 0 }
+ };
+
+ struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent);
+
+ const struct ice_sysctl_info *entry = ctls;
+ while (entry->stat != 0) {
+ SYSCTL_ADD_U64(ctx, parent_list, OID_AUTO, entry->name,
+ CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0,
+ entry->description);
+ entry++;
+ }
+}
+
+/**
+ * ice_sysctl_tx_cso_stat - Display Tx checksum offload statistic
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: Tx CSO stat to read
+ * @req: sysctl request pointer
+ *
+ * On read: Sums the per-queue Tx CSO stat and displays it.
+ */
+static int
+ice_sysctl_tx_cso_stat(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_vsi *vsi = (struct ice_vsi *)arg1;
+ enum ice_tx_cso_stat type = (enum ice_tx_cso_stat)arg2;
+ u64 stat = 0;
+ int i;
+
+ if (ice_driver_is_detaching(vsi->sc))
+ return (ESHUTDOWN);
+
+ /* Check that the type is valid */
+ if (type >= ICE_CSO_STAT_TX_COUNT)
+ return (EDOOFUS);
+
+ /* Sum the stat for each of the Tx queues */
+ for (i = 0; i < vsi->num_tx_queues; i++)
+ stat += vsi->tx_queues[i].stats.cso[type];
+
+ return sysctl_handle_64(oidp, NULL, stat, req);
+}
+
+/**
+ * ice_sysctl_rx_cso_stat - Display Rx checksum offload statistic
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: Rx CSO stat to read
+ * @req: sysctl request pointer
+ *
+ * On read: Sums the per-queue Rx CSO stat and displays it.
+ */
+static int
+ice_sysctl_rx_cso_stat(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_vsi *vsi = (struct ice_vsi *)arg1;
+ enum ice_rx_cso_stat type = (enum ice_rx_cso_stat)arg2;
+ u64 stat = 0;
+ int i;
+
+ if (ice_driver_is_detaching(vsi->sc))
+ return (ESHUTDOWN);
+
+ /* Check that the type is valid */
+ if (type >= ICE_CSO_STAT_RX_COUNT)
+ return (EDOOFUS);
+
+ /* Sum the stat for each of the Rx queues */
+ for (i = 0; i < vsi->num_rx_queues; i++)
+ stat += vsi->rx_queues[i].stats.cso[type];
+
+ return sysctl_handle_64(oidp, NULL, stat, req);
+}
+
+/**
+ * @struct ice_rx_cso_stat_info
+ * @brief sysctl information for an Rx checksum offload statistic
+ *
+ * Structure used to simplify the process of defining the checksum offload
+ * statistics.
+ */
+struct ice_rx_cso_stat_info {
+ enum ice_rx_cso_stat type;
+ const char *name;
+ const char *description;
+};
+
+/**
+ * @struct ice_tx_cso_stat_info
+ * @brief sysctl information for a Tx checksum offload statistic
+ *
+ * Structure used to simplify the process of defining the checksum offload
+ * statistics.
+ */
+struct ice_tx_cso_stat_info {
+ enum ice_tx_cso_stat type;
+ const char *name;
+ const char *description;
+};
+
+/**
+ * ice_add_sysctls_sw_stats - Add sysctls for software statistics
+ * @vsi: pointer to the VSI to add sysctls for
+ * @ctx: sysctl ctx to use
+ * @parent: the parent node to add sysctls under
+ *
+ * Add statistics sysctls for software tracked statistics of a VSI.
+ *
+ * Currently this only adds checksum offload statistics, but more counters may
+ * be added in the future.
+ */
+static void
+ice_add_sysctls_sw_stats(struct ice_vsi *vsi,
+ struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *parent)
+{
+ struct sysctl_oid *cso_node;
+ struct sysctl_oid_list *cso_list;
+
+ /* Tx CSO Stats */
+ const struct ice_tx_cso_stat_info tx_ctls[] = {
+ { ICE_CSO_STAT_TX_TCP, "tx_tcp", "Transmit TCP Packets marked for HW checksum" },
+ { ICE_CSO_STAT_TX_UDP, "tx_udp", "Transmit UDP Packets marked for HW checksum" },
+ { ICE_CSO_STAT_TX_SCTP, "tx_sctp", "Transmit SCTP Packets marked for HW checksum" },
+ { ICE_CSO_STAT_TX_IP4, "tx_ip4", "Transmit IPv4 Packets marked for HW checksum" },
+ { ICE_CSO_STAT_TX_IP6, "tx_ip6", "Transmit IPv6 Packets marked for HW checksum" },
+ { ICE_CSO_STAT_TX_L3_ERR, "tx_l3_err", "Transmit packets that driver failed to set L3 HW CSO bits for" },
+ { ICE_CSO_STAT_TX_L4_ERR, "tx_l4_err", "Transmit packets that driver failed to set L4 HW CSO bits for" },
+ /* End */
+ { ICE_CSO_STAT_TX_COUNT, 0, 0 }
+ };
+
+ /* Rx CSO Stats */
+ const struct ice_rx_cso_stat_info rx_ctls[] = {
+ { ICE_CSO_STAT_RX_IP4_ERR, "rx_ip4_err", "Received packets with invalid IPv4 checksum indicated by HW" },
+ { ICE_CSO_STAT_RX_IP6_ERR, "rx_ip6_err", "Received IPv6 packets with extension headers" },
+ { ICE_CSO_STAT_RX_L3_ERR, "rx_l3_err", "Received packets with an unexpected invalid L3 checksum indicated by HW" },
+ { ICE_CSO_STAT_RX_TCP_ERR, "rx_tcp_err", "Received packets with invalid TCP checksum indicated by HW" },
+ { ICE_CSO_STAT_RX_UDP_ERR, "rx_udp_err", "Received packets with invalid UDP checksum indicated by HW" },
+ { ICE_CSO_STAT_RX_SCTP_ERR, "rx_sctp_err", "Received packets with invalid SCTP checksum indicated by HW" },
+ { ICE_CSO_STAT_RX_L4_ERR, "rx_l4_err", "Received packets with an unexpected invalid L4 checksum indicated by HW" },
+ /* End */
+ { ICE_CSO_STAT_RX_COUNT, 0, 0 }
+ };
+
+ struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent);
+
+ /* Add a node for statistics tracked by software. */
+ cso_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "cso", CTLFLAG_RD,
+ NULL, "Checksum offload Statistics");
+ cso_list = SYSCTL_CHILDREN(cso_node);
+
+ const struct ice_tx_cso_stat_info *tx_entry = tx_ctls;
+ while (tx_entry->name && tx_entry->description) {
+ SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, tx_entry->name,
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ vsi, tx_entry->type, ice_sysctl_tx_cso_stat, "QU",
+ tx_entry->description);
+ tx_entry++;
+ }
+
+ const struct ice_rx_cso_stat_info *rx_entry = rx_ctls;
+ while (rx_entry->name && rx_entry->description) {
+ SYSCTL_ADD_PROC(ctx, cso_list, OID_AUTO, rx_entry->name,
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ vsi, rx_entry->type, ice_sysctl_rx_cso_stat, "QU",
+ rx_entry->description);
+ rx_entry++;
+ }
+}
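+
+/*
+ * Illustrative only: with the "cso" node created above and the VSI node
+ * layout set up by ice_add_vsi_tunables, these counters would typically
+ * surface as read-only sysctls such as dev.ice.0.vsi.0.sw.cso.tx_tcp
+ * (the device unit and VSI index here are hypothetical).
+ */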
+
+/**
+ * ice_add_vsi_sysctls - Add sysctls for a VSI
+ * @vsi: pointer to VSI structure
+ *
+ * Add various sysctls for a given VSI.
+ */
+void
+ice_add_vsi_sysctls(struct ice_vsi *vsi)
+{
+ struct sysctl_ctx_list *ctx = &vsi->ctx;
+ struct sysctl_oid *hw_node, *sw_node;
+ struct sysctl_oid_list *vsi_list, *hw_list, *sw_list;
+
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+ /* Keep hw stats in their own node. */
+ hw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "hw", CTLFLAG_RD,
+ NULL, "VSI Hardware Statistics");
+ hw_list = SYSCTL_CHILDREN(hw_node);
+
+ /* Add the ethernet statistics for this VSI */
+ ice_add_sysctls_eth_stats(ctx, hw_node, &vsi->hw_stats.cur);
+
+ SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_discards",
+ CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_discards,
+ 0, "Discarded Rx Packets");
+
+ SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_errors",
+ CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_errors,
+ 0, "Rx Packets Discarded Due To Error");
+
+ SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "rx_no_desc",
+ CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.rx_no_desc,
+ 0, "Rx Packets Discarded Due To Lack Of Descriptors");
+
+ SYSCTL_ADD_U64(ctx, hw_list, OID_AUTO, "tx_errors",
+ CTLFLAG_RD | CTLFLAG_STATS, &vsi->hw_stats.cur.tx_errors,
+ 0, "Tx Packets Discarded Due To Error");
+
+ /* Add a node for statistics tracked by software. */
+ sw_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, "sw", CTLFLAG_RD,
+ NULL, "VSI Software Statistics");
+ sw_list = SYSCTL_CHILDREN(sw_node);
+
+ ice_add_sysctls_sw_stats(vsi, ctx, sw_node);
+}
+
+/**
+ * ice_add_sysctls_mac_stats - Add sysctls for global MAC statistics
+ * @ctx: the sysctl ctx to use
+ * @parent: parent node to add the sysctls under
+ * @stats: the hw ports stat structure to pull values from
+ *
+ * Add global MAC statistics sysctls.
+ */
+void
+ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *parent,
+ struct ice_hw_port_stats *stats)
+{
+ struct sysctl_oid *mac_node;
+ struct sysctl_oid_list *parent_list, *mac_list;
+
+ parent_list = SYSCTL_CHILDREN(parent);
+
+ mac_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "mac", CTLFLAG_RD,
+ NULL, "Mac Hardware Statistics");
+ mac_list = SYSCTL_CHILDREN(mac_node);
+
+ /* add the common ethernet statistics */
+ ice_add_sysctls_eth_stats(ctx, mac_node, &stats->eth);
+
+ const struct ice_sysctl_info ctls[] = {
+ /* Packet Reception Stats */
+ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
+ {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
+ {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
+ {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
+ {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
+ {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
+ {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
+ {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
+ {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
+ {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
+ {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
+ {&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"},
+ /* Packet Transmission Stats */
+ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
+ {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
+ {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
+ {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
+ {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
+ {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
+ {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
+ {&stats->tx_dropped_link_down, "tx_dropped", "Tx Dropped Due To Link Down"},
+ /* Flow control */
+ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
+ {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
+ {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
+ {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
+ /* Other */
+ {&stats->crc_errors, "crc_errors", "CRC Errors"},
+ {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
+ {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
+ {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
+ /* End */
+ { 0, 0, 0 }
+ };
+
+ const struct ice_sysctl_info *entry = ctls;
+ while (entry->stat != 0) {
+ SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, entry->name,
+ CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0,
+ entry->description);
+ entry++;
+ }
+}
+
+/**
+ * ice_configure_misc_interrupts - enable 'other' interrupt causes
+ * @sc: pointer to device private softc
+ *
+ * Enable various "other" interrupt causes, and associate them to interrupt 0,
+ * which is our administrative interrupt.
+ */
+void
+ice_configure_misc_interrupts(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ u32 val;
+
+ /* Read the OICR register to clear it */
+ rd32(hw, PFINT_OICR);
+
+ /* Enable useful "other" interrupt causes */
+ val = (PFINT_OICR_ECC_ERR_M |
+ PFINT_OICR_MAL_DETECT_M |
+ PFINT_OICR_GRST_M |
+ PFINT_OICR_PCI_EXCEPTION_M |
+ PFINT_OICR_VFLR_M |
+ PFINT_OICR_HMC_ERR_M |
+ PFINT_OICR_PE_CRITERR_M);
+
+ wr32(hw, PFINT_OICR_ENA, val);
+
+ /* Note that since we're using MSI-X index 0, and ITR index 0, we do
+ * not explicitly program them when writing to the PFINT_*_CTL
+	 * registers. Nevertheless, these writes associate the
+	 * interrupts with the ITR 0 vector.
+ */
+
+ /* Associate the OICR interrupt with ITR 0, and enable it */
+ wr32(hw, PFINT_OICR_CTL, PFINT_OICR_CTL_CAUSE_ENA_M);
+
+ /* Associate the Mailbox interrupt with ITR 0, and enable it */
+ wr32(hw, PFINT_MBX_CTL, PFINT_MBX_CTL_CAUSE_ENA_M);
+
+ /* Associate the AdminQ interrupt with ITR 0, and enable it */
+ wr32(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_filter_is_mcast - Check if info is a multicast filter
+ * @vsi: vsi structure addresses are targeted towards
+ * @info: filter info
+ *
+ * @returns true if the provided info is a multicast filter, and false
+ * otherwise.
+ */
+static bool
+ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info)
+{
+ const u8 *addr = info->l_data.mac.mac_addr;
+
+ /*
+ * Check if this info matches a multicast filter added by
+ * ice_add_mac_to_list
+ */
+ if ((info->flag == ICE_FLTR_TX) &&
+ (info->src_id == ICE_SRC_ID_VSI) &&
+ (info->lkup_type == ICE_SW_LKUP_MAC) &&
+ (info->vsi_handle == vsi->idx) &&
+ ETHER_IS_MULTICAST(addr) && !ETHER_IS_BROADCAST(addr))
+ return true;
+
+ return false;
+}
+
+/**
+ * @struct ice_mcast_sync_data
+ * @brief data used by ice_sync_one_mcast_filter function
+ *
+ * Structure used to store data needed for processing by the
+ * ice_sync_one_mcast_filter. This structure contains a linked list of filters
+ * to be added, an error indication, and a pointer to the device softc.
+ */
+struct ice_mcast_sync_data {
+ struct ice_list_head add_list;
+ struct ice_softc *sc;
+ int err;
+};
+
+/**
+ * ice_sync_one_mcast_filter - Check if we need to program the filter
+ * @p: void pointer to algorithm data
+ * @sdl: link level socket address
+ * @count: unused count value
+ *
+ * Called by if_foreach_llmaddr to operate on each filter in the ifp filter
+ * list. For the given address, search our internal list to see if we have
+ * found the filter. If not, add it to our list of filters that need to be
+ * programmed.
+ *
+ * @returns (1) if we've actually set up the filter to be added
+ */
+static u_int
+ice_sync_one_mcast_filter(void *p, struct sockaddr_dl *sdl,
+ u_int __unused count)
+{
+ struct ice_mcast_sync_data *data = (struct ice_mcast_sync_data *)p;
+ struct ice_softc *sc = data->sc;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_switch_info *sw = hw->switch_info;
+ const u8 *sdl_addr = (const u8 *)LLADDR(sdl);
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct ice_list_head *rules;
+ int err;
+
+ rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+
+ /*
+ * If a previous filter already indicated an error, there is no need
+ * for us to finish processing the rest of the filters.
+ */
+ if (data->err)
+ return (0);
+
+ /* See if this filter has already been programmed */
+ LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) {
+ struct ice_fltr_info *info = &itr->fltr_info;
+ const u8 *addr = info->l_data.mac.mac_addr;
+
+ /* Only check multicast filters */
+ if (!ice_filter_is_mcast(&sc->pf_vsi, info))
+ continue;
+
+ /*
+ * If this filter matches, mark the internal filter as
+ * "found", and exit.
+ */
+ if (bcmp(addr, sdl_addr, ETHER_ADDR_LEN) == 0) {
+ itr->marker = ICE_FLTR_FOUND;
+ return (1);
+ }
+ }
+
+ /*
+ * If we failed to locate the filter in our internal list, we need to
+ * place it into our add list.
+ */
+ err = ice_add_mac_to_list(&sc->pf_vsi, &data->add_list, sdl_addr,
+ ICE_FWD_TO_VSI);
+ if (err) {
+ device_printf(sc->dev,
+ "Failed to place MAC %6D onto add list, err %s\n",
+ sdl_addr, ":", ice_err_str(err));
+ data->err = err;
+
+ return (0);
+ }
+
+ return (1);
+}
+
+/**
+ * ice_sync_multicast_filters - Synchronize OS and internal filter list
+ * @sc: device private structure
+ *
+ * Called in response to SIOCDELMULTI to synchronize the operating system
+ * multicast address list with the internal list of filters programmed to
+ * firmware.
+ *
+ * Works in one phase to find added and deleted filters using a marker bit on
+ * the internal list.
+ *
+ * First, a loop over the internal list clears the marker bit. Second, each
+ * filter in the ifp list is checked. If we find it in the internal list,
+ * the marker bit is set. Otherwise, the filter is added to the add list.
+ * Third, a loop over the internal list determines if any filters have not
+ * been found. Each of these is added to the delete list. Finally, the add and
+ * delete lists are programmed to firmware to update the filters.
+ *
+ * @returns zero on success or an integer error code on failure.
+ */
+int
+ice_sync_multicast_filters(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct ice_mcast_sync_data data = {};
+ struct ice_list_head *rules, remove_list;
+ enum ice_status status;
+ int err = 0;
+
+ INIT_LIST_HEAD(&data.add_list);
+ INIT_LIST_HEAD(&remove_list);
+ data.sc = sc;
+ data.err = 0;
+
+ rules = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+
+ /* Acquire the lock for the entire duration */
+ ice_acquire_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock);
+
+ /* (1) Reset the marker state for all filters */
+ LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry)
+ itr->marker = ICE_FLTR_NOT_FOUND;
+
+ /* (2) determine which filters need to be added and removed */
+ if_foreach_llmaddr(sc->ifp, ice_sync_one_mcast_filter, (void *)&data);
+ if (data.err) {
+ /* ice_sync_one_mcast_filter already prints an error */
+ err = data.err;
+ ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock);
+ goto free_filter_lists;
+ }
+
+ LIST_FOR_EACH_ENTRY(itr, rules, ice_fltr_mgmt_list_entry, list_entry) {
+ struct ice_fltr_info *info = &itr->fltr_info;
+ const u8 *addr = info->l_data.mac.mac_addr;
+
+ /* Only check multicast filters */
+ if (!ice_filter_is_mcast(&sc->pf_vsi, info))
+ continue;
+
+ /*
+ * If the filter is not marked as found, then it must no
+ * longer be in the ifp address list, so we need to remove it.
+ */
+ if (itr->marker == ICE_FLTR_NOT_FOUND) {
+ err = ice_add_mac_to_list(&sc->pf_vsi, &remove_list,
+ addr, ICE_FWD_TO_VSI);
+ if (err) {
+ device_printf(sc->dev,
+ "Failed to place MAC %6D onto remove list, err %s\n",
+ addr, ":", ice_err_str(err));
+ ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock);
+ goto free_filter_lists;
+ }
+ }
+ }
+
+ ice_release_lock(&sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock);
+
+ status = ice_add_mac(hw, &data.add_list);
+ if (status) {
+ device_printf(sc->dev,
+ "Could not add new MAC filters, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = (EIO);
+ goto free_filter_lists;
+ }
+
+ status = ice_remove_mac(hw, &remove_list);
+ if (status) {
+ device_printf(sc->dev,
+ "Could not remove old MAC filters, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = (EIO);
+ goto free_filter_lists;
+ }
+
+free_filter_lists:
+ ice_free_fltr_list(&data.add_list);
+ ice_free_fltr_list(&remove_list);
+
+ return (err);
+}
+
+/**
+ * ice_add_vlan_hw_filter - Add a VLAN filter for a given VSI
+ * @vsi: The VSI to add the filter for
+ * @vid: VLAN to add
+ *
+ * Programs a HW filter so that the given VSI will receive the specified VLAN.
+ */
+enum ice_status
+ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ struct ice_list_head vlan_list;
+ struct ice_fltr_list_entry vlan_entry;
+
+ INIT_LIST_HEAD(&vlan_list);
+ memset(&vlan_entry, 0, sizeof(vlan_entry));
+
+ vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ vlan_entry.fltr_info.flag = ICE_FLTR_TX;
+ vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI;
+ vlan_entry.fltr_info.vsi_handle = vsi->idx;
+ vlan_entry.fltr_info.l_data.vlan.vlan_id = vid;
+
+ LIST_ADD(&vlan_entry.list_entry, &vlan_list);
+
+ return ice_add_vlan(hw, &vlan_list);
+}
+
+/**
+ * ice_remove_vlan_hw_filter - Remove a VLAN filter for a given VSI
+ * @vsi: The VSI to add the filter for
+ * @vid: VLAN to remove
+ *
+ * Removes a previously programmed HW filter for the specified VSI.
+ */
+enum ice_status
+ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
+{
+ struct ice_hw *hw = &vsi->sc->hw;
+ struct ice_list_head vlan_list;
+ struct ice_fltr_list_entry vlan_entry;
+
+ INIT_LIST_HEAD(&vlan_list);
+ memset(&vlan_entry, 0, sizeof(vlan_entry));
+
+ vlan_entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ vlan_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ vlan_entry.fltr_info.flag = ICE_FLTR_TX;
+ vlan_entry.fltr_info.src_id = ICE_SRC_ID_VSI;
+ vlan_entry.fltr_info.vsi_handle = vsi->idx;
+ vlan_entry.fltr_info.l_data.vlan.vlan_id = vid;
+
+ LIST_ADD(&vlan_entry.list_entry, &vlan_list);
+
+ return ice_remove_vlan(hw, &vlan_list);
+}
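+
+/*
+ * Illustrative usage sketch (not part of this change): a VLAN registration
+ * or unregistration path could call the helpers above roughly as follows,
+ * where the surrounding callback and the vtag variable are hypothetical:
+ *
+ *	enum ice_status status;
+ *
+ *	status = ice_add_vlan_hw_filter(&sc->pf_vsi, vtag);
+ *	if (status)
+ *		device_printf(sc->dev, "Failed to add VLAN %d filter, err %s\n",
+ *		    vtag, ice_status_str(status));
+ */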
+
+#define ICE_SYSCTL_HELP_RX_ITR \
+"\nControl Rx interrupt throttle rate." \
+"\n\t0-8160 - sets interrupt rate in usecs" \
+"\n\t -1 - reset the Rx itr to default"
+
+/**
+ * ice_sysctl_rx_itr - Display or change the Rx ITR for a VSI
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays the current Rx ITR value
+ * On write: Sets the Rx ITR value, reconfiguring device if it is up
+ */
+static int
+ice_sysctl_rx_itr(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_vsi *vsi = (struct ice_vsi *)arg1;
+ struct ice_softc *sc = vsi->sc;
+ int increment, error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ error = sysctl_handle_16(oidp, &vsi->rx_itr, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
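+	/* Allow configuring a negative value to reset to the default */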
+ if (vsi->rx_itr < 0)
+ vsi->rx_itr = ICE_DFLT_RX_ITR;
+ if (vsi->rx_itr > ICE_ITR_MAX)
+ vsi->rx_itr = ICE_ITR_MAX;
+
+ /* Assume 2usec increment if it hasn't been loaded yet */
+ increment = sc->hw.itr_gran ? : 2;
+
+ /* We need to round the value to the hardware's ITR granularity */
+	vsi->rx_itr = (vsi->rx_itr / increment) * increment;
+
+ /* If the driver has finished initializing, then we need to reprogram
+ * the ITR registers now. Otherwise, they will be programmed during
+ * driver initialization.
+ */
+ if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
+ ice_configure_rx_itr(vsi);
+
+ return (0);
+}
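+
+/*
+ * Worked example of the rounding above (values illustrative): with a 2 usec
+ * hardware ITR granularity, writing 125 to rx_itr stores (125 / 2) * 2 = 124
+ * usecs, while a negative value restores ICE_DFLT_RX_ITR instead.
+ */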
+
+#define ICE_SYSCTL_HELP_TX_ITR \
+"\nControl Tx interrupt throttle rate." \
+"\n\t0-8160 - sets interrupt rate in usecs" \
+"\n\t -1 - reset the Tx itr to default"
+
+/**
+ * ice_sysctl_tx_itr - Display or change the Tx ITR for a VSI
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * On read: Displays the current Tx ITR value
+ * On write: Sets the Tx ITR value, reconfiguring device if it is up
+ */
+static int
+ice_sysctl_tx_itr(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_vsi *vsi = (struct ice_vsi *)arg1;
+ struct ice_softc *sc = vsi->sc;
+ int increment, error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ error = sysctl_handle_16(oidp, &vsi->tx_itr, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ /* Allow configuring a negative value to reset to the default */
+ if (vsi->tx_itr < 0)
+ vsi->tx_itr = ICE_DFLT_TX_ITR;
+ if (vsi->tx_itr > ICE_ITR_MAX)
+ vsi->tx_itr = ICE_ITR_MAX;
+
+ /* Assume 2usec increment if it hasn't been loaded yet */
+ increment = sc->hw.itr_gran ? : 2;
+
+ /* We need to round the value to the hardware's ITR granularity */
+	vsi->tx_itr = (vsi->tx_itr / increment) * increment;
+
+ /* If the driver has finished initializing, then we need to reprogram
+ * the ITR registers now. Otherwise, they will be programmed during
+ * driver initialization.
+ */
+ if (ice_test_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
+ ice_configure_tx_itr(vsi);
+
+ return (0);
+}
+
+/**
+ * ice_add_vsi_tunables - Add tunables and nodes for a VSI
+ * @vsi: pointer to VSI structure
+ * @parent: parent node to add the tunables under
+ *
+ * Create a sysctl context for the VSI, so that sysctls for the VSI can be
+ * dynamically removed upon VSI removal.
+ *
+ * Add various tunables and set up the basic node structure for the VSI. Must
+ * be called *prior* to ice_add_vsi_sysctls. It should be called as soon as
+ * possible after the VSI memory is initialized.
+ *
+ * VSI specific sysctls with CTLFLAG_TUN should be initialized here so that
+ * their values can be read from loader.conf prior to their first use in the
+ * driver.
+ */
+void
+ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent)
+{
+ struct sysctl_oid_list *vsi_list;
+ char vsi_name[32], vsi_desc[32];
+
+ struct sysctl_oid_list *parent_list = SYSCTL_CHILDREN(parent);
+
+ /* Initialize the sysctl context for this VSI */
+ sysctl_ctx_init(&vsi->ctx);
+
+ /* Add a node to collect this VSI's statistics together */
+ snprintf(vsi_name, sizeof(vsi_name), "%u", vsi->idx);
+ snprintf(vsi_desc, sizeof(vsi_desc), "VSI %u", vsi->idx);
+ vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->ctx, parent_list, OID_AUTO, vsi_name,
+ CTLFLAG_RD, NULL, vsi_desc);
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+	vsi->rx_itr = ICE_DFLT_RX_ITR;
+ SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "rx_itr",
+ CTLTYPE_S16 | CTLFLAG_RWTUN,
+ vsi, 0, ice_sysctl_rx_itr, "S",
+ ICE_SYSCTL_HELP_RX_ITR);
+
+ vsi->tx_itr = ICE_DFLT_TX_ITR;
+ SYSCTL_ADD_PROC(&vsi->ctx, vsi_list, OID_AUTO, "tx_itr",
+ CTLTYPE_S16 | CTLFLAG_RWTUN,
+ vsi, 0, ice_sysctl_tx_itr, "S",
+ ICE_SYSCTL_HELP_TX_ITR);
+}
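+
+/*
+ * Illustrative only: because rx_itr and tx_itr are CTLFLAG_RWTUN, their
+ * initial values can be seeded from the kernel environment before first
+ * use, e.g. with a hypothetical /boot/loader.conf line such as
+ *	dev.ice.0.vsi.0.rx_itr="50"
+ * (the device unit and VSI index are examples, not fixed names).
+ */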
+
+/**
+ * ice_del_vsi_sysctl_ctx - Delete the sysctl context(s) of a VSI
+ * @vsi: the VSI to remove contexts for
+ *
+ * Free the context for the VSI sysctls. This includes the main context, as
+ * well as the per-queue sysctls.
+ */
+void
+ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi)
+{
+ device_t dev = vsi->sc->dev;
+ int err;
+
+ if (vsi->vsi_node) {
+ err = sysctl_ctx_free(&vsi->ctx);
+ if (err)
+ device_printf(dev, "failed to free VSI %d sysctl context, err %s\n",
+ vsi->idx, ice_err_str(err));
+ vsi->vsi_node = NULL;
+ }
+}
+
+/**
+ * ice_add_device_tunables - Add early tunable sysctls and sysctl nodes
+ * @sc: device private structure
+ *
+ * Add per-device dynamic tunable sysctls, and setup the general sysctl trees
+ * for re-use by ice_add_device_sysctls.
+ *
+ * In order for the sysctl fields to be initialized before use, this function
+ * should be called as early as possible during attach activities.
+ *
+ * Any non-global sysctl marked as CTLFLAG_TUN should likely be initialized
+ * here in this function, rather than later in ice_add_device_sysctls.
+ *
+ * To make things easier, this function is also expected to set up the various
+ * sysctl nodes in addition to tunables so that other sysctls which can't be
+ * initialized early can hook into the same nodes.
+ */
+void
+ice_add_device_tunables(struct ice_softc *sc)
+{
+ device_t dev = sc->dev;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *ctx_list =
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ /* Add a node to track VSI sysctls. Keep track of the node in the
+ * softc so that we can hook other sysctls into it later. This
+ * includes both the VSI statistics, as well as potentially dynamic
+ * VSIs in the future.
+ */
+
+ sc->vsi_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "vsi",
+ CTLFLAG_RD, NULL, "VSI Configuration and Statistics");
+
+ /* Add debug tunables */
+ ice_add_debug_tunables(sc);
+}
+
+/**
+ * ice_sysctl_dump_mac_filters - Dump a list of all HW MAC Filters
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for "mac_filters" sysctl to dump the programmed MAC filters.
+ */
+static int
+ice_sysctl_dump_mac_filters(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_list_head *rule_head;
+ struct ice_lock *rule_lock;
+ struct ice_fltr_info *fi;
+ struct sbuf *sbuf;
+ int ret;
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* Wire the old buffer so we can take a non-sleepable lock */
+ ret = sysctl_wire_old_buffer(req, 0);
+ if (ret)
+ return (ret);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+
+ sbuf_printf(sbuf, "MAC Filter List");
+
+ ice_acquire_lock(rule_lock);
+
+ LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) {
+ fi = &fm_entry->fltr_info;
+
+ sbuf_printf(sbuf,
+ "\nmac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %d",
+ fi->l_data.mac.mac_addr, ":", fi->vsi_handle,
+ ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en,
+ ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id);
+
+ /* if we have a vsi_list_info, print some information about that */
+ if (fm_entry->vsi_list_info) {
+ sbuf_printf(sbuf,
+ ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d",
+ fm_entry->vsi_count,
+ fm_entry->vsi_list_info->vsi_list_id,
+ fm_entry->vsi_list_info->ref_cnt);
+ }
+ }
+
+ ice_release_lock(rule_lock);
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_dump_vlan_filters - Dump a list of all HW VLAN Filters
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for "vlan_filters" sysctl to dump the programmed VLAN filters.
+ */
+static int
+ice_sysctl_dump_vlan_filters(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_list_head *rule_head;
+ struct ice_lock *rule_lock;
+ struct ice_fltr_info *fi;
+ struct sbuf *sbuf;
+ int ret;
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* Wire the old buffer so we can take a non-sleepable lock */
+ ret = sysctl_wire_old_buffer(req, 0);
+ if (ret)
+ return (ret);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+
+ sbuf_printf(sbuf, "VLAN Filter List");
+
+ ice_acquire_lock(rule_lock);
+
+ LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) {
+ fi = &fm_entry->fltr_info;
+
+ sbuf_printf(sbuf,
+ "\nvlan_id = %4d, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d",
+ fi->l_data.vlan.vlan_id, fi->vsi_handle,
+ ice_fltr_flag_str(fi->flag), fi->lb_en, fi->lan_en,
+ ice_fwd_act_str(fi->fltr_act), fi->fltr_rule_id);
+
+ /* if we have a vsi_list_info, print some information about that */
+ if (fm_entry->vsi_list_info) {
+ sbuf_printf(sbuf,
+ ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d",
+ fm_entry->vsi_count,
+ fm_entry->vsi_list_info->vsi_list_id,
+ fm_entry->vsi_list_info->ref_cnt);
+ }
+ }
+
+ ice_release_lock(rule_lock);
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_dump_ethertype_filters - Dump a list of all HW Ethertype filters
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for "ethertype_filters" sysctl to dump the programmed Ethertype
+ * filters.
+ */
+static int
+ice_sysctl_dump_ethertype_filters(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_list_head *rule_head;
+ struct ice_lock *rule_lock;
+ struct ice_fltr_info *fi;
+ struct sbuf *sbuf;
+ int ret;
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* Wire the old buffer so we can take a non-sleepable lock */
+ ret = sysctl_wire_old_buffer(req, 0);
+ if (ret)
+ return (ret);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rule_lock;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE].filt_rules;
+
+ sbuf_printf(sbuf, "Ethertype Filter List");
+
+ ice_acquire_lock(rule_lock);
+
+ LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) {
+ fi = &fm_entry->fltr_info;
+
+ sbuf_printf(sbuf,
+ "\nethertype = 0x%04x, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d",
+ fi->l_data.ethertype_mac.ethertype,
+ fi->vsi_handle, ice_fltr_flag_str(fi->flag),
+ fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act),
+ fi->fltr_rule_id);
+
+ /* if we have a vsi_list_info, print some information about that */
+ if (fm_entry->vsi_list_info) {
+ sbuf_printf(sbuf,
+ ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d",
+ fm_entry->vsi_count,
+ fm_entry->vsi_list_info->vsi_list_id,
+ fm_entry->vsi_list_info->ref_cnt);
+ }
+ }
+
+ ice_release_lock(rule_lock);
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_dump_ethertype_mac_filters - Dump a list of all HW Ethertype/MAC filters
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for "ethertype_mac_filters" sysctl to dump the programmed
+ * Ethertype/MAC filters.
+ */
+static int
+ice_sysctl_dump_ethertype_mac_filters(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_list_head *rule_head;
+ struct ice_lock *rule_lock;
+ struct ice_fltr_info *fi;
+ struct sbuf *sbuf;
+ int ret;
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* Wire the old buffer so we can take a non-sleepable lock */
+ ret = sysctl_wire_old_buffer(req, 0);
+ if (ret)
+ return (ret);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rule_lock;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_ETHERTYPE_MAC].filt_rules;
+
+ sbuf_printf(sbuf, "Ethertype/MAC Filter List");
+
+ ice_acquire_lock(rule_lock);
+
+ LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry, list_entry) {
+ fi = &fm_entry->fltr_info;
+
+ sbuf_printf(sbuf,
+ "\nethertype = 0x%04x, mac = %6D, vsi_handle = %3d, fw_act_flag = %5s, lb_en = %1d, lan_en = %1d, fltr_act = %15s, fltr_rule_id = %4d",
+ fi->l_data.ethertype_mac.ethertype,
+ fi->l_data.ethertype_mac.mac_addr, ":",
+ fi->vsi_handle, ice_fltr_flag_str(fi->flag),
+ fi->lb_en, fi->lan_en, ice_fwd_act_str(fi->fltr_act),
+ fi->fltr_rule_id);
+
+ /* if we have a vsi_list_info, print some information about that */
+ if (fm_entry->vsi_list_info) {
+ sbuf_printf(sbuf,
+ ", vsi_count = %3d, vsi_list_id = %3d, ref_cnt = %3d",
+ fm_entry->vsi_count,
+ fm_entry->vsi_list_info->vsi_list_id,
+ fm_entry->vsi_list_info->ref_cnt);
+ }
+ }
+
+ ice_release_lock(rule_lock);
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_dump_state_flags - Dump device driver state flags
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for "state" sysctl to display currently set driver state flags.
+ */
+static int
+ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct sbuf *sbuf;
+ u32 copied_state;
+ unsigned int i;
+ bool at_least_one = false;
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* Make a copy of the state to ensure we display coherent values */
+ copied_state = atomic_load_acq_32(&sc->state);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ /* Add the string for each set state to the sbuf */
+ for (i = 0; i < 32; i++) {
+ if (copied_state & BIT(i)) {
+ const char *str = ice_state_to_str((enum ice_state)i);
+
+ at_least_one = true;
+
+ if (str)
+ sbuf_printf(sbuf, "\n%s", str);
+ else
+ sbuf_printf(sbuf, "\nBIT(%u)", i);
+ }
+ }
+
+ if (!at_least_one)
+ sbuf_printf(sbuf, "Nothing set");
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
+/**
+ * ice_add_debug_tunables - Add tunables helpful for debugging the device driver
+ * @sc: device private structure
+ *
+ * Add sysctl tunable values related to debugging the device driver. For now,
+ * this means a tunable to set the debug mask early during driver load.
+ *
+ * The debug node will be marked CTLFLAG_SKIP unless INVARIANTS is defined, so
+ * that in normal kernel builds, these will all be hidden, but on a debug
+ * kernel they will be more easily visible.
+ */
+static void
+ice_add_debug_tunables(struct ice_softc *sc)
+{
+ struct sysctl_oid_list *debug_list;
+ device_t dev = sc->dev;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *ctx_list =
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug",
+ ICE_CTLFLAG_DEBUG | CTLFLAG_RD,
+ NULL, "Debug Sysctls");
+ debug_list = SYSCTL_CHILDREN(sc->debug_sysctls);
+
+ SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "debug_mask",
+ CTLFLAG_RW | CTLFLAG_TUN, &sc->hw.debug_mask, 0,
+ "Debug message enable/disable mask");
+
+ /* Load the default value from the global sysctl first */
+ sc->enable_tx_fc_filter = ice_enable_tx_fc_filter;
+
+ SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_fc_filter",
+ CTLFLAG_RDTUN, &sc->enable_tx_fc_filter, 0,
+ "Drop Ethertype 0x8808 control frames originating from software on this PF");
+
+ /* Load the default value from the global sysctl first */
+ sc->enable_tx_lldp_filter = ice_enable_tx_lldp_filter;
+
+ SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "enable_tx_lldp_filter",
+ CTLFLAG_RDTUN, &sc->enable_tx_lldp_filter, 0,
+ "Drop Ethertype 0x88cc LLDP frames originating from software on this PF");
+}
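+
+/*
+ * Illustrative only: since debug_mask above is marked CTLFLAG_TUN, a
+ * hypothetical /boot/loader.conf entry such as
+ *	dev.ice.0.debug.debug_mask="0xffffffff"
+ * would enable debug messages from early in attach; the mask remains
+ * writable at runtime via sysctl(8) because it is also CTLFLAG_RW.
+ */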
+
+#define ICE_SYSCTL_HELP_REQUEST_RESET \
+"\nRequest the driver to initiate a reset." \
+"\n\tpfr - Initiate a PF reset" \
+"\n\tcorer - Initiate a CORE reset" \
+"\n\tglobr - Initiate a GLOBAL reset"
+
+/**
+ * @var rl_sysctl_ticks
+ * @brief timestamp for latest reset request sysctl call
+ *
+ * Helps rate-limit the call to the sysctl which resets the device
+ */
+int rl_sysctl_ticks = 0;
+
+/**
+ * ice_sysctl_request_reset - Request that the driver initiate a reset
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Callback for "request_reset" sysctl to request that the driver initiate
+ * a reset. Expects to be passed one of the following strings:
+ *
+ * "pfr" - Initiate a PF reset
+ * "corer" - Initiate a CORE reset
+ * "globr" - Initiate a Global reset
+ */
+static int
+ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+ enum ice_reset_req reset_type = ICE_RESET_INVAL;
+ const char *reset_message;
+ int error = 0;
+
+ /* Buffer to store the requested reset string. Must contain enough
+ * space to store the largest expected reset string, which currently
+ * means 6 bytes of space.
+ */
+ char reset[6] = "";
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ error = priv_check(curthread, PRIV_DRIVER);
+ if (error)
+ return (error);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ /* Read in the requested reset type. */
+ error = sysctl_handle_string(oidp, reset, sizeof(reset), req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (strcmp(reset, "pfr") == 0) {
+ reset_message = "Requesting a PF reset";
+ reset_type = ICE_RESET_PFR;
+ } else if (strcmp(reset, "corer") == 0) {
+ reset_message = "Initiating a CORE reset";
+ reset_type = ICE_RESET_CORER;
+ } else if (strcmp(reset, "globr") == 0) {
+ reset_message = "Initiating a GLOBAL reset";
+ reset_type = ICE_RESET_GLOBR;
+ } else if (strcmp(reset, "empr") == 0) {
+ device_printf(sc->dev, "Triggering an EMP reset via software is not currently supported\n");
+ return (EOPNOTSUPP);
+ }
+
+ if (reset_type == ICE_RESET_INVAL) {
+ device_printf(sc->dev, "%s is not a valid reset request\n", reset);
+ return (EINVAL);
+ }
+
+ /*
+ * Rate-limit the frequency at which this function is called.
+	 * Assuming a request is handled successfully once, everything should
+	 * typically complete within the allotted time frame. However, to guard
+	 * against odd setups where the reset has finished but the driver is
+	 * still rebuilding, we do not queue a second request; we simply error
+	 * out and let the caller retry, if so desired.
+ */
+ if (TICKS_2_MSEC(ticks - rl_sysctl_ticks) < 500) {
+ device_printf(sc->dev,
+ "Call frequency too high. Operation aborted.\n");
+ return (EBUSY);
+ }
+ rl_sysctl_ticks = ticks;
+
+ if (TICKS_2_MSEC(ticks - sc->rebuild_ticks) < 100) {
+ device_printf(sc->dev, "Device rebuilding. Operation aborted.\n");
+ return (EBUSY);
+ }
+
+ if (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) {
+ device_printf(sc->dev, "Device in reset. Operation aborted.\n");
+ return (EBUSY);
+ }
+
+ device_printf(sc->dev, "%s\n", reset_message);
+
+ /* Initiate the PF reset during the admin status task */
+ if (reset_type == ICE_RESET_PFR) {
+ ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
+ return (0);
+ }
+
+ /*
+ * Other types of resets including CORE and GLOBAL resets trigger an
+ * interrupt on all PFs. Initiate the reset now. Preparation and
+ * rebuild logic will be handled by the admin status task.
+ */
+ status = ice_reset(hw, reset_type);
+
+ /*
+ * Resets can take a long time and we still don't want another call
+ * to this function before we settle down.
+ */
+ rl_sysctl_ticks = ticks;
+
+ if (status) {
+ device_printf(sc->dev, "failed to initiate device reset, err %s\n",
+ ice_status_str(status));
+ ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
+ return (EFAULT);
+ }
+
+ return (0);
+}
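+
+/*
+ * Illustrative usage of the sysctl above (the path assumes the default
+ * device sysctl layout and a hypothetical unit number):
+ *
+ *	sysctl dev.ice.0.debug.request_reset=pfr
+ *
+ * Repeated requests within the 500 ms rate limit are rejected with EBUSY.
+ */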
+
+/**
+ * ice_add_debug_sysctls - Add sysctls helpful for debugging the device driver
+ * @sc: device private structure
+ *
+ * Add sysctls related to debugging the device driver. Generally these should
+ * simply be sysctls which dump internal driver state, to aid in understanding
+ * what the driver is doing.
+ */
+static void
+ice_add_debug_sysctls(struct ice_softc *sc)
+{
+ struct sysctl_oid *sw_node;
+ struct sysctl_oid_list *debug_list, *sw_list;
+ device_t dev = sc->dev;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+
+ debug_list = SYSCTL_CHILDREN(sc->debug_sysctls);
+
+ SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "request_reset",
+ CTLTYPE_STRING | CTLFLAG_WR, sc, 0,
+ ice_sysctl_request_reset, "A",
+ ICE_SYSCTL_HELP_REQUEST_RESET);
+
+ SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "pfr_count", CTLFLAG_RD,
+ &sc->soft_stats.pfr_count, 0, "# of PF resets handled");
+
+ SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "corer_count", CTLFLAG_RD,
+ &sc->soft_stats.corer_count, 0, "# of CORE resets handled");
+
+ SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "globr_count", CTLFLAG_RD,
+ &sc->soft_stats.globr_count, 0, "# of Global resets handled");
+
+ SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "empr_count", CTLFLAG_RD,
+ &sc->soft_stats.empr_count, 0, "# of EMP resets handled");
+
+ SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "tx_mdd_count", CTLFLAG_RD,
+ &sc->soft_stats.tx_mdd_count, 0, "# of Tx MDD events detected");
+
+ SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "rx_mdd_count", CTLFLAG_RD,
+ &sc->soft_stats.rx_mdd_count, 0, "# of Rx MDD events detected");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "state", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_dump_state_flags, "A", "Driver State Flags");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "phy_type_low", CTLTYPE_U64 | CTLFLAG_RW,
+ sc, 0, ice_sysctl_phy_type_low, "QU",
+ "PHY type Low from Get PHY Caps/Set PHY Cfg");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "phy_type_high", CTLTYPE_U64 | CTLFLAG_RW,
+ sc, 0, ice_sysctl_phy_type_high, "QU",
+ "PHY type High from Get PHY Caps/Set PHY Cfg");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "phy_sw_caps", CTLTYPE_STRUCT | CTLFLAG_RD,
+ sc, 0, ice_sysctl_phy_sw_caps, "",
+ "Get PHY Capabilities (Software configuration)");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "phy_nvm_caps", CTLTYPE_STRUCT | CTLFLAG_RD,
+ sc, 0, ice_sysctl_phy_nvm_caps, "",
+ "Get PHY Capabilities (NVM configuration)");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "phy_topo_caps", CTLTYPE_STRUCT | CTLFLAG_RD,
+ sc, 0, ice_sysctl_phy_topo_caps, "",
+ "Get PHY Capabilities (Topology configuration)");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "phy_link_status", CTLTYPE_STRUCT | CTLFLAG_RD,
+ sc, 0, ice_sysctl_phy_link_status, "",
+ "Get PHY Link Status");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_read_i2c_diag_data, "A",
+ "Dump selected diagnostic data from FW");
+
+ SYSCTL_ADD_U32(ctx, debug_list, OID_AUTO, "fw_build", CTLFLAG_RD,
+ &sc->hw.fw_build, 0, "FW Build ID");
+
+ SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "os_ddp_version", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_os_pkg_version, "A",
+ "DDP package name and version found in ice_ddp");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "cur_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_fw_cur_lldp_persist_status, "A", "Current LLDP persistent status");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "dflt_lldp_persist_status", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_fw_dflt_lldp_persist_status, "A", "Default LLDP persistent status");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "negotiated_fc", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_negotiated_fc, "A", "Current Negotiated Flow Control mode");
+
+ sw_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "switch",
+ CTLFLAG_RD, NULL, "Switch Configuration");
+ sw_list = SYSCTL_CHILDREN(sw_node);
+
+ SYSCTL_ADD_PROC(ctx, sw_list,
+ OID_AUTO, "mac_filters", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_dump_mac_filters, "A", "MAC Filters");
+
+ SYSCTL_ADD_PROC(ctx, sw_list,
+ OID_AUTO, "vlan_filters", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_dump_vlan_filters, "A", "VLAN Filters");
+
+ SYSCTL_ADD_PROC(ctx, sw_list,
+ OID_AUTO, "ethertype_filters", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_dump_ethertype_filters, "A", "Ethertype Filters");
+
+ SYSCTL_ADD_PROC(ctx, sw_list,
+ OID_AUTO, "ethertype_mac_filters", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_dump_ethertype_mac_filters, "A", "Ethertype/MAC Filters");
+}
+
+/**
+ * ice_vsi_disable_tx - Disable (unconfigure) Tx queues for a VSI
+ * @vsi: the VSI to disable
+ *
+ * Disables the Tx queues associated with this VSI. Essentially the opposite
+ * of ice_cfg_vsi_for_tx.
+ */
+int
+ice_vsi_disable_tx(struct ice_vsi *vsi)
+{
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+ u32 *q_teids;
+ u16 *q_ids, *q_handles;
+ int i, err = 0;
+
+ if (vsi->num_tx_queues > 255)
+ return (ENOSYS);
+
+ q_teids = (u32 *)malloc(sizeof(*q_teids) * vsi->num_tx_queues,
+ M_ICE, M_NOWAIT|M_ZERO);
+ if (!q_teids)
+ return (ENOMEM);
+
+ q_ids = (u16 *)malloc(sizeof(*q_ids) * vsi->num_tx_queues,
+ M_ICE, M_NOWAIT|M_ZERO);
+ if (!q_ids) {
+ err = (ENOMEM);
+ goto free_q_teids;
+ }
+
+ q_handles = (u16 *)malloc(sizeof(*q_handles) * vsi->num_tx_queues,
+ M_ICE, M_NOWAIT|M_ZERO);
+ if (!q_handles) {
+ err = (ENOMEM);
+ goto free_q_ids;
+ }
+
+ for (i = 0; i < vsi->num_tx_queues; i++) {
+ struct ice_tx_queue *txq = &vsi->tx_queues[i];
+
+ q_ids[i] = vsi->tx_qmap[i];
+ q_handles[i] = i;
+ q_teids[i] = txq->q_teid;
+ }
+
+ status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, vsi->num_tx_queues,
+ q_handles, q_ids, q_teids, ICE_NO_RESET, 0, NULL);
+ if (status == ICE_ERR_DOES_NOT_EXIST) {
+ ; /* Queues have already been disabled, no need to report this as an error */
+ } else if (status == ICE_ERR_RESET_ONGOING) {
+ device_printf(sc->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status) {
+ device_printf(sc->dev,
+ "Failed to disable LAN Tx queues: err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = (ENODEV);
+ }
+
+/* free_q_handles: */
+ free(q_handles, M_ICE);
+free_q_ids:
+ free(q_ids, M_ICE);
+free_q_teids:
+ free(q_teids, M_ICE);
+
+ return err;
+}
+
+/**
+ * ice_vsi_set_rss_params - Set the RSS parameters for the VSI
+ * @vsi: the VSI to configure
+ *
+ * Sets the RSS table size and lookup table type for the VSI based on its
+ * VSI type.
+ */
+static void
+ice_vsi_set_rss_params(struct ice_vsi *vsi)
+{
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw_common_caps *cap;
+
+ cap = &sc->hw.func_caps.common_cap;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ /* The PF VSI inherits RSS instance of the PF */
+ vsi->rss_table_size = cap->rss_table_size;
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ break;
+ case ICE_VSI_VF:
+ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ break;
+ default:
+ device_printf(sc->dev,
+ "VSI %d: RSS not supported for VSI type %d\n",
+ vsi->idx, vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_vsi_add_txqs_ctx - Create a sysctl context and node to store txq sysctls
+ * @vsi: The VSI to add the context for
+ *
+ * Creates a sysctl context for storing txq sysctls. Additionally creates
+ * a node rooted at the given VSI's main sysctl node. This context will be
+ * used to store per-txq sysctls which may need to be released during the
+ * driver's lifetime.
+ */
+void
+ice_vsi_add_txqs_ctx(struct ice_vsi *vsi)
+{
+ struct sysctl_oid_list *vsi_list;
+
+ sysctl_ctx_init(&vsi->txqs_ctx);
+
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+ vsi->txqs_node = SYSCTL_ADD_NODE(&vsi->txqs_ctx, vsi_list, OID_AUTO, "txqs",
+ CTLFLAG_RD, NULL, "Tx Queues");
+}
+
+/**
+ * ice_vsi_add_rxqs_ctx - Create a sysctl context and node to store rxq sysctls
+ * @vsi: The VSI to add the context for
+ *
+ * Creates a sysctl context for storing rxq sysctls. Additionally creates
+ * a node rooted at the given VSI's main sysctl node. This context will be
+ * used to store per-rxq sysctls which may need to be released during the
+ * driver's lifetime.
+ */
+void
+ice_vsi_add_rxqs_ctx(struct ice_vsi *vsi)
+{
+ struct sysctl_oid_list *vsi_list;
+
+ sysctl_ctx_init(&vsi->rxqs_ctx);
+
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+ vsi->rxqs_node = SYSCTL_ADD_NODE(&vsi->rxqs_ctx, vsi_list, OID_AUTO, "rxqs",
+ CTLFLAG_RD, NULL, "Rx Queues");
+}
+
+/**
+ * ice_vsi_del_txqs_ctx - Delete the Tx queue sysctl context for this VSI
+ * @vsi: The VSI to delete from
+ *
+ * Frees the txq sysctl context created for storing the per-queue Tx sysctls.
+ * Must be called prior to freeing the Tx queue memory, in order to avoid
+ * having sysctls point at stale memory.
+ */
+void
+ice_vsi_del_txqs_ctx(struct ice_vsi *vsi)
+{
+ device_t dev = vsi->sc->dev;
+ int err;
+
+ if (vsi->txqs_node) {
+ err = sysctl_ctx_free(&vsi->txqs_ctx);
+ if (err)
+ device_printf(dev, "failed to free VSI %d txqs_ctx, err %s\n",
+ vsi->idx, ice_err_str(err));
+ vsi->txqs_node = NULL;
+ }
+}
+
+/**
+ * ice_vsi_del_rxqs_ctx - Delete the Rx queue sysctl context for this VSI
+ * @vsi: The VSI to delete from
+ *
+ * Frees the rxq sysctl context created for storing the per-queue Rx sysctls.
+ * Must be called prior to freeing the Rx queue memory, in order to avoid
+ * having sysctls point at stale memory.
+ */
+void
+ice_vsi_del_rxqs_ctx(struct ice_vsi *vsi)
+{
+ device_t dev = vsi->sc->dev;
+ int err;
+
+ if (vsi->rxqs_node) {
+ err = sysctl_ctx_free(&vsi->rxqs_ctx);
+ if (err)
+ device_printf(dev, "failed to free VSI %d rxqs_ctx, err %s\n",
+ vsi->idx, ice_err_str(err));
+ vsi->rxqs_node = NULL;
+ }
+}
+
+/**
+ * ice_add_txq_sysctls - Add per-queue sysctls for a Tx queue
+ * @txq: pointer to the Tx queue
+ *
+ * Add per-queue sysctls for a given Tx queue. Can't be called during
+ * ice_add_vsi_sysctls, since the queue memory has not yet been set up.
+ */
+void
+ice_add_txq_sysctls(struct ice_tx_queue *txq)
+{
+ struct ice_vsi *vsi = txq->vsi;
+ struct sysctl_ctx_list *ctx = &vsi->txqs_ctx;
+ struct sysctl_oid_list *txqs_list, *this_txq_list;
+ struct sysctl_oid *txq_node;
+ char txq_name[32], txq_desc[32];
+
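+ /* Sentinel-terminated table of the per-queue Tx statistics to export */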
+ const struct ice_sysctl_info ctls[] = {
+ { &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" },
+ { &txq->stats.tx_bytes, "tx_bytes", "Queue Bytes Transmitted" },
+ { &txq->stats.mss_too_small, "mss_too_small", "TSO sends with an MSS less than 64" },
+ { 0, 0, 0 }
+ };
+
+ const struct ice_sysctl_info *entry = ctls;
+
+ txqs_list = SYSCTL_CHILDREN(vsi->txqs_node);
+
+ snprintf(txq_name, sizeof(txq_name), "%u", txq->me);
+ snprintf(txq_desc, sizeof(txq_desc), "Tx Queue %u", txq->me);
+ txq_node = SYSCTL_ADD_NODE(ctx, txqs_list, OID_AUTO, txq_name,
+ CTLFLAG_RD, NULL, txq_desc);
+ this_txq_list = SYSCTL_CHILDREN(txq_node);
+
+ /* Add the Tx queue statistics */
+ while (entry->stat != 0) {
+ SYSCTL_ADD_U64(ctx, this_txq_list, OID_AUTO, entry->name,
+ CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0,
+ entry->description);
+ entry++;
+ }
+}
+
+/**
+ * ice_add_rxq_sysctls - Add per-queue sysctls for an Rx queue
+ * @rxq: pointer to the Rx queue
+ *
+ * Add per-queue sysctls for a given Rx queue. Can't be called during
+ * ice_add_vsi_sysctls, since the queue memory has not yet been set up.
+ */
+void
+ice_add_rxq_sysctls(struct ice_rx_queue *rxq)
+{
+ struct ice_vsi *vsi = rxq->vsi;
+ struct sysctl_ctx_list *ctx = &vsi->rxqs_ctx;
+ struct sysctl_oid_list *rxqs_list, *this_rxq_list;
+ struct sysctl_oid *rxq_node;
+ char rxq_name[32], rxq_desc[32];
+
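+ /* Sentinel-terminated table of the per-queue Rx statistics to export */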
+ const struct ice_sysctl_info ctls[] = {
+ { &rxq->stats.rx_packets, "rx_packets", "Queue Packets Received" },
+ { &rxq->stats.rx_bytes, "rx_bytes", "Queue Bytes Received" },
+ { &rxq->stats.desc_errs, "rx_desc_errs", "Queue Rx Descriptor Errors" },
+ { 0, 0, 0 }
+ };
+
+ const struct ice_sysctl_info *entry = ctls;
+
+ rxqs_list = SYSCTL_CHILDREN(vsi->rxqs_node);
+
+ snprintf(rxq_name, sizeof(rxq_name), "%u", rxq->me);
+ snprintf(rxq_desc, sizeof(rxq_desc), "Rx Queue %u", rxq->me);
+ rxq_node = SYSCTL_ADD_NODE(ctx, rxqs_list, OID_AUTO, rxq_name,
+ CTLFLAG_RD, NULL, rxq_desc);
+ this_rxq_list = SYSCTL_CHILDREN(rxq_node);
+
+ /* Add the Rx queue statistics */
+ while (entry->stat != 0) {
+ SYSCTL_ADD_U64(ctx, this_rxq_list, OID_AUTO, entry->name,
+ CTLFLAG_RD | CTLFLAG_STATS, entry->stat, 0,
+ entry->description);
+ entry++;
+ }
+}
+
+/**
+ * ice_get_default_rss_key - Obtain a default RSS key
+ * @seed: storage for the RSS key data
+ *
+ * Copies a pre-generated RSS key into the seed memory. The seed pointer must
+ * point to a block of memory that is at least 40 bytes in size.
+ *
+ * The key isn't randomly generated each time this function is called because
+ * that makes the RSS key change every time we reconfigure RSS. This does mean
+ * that we're hard coding a possibly 'well known' key. We might want to
+ * investigate randomly generating this key once during the first call.
+ */
+static void
+ice_get_default_rss_key(u8 *seed)
+{
+ const u8 default_seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE] = {
+ 0x39, 0xed, 0xff, 0x4d, 0x43, 0x58, 0x42, 0xc3, 0x5f, 0xb8,
+ 0xa5, 0x32, 0x95, 0x65, 0x81, 0xcd, 0x36, 0x79, 0x71, 0x97,
+ 0xde, 0xa4, 0x41, 0x40, 0x6f, 0x27, 0xe9, 0x81, 0x13, 0xa0,
+ 0x95, 0x93, 0x5b, 0x1e, 0x9d, 0x27, 0x9d, 0x24, 0x84, 0xb5,
+ };
+
+ bcopy(default_seed, seed, ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+}
+
+/**
+ * ice_set_rss_key - Configure a given VSI with the default RSS key
+ * @vsi: the VSI to configure
+ *
+ * Program the hardware RSS key. We use rss_getkey to grab the kernel RSS key.
+ * If the kernel RSS interface is not available, this will fall back to our
+ * pre-generated hash seed from ice_get_default_rss_key().
+ */
+static int
+ice_set_rss_key(struct ice_vsi *vsi)
+{
+ struct ice_aqc_get_set_rss_keys keydata = { .standard_rss_key = {0} };
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+
+ /*
+ * If the RSS kernel interface is disabled, this will return the
+ * default RSS key above.
+ */
+ rss_getkey(keydata.standard_rss_key);
+
+ status = ice_aq_set_rss_key(hw, vsi->idx, &keydata);
+ if (status) {
+ device_printf(sc->dev,
+ "ice_aq_set_rss_key status %s, error %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ return (0);
+}
+
+/**
+ * ice_set_rss_flow_flds - Program the RSS hash flows after package init
+ * @vsi: the VSI to configure
+ *
+ * If the package file is initialized, the default RSS flows are reset. We
+ * need to reprogram the expected hash configuration. We'll use
+ * rss_gethashconfig() to determine which flows to enable. If RSS kernel
+ * support is not enabled, this macro will fall back to suitable defaults.
+ */
+static void
+ice_set_rss_flow_flds(struct ice_vsi *vsi)
+{
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ u_int rss_hash_config;
+
+ rss_hash_config = rss_gethashconfig();
+
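+ /* Program a hardware RSS flow configuration for each hash type that
+ * the stack has requested.
+ */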
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) {
+ status = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ device_printf(dev,
+ "ice_add_rss_cfg on VSI %d failed for ipv4 flow, err %s aq_err %s\n",
+ vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ }
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) {
+ status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ device_printf(dev,
+ "ice_add_rss_cfg on VSI %d failed for tcp4 flow, err %s aq_err %s\n",
+ vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ }
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) {
+ status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
+ if (status)
+ device_printf(dev,
+ "ice_add_rss_cfg on VSI %d failed for udp4 flow, err %s aq_err %s\n",
+ vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ }
+ if (rss_hash_config & (RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_IPV6_EX)) {
+ status = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ device_printf(dev,
+ "ice_add_rss_cfg on VSI %d failed for ipv6 flow, err %s aq_err %s\n",
+ vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ }
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) {
+ status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ device_printf(dev,
+ "ice_add_rss_cfg on VSI %d failed for tcp6 flow, err %s aq_err %s\n",
+ vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ }
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) {
+ status = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
+ if (status)
+ device_printf(dev,
+ "ice_add_rss_cfg on VSI %d failed for udp6 flow, err %s aq_err %s\n",
+ vsi->idx, ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ }
+
+ /* Warn about RSS hash types which are not supported */
+ /* coverity[dead_error_condition] */
+ if (rss_hash_config & ~ICE_DEFAULT_RSS_HASH_CONFIG) {
+ device_printf(dev,
+ "ice_add_rss_cfg on VSI %d could not configure every requested hash type\n",
+ vsi->idx);
+ }
+}
+
+/**
+ * ice_set_rss_lut - Program the RSS lookup table for a VSI
+ * @vsi: the VSI to configure
+ *
+ * Programs the RSS lookup table for a given VSI. We use
+ * rss_get_indirection_to_bucket which will use the indirection table provided
+ * by the kernel RSS interface when available. If the kernel RSS interface is
+ * not available, we will fall back to a simple round-robin fashion queue
+ * assignment.
+ */
+static int
+ice_set_rss_lut(struct ice_vsi *vsi)
+{
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ int i, err = 0;
+ u8 *lut;
+
+ lut = (u8 *)malloc(vsi->rss_table_size, M_ICE, M_NOWAIT|M_ZERO);
+ if (!lut) {
+ device_printf(dev, "Failed to allocate RSS lut memory\n");
+ return (ENOMEM);
+ }
+
+ /* Populate the LUT across the available Rx queues. If the RSS kernel
+ * interface is disabled, this assigns the lookup table in a simple
+ * round-robin fashion.
+ */
+ for (i = 0; i < vsi->rss_table_size; i++) {
+ /* XXX: this needs to be changed if num_rx_queues ever counts
+ * more than just the RSS queues */
+ lut[i] = rss_get_indirection_to_bucket(i) % vsi->num_rx_queues;
+ }
+
+ status = ice_aq_set_rss_lut(hw, vsi->idx, vsi->rss_lut_type,
+ lut, vsi->rss_table_size);
+ if (status) {
+ device_printf(dev,
+ "Cannot set RSS lut, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = (EIO);
+ }
+
+ free(lut, M_ICE);
+ return err;
+}
+
+/**
+ * ice_config_rss - Configure RSS for a VSI
+ * @vsi: the VSI to configure
+ *
+ * If FEATURE_RSS is enabled, configures the RSS lookup table and hash key for
+ * a given VSI.
+ */
+int
+ice_config_rss(struct ice_vsi *vsi)
+{
+ int err;
+
+ /* Nothing to do, if RSS is not enabled */
+ if (!ice_is_bit_set(vsi->sc->feat_en, ICE_FEATURE_RSS))
+ return 0;
+
+ err = ice_set_rss_key(vsi);
+ if (err)
+ return err;
+
+ ice_set_rss_flow_flds(vsi);
+
+ return ice_set_rss_lut(vsi);
+}
+
+/**
+ * ice_log_pkg_init - Log a message about status of DDP initialization
+ * @sc: the device softc pointer
+ * @pkg_status: the status result of ice_copy_and_init_pkg
+ *
+ * Called by ice_load_pkg_file after an attempt to download the DDP package
+ * contents to the device. Determines whether the download was successful or
+ * not and logs an appropriate message for the system administrator.
+ *
+ * @post if a DDP package was previously downloaded on another port and it
+ * is not compatible with this driver, pkg_status will be updated to reflect
+ * this, and the driver will transition to safe mode.
+ */
+void
+ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ struct sbuf *active_pkg, *os_pkg;
+
+ active_pkg = sbuf_new_auto();
+ ice_active_pkg_version_str(hw, active_pkg);
+ sbuf_finish(active_pkg);
+
+ os_pkg = sbuf_new_auto();
+ ice_os_pkg_version_str(hw, os_pkg);
+ sbuf_finish(os_pkg);
+
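+ /* active_pkg describes the package currently programmed on the device,
+ * while os_pkg describes the package contained in the ice_ddp module.
+ */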
+ switch (*pkg_status) {
+ case ICE_SUCCESS:
+ /* The package download AdminQ command returned success, either because
+ * this download succeeded or because of ICE_ERR_AQ_NO_WORK, since there
+ * is already a package loaded on the device.
+ */
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name,
+ sizeof(hw->pkg_name))) {
+ switch (hw->pkg_dwnld_status) {
+ case ICE_AQ_RC_OK:
+ device_printf(dev,
+ "The DDP package was successfully loaded: %s.\n",
+ sbuf_data(active_pkg));
+ break;
+ case ICE_AQ_RC_EEXIST:
+ device_printf(dev,
+ "DDP package already present on device: %s.\n",
+ sbuf_data(active_pkg));
+ break;
+ default:
+ /* We do not expect this to occur, but the
+ * extra messaging is here in case something
+ * changes in the ice_init_pkg flow.
+ */
+ device_printf(dev,
+ "DDP package already present on device: %s. An unexpected error occurred, pkg_dwnld_status %s.\n",
+ sbuf_data(active_pkg),
+ ice_aq_str(hw->pkg_dwnld_status));
+ break;
+ }
+ } else if (pkg_ver_compatible(&hw->active_pkg_ver) == 0) {
+ device_printf(dev,
+ "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n",
+ sbuf_data(active_pkg),
+ sbuf_data(os_pkg));
+ } else if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) {
+ device_printf(dev,
+ "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ sbuf_data(active_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ *pkg_status = ICE_ERR_NOT_SUPPORTED;
+ } else {
+ device_printf(dev,
+ "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ sbuf_data(active_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ *pkg_status = ICE_ERR_NOT_SUPPORTED;
+ }
+ break;
+ case ICE_ERR_NOT_SUPPORTED:
+ /*
+ * This assumes that the active_pkg_ver will not be
+ * initialized if the ice_ddp package version is not
+ * supported.
+ */
+ if (pkg_ver_empty(&hw->active_pkg_ver, hw->active_pkg_name)) {
+ /* The ice_ddp version is not supported */
+ if (pkg_ver_compatible(&hw->pkg_ver) > 0) {
+ device_printf(dev,
+ "The DDP package in the ice_ddp module is higher than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated driver. Entering Safe Mode.\n",
+ sbuf_data(os_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ } else if (pkg_ver_compatible(&hw->pkg_ver) < 0) {
+ device_printf(dev,
+ "The DDP package in the ice_ddp module is lower than the driver supports. The ice_ddp module has package %s. The driver requires version %d.%d.x.x. Please use an updated ice_ddp module. Entering Safe Mode.\n",
+ sbuf_data(os_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ } else {
+ device_printf(dev,
+ "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ ice_status_str(*pkg_status),
+ ice_aq_str(hw->pkg_dwnld_status),
+ sbuf_data(os_pkg),
+ sbuf_data(active_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ }
+ } else {
+ if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) {
+ device_printf(dev,
+ "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ sbuf_data(active_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ } else if (pkg_ver_compatible(&hw->active_pkg_ver) < 0) {
+ device_printf(dev,
+ "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ sbuf_data(active_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ } else {
+ device_printf(dev,
+ "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ ice_status_str(*pkg_status),
+ ice_aq_str(hw->pkg_dwnld_status),
+ sbuf_data(os_pkg),
+ sbuf_data(active_pkg),
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ }
+ }
+ break;
+ case ICE_ERR_CFG:
+ case ICE_ERR_BUF_TOO_SHORT:
+ case ICE_ERR_PARAM:
+ device_printf(dev,
+ "The DDP package in the ice_ddp module is invalid. Entering Safe Mode\n");
+ break;
+ case ICE_ERR_FW_DDP_MISMATCH:
+ device_printf(dev,
+ "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
+ break;
+ case ICE_ERR_AQ_ERROR:
+ switch (hw->pkg_dwnld_status) {
+ case ICE_AQ_RC_ENOSEC:
+ case ICE_AQ_RC_EBADSIG:
+ device_printf(dev,
+ "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n");
+ goto free_sbufs;
+ case ICE_AQ_RC_ESVN:
+ device_printf(dev,
+ "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n");
+ goto free_sbufs;
+ case ICE_AQ_RC_EBADMAN:
+ case ICE_AQ_RC_EBADBUF:
+ device_printf(dev,
+ "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n");
+ goto free_sbufs;
+ default:
+ break;
+ }
+ /* fall-through */
+ default:
+ device_printf(dev,
+ "An unknown error (%s aq_err %s) occurred when loading the DDP package. Entering Safe Mode.\n",
+ ice_status_str(*pkg_status),
+ ice_aq_str(hw->pkg_dwnld_status));
+ break;
+ }
+
+free_sbufs:
+ sbuf_delete(active_pkg);
+ sbuf_delete(os_pkg);
+}
+
+/**
+ * ice_load_pkg_file - Load the DDP package file using firmware_get
+ * @sc: device private softc
+ *
+ * Use firmware_get to load the DDP package memory and then request that
+ * firmware download the package contents and program the relevant hardware
+ * bits.
+ *
+ * This function makes a copy of the DDP package memory which is tracked in
+ * the ice_hw structure. The copy will be managed and released by
+ * ice_deinit_hw(). This allows the firmware reference to be immediately
+ * released using firmware_put.
+ */
+void
+ice_load_pkg_file(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ const struct firmware *pkg;
+
+ pkg = firmware_get("ice_ddp");
+ if (!pkg) {
+ device_printf(dev, "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n");
+ if (cold)
+ device_printf(dev,
+ "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n");
+ ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);
+ return;
+ }
+
+ /* Copy and download the pkg contents */
+ status = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize);
+
+ /* Release the firmware reference */
+ firmware_put(pkg, FIRMWARE_UNLOAD);
+
+ /* Check the active DDP package version and log a message */
+ ice_log_pkg_init(sc, &status);
+
+ /* Place the driver into safe mode */
+ if (status != ICE_SUCCESS) {
+ ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);
+ }
+}
+
+/**
+ * ice_get_ifnet_counter - Retrieve counter value for a given ifnet counter
+ * @vsi: the vsi to retrieve the value for
+ * @counter: the counter type to retrieve
+ *
+ * Returns the value for a given ifnet counter. To do so, we calculate the
+ * value based on the matching hardware statistics.
+ */
+uint64_t
+ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
+{
+ struct ice_hw_port_stats *hs = &vsi->sc->stats.cur;
+ struct ice_eth_stats *es = &vsi->hw_stats.cur;
+
+ /* For some statistics, especially those related to error flows, we do
+ * not have per-VSI counters. In this case, we just report the global
+ * counters.
+ */
+
+ switch (counter) {
+ case IFCOUNTER_IPACKETS:
+ return (es->rx_unicast + es->rx_multicast + es->rx_broadcast);
+ case IFCOUNTER_IERRORS:
+ return (hs->crc_errors + hs->illegal_bytes +
+ hs->mac_local_faults + hs->mac_remote_faults +
+ hs->rx_len_errors + hs->rx_undersize +
+ hs->rx_oversize + hs->rx_fragments + hs->rx_jabber);
+ case IFCOUNTER_OPACKETS:
+ return (es->tx_unicast + es->tx_multicast + es->tx_broadcast);
+ case IFCOUNTER_OERRORS:
+ return (es->tx_errors);
+ case IFCOUNTER_COLLISIONS:
+ return (0);
+ case IFCOUNTER_IBYTES:
+ return (es->rx_bytes);
+ case IFCOUNTER_OBYTES:
+ return (es->tx_bytes);
+ case IFCOUNTER_IMCASTS:
+ return (es->rx_multicast);
+ case IFCOUNTER_OMCASTS:
+ return (es->tx_multicast);
+ case IFCOUNTER_IQDROPS:
+ return (es->rx_discards);
+ case IFCOUNTER_OQDROPS:
+ return (hs->tx_dropped_link_down);
+ case IFCOUNTER_NOPROTO:
+ return (es->rx_unknown_protocol);
+ default:
+ return if_get_counter_default(vsi->sc->ifp, counter);
+ }
+}
+
+/**
+ * ice_save_pci_info - Save PCI configuration fields in HW struct
+ * @hw: the ice_hw struct to save the PCI information in
+ * @dev: the device to get the PCI information from
+ *
+ * This should only be called once, early in the device attach
+ * process.
+ */
+void
+ice_save_pci_info(struct ice_hw *hw, device_t dev)
+{
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->subsystem_vendor_id = pci_get_subvendor(dev);
+ hw->subsystem_device_id = pci_get_subdevice(dev);
+ hw->revision_id = pci_get_revid(dev);
+ hw->bus.device = pci_get_slot(dev);
+ hw->bus.func = pci_get_function(dev);
+}
+
+/**
+ * ice_replay_all_vsi_cfg - Replay configuration for all VSIs after reset
+ * @sc: the device softc
+ *
+ * Replay the configuration for each VSI, and then clean up the replay
+ * information. Called after a hardware reset in order to reconfigure the
+ * active VSIs.
+ */
+int
+ice_replay_all_vsi_cfg(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+ int i;
+
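+ /* Replay the stored configuration for each VSI that is still allocated */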
+ for (i = 0 ; i < sc->num_available_vsi; i++) {
+ struct ice_vsi *vsi = sc->all_vsi[i];
+
+ if (!vsi)
+ continue;
+
+ status = ice_replay_vsi(hw, vsi->idx);
+ if (status) {
+ device_printf(sc->dev, "Failed to replay VSI %d, err %s aq_err %s\n",
+ vsi->idx, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+ }
+
+ /* Cleanup replay filters after successful reconfiguration */
+ ice_replay_post(hw);
+ return (0);
+}
+
+/**
+ * ice_clean_vsi_rss_cfg - Cleanup RSS configuration for a given VSI
+ * @vsi: pointer to the VSI structure
+ *
+ * Cleanup the advanced RSS configuration for a given VSI. This is necessary
+ * during driver removal to ensure that all RSS resources are properly
+ * released.
+ *
+ * @remark this function doesn't report an error as it is expected to be
+ * called during driver reset and unload, and there isn't much the driver can
+ * do if freeing RSS resources fails.
+ */
+static void
+ice_clean_vsi_rss_cfg(struct ice_vsi *vsi)
+{
+ struct ice_softc *sc = vsi->sc;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+
+ status = ice_rem_vsi_rss_cfg(hw, vsi->idx);
+ if (status)
+ device_printf(dev,
+ "Failed to remove RSS configuration for VSI %d, err %s\n",
+ vsi->idx, ice_status_str(status));
+
+ /* Remove this VSI from the RSS list */
+ ice_rem_vsi_rss_list(hw, vsi->idx);
+}
+
+/**
+ * ice_clean_all_vsi_rss_cfg - Cleanup RSS configuration for all VSIs
+ * @sc: the device softc pointer
+ *
+ * Cleanup the advanced RSS configuration for all VSIs on a given PF
+ * interface.
+ *
+ * @remark This should be called while preparing for a reset, to cleanup stale
+ * RSS configuration for all VSIs.
+ */
+void
+ice_clean_all_vsi_rss_cfg(struct ice_softc *sc)
+{
+ int i;
+
+ /* No need to cleanup if RSS is not enabled */
+ if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_RSS))
+ return;
+
+ for (i = 0; i < sc->num_available_vsi; i++) {
+ struct ice_vsi *vsi = sc->all_vsi[i];
+
+ if (vsi)
+ ice_clean_vsi_rss_cfg(vsi);
+ }
+}
+
+/**
+ * ice_requested_fec_mode - Return the requested FEC mode as a string
+ * @pi: The port info structure
+ *
+ * Return a string representing the requested FEC mode.
+ */
+static const char *
+ice_requested_fec_mode(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ enum ice_status status;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ &pcaps, NULL);
+ if (status)
+ /* Just report unknown if we can't get capabilities */
+ return "Unknown";
+
+ /* Check if RS-FEC has been requested first */
+ if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+ ICE_AQC_PHY_FEC_25G_RS_544_REQ))
+ return ice_fec_str(ICE_FEC_RS);
+
+ /* If RS FEC has not been requested, then check BASE-R */
+ if (pcaps.link_fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+ ICE_AQC_PHY_FEC_25G_KR_REQ))
+ return ice_fec_str(ICE_FEC_BASER);
+
+ return ice_fec_str(ICE_FEC_NONE);
+}
+
+/**
+ * ice_negotiated_fec_mode - Return the negotiated FEC mode as a string
+ * @pi: The port info structure
+ *
+ * Return a string representing the current FEC mode.
+ */
+static const char *
+ice_negotiated_fec_mode(struct ice_port_info *pi)
+{
+ /* Check first whether RS FEC is currently enabled */
+ if (pi->phy.link_info.fec_info & (ICE_AQ_LINK_25G_RS_528_FEC_EN |
+ ICE_AQ_LINK_25G_RS_544_FEC_EN))
+ return ice_fec_str(ICE_FEC_RS);
+
+ /* If RS FEC is not enabled, then check BASE-R */
+ if (pi->phy.link_info.fec_info & ICE_AQ_LINK_25G_KR_FEC_EN)
+ return ice_fec_str(ICE_FEC_BASER);
+
+ return ice_fec_str(ICE_FEC_NONE);
+}
+
+/**
+ * ice_autoneg_mode - Return string indicating whether autoneg completed
+ * @pi: The port info structure
+ *
+ * Return "True" if autonegotiation is completed, "False" otherwise.
+ */
+static const char *
+ice_autoneg_mode(struct ice_port_info *pi)
+{
+ if (pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)
+ return "True";
+ else
+ return "False";
+}
+
+/**
+ * ice_flowcontrol_mode - Return string indicating the Flow Control mode
+ * @pi: The port info structure
+ *
+ * Returns the current Flow Control mode as a string.
+ */
+static const char *
+ice_flowcontrol_mode(struct ice_port_info *pi)
+{
+ return ice_fc_str(pi->fc.current_mode);
+}
+
+/**
+ * ice_link_up_msg - Log a link up message with associated info
+ * @sc: the device private softc
+ *
+ * Log a link up message with LOG_NOTICE message level. Include information
+ * about the duplex, FEC mode, autonegotiation and flow control.
+ */
+void
+ice_link_up_msg(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct ifnet *ifp = sc->ifp;
+ const char *speed, *req_fec, *neg_fec, *autoneg, *flowcontrol;
+
+ speed = ice_aq_speed_to_str(hw->port_info);
+ req_fec = ice_requested_fec_mode(hw->port_info);
+ neg_fec = ice_negotiated_fec_mode(hw->port_info);
+ autoneg = ice_autoneg_mode(hw->port_info);
+ flowcontrol = ice_flowcontrol_mode(hw->port_info);
+
+ log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ ifp->if_xname, speed, req_fec, neg_fec, autoneg, flowcontrol);
+}
+
+/**
+ * ice_update_laa_mac - Update MAC address if Locally Administered
+ * @sc: the device softc
+ *
+ * Update the device MAC address when a Locally Administered Address is
+ * assigned.
+ *
+ * This function does *not* update the MAC filter list itself. Instead, it
+ * should be called after ice_rm_pf_default_mac_filters, so that the previous
+ * address filter will be removed, and before ice_cfg_pf_default_mac_filters,
+ * so that the new address filter will be assigned.
+ */
+int
+ice_update_laa_mac(struct ice_softc *sc)
+{
+ const u8 *lladdr = (const u8 *)IF_LLADDR(sc->ifp);
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+
+ /* If the address is the same, then there is nothing to update */
+ if (!memcmp(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN))
+ return (0);
+
+ /* Reject Multicast addresses */
+ if (ETHER_IS_MULTICAST(lladdr))
+ return (EINVAL);
+
+ status = ice_aq_manage_mac_write(hw, lladdr, ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL);
+ if (status) {
+ device_printf(sc->dev, "Failed to write mac %6D to firmware, err %s aq_err %s\n",
+ lladdr, ":", ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EFAULT);
+ }
+
+ /* Copy the address into place of the LAN address. */
+ bcopy(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN);
+
+ return (0);
+}
+
+/**
+ * ice_get_and_print_bus_info - Save (PCI) bus info and print messages
+ * @sc: device softc
+ *
+ * This will potentially print out a warning message if bus bandwidth
+ * is insufficient for full-speed operation.
+ *
+ * This should only be called once, during the attach process, after
+ * hw->port_info has been filled out with port link topology information
+ * (from the Get PHY Capabilities Admin Queue command).
+ */
+void
+ice_get_and_print_bus_info(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ u16 pci_link_status;
+ int offset;
+
+ pci_find_cap(dev, PCIY_EXPRESS, &offset);
+ pci_link_status = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
+
+ /* Fill out hw struct with PCIE link status info */
+ ice_set_pci_link_status_data(hw, pci_link_status);
+
+ /* Use info to print out bandwidth messages */
+ ice_print_bus_link_data(dev, hw);
+
+ if (ice_pcie_bandwidth_check(sc)) {
+ device_printf(dev,
+ "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
+ device_printf(dev,
+ "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
+ }
+}
+
+/**
+ * ice_pcie_bus_speed_to_rate - Convert driver bus speed enum value to
+ * a 64-bit baudrate.
+ * @speed: enum value to convert
+ *
+ * This only goes up to PCIE Gen 4.
+ */
+static uint64_t
+ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed)
+{
+ /* If the PCI-E speed is Gen1 or Gen2, then report
+ * only 80% of bus speed to account for encoding overhead.
+ */
+ switch (speed) {
+ case ice_pcie_speed_2_5GT:
+ return IF_Gbps(2);
+ case ice_pcie_speed_5_0GT:
+ return IF_Gbps(4);
+ case ice_pcie_speed_8_0GT:
+ return IF_Gbps(8);
+ case ice_pcie_speed_16_0GT:
+ return IF_Gbps(16);
+ case ice_pcie_speed_unknown:
+ default:
+ return 0;
+ }
+}
+
+/**
+ * ice_pcie_lnk_width_to_int - Convert driver pci-e width enum value to
+ * a 32-bit number.
+ * @width: enum value to convert
+ */
+static int
+ice_pcie_lnk_width_to_int(enum ice_pcie_link_width width)
+{
+ switch (width) {
+ case ice_pcie_lnk_x1:
+ return (1);
+ case ice_pcie_lnk_x2:
+ return (2);
+ case ice_pcie_lnk_x4:
+ return (4);
+ case ice_pcie_lnk_x8:
+ return (8);
+ case ice_pcie_lnk_x12:
+ return (12);
+ case ice_pcie_lnk_x16:
+ return (16);
+ case ice_pcie_lnk_x32:
+ return (32);
+ case ice_pcie_lnk_width_resrv:
+ case ice_pcie_lnk_width_unknown:
+ default:
+ return (0);
+ }
+}
+
+/**
+ * ice_pcie_bandwidth_check - Check if PCI-E bandwidth is sufficient for
+ * full-speed device operation.
+ * @sc: adapter softc
+ *
+ * Returns 0 if sufficient; 1 if not.
+ */
+static uint8_t
+ice_pcie_bandwidth_check(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ int num_ports, pcie_width;
+ u64 pcie_speed, port_speed;
+
+ MPASS(hw->port_info);
+
+ num_ports = bitcount32(hw->func_caps.common_cap.valid_functions);
+ port_speed = ice_phy_types_to_max_rate(hw->port_info);
+ pcie_speed = ice_pcie_bus_speed_to_rate(hw->bus.speed);
+ pcie_width = ice_pcie_lnk_width_to_int(hw->bus.width);
+
+ /*
+ * If 2x100, clamp ports to 1 -- 2nd port is intended for
+ * failover.
+ */
+ if (port_speed == IF_Gbps(100))
+ num_ports = 1;
+
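+ /* Bandwidth is insufficient if the combined maximum rate of all ports
+ * exceeds the negotiated PCIe rate (per-lane rate times link width).
+ */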
+ return !!((num_ports * port_speed) > pcie_speed * pcie_width);
+}
+
+/**
+ * ice_print_bus_link_data - Print PCI-E bandwidth information
+ * @dev: device to print string for
+ * @hw: hw struct with PCI-e link information
+ */
+static void
+ice_print_bus_link_data(device_t dev, struct ice_hw *hw)
+{
+ device_printf(dev, "PCI Express Bus: Speed %s %s\n",
+ ((hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" :
+ (hw->bus.speed == ice_pcie_speed_8_0GT) ? "8.0GT/s" :
+ (hw->bus.speed == ice_pcie_speed_5_0GT) ? "5.0GT/s" :
+ (hw->bus.speed == ice_pcie_speed_2_5GT) ? "2.5GT/s" : "Unknown"),
+ (hw->bus.width == ice_pcie_lnk_x32) ? "Width x32" :
+ (hw->bus.width == ice_pcie_lnk_x16) ? "Width x16" :
+ (hw->bus.width == ice_pcie_lnk_x12) ? "Width x12" :
+ (hw->bus.width == ice_pcie_lnk_x8) ? "Width x8" :
+ (hw->bus.width == ice_pcie_lnk_x4) ? "Width x4" :
+ (hw->bus.width == ice_pcie_lnk_x2) ? "Width x2" :
+ (hw->bus.width == ice_pcie_lnk_x1) ? "Width x1" : "Width Unknown");
+}
+
+/**
+ * ice_set_pci_link_status_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the ice_hw structure
+ **/
+static void
+ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status)
+{
+ u16 reg;
+
+ hw->bus.type = ice_bus_pci_express;
+
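+ /* The negotiated link width field gives the lane count, which matches
+ * the ice_pcie_link_width enum values used below.
+ */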
+ reg = (link_status & PCIEM_LINK_STA_WIDTH) >> 4;
+
+ switch (reg) {
+ case ice_pcie_lnk_x1:
+ case ice_pcie_lnk_x2:
+ case ice_pcie_lnk_x4:
+ case ice_pcie_lnk_x8:
+ case ice_pcie_lnk_x12:
+ case ice_pcie_lnk_x16:
+ case ice_pcie_lnk_x32:
+ hw->bus.width = (enum ice_pcie_link_width)reg;
+ break;
+ default:
+ hw->bus.width = ice_pcie_lnk_width_unknown;
+ break;
+ }
+
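+ /* The raw link speed field is offset so that it lines up with the
+ * ice_pcie_bus_speed enum values used below.
+ */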
+ reg = (link_status & PCIEM_LINK_STA_SPEED) + 0x14;
+
+ switch (reg) {
+ case ice_pcie_speed_2_5GT:
+ case ice_pcie_speed_5_0GT:
+ case ice_pcie_speed_8_0GT:
+ case ice_pcie_speed_16_0GT:
+ hw->bus.speed = (enum ice_pcie_bus_speed)reg;
+ break;
+ default:
+ hw->bus.speed = ice_pcie_speed_unknown;
+ break;
+ }
+}
+
+/**
+ * ice_init_link_events - Initialize Link Status Events mask
+ * @sc: the device softc
+ *
+ * Initialize the Link Status Events mask to disable notification of link
+ * events we don't care about in software. Also request that link status
+ * events be enabled.
+ */
+int
+ice_init_link_events(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+ u16 wanted_events;
+
+ /* Set the bits for the events that we want to be notified by */
+ wanted_events = (ICE_AQ_LINK_EVENT_UPDOWN |
+ ICE_AQ_LINK_EVENT_MEDIA_NA |
+ ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL);
+
+ /* request that every event except the wanted events be masked */
+ status = ice_aq_set_event_mask(hw, hw->port_info->lport, ~wanted_events, NULL);
+ if (status) {
+ device_printf(sc->dev,
+ "Failed to set link status event mask, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ /* Request link info with the LSE bit set to enable link status events */
+ status = ice_aq_get_link_info(hw->port_info, true, NULL, NULL);
+ if (status) {
+ device_printf(sc->dev,
+ "Failed to enable link status events, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ return (0);
+}
+
+/**
+ * ice_handle_mdd_event - Handle possibly malicious events
+ * @sc: the device softc
+ *
+ * Called by the admin task if an MDD detection interrupt is triggered.
+ * Identifies possibly malicious events coming from VFs. It also triggers
+ * for similar incorrect behavior from the PF.
+ */
+void
+ice_handle_mdd_event(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ bool mdd_detected = false, request_reinit = false;
+ device_t dev = sc->dev;
+ u32 reg;
+
+ if (!ice_testandclear_state(&sc->state, ICE_STATE_MDD_PENDING))
+ return;
+
+ reg = rd32(hw, GL_MDET_TX_TCLAN);
+ if (reg & GL_MDET_TX_TCLAN_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> GL_MDET_TX_TCLAN_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> GL_MDET_TX_TCLAN_VF_NUM_S;
+ u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> GL_MDET_TX_TCLAN_MAL_TYPE_S;
+ u16 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S;
+
+ device_printf(dev, "Malicious Driver Detection Tx Descriptor check event '%s' on Tx queue %u PF# %u VF# %u\n",
+ ice_mdd_tx_tclan_str(event), queue, pf_num, vf_num);
+
+ /* Only clear this event if it matches this PF, that way other
+ * PFs can read the event and determine VF and queue number.
+ */
+ if (pf_num == hw->pf_id)
+ wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
+
+ mdd_detected = true;
+ }
+
+ /* Determine what triggered the MDD event */
+ reg = rd32(hw, GL_MDET_TX_PQM);
+ if (reg & GL_MDET_TX_PQM_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> GL_MDET_TX_PQM_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> GL_MDET_TX_PQM_VF_NUM_S;
+ u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> GL_MDET_TX_PQM_MAL_TYPE_S;
+ u16 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> GL_MDET_TX_PQM_QNUM_S;
+
+ device_printf(dev, "Malicious Driver Detection Tx Quanta check event '%s' on Tx queue %u PF# %u VF# %u\n",
+ ice_mdd_tx_pqm_str(event), queue, pf_num, vf_num);
+
+ /* Only clear this event if it matches this PF, that way other
+ * PFs can read the event and determine VF and queue number.
+ */
+ if (pf_num == hw->pf_id)
+ wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
+
+ mdd_detected = true;
+ }
+
+ reg = rd32(hw, GL_MDET_RX);
+ if (reg & GL_MDET_RX_VALID_M) {
+ u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> GL_MDET_RX_PF_NUM_S;
+ u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> GL_MDET_RX_VF_NUM_S;
+ u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> GL_MDET_RX_MAL_TYPE_S;
+ u16 queue = (reg & GL_MDET_RX_QNUM_M) >> GL_MDET_RX_QNUM_S;
+
+ device_printf(dev, "Malicious Driver Detection Rx event '%s' on Rx queue %u PF# %u VF# %u\n",
+ ice_mdd_rx_str(event), queue, pf_num, vf_num);
+
+ /* Only clear this event if it matches this PF, that way other
+ * PFs can read the event and determine VF and queue number.
+ */
+ if (pf_num == hw->pf_id)
+ wr32(hw, GL_MDET_RX, 0xffffffff);
+
+ mdd_detected = true;
+ }
+
+ /* Now, confirm that this event actually affects this PF, by checking
+ * the PF registers.
+ */
+ if (mdd_detected) {
+ reg = rd32(hw, PF_MDET_TX_TCLAN);
+ if (reg & PF_MDET_TX_TCLAN_VALID_M) {
+ wr32(hw, PF_MDET_TX_TCLAN, 0xffff);
+ sc->soft_stats.tx_mdd_count++;
+ request_reinit = true;
+ }
+
+ reg = rd32(hw, PF_MDET_TX_PQM);
+ if (reg & PF_MDET_TX_PQM_VALID_M) {
+ wr32(hw, PF_MDET_TX_PQM, 0xffff);
+ sc->soft_stats.tx_mdd_count++;
+ request_reinit = true;
+ }
+
+ reg = rd32(hw, PF_MDET_RX);
+ if (reg & PF_MDET_RX_VALID_M) {
+ wr32(hw, PF_MDET_RX, 0xffff);
+ sc->soft_stats.rx_mdd_count++;
+ request_reinit = true;
+ }
+ }
+
+ /* TODO: Implement logic to detect and handle events caused by VFs. */
+
+ /* request that the upper stack re-initialize the Tx/Rx queues */
+ if (request_reinit)
+ ice_request_stack_reinit(sc);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_init_dcb_setup - Initialize DCB settings for HW
+ * @sc: the device softc
+ *
+ * This needs to be called after the fw_lldp_agent sysctl is added, since that
+ * can update the device's LLDP agent status if a tunable value is set.
+ *
+ * Get and store the initial state of DCB settings on driver load. Print out
+ * informational messages as well.
+ */
+void
+ice_init_dcb_setup(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ bool dcbx_agent_status;
+ enum ice_status status;
+
+ /* Don't do anything if DCB isn't supported */
+ if (!hw->func_caps.common_cap.dcb) {
+ device_printf(dev, "%s: No DCB support\n",
+ __func__);
+ return;
+ }
+
+ hw->port_info->dcbx_status = ice_get_dcbx_status(hw);
+ if (hw->port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
+ hw->port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
+ /*
+ * Start DCBX agent, but not LLDP. The return value isn't
+ * checked here because a more detailed dcbx agent status is
+ * retrieved and checked in ice_init_dcb() and below.
+ */
+ ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL);
+ }
+
+ /* This sets hw->port_info->is_sw_lldp */
+ status = ice_init_dcb(hw, true);
+
+ /* If there is an error, then FW LLDP is not in a usable state */
+ if (status != 0 && status != ICE_ERR_NOT_READY) {
+ /* Don't print an error message if the return code from the AQ
+ * cmd performed in ice_init_dcb() is EPERM; that means the
+ * FW LLDP engine is disabled, and that is a valid state.
+ */
+ if (!(status == ICE_ERR_AQ_ERROR &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EPERM)) {
+ device_printf(dev, "DCB init failed, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+ hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
+ }
+
+ switch (hw->port_info->dcbx_status) {
+ case ICE_DCBX_STATUS_DIS:
+ ice_debug(hw, ICE_DBG_DCB, "DCBX disabled\n");
+ break;
+ case ICE_DCBX_STATUS_NOT_STARTED:
+ ice_debug(hw, ICE_DBG_DCB, "DCBX not started\n");
+ break;
+ case ICE_DCBX_STATUS_MULTIPLE_PEERS:
+ ice_debug(hw, ICE_DBG_DCB, "DCBX detected multiple peers\n");
+ break;
+ default:
+ break;
+ }
+
+ /* LLDP disabled in FW */
+ if (hw->port_info->is_sw_lldp) {
+ ice_add_rx_lldp_filter(sc);
+ device_printf(dev, "Firmware LLDP agent disabled\n");
+ } else {
+ ice_del_rx_lldp_filter(sc);
+ }
+}
+
+/**
+ * ice_handle_mib_change_event - helper function to log LLDP MIB change events
+ * @sc: device softc
+ * @event: event received on a control queue
+ *
+ * Prints out the type of an LLDP MIB change event in a DCB debug message.
+ *
+ * XXX: Should be extended to do more if the driver decides to notify other SW
+ * of LLDP MIB changes, or needs to extract info from the MIB.
+ */
+static void
+ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event)
+{
+ struct ice_aqc_lldp_get_mib *params =
+ (struct ice_aqc_lldp_get_mib *)&event->desc.params.lldp_get_mib;
+ u8 mib_type, bridge_type, tx_status;
+
+ /* XXX: To get the contents of the MIB that caused the event, set the
+ * ICE_DBG_AQ debug mask and read that output
+ */
+ static const char* mib_type_strings[] = {
+ "Local MIB",
+ "Remote MIB",
+ "Reserved",
+ "Reserved"
+ };
+ static const char* bridge_type_strings[] = {
+ "Nearest Bridge",
+ "Non-TPMR Bridge",
+ "Reserved",
+ "Reserved"
+ };
+ static const char* tx_status_strings[] = {
+ "Port's TX active",
+ "Port's TX suspended and drained",
+ "Reserved",
+ "Port's TX suspended and srained; blocked TC pipe flushed"
+ };
+
+ mib_type = (params->type & ICE_AQ_LLDP_MIB_TYPE_M) >>
+ ICE_AQ_LLDP_MIB_TYPE_S;
+ bridge_type = (params->type & ICE_AQ_LLDP_BRID_TYPE_M) >>
+ ICE_AQ_LLDP_BRID_TYPE_S;
+ tx_status = (params->type & ICE_AQ_LLDP_TX_M) >>
+ ICE_AQ_LLDP_TX_S;
+
+ ice_debug(&sc->hw, ICE_DBG_DCB, "LLDP MIB Change Event (%s, %s, %s)\n",
+ mib_type_strings[mib_type], bridge_type_strings[bridge_type],
+ tx_status_strings[tx_status]);
+}
+
+/**
+ * ice_send_version - Send driver version to firmware
+ * @sc: the device private softc
+ *
+ * Send the driver version to the firmware. This must be called as early as
+ * possible after ice_init_hw().
+ */
+int
+ice_send_version(struct ice_softc *sc)
+{
+ struct ice_driver_ver driver_version = {0};
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+
+ driver_version.major_ver = ice_major_version;
+ driver_version.minor_ver = ice_minor_version;
+ driver_version.build_ver = ice_patch_version;
+ driver_version.subbuild_ver = ice_rc_version;
+
+ strlcpy((char *)driver_version.driver_string, ice_driver_version,
+ sizeof(driver_version.driver_string));
+
+ status = ice_aq_send_driver_ver(hw, &driver_version, NULL);
+ if (status) {
+ device_printf(dev, "Unable to send driver version to firmware, err %s aq_err %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+
+ return (0);
+}
+
+/**
+ * ice_handle_lan_overflow_event - helper function to log LAN overflow events
+ * @sc: device softc
+ * @event: event received on a control queue
+ *
+ * Prints out a message when a LAN overflow event is detected on a receive
+ * queue.
+ */
+static void
+ice_handle_lan_overflow_event(struct ice_softc *sc, struct ice_rq_event_info *event)
+{
+ struct ice_aqc_event_lan_overflow *params =
+ (struct ice_aqc_event_lan_overflow *)&event->desc.params.lan_overflow;
+ struct ice_hw *hw = &sc->hw;
+
+ ice_debug(hw, ICE_DBG_DCB, "LAN overflow event detected, prtdcb_ruptq=0x%08x, qtx_ctl=0x%08x\n",
+ LE32_TO_CPU(params->prtdcb_ruptq),
+ LE32_TO_CPU(params->qtx_ctl));
+}
+
+/**
+ * ice_add_ethertype_to_list - Add an Ethertype filter to a filter list
+ * @vsi: the VSI to target packets to
+ * @list: the list to add the filter to
+ * @ethertype: the Ethertype to filter on
+ * @direction: The direction of the filter (Tx or Rx)
+ * @action: the action to take
+ *
+ * Add an Ethertype filter to a filter list. Used to forward a series of
+ * filters to the firmware for configuring the switch.
+ *
+ * Returns 0 on success, and an error code on failure.
+ */
+static int
+ice_add_ethertype_to_list(struct ice_vsi *vsi, struct ice_list_head *list,
+ u16 ethertype, u16 direction,
+ enum ice_sw_fwd_act_type action)
+{
+ struct ice_fltr_list_entry *entry;
+
+ MPASS((direction == ICE_FLTR_TX) || (direction == ICE_FLTR_RX));
+
+ entry = (__typeof(entry))malloc(sizeof(*entry), M_ICE, M_NOWAIT|M_ZERO);
+ if (!entry)
+ return (ENOMEM);
+
+ entry->fltr_info.flag = direction;
+ entry->fltr_info.src_id = ICE_SRC_ID_VSI;
+ entry->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ entry->fltr_info.fltr_act = action;
+ entry->fltr_info.vsi_handle = vsi->idx;
+ entry->fltr_info.l_data.ethertype_mac.ethertype = ethertype;
+
+ LIST_ADD(&entry->list_entry, list);
+
+ return 0;
+}
+
+#define ETHERTYPE_PAUSE_FRAMES 0x8808
+#define ETHERTYPE_LLDP_FRAMES 0x88cc
+
+/**
+ * ice_cfg_pf_ethertype_filters - Configure switch to drop ethertypes
+ * @sc: the device private softc
+ *
+ * Configure the switch to drop PAUSE frames and LLDP frames transmitted from
+ * the host. This prevents malicious VFs from sending these frames and being
+ * able to control or configure the network.
+ */
+int
+ice_cfg_pf_ethertype_filters(struct ice_softc *sc)
+{
+ struct ice_list_head ethertype_list;
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ int err = 0;
+
+ INIT_LIST_HEAD(&ethertype_list);
+
+ /*
+ * Note that the switch filters will ignore the VSI index for the drop
+ * action, so we only need to program drop filters once for the main
+ * VSI.
+ */
+
+ /* Configure switch to drop all Tx pause frames coming from any VSI. */
+ if (sc->enable_tx_fc_filter) {
+ err = ice_add_ethertype_to_list(vsi, &ethertype_list,
+ ETHERTYPE_PAUSE_FRAMES,
+ ICE_FLTR_TX, ICE_DROP_PACKET);
+ if (err)
+ goto free_ethertype_list;
+ }
+
+ /* Configure switch to drop LLDP frames coming from any VSI */
+ if (sc->enable_tx_lldp_filter) {
+ err = ice_add_ethertype_to_list(vsi, &ethertype_list,
+ ETHERTYPE_LLDP_FRAMES,
+ ICE_FLTR_TX, ICE_DROP_PACKET);
+ if (err)
+ goto free_ethertype_list;
+ }
+
+ status = ice_add_eth_mac(hw, &ethertype_list);
+ if (status) {
+ device_printf(dev,
+ "Failed to add Tx Ethertype filters, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ err = (EIO);
+ }
+
+free_ethertype_list:
+ ice_free_fltr_list(&ethertype_list);
+ return err;
+}
+
+/**
+ * ice_add_rx_lldp_filter - add ethertype filter for Rx LLDP frames
+ * @sc: the device private structure
+ *
+ * Add a switch ethertype filter which forwards the LLDP frames to the main PF
+ * VSI. Called when the fw_lldp_agent is disabled, to allow the LLDP frames to
+ * be forwarded to the stack.
+ */
+static void
+ice_add_rx_lldp_filter(struct ice_softc *sc)
+{
+ struct ice_list_head ethertype_list;
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ int err;
+
+ INIT_LIST_HEAD(&ethertype_list);
+
+ /* Forward Rx LLDP frames to the stack */
+ err = ice_add_ethertype_to_list(vsi, &ethertype_list,
+ ETHERTYPE_LLDP_FRAMES,
+ ICE_FLTR_RX, ICE_FWD_TO_VSI);
+ if (err) {
+ device_printf(dev,
+ "Failed to add Rx LLDP filter, err %s\n",
+ ice_err_str(err));
+ goto free_ethertype_list;
+ }
+
+ status = ice_add_eth_mac(hw, &ethertype_list);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+ ; /* Don't complain if we try to add a filter that already exists */
+ } else if (status) {
+ device_printf(dev,
+ "Failed to add Rx LLDP filter, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+
+free_ethertype_list:
+ ice_free_fltr_list(&ethertype_list);
+}
+
+/**
+ * ice_del_rx_lldp_filter - Remove ethertype filter for Rx LLDP frames
+ * @sc: the device private structure
+ *
+ * Remove the switch filter forwarding LLDP frames to the main PF VSI, called
+ * when the firmware LLDP agent is enabled, to stop routing LLDP frames to the
+ * stack.
+ */
+static void
+ice_del_rx_lldp_filter(struct ice_softc *sc)
+{
+ struct ice_list_head ethertype_list;
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ int err;
+
+ INIT_LIST_HEAD(&ethertype_list);
+
+ /* Remove filter forwarding Rx LLDP frames to the stack */
+ err = ice_add_ethertype_to_list(vsi, &ethertype_list,
+ ETHERTYPE_LLDP_FRAMES,
+ ICE_FLTR_RX, ICE_FWD_TO_VSI);
+ if (err) {
+ device_printf(dev,
+ "Failed to remove Rx LLDP filter, err %s\n",
+ ice_err_str(err));
+ goto free_ethertype_list;
+ }
+
+ status = ice_remove_eth_mac(hw, &ethertype_list);
+ if (status == ICE_ERR_DOES_NOT_EXIST) {
+ ; /* Don't complain if we try to remove a filter that doesn't exist */
+ } else if (status) {
+ device_printf(dev,
+ "Failed to remove Rx LLDP filter, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+
+free_ethertype_list:
+ ice_free_fltr_list(&ethertype_list);
+}
+
+/**
+ * ice_init_link_configuration -- Set up link in different ways depending
+ * on whether media is available or not.
+ * @sc: device private structure
+ *
+ * Called at the end of the attach process to either set default link
+ * parameters if there is media available, or force HW link down and
+ * set a state bit if there is no media.
+ */
+void
+ice_init_link_configuration(struct ice_softc *sc)
+{
+ struct ice_port_info *pi = sc->hw.port_info;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+
+ pi->phy.get_link_info = true;
+ status = ice_get_link_status(pi, &sc->link_up);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_get_link_status failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return;
+ }
+
+ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA);
+ /* Apply default link settings */
+ ice_apply_saved_phy_cfg(sc);
+ } else {
+ /* Set link down, and poll for media available in timer. This prevents the
+ * driver from receiving spurious link-related events.
+ */
+ ice_set_state(&sc->state, ICE_STATE_NO_MEDIA);
+ status = ice_aq_set_link_restart_an(pi, false, NULL);
+ if (status != ICE_SUCCESS)
+ device_printf(dev,
+ "%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+}
+
+/**
+ * ice_apply_saved_phy_req_to_cfg -- Write saved user PHY settings to cfg data
+ * @pi: port info struct
+ * @pcaps: TOPO_CAPS capability data to use for defaults
+ * @cfg: new PHY config data to be modified
+ *
+ * Applies user settings for advertised speeds to the PHY type fields in the
+ * supplied PHY config struct. It uses the data from pcaps to check if the
+ * saved settings are invalid and uses the pcaps data instead if they are
+ * invalid.
+ */
+static void
+ice_apply_saved_phy_req_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *pcaps,
+ struct ice_aqc_set_phy_cfg_data *cfg)
+{
+ u64 phy_low = 0, phy_high = 0;
+
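+ /* Convert the cached user speed request into PHY type bitmasks and
+ * restrict them to the capabilities reported in pcaps.
+ */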
+ ice_update_phy_type(&phy_low, &phy_high, pi->phy.curr_user_speed_req);
+ cfg->phy_type_low = pcaps->phy_type_low & htole64(phy_low);
+ cfg->phy_type_high = pcaps->phy_type_high & htole64(phy_high);
+
+ /* Can't use saved user speed request; use NVM default PHY capabilities */
+ if (!cfg->phy_type_low && !cfg->phy_type_high) {
+ cfg->phy_type_low = pcaps->phy_type_low;
+ cfg->phy_type_high = pcaps->phy_type_high;
+ }
+}
+
+/**
+ * ice_apply_saved_fec_req_to_cfg -- Write saved user FEC mode to cfg data
+ * @pi: port info struct
+ * @pcaps: TOPO_CAPS capability data to use for defaults
+ * @cfg: new PHY config data to be modified
+ *
+ * Applies user setting for FEC mode to PHY config struct. It uses the data
+ * from pcaps to check if the saved settings are invalid and uses the pcaps
+ * data instead if they are invalid.
+ */
+static void
+ice_apply_saved_fec_req_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *pcaps,
+ struct ice_aqc_set_phy_cfg_data *cfg)
+{
+ ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
+
+ /* Can't use saved user FEC mode; use NVM default PHY capabilities */
+ if (cfg->link_fec_opt &&
+ !(cfg->link_fec_opt & pcaps->link_fec_options)) {
+ cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+ }
+}
+
+/**
+ * ice_apply_saved_fc_req_to_cfg -- Write saved user flow control mode to cfg data
+ * @pi: port info struct
+ * @cfg: new PHY config data to be modified
+ *
+ * Applies the user setting for flow control mode to the PHY config struct.
+ * There is no notion of an invalid flow control setting; any unrecognized
+ * value is treated like "ICE_FC_NONE".
+ */
+static void
+ice_apply_saved_fc_req_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_set_phy_cfg_data *cfg)
+{
+ cfg->caps &= ~(ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY |
+ ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY);
+
+ switch (pi->phy.curr_user_fc_req) {
+ case ICE_FC_FULL:
+ cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY |
+ ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY;
+ break;
+ case ICE_FC_RX_PAUSE:
+ cfg->caps |= ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY;
+ break;
+ case ICE_FC_TX_PAUSE:
+ cfg->caps |= ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY;
+ break;
+ default:
+ /* ICE_FC_NONE */
+ break;
+ }
+}
+
+/**
+ * ice_apply_saved_user_req_to_cfg -- Apply all saved user settings to AQ cfg data
+ * @pi: port info struct
+ * @pcaps: TOPO_CAPS capability data to use for defaults
+ * @cfg: new PHY config data to be modified
+ *
+ * Applies user settings for advertised speeds, FEC mode, and flow control
+ * mode to the supplied PHY config struct; it uses the data from pcaps to check
+ * if the saved settings are invalid and uses the pcaps data instead if they
+ * are invalid.
+ */
+static void
+ice_apply_saved_user_req_to_cfg(struct ice_port_info *pi,
+ struct ice_aqc_get_phy_caps_data *pcaps,
+ struct ice_aqc_set_phy_cfg_data *cfg)
+{
+ ice_apply_saved_phy_req_to_cfg(pi, pcaps, cfg);
+ ice_apply_saved_fec_req_to_cfg(pi, pcaps, cfg);
+ ice_apply_saved_fc_req_to_cfg(pi, cfg);
+}
+
+/**
+ * ice_apply_saved_phy_cfg -- Re-apply user PHY config settings
+ * @sc: device private structure
+ *
+ * Takes the saved user PHY config settings, overwrites the NVM
+ * default with them if they're valid, and uses the Set PHY Config AQ command
+ * to apply them.
+ *
+ * Intended for use when media is inserted.
+ *
+ * @pre Port has media available
+ */
+void
+ice_apply_saved_phy_cfg(struct ice_softc *sc)
+{
+ struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+ struct ice_port_info *pi = sc->hw.port_info;
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return;
+ }
+
+ /* Setup new PHY config */
+ ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
+
+ /* Apply settings requested by user */
+ ice_apply_saved_user_req_to_cfg(pi, &pcaps, &cfg);
+
+ /* Enable link and re-negotiate it */
+ cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
+
+ status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
+ if (status != ICE_SUCCESS) {
+ if ((status == ICE_ERR_AQ_ERROR) &&
+ (hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY))
+ device_printf(dev,
+ "%s: User PHY cfg not applied; no media in port\n",
+ __func__);
+ else
+ device_printf(dev,
+ "%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+}
+
+/**
+ * ice_print_ldo_tlv - Print out LDO TLV information
+ * @sc: device private structure
+ * @tlv: LDO TLV information from the adapter NVM
+ *
+ * Dump out the information in tlv to the kernel message buffer; intended for
+ * debugging purposes.
+ */
+static void
+ice_print_ldo_tlv(struct ice_softc *sc, struct ice_link_default_override_tlv *tlv)
+{
+ device_t dev = sc->dev;
+
+ device_printf(dev, "TLV: -options 0x%02x\n", tlv->options);
+ device_printf(dev, " -phy_config 0x%02x\n", tlv->phy_config);
+ device_printf(dev, " -fec_options 0x%02x\n", tlv->fec_options);
+ device_printf(dev, " -phy_high 0x%016llx\n",
+ (unsigned long long)tlv->phy_type_high);
+ device_printf(dev, " -phy_low 0x%016llx\n",
+ (unsigned long long)tlv->phy_type_low);
+}
+
+/**
+ * ice_set_link_management_mode -- Strict or lenient link management
+ * @sc: device private structure
+ *
+ * Some NVMs give the adapter the option to advertise a superset of link
+ * configurations. This checks to see if that option is enabled.
+ * Further, the NVM could also provide a specific set of configurations
+ * to try; these are cached in the driver's private structure if they
+ * are available.
+ */
+void
+ice_set_link_management_mode(struct ice_softc *sc)
+{
+ struct ice_port_info *pi = sc->hw.port_info;
+ device_t dev = sc->dev;
+ struct ice_link_default_override_tlv tlv = { 0 };
+ enum ice_status status;
+
+	/* The port must remain in strict mode if the FW version is below a
+	 * certain version (i.e., don't set the lenient mode features).
+	 */
+ if (!(ice_fw_supports_link_override(&sc->hw)))
+ return;
+
+ status = ice_get_link_default_override(&tlv, pi);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_get_link_default_override failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
+ return;
+ }
+
+ if (sc->hw.debug_mask & ICE_DBG_LINK)
+ ice_print_ldo_tlv(sc, &tlv);
+
+ /* Set lenient link mode */
+ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_LENIENT_LINK_MODE) &&
+ (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)))
+ ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_en);
+
+ /* Default overrides only work if in lenient link mode */
+ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DEFAULT_OVERRIDE) &&
+ ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE) &&
+ (tlv.options & ICE_LINK_OVERRIDE_EN))
+ ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_en);
+
+ /* Cache the LDO TLV structure in the driver, since it won't change
+ * during the driver's lifetime.
+ */
+ sc->ldo_tlv = tlv;
+}
+
+/**
+ * ice_init_saved_phy_cfg -- Set cached user PHY cfg settings with NVM defaults
+ * @sc: device private structure
+ *
+ * This should be called before the tunables for these link settings
+ * (e.g. advertise_speed) are added -- so that these defaults don't overwrite
+ * the cached values that the sysctl handlers will write.
+ *
+ * This also needs to be called before ice_init_link_configuration, to ensure
+ * that there are sane values that can be written if there is media available
+ * in the port.
+ */
+void
+ice_init_saved_phy_cfg(struct ice_softc *sc)
+{
+ struct ice_port_info *pi = sc->hw.port_info;
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ u64 phy_low, phy_high;
+
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
+ &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps (TOPO_CAP) failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return;
+ }
+
+ phy_low = le64toh(pcaps.phy_type_low);
+ phy_high = le64toh(pcaps.phy_type_high);
+
+ /* Save off initial config parameters */
+ pi->phy.curr_user_speed_req =
+ ice_aq_phy_types_to_sysctl_speeds(phy_low, phy_high);
+ pi->phy.curr_user_fec_req = ice_caps_to_fec_mode(pcaps.caps,
+ pcaps.link_fec_options);
+ pi->phy.curr_user_fc_req = ice_caps_to_fc_mode(pcaps.caps);
+}
+
+/**
+ * ice_module_init - Driver callback to handle module load
+ *
+ * Callback for handling module load events. This function should initialize
+ * any data structures that are used for the life of the device driver.
+ */
+static int
+ice_module_init(void)
+{
+ return (0);
+}
+
+/**
+ * ice_module_exit - Driver callback to handle module exit
+ *
+ * Callback for handling module unload events. This function should release
+ * any resources initialized during ice_module_init.
+ *
+ * If this function returns non-zero, the module will not be unloaded. It
+ * should only return such a value if the module cannot be unloaded at all,
+ * such as due to outstanding memory references that cannot be revoked.
+ */
+static int
+ice_module_exit(void)
+{
+ return (0);
+}
+
+/**
+ * ice_module_event_handler - Callback for module events
+ * @mod: unused module_t parameter
+ * @what: the event requested
+ * @arg: unused event argument
+ *
+ * Callback used to handle module events from the stack. Used to allow the
+ * driver to define custom behavior that should happen at module load and
+ * unload.
+ */
+int
+ice_module_event_handler(module_t __unused mod, int what, void __unused *arg)
+{
+ switch (what) {
+ case MOD_LOAD:
+ return ice_module_init();
+ case MOD_UNLOAD:
+ return ice_module_exit();
+ default:
+ /* TODO: do we need to handle MOD_QUIESCE and MOD_SHUTDOWN? */
+ return (EOPNOTSUPP);
+ }
+}
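For context, a handler like the one above is attached to a moduledata_t and
registered with DECLARE_MODULE(); the fragment below is a minimal, hypothetical
sketch of that wiring and is not code from this patch (an iflib driver normally
gets its device registration from DRIVER_MODULE() elsewhere in the source).

/* Hypothetical registration sketch -- illustrative only.
 * Assumes <sys/param.h>, <sys/kernel.h> and <sys/module.h>.
 */
static moduledata_t ice_example_mod = {
	"ice_example",			/* module name (placeholder) */
	ice_module_event_handler,	/* handler shown above */
	NULL				/* private argument, unused */
};
DECLARE_MODULE(ice_example, ice_example_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);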
+
+/**
+ * ice_handle_nvm_access_ioctl - Handle an NVM access ioctl request
+ * @sc: the device private softc
+ * @ifd: ifdrv ioctl request pointer
+ */
+int
+ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
+{
+ union ice_nvm_access_data *data;
+ struct ice_nvm_access_cmd *cmd;
+ size_t ifd_len = ifd->ifd_len, malloc_len;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ u8 *nvm_buffer;
+ int err;
+
+ /*
+ * ifioctl forwards SIOCxDRVSPEC to iflib without performing
+ * a privilege check. In turn, iflib forwards the ioctl to the driver
+ * without performing a privilege check. Perform one here to ensure
+ * that non-privileged threads cannot access this interface.
+ */
+ err = priv_check(curthread, PRIV_DRIVER);
+ if (err)
+ return (err);
+
+ if (ifd_len < sizeof(struct ice_nvm_access_cmd)) {
+ device_printf(dev, "%s: ifdrv length is too small. Got %zu, but expected %zu\n",
+ __func__, ifd_len, sizeof(struct ice_nvm_access_cmd));
+ return (EINVAL);
+ }
+
+ if (ifd->ifd_data == NULL) {
+ device_printf(dev, "%s: ifd data buffer not present.\n",
+ __func__);
+ return (EINVAL);
+ }
+
+ /*
+ * If everything works correctly, ice_handle_nvm_access should not
+ * modify data past the size of the ioctl length. However, it could
+ * lead to memory corruption if it did. Make sure to allocate at least
+ * enough space for the command and data regardless. This
+ * ensures that any access to the data union will not access invalid
+ * memory.
+ */
+ malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd));
+
+ nvm_buffer = (u8 *)malloc(malloc_len, M_ICE, M_ZERO | M_WAITOK);
+ if (!nvm_buffer)
+ return (ENOMEM);
+
+ /* Copy the NVM access command and data in from user space */
+ /* coverity[tainted_data_argument] */
+ err = copyin(ifd->ifd_data, nvm_buffer, ifd_len);
+ if (err) {
+ device_printf(dev, "%s: Copying request from user space failed, err %s\n",
+ __func__, ice_err_str(err));
+ goto cleanup_free_nvm_buffer;
+ }
+
+ /*
+ * The NVM command structure is immediately followed by data which
+ * varies in size based on the command.
+ */
+ cmd = (struct ice_nvm_access_cmd *)nvm_buffer;
+ data = (union ice_nvm_access_data *)(nvm_buffer + sizeof(struct ice_nvm_access_cmd));
+
+ /* Handle the NVM access request */
+ status = ice_handle_nvm_access(hw, cmd, data);
+ if (status)
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM access request failed, err %s\n",
+ ice_status_str(status));
+
+ /* Copy the possibly modified contents of the handled request out */
+ err = copyout(nvm_buffer, ifd->ifd_data, ifd_len);
+ if (err) {
+ device_printf(dev, "%s: Copying response back to user space failed, err %s\n",
+ __func__, ice_err_str(err));
+ goto cleanup_free_nvm_buffer;
+ }
+
+ /* Convert private status to an error code for proper ioctl response */
+ switch (status) {
+ case ICE_SUCCESS:
+ err = (0);
+ break;
+ case ICE_ERR_NO_MEMORY:
+ err = (ENOMEM);
+ break;
+ case ICE_ERR_OUT_OF_RANGE:
+ err = (ENOTTY);
+ break;
+ case ICE_ERR_PARAM:
+ default:
+ err = (EINVAL);
+ break;
+ }
+
+cleanup_free_nvm_buffer:
+ free(nvm_buffer, M_ICE);
+ return err;
+}
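The layout the handler expects -- a struct ice_nvm_access_cmd immediately
followed by its variable-size data, passed through struct ifdrv -- can be
illustrated with a hedged userland sketch. Everything below is hypothetical
(the interface name, the ifd_cmd value, and the register offset are
placeholders) and relies on the definitions from ice_nvm.h later in this diff.

/* Hypothetical userland sketch of a register read through the NVM access
 * interface; not a tool shipped with this patch.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

static int
example_nvm_reg_read(int sock, const char *ifname, uint32_t reg, uint32_t *val)
{
	uint8_t buf[sizeof(struct ice_nvm_access_cmd) +
	    sizeof(union ice_nvm_access_data)] = { 0 };
	struct ice_nvm_access_cmd *cmd = (struct ice_nvm_access_cmd *)buf;
	union ice_nvm_access_data *data =
	    (union ice_nvm_access_data *)(buf + sizeof(*cmd));
	struct ifdrv ifd;

	/* Command header, followed in the same buffer by the data union */
	cmd->command = ICE_NVM_CMD_READ;
	cmd->config = ICE_NVM_REG_RW_MODULE |
	    (ICE_NVM_REG_RW_FLAGS << ICE_NVM_CFG_FLAGS_S);
	cmd->offset = reg;
	cmd->data_size = sizeof(data->regval);

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, ifname, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = 0;		/* driver-defined command; placeholder */
	ifd.ifd_len = sizeof(buf);
	ifd.ifd_data = buf;

	if (ioctl(sock, SIOCGDRVSPEC, &ifd) == -1)
		return (-1);
	*val = data->regval;
	return (0);
}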
+
+/**
+ * ice_read_sff_eeprom - Read data from SFF eeprom
+ * @sc: device softc
+ * @dev_addr: I2C device address (typically 0xA0 or 0xA2)
+ * @offset: offset into the eeprom
+ * @data: pointer to data buffer to store read data in
+ * @length: length to read; max length is 16
+ *
+ * Read from the SFF eeprom in the module for this PF's port. For more details
+ * on the contents of an SFF eeprom, refer to SFF-8472 (SFP), SFF-8636 (QSFP),
+ * and SFF-8024 (both).
+ */
+int
+ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u16 length)
+{
+ struct ice_hw *hw = &sc->hw;
+ int error = 0, retries = 0;
+ enum ice_status status;
+ u16 lport;
+
+ if (length > 16)
+ return (EINVAL);
+
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (ENOSYS);
+
+ if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA))
+ return (ENXIO);
+
+ /* Set bit to indicate lport value is valid */
+ lport = hw->port_info->lport | (0x1 << 8);
+
+ do {
+ status = ice_aq_sff_eeprom(hw, lport, dev_addr,
+ offset, 0, 0, data, length,
+ false, NULL);
+ if (!status) {
+ error = 0;
+ break;
+ }
+ if (status == ICE_ERR_AQ_ERROR &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) {
+ error = EBUSY;
+ continue;
+ }
+ if (status == ICE_ERR_AQ_ERROR &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EACCES) {
+ /* FW says I2C access isn't supported */
+ error = EACCES;
+ break;
+ }
+ if (status == ICE_ERR_AQ_ERROR &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EPERM) {
+ device_printf(sc->dev,
+ "%s: Module pointer location specified in command does not permit the required operation.\n",
+ __func__);
+ error = EPERM;
+ break;
+ } else {
+ device_printf(sc->dev,
+ "%s: Error reading I2C data: err %s aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ error = EIO;
+ break;
+ }
+ } while (retries++ < ICE_I2C_MAX_RETRIES);
+
+ if (error == EBUSY)
+ device_printf(sc->dev,
+ "%s: Error reading I2C data after %d retries\n",
+ __func__, ICE_I2C_MAX_RETRIES);
+
+ return (error);
+}
+
+/**
+ * ice_handle_i2c_req - Driver independent I2C request handler
+ * @sc: device softc
+ * @req: The I2C parameters to use
+ *
+ * Read from the port's I2C eeprom using the parameters from the ioctl.
+ */
+int
+ice_handle_i2c_req(struct ice_softc *sc, struct ifi2creq *req)
+{
+ return ice_read_sff_eeprom(sc, req->dev_addr, req->offset, req->data, req->len);
+}
+
+/**
+ * ice_sysctl_read_i2c_diag_data - Read some module diagnostic data via i2c
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ *
+ * Read 8 bytes of diagnostic data from the SFF eeprom in the (Q)SFP module
+ * inserted into the port.
+ *
+ * | SFP A2 | QSFP Lower Page
+ * ------------|---------|----------------
+ * Temperature | 96-97 | 22-23
+ * Vcc | 98-99 | 26-27
+ * TX power | 102-103 | 34-35..40-41
+ * RX power | 104-105 | 50-51..56-57
+ */
+static int
+ice_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ device_t dev = sc->dev;
+ struct sbuf *sbuf;
+ int error = 0;
+ u8 data[16];
+
+ UNREFERENCED_PARAMETER(arg2);
+ UNREFERENCED_PARAMETER(oidp);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ if (req->oldptr == NULL) {
+ error = SYSCTL_OUT(req, 0, 128);
+ return (error);
+ }
+
+ error = ice_read_sff_eeprom(sc, 0xA0, 0, data, 1);
+ if (error)
+ return (error);
+
+ /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
+ if (data[0] == 0x3) {
+ /*
+ * Check for:
+ * - Internally calibrated data
+ * - Diagnostic monitoring is implemented
+ */
+ ice_read_sff_eeprom(sc, 0xA0, 92, data, 1);
+ if (!(data[0] & 0x60)) {
+ device_printf(dev, "Module doesn't support diagnostics: 0xA0[92] = %02X\n", data[0]);
+ return (ENODEV);
+ }
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ ice_read_sff_eeprom(sc, 0xA2, 96, data, 4);
+ for (int i = 0; i < 4; i++)
+ sbuf_printf(sbuf, "%02X ", data[i]);
+
+ ice_read_sff_eeprom(sc, 0xA2, 102, data, 4);
+ for (int i = 0; i < 4; i++)
+ sbuf_printf(sbuf, "%02X ", data[i]);
+ } else if (data[0] == 0xD || data[0] == 0x11) {
+ /*
+ * QSFP+ modules are always internally calibrated, and must indicate
+ * what types of diagnostic monitoring are implemented
+ */
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ ice_read_sff_eeprom(sc, 0xA0, 22, data, 2);
+ for (int i = 0; i < 2; i++)
+ sbuf_printf(sbuf, "%02X ", data[i]);
+
+ ice_read_sff_eeprom(sc, 0xA0, 26, data, 2);
+ for (int i = 0; i < 2; i++)
+ sbuf_printf(sbuf, "%02X ", data[i]);
+
+ ice_read_sff_eeprom(sc, 0xA0, 34, data, 2);
+ for (int i = 0; i < 2; i++)
+ sbuf_printf(sbuf, "%02X ", data[i]);
+
+ ice_read_sff_eeprom(sc, 0xA0, 50, data, 2);
+ for (int i = 0; i < 2; i++)
+ sbuf_printf(sbuf, "%02X ", data[i]);
+ } else {
+ device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", data[0]);
+ return (ENODEV);
+ }
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
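The sysctl handler above emits the raw bytes; turning them into physical
quantities follows the SFF-8472 / SFF-8636 scaling rules. As a hedged
illustration only (assuming an internally calibrated module and the byte order
read above), the first word is a signed temperature in 1/256 degC and the
second an unsigned supply voltage in 100 uV steps:

/* Hypothetical decode of the first four bytes printed above; the scaling
 * factors come from SFF-8472/SFF-8636, not from this patch.
 */
#include <stdint.h>
#include <stdio.h>

static void
example_decode_diag(const uint8_t d[4])
{
	int16_t temp_raw = (int16_t)((d[0] << 8) | d[1]);	/* 1/256 degC */
	uint16_t vcc_raw = (uint16_t)((d[2] << 8) | d[3]);	/* 100 uV units */

	printf("temperature:    %.2f C\n", temp_raw / 256.0);
	printf("supply voltage: %.4f V\n", vcc_raw * 100e-6);
}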
+
+/**
+ * ice_alloc_intr_tracking - Setup interrupt tracking structures
+ * @sc: device softc structure
+ *
+ * Sets up the resource manager for keeping track of interrupt allocations,
+ * and initializes the tracking maps for the PF's interrupt allocations.
+ *
+ * Unlike the scheme for queues, this is done in one step since the
+ * manager and the maps have the same lifetime.
+ *
+ * @returns 0 on success, or an error code on failure.
+ */
+int
+ice_alloc_intr_tracking(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ int err;
+
+ /* Initialize the interrupt allocation manager */
+ err = ice_resmgr_init_contig_only(&sc->imgr,
+ hw->func_caps.common_cap.num_msix_vectors);
+ if (err) {
+ device_printf(dev, "Unable to initialize PF interrupt manager: %s\n",
+ ice_err_str(err));
+ return (err);
+ }
+
+ /* Allocate PF interrupt mapping storage */
+ if (!(sc->pf_imap =
+ (u16 *)malloc(sizeof(u16) * hw->func_caps.common_cap.num_msix_vectors,
+ M_ICE, M_NOWAIT))) {
+ device_printf(dev, "Unable to allocate PF imap memory\n");
+ err = ENOMEM;
+ goto free_imgr;
+ }
+ for (u32 i = 0; i < hw->func_caps.common_cap.num_msix_vectors; i++) {
+ sc->pf_imap[i] = ICE_INVALID_RES_IDX;
+ }
+
+ return (0);
+
+free_imgr:
+ ice_resmgr_destroy(&sc->imgr);
+ return (err);
+}
+
+/**
+ * ice_free_intr_tracking - Free PF interrupt tracking structures
+ * @sc: device softc structure
+ *
+ * Frees the interrupt resource allocation manager and the PF's owned maps.
+ *
+ * VF maps are released when the owning VFs are destroyed, which should always
+ * happen before this function is called.
+ */
+void
+ice_free_intr_tracking(struct ice_softc *sc)
+{
+ if (sc->pf_imap) {
+ ice_resmgr_release_map(&sc->imgr, sc->pf_imap,
+ sc->lan_vectors);
+ free(sc->pf_imap, M_ICE);
+ sc->pf_imap = NULL;
+ }
+
+ ice_resmgr_destroy(&sc->imgr);
+}
+
+/**
+ * ice_apply_supported_speed_filter - Mask off unsupported speeds
+ * @phy_type_low: bit-field for the low quad word of PHY types
+ * @phy_type_high: bit-field for the high quad word of PHY types
+ *
+ * Given the two quad words containing the supported PHY types,
+ * this function will mask off the speeds that are not currently
+ * supported by the device.
+ */
+static void
+ice_apply_supported_speed_filter(u64 *phy_type_low, u64 *phy_type_high)
+{
+ u64 phylow_mask;
+
+ /* We won't offer anything lower than 1G for any part,
+ * but we also won't offer anything under 25G for 100G
+ * parts.
+ */
+ phylow_mask = ~(ICE_PHY_TYPE_LOW_1000BASE_T - 1);
+ if (*phy_type_high ||
+ *phy_type_low & ~(ICE_PHY_TYPE_LOW_100GBASE_CR4 - 1))
+ phylow_mask = ~(ICE_PHY_TYPE_LOW_25GBASE_T - 1);
+ *phy_type_low &= phylow_mask;
+}
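To make the masking arithmetic concrete: the PHY type words are laid out
roughly from the slowest speed upward, so subtracting one from a speed's
lowest bit and inverting yields a mask that clears every slower speed. With
illustrative bit positions (not taken from this patch), if
ICE_PHY_TYPE_LOW_1000BASE_T were BIT(2):

	/* Illustrative values only */
	phylow_mask = ~(BIT(2) - 1);	/* ~(0x4 - 1) = ~0x3: clears bits 0-1, */
					/* i.e. the sub-1G PHY types           */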
+
+/**
+ * ice_get_phy_types - Report appropriate PHY types
+ * @sc: device softc structure
+ * @phy_type_low: bit-field for the low quad word of PHY types
+ * @phy_type_high: bit-field for the high quad word of PHY types
+ *
+ * Populate the two quad words with bits representing the PHY types
+ * supported by the device. This is really just a wrapper around
+ * the ice_aq_get_phy_caps() that chooses the appropriate report
+ * mode (lenient or strict) and reports back only the relevant PHY
+ * types. In lenient mode the capabilities are retrieved with the
+ * NVM_CAP report mode, otherwise they're retrieved using the
+ * TOPO_CAP report mode (NVM intersected with current media).
+ *
+ * @returns 0 on success, or an error code on failure.
+ */
+static enum ice_status
+ice_get_phy_types(struct ice_softc *sc, u64 *phy_type_low, u64 *phy_type_high)
+{
+ struct ice_aqc_get_phy_caps_data pcaps = { 0 };
+ struct ice_port_info *pi = sc->hw.port_info;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ u8 report_mode;
+
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LENIENT_LINK_MODE))
+ report_mode = ICE_AQC_REPORT_NVM_CAP;
+ else
+ report_mode = ICE_AQC_REPORT_TOPO_CAP;
+ status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL);
+ if (status != ICE_SUCCESS) {
+ device_printf(dev,
+ "%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n",
+ __func__, (report_mode) ? "TOPO_CAP" : "NVM_CAP",
+ ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
+ return (status);
+ }
+
+ *phy_type_low = le64toh(pcaps.phy_type_low);
+ *phy_type_high = le64toh(pcaps.phy_type_high);
+
+ return (ICE_SUCCESS);
+}
Index: sys/dev/ice/ice_nvm.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_nvm.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_NVM_H_
+#define _ICE_NVM_H_
+
+#define ICE_NVM_CMD_READ 0x0000000B
+#define ICE_NVM_CMD_WRITE 0x0000000C
+
+/* NVM Access config bits */
+#define ICE_NVM_CFG_MODULE_M MAKEMASK(0xFF, 0)
+#define ICE_NVM_CFG_MODULE_S 0
+#define ICE_NVM_CFG_FLAGS_M MAKEMASK(0xF, 8)
+#define ICE_NVM_CFG_FLAGS_S 8
+#define ICE_NVM_CFG_EXT_FLAGS_M MAKEMASK(0xF, 12)
+#define ICE_NVM_CFG_EXT_FLAGS_S 12
+#define ICE_NVM_CFG_ADAPTER_INFO_M MAKEMASK(0xFFFF, 16)
+#define ICE_NVM_CFG_ADAPTER_INFO_S 16
+
+/* NVM Read Get Driver Features */
+#define ICE_NVM_GET_FEATURES_MODULE 0xE
+#define ICE_NVM_GET_FEATURES_FLAGS 0xF
+
+/* NVM Read/Write Mapped Space */
+#define ICE_NVM_REG_RW_MODULE 0x0
+#define ICE_NVM_REG_RW_FLAGS 0x1
+
+#define ICE_NVM_ACCESS_MAJOR_VER 0
+#define ICE_NVM_ACCESS_MINOR_VER 5
+
+/* NVM Access feature flags. Other bits in the features field are reserved and
+ * should be set to zero when reporting the ice_nvm_features structure.
+ */
+#define ICE_NVM_FEATURES_0_REG_ACCESS BIT(1)
+
+/* NVM Access Features */
+struct ice_nvm_features {
+ u8 major; /* Major version (informational only) */
+ u8 minor; /* Minor version (informational only) */
+ u16 size; /* size of ice_nvm_features structure */
+ u8 features[12]; /* Array of feature bits */
+};
+
+/* NVM Access command */
+struct ice_nvm_access_cmd {
+ u32 command; /* NVM command: READ or WRITE */
+ u32 config; /* NVM command configuration */
+ u32 offset; /* offset to read/write, in bytes */
+ u32 data_size; /* size of data field, in bytes */
+};
+
+/* NVM Access data */
+union ice_nvm_access_data {
+ u32 regval; /* Storage for register value */
+ struct ice_nvm_features drv_features; /* NVM features */
+};
+
+/* NVM Access registers */
+#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
+#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
+#define GL_HICR 0x00082040
+#define GL_HICR_EN 0x00082044
+#define GLGEN_CSR_DEBUG_C 0x00075750
+#define GLPCI_LBARCTRL 0x0009DE74
+#define GLNVM_GENS 0x000B6100
+#define GLNVM_FLA 0x000B6108
+
+#define ICE_NVM_ACCESS_GL_HIDA_MAX 15
+#define ICE_NVM_ACCESS_GL_HIBA_MAX 1023
+
+u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd);
+u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd);
+u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd);
+enum ice_status
+ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
+ union ice_nvm_access_data *data);
+enum ice_status
+ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
+ union ice_nvm_access_data *data);
+enum ice_status
+ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
+ union ice_nvm_access_data *data);
+enum ice_status
+ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
+ union ice_nvm_access_data *data);
+enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_nvm(struct ice_hw *hw);
+enum ice_status
+ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
+ void *data, bool last_command, bool read_shadow_ram,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
+ bool read_shadow_ram);
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type);
+enum ice_status
+ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
+enum ice_status ice_init_nvm(struct ice_hw *hw);
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data);
+enum ice_status
+ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
+enum ice_status
+ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
+ u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
+ u16 elem_count, struct ice_sq_cd *cd);
+#endif /* _ICE_NVM_H_ */
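The config word defined above packs the module, flags, extended flags, and
adapter-info fields into a single 32-bit value. A small hypothetical helper
(not part of this header) shows how the masks and shifts compose:

/* Hypothetical helper, illustrative only. */
static inline u32
example_nvm_pack_config(u32 module, u32 flags, u32 adapter_info)
{
	return (((module << ICE_NVM_CFG_MODULE_S) & ICE_NVM_CFG_MODULE_M) |
	    ((flags << ICE_NVM_CFG_FLAGS_S) & ICE_NVM_CFG_FLAGS_M) |
	    ((adapter_info << ICE_NVM_CFG_ADAPTER_INFO_S) &
	     ICE_NVM_CFG_ADAPTER_INFO_M));
}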
Index: sys/dev/ice/ice_nvm.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_nvm.c
@@ -0,0 +1,1303 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+
+/**
+ * ice_aq_read_nvm
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands (0x0701)
+ */
+enum ice_status
+ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
+ void *data, bool last_command, bool read_shadow_ram,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.nvm;
+
+ if (offset > ICE_AQC_NVM_MAX_OFFSET)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == ICE_AQC_NVM_START_POINT)
+ cmd->cmd_flags |= ICE_AQC_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
+ cmd->module_typeid = CPU_TO_LE16(module_typeid);
+ cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = CPU_TO_LE16(length);
+
+ return ice_aq_send_cmd(hw, &desc, data, length, cd);
+}
+
+/**
+ * ice_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors and ensures that no single
+ * read request exceeds the maximum 4Kb read for a single AdminQ command.
+ *
+ * Returns a status code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ */
+enum ice_status
+ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
+ bool read_shadow_ram)
+{
+ enum ice_status status;
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ *length = 0;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM error: requested data is beyond Shadow RAM limit\n");
+ return ICE_ERR_PARAM;
+ }
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ice_aq_read_nvm cannot read more than 4Kb at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also
+ * 4Kb.
+ */
+ sector_offset = offset % ICE_AQ_MAX_BUF_LEN;
+ read_size = MIN_T(u32, ICE_AQ_MAX_BUF_LEN - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ice_aq_read_nvm takes the length as a u16. Our read_size is
+ * calculated using a u32, but the ICE_AQ_MAX_BUF_LEN maximum
+ * size guarantees that it will fit within the 2 bytes.
+ */
+ status = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram, NULL);
+ if (status)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return status;
+}
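As a worked example of the splitting above (the numbers are made up): a
10000-byte read starting at flat offset 1000 is issued as three AQ commands,
because a single command can neither exceed ICE_AQ_MAX_BUF_LEN (4096 bytes)
nor cross a 4 KB Shadow RAM sector boundary:

	iteration 1: offset 1000, sector_offset 1000, read_size 3096
	iteration 2: offset 4096, sector_offset    0, read_size 4096
	iteration 3: offset 8192, sector_offset    0, read_size 2808  (last_cmd)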
+
+/**
+ * ice_aq_update_nvm
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands (0x0703)
+ */
+static enum ice_status
+ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command, u8 command_flags,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.nvm;
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD;
+ cmd->module_typeid = CPU_TO_LE16(module_typeid);
+ cmd->offset_low = CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = CPU_TO_LE16(length);
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, data, length, cd);
+}
+
+/**
+ * ice_aq_erase_nvm
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @cd: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands (0x0702)
+ */
+enum ice_status
+ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+ struct ice_aqc_nvm *cmd;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.nvm;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
+
+ cmd->module_typeid = CPU_TO_LE16(module_typeid);
+ cmd->length = CPU_TO_LE16(ICE_AQC_NVM_ERASE_LEN);
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_read_nvm_cfg - read an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature ID
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @elem_count: pointer to count of elements read by FW
+ * @cd: pointer to command details structure or NULL
+ *
+ * Reads single or multiple feature/field ID and data (0x0704)
+ */
+enum ice_status
+ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
+ u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_nvm_cfg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.nvm_cfg;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_read);
+
+ cmd->cmd_flags = cmd_flags;
+ cmd->id = CPU_TO_LE16(field_id);
+
+ status = ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
+ if (!status && elem_count)
+ *elem_count = LE16_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ice_aq_write_nvm_cfg - write an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @elem_count: count of elements to be written
+ * @cd: pointer to command details structure or NULL
+ *
+ * Writes single or multiple feature/field ID and data (0x0705)
+ */
+enum ice_status
+ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
+ u16 elem_count, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_nvm_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.nvm_cfg;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_cfg_write);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ cmd->count = CPU_TO_LE16(elem_count);
+ cmd->cmd_flags = cmd_flags;
+
+ return ice_aq_send_cmd(hw, &desc, data, buf_size, cd);
+}
+
+/**
+ * ice_check_sr_access_params - verify params for Shadow RAM R/W operations.
+ * @hw: pointer to the HW structure
+ * @offset: offset in words from module start
+ * @words: number of words to access
+ */
+static enum ice_status
+ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
+{
+ if ((offset + words) > hw->nvm.sr_words) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM error: offset beyond SR lmt.\n");
+ return ICE_ERR_PARAM;
+ }
+
+ if (words > ICE_SR_SECTOR_SIZE_IN_WORDS) {
+ /* We can access only up to 4KB (one sector), in one AQ write */
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM error: tried to access %d words, limit is %d.\n",
+ words, ICE_SR_SECTOR_SIZE_IN_WORDS);
+ return ICE_ERR_PARAM;
+ }
+
+ if (((offset + (words - 1)) / ICE_SR_SECTOR_SIZE_IN_WORDS) !=
+ (offset / ICE_SR_SECTOR_SIZE_IN_WORDS)) {
+ /* A single access cannot spread over two sectors */
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM error: cannot spread over two sectors.\n");
+ return ICE_ERR_PARAM;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_read_sr_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
+ */
+enum ice_status
+ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ enum ice_status status;
+ __le16 data_local;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Note that ice_read_flat_nvm checks if the read is past the Shadow
+ * RAM size, and ensures we don't read across a Shadow RAM sector
+ * boundary
+ */
+ status = ice_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (status)
+ return status;
+
+ *data = LE16_TO_CPU(data_local);
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_write_sr_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
+ */
+static enum ice_status
+ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data,
+ bool last_command)
+{
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_check_sr_access_params(hw, offset, words);
+ if (!status)
+ status = ice_aq_update_nvm(hw, 0, 2 * offset, 2 * words, data,
+ last_command, 0, NULL);
+
+ return status;
+}
+
+/**
+ * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
+ * taken before reading the buffer and later released.
+ */
+static enum ice_status
+ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
+{
+ u32 bytes = *words * 2, i;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* ice_read_flat_nvm takes into account the 4Kb AdminQ and Shadow RAM
+ * sector restrictions necessary when reading from the NVM.
+ */
+ status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+
+ /* Report the number of words successfully read */
+ *words = bytes / 2;
+
+ /* Byte swap the words up to the amount we actually read */
+ for (i = 0; i < *words; i++)
+ data[i] = LE16_TO_CPU(((_FORCE_ __le16 *)data)[i]);
+
+ return status;
+}
+
+/**
+ * ice_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function will request NVM ownership.
+ */
+enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (hw->nvm.blank_nvm_mode)
+ return ICE_SUCCESS;
+
+ return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
+}
+
+/**
+ * ice_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM ownership.
+ */
+void ice_release_nvm(struct ice_hw *hw)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (hw->nvm.blank_nvm_mode)
+ return;
+
+ ice_release_res(hw, ICE_NVM_RES_ID);
+}
+
+/**
+ * ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
+ */
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+{
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (!status) {
+ status = ice_read_sr_word_aq(hw, offset, data);
+ ice_release_nvm(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ */
+enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+ u16 module_type)
+{
+ enum ice_status status;
+ u16 pfa_len, pfa_ptr;
+ u16 next_tlv;
+
+ status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
+ if (status != ICE_SUCCESS) {
+		ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA pointer.\n");
+ return status;
+ }
+ status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
+ if (status != ICE_SUCCESS) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ while (next_tlv < pfa_ptr + pfa_len) {
+ u16 tlv_sub_module_type;
+ u16 tlv_len;
+
+ /* Read TLV type */
+ status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
+ if (status != ICE_SUCCESS) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
+ break;
+ }
+ /* Read TLV length */
+ status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
+ if (status != ICE_SUCCESS) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return ICE_SUCCESS;
+ }
+ return ICE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ice_read_pba_string - Reads part number string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the NVM.
+ */
+enum ice_status
+ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ enum ice_status status;
+ u16 pba_word, pba_size;
+ u16 i;
+
+ status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ ICE_SR_PBA_BLOCK_PTR);
+ if (status != ICE_SUCCESS) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
+ return status;
+ }
+
+ /* pba_size is the next word */
+ status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
+ if (status != ICE_SUCCESS) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
+ return status;
+ }
+
+ if (pba_tlv_len < pba_size) {
+ ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Buffer too small for PBA data.\n");
+ return ICE_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
+ if (status != ICE_SUCCESS) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read PBA Block word %d.\n", i);
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
+
+/**
+ * ice_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ *
+ * Read the Combo Image version data from the Boot Configuration TLV and fill
+ * in the option ROM version data.
+ */
+static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
+{
+ u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
+ struct ice_orom_info *orom = &hw->nvm.orom;
+ enum ice_status status;
+ u32 combo_ver;
+
+ status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
+ ICE_SR_BOOT_CFG_PTR);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read Boot Configuration Block TLV.\n");
+ return status;
+ }
+
+	/* The Boot Configuration Block must be at least 2 words long
+	 * (Combo Image Version High and Combo Image Version Low)
+	 */
+ if (boot_cfg_tlv_len < 2) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Invalid Boot Configuration Block TLV size.\n");
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
+ &combo_hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
+ return status;
+ }
+
+ status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
+ &combo_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
+ return status;
+ }
+
+ combo_ver = ((u32)combo_hi << 16) | combo_lo;
+
+ orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
+ ICE_OROM_VER_SHIFT);
+ orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
+ orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
+ ICE_OROM_VER_BUILD_SHIFT);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_discover_flash_size - Discover the available flash size.
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ */
+static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
+{
+ u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status)
+ return status;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ status = ice_read_flat_nvm(hw, offset, &len, &data, false);
+ if (status == ICE_ERR_AQ_ERROR &&
+ hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "%s: New upper bound of %u bytes\n",
+ __func__, offset);
+ status = ICE_SUCCESS;
+ max_size = offset;
+ } else if (!status) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "%s: New lower bound of %u bytes\n",
+ __func__, offset);
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ ice_debug(hw, ICE_DBG_NVM,
+ "Predicted flash size is %u bytes\n", max_size);
+
+ hw->nvm.flash_size = max_size;
+
+err_read_flat_nvm:
+ ice_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ice_init_nvm - initializes NVM setting
+ * @hw: pointer to the HW struct
+ *
+ * This function reads and populates NVM settings such as Shadow RAM size,
+ * max_timeout, and blank_nvm_mode
+ */
+enum ice_status ice_init_nvm(struct ice_hw *hw)
+{
+ struct ice_nvm_info *nvm = &hw->nvm;
+ u16 eetrack_lo, eetrack_hi, ver;
+ enum ice_status status;
+ u32 fla, gens_stat;
+ u8 sr_size;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = rd32(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = rd32(hw, GLNVM_FLA);
+ if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
+ nvm->blank_nvm_mode = false;
+ } else {
+ /* Blank programming mode */
+ nvm->blank_nvm_mode = true;
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM init error: unsupported blank mode.\n");
+ return ICE_ERR_NVM_BLANK_MODE;
+ }
+
+ status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Failed to read DEV starter version.\n");
+ return status;
+ }
+ nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+ nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
+
+ status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
+ return status;
+ }
+ status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
+ return status;
+ }
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ status = ice_discover_flash_size(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM init error: failed to discover flash size.\n");
+ return status;
+ }
+
+ switch (hw->device_id) {
+ /* the following devices do not have boot_cfg_tlv yet */
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_QSFP:
+ return status;
+ default:
+ break;
+ }
+
+ status = ice_get_orom_ver_info(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
+ return status;
+ }
+
+ /* read the netlist version information */
+ status = ice_get_netlist_ver_info(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buf) from the Shadow RAM using ice_read_sr_buf_aq.
+ * The buffer read is preceded by taking NVM ownership and followed by its
+ * release.
+ */
+enum ice_status
+ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
+{
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (!status) {
+ status = ice_read_sr_buf_aq(hw, offset, words, data);
+ ice_release_nvm(hw);
+ }
+
+ return status;
+}
+
+/**
+ * __ice_write_sr_word - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the Shadow RAM using the ice_write_sr_aq method.
+ * NVM ownership has to be acquired and released (on ARQ completion event
+ * reception) by the caller. To commit the Shadow RAM to the NVM, the update
+ * checksum function should be called.
+ */
+enum ice_status
+__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data)
+{
+ __le16 data_local = CPU_TO_LE16(*data);
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Value 0x00 below means that we treat SR as a flat mem */
+ return ice_write_sr_aq(hw, offset, 1, &data_local, false);
+}
+
+/**
+ * __ice_write_sr_buf - Writes Shadow RAM buf
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller on ARQ completion event reception. To commit the Shadow RAM
+ * to the NVM, the update checksum function should be called.
+ */
+enum ice_status
+__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data)
+{
+ enum ice_status status;
+ __le16 *data_local;
+ void *vmem;
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ vmem = ice_calloc(hw, words, sizeof(u16));
+ if (!vmem)
+ return ICE_ERR_NO_MEMORY;
+ data_local = (_FORCE_ __le16 *)vmem;
+
+ for (i = 0; i < words; i++)
+ data_local[i] = CPU_TO_LE16(data[i]);
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ status = ice_write_sr_aq(hw, offset, words, data_local, false);
+
+ ice_free(hw, vmem);
+
+ return status;
+}
+
+/**
+ * ice_calc_sr_checksum - Calculates and returns Shadow RAM SW checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
+ *
+ * This function calculates SW Checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
+ * is customer specific and unknown. Therefore, this function skips the maximum
+ * possible VPD size (1kB).
+ */
+static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module;
+ void *vmem;
+ u16 *data;
+ u16 i;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ vmem = ice_calloc(hw, ICE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
+ if (!vmem)
+ return ICE_ERR_NO_MEMORY;
+ data = (u16 *)vmem;
+
+ /* read pointer to VPD area */
+ status = ice_read_sr_word_aq(hw, ICE_SR_VPD_PTR, &vpd_module);
+ if (status)
+ goto ice_calc_sr_checksum_exit;
+
+ /* read pointer to PCIe Alt Auto-load module */
+ status = ice_read_sr_word_aq(hw, ICE_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (status)
+ goto ice_calc_sr_checksum_exit;
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_words; i++) {
+ /* Read SR page */
+ if ((i % ICE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS;
+
+ status = ice_read_sr_buf_aq(hw, i, &words, data);
+ if (status != ICE_SUCCESS)
+ goto ice_calc_sr_checksum_exit;
+ }
+
+ /* Skip Checksum word */
+ if (i == ICE_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if ((i >= (u32)vpd_module) &&
+ (i < ((u32)vpd_module + ICE_SR_VPD_SIZE_WORDS)))
+ continue;
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if ((i >= (u32)pcie_alt_module) &&
+ (i < ((u32)pcie_alt_module + ICE_SR_PCIE_ALT_SIZE_WORDS)))
+ continue;
+
+ checksum_local += data[i % ICE_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
+ *checksum = (u16)ICE_SR_SW_CHECKSUM_BASE - checksum_local;
+
+ice_calc_sr_checksum_exit:
+ ice_free(hw, vmem);
+ return status;
+}
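The computed value is the 16-bit complement of the covered sum relative to the
checksum base, so re-adding the covered words to the stored checksum
reproduces the base (modulo 2^16); ice_validate_sr_checksum simply recomputes
the same value and compares it against the stored word. A small arithmetic
illustration with made-up numbers:

	/* If the covered words summed to 0x1234 and the base were 0xBABA
	 * (values illustrative), then:
	 *   checksum = 0xBABA - 0x1234 = 0xA886
	 *   0x1234 + 0xA886 == 0xBABA (mod 0x10000)
	 */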
+
+/**
+ * ice_update_sr_checksum - Updates the Shadow RAM SW checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired before calling this function and released
+ * on ARQ completion event reception by the caller.
+ * This function will commit SR to NVM.
+ */
+enum ice_status ice_update_sr_checksum(struct ice_hw *hw)
+{
+ enum ice_status status;
+ __le16 le_sum;
+ u16 checksum;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_calc_sr_checksum(hw, &checksum);
+ if (!status) {
+ le_sum = CPU_TO_LE16(checksum);
+ status = ice_write_sr_aq(hw, ICE_SR_SW_CHECKSUM_WORD, 1,
+ &le_sum, true);
+ }
+ return status;
+}
+
+/**
+ * ice_validate_sr_checksum - Validate Shadow RAM SW checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs checksum calculation and validates the Shadow RAM SW checksum.
+ * If the caller does not need the checksum, the pointer can be NULL.
+ */
+enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum)
+{
+ enum ice_status status;
+ u16 checksum_local;
+ u16 checksum_sr;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (!status) {
+ status = ice_calc_sr_checksum(hw, &checksum_local);
+ ice_release_nvm(hw);
+ if (status)
+ return status;
+ } else {
+ return status;
+ }
+
+ ice_read_sr_word(hw, ICE_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ status = ICE_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+ return status;
+}
+
+/**
+ * ice_nvm_validate_checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity (0x0706)
+ */
+enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
+{
+ struct ice_aqc_nvm_checksum *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
+ cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ ice_release_nvm(hw);
+
+ if (!status)
+ if (LE16_TO_CPU(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
+ status = ICE_ERR_NVM_CHECKSUM;
+
+ return status;
+}
+
+/**
+ * ice_nvm_access_get_features - Return the NVM access features structure
+ * @cmd: NVM access command to process
+ * @data: storage for the driver NVM features
+ *
+ * Fill in the data section of the NVM access request with a copy of the NVM
+ * features structure.
+ */
+enum ice_status
+ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
+ union ice_nvm_access_data *data)
+{
+ /* The provided data_size must be at least as large as our NVM
+ * features structure. A larger size should not be treated as an
+	 * error, to allow future extensions to the features structure to
+ * work on older drivers.
+ */
+ if (cmd->data_size < sizeof(struct ice_nvm_features))
+ return ICE_ERR_NO_MEMORY;
+
+ /* Initialize the data buffer to zeros */
+ ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);
+
+ /* Fill in the features data */
+ data->drv_features.major = ICE_NVM_ACCESS_MAJOR_VER;
+ data->drv_features.minor = ICE_NVM_ACCESS_MINOR_VER;
+ data->drv_features.size = sizeof(struct ice_nvm_features);
+ data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_nvm_access_get_module - Helper function to read module value
+ * @cmd: NVM access command structure
+ *
+ * Reads the module value out of the NVM access config field.
+ */
+u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd)
+{
+ return ((cmd->config & ICE_NVM_CFG_MODULE_M) >> ICE_NVM_CFG_MODULE_S);
+}
+
+/**
+ * ice_nvm_access_get_flags - Helper function to read flags value
+ * @cmd: NVM access command structure
+ *
+ * Reads the flags value out of the NVM access config field.
+ */
+u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd)
+{
+ return ((cmd->config & ICE_NVM_CFG_FLAGS_M) >> ICE_NVM_CFG_FLAGS_S);
+}
+
+/**
+ * ice_nvm_access_get_adapter - Helper function to read adapter info
+ * @cmd: NVM access command structure
+ *
+ * Read the adapter info value out of the NVM access config field.
+ */
+u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd)
+{
+ return ((cmd->config & ICE_NVM_CFG_ADAPTER_INFO_M) >>
+ ICE_NVM_CFG_ADAPTER_INFO_S);
+}
+
+/**
+ * ice_validate_nvm_rw_reg - Check that an NVM access request is valid
+ * @cmd: NVM access command structure
+ *
+ * Validates that an NVM access structure requests a read or write of a valid
+ * register offset. First validates that the module and flags are correct, and
+ * then ensures that the register offset is one of the accepted registers.
+ */
+static enum ice_status
+ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
+{
+ u32 module, flags, offset;
+ u16 i;
+
+ module = ice_nvm_access_get_module(cmd);
+ flags = ice_nvm_access_get_flags(cmd);
+ offset = cmd->offset;
+
+ /* Make sure the module and flags indicate a read/write request */
+ if (module != ICE_NVM_REG_RW_MODULE ||
+ flags != ICE_NVM_REG_RW_FLAGS ||
+ cmd->data_size != FIELD_SIZEOF(union ice_nvm_access_data, regval))
+ return ICE_ERR_PARAM;
+
+ switch (offset) {
+ case GL_HICR:
+ case GL_HICR_EN: /* Note, this register is read only */
+ case GL_FWSTS:
+ case GL_MNG_FWSM:
+ case GLGEN_CSR_DEBUG_C:
+ case GLGEN_RSTAT:
+ case GLPCI_LBARCTRL:
+ case GLNVM_GENS:
+ case GLNVM_FLA:
+ case PF_FUNC_RID:
+ return ICE_SUCCESS;
+ default:
+ break;
+ }
+
+ for (i = 0; i <= ICE_NVM_ACCESS_GL_HIDA_MAX; i++)
+ if (offset == (u32)GL_HIDA(i))
+ return ICE_SUCCESS;
+
+ for (i = 0; i <= ICE_NVM_ACCESS_GL_HIBA_MAX; i++)
+ if (offset == (u32)GL_HIBA(i))
+ return ICE_SUCCESS;
+
+ /* All other register offsets are not valid */
+ return ICE_ERR_OUT_OF_RANGE;
+}
+
+/**
+ * ice_nvm_access_read - Handle an NVM read request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: storage for the register value read
+ *
+ * Process an NVM access request to read a register.
+ */
+enum ice_status
+ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
+ union ice_nvm_access_data *data)
+{
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Always initialize the output data, even on failure */
+ ice_memset(data, 0, cmd->data_size, ICE_NONDMA_MEM);
+
+ /* Make sure this is a valid read/write access request */
+ status = ice_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ ice_debug(hw, ICE_DBG_NVM, "NVM access: reading register %08x\n",
+ cmd->offset);
+
+ /* Read the register and store the contents in the data field */
+ data->regval = rd32(hw, cmd->offset);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_nvm_access_write - Handle an NVM write request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: NVM access data to write
+ *
+ * Process an NVM access request to write a register.
+ */
+enum ice_status
+ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
+ union ice_nvm_access_data *data)
+{
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Make sure this is a valid read/write access request */
+ status = ice_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ /* Reject requests to write to read-only registers */
+ switch (cmd->offset) {
+ case GL_HICR_EN:
+ case GLGEN_RSTAT:
+ return ICE_ERR_OUT_OF_RANGE;
+ default:
+ break;
+ }
+
+ ice_debug(hw, ICE_DBG_NVM,
+ "NVM access: writing register %08x with value %08x\n",
+ cmd->offset, data->regval);
+
+ /* Write the data field to the specified register */
+ wr32(hw, cmd->offset, data->regval);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_handle_nvm_access - Handle an NVM access request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command info
+ * @data: pointer to read or return data
+ *
+ * Process an NVM access request. Read the command structure information and
+ * determine if it is valid. If not, report an error indicating the command
+ * was invalid.
+ *
+ * For valid commands, perform the necessary function, copying the data into
+ * the provided data buffer.
+ */
+enum ice_status
+ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
+ union ice_nvm_access_data *data)
+{
+ u32 module, flags, adapter_info;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ /* Extended flags are currently reserved and must be zero */
+ if ((cmd->config & ICE_NVM_CFG_EXT_FLAGS_M) != 0)
+ return ICE_ERR_PARAM;
+
+ /* Adapter info must match the HW device ID */
+ adapter_info = ice_nvm_access_get_adapter(cmd);
+ if (adapter_info != hw->device_id)
+ return ICE_ERR_PARAM;
+
+ switch (cmd->command) {
+ case ICE_NVM_CMD_READ:
+ module = ice_nvm_access_get_module(cmd);
+ flags = ice_nvm_access_get_flags(cmd);
+
+ /* Getting the driver's NVM features structure shares the same
+ * command type as reading a register. Read the config field
+ * to determine if this is a request to get features.
+ */
+ if (module == ICE_NVM_GET_FEATURES_MODULE &&
+ flags == ICE_NVM_GET_FEATURES_FLAGS &&
+ cmd->offset == 0)
+ return ice_nvm_access_get_features(cmd, data);
+ else
+ return ice_nvm_access_read(hw, cmd, data);
+ case ICE_NVM_CMD_WRITE:
+ return ice_nvm_access_write(hw, cmd, data);
+ default:
+ return ICE_ERR_PARAM;
+ }
+}
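
For orientation, here is a minimal sketch of how a caller might drive ice_handle_nvm_access() and translate its status codes into errnos. The example_nvm_ioctl name and the specific errno mapping are illustrative assumptions, not part of this diff.

/* Illustrative only: forward a user-supplied NVM access command.
 * The errno mapping below is an assumption for the sketch, not the
 * driver's actual conversion.
 */
static int
example_nvm_ioctl(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
		  union ice_nvm_access_data *data)
{
	enum ice_status status;

	status = ice_handle_nvm_access(hw, cmd, data);
	if (status == ICE_ERR_PARAM)
		return (EINVAL);	/* malformed command or adapter mismatch */
	if (status == ICE_ERR_OUT_OF_RANGE)
		return (EPERM);		/* register not in the allowed set */
	if (status)
		return (EIO);

	/* For ICE_NVM_CMD_READ, data->regval (or the features structure)
	 * now holds the result and can be copied back to the caller.
	 */
	return (0);
}
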
+
Index: sys/dev/ice/ice_opts.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_opts.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_opts.h
+ * @brief header including kernel option files
+ *
+ * Contains the various opt_*.h header files which set various macros
+ * indicating features and functionality which depend on kernel configuration.
+ */
+
+#ifndef _ICE_OPTS_H_
+#define _ICE_OPTS_H_
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
+
+#endif
Index: sys/dev/ice/ice_osdep.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_osdep.h
@@ -0,0 +1,521 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_osdep.h
+ * @brief OS compatibility layer
+ *
+ * Contains various definitions and functions which are part of an OS
+ * compatibility layer for sharing code with other operating systems.
+ */
+#ifndef _ICE_OSDEP_H_
+#define _ICE_OSDEP_H_
+
+#include <sys/endian.h>
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/bus_dma.h>
+#include <netinet/in.h>
+#include <sys/counter.h>
+#include <sys/sbuf.h>
+
+#include "ice_alloc.h"
+
+#define ICE_INTEL_VENDOR_ID 0x8086
+
+#define ICE_STR_BUF_LEN 32
+
+struct ice_hw;
+
+device_t ice_hw_to_dev(struct ice_hw *hw);
+
+/* configure hw->debug_mask to enable debug prints */
+void ice_debug(struct ice_hw *hw, uint64_t mask, char *fmt, ...) __printflike(3, 4);
+void ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
+ uint32_t groupsize, uint8_t *buf, size_t len);
+
+#define ice_info(_hw, _fmt, args...) \
+ device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)
+
+#define ice_warn(_hw, _fmt, args...) \
+ device_printf(ice_hw_to_dev(_hw), (_fmt), ##args)
+
+#define DIVIDE_AND_ROUND_UP howmany
+#define ROUND_UP roundup
+
+uint32_t rd32(struct ice_hw *hw, uint32_t reg);
+uint64_t rd64(struct ice_hw *hw, uint32_t reg);
+void wr32(struct ice_hw *hw, uint32_t reg, uint32_t val);
+void wr64(struct ice_hw *hw, uint32_t reg, uint64_t val);
+
+#define ice_flush(_hw) rd32((_hw), GLGEN_STAT)
+
+MALLOC_DECLARE(M_ICE_OSDEP);
+
+/**
+ * ice_calloc - Allocate an array of elements
+ * @hw: the hardware private structure
+ * @count: number of elements to allocate
+ * @size: the size of each element
+ *
+ * Allocate memory for an array of count items, each of size bytes. Note that the OS
+ * compatibility layer assumes all allocation functions will provide zero'd
+ * memory.
+ */
+static inline void *
+ice_calloc(struct ice_hw __unused *hw, size_t count, size_t size)
+{
+ return malloc(count * size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
+}
+
+/**
+ * ice_malloc - Allocate memory of a specified size
+ * @hw: the hardware private structure
+ * @size: the size to allocate
+ *
+ * Allocates memory of the specified size. Note that the OS compatibility
+ * layer assumes that all allocations will provide zero'd memory.
+ */
+static inline void *
+ice_malloc(struct ice_hw __unused *hw, size_t size)
+{
+ return malloc(size, M_ICE_OSDEP, M_ZERO | M_NOWAIT);
+}
+
+/**
+ * ice_memdup - Allocate a copy of some other memory
+ * @hw: private hardware structure
+ * @src: the source to copy from
+ * @size: allocation size
+ * @dir: the direction of copying
+ *
+ * Allocate memory of the specified size, and copy bytes from the src to fill
+ * it. We don't need to zero this memory as we immediately initialize it by
+ * copying from the src pointer.
+ */
+static inline void *
+ice_memdup(struct ice_hw __unused *hw, const void *src, size_t size,
+ enum ice_memcpy_type __unused dir)
+{
+ void *dst = malloc(size, M_ICE_OSDEP, M_NOWAIT);
+
+ if (dst != NULL)
+ memcpy(dst, src, size);
+
+ return dst;
+}
+
+/**
+ * ice_free - Free previously allocated memory
+ * @hw: the hardware private structure
+ * @mem: pointer to the memory to free
+ *
+ * Free memory that was previously allocated by ice_calloc, ice_malloc, or
+ * ice_memdup.
+ */
+static inline void
+ice_free(struct ice_hw __unused *hw, void *mem)
+{
+ free(mem, M_ICE_OSDEP);
+}
+
+/* These are macros in order to drop the unused direction enumeration constant */
+#define ice_memset(addr, c, len, unused) memset((addr), (c), (len))
+#define ice_memcpy(dst, src, len, unused) memcpy((dst), (src), (len))
+
+void ice_usec_delay(uint32_t time, bool sleep);
+void ice_msec_delay(uint32_t time, bool sleep);
+void ice_msec_pause(uint32_t time);
+void ice_msec_spin(uint32_t time);
+
+#define UNREFERENCED_PARAMETER(_p) _p = _p
+#define UNREFERENCED_1PARAMETER(_p) do { \
+ UNREFERENCED_PARAMETER(_p); \
+} while (0)
+#define UNREFERENCED_2PARAMETER(_p, _q) do { \
+ UNREFERENCED_PARAMETER(_p); \
+ UNREFERENCED_PARAMETER(_q); \
+} while (0)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \
+ UNREFERENCED_PARAMETER(_p); \
+ UNREFERENCED_PARAMETER(_q); \
+ UNREFERENCED_PARAMETER(_r); \
+} while (0)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \
+ UNREFERENCED_PARAMETER(_p); \
+ UNREFERENCED_PARAMETER(_q); \
+ UNREFERENCED_PARAMETER(_r); \
+ UNREFERENCED_PARAMETER(_s); \
+} while (0)
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) do { \
+ UNREFERENCED_PARAMETER(_p); \
+ UNREFERENCED_PARAMETER(_q); \
+ UNREFERENCED_PARAMETER(_r); \
+ UNREFERENCED_PARAMETER(_s); \
+ UNREFERENCED_PARAMETER(_t); \
+} while (0)
+
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#define MAKEMASK(_m, _s) ((_m) << (_s))
+
+#define LIST_HEAD_TYPE ice_list_head
+#define LIST_ENTRY_TYPE ice_list_node
+
+/**
+ * @struct ice_list_node
+ * @brief simplified linked list node API
+ *
+ * Represents a node in a linked list, which can be embedded into a structure
+ * to allow that structure to be inserted into a linked list. Access to the
+ * contained structure is done via __containerof
+ */
+struct ice_list_node {
+ LIST_ENTRY(ice_list_node) entries;
+};
+
+/**
+ * @struct ice_list_head
+ * @brief simplified linked list head API
+ *
+ * Represents the head of a linked list. The linked list should consist of
+ * a series of ice_list_node structures embedded into another structure
+ * accessed using __containerof. This way, the ice_list_head doesn't need to
+ * know the type of the structure it contains.
+ */
+LIST_HEAD(ice_list_head, ice_list_node);
+
+#define INIT_LIST_HEAD LIST_INIT
+/* LIST_EMPTY doesn't need to be changed */
+#define LIST_ADD(entry, head) LIST_INSERT_HEAD(head, entry, entries)
+#define LIST_ADD_AFTER(entry, elem) LIST_INSERT_AFTER(elem, entry, entries)
+#define LIST_DEL(entry) LIST_REMOVE(entry, entries)
+#define _osdep_LIST_ENTRY(ptr, type, member) \
+ __containerof(ptr, type, member)
+#define LIST_FIRST_ENTRY(head, type, member) \
+ _osdep_LIST_ENTRY(LIST_FIRST(head), type, member)
+#define LIST_NEXT_ENTRY(ptr, unused, member) \
+ _osdep_LIST_ENTRY(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)
+#define LIST_REPLACE_INIT(old_head, new_head) do { \
+ __typeof(new_head) _new_head = (new_head); \
+ LIST_INIT(_new_head); \
+ LIST_SWAP(old_head, _new_head, ice_list_node, entries); \
+} while (0)
+
+#define LIST_ENTRY_SAFE(_ptr, _type, _member) \
+({ __typeof(_ptr) ____ptr = (_ptr); \
+ ____ptr ? _osdep_LIST_ENTRY(____ptr, _type, _member) : NULL; \
+})
+
+/**
+ * ice_get_list_tail - Return the pointer to the last node in the list
+ * @head: the pointer to the head of the list
+ *
+ * A helper function for implementing LIST_ADD_TAIL and LIST_LAST_ENTRY.
+ * Returns the pointer to the last node in the list, or NULL if the list is
+ * empty.
+ *
+ * Note: due to the list implementation this is O(N), where N is the size of
+ * the list. An O(1) implementation requires replacing the underlying list
+ * datastructure with one that has a tail pointer. This is problematic,
+ * because using a simple TAILQ would require that the addition and deletion
+ * be given the head of the list.
+ */
+static inline struct ice_list_node *
+ice_get_list_tail(struct ice_list_head *head)
+{
+ struct ice_list_node *node = LIST_FIRST(head);
+
+ if (node == NULL)
+ return NULL;
+ while (LIST_NEXT(node, entries) != NULL)
+ node = LIST_NEXT(node, entries);
+
+ return node;
+}
+
+/* TODO: This is O(N). An O(1) implementation would require a different
+ * underlying list structure, such as a circularly linked list. */
+#define LIST_ADD_TAIL(entry, head) do { \
+ struct ice_list_node *node = ice_get_list_tail(head); \
+ \
+ if (node == NULL) { \
+ LIST_ADD(entry, head); \
+ } else { \
+ LIST_INSERT_AFTER(node, entry, entries); \
+ } \
+} while (0)
+
+#define LIST_LAST_ENTRY(head, type, member) \
+ LIST_ENTRY_SAFE(ice_get_list_tail(head), type, member)
+
+#define LIST_FIRST_ENTRY_SAFE(head, type, member) \
+ LIST_ENTRY_SAFE(LIST_FIRST(head), type, member)
+
+#define LIST_NEXT_ENTRY_SAFE(ptr, member) \
+ LIST_ENTRY_SAFE(LIST_NEXT(&(ptr->member), entries), __typeof(*ptr), member)
+
+#define LIST_FOR_EACH_ENTRY(pos, head, unused, member) \
+ for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member); \
+ pos; \
+ pos = LIST_NEXT_ENTRY_SAFE(pos, member))
+
+#define LIST_FOR_EACH_ENTRY_SAFE(pos, n, head, unused, member) \
+ for (pos = LIST_FIRST_ENTRY_SAFE(head, __typeof(*pos), member); \
+ pos && ({ n = LIST_NEXT_ENTRY_SAFE(pos, member); 1; }); \
+ pos = n)
+
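
As a usage illustration of the list shims above, here is a hedged sketch with a hypothetical element type; struct example_node and example_list_sum are not part of this diff.

/* Hypothetical element type embedding the list node */
struct example_node {
	struct LIST_ENTRY_TYPE entry;
	int value;
};

static int
example_list_sum(struct ice_list_head *head, struct example_node *n)
{
	struct example_node *pos;
	int sum = 0;

	LIST_ADD(&n->entry, head);		/* insert at the head */

	LIST_FOR_EACH_ENTRY(pos, head, example_node, entry)
		sum += pos->value;		/* __containerof recovers pos */

	LIST_DEL(&n->entry);
	return (sum);
}
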
+#define STATIC static
+
+#define NTOHS ntohs
+#define NTOHL ntohl
+#define HTONS htons
+#define HTONL htonl
+#define LE16_TO_CPU le16toh
+#define LE32_TO_CPU le32toh
+#define LE64_TO_CPU le64toh
+#define CPU_TO_LE16 htole16
+#define CPU_TO_LE32 htole32
+#define CPU_TO_LE64 htole64
+#define CPU_TO_BE16 htobe16
+#define CPU_TO_BE32 htobe32
+
+#define SNPRINTF snprintf
+
+/**
+ * @typedef u8
+ * @brief compatibility typedef for uint8_t
+ */
+typedef uint8_t u8;
+
+/**
+ * @typedef u16
+ * @brief compatibility typedef for uint16_t
+ */
+typedef uint16_t u16;
+
+/**
+ * @typedef u32
+ * @brief compatibility typedef for uint32_t
+ */
+typedef uint32_t u32;
+
+/**
+ * @typedef u64
+ * @brief compatibility typedef for uint64_t
+ */
+typedef uint64_t u64;
+
+/**
+ * @typedef s8
+ * @brief compatibility typedef for int8_t
+ */
+typedef int8_t s8;
+
+/**
+ * @typedef s16
+ * @brief compatibility typedef for int16_t
+ */
+typedef int16_t s16;
+
+/**
+ * @typedef s32
+ * @brief compatibility typedef for int32_t
+ */
+typedef int32_t s32;
+
+/**
+ * @typedef s64
+ * @brief compatibility typedef for int64_t
+ */
+typedef int64_t s64;
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+#define __be16 u16
+#define __be32 u32
+#define __be64 u64
+
+#define ice_hweight8(x) bitcount16((u8)x)
+#define ice_hweight16(x) bitcount16(x)
+#define ice_hweight32(x) bitcount32(x)
+#define ice_hweight64(x) bitcount64(x)
+
+/**
+ * @struct ice_dma_mem
+ * @brief DMA memory allocation
+ *
+ * Contains DMA allocation bits, used to simplify DMA allocations.
+ */
+struct ice_dma_mem {
+ void *va;
+ uint64_t pa;
+ size_t size;
+
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ bus_dma_segment_t seg;
+};
+
+
+void * ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *mem, u64 size);
+void ice_free_dma_mem(struct ice_hw __unused *hw, struct ice_dma_mem *mem);
+
+/**
+ * @struct ice_lock
+ * @brief simplified lock API
+ *
+ * Contains a simple lock implementation used to lock various resources.
+ */
+struct ice_lock {
+ struct mtx mutex;
+ char name[ICE_STR_BUF_LEN];
+};
+
+extern u16 ice_lock_count;
+
+/**
+ * ice_init_lock - Initialize a lock for use
+ * @lock: the lock memory to initialize
+ *
+ * OS compatibility layer to provide a simple locking mechanism. We use
+ * a mutex for this purpose.
+ */
+static inline void
+ice_init_lock(struct ice_lock *lock)
+{
+ /*
+ * Make each lock unique by incrementing a counter each time this
+ * function is called. Use of a u16 allows 65535 possible locks before
+ * we'd hit a duplicate.
+ */
+ memset(lock->name, 0, sizeof(lock->name));
+ snprintf(lock->name, ICE_STR_BUF_LEN, "ice_lock_%u", ice_lock_count++);
+ mtx_init(&lock->mutex, lock->name, NULL, MTX_DEF);
+}
+
+/**
+ * ice_acquire_lock - Acquire the lock
+ * @lock: the lock to acquire
+ *
+ * Acquires the mutex specified by the lock pointer.
+ */
+static inline void
+ice_acquire_lock(struct ice_lock *lock)
+{
+ mtx_lock(&lock->mutex);
+}
+
+/**
+ * ice_release_lock - Release the lock
+ * @lock: the lock to release
+ *
+ * Releases the mutex specified by the lock pointer.
+ */
+static inline void
+ice_release_lock(struct ice_lock *lock)
+{
+ mtx_unlock(&lock->mutex);
+}
+
+/**
+ * ice_destroy_lock - Destroy the lock to de-allocate it
+ * @lock: the lock to destroy
+ *
+ * Destroys a previously initialized lock. We only do this if the mutex was
+ * previously initialized.
+ */
+static inline void
+ice_destroy_lock(struct ice_lock *lock)
+{
+ if (mtx_initialized(&lock->mutex))
+ mtx_destroy(&lock->mutex);
+ memset(lock->name, 0, sizeof(lock->name));
+}
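
Taken together, a minimal sketch of the expected lock lifecycle looks like the following; the shared-counter example is an assumption for illustration only.

/* Illustrative lock lifecycle: init once, acquire/release around the
 * shared state, destroy on teardown.
 */
static void
example_lock_usage(void)
{
	struct ice_lock lock;
	static int shared_counter;

	ice_init_lock(&lock);

	ice_acquire_lock(&lock);
	shared_counter++;		/* state shared with other threads */
	ice_release_lock(&lock);

	ice_destroy_lock(&lock);
}
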
+
+/* Some function parameters are unused outside of MPASS/KASSERT macros. Rather
+ * than marking these as __unused all the time, mark them as __invariant_only,
+ * and define this to __unused when INVARIANTS is disabled. Otherwise, define
+ * it empty so that __invariant_only parameters are caught as unused by the
+ * INVARIANTS build.
+ */
+#ifndef INVARIANTS
+#define __invariant_only __unused
+#else
+#define __invariant_only
+#endif
+
+#define __ALWAYS_UNUSED __unused
+
+/**
+ * ice_ilog2 - Calculate the integer log base 2 of a 64bit value
+ * @n: 64bit number
+ *
+ * Calculates the integer log base 2 of a 64bit value, rounded down.
+ *
+ * @remark The integer log base 2 of zero is technically undefined, but this
+ * function will return 0 in that case.
+ *
+ */
+static inline int
+ice_ilog2(u64 n) {
+ if (n == 0)
+ return 0;
+ return flsll(n) - 1;
+}
+
+/**
+ * ice_is_pow2 - Check if the value is a power of 2
+ * @n: 64bit number
+ *
+ * Check if the given value is a power of 2.
+ *
+ * @remark FreeBSD's powerof2 function treats zero as a power of 2, while this
+ * function does not.
+ *
+ * @returns true or false
+ */
+static inline bool
+ice_is_pow2(u64 n) {
+ if (n == 0)
+ return false;
+ return powerof2(n);
+}
+#endif /* _ICE_OSDEP_H_ */
Index: sys/dev/ice/ice_osdep.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_osdep.c
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_osdep.c
+ * @brief Functions used to implement OS compatibility layer
+ *
+ * Contains functions used by ice_osdep.h to implement the OS compatibility
+ * layer used by some of the hardware files. Specifically, it is for the bits
+ * of OS compatibility which don't make sense as macros or inline functions.
+ */
+
+#include "ice_common.h"
+#include "ice_iflib.h"
+#include <machine/stdarg.h>
+#include <sys/time.h>
+
+/**
+ * @var M_ICE_OSDEP
+ * @brief OS compatibility layer allocation type
+ *
+ * malloc(9) allocation type used by the OS compatibility layer for
+ * distinguishing allocations by this layer from those of the rest of the
+ * driver.
+ */
+MALLOC_DEFINE(M_ICE_OSDEP, "ice-osdep", "Intel(R) 100Gb Network Driver osdep allocations");
+
+/**
+ * @var ice_lock_count
+ * @brief Global count of # of ice_lock mutexes initialized
+ *
+ * A global count of the total number of times that ice_init_lock has been
+ * called. This is used to generate unique lock names for each ice_lock, to
+ * aid in witness lock checking.
+ */
+u16 ice_lock_count = 0;
+
+static void ice_dmamap_cb(void *arg, bus_dma_segment_t * segs, int __unused nseg, int error);
+
+/**
+ * ice_hw_to_dev - Given a hw private struct, find the associated device_t
+ * @hw: the hardware private structure
+ *
+ * Given a hw structure pointer, lookup the softc and extract the device
+ * pointer. Assumes that hw is embedded within the ice_softc, instead of being
+ * allocated separately, so that __containerof math will work.
+ *
+ * This can't be defined in ice_osdep.h as it depends on the complete
+ * definition of struct ice_softc. That can't be easily included in
+ * ice_osdep.h without creating circular header dependencies.
+ */
+device_t
+ice_hw_to_dev(struct ice_hw *hw) {
+ struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
+
+ return sc->dev;
+}
+
+/**
+ * ice_debug - Log a debug message if the type is enabled
+ * @hw: device private hardware structure
+ * @mask: the debug message type
+ * @fmt: printf format specifier
+ *
+ * Check if hw->debug_mask has enabled the given message type. If so, log the
+ * message to the console using vprintf. Mimic the output of device_printf by
+ * using device_print_prettyname().
+ */
+void
+ice_debug(struct ice_hw *hw, uint64_t mask, char *fmt, ...)
+{
+ device_t dev = ice_hw_to_dev(hw);
+ va_list args;
+
+ if (!(mask & hw->debug_mask))
+ return;
+
+ device_print_prettyname(dev);
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+}
+
+/**
+ * ice_debug_array - Format and print an array of values to the console
+ * @hw: private hardware structure
+ * @mask: the debug message type
+ * @rowsize: preferred number of rows to use
+ * @groupsize: preferred size in bytes to print each chunk
+ * @buf: the array buffer to print
+ * @len: size of the array buffer
+ *
+ * Format the given array as a series of uint8_t values with hexadecimal
+ * notation and log the contents to the console log.
+ *
+ * TODO: Currently only supports a group size of 1, due to the way hexdump is
+ * implemented.
+ */
+void
+ice_debug_array(struct ice_hw *hw, uint64_t mask, uint32_t rowsize,
+ uint32_t __unused groupsize, uint8_t *buf, size_t len)
+{
+ device_t dev = ice_hw_to_dev(hw);
+ char prettyname[20];
+
+ if (!(mask & hw->debug_mask))
+ return;
+
+ /* Format the device header to a string */
+ snprintf(prettyname, sizeof(prettyname), "%s: ", device_get_nameunit(dev));
+
+ /* Make sure the row-size isn't too large */
+ if (rowsize > 0xFF)
+ rowsize = 0xFF;
+
+ hexdump(buf, len, prettyname, HD_OMIT_CHARS | rowsize);
+}
+
+/**
+ * rd32 - Read a 32bit hardware register value
+ * @hw: the private hardware structure
+ * @reg: register address to read
+ *
+ * Read the specified 32bit register value from BAR0 and return its contents.
+ */
+uint32_t
+rd32(struct ice_hw *hw, uint32_t reg)
+{
+ struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
+
+ return bus_space_read_4(sc->bar0.tag, sc->bar0.handle, reg);
+}
+
+/**
+ * rd64 - Read a 64bit hardware register value
+ * @hw: the private hardware structure
+ * @reg: register address to read
+ *
+ * Read the specified 64bit register value from BAR0 and return its contents.
+ *
+ * @pre For 32-bit builds, assumes that the 64bit register read can be
+ * safely broken up into two 32-bit register reads.
+ */
+uint64_t
+rd64(struct ice_hw *hw, uint32_t reg)
+{
+ struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
+ uint64_t data;
+
+#ifdef __amd64__
+ data = bus_space_read_8(sc->bar0.tag, sc->bar0.handle, reg);
+#else
+ /*
+ * bus_space_read_8 isn't supported on 32bit platforms, so we fall
+ * back to using two bus_space_read_4 calls.
+ */
+ data = bus_space_read_4(sc->bar0.tag, sc->bar0.handle, reg);
+ data |= ((uint64_t)bus_space_read_4(sc->bar0.tag, sc->bar0.handle, reg + 4)) << 32;
+#endif
+
+ return data;
+}
+
+/**
+ * wr32 - Write a 32bit hardware register
+ * @hw: the private hardware structure
+ * @reg: the register address to write to
+ * @val: the 32bit value to write
+ *
+ * Write the specified 32bit value to a register address in BAR0.
+ */
+void
+wr32(struct ice_hw *hw, uint32_t reg, uint32_t val)
+{
+ struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
+
+ bus_space_write_4(sc->bar0.tag, sc->bar0.handle, reg, val);
+}
+
+/**
+ * wr64 - Write a 64bit hardware register
+ * @hw: the private hardware structure
+ * @reg: the register address to write to
+ * @val: the 64bit value to write
+ *
+ * Write the specified 64bit value to a register address in BAR0.
+ *
+ * @pre For 32-bit builds, assumes that the 64bit register write can be safely
+ * broken up into two 32-bit register writes.
+ */
+void
+wr64(struct ice_hw *hw, uint32_t reg, uint64_t val)
+{
+ struct ice_softc *sc = __containerof(hw, struct ice_softc, hw);
+
+#ifdef __amd64__
+ bus_space_write_8(sc->bar0.tag, sc->bar0.handle, reg, val);
+#else
+ uint32_t lo_val, hi_val;
+
+ /*
+ * bus_space_write_8 isn't supported on 32bit platforms, so we fall
+ * back to using two bus_space_write_4 calls.
+ */
+ lo_val = (uint32_t)val;
+ hi_val = (uint32_t)(val >> 32);
+ bus_space_write_4(sc->bar0.tag, sc->bar0.handle, reg, lo_val);
+ bus_space_write_4(sc->bar0.tag, sc->bar0.handle, reg + 4, hi_val);
+#endif
+}
+
+/**
+ * ice_usec_delay - Delay for the specified number of microseconds
+ * @time: microseconds to delay
+ * @sleep: if true, sleep where possible
+ *
+ * If sleep is true, and if the current thread is allowed to sleep, pause so
+ * that another thread can execute. Otherwise, use DELAY to spin the thread
+ * instead.
+ */
+void
+ice_usec_delay(uint32_t time, bool sleep)
+{
+ if (sleep && THREAD_CAN_SLEEP())
+ pause("ice_usec_delay", USEC_2_TICKS(time));
+ else
+ DELAY(time);
+}
+
+/**
+ * ice_msec_delay - Delay for the specified number of milliseconds
+ * @time: milliseconds to delay
+ * @sleep: if true, sleep where possible
+ *
+ * If sleep is true, and if the current thread is allowed to sleep, pause so
+ * that another thread can execute. Otherwise, use DELAY to spin the thread
+ * instead.
+ */
+void
+ice_msec_delay(uint32_t time, bool sleep)
+{
+ if (sleep && THREAD_CAN_SLEEP())
+ pause("ice_msec_delay", MSEC_2_TICKS(time));
+ else
+ DELAY(time * 1000);
+}
+
+/**
+ * ice_msec_pause - pause (sleep) the thread for a time in milliseconds
+ * @time: milliseconds to sleep
+ *
+ * Wrapper for ice_msec_delay with sleep set to true.
+ */
+void
+ice_msec_pause(uint32_t time)
+{
+ ice_msec_delay(time, true);
+}
+
+/**
+ * ice_msec_spin - Spin the thread for a time in milliseconds
+ * @time: milliseconds to delay
+ *
+ * Wrapper for ice_msec_delay with sleep set to false.
+ */
+void
+ice_msec_spin(uint32_t time)
+{
+ ice_msec_delay(time, false);
+}
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
+
+/**
+ * ice_dmamap_cb - Callback function for DMA maps
+ * @arg: pointer to return the segment address
+ * @segs: the segments array
+ * @nseg: number of segments in the array
+ * @error: error code
+ *
+ * Callback used by the bus DMA code to obtain the segment address.
+ */
+static void
+ice_dmamap_cb(void *arg, bus_dma_segment_t * segs, int __unused nseg, int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *) arg = segs->ds_addr;
+ return;
+}
+
+/**
+ * ice_alloc_dma_mem - Request OS to allocate DMA memory
+ * @hw: private hardware structure
+ * @mem: structure defining the DMA memory request
+ * @size: the allocation size
+ *
+ * Allocates some memory for DMA use. Uses the FreeBSD bus DMA interface to
+ * track this memory using a bus DMA tag and map.
+ *
+ * Returns a pointer to the DMA memory address.
+ */
+void *
+ice_alloc_dma_mem(struct ice_hw *hw, struct ice_dma_mem *mem, u64 size)
+{
+ device_t dev = ice_hw_to_dev(hw);
+ int err;
+
+ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filtfunc, filtfuncarg */
+ size, /* maxsize */
+ 1, /* nsegments */
+ size, /* maxsegsz */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &mem->tag);
+ if (err != 0) {
+ device_printf(dev,
+ "ice_alloc_dma: bus_dma_tag_create failed, "
+ "error %s\n", ice_err_str(err));
+ goto fail_0;
+ }
+ err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
+ if (err != 0) {
+ device_printf(dev,
+ "ice_alloc_dma: bus_dmamem_alloc failed, "
+ "error %s\n", ice_err_str(err));
+ goto fail_1;
+ }
+ err = bus_dmamap_load(mem->tag, mem->map, mem->va,
+ size,
+ ice_dmamap_cb,
+ &mem->pa,
+ BUS_DMA_NOWAIT);
+ if (err != 0) {
+ device_printf(dev,
+ "ice_alloc_dma: bus_dmamap_load failed, "
+ "error %s\n", ice_err_str(err));
+ goto fail_2;
+ }
+ mem->size = size;
+ bus_dmamap_sync(mem->tag, mem->map,
+ BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+ return (mem->va);
+fail_2:
+ bus_dmamem_free(mem->tag, mem->va, mem->map);
+fail_1:
+ bus_dma_tag_destroy(mem->tag);
+fail_0:
+ mem->map = NULL;
+ mem->tag = NULL;
+ return (NULL);
+}
+
+/**
+ * ice_free_dma_mem - Free DMA memory allocated by ice_alloc_dma_mem
+ * @hw: the hardware private structure
+ * @mem: DMA memory to free
+ *
+ * Release the bus DMA tag and map, and free the DMA memory associated with
+ * it.
+ */
+void
+ice_free_dma_mem(struct ice_hw __unused *hw, struct ice_dma_mem *mem)
+{
+ bus_dmamap_sync(mem->tag, mem->map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(mem->tag, mem->map);
+ bus_dmamem_free(mem->tag, mem->va, mem->map);
+ bus_dma_tag_destroy(mem->tag);
+ mem->map = NULL;
+ mem->tag = NULL;
+}
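
To illustrate the intended call pattern of the two functions above, here is a hedged sketch; the descriptor-ring use case and the example_* names are assumptions, not taken from this diff.

/* Illustrative: allocate and later release a DMA-able buffer, e.g. for
 * a descriptor ring. Hardware would be programmed with ring->pa.
 */
static int
example_ring_alloc(struct ice_hw *hw, struct ice_dma_mem *ring, u64 size)
{
	if (ice_alloc_dma_mem(hw, ring, size) == NULL)
		return (ENOMEM);

	/* ... write ring->pa into the relevant base-address registers ... */
	return (0);
}

static void
example_ring_free(struct ice_hw *hw, struct ice_dma_mem *ring)
{
	ice_free_dma_mem(hw, ring);
}
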
Index: sys/dev/ice/ice_protocol_type.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_protocol_type.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_PROTOCOL_TYPE_H_
+#define _ICE_PROTOCOL_TYPE_H_
+#include "ice_flex_type.h"
+#define ICE_IPV6_ADDR_LENGTH 16
+
+/* Each recipe can match up to 5 different fields. Fields to match can be meta-
+ * data, values extracted from packet headers, or results from other recipes.
+ * One of the 5 fields is reserved for matching the switch ID. So, up to 4
+ * recipes can provide intermediate results to another one through chaining,
+ * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
+ */
+#define ICE_NUM_WORDS_RECIPE 4
+
+/* Max recipes that can be chained */
+#define ICE_MAX_CHAIN_RECIPE 5
+
+/* 1 word reserved for switch ID from allowed 5 words.
+ * So a recipe can have max 4 words. And you can chain 5 such recipes
+ * together. So maximum words that can be programmed for look up is 5 * 4.
+ */
+#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
+
+/* Field vector index corresponding to chaining */
+#define ICE_CHAIN_FV_INDEX_START 47
+
+enum ice_protocol_type {
+ ICE_MAC_OFOS = 0,
+ ICE_MAC_IL,
+ ICE_ETYPE_OL,
+ ICE_VLAN_OFOS,
+ ICE_IPV4_OFOS,
+ ICE_IPV4_IL,
+ ICE_IPV6_OFOS,
+ ICE_IPV6_IL,
+ ICE_TCP_IL,
+ ICE_UDP_OF,
+ ICE_UDP_ILOS,
+ ICE_SCTP_IL,
+ ICE_VXLAN,
+ ICE_GENEVE,
+ ICE_VXLAN_GPE,
+ ICE_NVGRE,
+ ICE_PROTOCOL_LAST
+};
+
+enum ice_sw_tunnel_type {
+ ICE_NON_TUN = 0,
+ ICE_SW_TUN_AND_NON_TUN,
+ ICE_SW_TUN_VXLAN_GPE,
+ ICE_SW_TUN_GENEVE,
+ ICE_SW_TUN_VXLAN,
+ ICE_SW_TUN_NVGRE,
+ ICE_SW_TUN_UDP, /* This means all "UDP" tunnel types: VXLAN-GPE, VXLAN
+ * and GENEVE
+ */
+ ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
+};
+
+/* Decoders for ice_prot_id:
+ * - F: First
+ * - I: Inner
+ * - L: Last
+ * - O: Outer
+ * - S: Single
+ */
+enum ice_prot_id {
+ ICE_PROT_ID_INVAL = 0,
+ ICE_PROT_MAC_OF_OR_S = 1,
+ ICE_PROT_MAC_O2 = 2,
+ ICE_PROT_MAC_IL = 4,
+ ICE_PROT_MAC_IN_MAC = 7,
+ ICE_PROT_ETYPE_OL = 9,
+ ICE_PROT_ETYPE_IL = 10,
+ ICE_PROT_PAY = 15,
+ ICE_PROT_EVLAN_O = 16,
+ ICE_PROT_VLAN_O = 17,
+ ICE_PROT_VLAN_IF = 18,
+ ICE_PROT_MPLS_OL_MINUS_1 = 27,
+ ICE_PROT_MPLS_OL_OR_OS = 28,
+ ICE_PROT_MPLS_IL = 29,
+ ICE_PROT_IPV4_OF_OR_S = 32,
+ ICE_PROT_IPV4_IL = 33,
+ ICE_PROT_IPV6_OF_OR_S = 40,
+ ICE_PROT_IPV6_IL = 41,
+ ICE_PROT_IPV6_FRAG = 47,
+ ICE_PROT_TCP_IL = 49,
+ ICE_PROT_UDP_OF = 52,
+ ICE_PROT_UDP_IL_OR_S = 53,
+ ICE_PROT_GRE_OF = 64,
+ ICE_PROT_NSH_F = 84,
+ ICE_PROT_ESP_F = 88,
+ ICE_PROT_ESP_2 = 89,
+ ICE_PROT_SCTP_IL = 96,
+ ICE_PROT_ICMP_IL = 98,
+ ICE_PROT_ICMPV6_IL = 100,
+ ICE_PROT_VRRP_F = 101,
+ ICE_PROT_OSPF = 102,
+ ICE_PROT_ATAOE_OF = 114,
+ ICE_PROT_CTRL_OF = 116,
+ ICE_PROT_LLDP_OF = 117,
+ ICE_PROT_ARP_OF = 118,
+ ICE_PROT_EAPOL_OF = 120,
+	ICE_PROT_META_ID = 255, /* when offset == metadata */
+ ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
+};
+
+#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */
+
+#define ICE_MAC_OFOS_HW 1
+#define ICE_MAC_IL_HW 4
+#define ICE_ETYPE_OL_HW 9
+#define ICE_VLAN_OL_HW 17
+#define ICE_IPV4_OFOS_HW 32
+#define ICE_IPV4_IL_HW 33
+#define ICE_IPV6_OFOS_HW 40
+#define ICE_IPV6_IL_HW 41
+#define ICE_TCP_IL_HW 49
+#define ICE_UDP_ILOS_HW 53
+#define ICE_SCTP_IL_HW 96
+
+/* ICE_UDP_OF is used to identify all 3 tunnel types
+ * VXLAN, GENEVE and VXLAN_GPE. To differentiate further, the
+ * flags from the field vector need to be used
+ */
+#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
+#define ICE_GRE_OF_HW 64 /* NVGRE */
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+
+#define ICE_MDID_SIZE 2
+#define ICE_TUN_FLAG_MDID 21
+#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_TUN_FLAG_FV_IND 2
+
+#define ICE_PROTOCOL_MAX_ENTRIES 16
+
+/* Mapping of software defined protocol ID to hardware defined protocol ID */
+struct ice_protocol_entry {
+ enum ice_protocol_type type;
+ u8 protocol_id;
+};
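
As an illustration of how this structure ties the two ID spaces together, here is a hedged sketch of a mapping table; the table itself is hypothetical, only the identifiers come from this header.

/* Hypothetical excerpt of a software-to-hardware protocol ID table */
static const struct ice_protocol_entry example_prot_map[] = {
	{ ICE_MAC_OFOS,  ICE_MAC_OFOS_HW },
	{ ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
	{ ICE_TCP_IL,    ICE_TCP_IL_HW },
	{ ICE_UDP_OF,    ICE_UDP_OF_HW },
};
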
+
+struct ice_ether_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+};
+
+struct ice_ethtype_hdr {
+ __be16 ethtype_id;
+};
+
+struct ice_ether_vlan_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 vlan_id;
+};
+
+struct ice_vlan_hdr {
+ __be16 vlan;
+ __be16 type;
+};
+
+struct ice_ipv4_hdr {
+ u8 version;
+ u8 tos;
+ __be16 total_length;
+ __be16 id;
+ __be16 frag_off;
+ u8 time_to_live;
+ u8 protocol;
+ __be16 check;
+ __be32 src_addr;
+ __be32 dst_addr;
+};
+
+struct ice_le_ver_tc_flow {
+ union {
+ struct {
+ u32 flow_label : 20;
+ u32 tc : 8;
+ u32 version : 4;
+ } fld;
+ u32 val;
+ } u;
+};
+
+struct ice_ipv6_hdr {
+ __be32 be_ver_tc_flow;
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 hop_limit;
+ u8 src_addr[ICE_IPV6_ADDR_LENGTH];
+ u8 dst_addr[ICE_IPV6_ADDR_LENGTH];
+};
+
+struct ice_sctp_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 verification_tag;
+ __be32 check;
+};
+
+struct ice_l4_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 len;
+ __be16 check;
+};
+
+struct ice_udp_tnl_hdr {
+ __be16 field;
+ __be16 proto_type;
+ __be32 vni; /* only use lower 24-bits */
+};
+
+struct ice_nvgre {
+ __be16 flags;
+ __be16 protocol;
+ __be32 tni_flow;
+};
+
+union ice_prot_hdr {
+ struct ice_ether_hdr eth_hdr;
+ struct ice_ethtype_hdr ethertype;
+ struct ice_vlan_hdr vlan_hdr;
+ struct ice_ipv4_hdr ipv4_hdr;
+ struct ice_ipv6_hdr ipv6_hdr;
+ struct ice_l4_hdr l4_hdr;
+ struct ice_sctp_hdr sctp_hdr;
+ struct ice_udp_tnl_hdr tnl_hdr;
+ struct ice_nvgre nvgre_hdr;
+};
+
+/* This is a mapping table entry that maps every word within a given protocol
+ * structure to the real byte offset as per the specification of that
+ * protocol header.
+ * e.g., the dst address is 3 words in the ethertype header and corresponding bytes
+ * are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
+ */
+struct ice_prot_ext_tbl_entry {
+ enum ice_protocol_type prot_type;
+ /* Byte offset into header of given protocol type */
+ u8 offs[sizeof(union ice_prot_hdr)];
+};
+
+/* Extractions to be looked up for a given recipe */
+struct ice_prot_lkup_ext {
+ u16 prot_type;
+ u8 n_val_words;
+ /* create a buffer to hold max words per recipe */
+ u16 field_off[ICE_MAX_CHAIN_WORDS];
+ u16 field_mask[ICE_MAX_CHAIN_WORDS];
+
+ struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
+
+ /* Indicate field offsets that have field vector indices assigned */
+ ice_declare_bitmap(done, ICE_MAX_CHAIN_WORDS);
+};
+
+struct ice_pref_recipe_group {
+ u8 n_val_pairs; /* Number of valid pairs */
+ struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
+ u16 mask[ICE_NUM_WORDS_RECIPE];
+};
+
+struct ice_recp_grp_entry {
+ struct LIST_ENTRY_TYPE l_entry;
+
+#define ICE_INVAL_CHAIN_IND 0xFF
+ u16 rid;
+ u8 chain_idx;
+ u16 fv_idx[ICE_NUM_WORDS_RECIPE];
+ u16 fv_mask[ICE_NUM_WORDS_RECIPE];
+ struct ice_pref_recipe_group r_group;
+};
+#endif /* _ICE_PROTOCOL_TYPE_H_ */
Index: sys/dev/ice/ice_resmgr.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_resmgr.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_resmgr.h
+ * @brief Resource manager interface
+ *
+ * Defines an interface for managing PF hardware queues and interrupts, and for
+ * assigning them to hardware VSIs and VFs.
+ *
+ * For queue management:
+ * The total number of available Tx and Rx queues is not equal, so it is
+ * expected that each PF will allocate two ice_resmgr structures, one for Tx
+ * and one for Rx. These should be allocated in attach() prior to initializing
+ * VSIs, and destroyed in detach().
+ *
+ * For interrupt management:
+ * The PF allocates an ice_resmgr structure that does not allow scattered
+ * allocations since interrupt allocations must be contiguous.
+ */
+
+#ifndef _ICE_RESMGR_H_
+#define _ICE_RESMGR_H_
+#include <sys/param.h>
+#include "ice_osdep.h"
+
+#include <sys/bitstring.h>
+
+/*
+ * For managing VSI queue allocations
+ */
+/* Hardware only supports a limited number of resources in scattered mode */
+#define ICE_MAX_SCATTERED_QUEUES 16
+/* Use highest value to indicate invalid resource mapping */
+#define ICE_INVALID_RES_IDX 0xFFFF
+
+/*
+ * Structures
+ */
+
+/**
+ * @struct ice_resmgr
+ * @brief Resource manager
+ *
+ * Represents resource allocations using a bitstring, where bit zero represents
+ * the first resource. If a particular bit is set this indicates that the
+ * resource has been allocated and is not free.
+ */
+struct ice_resmgr {
+ bitstr_t *resources;
+ u16 num_res;
+ bool contig_only;
+};
+
+/**
+ * @enum ice_resmgr_alloc_type
+ * @brief resource manager allocation types
+ *
+ * Enumeration of possible allocation types that can be used when
+ * assigning resources. For now, SCATTERED is only used with
+ * managing queue allocations.
+ */
+enum ice_resmgr_alloc_type {
+ ICE_RESMGR_ALLOC_INVALID = 0,
+ ICE_RESMGR_ALLOC_CONTIGUOUS,
+ ICE_RESMGR_ALLOC_SCATTERED
+};
+
+/* Public resource manager allocation functions */
+int ice_resmgr_init(struct ice_resmgr *resmgr, u16 num_res);
+int ice_resmgr_init_contig_only(struct ice_resmgr *resmgr, u16 num_res);
+void ice_resmgr_destroy(struct ice_resmgr *resmgr);
+
+/* Public resource assignment functions */
+int ice_resmgr_assign_contiguous(struct ice_resmgr *resmgr, u16 *idx, u16 num_res);
+int ice_resmgr_assign_scattered(struct ice_resmgr *resmgr, u16 *idx, u16 num_res);
+
+/* Release resources */
+void ice_resmgr_release_map(struct ice_resmgr *resmgr, u16 *idx, u16 num_res);
+
+#endif /* _ICE_RESMGR_H_ */
+
Index: sys/dev/ice/ice_resmgr.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_resmgr.c
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_resmgr.c
+ * @brief Resource allocation manager
+ *
+ * Manage device resource allocations for a PF, including assigning queues to
+ * VSIs, or managing interrupt allocations across the PF.
+ *
+ * It can handle contiguous and scattered resource allocations, and upon
+ * assigning them, will fill in the mapping array with a map of
+ * resource IDs to PF-space resource indices.
+ */
+
+#include "ice_resmgr.h"
+
+/**
+ * @var M_ICE_RESMGR
+ * @brief PF resource manager allocation type
+ *
+ * malloc(9) allocation type used by the resource manager code.
+ */
+MALLOC_DEFINE(M_ICE_RESMGR, "ice-resmgr", "Intel(R) 100Gb Network Driver resmgr allocations");
+
+/*
+ * Public resource manager allocation functions
+ */
+
+/**
+ * ice_resmgr_init - Initialize a resource manager structure
+ * @resmgr: structure to track the resource manager state
+ * @num_res: the maximum number of resources it can assign
+ *
+ * Initialize the state of a resource manager structure, allocating space to
+ * assign up to the requested number of resources. Uses bit strings to track
+ * which resources have been assigned. This type of resmgr is intended to be
+ * used for tracking LAN queue assignments between VSIs.
+ */
+int
+ice_resmgr_init(struct ice_resmgr *resmgr, u16 num_res)
+{
+ resmgr->resources = bit_alloc(num_res, M_ICE_RESMGR, M_NOWAIT);
+ if (resmgr->resources == NULL)
+ return (ENOMEM);
+
+ resmgr->num_res = num_res;
+ resmgr->contig_only = false;
+ return (0);
+}
+
+/**
+ * ice_resmgr_init_contig_only - Initialize a resource manager structure
+ * @resmgr: structure to track the resource manager state
+ * @num_res: the maximum number of resources it can assign
+ *
+ * Functions similarly to ice_resmgr_init(), but the resulting resmgr structure
+ * will only allow contiguous allocations. This type of resmgr is intended to
+ * be used for tracking device MSI-X interrupt allocations.
+ */
+int
+ice_resmgr_init_contig_only(struct ice_resmgr *resmgr, u16 num_res)
+{
+ int error;
+
+ error = ice_resmgr_init(resmgr, num_res);
+ if (error)
+ return (error);
+
+ resmgr->contig_only = true;
+ return (0);
+}
+
+/**
+ * ice_resmgr_destroy - Deallocate memory associated with a resource manager
+ * @resmgr: resource manager structure
+ *
+ * De-allocates the bit string associated with this resource manager. It is
+ * expected that this function will not be called until all of the assigned
+ * resources have been released.
+ */
+void
+ice_resmgr_destroy(struct ice_resmgr *resmgr)
+{
+ if (resmgr->resources != NULL) {
+#ifdef INVARIANTS
+ int set;
+
+ bit_count(resmgr->resources, 0, resmgr->num_res, &set);
+ MPASS(set == 0);
+#endif
+
+ free(resmgr->resources, M_ICE_RESMGR);
+ resmgr->resources = NULL;
+ }
+ resmgr->num_res = 0;
+}
+
+/*
+ * Resource allocation functions
+ */
+
+/**
+ * ice_resmgr_assign_contiguous - Assign contiguous mapping of resources
+ * @resmgr: resource manager structure
+ * @idx: memory to store mapping, at least num_res wide
+ * @num_res: the number of resources to assign
+ *
+ * Assign num_res number of contiguous resources into the idx mapping. On
+ * success, idx will be updated to map each index to a PF resource.
+ *
+ * This function guarantees that the resource mapping will be contiguous, and
+ * will fail if that is not possible.
+ */
+int
+ice_resmgr_assign_contiguous(struct ice_resmgr *resmgr, u16 *idx, u16 num_res)
+{
+ int start, i;
+
+ bit_ffc_area(resmgr->resources, resmgr->num_res, num_res, &start);
+ if (start < 0)
+ return (ENOSPC);
+
+ /* Set each bit and update the index array */
+ for (i = 0; i < num_res; i++) {
+ bit_set(resmgr->resources, start + i);
+ idx[i] = start + i;
+ }
+
+ return (0);
+}
+
+/**
+ * ice_resmgr_assign_scattered - Assign possibly scattered resources
+ * @resmgr: the resource manager structure
+ * @idx: memory to store associated resource mapping, at least num_res wide
+ * @num_res: the number of resources to assign
+ *
+ * Assign num_res number of resources into the idx_mapping. On success, idx
+ * will be updated to map each index to a PF-space resource.
+ *
+ * Queues may be allocated non-contiguously, and this function requires that
+ * num_res be no more than ICE_MAX_SCATTERED_QUEUES due to hardware
+ * limitations on scattered queue assignment.
+ */
+int
+ice_resmgr_assign_scattered(struct ice_resmgr *resmgr, u16 *idx, u16 num_res)
+{
+ int index = 0, i;
+
+ /* Scattered allocations won't work if they weren't allowed at resmgr
+ * creation time.
+ */
+ if (resmgr->contig_only)
+ return (EPERM);
+
+ /* Hardware can only support a limited total of scattered queues for
+ * a single VSI
+ */
+ if (num_res > ICE_MAX_SCATTERED_QUEUES)
+ return (EOPNOTSUPP);
+
+ for (i = 0; i < num_res; i++) {
+ bit_ffc_at(resmgr->resources, index, resmgr->num_res, &index);
+ if (index < 0)
+ goto err_no_space;
+
+ bit_set(resmgr->resources, index);
+ idx[i] = index;
+ }
+ return (0);
+
+err_no_space:
+ /* Release any resources we did assign up to this point. */
+ ice_resmgr_release_map(resmgr, idx, i);
+ return (ENOSPC);
+}
+
+/**
+ * ice_resmgr_release_map - Release previously assigned resource mapping
+ * @resmgr: the resource manager structure
+ * @idx: previously assigned resource mapping
+ * @num_res: number of resources in the mapping
+ *
+ * Clears the assignment of each resource in the provided resource mapping.
+ * Updates idx to indicate that each of the virtual indexes has an invalid
+ * resource mapping, by assigning it ICE_INVALID_RES_IDX.
+ */
+void
+ice_resmgr_release_map(struct ice_resmgr *resmgr, u16 *idx, u16 num_res)
+{
+ int i;
+
+ for (i = 0; i < num_res; i++) {
+ if (idx[i] < resmgr->num_res)
+ bit_clear(resmgr->resources, idx[i]);
+ idx[i] = ICE_INVALID_RES_IDX;
+ }
+}
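
Putting the pieces together, here is a minimal sketch of the expected resmgr lifecycle for queue management; the 128-queue pool and 16-queue VSI sizes are assumptions for illustration.

/* Illustrative: carve a contiguous block of 16 Tx queues out of a PF pool
 * of 128, then return them. Error handling is abbreviated for the sketch.
 */
static int
example_resmgr_usage(void)
{
	struct ice_resmgr tx_qmgr;
	u16 qmap[16];
	int err;

	err = ice_resmgr_init(&tx_qmgr, 128);
	if (err)
		return (err);

	err = ice_resmgr_assign_contiguous(&tx_qmgr, qmap, 16);
	if (err == 0) {
		/* qmap[i] now holds the PF-space queue index for VSI queue i */
		ice_resmgr_release_map(&tx_qmgr, qmap, 16);
	}

	ice_resmgr_destroy(&tx_qmgr);
	return (err);
}
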
Index: sys/dev/ice/ice_rss.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_rss.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_rss.h
+ * @brief default RSS values if kernel RSS is not enabled
+ *
+ * This header includes default definitions for RSS functionality if the
+ * kernel RSS interface is not enabled. This allows main driver code to avoid
+ * having to check the RSS ifdef throughout, but instead just use the RSS
+ * definitions, as they will fall back to these defaults when the kernel
+ * interface is disabled.
+ */
+#ifndef _ICE_RSS_H_
+#define _ICE_RSS_H_
+
+#ifdef RSS
+/* We have the kernel RSS interface available */
+#include <net/rss_config.h>
+
+/* Make sure our key size buffer has enough space to store the kernel RSS key */
+CTASSERT(ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE >= RSS_KEYSIZE);
+#else
+/* The kernel RSS interface is not enabled. Use suitable defaults for the RSS
+ * configuration functions.
+ *
+ * The RSS hash key will be a pre-generated random key.
+ * The number of buckets will just match the number of CPUs.
+ * The lookup table will be assigned using round-robin with no indirection.
+ * The RSS hash configuration will be set to suitable defaults.
+ */
+
+#define RSS_HASHTYPE_RSS_IPV4 (1 << 1) /* IPv4 2-tuple */
+#define RSS_HASHTYPE_RSS_TCP_IPV4 (1 << 2) /* TCPv4 4-tuple */
+#define RSS_HASHTYPE_RSS_IPV6 (1 << 3) /* IPv6 2-tuple */
+#define RSS_HASHTYPE_RSS_TCP_IPV6 (1 << 4) /* TCPv6 4-tuple */
+#define RSS_HASHTYPE_RSS_IPV6_EX (1 << 5) /* IPv6 2-tuple + ext hdrs */
+#define RSS_HASHTYPE_RSS_TCP_IPV6_EX (1 << 6) /* TCPv6 4-tuple + ext hdrs */
+#define RSS_HASHTYPE_RSS_UDP_IPV4 (1 << 7) /* IPv4 UDP 4-tuple */
+#define RSS_HASHTYPE_RSS_UDP_IPV6 (1 << 9) /* IPv6 UDP 4-tuple */
+#define RSS_HASHTYPE_RSS_UDP_IPV6_EX (1 << 10) /* IPv6 UDP 4-tuple + ext hdrs */
+
+#define ICE_DEFAULT_RSS_HASH_CONFIG \
+ ((u_int)(RSS_HASHTYPE_RSS_IPV4 | \
+ RSS_HASHTYPE_RSS_TCP_IPV4 | \
+ RSS_HASHTYPE_RSS_UDP_IPV4 | \
+ RSS_HASHTYPE_RSS_IPV6 | \
+ RSS_HASHTYPE_RSS_TCP_IPV6 | \
+ RSS_HASHTYPE_RSS_UDP_IPV6))
+
+#define rss_getkey(key) ice_get_default_rss_key(key)
+#define rss_getnumbuckets() (mp_ncpus)
+#define rss_get_indirection_to_bucket(index) (index)
+#define rss_gethashconfig() (ICE_DEFAULT_RSS_HASH_CONFIG)
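+
+/*
+ * Illustrative sketch (not part of the driver): thanks to the shims above,
+ * RSS setup code can be written once and behaves the same whether or not the
+ * kernel RSS option is compiled in, e.g.:
+ *
+ *	u8 key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
+ *	u_int nbuckets, hash_cfg;
+ *
+ *	rss_getkey(key);
+ *	nbuckets = rss_getnumbuckets();
+ *	hash_cfg = rss_gethashconfig();
+ */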
+
+/**
+ * rss_hash2bucket - Determine the bucket for a given hash value
+ * @hash_val: the hash value to use
+ * @hash_type: the type of the hash
+ * @bucket_id: on success, updated with the bucket
+ *
+ * This function simply verifies that the hash type is known. If it is, then
+ * we forward the hash value directly as the bucket id. If the hash type is
+ * unknown, we return -1.
+ *
+ * This is the simplest mechanism for converting a hash value into a bucket,
+ * and does not support any form of indirection table.
+ */
+static inline int
+rss_hash2bucket(uint32_t hash_val, uint32_t hash_type, uint32_t *bucket_id)
+{
+ switch (hash_type) {
+ case M_HASHTYPE_RSS_IPV4:
+ case M_HASHTYPE_RSS_TCP_IPV4:
+ case M_HASHTYPE_RSS_UDP_IPV4:
+ case M_HASHTYPE_RSS_IPV6:
+ case M_HASHTYPE_RSS_TCP_IPV6:
+ case M_HASHTYPE_RSS_UDP_IPV6:
+ *bucket_id = hash_val;
+ return (0);
+ default:
+ return (-1);
+ }
+}
+
+#endif /* !RSS */
+
+#endif /* _ICE_RSS_H_ */
Index: sys/dev/ice/ice_sbq_cmd.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_sbq_cmd.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_SBQ_CMD_H_
+#define _ICE_SBQ_CMD_H_
+
+/* This header file defines the Sideband Queue commands, error codes and
+ * descriptor format. It is shared between Firmware and Software.
+ */
+
+/* Sideband Queue command structure and opcodes */
+enum ice_sbq_opc {
+ /* Sideband Queue commands */
+ ice_sbq_opc_neigh_dev_req = 0x0C00,
+ ice_sbq_opc_neigh_dev_ev = 0x0C01
+};
+
+/* Sideband Queue descriptor. Indirect command
+ * and non-posted
+ */
+struct ice_sbq_cmd_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+
+ /* Opaque message data */
+ __le32 cookie_high;
+ __le32 cookie_low;
+
+ union {
+ __le16 cmd_len;
+ __le16 cmpl_len;
+ } param0;
+
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_sbq_evt_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+ u8 data[24];
+};
+
+enum ice_sbq_msg_dev {
+ rmn_0 = 0x02,
+ rmn_1 = 0x03,
+ rmn_2 = 0x04,
+ cgu = 0x06
+};
+
+enum ice_sbq_msg_opcode {
+ ice_sbq_msg_rd = 0x00,
+ ice_sbq_msg_wr = 0x01
+};
+
+#define ICE_SBQ_MSG_FLAGS 0x40
+#define ICE_SBQ_MSG_SBE_FBE 0x0F
+
+struct ice_sbq_msg_req {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ u8 sbe_fbe;
+ u8 func_id;
+ __le16 msg_addr_low;
+ __le32 msg_addr_high;
+ __le32 data;
+};
+
+struct ice_sbq_msg_cmpl {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ __le32 data;
+};
+
+/* Internal struct */
+struct ice_sbq_msg_input {
+ u8 dest_dev;
+ u8 opcode;
+ u16 msg_addr_low;
+ u32 msg_addr_high;
+ u32 data;
+};
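+
+/*
+ * Illustrative sketch (not part of the driver): a sideband register read
+ * request would be described by filling in an ice_sbq_msg_input before it is
+ * handed to the sideband queue code, e.g. for a cgu register at a
+ * hypothetical address "reg":
+ *
+ *	struct ice_sbq_msg_input msg = {
+ *		.dest_dev = cgu,
+ *		.opcode = ice_sbq_msg_rd,
+ *		.msg_addr_low = reg,
+ *		.msg_addr_high = 0,
+ *	};
+ */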
+#endif /* _ICE_SBQ_CMD_H_ */
Index: sys/dev/ice/ice_sched.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_sched.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_SCHED_H_
+#define _ICE_SCHED_H_
+
+#include "ice_common.h"
+
+#define ICE_QGRP_LAYER_OFFSET 2
+#define ICE_VSI_LAYER_OFFSET 4
+#define ICE_AGG_LAYER_OFFSET 6
+#define ICE_SCHED_INVAL_LAYER_NUM 0xFF
+/* Burst size is a 12-bit register that is configured while creating the RL
+ * profile(s). The MSB is a granularity bit that selects the granularity type:
+ * 0 - LSB bits are in 64-byte granularity
+ * 1 - LSB bits are in 1 KB granularity
+ */
+#define ICE_64_BYTE_GRANULARITY 0
+#define ICE_KBYTE_GRANULARITY BIT(11)
+#define ICE_MIN_BURST_SIZE_ALLOWED 64 /* In Bytes */
+#define ICE_MAX_BURST_SIZE_ALLOWED \
+ ((BIT(11) - 1) * 1024) /* In Bytes */
+#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \
+ ((BIT(11) - 1) * 64) /* In Bytes */
+#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
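+
+/*
+ * Worked example (informational): with the granularity bit clear, the 11
+ * LSBs count 64-byte units, so the largest encodable burst is
+ * 2047 * 64 = 131,008 bytes (ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY). With
+ * the granularity bit set, the same 11 bits count 1 KB units, giving
+ * 2047 * 1024 = 2,096,128 bytes (ICE_MAX_BURST_SIZE_ALLOWED).
+ */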
+
+#define ICE_RL_PROF_ACCURACY_BYTES 128
+#define ICE_RL_PROF_MULTIPLIER 10000
+#define ICE_RL_PROF_TS_MULTIPLIER 32
+#define ICE_RL_PROF_FRACTION 512
+
+#define ICE_PSM_CLK_367MHZ_IN_HZ 367647059
+#define ICE_PSM_CLK_416MHZ_IN_HZ 416666667
+#define ICE_PSM_CLK_446MHZ_IN_HZ 446428571
+#define ICE_PSM_CLK_390MHZ_IN_HZ 390625000
+
+struct rl_profile_params {
+ u32 bw; /* in Kbps */
+ u16 rl_multiplier;
+ u16 wake_up_calc;
+ u16 rl_encode;
+};
+
+/* BW rate limit profile parameters list entry along
+ * with bandwidth maintained per layer in port info
+ */
+struct ice_aqc_rl_profile_info {
+ struct ice_aqc_rl_profile_elem profile;
+ struct LIST_ENTRY_TYPE list_entry;
+ u32 bw; /* requested */
+ u16 prof_id_ref; /* profile ID to node association ref count */
+};
+
+struct ice_sched_agg_vsi_info {
+ struct LIST_ENTRY_TYPE list_entry;
+ ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ u16 vsi_handle;
+ /* save aggregator VSI TC bitmap */
+ ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+};
+
+struct ice_sched_agg_info {
+ struct LIST_HEAD_TYPE agg_vsi_list;
+ struct LIST_ENTRY_TYPE list_entry;
+ ice_declare_bitmap(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ u32 agg_id;
+ enum ice_agg_type agg_type;
+ /* bw_t_info saves aggregator BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
+ /* save aggregator TC bitmap */
+ ice_declare_bitmap(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+};
+
+/* FW AQ command calls */
+enum ice_status
+ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
+ struct ice_aqc_cfg_l2_node_cgd_data *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd);
+enum ice_status ice_sched_init_port(struct ice_port_info *pi);
+enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
+
+/* Functions to cleanup scheduler SW DB */
+void ice_sched_clear_port(struct ice_port_info *pi);
+void ice_sched_cleanup_all(struct ice_hw *hw);
+void ice_sched_clear_agg(struct ice_hw *hw);
+
+/* Get a scheduling node from SW DB for given TEID */
+struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid);
+struct ice_sched_node *
+ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
+/* Add a scheduling node into SW DB for given info */
+enum ice_status
+ice_sched_add_node(struct ice_port_info *pi, u8 layer,
+ struct ice_aqc_txsched_elem_data *info);
+void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
+struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
+struct ice_sched_node *
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u8 owner);
+enum ice_status
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
+ u8 owner, bool enable);
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+struct ice_sched_node *
+ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ u16 vsi_handle);
+bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node);
+enum ice_status
+ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+
+/* Tx scheduler rate limiter functions */
+enum ice_status
+ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
+ enum ice_agg_type agg_type, u8 tc_bitmap);
+enum ice_status
+ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
+ u8 tc_bitmap);
+enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id);
+enum ice_status
+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type);
+enum ice_status
+ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type);
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type);
+enum ice_status
+ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
+ enum ice_rl_type rl_type);
+enum ice_status
+ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 bw);
+enum ice_status
+ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status
+ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw);
+enum ice_status
+ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id);
+enum ice_status
+ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
+ u8 *q_prio);
+enum ice_status
+ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
+ enum ice_rl_type rl_type, u8 *bw_alloc);
+enum ice_status
+ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
+ u16 num_vsis, u16 *vsi_handle_arr,
+ u8 *node_prio, u8 tc);
+enum ice_status
+ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
+ enum ice_rl_type rl_type, u8 *bw_alloc);
+bool
+ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
+ struct ice_sched_node *node);
+enum ice_status
+ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
+ u32 bw);
+enum ice_status
+ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw);
+enum ice_status
+ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
+ struct ice_sched_node *node, u8 priority);
+enum ice_status
+ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type, u8 bw_alloc);
+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+#endif /* _ICE_SCHED_H_ */
Index: sys/dev/ice/ice_sched.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_sched.c
@@ -0,0 +1,5541 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_sched.h"
+
+/**
+ * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
+ * @pi: port information structure
+ * @info: Scheduler element information from firmware
+ *
+ * This function inserts the root node of the scheduling tree topology
+ * to the SW DB.
+ */
+static enum ice_status
+ice_sched_add_root_node(struct ice_port_info *pi,
+ struct ice_aqc_txsched_elem_data *info)
+{
+ struct ice_sched_node *root;
+ struct ice_hw *hw;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ root = (struct ice_sched_node *)ice_malloc(hw, sizeof(*root));
+ if (!root)
+ return ICE_ERR_NO_MEMORY;
+
+ /* coverity[suspicious_sizeof] */
+ root->children = (struct ice_sched_node **)
+ ice_calloc(hw, hw->max_children[0], sizeof(*root));
+ if (!root->children) {
+ ice_free(hw, root);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA);
+ pi->root = root;
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
+ * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
+ * @teid: node TEID to search
+ *
+ * This function searches for a node matching the TEID in the scheduling tree
+ * from the SW DB. The search is recursive and is restricted by the number of
+ * layers it has searched through, stopping at the max supported layer.
+ *
+ * This function needs to be called when holding the port_info->sched_lock
+ */
+struct ice_sched_node *
+ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
+{
+ u16 i;
+
+ /* The TEID is the same as that of the start_node */
+ if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
+ return start_node;
+
+ /* The node has no children or is at the max layer */
+ if (!start_node->num_children ||
+ start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
+ start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
+ return NULL;
+
+ /* Check if TEID matches to any of the children nodes */
+ for (i = 0; i < start_node->num_children; i++)
+ if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
+ return start_node->children[i];
+
+ /* Search within each child's sub-tree */
+ for (i = 0; i < start_node->num_children; i++) {
+ struct ice_sched_node *tmp;
+
+ tmp = ice_sched_find_node_by_teid(start_node->children[i],
+ teid);
+ if (tmp)
+ return tmp;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
+ * @hw: pointer to the HW struct
+ * @cmd_opc: cmd opcode
+ * @elems_req: number of elements to request
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_resp: returns total number of elements response
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function sends a scheduling elements cmd (cmd_opc)
+ */
+static enum ice_status
+ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
+ u16 elems_req, void *buf, u16 buf_size,
+ u16 *elems_resp, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_sched_elem_cmd *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.sched_elem_cmd;
+ ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
+ cmd->num_elem_req = CPU_TO_LE16(elems_req);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && elems_resp)
+ *elems_resp = LE16_TO_CPU(cmd->num_elem_resp);
+
+ return status;
+}
+
+/**
+ * ice_aq_query_sched_elems - query scheduler elements
+ * @hw: pointer to the HW struct
+ * @elems_req: number of elements to query
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements returned
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduling elements (0x0404)
+ */
+enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_ret, cd);
+}
+
+/**
+ * ice_sched_add_node - Insert the Tx scheduler node in SW DB
+ * @pi: port information structure
+ * @layer: Scheduler layer of the node
+ * @info: Scheduler element information from firmware
+ *
+ * This function inserts a scheduler node to the SW DB.
+ */
+enum ice_status
+ice_sched_add_node(struct ice_port_info *pi, u8 layer,
+ struct ice_aqc_txsched_elem_data *info)
+{
+ struct ice_sched_node *parent;
+ struct ice_aqc_get_elem elem;
+ struct ice_sched_node *node;
+ enum ice_status status;
+ struct ice_hw *hw;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ /* A valid parent node should be there */
+ parent = ice_sched_find_node_by_teid(pi->root,
+ LE32_TO_CPU(info->parent_teid));
+ if (!parent) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Parent Node not found for parent_teid=0x%x\n",
+ LE32_TO_CPU(info->parent_teid));
+ return ICE_ERR_PARAM;
+ }
+
+ /* query the current node information from FW before adding it
+ * to the SW DB
+ */
+ status = ice_sched_query_elem(hw, LE32_TO_CPU(info->node_teid), &elem);
+ if (status)
+ return status;
+ node = (struct ice_sched_node *)ice_malloc(hw, sizeof(*node));
+ if (!node)
+ return ICE_ERR_NO_MEMORY;
+ if (hw->max_children[layer]) {
+ /* coverity[suspicious_sizeof] */
+ node->children = (struct ice_sched_node **)
+ ice_calloc(hw, hw->max_children[layer], sizeof(*node));
+ if (!node->children) {
+ ice_free(hw, node);
+ return ICE_ERR_NO_MEMORY;
+ }
+ }
+
+ node->in_use = true;
+ node->parent = parent;
+ node->tx_sched_layer = layer;
+ parent->children[parent->num_children++] = node;
+ node->info = elem.generic[0];
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_aq_delete_sched_elems - delete scheduler elements
+ * @hw: pointer to the HW struct
+ * @grps_req: number of groups to delete
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @grps_del: returns total number of elements deleted
+ * @cd: pointer to command details structure or NULL
+ *
+ * Delete scheduling elements (0x040F)
+ */
+static enum ice_status
+ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_delete_elem *buf, u16 buf_size,
+ u16 *grps_del, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
+ grps_req, (void *)buf, buf_size,
+ grps_del, cd);
+}
+
+/**
+ * ice_sched_remove_elems - remove nodes from HW
+ * @hw: pointer to the HW struct
+ * @parent: pointer to the parent node
+ * @num_nodes: number of nodes
+ * @node_teids: array of node teids to be deleted
+ *
+ * This function removes nodes from HW
+ */
+static enum ice_status
+ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
+ u16 num_nodes, u32 *node_teids)
+{
+ struct ice_aqc_delete_elem *buf;
+ u16 i, num_groups_removed = 0;
+ enum ice_status status;
+ u16 buf_size;
+
+ buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
+ buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->hdr.parent_teid = parent->info.node_teid;
+ buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
+ for (i = 0; i < num_nodes; i++)
+ buf->teid[i] = CPU_TO_LE32(node_teids[i]);
+
+ status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
+ &num_groups_removed, NULL);
+ if (status != ICE_SUCCESS || num_groups_removed != 1)
+ ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
+ hw->adminq.sq_last_status);
+
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_sched_get_first_node - get the first node of the given layer
+ * @pi: port information structure
+ * @parent: pointer to the base node of the subtree
+ * @layer: layer number
+ *
+ * This function retrieves the first node of the given layer from the subtree
+ */
+static struct ice_sched_node *
+ice_sched_get_first_node(struct ice_port_info *pi,
+ struct ice_sched_node *parent, u8 layer)
+{
+ return pi->sib_head[parent->tc_num][layer];
+}
+
+/**
+ * ice_sched_get_tc_node - get pointer to TC node
+ * @pi: port information structure
+ * @tc: TC number
+ *
+ * This function returns the TC node pointer
+ */
+struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
+{
+ u8 i;
+
+ if (!pi || !pi->root)
+ return NULL;
+ for (i = 0; i < pi->root->num_children; i++)
+ if (pi->root->children[i]->tc_num == tc)
+ return pi->root->children[i];
+ return NULL;
+}
+
+/**
+ * ice_free_sched_node - Free a Tx scheduler node from SW DB
+ * @pi: port information structure
+ * @node: pointer to the ice_sched_node struct
+ *
+ * This function frees up a node from SW DB as well as from HW
+ *
+ * This function needs to be called with the port_info->sched_lock held
+ */
+void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
+{
+ struct ice_sched_node *parent;
+ struct ice_hw *hw = pi->hw;
+ u8 i, j;
+
+ /* Free the children before freeing up the parent node
+ * The parent array is updated below and that shifts the nodes
+ * in the array. So always pick the first child if num children > 0
+ */
+ while (node->num_children)
+ ice_free_sched_node(pi, node->children[0]);
+
+ /* Leaf, TC and root nodes can't be deleted by SW */
+ if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
+ u32 teid = LE32_TO_CPU(node->info.node_teid);
+
+ ice_sched_remove_elems(hw, node->parent, 1, &teid);
+ }
+ parent = node->parent;
+ /* root has no parent */
+ if (parent) {
+ struct ice_sched_node *p;
+
+ /* update the parent */
+ for (i = 0; i < parent->num_children; i++)
+ if (parent->children[i] == node) {
+ for (j = i + 1; j < parent->num_children; j++)
+ parent->children[j - 1] =
+ parent->children[j];
+ parent->num_children--;
+ break;
+ }
+
+ p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
+ while (p) {
+ if (p->sibling == node) {
+ p->sibling = node->sibling;
+ break;
+ }
+ p = p->sibling;
+ }
+
+ /* update the sibling head if head is getting removed */
+ if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
+ pi->sib_head[node->tc_num][node->tx_sched_layer] =
+ node->sibling;
+ }
+
+ /* leaf nodes have no children */
+ if (node->children)
+ ice_free(hw, node->children);
+ ice_free(hw, node);
+}
+
+/**
+ * ice_aq_get_dflt_topo - gets default scheduler topology
+ * @hw: pointer to the HW struct
+ * @lport: logical port number
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_branches: returns total number of queue to port branches
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get default scheduler topology (0x400)
+ */
+static enum ice_status
+ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
+ struct ice_aqc_get_topo_elem *buf, u16 buf_size,
+ u8 *num_branches, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_topo *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.get_topo;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
+ cmd->port_num = lport;
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && num_branches)
+ *num_branches = cmd->num_branches;
+
+ return status;
+}
+
+/**
+ * ice_aq_add_sched_elems - adds scheduling element
+ * @hw: pointer to the HW struct
+ * @grps_req: the number of groups that are requested to be added
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @grps_added: returns total number of groups added
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add scheduling elements (0x0401)
+ */
+static enum ice_status
+ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_add_elem *buf, u16 buf_size,
+ u16 *grps_added, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
+ grps_req, (void *)buf, buf_size,
+ grps_added, cd);
+}
+
+/**
+ * ice_aq_cfg_sched_elems - configures scheduler elements
+ * @hw: pointer to the HW struct
+ * @elems_req: number of elements to configure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_cfgd: returns total number of elements configured
+ * @cd: pointer to command details structure or NULL
+ *
+ * Configure scheduling elements (0x0403)
+ */
+static enum ice_status
+ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_conf_elem *buf, u16 buf_size,
+ u16 *elems_cfgd, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_cfgd, cd);
+}
+
+/**
+ * ice_aq_move_sched_elems - move scheduler elements
+ * @hw: pointer to the HW struct
+ * @grps_req: number of groups to move
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @grps_movd: returns total number of groups moved
+ * @cd: pointer to command details structure or NULL
+ *
+ * Move scheduling elements (0x0408)
+ */
+static enum ice_status
+ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_move_elem *buf, u16 buf_size,
+ u16 *grps_movd, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
+ grps_req, (void *)buf, buf_size,
+ grps_movd, cd);
+}
+
+/**
+ * ice_aq_suspend_sched_elems - suspend scheduler elements
+ * @hw: pointer to the HW struct
+ * @elems_req: number of elements to suspend
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements suspended
+ * @cd: pointer to command details structure or NULL
+ *
+ * Suspend scheduling elements (0x0409)
+ */
+static enum ice_status
+ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_suspend_resume_elem *buf,
+ u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_ret, cd);
+}
+
+/**
+ * ice_aq_resume_sched_elems - resume scheduler elements
+ * @hw: pointer to the HW struct
+ * @elems_req: number of elements to resume
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @elems_ret: returns total number of elements resumed
+ * @cd: pointer to command details structure or NULL
+ *
+ * resume scheduling elements (0x040A)
+ */
+static enum ice_status
+ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_suspend_resume_elem *buf,
+ u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
+ elems_req, (void *)buf, buf_size,
+ elems_ret, cd);
+}
+
+/**
+ * ice_aq_query_sched_res - query scheduler resource
+ * @hw: pointer to the HW struct
+ * @buf_size: buffer size in bytes
+ * @buf: pointer to buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query scheduler resource allocation (0x0412)
+ */
+static enum ice_status
+ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
+ struct ice_aqc_query_txsched_res_resp *buf,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_sched_suspend_resume_elems - suspend or resume HW nodes
+ * @hw: pointer to the HW struct
+ * @num_nodes: number of nodes
+ * @node_teids: array of node teids to be suspended or resumed
+ * @suspend: true means suspend / false means resume
+ *
+ * This function suspends or resumes HW nodes
+ */
+static enum ice_status
+ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
+ bool suspend)
+{
+ struct ice_aqc_suspend_resume_elem *buf;
+ u16 i, buf_size, num_elem_ret = 0;
+ enum ice_status status;
+
+ buf_size = sizeof(*buf) * num_nodes;
+ buf = (struct ice_aqc_suspend_resume_elem *)
+ ice_malloc(hw, buf_size);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < num_nodes; i++)
+ buf->teid[i] = CPU_TO_LE32(node_teids[i]);
+
+ if (suspend)
+ status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
+ buf_size, &num_elem_ret,
+ NULL);
+ else
+ status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
+ buf_size, &num_elem_ret,
+ NULL);
+ if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
+ ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
+
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+ struct ice_q_ctx *q_ctx;
+
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ /* allocate LAN queue contexts */
+ if (!vsi_ctx->lan_q_ctx[tc]) {
+ vsi_ctx->lan_q_ctx[tc] = (struct ice_q_ctx *)
+ ice_calloc(hw, new_numqs, sizeof(*q_ctx));
+ if (!vsi_ctx->lan_q_ctx[tc])
+ return ICE_ERR_NO_MEMORY;
+ vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+ return ICE_SUCCESS;
+ }
+ /* num queues are increased, update the queue contexts */
+ if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
+ u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
+
+ q_ctx = (struct ice_q_ctx *)
+ ice_calloc(hw, new_numqs, sizeof(*q_ctx));
+ if (!q_ctx)
+ return ICE_ERR_NO_MEMORY;
+ ice_memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
+ prev_num * sizeof(*q_ctx), ICE_DMA_TO_NONDMA);
+ ice_free(hw, vsi_ctx->lan_q_ctx[tc]);
+ vsi_ctx->lan_q_ctx[tc] = q_ctx;
+ vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_aq_rl_profile - performs a rate limiting task
+ * @hw: pointer to the HW struct
+ * @opcode: opcode for add, query, or remove profile(s)
+ * @num_profiles: the number of profiles
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_processed: number of processed add or remove profile(s) to return
+ * @cd: pointer to command details structure
+ *
+ * RL profile function to add, query, or remove profile(s)
+ */
+static enum ice_status
+ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
+ u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_rl_profile *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.rl_profile;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ cmd->num_profiles = CPU_TO_LE16(num_profiles);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status && num_processed)
+ *num_processed = LE16_TO_CPU(cmd->num_processed);
+ return status;
+}
+
+/**
+ * ice_aq_add_rl_profile - adds rate limiting profile(s)
+ * @hw: pointer to the HW struct
+ * @num_profiles: the number of profile(s) to be added
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_profiles_added: total number of profiles added to return
+ * @cd: pointer to command details structure
+ *
+ * Add RL profile (0x0410)
+ */
+static enum ice_status
+ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_profiles_added,
+ struct ice_sq_cd *cd)
+{
+ return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
+ num_profiles, buf,
+ buf_size, num_profiles_added, cd);
+}
+
+/**
+ * ice_aq_query_rl_profile - query rate limiting profile(s)
+ * @hw: pointer to the HW struct
+ * @num_profiles: the number of profile(s) to query
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure
+ *
+ * Query RL profile (0x0411)
+ */
+enum ice_status
+ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_aq_rl_profile(hw, ice_aqc_opc_query_rl_profiles,
+ num_profiles, buf, buf_size, NULL, cd);
+}
+
+/**
+ * ice_aq_remove_rl_profile - removes RL profile(s)
+ * @hw: pointer to the HW struct
+ * @num_profiles: the number of profile(s) to remove
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @num_profiles_removed: total number of profiles removed to return
+ * @cd: pointer to command details structure or NULL
+ *
+ * Remove RL profile (0x0415)
+ */
+static enum ice_status
+ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
+ struct ice_aqc_rl_profile_generic_elem *buf,
+ u16 buf_size, u16 *num_profiles_removed,
+ struct ice_sq_cd *cd)
+{
+ return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
+ num_profiles, buf,
+ buf_size, num_profiles_removed, cd);
+}
+
+/**
+ * ice_sched_del_rl_profile - remove RL profile
+ * @hw: pointer to the HW struct
+ * @rl_info: rate limit profile information
+ *
+ * If the profile ID is not referenced anymore, it removes the profile ID with
+ * its associated parameters from the HW DB and locally. The caller needs to
+ * hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_del_rl_profile(struct ice_hw *hw,
+ struct ice_aqc_rl_profile_info *rl_info)
+{
+ struct ice_aqc_rl_profile_generic_elem *buf;
+ u16 num_profiles_removed;
+ enum ice_status status;
+ u16 num_profiles = 1;
+
+ if (rl_info->prof_id_ref != 0)
+ return ICE_ERR_IN_USE;
+
+ /* Safe to remove profile ID */
+ buf = (struct ice_aqc_rl_profile_generic_elem *)
+ &rl_info->profile;
+ status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
+ &num_profiles_removed, NULL);
+ if (status || num_profiles_removed != num_profiles)
+ return ICE_ERR_CFG;
+
+ /* Delete stale entry now */
+ LIST_DEL(&rl_info->list_entry);
+ ice_free(hw, rl_info);
+ return status;
+}
+
+/**
+ * ice_sched_clear_rl_prof - clears RL prof entries
+ * @pi: port information structure
+ *
+ * This function removes all RL profiles from HW as well as from the SW DB.
+ */
+static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
+{
+ u16 ln;
+
+ for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ struct ice_aqc_rl_profile_info *rl_prof_tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
+ &pi->rl_prof_list[ln],
+ ice_aqc_rl_profile_info, list_entry) {
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ rl_prof_elem->prof_id_ref = 0;
+ status = ice_sched_del_rl_profile(hw, rl_prof_elem);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Remove rl profile failed\n");
+ /* On error, free mem required */
+ LIST_DEL(&rl_prof_elem->list_entry);
+ ice_free(hw, rl_prof_elem);
+ }
+ }
+ }
+}
+
+/**
+ * ice_sched_clear_agg - clears the aggregator related information
+ * @hw: pointer to the hardware structure
+ *
+ * This function removes the aggregator list and frees up the aggregator-related
+ * memory previously allocated.
+ */
+void ice_sched_clear_agg(struct ice_hw *hw)
+{
+ struct ice_sched_agg_info *agg_info;
+ struct ice_sched_agg_info *atmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &hw->agg_list,
+ ice_sched_agg_info,
+ list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *vtmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
+ &agg_info->agg_vsi_list,
+ ice_sched_agg_vsi_info, list_entry) {
+ LIST_DEL(&agg_vsi_info->list_entry);
+ ice_free(hw, agg_vsi_info);
+ }
+ LIST_DEL(&agg_info->list_entry);
+ ice_free(hw, agg_info);
+ }
+}
+
+/**
+ * ice_sched_clear_tx_topo - clears the scheduler tree nodes
+ * @pi: port information structure
+ *
+ * This function removes all the nodes from HW as well as from SW DB.
+ */
+static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
+{
+ if (!pi)
+ return;
+ /* remove RL profiles related lists */
+ ice_sched_clear_rl_prof(pi);
+ if (pi->root) {
+ ice_free_sched_node(pi, pi->root);
+ pi->root = NULL;
+ }
+}
+
+/**
+ * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
+ * @pi: port information structure
+ *
+ * Cleanup scheduling elements from SW DB
+ */
+void ice_sched_clear_port(struct ice_port_info *pi)
+{
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return;
+
+ pi->port_state = ICE_SCHED_PORT_STATE_INIT;
+ ice_acquire_lock(&pi->sched_lock);
+ ice_sched_clear_tx_topo(pi);
+ ice_release_lock(&pi->sched_lock);
+ ice_destroy_lock(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
+ * @hw: pointer to the HW struct
+ *
+ * Cleanup scheduling elements from SW DB for all the ports
+ */
+void ice_sched_cleanup_all(struct ice_hw *hw)
+{
+ if (!hw)
+ return;
+
+ if (hw->layer_info) {
+ ice_free(hw, hw->layer_info);
+ hw->layer_info = NULL;
+ }
+
+ ice_sched_clear_port(hw->port_info);
+
+ hw->num_tx_sched_layers = 0;
+ hw->num_tx_sched_phys_layers = 0;
+ hw->flattened_layers = 0;
+ hw->max_cgds = 0;
+}
+
+/**
+ * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping
+ * @hw: pointer to the HW struct
+ * @num_l2_nodes: the number of L2 nodes whose CGDs to configure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * Configure L2 Node CGD (0x0414)
+ */
+enum ice_status
+ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
+ struct ice_aqc_cfg_l2_node_cgd_data *buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_cfg_l2_node_cgd *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.cfg_l2_node_cgd;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_l2_node_cgd);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ cmd->num_l2_nodes = CPU_TO_LE16(num_l2_nodes);
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_sched_add_elems - add nodes to HW and SW DB
+ * @pi: port information structure
+ * @tc_node: pointer to the branch node
+ * @parent: pointer to the parent node
+ * @layer: layer number to add nodes
+ * @num_nodes: number of nodes
+ * @num_nodes_added: pointer to num nodes added
+ * @first_node_teid: if new nodes are added then return the TEID of first node
+ *
+ * This function adds nodes to HW as well as to the SW DB for a given layer
+ */
+static enum ice_status
+ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer, u16 num_nodes,
+ u16 *num_nodes_added, u32 *first_node_teid)
+{
+ struct ice_sched_node *prev, *new_node;
+ struct ice_aqc_add_elem *buf;
+ u16 i, num_groups_added = 0;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw = pi->hw;
+ u16 buf_size;
+ u32 teid;
+
+ buf_size = ice_struct_size(buf, generic, num_nodes - 1);
+ buf = (struct ice_aqc_add_elem *)ice_malloc(hw, buf_size);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->hdr.parent_teid = parent->info.node_teid;
+ buf->hdr.num_elems = CPU_TO_LE16(num_nodes);
+ for (i = 0; i < num_nodes; i++) {
+ buf->generic[i].parent_teid = parent->info.node_teid;
+ buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
+ buf->generic[i].data.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->generic[i].data.generic = 0;
+ buf->generic[i].data.cir_bw.bw_profile_idx =
+ CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->generic[i].data.cir_bw.bw_alloc =
+ CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
+ buf->generic[i].data.eir_bw.bw_profile_idx =
+ CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->generic[i].data.eir_bw.bw_alloc =
+ CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
+ }
+
+ status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
+ &num_groups_added, NULL);
+ if (status != ICE_SUCCESS || num_groups_added != 1) {
+ ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
+ hw->adminq.sq_last_status);
+ ice_free(hw, buf);
+ return ICE_ERR_CFG;
+ }
+
+ *num_nodes_added = num_nodes;
+ /* add nodes to the SW DB */
+ for (i = 0; i < num_nodes; i++) {
+ status = ice_sched_add_node(pi, layer, &buf->generic[i]);
+ if (status != ICE_SUCCESS) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "add nodes in SW DB failed status =%d\n",
+ status);
+ break;
+ }
+
+ teid = LE32_TO_CPU(buf->generic[i].node_teid);
+ new_node = ice_sched_find_node_by_teid(parent, teid);
+ if (!new_node) {
+ ice_debug(hw, ICE_DBG_SCHED,
+ "Node is missing for teid =%d\n", teid);
+ break;
+ }
+
+ new_node->sibling = NULL;
+ new_node->tc_num = tc_node->tc_num;
+
+ /* add it to previous node sibling pointer */
+ /* Note: siblings are not linked across branches */
+ prev = ice_sched_get_first_node(pi, tc_node, layer);
+ if (prev && prev != new_node) {
+ while (prev->sibling)
+ prev = prev->sibling;
+ prev->sibling = new_node;
+ }
+
+ /* initialize the sibling head */
+ if (!pi->sib_head[tc_node->tc_num][layer])
+ pi->sib_head[tc_node->tc_num][layer] = new_node;
+
+ if (i == 0)
+ *first_node_teid = teid;
+ }
+
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_sched_add_nodes_to_layer - Add nodes to a given layer
+ * @pi: port information structure
+ * @tc_node: pointer to TC node
+ * @parent: pointer to parent node
+ * @layer: layer number to add nodes
+ * @num_nodes: number of nodes to be added
+ * @first_node_teid: pointer to the first node TEID
+ * @num_nodes_added: pointer to number of nodes added
+ *
+ * This function adds nodes to a given layer.
+ */
+static enum ice_status
+ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
+ struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer,
+ u16 num_nodes, u32 *first_node_teid,
+ u16 *num_nodes_added)
+{
+ u32 *first_teid_ptr = first_node_teid;
+ u16 new_num_nodes, max_child_nodes;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw = pi->hw;
+ u16 num_added = 0;
+ u32 temp;
+
+ *num_nodes_added = 0;
+
+ if (!num_nodes)
+ return status;
+
+ if (!parent || layer < hw->sw_entry_point_layer)
+ return ICE_ERR_PARAM;
+
+ /* max children per node per layer */
+ max_child_nodes = hw->max_children[parent->tx_sched_layer];
+
+ /* current number of children + required nodes exceed max children ? */
+ if ((parent->num_children + num_nodes) > max_child_nodes) {
+ /* Fail if the parent is a TC node */
+ if (parent == tc_node)
+ return ICE_ERR_CFG;
+
+ /* utilize all the spaces if the parent is not full */
+ if (parent->num_children < max_child_nodes) {
+ new_num_nodes = max_child_nodes - parent->num_children;
+ /* this recursion is intentional, and won't
+ * go more than 2 calls deep
+ */
+ status = ice_sched_add_nodes_to_layer(pi, tc_node,
+ parent, layer,
+ new_num_nodes,
+ first_node_teid,
+ &num_added);
+ if (status != ICE_SUCCESS)
+ return status;
+
+ *num_nodes_added += num_added;
+ }
+ /* Don't modify the first node TEID memory if the first node was
+ * added already in the above call. Instead send some temp
+ * memory for all other recursive calls.
+ */
+ if (num_added)
+ first_teid_ptr = &temp;
+
+ new_num_nodes = num_nodes - num_added;
+
+ /* This parent is full, try the next sibling */
+ parent = parent->sibling;
+
+ /* this recursion is intentional, for 1024 queues
+ * per VSI, it goes at most 16 iterations.
+ * 1024 / 8 = 128 layer 8 nodes
+ * 128 / 8 = 16 (add 8 nodes per iteration)
+ */
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
+ layer, new_num_nodes,
+ first_teid_ptr,
+ &num_added);
+ *num_nodes_added += num_added;
+ return status;
+ }
+
+ status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
+ num_nodes_added, first_node_teid);
+ return status;
+}
+
+/**
+ * ice_sched_get_qgrp_layer - get the current queue group layer number
+ * @hw: pointer to the HW struct
+ *
+ * This function returns the current queue group layer number
+ */
+static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
+{
+ /* It's always total layers - 1, the array is 0 relative so -2 */
+ return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
+}
+
+/**
+ * ice_sched_get_vsi_layer - get the current VSI layer number
+ * @hw: pointer to the HW struct
+ *
+ * This function returns the current VSI layer number
+ */
+static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
+{
+ /* Num Layers VSI layer
+ * 9 6
+ * 7 4
+ * 5 or less sw_entry_point_layer
+ */
+ /* calculate the VSI layer based on number of layers. */
+ if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
+ u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+
+ if (layer > hw->sw_entry_point_layer)
+ return layer;
+ }
+ return hw->sw_entry_point_layer;
+}
+
+/**
+ * ice_sched_get_agg_layer - get the current aggregator layer number
+ * @hw: pointer to the HW struct
+ *
+ * This function returns the current aggregator layer number
+ */
+static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
+{
+ /* Num Layers aggregator layer
+ * 9 4
+ * 7 or less sw_entry_point_layer
+ */
+ /* calculate the aggregator layer based on number of layers. */
+ if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
+ u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
+
+ if (layer > hw->sw_entry_point_layer)
+ return layer;
+ }
+ return hw->sw_entry_point_layer;
+}
+
+/**
+ * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
+ * @pi: port information structure
+ *
+ * This function removes the leaf node that was created by the FW
+ * during initialization
+ */
+static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
+{
+ struct ice_sched_node *node;
+
+ node = pi->root;
+ while (node) {
+ if (!node->num_children)
+ break;
+ node = node->children[0];
+ }
+ if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
+ u32 teid = LE32_TO_CPU(node->info.node_teid);
+ enum ice_status status;
+
+ /* remove the default leaf node */
+ status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
+ if (!status)
+ ice_free_sched_node(pi, node);
+ }
+}
+
+/**
+ * ice_sched_rm_dflt_nodes - free the default nodes in the tree
+ * @pi: port information structure
+ *
+ * This function frees all the nodes except root and TC that were created by
+ * the FW during initialization
+ */
+static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
+{
+ struct ice_sched_node *node;
+
+ ice_rm_dflt_leaf_node(pi);
+
+ /* remove the default nodes except TC and root nodes */
+ node = pi->root;
+ while (node) {
+ if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
+ node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
+ ice_free_sched_node(pi, node);
+ break;
+ }
+
+ if (!node->num_children)
+ break;
+ node = node->children[0];
+ }
+}
+
+/**
+ * ice_sched_init_port - Initialize scheduler by querying information from FW
+ * @pi: port information structure
+ *
+ * This function is the initial call to find the total number of Tx scheduler
+ * resources and the default topology created by firmware, and to store that
+ * information in the SW DB.
+ */
+enum ice_status ice_sched_init_port(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_topo_elem *buf;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 num_branches;
+ u16 num_elems;
+ u8 i, j;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw = pi->hw;
+
+ /* Query the Default Topology from FW */
+ buf = (struct ice_aqc_get_topo_elem *)ice_malloc(hw,
+ ICE_AQ_MAX_BUF_LEN);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Query default scheduling tree topology */
+ status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
+ &num_branches, NULL);
+ if (status)
+ goto err_init_port;
+
+ /* num_branches should be between 1-8 */
+ if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
+ ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
+ num_branches);
+ status = ICE_ERR_PARAM;
+ goto err_init_port;
+ }
+
+ /* get the number of elements on the default/first branch */
+ num_elems = LE16_TO_CPU(buf[0].hdr.num_elems);
+
+ /* num_elems should always be between 1-9 */
+ if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
+ ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
+ num_elems);
+ status = ICE_ERR_PARAM;
+ goto err_init_port;
+ }
+
+ /* If the last node is a leaf node then the index of the queue group
+ * layer is two less than the number of elements.
+ */
+ if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
+ ICE_AQC_ELEM_TYPE_LEAF)
+ pi->last_node_teid =
+ LE32_TO_CPU(buf[0].generic[num_elems - 2].node_teid);
+ else
+ pi->last_node_teid =
+ LE32_TO_CPU(buf[0].generic[num_elems - 1].node_teid);
+
+ /* Insert the Tx Sched root node */
+ status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
+ if (status)
+ goto err_init_port;
+
+ /* Parse the default tree and cache the information */
+ for (i = 0; i < num_branches; i++) {
+ num_elems = LE16_TO_CPU(buf[i].hdr.num_elems);
+
+ /* Skip root element as already inserted */
+ for (j = 1; j < num_elems; j++) {
+ /* update the sw entry point */
+ if (buf[0].generic[j].data.elem_type ==
+ ICE_AQC_ELEM_TYPE_ENTRY_POINT)
+ hw->sw_entry_point_layer = j;
+
+ status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
+ if (status)
+ goto err_init_port;
+ }
+ }
+
+ /* Remove the default nodes. */
+ if (pi->root)
+ ice_sched_rm_dflt_nodes(pi);
+
+ /* initialize the port for handling the scheduler tree */
+ pi->port_state = ICE_SCHED_PORT_STATE_READY;
+ ice_init_lock(&pi->sched_lock);
+ for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
+ INIT_LIST_HEAD(&pi->rl_prof_list[i]);
+
+err_init_port:
+ if (status && pi->root) {
+ ice_free_sched_node(pi, pi->root);
+ pi->root = NULL;
+ }
+
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_sched_get_node - Get the struct ice_sched_node for given TEID
+ * @pi: port information structure
+ * @teid: Scheduler node TEID
+ *
+ * This function retrieves the ice_sched_node struct for given TEID from
+ * the SW DB and returns it to the caller.
+ */
+struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
+{
+ struct ice_sched_node *node;
+
+ if (!pi)
+ return NULL;
+
+ /* Find the node starting from root */
+ ice_acquire_lock(&pi->sched_lock);
+ node = ice_sched_find_node_by_teid(pi->root, teid);
+ ice_release_lock(&pi->sched_lock);
+
+ if (!node)
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "Node not found for teid=0x%x\n", teid);
+
+ return node;
+}
+
+/**
+ * ice_sched_query_res_alloc - query the FW for num of logical sched layers
+ * @hw: pointer to the HW struct
+ *
+ * query FW for allocated scheduler resources and store in HW struct
+ */
+enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
+{
+ struct ice_aqc_query_txsched_res_resp *buf;
+ enum ice_status status = ICE_SUCCESS;
+ __le16 max_sibl;
+ u8 i;
+
+ if (hw->layer_info)
+ return status;
+
+ buf = (struct ice_aqc_query_txsched_res_resp *)
+ ice_malloc(hw, sizeof(*buf));
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
+ if (status)
+ goto sched_query_out;
+
+ hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels);
+ hw->num_tx_sched_phys_layers =
+ LE16_TO_CPU(buf->sched_props.phys_levels);
+ hw->flattened_layers = buf->sched_props.flattening_bitmap;
+ hw->max_cgds = buf->sched_props.max_pf_cgds;
+
+ /* max sibling group size of current layer refers to the max children
+ * of the below layer node.
+ * layer 1 node max children will be layer 2 max sibling group size
+ * layer 2 node max children will be layer 3 max sibling group size
+ * and so on. This array will be populated from root (index 0) to
+ * qgroup layer 7. Leaf node has no children.
+ */
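+	/* e.g. max_children[0] (the root layer) is taken from layer_props[1],
+	 * max_children[1] from layer_props[2], and so on.
+	 */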
+ for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
+ max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
+ hw->max_children[i] = LE16_TO_CPU(max_sibl);
+ }
+
+ hw->layer_info = (struct ice_aqc_layer_props *)
+ ice_memdup(hw, buf->layer_props,
+ (hw->num_tx_sched_layers *
+ sizeof(*hw->layer_info)),
+ ICE_DMA_TO_DMA);
+ if (!hw->layer_info) {
+ status = ICE_ERR_NO_MEMORY;
+ goto sched_query_out;
+ }
+
+sched_query_out:
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
+ * @hw: pointer to the HW struct
+ *
+ * Determine the PSM clock frequency and store in HW struct
+ */
+void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
+{
+ u32 val, clk_src;
+
+ val = rd32(hw, GLGEN_CLKSTAT_SRC);
+ clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
+ GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
+
+#define PSM_CLK_SRC_367_MHZ 0x0
+#define PSM_CLK_SRC_416_MHZ 0x1
+#define PSM_CLK_SRC_446_MHZ 0x2
+#define PSM_CLK_SRC_390_MHZ 0x3
+
+ switch (clk_src) {
+ case PSM_CLK_SRC_367_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
+ break;
+ case PSM_CLK_SRC_416_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
+ break;
+ case PSM_CLK_SRC_446_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
+ break;
+ case PSM_CLK_SRC_390_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
+ clk_src);
+ /* fall back to a safe default */
+ hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
+ }
+}
+
+/**
+ * ice_sched_find_node_in_subtree - Find node in part of base node subtree
+ * @hw: pointer to the HW struct
+ * @base: pointer to the base node
+ * @node: pointer to the node to search
+ *
+ * This function checks whether a given node is part of the base node
+ * subtree or not
+ */
+bool
+ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
+ struct ice_sched_node *node)
+{
+ u8 i;
+
+ for (i = 0; i < base->num_children; i++) {
+ struct ice_sched_node *child = base->children[i];
+
+ if (node == child)
+ return true;
+
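+		/* all of base's children sit on the same layer; once that layer
+		 * is deeper than the target node's layer, the node cannot be
+		 * below it
+		 */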
+ if (child->tx_sched_layer > node->tx_sched_layer)
+ return false;
+
+ /* this recursion is intentional, and wouldn't
+ * go more than 8 calls
+ */
+ if (ice_sched_find_node_in_subtree(hw, child, node))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: branch number
+ * @owner: LAN or RDMA
+ *
+ * This function retrieves a free LAN or RDMA queue group node
+ */
+struct ice_sched_node *
+ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u8 owner)
+{
+ struct ice_sched_node *vsi_node, *qgrp_node = NULL;
+ struct ice_vsi_ctx *vsi_ctx;
+ u16 max_children;
+ u8 qgrp_layer;
+
+ qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
+ max_children = pi->hw->max_children[qgrp_layer];
+
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return NULL;
+ vsi_node = vsi_ctx->sched.vsi_node[tc];
+	/* bail out if no VSI node exists for this TC */
+ if (!vsi_node)
+ goto lan_q_exit;
+
+ /* get the first queue group node from VSI sub-tree */
+ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
+ while (qgrp_node) {
+ /* make sure the qgroup node is part of the VSI subtree */
+ if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
+ if (qgrp_node->num_children < max_children &&
+ qgrp_node->owner == owner)
+ break;
+ qgrp_node = qgrp_node->sibling;
+ }
+
+lan_q_exit:
+ return qgrp_node;
+}
+
+/**
+ * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
+ * @pi: pointer to the port information structure
+ * @tc_node: pointer to the TC node
+ * @vsi_handle: software VSI handle
+ *
+ * This function retrieves a VSI node for a given VSI ID from a given
+ * TC branch
+ */
+struct ice_sched_node *
+ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ u16 vsi_handle)
+{
+ struct ice_sched_node *node;
+ u8 vsi_layer;
+
+ vsi_layer = ice_sched_get_vsi_layer(pi->hw);
+ node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
+
+ /* Check whether it already exists */
+ while (node) {
+ if (node->vsi_handle == vsi_handle)
+ return node;
+ node = node->sibling;
+ }
+
+ return node;
+}
+
+/**
+ * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
+ * @pi: pointer to the port information structure
+ * @tc_node: pointer to the TC node
+ * @agg_id: aggregator ID
+ *
+ * This function retrieves an aggregator node for a given aggregator ID from
+ * a given TC branch
+ */
+static struct ice_sched_node *
+ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ u32 agg_id)
+{
+ struct ice_sched_node *node;
+ struct ice_hw *hw = pi->hw;
+ u8 agg_layer;
+
+ if (!hw)
+ return NULL;
+ agg_layer = ice_sched_get_agg_layer(hw);
+ node = ice_sched_get_first_node(pi, tc_node, agg_layer);
+
+ /* Check whether it already exists */
+ while (node) {
+ if (node->agg_id == agg_id)
+ return node;
+ node = node->sibling;
+ }
+
+ return node;
+}
+
+/**
+ * ice_sched_check_node - Compare node parameters between SW DB and HW DB
+ * @hw: pointer to the HW struct
+ * @node: pointer to the ice_sched_node struct
+ *
+ * This function queries and compares the HW element with SW DB node parameters
+ */
+static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
+{
+ struct ice_aqc_get_elem buf;
+ enum ice_status status;
+ u32 node_teid;
+
+ node_teid = LE32_TO_CPU(node->info.node_teid);
+ status = ice_sched_query_elem(hw, node_teid, &buf);
+ if (status != ICE_SUCCESS)
+ return false;
+
+ if (memcmp(buf.generic, &node->info, sizeof(*buf.generic))) {
+ ice_debug(hw, ICE_DBG_SCHED, "Node mismatch for teid=0x%x\n",
+ node_teid);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
+ * @hw: pointer to the HW struct
+ * @num_qs: number of queues
+ * @num_nodes: num nodes array
+ *
+ * This function calculates the number of VSI child nodes based on the
+ * number of queues.
+ */
+static void
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+{
+ u16 num = num_qs;
+ u8 i, qgl, vsil;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
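+	/* Example (illustrative): with 256 queues and 8 queues per queue group
+	 * node, the qgroup layer needs DIVIDE_AND_ROUND_UP(256, 8) = 32 nodes;
+	 * each layer above then needs enough nodes to parent the layer below.
+	 */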
+ /* calculate num nodes from queue group to VSI layer */
+ for (i = qgl; i > vsil; i--) {
+ /* round to the next integer if there is a remainder */
+ num = DIVIDE_AND_ROUND_UP(num, hw->max_children[i]);
+
+ /* need at least one node */
+ num_nodes[i] = num ? num : 1;
+ }
+}
+
+/**
+ * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_node: pointer to the TC node
+ * @num_nodes: pointer to the num nodes that needs to be added per layer
+ * @owner: node owner (LAN or RDMA)
+ *
+ * This function adds the VSI child nodes to tree. It gets called for
+ * LAN and RDMA separately.
+ */
+static enum ice_status
+ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
+ struct ice_sched_node *tc_node, u16 *num_nodes,
+ u8 owner)
+{
+ struct ice_sched_node *parent, *node;
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+ u32 first_node_teid;
+ u16 num_added = 0;
+ u8 i, qgl, vsil;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+ parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ for (i = vsil + 1; i <= qgl; i++) {
+ if (!parent)
+ return ICE_ERR_CFG;
+
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
+ num_nodes[i],
+ &first_node_teid,
+ &num_added);
+ if (status != ICE_SUCCESS || num_nodes[i] != num_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_added) {
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ node = parent;
+ while (node) {
+ node->owner = owner;
+ node = node->sibling;
+ }
+ } else {
+ parent = parent->children[0];
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
+ * @pi: pointer to the port info structure
+ * @tc_node: pointer to TC node
+ * @num_nodes: pointer to num nodes array
+ *
+ * This function calculates the number of supported nodes needed to add this
+ * VSI into the Tx tree, including the VSI, its parent, and intermediate nodes
+ * in the layers below.
+ */
+static void
+ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
+ struct ice_sched_node *tc_node, u16 *num_nodes)
+{
+ struct ice_sched_node *node;
+ u8 vsil;
+ int i;
+
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+ for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
+		/* Add a node at this layer if the TC has no children yet;
+		 * the VSI layer always needs at least one node
+		 */
+ if (!tc_node->num_children || i == vsil) {
+ num_nodes[i]++;
+ } else {
+			/* If the intermediate nodes have all reached their max
+			 * children then add a new one.
+			 */
+ node = ice_sched_get_first_node(pi, tc_node, (u8)i);
+ /* scan all the siblings */
+ while (node) {
+ if (node->num_children <
+ pi->hw->max_children[i])
+ break;
+ node = node->sibling;
+ }
+
+ /* tree has one intermediate node to add this new VSI.
+ * So no need to calculate supported nodes for below
+ * layers.
+ */
+ if (node)
+ break;
+ /* all the nodes are full, allocate a new one */
+ num_nodes[i]++;
+ }
+}
+
+/**
+ * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_node: pointer to TC node
+ * @num_nodes: pointer to num nodes array
+ *
+ * This function adds the VSI supported nodes into the Tx tree, including the
+ * VSI, its parent, and intermediate nodes in the layers below.
+ */
+static enum ice_status
+ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
+ struct ice_sched_node *tc_node, u16 *num_nodes)
+{
+ struct ice_sched_node *parent = tc_node;
+ enum ice_status status;
+ u32 first_node_teid;
+ u16 num_added = 0;
+ u8 i, vsil;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+ for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
+ i, num_nodes[i],
+ &first_node_teid,
+ &num_added);
+ if (status != ICE_SUCCESS || num_nodes[i] != num_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_added)
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ else
+ parent = parent->children[0];
+
+ if (!parent)
+ return ICE_ERR_CFG;
+
+ if (i == vsil)
+ parent->vsi_handle = vsi_handle;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_add_vsi_to_topo - add a new VSI into tree
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ *
+ * This function adds a new VSI into scheduler tree
+ */
+static enum ice_status
+ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
+{
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ struct ice_sched_node *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_PARAM;
+
+ /* calculate number of supported nodes needed for this VSI */
+ ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
+
+ /* add VSI supported nodes to TC subtree */
+ return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ num_nodes);
+}
+
+/**
+ * ice_sched_update_vsi_child_nodes - update VSI child nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @new_numqs: new number of max queues
+ * @owner: owner of this subtree
+ *
+ * This function updates the VSI child nodes based on the number of queues
+ */
+static enum ice_status
+ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
+ u8 tc, u16 new_numqs, u8 owner)
+{
+ u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ struct ice_sched_node *vsi_node;
+ struct ice_sched_node *tc_node;
+ struct ice_vsi_ctx *vsi_ctx;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw = pi->hw;
+ u16 prev_numqs;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ return ICE_ERR_CFG;
+
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+
+ prev_numqs = vsi_ctx->sched.max_lanq[tc];
+	/* number of queues is unchanged or less than the previous number */
+ if (new_numqs <= prev_numqs)
+ return status;
+ status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+ if (status)
+ return status;
+
+ if (new_numqs)
+ ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
+ /* Keep the max number of queue configuration all the time. Update the
+ * tree only if number of queues > previous number of queues. This may
+ * leave some extra nodes in the tree if number of queues < previous
+ * number but that wouldn't harm anything. Removing those extra nodes
+ * may complicate the code if those nodes are part of SRL or
+ * individually rate limited.
+ */
+ status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes, owner);
+ if (status)
+ return status;
+ vsi_ctx->sched.max_lanq[tc] = new_numqs;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_cfg_vsi - configure the new/existing VSI
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @maxqs: max number of queues
+ * @owner: LAN or RDMA
+ * @enable: TC enabled or disabled
+ *
+ * This function adds/updates VSI nodes based on the number of queues. If TC is
+ * enabled and VSI is in suspended state then resume the VSI back. If TC is
+ * disabled then suspend the VSI if it is not already.
+ */
+enum ice_status
+ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
+ u8 owner, bool enable)
+{
+ struct ice_sched_node *vsi_node, *tc_node;
+ struct ice_vsi_ctx *vsi_ctx;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw = pi->hw;
+
+ ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_PARAM;
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+
+ /* suspend the VSI if TC is not enabled */
+ if (!enable) {
+ if (vsi_node && vsi_node->in_use) {
+ u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);
+
+ status = ice_sched_suspend_resume_elems(hw, 1, &teid,
+ true);
+ if (!status)
+ vsi_node->in_use = false;
+ }
+ return status;
+ }
+
+ /* TC is enabled, if it is a new VSI then add it to the tree */
+ if (!vsi_node) {
+ status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
+ if (status)
+ return status;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ return ICE_ERR_CFG;
+
+ vsi_ctx->sched.vsi_node[tc] = vsi_node;
+ vsi_node->in_use = true;
+		/* invalidate the max queues whenever the VSI gets added for
+		 * the first time into the scheduler tree (boot or after
+		 * reset). We need to recreate the child nodes in these cases.
+ */
+ vsi_ctx->sched.max_lanq[tc] = 0;
+ }
+
+ /* update the VSI child nodes */
+ status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
+ owner);
+ if (status)
+ return status;
+
+ /* TC is enabled, resume the VSI if it is in the suspend state */
+ if (!vsi_node->in_use) {
+ u32 teid = LE32_TO_CPU(vsi_node->info.node_teid);
+
+ status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
+ if (!status)
+ vsi_node->in_use = true;
+ }
+
+ return status;
+}
+
+/**
+ * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function removes a single aggregator VSI info entry from the
+ * aggregator list.
+ */
+static void
+ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+{
+ struct ice_sched_agg_info *agg_info;
+ struct ice_sched_agg_info *atmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(agg_info, atmp, &pi->hw->agg_list,
+ ice_sched_agg_info,
+ list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *vtmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, vtmp,
+ &agg_info->agg_vsi_list,
+ ice_sched_agg_vsi_info, list_entry)
+ if (agg_vsi_info->vsi_handle == vsi_handle) {
+ LIST_DEL(&agg_vsi_info->list_entry);
+ ice_free(pi->hw, agg_vsi_info);
+ return;
+ }
+ }
+}
+
+/**
+ * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
+ * @node: pointer to the sub-tree node
+ *
+ * This function checks for a leaf node presence in a given sub-tree node.
+ */
+static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
+{
+ u8 i;
+
+ for (i = 0; i < node->num_children; i++)
+ if (ice_sched_is_leaf_node_present(node->children[i]))
+ return true;
+ /* check for a leaf node */
+ return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
+}
+
+/**
+ * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @owner: LAN or RDMA
+ *
+ * This function removes the VSI and its LAN or RDMA children nodes from the
+ * scheduler tree.
+ */
+static enum ice_status
+ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_vsi_ctx *vsi_ctx;
+ u8 i;
+
+ ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return status;
+ ice_acquire_lock(&pi->sched_lock);
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ goto exit_sched_rm_vsi_cfg;
+
+ ice_for_each_traffic_class(i) {
+ struct ice_sched_node *vsi_node, *tc_node;
+ u8 j = 0;
+
+ tc_node = ice_sched_get_tc_node(pi, i);
+ if (!tc_node)
+ continue;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ continue;
+
+ if (ice_sched_is_leaf_node_present(vsi_node)) {
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "VSI has leaf nodes in TC %d\n", i);
+ status = ICE_ERR_IN_USE;
+ goto exit_sched_rm_vsi_cfg;
+ }
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner) {
+ ice_free_sched_node(pi, vsi_node->children[j]);
+
+ /* reset the counter again since the num
+ * children will be updated after node removal
+ */
+ j = 0;
+ } else {
+ j++;
+ }
+ }
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children) {
+ ice_free_sched_node(pi, vsi_node);
+ vsi_ctx->sched.vsi_node[i] = NULL;
+
+ /* clean up aggregator related VSI info if any */
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+ }
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ vsi_ctx->sched.max_lanq[i] = 0;
+ }
+ status = ICE_SUCCESS;
+
+exit_sched_rm_vsi_cfg:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its LAN children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
+}
+
+/**
+ * ice_sched_is_tree_balanced - Check whether the tree nodes are identical or not
+ * @hw: pointer to the HW struct
+ * @node: pointer to the ice_sched_node struct
+ *
+ * This function compares all the nodes for a given tree against the HW DB
+ * nodes. This function needs to be called with the port_info->sched_lock held.
+ */
+bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
+{
+ u8 i;
+
+ /* start from the leaf node */
+ for (i = 0; i < node->num_children; i++)
+		/* Fail if the node doesn't match the SW DB; this recursion is
+		 * intentional and won't go more than 9 calls deep
+		 */
+ if (!ice_sched_is_tree_balanced(hw, node->children[i]))
+ return false;
+
+ return ice_sched_check_node(hw, node);
+}
+
+/**
+ * ice_aq_query_node_to_root - retrieve the tree topology for a given node TEID
+ * @hw: pointer to the HW struct
+ * @node_teid: node TEID
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * This function retrieves the tree topology from the firmware for a given
+ * node TEID to the root node.
+ */
+enum ice_status
+ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_query_node_to_root *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.query_node_to_root;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_node_to_root);
+ cmd->teid = CPU_TO_LE32(node_teid);
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_get_agg_info - get the aggregator info entry
+ * @hw: pointer to the hardware structure
+ * @agg_id: aggregator ID
+ *
+ * This function validates the aggregator ID. It returns the aggregator info
+ * if the aggregator ID is present in the list; otherwise it returns NULL.
+ */
+static struct ice_sched_agg_info*
+ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
+ list_entry)
+ if (agg_info->agg_id == agg_id)
+ return agg_info;
+
+ return NULL;
+}
+
+/**
+ * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
+ * @hw: pointer to the HW struct
+ * @node: pointer to a child node
+ * @num_nodes: num nodes count array
+ *
+ * This function walks through the aggregator subtree to find a free parent
+ * node
+ */
+static struct ice_sched_node *
+ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
+ u16 *num_nodes)
+{
+ u8 l = node->tx_sched_layer;
+ u8 vsil, i;
+
+ vsil = ice_sched_get_vsi_layer(hw);
+
+	/* Is it the VSI parent layer? */
+ if (l == vsil - 1)
+ return (node->num_children < hw->max_children[l]) ? node : NULL;
+
+ /* We have intermediate nodes. Let's walk through the subtree. If the
+ * intermediate node has space to add a new node then clear the count
+ */
+ if (node->num_children < hw->max_children[l])
+ num_nodes[l] = 0;
+ /* The below recursive call is intentional and wouldn't go more than
+ * 2 or 3 iterations.
+ */
+
+ for (i = 0; i < node->num_children; i++) {
+ struct ice_sched_node *parent;
+
+ parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
+ num_nodes);
+ if (parent)
+ return parent;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_sched_update_parent - update the new parent in SW DB
+ * @new_parent: pointer to a new parent node
+ * @node: pointer to a child node
+ *
+ * This function removes the child from the old parent and adds it to a new
+ * parent
+ */
+static void
+ice_sched_update_parent(struct ice_sched_node *new_parent,
+ struct ice_sched_node *node)
+{
+ struct ice_sched_node *old_parent;
+ u8 i, j;
+
+ old_parent = node->parent;
+
+ /* update the old parent children */
+ for (i = 0; i < old_parent->num_children; i++)
+ if (old_parent->children[i] == node) {
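+			/* shift the remaining children left to close the gap */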
+ for (j = i + 1; j < old_parent->num_children; j++)
+ old_parent->children[j - 1] =
+ old_parent->children[j];
+ old_parent->num_children--;
+ break;
+ }
+
+ /* now move the node to a new parent */
+ new_parent->children[new_parent->num_children++] = node;
+ node->parent = new_parent;
+ node->info.parent_teid = new_parent->info.node_teid;
+}
+
+/**
+ * ice_sched_move_nodes - move child nodes to a given parent
+ * @pi: port information structure
+ * @parent: pointer to parent node
+ * @num_items: number of child nodes to be moved
+ * @list: pointer to child node teids
+ *
+ * This function moves the child nodes to a given parent.
+ */
+static enum ice_status
+ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
+ u16 num_items, u32 *list)
+{
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_aqc_move_elem *buf;
+ struct ice_sched_node *node;
+ u16 i, grps_movd = 0;
+ struct ice_hw *hw;
+
+ hw = pi->hw;
+
+ if (!parent || !num_items)
+ return ICE_ERR_PARAM;
+
+	/* Does the parent have enough space? */
+ if (parent->num_children + num_items >=
+ hw->max_children[parent->tx_sched_layer])
+ return ICE_ERR_AQ_FULL;
+
+ buf = (struct ice_aqc_move_elem *)ice_malloc(hw, sizeof(*buf));
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
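+	/* move one node at a time, one AQ command per node */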
+ for (i = 0; i < num_items; i++) {
+ node = ice_sched_find_node_by_teid(pi->root, list[i]);
+ if (!node) {
+ status = ICE_ERR_PARAM;
+ goto move_err_exit;
+ }
+
+ buf->hdr.src_parent_teid = node->info.parent_teid;
+ buf->hdr.dest_parent_teid = parent->info.node_teid;
+ buf->teid[0] = node->info.node_teid;
+ buf->hdr.num_elems = CPU_TO_LE16(1);
+ status = ice_aq_move_sched_elems(hw, 1, buf, sizeof(*buf),
+ &grps_movd, NULL);
+ if (status && grps_movd != 1) {
+ status = ICE_ERR_CFG;
+ goto move_err_exit;
+ }
+
+ /* update the SW DB */
+ ice_sched_update_parent(parent, node);
+ }
+
+move_err_exit:
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_sched_move_vsi_to_agg - move VSI to aggregator node
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @agg_id: aggregator ID
+ * @tc: TC number
+ *
+ * This function moves a VSI to an aggregator node or its subtree.
+ * Intermediate nodes may be created if required.
+ */
+static enum ice_status
+ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
+ u8 tc)
+{
+ struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ u32 first_node_teid, vsi_teid;
+ enum ice_status status;
+ u16 num_nodes_added;
+ u8 aggl, vsil, i;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ aggl = ice_sched_get_agg_layer(pi->hw);
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+
+ /* set intermediate node count to 1 between aggregator and VSI layers */
+ for (i = aggl + 1; i < vsil; i++)
+ num_nodes[i] = 1;
+
+ /* Check if the aggregator subtree has any free node to add the VSI */
+ for (i = 0; i < agg_node->num_children; i++) {
+ parent = ice_sched_get_free_vsi_parent(pi->hw,
+ agg_node->children[i],
+ num_nodes);
+ if (parent)
+ goto move_nodes;
+ }
+
+ /* add new nodes */
+ parent = agg_node;
+ for (i = aggl + 1; i < vsil; i++) {
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
+ num_nodes[i],
+ &first_node_teid,
+ &num_nodes_added);
+ if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_nodes_added)
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ else
+ parent = parent->children[0];
+
+ if (!parent)
+ return ICE_ERR_CFG;
+ }
+
+move_nodes:
+ vsi_teid = LE32_TO_CPU(vsi_node->info.node_teid);
+ return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
+}
+
+/**
+ * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
+ * @pi: port information structure
+ * @agg_info: aggregator info
+ * @tc: traffic class number
+ * @rm_vsi_info: remove VSI info (true) or keep it (false)
+ *
+ * This function moves all the VSI(s) to the default aggregator and deletes the
+ * aggregator VSI info based on the passed-in boolean parameter rm_vsi_info. The
+ * caller holds the scheduler lock.
+ */
+static enum ice_status
+ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
+ struct ice_sched_agg_info *agg_info, u8 tc,
+ bool rm_vsi_info)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *tmp;
+ enum ice_status status = ICE_SUCCESS;
+
+ LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
+ ice_sched_agg_vsi_info, list_entry) {
+ u16 vsi_handle = agg_vsi_info->vsi_handle;
+
+ /* Move VSI to default aggregator */
+ if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
+ continue;
+
+ status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
+ ICE_DFLT_AGG_ID, tc);
+ if (status)
+ break;
+
+ ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
+ if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
+ LIST_DEL(&agg_vsi_info->list_entry);
+ ice_free(pi->hw, agg_vsi_info);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
+ * @pi: port information structure
+ * @node: node pointer
+ *
+ * This function checks whether the aggregator is attached to any VSI or not.
+ */
+static bool
+ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
+{
+ u8 vsil, i;
+
+ vsil = ice_sched_get_vsi_layer(pi->hw);
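+	/* above the VSI parent layer, recurse into the children; at the VSI
+	 * parent layer, any child means a VSI is attached
+	 */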
+ if (node->tx_sched_layer < vsil - 1) {
+ for (i = 0; i < node->num_children; i++)
+ if (ice_sched_is_agg_inuse(pi, node->children[i]))
+ return true;
+ return false;
+ } else {
+ return node->num_children ? true : false;
+ }
+}
+
+/**
+ * ice_sched_rm_agg_cfg - remove the aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc: TC number
+ *
+ * This function removes the aggregator node and intermediate nodes if any
+ * from the given TC
+ */
+static enum ice_status
+ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
+{
+ struct ice_sched_node *tc_node, *agg_node;
+ struct ice_hw *hw = pi->hw;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Can't remove the aggregator node if it has children */
+ if (ice_sched_is_agg_inuse(pi, agg_node))
+ return ICE_ERR_IN_USE;
+
+ /* need to remove the whole subtree if aggregator node is the
+ * only child.
+ */
+ while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
+ struct ice_sched_node *parent = agg_node->parent;
+
+ if (!parent)
+ return ICE_ERR_CFG;
+
+ if (parent->num_children > 1)
+ break;
+
+ agg_node = parent;
+ }
+
+ ice_free_sched_node(pi, agg_node);
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
+ * @pi: port information structure
+ * @agg_info: aggregator ID
+ * @tc: TC number
+ * @rm_vsi_info: remove VSI info (true) or keep it (false)
+ *
+ * This function removes aggregator reference to VSI of given TC. It removes
+ * the aggregator configuration completely for requested TC. The caller needs
+ * to hold the scheduler lock.
+ */
+static enum ice_status
+ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
+ u8 tc, bool rm_vsi_info)
+{
+ enum ice_status status = ICE_SUCCESS;
+
+ /* If nothing to remove - return success */
+ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
+ goto exit_rm_agg_cfg_tc;
+
+ status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
+ if (status)
+ goto exit_rm_agg_cfg_tc;
+
+ /* Delete aggregator node(s) */
+ status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
+ if (status)
+ goto exit_rm_agg_cfg_tc;
+
+ ice_clear_bit(tc, agg_info->tc_bitmap);
+exit_rm_agg_cfg_tc:
+ return status;
+}
+
+/**
+ * ice_save_agg_tc_bitmap - save aggregator TC bitmap
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc_bitmap: 8-bit TC bitmap
+ *
+ * Save aggregator TC bitmap. This function needs to be called with scheduler
+ * lock held.
+ */
+static enum ice_status
+ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
+ ice_bitmap_t *tc_bitmap)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ agg_info = ice_get_agg_info(pi->hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS);
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_add_agg_cfg - create an aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc: TC number
+ *
+ * This function creates an aggregator node and intermediate nodes if required
+ * for the given TC
+ */
+static enum ice_status
+ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
+{
+ struct ice_sched_node *parent, *agg_node, *tc_node;
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw = pi->hw;
+ u32 first_node_teid;
+ u16 num_nodes_added;
+ u8 i, aggl;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+	/* Does the aggregator node already exist? */
+ if (agg_node)
+ return status;
+
+ aggl = ice_sched_get_agg_layer(hw);
+
+ /* need one node in Agg layer */
+ num_nodes[aggl] = 1;
+
+ /* Check whether the intermediate nodes have space to add the
+ * new aggregator. If they are full, then SW needs to allocate a new
+ * intermediate node on those layers
+ */
+ for (i = hw->sw_entry_point_layer; i < aggl; i++) {
+ parent = ice_sched_get_first_node(pi, tc_node, i);
+
+ /* scan all the siblings */
+ while (parent) {
+ if (parent->num_children < hw->max_children[i])
+ break;
+ parent = parent->sibling;
+ }
+
+ /* all the nodes are full, reserve one for this layer */
+ if (!parent)
+ num_nodes[i]++;
+ }
+
+ /* add the aggregator node */
+ parent = tc_node;
+ for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
+ if (!parent)
+ return ICE_ERR_CFG;
+
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
+ num_nodes[i],
+ &first_node_teid,
+ &num_nodes_added);
+ if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_nodes_added) {
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ /* register aggregator ID with the aggregator node */
+ if (parent && i == aggl)
+ parent->agg_id = agg_id;
+ } else {
+ parent = parent->children[0];
+ }
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_cfg_agg - configure aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @agg_type: aggregator type (queue, VSI, or aggregator group)
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * It registers a unique aggregator node into the scheduler services. It
+ * allows a user to register with a unique ID to track its resources.
+ * The aggregator type determines if this is a queue group, VSI group
+ * or aggregator group. It then creates the aggregator node(s) for requested
+ * TC(s) or removes an existing aggregator node including its configuration
+ * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
+ * resources and remove aggregator ID.
+ * This function needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
+ enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
+{
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw = pi->hw;
+ u8 tc;
+
+ agg_info = ice_get_agg_info(hw, agg_id);
+ if (!agg_info) {
+ /* Create new entry for new aggregator ID */
+ agg_info = (struct ice_sched_agg_info *)
+ ice_malloc(hw, sizeof(*agg_info));
+ if (!agg_info) {
+ status = ICE_ERR_NO_MEMORY;
+ goto exit_reg_agg;
+ }
+ agg_info->agg_id = agg_id;
+ agg_info->agg_type = agg_type;
+ agg_info->tc_bitmap[0] = 0;
+
+ /* Initialize the aggregator VSI list head */
+ INIT_LIST_HEAD(&agg_info->agg_vsi_list);
+
+ /* Add new entry in aggregator list */
+ LIST_ADD(&agg_info->list_entry, &hw->agg_list);
+ }
+ /* Create aggregator node(s) for requested TC(s) */
+ ice_for_each_traffic_class(tc) {
+ if (!ice_is_tc_ena(*tc_bitmap, tc)) {
+ /* Delete aggregator cfg TC if it exists previously */
+ status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
+ if (status)
+ break;
+ continue;
+ }
+
+ /* Check if aggregator node for TC already exists */
+ if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
+ continue;
+
+ /* Create new aggregator node for TC */
+ status = ice_sched_add_agg_cfg(pi, agg_id, tc);
+ if (status)
+ break;
+
+ /* Save aggregator node's TC information */
+ ice_set_bit(tc, agg_info->tc_bitmap);
+ }
+exit_reg_agg:
+ return status;
+}
+
+/**
+ * ice_cfg_agg - config aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @agg_type: aggregator type (queue, VSI, or aggregator group)
+ * @tc_bitmap: 8-bit TC bitmap
+ *
+ * This function configures aggregator node(s).
+ */
+enum ice_status
+ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
+ u8 tc_bitmap)
+{
+ ice_bitmap_t bitmap = tc_bitmap;
+ enum ice_status status;
+
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_sched_cfg_agg(pi, agg_id, agg_type,
+ (ice_bitmap_t *)&bitmap);
+ if (!status)
+ status = ice_save_agg_tc_bitmap(pi, agg_id,
+ (ice_bitmap_t *)&bitmap);
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
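+
+/*
+ * Illustrative (hypothetical) use, not part of the driver flow: a caller
+ * holding a valid port_info could create an aggregator node with ID 1 on
+ * TC 0 only via:
+ *
+ *	ice_cfg_agg(pi, 1, ICE_AGG_TYPE_AGG, 0x1);
+ *
+ * and later release it with ice_rm_agg_cfg(pi, 1).
+ */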
+
+/**
+ * ice_get_agg_vsi_info - get the aggregator VSI info entry
+ * @agg_info: aggregator info
+ * @vsi_handle: software VSI handle
+ *
+ * The function returns aggregator VSI info based on VSI handle. This function
+ * needs to be called with scheduler lock held.
+ */
+static struct ice_sched_agg_vsi_info*
+ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+ LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
+ ice_sched_agg_vsi_info, list_entry)
+ if (agg_vsi_info->vsi_handle == vsi_handle)
+ return agg_vsi_info;
+
+ return NULL;
+}
+
+/**
+ * ice_get_vsi_agg_info - get the aggregator info of VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ *
+ * The function returns the aggregator info of the VSI represented by
+ * vsi_handle; in this case the VSI has an aggregator other than the default
+ * one. This function needs to be called with the scheduler lock held.
+ */
+static struct ice_sched_agg_info*
+ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
+ list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (agg_vsi_info)
+ return agg_info;
+ }
+ return NULL;
+}
+
+/**
+ * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * Save the VSI to aggregator TC bitmap. This function needs to be called with
+ * the scheduler lock held.
+ */
+static enum ice_status
+ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
+ ice_bitmap_t *tc_bitmap)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_info *agg_info;
+
+ agg_info = ice_get_agg_info(pi->hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+	/* check if the entry already exists */
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (!agg_vsi_info)
+ return ICE_ERR_PARAM;
+ ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS);
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * This function moves VSI to a new or default aggregator node. If VSI is
+ * already associated to the aggregator node then no operation is performed on
+ * the tree. This function needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
+ u16 vsi_handle, ice_bitmap_t *tc_bitmap)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw = pi->hw;
+ u8 tc;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ agg_info = ice_get_agg_info(hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+	/* check if the entry already exists */
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (!agg_vsi_info) {
+ /* Create new entry for VSI under aggregator list */
+ agg_vsi_info = (struct ice_sched_agg_vsi_info *)
+ ice_malloc(hw, sizeof(*agg_vsi_info));
+ if (!agg_vsi_info)
+ return ICE_ERR_PARAM;
+
+ /* add VSI ID into the aggregator list */
+ agg_vsi_info->vsi_handle = vsi_handle;
+ LIST_ADD(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
+ }
+ /* Move VSI node to new aggregator node for requested TC(s) */
+ ice_for_each_traffic_class(tc) {
+ if (!ice_is_tc_ena(*tc_bitmap, tc))
+ continue;
+
+ /* Move VSI to new aggregator */
+ status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
+ if (status)
+ break;
+
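+		/* track the TC only for non-default aggregators; moving back
+		 * to the default aggregator clears it
+		 */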
+ if (agg_id != ICE_DFLT_AGG_ID)
+ ice_set_bit(tc, agg_vsi_info->tc_bitmap);
+ else
+ ice_clear_bit(tc, agg_vsi_info->tc_bitmap);
+ }
+ /* If VSI moved back to default aggregator, delete agg_vsi_info. */
+ if (!ice_is_any_bit_set(agg_vsi_info->tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS)) {
+ LIST_DEL(&agg_vsi_info->list_entry);
+ ice_free(hw, agg_vsi_info);
+ }
+ return status;
+}
+
+/**
+ * ice_sched_rm_unused_rl_prof - remove unused RL profile
+ * @pi: port information structure
+ *
+ * This function removes unused rate limit profiles from the HW and
+ * SW DB. The caller needs to hold scheduler lock.
+ */
+static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
+{
+ u16 ln;
+
+ for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ struct ice_aqc_rl_profile_info *rl_prof_tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
+ &pi->rl_prof_list[ln],
+ ice_aqc_rl_profile_info, list_entry) {
+ if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "Removed rl profile\n");
+ }
+ }
+}
+
+/**
+ * ice_sched_update_elem - update element
+ * @hw: pointer to the HW struct
+ * @node: pointer to node
+ * @info: node info to update
+ *
+ * It updates the HW DB and the local SW DB of the node. It updates the
+ * scheduling parameters of the node from the info argument's data buffer
+ * (info->data) and returns success, or an error if the config sched element
+ * command fails. The caller needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
+ struct ice_aqc_txsched_elem_data *info)
+{
+ struct ice_aqc_conf_elem buf;
+ enum ice_status status;
+ u16 elem_cfgd = 0;
+ u16 num_elems = 1;
+
+ buf.generic[0] = *info;
+ /* Parent TEID is reserved field in this aq call */
+ buf.generic[0].parent_teid = 0;
+ /* Element type is reserved field in this aq call */
+ buf.generic[0].data.elem_type = 0;
+ /* Flags is reserved field in this aq call */
+ buf.generic[0].data.flags = 0;
+
+ /* Update HW DB */
+ /* Configure element node */
+ status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
+ &elem_cfgd, NULL);
+ if (status || elem_cfgd != num_elems) {
+ ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* Config success case */
+ /* Now update local SW DB */
+ /* Only copy the data portion of info buffer */
+ node->info.data = info->data;
+ return status;
+}
+
+/**
+ * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @rl_type: rate limit type CIR, EIR, or shared
+ * @bw_alloc: BW weight/allocation
+ *
+ * This function configures node element's BW allocation.
+ */
+static enum ice_status
+ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u16 bw_alloc)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+ enum ice_status status;
+
+ buf = node->info;
+ data = &buf.data;
+ if (rl_type == ICE_MIN_BW) {
+ data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
+ data->cir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
+ } else if (rl_type == ICE_MAX_BW) {
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_alloc = CPU_TO_LE16(bw_alloc);
+ } else {
+ return ICE_ERR_PARAM;
+ }
+
+ /* Configure element */
+ status = ice_sched_update_elem(hw, node, &buf);
+ return status;
+}
+
+/**
+ * ice_move_vsi_to_agg - moves VSI to new or default aggregator
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * Move or associate VSI to a new or default aggregator node.
+ */
+enum ice_status
+ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
+ u8 tc_bitmap)
+{
+ ice_bitmap_t bitmap = tc_bitmap;
+ enum ice_status status;
+
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
+ (ice_bitmap_t *)&bitmap);
+ if (!status)
+ status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
+ (ice_bitmap_t *)&bitmap);
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_rm_agg_cfg - remove aggregator configuration
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ *
+ * This function removes aggregator reference to VSI and delete aggregator ID
+ * info. It removes the aggregator configuration completely.
+ */
+enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
+{
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status = ICE_SUCCESS;
+ u8 tc;
+
+ ice_acquire_lock(&pi->sched_lock);
+ agg_info = ice_get_agg_info(pi->hw, agg_id);
+ if (!agg_info) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit_ice_rm_agg_cfg;
+ }
+
+ ice_for_each_traffic_class(tc) {
+ status = ice_rm_agg_cfg_tc(pi, agg_info, tc, true);
+ if (status)
+ goto exit_ice_rm_agg_cfg;
+ }
+
+ if (ice_is_any_bit_set(agg_info->tc_bitmap, ICE_MAX_TRAFFIC_CLASS)) {
+ status = ICE_ERR_IN_USE;
+ goto exit_ice_rm_agg_cfg;
+ }
+
+ /* Safe to delete entry now */
+ LIST_DEL(&agg_info->list_entry);
+ ice_free(pi->hw, agg_info);
+
+ /* Remove unused RL profile IDs from HW and SW DB */
+ ice_sched_rm_unused_rl_prof(pi);
+
+exit_ice_rm_agg_cfg:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_set_clear_cir_bw_alloc - set or clear CIR BW alloc information
+ * @bw_t_info: bandwidth type information structure
+ * @bw_alloc: Bandwidth allocation information
+ *
+ * Save or clear CIR BW alloc information (bw_alloc) in the passed param
+ * bw_t_info.
+ */
+static void
+ice_set_clear_cir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
+{
+ bw_t_info->cir_bw.bw_alloc = bw_alloc;
+ if (bw_t_info->cir_bw.bw_alloc)
+ ice_set_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
+ else
+ ice_clear_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap);
+}
+
+/**
+ * ice_set_clear_eir_bw_alloc - set or clear EIR BW alloc information
+ * @bw_t_info: bandwidth type information structure
+ * @bw_alloc: Bandwidth allocation information
+ *
+ * Save or clear EIR BW alloc information (bw_alloc) in the passed param
+ * bw_t_info.
+ */
+static void
+ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
+{
+ bw_t_info->eir_bw.bw_alloc = bw_alloc;
+ if (bw_t_info->eir_bw.bw_alloc)
+ ice_set_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
+ else
+ ice_clear_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap);
+}
+
+/**
+ * ice_sched_save_vsi_bw_alloc - save VSI node's BW alloc information
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @rl_type: rate limit type min or max
+ * @bw_alloc: Bandwidth allocation information
+ *
+ * Save BW alloc information of VSI type node for post replay use.
+ */
+static enum ice_status
+ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u16 bw_alloc)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
+ bw_alloc);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw_alloc(&vsi_ctx->sched.bw_t_info[tc],
+ bw_alloc);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_set_clear_cir_bw - set or clear CIR BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ ice_clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->cir_bw.bw = 0;
+ } else {
+ /* Save type of BW information */
+ ice_set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->cir_bw.bw = bw;
+ }
+}
+
+/**
+ * ice_set_clear_eir_bw - set or clear EIR BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = 0;
+ } else {
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element.
+ * First clear earlier saved shared BW information.
+ */
+ ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = 0;
+ /* save EIR BW information */
+ ice_set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = bw;
+ }
+}
+
+/**
+ * ice_set_clear_shared_bw - set or clear shared BW
+ * @bw_t_info: bandwidth type information structure
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
+{
+ if (bw == ICE_SCHED_DFLT_BW) {
+ ice_clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = 0;
+ } else {
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element.
+ * First clear earlier saved EIR BW information.
+ */
+ ice_clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
+ bw_t_info->eir_bw.bw = 0;
+ /* save shared BW information */
+ ice_set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
+ bw_t_info->shared_bw = bw;
+ }
+}
+
+/**
+ * ice_sched_save_vsi_bw - save VSI node's BW information
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save BW information of VSI type node for post replay use.
+ */
+static enum ice_status
+ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_set_clear_prio - set or clear priority information
+ * @bw_t_info: bandwidth type information structure
+ * @prio: priority to save
+ *
+ * Save or clear priority (prio) in the passed param bw_t_info.
+ */
+static void
+ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
+{
+ bw_t_info->generic = prio;
+ if (bw_t_info->generic)
+ ice_set_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
+ else
+ ice_clear_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap);
+}
+
+/**
+ * ice_sched_save_vsi_prio - save VSI node's priority information
+ * @pi: port information structure
+ * @vsi_handle: Software VSI handle
+ * @tc: traffic class
+ * @prio: priority to save
+ *
+ * Save priority information of VSI type node for post replay use.
+ */
+static enum ice_status
+ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u8 prio)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ if (tc >= ICE_MAX_TRAFFIC_CLASS)
+ return ICE_ERR_PARAM;
+ ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_save_agg_bw_alloc - save aggregator node's BW alloc information
+ * @pi: port information structure
+ * @agg_id: node aggregator ID
+ * @tc: traffic class
+ * @rl_type: rate limit type min or max
+ * @bw_alloc: bandwidth alloc information
+ *
+ * Save BW alloc information of AGG type node for post replay use.
+ */
+static enum ice_status
+ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
+ enum ice_rl_type rl_type, u16 bw_alloc)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ agg_info = ice_get_agg_info(pi->hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
+ return ICE_ERR_PARAM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw_alloc(&agg_info->bw_t_info[tc], bw_alloc);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_save_agg_bw - save aggregator node's BW information
+ * @pi: port information structure
+ * @agg_id: node aggregator ID
+ * @tc: traffic class
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save BW information of AGG type node for post replay use.
+ */
+static enum ice_status
+ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ agg_info = ice_get_agg_info(pi->hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
+ return ICE_ERR_PARAM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&agg_info->bw_t_info[tc], bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&agg_info->bw_t_info[tc], bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&agg_info->bw_t_info[tc], bw);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures BW limit of VSI scheduling node based on TC
+ * information.
+ */
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type, bw);
+ if (!status) {
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
+ ice_release_lock(&pi->sched_lock);
+ }
+ return status;
+}
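+
+/*
+ * Illustrative (hypothetical) use, not part of the driver flow: to cap VSI
+ * handle 5 on TC 0 at roughly 1 Gbps, a caller could issue:
+ *
+ *	ice_cfg_vsi_bw_lmt_per_tc(pi, 5, 0, ICE_MAX_BW, 1000000);
+ *
+ * since bw is expressed in Kbps.
+ */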
+
+/**
+ * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ *
+ * This function configures default BW limit of VSI scheduling node based on TC
+ * information.
+ */
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type)
+{
+ enum ice_status status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ if (!status) {
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ ice_release_lock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
+ * ice_cfg_agg_bw_lmt_per_tc - configure aggregator BW limit per TC
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function applies BW limit to aggregator scheduling node based on TC
+ * information.
+ */
+enum ice_status
+ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
+ tc, rl_type, bw);
+ if (!status) {
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
+ ice_release_lock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
+ * ice_cfg_agg_bw_dflt_lmt_per_tc - configure aggregator BW default limit per TC
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc: traffic class
+ * @rl_type: min or max
+ *
+ * This function applies default BW limit to aggregator scheduling node based
+ * on TC information.
+ */
+enum ice_status
+ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
+ enum ice_rl_type rl_type)
+{
+ enum ice_status status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
+ tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ if (!status) {
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ ice_release_lock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_shared_lmt - configure VSI BW shared limit
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the shared rate limiter (SRL) of all VSI type
+ * nodes across all traffic classes for the VSI matching the given handle.
+ */
+enum ice_status
+ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 bw)
+{
+ return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle, bw);
+}
+
+/**
+ * ice_cfg_vsi_bw_no_shared_lmt - configure VSI BW for no shared limiter
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function removes the shared rate limiter (SRL) of all VSI type nodes
+ * across all traffic classes for VSI matching handle.
+ */
+enum ice_status
+ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
+ ICE_SCHED_DFLT_BW);
+}
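+
+/*
+ * Illustrative usage sketch (not part of this change): applying a 1 Gbps
+ * shared rate limit (SRL) across all TCs of a VSI and removing it again.
+ * The vsi_handle value is a hypothetical example.
+ *
+ *	enum ice_status err;
+ *
+ *	err = ice_cfg_vsi_bw_shared_lmt(pi, vsi_handle, 1000000);
+ *	...
+ *	err = ice_cfg_vsi_bw_no_shared_lmt(pi, vsi_handle);
+ */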
+
+/**
+ * ice_cfg_agg_bw_shared_lmt - configure aggregator BW shared limit
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the shared rate limiter (SRL) of all aggregator
+ * type nodes across all traffic classes for the aggregator matching agg_id.
+ */
+enum ice_status
+ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
+{
+ return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, bw);
+}
+
+/**
+ * ice_cfg_agg_bw_no_shared_lmt - configure aggregator BW for no shared limiter
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ *
+ * This function removes the shared rate limiter (SRL) of all aggregator
+ * type nodes across all traffic classes for the aggregator matching agg_id.
+ */
+enum ice_status
+ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
+{
+ return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW);
+}
+
+/**
+ * ice_cfg_vsi_q_priority - config VSI queue priority of node
+ * @pi: port information structure
+ * @num_qs: number of VSI queues
+ * @q_ids: queue IDs array
+ * @q_prio: queue priority array
+ *
+ * This function configures the queue node priority (Sibling Priority) of the
+ * passed in VSI's queue(s) for a given traffic class (TC).
+ */
+enum ice_status
+ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
+ u8 *q_prio)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ u16 i;
+
+ ice_acquire_lock(&pi->sched_lock);
+
+ for (i = 0; i < num_qs; i++) {
+ struct ice_sched_node *node;
+
+ node = ice_sched_find_node_by_teid(pi->root, q_ids[i]);
+ if (!node || node->info.data.elem_type !=
+ ICE_AQC_ELEM_TYPE_LEAF) {
+ status = ICE_ERR_PARAM;
+ break;
+ }
+ /* Configure Priority */
+ status = ice_sched_cfg_sibl_node_prio(pi, node, q_prio[i]);
+ if (status)
+ break;
+ }
+
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_agg_vsi_priority_per_tc - config aggregator's VSI priority per TC
+ * @pi: port information structure
+ * @agg_id: Aggregator ID
+ * @num_vsis: number of VSI(s)
+ * @vsi_handle_arr: array of software VSI handles
+ * @node_prio: pointer to node priority
+ * @tc: traffic class
+ *
+ * This function configures the node priority (Sibling Priority) of the
+ * passed in VSI(s) for a given traffic class (TC) of an Aggregator ID.
+ */
+enum ice_status
+ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
+ u16 num_vsis, u16 *vsi_handle_arr,
+ u8 *node_prio, u8 tc)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_node *tc_node, *agg_node;
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_agg_info *agg_info;
+ bool agg_id_present = false;
+ struct ice_hw *hw = pi->hw;
+ u16 i;
+
+ ice_acquire_lock(&pi->sched_lock);
+ LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
+ list_entry)
+ if (agg_info->agg_id == agg_id) {
+ agg_id_present = true;
+ break;
+ }
+ if (!agg_id_present)
+ goto exit_agg_priority_per_tc;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ goto exit_agg_priority_per_tc;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ goto exit_agg_priority_per_tc;
+
+ if (num_vsis > hw->max_children[agg_node->tx_sched_layer])
+ goto exit_agg_priority_per_tc;
+
+ for (i = 0; i < num_vsis; i++) {
+ struct ice_sched_node *vsi_node;
+ bool vsi_handle_valid = false;
+ u16 vsi_handle;
+
+ status = ICE_ERR_PARAM;
+ vsi_handle = vsi_handle_arr[i];
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ goto exit_agg_priority_per_tc;
+ /* Verify child nodes before applying settings */
+ LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
+ ice_sched_agg_vsi_info, list_entry)
+ if (agg_vsi_info->vsi_handle == vsi_handle) {
+ /* cppcheck-suppress unreadVariable */
+ vsi_handle_valid = true;
+ break;
+ }
+
+ if (!vsi_handle_valid)
+ goto exit_agg_priority_per_tc;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ goto exit_agg_priority_per_tc;
+
+ if (ice_sched_find_node_in_subtree(hw, agg_node, vsi_node)) {
+ /* Configure Priority */
+ status = ice_sched_cfg_sibl_node_prio(pi, vsi_node,
+ node_prio[i]);
+ if (status)
+ break;
+ status = ice_sched_save_vsi_prio(pi, vsi_handle, tc,
+ node_prio[i]);
+ if (status)
+ break;
+ }
+ }
+
+exit_agg_priority_per_tc:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_alloc - config VSI BW alloc per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @ena_tcmap: enabled TC map
+ * @rl_type: Rate limit type CIR/EIR
+ * @bw_alloc: Array of BW alloc
+ *
+ * This function configures the BW allocation of the passed in VSI's
+ * node(s) for the enabled traffic class(es).
+ */
+enum ice_status
+ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
+ enum ice_rl_type rl_type, u8 *bw_alloc)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u8 tc;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&pi->sched_lock);
+
+ /* Return success if no nodes are present across TC */
+ ice_for_each_traffic_class(tc) {
+ struct ice_sched_node *tc_node, *vsi_node;
+
+ if (!ice_is_tc_ena(ena_tcmap, tc))
+ continue;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ continue;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ continue;
+
+ status = ice_sched_cfg_node_bw_alloc(pi->hw, vsi_node, rl_type,
+ bw_alloc[tc]);
+ if (status)
+ break;
+ status = ice_sched_save_vsi_bw_alloc(pi, vsi_handle, tc,
+ rl_type, bw_alloc[tc]);
+ if (status)
+ break;
+ }
+
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
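+
+/*
+ * Illustrative usage sketch (not part of this change): splitting a VSI's
+ * EIR bandwidth allocation 50/50 between TC 0 and TC 1. The enabled-TC map
+ * and the relative weights are example values only.
+ *
+ *	u8 bw_share[ICE_MAX_TRAFFIC_CLASS] = { 50, 50 };
+ *	enum ice_status err;
+ *
+ *	err = ice_cfg_vsi_bw_alloc(pi, vsi_handle, 0x3, ICE_MAX_BW, bw_share);
+ */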
+
+/**
+ * ice_cfg_agg_bw_alloc - config aggregator BW alloc
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @ena_tcmap: enabled TC map
+ * @rl_type: rate limit type CIR/EIR
+ * @bw_alloc: array of BW alloc
+ *
+ * This function configures the BW allocation of the passed in aggregator for
+ * the enabled traffic class(es).
+ */
+enum ice_status
+ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
+ enum ice_rl_type rl_type, u8 *bw_alloc)
+{
+ struct ice_sched_agg_info *agg_info;
+ bool agg_id_present = false;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_hw *hw = pi->hw;
+ u8 tc;
+
+ ice_acquire_lock(&pi->sched_lock);
+ LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
+ list_entry)
+ if (agg_info->agg_id == agg_id) {
+ agg_id_present = true;
+ break;
+ }
+ if (!agg_id_present) {
+ status = ICE_ERR_PARAM;
+ goto exit_cfg_agg_bw_alloc;
+ }
+
+ /* Return success if no nodes are present across TC */
+ ice_for_each_traffic_class(tc) {
+ struct ice_sched_node *tc_node, *agg_node;
+
+ if (!ice_is_tc_ena(ena_tcmap, tc))
+ continue;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ continue;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ continue;
+
+ status = ice_sched_cfg_node_bw_alloc(hw, agg_node, rl_type,
+ bw_alloc[tc]);
+ if (status)
+ break;
+ status = ice_sched_save_agg_bw_alloc(pi, agg_id, tc, rl_type,
+ bw_alloc[tc]);
+ if (status)
+ break;
+ }
+
+exit_cfg_agg_bw_alloc:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
+ * @hw: pointer to the HW struct
+ * @bw: bandwidth in Kbps
+ *
+ * This function calculates the wakeup parameter of RL profile.
+ */
+static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
+{
+ s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
+ s32 wakeup_f_int;
+ u16 wakeup = 0;
+
+ /* Get the wakeup integer value */
+ bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);
+ wakeup_int = DIV_64BIT(hw->psm_clk_freq, bytes_per_sec);
+ if (wakeup_int > 63) {
+ wakeup = (u16)((1 << 15) | wakeup_int);
+ } else {
+ /* Calculate fraction value up to 4 decimals
+ * Convert Integer value to a constant multiplier
+ */
+ wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
+ wakeup_a = DIV_64BIT((s64)ICE_RL_PROF_MULTIPLIER *
+ hw->psm_clk_freq, bytes_per_sec);
+
+ /* Get Fraction value */
+ wakeup_f = wakeup_a - wakeup_b;
+
+ /* Round up the Fractional value via Ceil(Fractional value) */
+ if (wakeup_f > DIV_64BIT(ICE_RL_PROF_MULTIPLIER, 2))
+ wakeup_f += 1;
+
+ wakeup_f_int = (s32)DIV_64BIT(wakeup_f * ICE_RL_PROF_FRACTION,
+ ICE_RL_PROF_MULTIPLIER);
+ wakeup |= (u16)(wakeup_int << 9);
+ wakeup |= (u16)(0x1ff & wakeup_f_int);
+ }
+
+ return wakeup;
+}
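+
+/*
+ * Illustrative note (not part of this change): the wakeup word built above
+ * is packed as follows, per the shifts and masks in ice_sched_calc_wakeup():
+ *
+ *	- integer portion > 63: bit 15 is set and the integer value occupies
+ *	  the low bits, with no fractional part
+ *	- otherwise: bits 14:9 hold the integer portion and bits 8:0 hold the
+ *	  fraction, expressed in ICE_RL_PROF_FRACTION units
+ */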
+
+/**
+ * ice_sched_bw_to_rl_profile - convert BW to profile parameters
+ * @hw: pointer to the HW struct
+ * @bw: bandwidth in Kbps
+ * @profile: profile parameters to return
+ *
+ * This function converts the BW to profile structure format.
+ */
+static enum ice_status
+ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
+ struct ice_aqc_rl_profile_elem *profile)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ s64 bytes_per_sec, ts_rate, mv_tmp;
+ bool found = false;
+ s32 encode = 0;
+ s64 mv = 0;
+ s32 i;
+
+	/* BW settings range is from 0.5 Mb/sec to 100 Gb/sec */
+ if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
+ return status;
+
+ /* Bytes per second from Kbps */
+ bytes_per_sec = DIV_64BIT(((s64)bw * 1000), BITS_PER_BYTE);
+
+	/* encode is 6 bits, but only 5 bits are really useful */
+ for (i = 0; i < 64; i++) {
+ u64 pow_result = BIT_ULL(i);
+
+ ts_rate = DIV_64BIT((s64)hw->psm_clk_freq,
+ pow_result * ICE_RL_PROF_TS_MULTIPLIER);
+ if (ts_rate <= 0)
+ continue;
+
+ /* Multiplier value */
+ mv_tmp = DIV_64BIT(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
+ ts_rate);
+
+ /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
+ mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
+
+ /* First multiplier value greater than the given
+ * accuracy bytes
+ */
+ if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
+ encode = i;
+ found = true;
+ break;
+ }
+ }
+ if (found) {
+ u16 wm;
+
+ wm = ice_sched_calc_wakeup(hw, bw);
+ profile->rl_multiply = CPU_TO_LE16(mv);
+ profile->wake_up_calc = CPU_TO_LE16(wm);
+ profile->rl_encode = CPU_TO_LE16(encode);
+ status = ICE_SUCCESS;
+ } else {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ }
+
+ return status;
+}
+
+/**
+ * ice_sched_add_rl_profile - add RL profile
+ * @pi: port information structure
+ * @rl_type: type of rate limit BW - min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ * @layer_num: specifies in which layer to create profile
+ *
+ * This function first checks the existing list for a matching BW
+ * parameter. If it exists, it returns the associated profile; otherwise
+ * it creates a new rate limit profile for the requested BW, and adds it
+ * to the HW DB and local list. It returns the new profile or NULL on error.
+ * The caller needs to hold the scheduler lock.
+ */
+static struct ice_aqc_rl_profile_info *
+ice_sched_add_rl_profile(struct ice_port_info *pi,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+{
+ struct ice_aqc_rl_profile_generic_elem *buf;
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ u16 profiles_added = 0, num_profiles = 1;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 profile_type;
+
+ if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ return NULL;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
+ break;
+ case ICE_MAX_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
+ break;
+ case ICE_SHARED_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!pi)
+ return NULL;
+ hw = pi->hw;
+ LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ ice_aqc_rl_profile_info, list_entry)
+ if (rl_prof_elem->profile.flags == profile_type &&
+ rl_prof_elem->bw == bw)
+ /* Return existing profile ID info */
+ return rl_prof_elem;
+
+ /* Create new profile ID */
+ rl_prof_elem = (struct ice_aqc_rl_profile_info *)
+ ice_malloc(hw, sizeof(*rl_prof_elem));
+
+ if (!rl_prof_elem)
+ return NULL;
+
+ status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
+ if (status != ICE_SUCCESS)
+ goto exit_add_rl_prof;
+
+ rl_prof_elem->bw = bw;
+ /* layer_num is zero relative, and fw expects level from 1 to 9 */
+ rl_prof_elem->profile.level = layer_num + 1;
+ rl_prof_elem->profile.flags = profile_type;
+ rl_prof_elem->profile.max_burst_size = CPU_TO_LE16(hw->max_burst_size);
+
+ /* Create new entry in HW DB */
+ buf = (struct ice_aqc_rl_profile_generic_elem *)
+ &rl_prof_elem->profile;
+ status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
+ &profiles_added, NULL);
+ if (status || profiles_added != num_profiles)
+ goto exit_add_rl_prof;
+
+ /* Good entry - add in the list */
+ rl_prof_elem->prof_id_ref = 0;
+ LIST_ADD(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
+ return rl_prof_elem;
+
+exit_add_rl_prof:
+ ice_free(hw, rl_prof_elem);
+ return NULL;
+}
+
+/**
+ * ice_sched_cfg_node_bw_lmt - configure node sched params
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @rl_type: rate limit type CIR, EIR, or shared
+ * @rl_prof_id: rate limit profile ID
+ *
+ * This function configures node element's BW limit.
+ */
+static enum ice_status
+ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u16 rl_prof_id)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+
+ buf = node->info;
+ data = &buf.data;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
+ data->cir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
+ break;
+ case ICE_MAX_BW:
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
+ return ICE_ERR_CFG;
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_profile_idx = CPU_TO_LE16(rl_prof_id);
+ break;
+ case ICE_SHARED_BW:
+ /* Check for removing shared BW */
+ if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
+ /* remove shared profile */
+ data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
+ data->srl_id = 0; /* clear SRL field */
+
+ /* enable back EIR to default profile */
+ data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
+ data->eir_bw.bw_profile_idx =
+ CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
+ break;
+ }
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
+ (LE16_TO_CPU(data->eir_bw.bw_profile_idx) !=
+ ICE_SCHED_DFLT_RL_PROF_ID))
+ return ICE_ERR_CFG;
+ /* EIR BW is set to default, disable it */
+ data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
+ /* Okay to enable shared BW now */
+ data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
+ data->srl_id = CPU_TO_LE16(rl_prof_id);
+ break;
+ default:
+ /* Unknown rate limit type */
+ return ICE_ERR_PARAM;
+ }
+
+ /* Configure element */
+ return ice_sched_update_elem(hw, node, &buf);
+}
+
+/**
+ * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
+ * @node: sched node
+ * @rl_type: rate limit type
+ *
+ * If an existing profile matches, it returns the corresponding rate
+ * limit profile ID; otherwise it returns an invalid ID as an error.
+ */
+static u16
+ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
+ enum ice_rl_type rl_type)
+{
+ u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
+ struct ice_aqc_txsched_elem *data;
+
+ data = &node->info.data;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
+ rl_prof_id = LE16_TO_CPU(data->cir_bw.bw_profile_idx);
+ break;
+ case ICE_MAX_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
+ rl_prof_id = LE16_TO_CPU(data->eir_bw.bw_profile_idx);
+ break;
+ case ICE_SHARED_BW:
+ if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
+ rl_prof_id = LE16_TO_CPU(data->srl_id);
+ break;
+ default:
+ break;
+ }
+
+ return rl_prof_id;
+}
+
+/**
+ * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
+ * @pi: port information structure
+ * @rl_type: type of rate limit BW - min, max, or shared
+ * @layer_index: layer index
+ *
+ * This function returns requested profile creation layer.
+ */
+static u8
+ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
+ u8 layer_index)
+{
+ struct ice_hw *hw = pi->hw;
+
+ if (layer_index >= hw->num_tx_sched_layers)
+ return ICE_SCHED_INVAL_LAYER_NUM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ if (hw->layer_info[layer_index].max_cir_rl_profiles)
+ return layer_index;
+ break;
+ case ICE_MAX_BW:
+ if (hw->layer_info[layer_index].max_eir_rl_profiles)
+ return layer_index;
+ break;
+ case ICE_SHARED_BW:
+ /* if current layer doesn't support SRL profile creation
+ * then try a layer up or down.
+ */
+ if (hw->layer_info[layer_index].max_srl_profiles)
+ return layer_index;
+ else if (layer_index < hw->num_tx_sched_layers - 1 &&
+ hw->layer_info[layer_index + 1].max_srl_profiles)
+ return layer_index + 1;
+ else if (layer_index > 0 &&
+ hw->layer_info[layer_index - 1].max_srl_profiles)
+ return layer_index - 1;
+ break;
+ default:
+ break;
+ }
+ return ICE_SCHED_INVAL_LAYER_NUM;
+}
+
+/**
+ * ice_sched_get_srl_node - get shared rate limit node
+ * @node: tree node
+ * @srl_layer: shared rate limit layer
+ *
+ * This function returns SRL node to be used for shared rate limit purpose.
+ * The caller needs to hold scheduler lock.
+ */
+static struct ice_sched_node *
+ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
+{
+ if (srl_layer > node->tx_sched_layer)
+ return node->children[0];
+ else if (srl_layer < node->tx_sched_layer)
+		/* A node can't be created without a parent, so it will
+		 * always have a valid parent except for the root node.
+		 */
+ return node->parent;
+ else
+ return node;
+}
+
+/**
+ * ice_sched_rm_rl_profile - remove RL profile ID
+ * @pi: port information structure
+ * @layer_num: layer number where profiles are saved
+ * @profile_type: profile type like EIR, CIR, or SRL
+ * @profile_id: profile ID to remove
+ *
+ * This function removes rate limit profile from layer 'layer_num' of type
+ * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
+ * scheduler lock.
+ */
+static enum ice_status
+ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
+ u16 profile_id)
+{
+ struct ice_aqc_rl_profile_info *rl_prof_elem;
+ enum ice_status status = ICE_SUCCESS;
+
+ if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ return ICE_ERR_PARAM;
+ /* Check the existing list for RL profile */
+ LIST_FOR_EACH_ENTRY(rl_prof_elem, &pi->rl_prof_list[layer_num],
+ ice_aqc_rl_profile_info, list_entry)
+ if (rl_prof_elem->profile.flags == profile_type &&
+ LE16_TO_CPU(rl_prof_elem->profile.profile_id) ==
+ profile_id) {
+ if (rl_prof_elem->prof_id_ref)
+ rl_prof_elem->prof_id_ref--;
+
+ /* Remove old profile ID from database */
+ status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
+ if (status && status != ICE_ERR_IN_USE)
+ ice_debug(pi->hw, ICE_DBG_SCHED,
+ "Remove rl profile failed\n");
+ break;
+ }
+ if (status == ICE_ERR_IN_USE)
+ status = ICE_SUCCESS;
+ return status;
+}
+
+/**
+ * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @rl_type: rate limit type min, max, or shared
+ * @layer_num: layer number where RL profiles are saved
+ *
+ * This function configures node element's BW rate limit profile ID of
+ * type CIR, EIR, or SRL to default. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u8 layer_num)
+{
+ enum ice_status status;
+ struct ice_hw *hw;
+ u8 profile_type;
+ u16 rl_prof_id;
+ u16 old_id;
+
+ hw = pi->hw;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
+ rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
+ break;
+ case ICE_MAX_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
+ rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
+ break;
+ case ICE_SHARED_BW:
+ profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
+ /* No SRL is configured for default case */
+ rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ /* Save existing RL prof ID for later clean up */
+ old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
+ /* Configure BW scheduling parameters */
+ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
+ if (status)
+ return status;
+
+ /* Remove stale RL profile ID */
+ if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
+ old_id == ICE_SCHED_INVAL_PROF_ID)
+ return ICE_SUCCESS;
+
+ return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
+}
+
+/**
+ * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @layer_num: layer number where rate limit profiles are saved
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth value
+ *
+ * This function prepares node element's bandwidth to SRL or EIR exclusively.
+ * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
+ * them may be set for any given element. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ u8 layer_num, enum ice_rl_type rl_type, u32 bw)
+{
+ if (rl_type == ICE_SHARED_BW) {
+		/* SRL node passed in this case, it may be a different node */
+ if (bw == ICE_SCHED_DFLT_BW)
+ /* SRL being removed, ice_sched_cfg_node_bw_lmt()
+ * enables EIR to default. EIR is not set in this
+ * case, so no additional action is required.
+ */
+ return ICE_SUCCESS;
+
+ /* SRL being configured, set EIR to default here.
+ * ice_sched_cfg_node_bw_lmt() disables EIR when it
+ * configures SRL
+ */
+ return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
+ layer_num);
+ } else if (rl_type == ICE_MAX_BW &&
+ node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
+ /* Remove Shared profile. Set default shared BW call
+ * removes shared profile for a node.
+ */
+ return ice_sched_set_node_bw_dflt(pi, node,
+ ICE_SHARED_BW,
+ layer_num);
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_set_node_bw - set node's bandwidth
+ * @pi: port information structure
+ * @node: tree node
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ * @layer_num: layer number
+ *
+ * This function adds new profile corresponding to requested BW, configures
+ * node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
+ * ID from local database. The caller needs to hold scheduler lock.
+ */
+static enum ice_status
+ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num)
+{
+ struct ice_aqc_rl_profile_info *rl_prof_info;
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_hw *hw = pi->hw;
+ u16 old_id, rl_prof_id;
+
+ rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
+ if (!rl_prof_info)
+ return status;
+
+ rl_prof_id = LE16_TO_CPU(rl_prof_info->profile.profile_id);
+
+ /* Save existing RL prof ID for later clean up */
+ old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
+ /* Configure BW scheduling parameters */
+ status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
+ if (status)
+ return status;
+
+	/* New changes have been applied */
+ /* Increment the profile ID reference count */
+ rl_prof_info->prof_id_ref++;
+
+ /* Check for old ID removal */
+ if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
+ old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
+ return ICE_SUCCESS;
+
+ return ice_sched_rm_rl_profile(pi, layer_num,
+ rl_prof_info->profile.flags,
+ old_id);
+}
+
+/**
+ * ice_sched_set_node_bw_lmt - set node's BW limit
+ * @pi: port information structure
+ * @node: tree node
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * It updates node's BW limit parameters like BW RL profile ID of type CIR,
+ * EIR, or SRL. The caller needs to hold scheduler lock.
+ */
+static enum ice_status
+ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_sched_node *cfg_node = node;
+ enum ice_status status;
+
+ struct ice_hw *hw;
+ u8 layer_num;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ hw = pi->hw;
+ /* Remove unused RL profile IDs from HW and SW DB */
+ ice_sched_rm_unused_rl_prof(pi);
+ layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node->tx_sched_layer);
+ if (layer_num >= hw->num_tx_sched_layers)
+ return ICE_ERR_PARAM;
+
+ if (rl_type == ICE_SHARED_BW) {
+ /* SRL node may be different */
+ cfg_node = ice_sched_get_srl_node(node, layer_num);
+ if (!cfg_node)
+ return ICE_ERR_CFG;
+ }
+ /* EIR BW and Shared BW profiles are mutually exclusive and
+ * hence only one of them may be set for any given element
+ */
+ status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
+ bw);
+ if (status)
+ return status;
+ if (bw == ICE_SCHED_DFLT_BW)
+ return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
+ layer_num);
+ return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
+}
+
+/**
+ * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
+ * @pi: port information structure
+ * @node: pointer to node structure
+ * @rl_type: rate limit type min, max, or shared
+ *
+ * This function configures node element's BW rate limit profile ID of
+ * type CIR, EIR, or SRL to default. This function needs to be called
+ * with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
+ struct ice_sched_node *node,
+ enum ice_rl_type rl_type)
+{
+ return ice_sched_set_node_bw_lmt(pi, node, rl_type,
+ ICE_SCHED_DFLT_BW);
+}
+
+/**
+ * ice_sched_validate_srl_node - Check node for SRL applicability
+ * @node: sched node to configure
+ * @sel_layer: selected SRL layer
+ *
+ * This function checks if the SRL can be applied to a selected layer node on
+ * behalf of the requested node (first argument). This function needs to be
+ * called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
+{
+ /* SRL profiles are not available on all layers. Check if the
+ * SRL profile can be applied to a node above or below the
+ * requested node. SRL configuration is possible only if the
+	 * selected layer's node has a single child.
+ */
+ if (sel_layer == node->tx_sched_layer ||
+ ((sel_layer == node->tx_sched_layer + 1) &&
+ node->num_children == 1) ||
+ ((sel_layer == node->tx_sched_layer - 1) &&
+ (node->parent && node->parent->num_children == 1)))
+ return ICE_SUCCESS;
+
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_sched_save_q_bw - save queue node's BW information
+ * @q_ctx: queue context structure
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - Kilo bits per sec
+ *
+ * Save BW information of queue type node for post replay use.
+ */
+static enum ice_status
+ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
+{
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_set_q_bw_lmt - sets queue BW limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ * @bw: bandwidth in Kbps
+ *
+ * This function sets BW limit of queue scheduling node.
+ */
+static enum ice_status
+ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *node;
+ struct ice_q_ctx *q_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ ice_acquire_lock(&pi->sched_lock);
+ q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
+ if (!q_ctx)
+ goto exit_q_bw_lmt;
+ node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
+ if (!node) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
+ goto exit_q_bw_lmt;
+ }
+
+ /* Return error if it is not a leaf node */
+ if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
+ goto exit_q_bw_lmt;
+
+ /* SRL bandwidth layer selection */
+ if (rl_type == ICE_SHARED_BW) {
+ u8 sel_layer; /* selected layer */
+
+ sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node->tx_sched_layer);
+ if (sel_layer >= pi->hw->num_tx_sched_layers) {
+ status = ICE_ERR_PARAM;
+ goto exit_q_bw_lmt;
+ }
+ status = ice_sched_validate_srl_node(node, sel_layer);
+ if (status)
+ goto exit_q_bw_lmt;
+ }
+
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
+
+ if (!status)
+ status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
+
+exit_q_bw_lmt:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_q_bw_lmt - configure queue BW limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures BW limit of queue scheduling node.
+ */
+enum ice_status
+ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type, u32 bw)
+{
+ return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
+ bw);
+}
+
+/**
+ * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @q_handle: software queue handle
+ * @rl_type: min, max, or shared
+ *
+ * This function configures BW default limit of queue scheduling node.
+ */
+enum ice_status
+ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 q_handle, enum ice_rl_type rl_type)
+{
+ return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
+ ICE_SCHED_DFLT_BW);
+}
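+
+/*
+ * Illustrative usage sketch (not part of this change): capping queue 0 of
+ * TC 0 on a VSI to 100 Mbps via the max (EIR) rate limiter and restoring
+ * the default afterwards. The vsi_handle value is a hypothetical example.
+ *
+ *	enum ice_status err;
+ *
+ *	err = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW, 100000);
+ *	...
+ *	err = ice_cfg_q_bw_dflt_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW);
+ */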
+
+/**
+ * ice_sched_save_tc_node_bw - save TC node BW limit
+ * @pi: port information structure
+ * @tc: TC number
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function saves the modified values of bandwidth settings for later
+ * replay purpose (restore) after reset.
+ */
+static enum ice_status
+ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ if (tc >= ICE_MAX_TRAFFIC_CLASS)
+ return ICE_ERR_PARAM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&pi->tc_node_bw_t_info[tc], bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&pi->tc_node_bw_t_info[tc], bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&pi->tc_node_bw_t_info[tc], bw);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_set_tc_node_bw_lmt - sets TC node BW limit
+ * @pi: port information structure
+ * @tc: TC number
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures bandwidth limit of TC node.
+ */
+static enum ice_status
+ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *tc_node;
+
+ if (tc >= ICE_MAX_TRAFFIC_CLASS)
+ return status;
+ ice_acquire_lock(&pi->sched_lock);
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ goto exit_set_tc_node_bw;
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, tc_node, rl_type, bw);
+ if (!status)
+ status = ice_sched_save_tc_node_bw(pi, tc, rl_type, bw);
+
+exit_set_tc_node_bw:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_tc_node_bw_lmt - configure TC node BW limit
+ * @pi: port information structure
+ * @tc: TC number
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures BW limit of TC node.
+ * Note: The minimum guaranteed reservation is done via DCBX.
+ */
+enum ice_status
+ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, bw);
+}
+
+/**
+ * ice_cfg_tc_node_bw_dflt_lmt - configure TC node BW default limit
+ * @pi: port information structure
+ * @tc: TC number
+ * @rl_type: min or max
+ *
+ * This function configures BW default limit of TC node.
+ */
+enum ice_status
+ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type)
+{
+ return ice_sched_set_tc_node_bw_lmt(pi, tc, rl_type, ICE_SCHED_DFLT_BW);
+}
+
+/**
+ * ice_sched_save_tc_node_bw_alloc - save TC node's BW alloc information
+ * @pi: port information structure
+ * @tc: traffic class
+ * @rl_type: rate limit type min or max
+ * @bw_alloc: Bandwidth allocation information
+ *
+ * Save BW alloc information of TC type node for post replay use.
+ */
+static enum ice_status
+ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type, u16 bw_alloc)
+{
+ if (tc >= ICE_MAX_TRAFFIC_CLASS)
+ return ICE_ERR_PARAM;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw_alloc(&pi->tc_node_bw_t_info[tc],
+ bw_alloc);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw_alloc(&pi->tc_node_bw_t_info[tc],
+ bw_alloc);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_set_tc_node_bw_alloc - set TC node BW alloc
+ * @pi: port information structure
+ * @tc: TC number
+ * @rl_type: min or max
+ * @bw_alloc: bandwidth alloc
+ *
+ * This function configures the bandwidth allocation of the TC node, saves
+ * the changed settings for replay purposes, and returns success if it
+ * succeeds in modifying the bandwidth allocation setting.
+ */
+static enum ice_status
+ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type, u8 bw_alloc)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *tc_node;
+
+ if (tc >= ICE_MAX_TRAFFIC_CLASS)
+ return status;
+ ice_acquire_lock(&pi->sched_lock);
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ goto exit_set_tc_node_bw_alloc;
+ status = ice_sched_cfg_node_bw_alloc(pi->hw, tc_node, rl_type,
+ bw_alloc);
+ if (status)
+ goto exit_set_tc_node_bw_alloc;
+ status = ice_sched_save_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
+
+exit_set_tc_node_bw_alloc:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_tc_node_bw_alloc - configure TC node BW alloc
+ * @pi: port information structure
+ * @tc: TC number
+ * @rl_type: min or max
+ * @bw_alloc: bandwidth alloc
+ *
+ * This function configures the BW allocation of the TC node.
+ * Note: The minimum guaranteed reservation is done via DCBX.
+ */
+enum ice_status
+ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
+ enum ice_rl_type rl_type, u8 bw_alloc)
+{
+ return ice_sched_set_tc_node_bw_alloc(pi, tc, rl_type, bw_alloc);
+}
+
+/**
+ * ice_sched_set_agg_bw_dflt_lmt - set aggregator node's BW limit to default
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function retrieves the aggregator ID based on VSI ID and TC,
+ * and sets node's BW limit to default. This function needs to be
+ * called with the scheduler lock held.
+ */
+enum ice_status
+ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+ enum ice_status status = ICE_SUCCESS;
+ u8 tc;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+
+ ice_for_each_traffic_class(tc) {
+ struct ice_sched_node *node;
+
+ node = vsi_ctx->sched.ag_node[tc];
+ if (!node)
+ continue;
+
+ /* Set min profile to default */
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MIN_BW);
+ if (status)
+ break;
+
+ /* Set max profile to default */
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, ICE_MAX_BW);
+ if (status)
+ break;
+
+ /* Remove shared profile, if there is one */
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node,
+ ICE_SHARED_BW);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ice_sched_get_node_by_id_type - get node from ID type
+ * @pi: port information structure
+ * @id: identifier
+ * @agg_type: type of aggregator
+ * @tc: traffic class
+ *
+ * This function returns the node identified by ID and aggregator type,
+ * based on traffic class (TC). This function needs to be called with
+ * the scheduler lock held.
+ */
+static struct ice_sched_node *
+ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc)
+{
+ struct ice_sched_node *node = NULL;
+ struct ice_sched_node *child_node;
+
+ switch (agg_type) {
+ case ICE_AGG_TYPE_VSI: {
+ struct ice_vsi_ctx *vsi_ctx;
+ u16 vsi_handle = (u16)id;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ break;
+ /* Get sched_vsi_info */
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ break;
+ node = vsi_ctx->sched.vsi_node[tc];
+ break;
+ }
+
+ case ICE_AGG_TYPE_AGG: {
+ struct ice_sched_node *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (tc_node)
+ node = ice_sched_get_agg_node(pi, tc_node, id);
+ break;
+ }
+
+ case ICE_AGG_TYPE_Q:
+		/* The current implementation allows modifying a single queue */
+ node = ice_sched_get_node(pi, id);
+ break;
+
+ case ICE_AGG_TYPE_QG:
+		/* The current implementation allows modifying a single qg */
+ child_node = ice_sched_get_node(pi, id);
+ if (!child_node)
+ break;
+ node = child_node->parent;
+ break;
+
+ default:
+ break;
+ }
+
+ return node;
+}
+
+/**
+ * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
+ * @pi: port information structure
+ * @id: ID (software VSI handle or AGG ID)
+ * @agg_type: aggregator type (VSI or AGG type node)
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function sets the BW limit of a VSI or aggregator scheduling node
+ * based on TC information, using the passed in BW argument.
+ */
+enum ice_status
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *node;
+
+ if (!pi)
+ return status;
+
+ if (rl_type == ICE_UNKNOWN_BW)
+ return status;
+
+ ice_acquire_lock(&pi->sched_lock);
+ node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
+ if (!node) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
+ goto exit_set_node_bw_lmt_per_tc;
+ }
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
+
+exit_set_node_bw_lmt_per_tc:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_sched_validate_vsi_srl_node - validate VSI SRL node
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function validates the SRL node of the VSI node if the available SRL
+ * layer is different from the VSI node layer on all TC(s). This function
+ * needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
+{
+ u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
+ u8 tc;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Return success if no nodes are present across TC */
+ ice_for_each_traffic_class(tc) {
+ struct ice_sched_node *tc_node, *vsi_node;
+ enum ice_rl_type rl_type = ICE_SHARED_BW;
+ enum ice_status status;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ continue;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ continue;
+
+ /* SRL bandwidth layer selection */
+ if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
+ u8 node_layer = vsi_node->tx_sched_layer;
+ u8 layer_num;
+
+ layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node_layer);
+ if (layer_num >= pi->hw->num_tx_sched_layers)
+ return ICE_ERR_PARAM;
+ sel_layer = layer_num;
+ }
+
+ status = ice_sched_validate_srl_node(vsi_node, sel_layer);
+ if (status)
+ return status;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_sched_set_vsi_bw_shared_lmt - set VSI BW shared limit
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the shared rate limiter (SRL) of all VSI type
+ * nodes across all traffic classes for the VSI matching the given handle.
+ * When a BW value of ICE_SCHED_DFLT_BW is passed, it removes the SRL from
+ * the node.
+ */
+enum ice_status
+ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
+ u32 bw)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u8 tc;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_sched_validate_vsi_srl_node(pi, vsi_handle);
+ if (status)
+ goto exit_set_vsi_bw_shared_lmt;
+ /* Return success if no nodes are present across TC */
+ ice_for_each_traffic_class(tc) {
+ struct ice_sched_node *tc_node, *vsi_node;
+ enum ice_rl_type rl_type = ICE_SHARED_BW;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ continue;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ continue;
+
+ if (bw == ICE_SCHED_DFLT_BW)
+ /* It removes existing SRL from the node */
+ status = ice_sched_set_node_bw_dflt_lmt(pi, vsi_node,
+ rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, vsi_node,
+ rl_type, bw);
+ if (status)
+ break;
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
+ if (status)
+ break;
+ }
+
+exit_set_vsi_bw_shared_lmt:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_sched_validate_agg_srl_node - validate AGG SRL node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ *
+ * This function validates the SRL node of the AGG node if the available SRL
+ * layer is different from the AGG node layer on all TC(s). This function
+ * needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
+{
+ u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
+ struct ice_sched_agg_info *agg_info;
+ bool agg_id_present = false;
+ enum ice_status status = ICE_SUCCESS;
+ u8 tc;
+
+ LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info,
+ list_entry)
+ if (agg_info->agg_id == agg_id) {
+ agg_id_present = true;
+ break;
+ }
+ if (!agg_id_present)
+ return ICE_ERR_PARAM;
+ /* Return success if no nodes are present across TC */
+ ice_for_each_traffic_class(tc) {
+ struct ice_sched_node *tc_node, *agg_node;
+ enum ice_rl_type rl_type = ICE_SHARED_BW;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ continue;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ continue;
+ /* SRL bandwidth layer selection */
+ if (sel_layer == ICE_SCHED_INVAL_LAYER_NUM) {
+ u8 node_layer = agg_node->tx_sched_layer;
+ u8 layer_num;
+
+ layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
+ node_layer);
+ if (layer_num >= pi->hw->num_tx_sched_layers)
+ return ICE_ERR_PARAM;
+ sel_layer = layer_num;
+ }
+
+ status = ice_sched_validate_srl_node(agg_node, sel_layer);
+ if (status)
+ break;
+ }
+ return status;
+}
+
+/**
+ * ice_sched_set_agg_bw_shared_lmt - set aggregator BW shared limit
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the shared rate limiter (SRL) of all aggregator
+ * type nodes across all traffic classes for the aggregator matching agg_id.
+ * When a BW value of ICE_SCHED_DFLT_BW is passed, it removes the SRL from
+ * the node(s).
+ */
+enum ice_status
+ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 bw)
+{
+ struct ice_sched_agg_info *agg_info;
+ struct ice_sched_agg_info *tmp;
+ bool agg_id_present = false;
+ enum ice_status status = ICE_SUCCESS;
+ u8 tc;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_sched_validate_agg_srl_node(pi, agg_id);
+ if (status)
+ goto exit_agg_bw_shared_lmt;
+
+ LIST_FOR_EACH_ENTRY_SAFE(agg_info, tmp, &pi->hw->agg_list,
+ ice_sched_agg_info, list_entry)
+ if (agg_info->agg_id == agg_id) {
+ agg_id_present = true;
+ break;
+ }
+
+ if (!agg_id_present) {
+ status = ICE_ERR_PARAM;
+ goto exit_agg_bw_shared_lmt;
+ }
+
+ /* Return success if no nodes are present across TC */
+ ice_for_each_traffic_class(tc) {
+ enum ice_rl_type rl_type = ICE_SHARED_BW;
+ struct ice_sched_node *tc_node, *agg_node;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ continue;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ continue;
+
+ if (bw == ICE_SCHED_DFLT_BW)
+ /* It removes existing SRL from the node */
+ status = ice_sched_set_node_bw_dflt_lmt(pi, agg_node,
+ rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, agg_node,
+ rl_type, bw);
+ if (status)
+ break;
+ status = ice_sched_save_agg_bw(pi, agg_id, tc, rl_type, bw);
+ if (status)
+ break;
+ }
+
+exit_agg_bw_shared_lmt:
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_sched_cfg_sibl_node_prio - configure node sibling priority
+ * @pi: port information structure
+ * @node: sched node to configure
+ * @priority: sibling priority
+ *
+ * This function configures node element's sibling priority only. This
+ * function needs to be called with scheduler lock held.
+ */
+enum ice_status
+ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
+ struct ice_sched_node *node, u8 priority)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+ struct ice_hw *hw = pi->hw;
+ enum ice_status status;
+
+ if (!hw)
+ return ICE_ERR_PARAM;
+ buf = node->info;
+ data = &buf.data;
+ data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
+ priority = (priority << ICE_AQC_ELEM_GENERIC_PRIO_S) &
+ ICE_AQC_ELEM_GENERIC_PRIO_M;
+ data->generic &= ~ICE_AQC_ELEM_GENERIC_PRIO_M;
+ data->generic |= priority;
+
+ /* Configure element */
+ status = ice_sched_update_elem(hw, node, &buf);
+ return status;
+}
+
+/**
+ * ice_cfg_rl_burst_size - Set burst size value
+ * @hw: pointer to the HW struct
+ * @bytes: burst size in bytes
+ *
+ * This function configures/sets the burst size to the requested new value. The
+ * new burst size value is used for future rate limit calls. It doesn't change
+ * the existing or previously created RL profiles.
+ */
+enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+{
+ u16 burst_size_to_prog;
+
+ if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
+ bytes > ICE_MAX_BURST_SIZE_ALLOWED)
+ return ICE_ERR_PARAM;
+ if (ice_round_to_num(bytes, 64) <=
+ ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
+ /* 64 byte granularity case */
+ /* Disable MSB granularity bit */
+ burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
+ /* round number to nearest 64 byte granularity */
+ bytes = ice_round_to_num(bytes, 64);
+ /* The value is in 64 byte chunks */
+ burst_size_to_prog |= (u16)(bytes / 64);
+ } else {
+ /* k bytes granularity case */
+ /* Enable MSB granularity bit */
+ burst_size_to_prog = ICE_KBYTE_GRANULARITY;
+ /* round number to nearest 1024 granularity */
+ bytes = ice_round_to_num(bytes, 1024);
+ /* check rounding doesn't go beyond allowed */
+ if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
+ bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
+ /* The value is in k bytes */
+ burst_size_to_prog |= (u16)(bytes / 1024);
+ }
+ hw->max_burst_size = burst_size_to_prog;
+ return ICE_SUCCESS;
+}
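+
+/*
+ * Illustrative note (not part of this change): ice_cfg_rl_burst_size()
+ * encodes the burst size in one of two granularities. A request that still
+ * fits the 64 byte range, e.g. 4096 bytes, is programmed as 4096 / 64 = 64
+ * chunks with the MSB granularity bit clear; a larger request is rounded to
+ * a 1024 byte multiple and programmed in KB chunks with the MSB bit set.
+ */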
+
+/**
+ * ice_sched_replay_node_prio - re-configure node priority
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @priority: priority value
+ *
+ * This function configures node element's priority value. It
+ * needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
+ u8 priority)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+ enum ice_status status;
+
+ buf = node->info;
+ data = &buf.data;
+ data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
+ data->generic = priority;
+
+ /* Configure element */
+ status = ice_sched_update_elem(hw, node, &buf);
+ return status;
+}
+
+/**
+ * ice_sched_replay_node_bw - replay node(s) BW
+ * @hw: pointer to the HW struct
+ * @node: sched node to configure
+ * @bw_t_info: BW type information
+ *
+ * This function restores node's BW from bw_t_info. The caller needs
+ * to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
+ struct ice_bw_type_info *bw_t_info)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status status = ICE_ERR_PARAM;
+ u16 bw_alloc;
+
+ if (!node)
+ return status;
+ if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
+ return ICE_SUCCESS;
+ if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) {
+ status = ice_sched_replay_node_prio(hw, node,
+ bw_t_info->generic);
+ if (status)
+ return status;
+ }
+ if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR)) {
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
+ bw_t_info->cir_bw.bw);
+ if (status)
+ return status;
+ }
+ if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CIR_WT)) {
+ bw_alloc = bw_t_info->cir_bw.bw_alloc;
+ status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
+ bw_alloc);
+ if (status)
+ return status;
+ }
+ if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR)) {
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
+ bw_t_info->eir_bw.bw);
+ if (status)
+ return status;
+ }
+ if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_EIR_WT)) {
+ bw_alloc = bw_t_info->eir_bw.bw_alloc;
+ status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
+ bw_alloc);
+ if (status)
+ return status;
+ }
+ if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_SHARED))
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
+ bw_t_info->shared_bw);
+ return status;
+}
+
+/**
+ * ice_sched_replay_agg_bw - replay aggregator node(s) BW
+ * @hw: pointer to the HW struct
+ * @agg_info: aggregator data structure
+ *
+ * This function replays (restores) the BW of aggregator type nodes. The
+ * caller needs to hold the scheduler lock.
+ */
+static enum ice_status
+ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info)
+{
+ struct ice_sched_node *tc_node, *agg_node;
+ enum ice_status status = ICE_SUCCESS;
+ u8 tc;
+
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ ice_for_each_traffic_class(tc) {
+ if (!ice_is_any_bit_set(agg_info->bw_t_info[tc].bw_t_bitmap,
+ ICE_BW_TYPE_CNT))
+ continue;
+ tc_node = ice_sched_get_tc_node(hw->port_info, tc);
+ if (!tc_node) {
+ status = ICE_ERR_PARAM;
+ break;
+ }
+ agg_node = ice_sched_get_agg_node(hw->port_info, tc_node,
+ agg_info->agg_id);
+ if (!agg_node) {
+ status = ICE_ERR_PARAM;
+ break;
+ }
+ status = ice_sched_replay_node_bw(hw, agg_node,
+ &agg_info->bw_t_info[tc]);
+ if (status)
+ break;
+ }
+ return status;
+}
+
+/**
+ * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
+ * @pi: port info struct
+ * @tc_bitmap: 8 bits TC bitmap to check
+ * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
+ *
+ * This function returns the enabled TC bitmap in ena_tc_bitmap. Some TCs may
+ * be missing after a reset, so only the TCs that are present are returned.
+ * This function needs to be called with scheduler lock held.
+ */
+static void
+ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi, ice_bitmap_t *tc_bitmap,
+ ice_bitmap_t *ena_tc_bitmap)
+{
+ u8 tc;
+
+ /* Some TC(s) may be missing after reset, adjust for replay */
+ ice_for_each_traffic_class(tc)
+ if (ice_is_tc_ena(*tc_bitmap, tc) &&
+ (ice_sched_get_tc_node(pi, tc)))
+ ice_set_bit(tc, ena_tc_bitmap);
+}
+
+/**
+ * ice_sched_replay_agg - recreate aggregator node(s)
+ * @hw: pointer to the HW struct
+ *
+ * This function recreates aggregator type nodes which were not replayed
+ * earlier. It also replays aggregator BW information. These aggregator nodes
+ * are not associated with a VSI type node yet.
+ */
+void ice_sched_replay_agg(struct ice_hw *hw)
+{
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_sched_agg_info *agg_info;
+
+ ice_acquire_lock(&pi->sched_lock);
+ LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
+ list_entry) {
+ /* replay aggregator (re-create aggregator node) */
+ if (!ice_cmp_bitmap(agg_info->tc_bitmap,
+ agg_info->replay_tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS)) {
+ ice_declare_bitmap(replay_bitmap,
+ ICE_MAX_TRAFFIC_CLASS);
+ enum ice_status status;
+
+ ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ ice_sched_get_ena_tc_bitmap(pi,
+ agg_info->replay_tc_bitmap,
+ replay_bitmap);
+ status = ice_sched_cfg_agg(hw->port_info,
+ agg_info->agg_id,
+ ICE_AGG_TYPE_AGG,
+ replay_bitmap);
+ if (status) {
+ ice_info(hw, "Replay agg id[%d] failed\n",
+ agg_info->agg_id);
+ /* Move on to next one */
+ continue;
+ }
+ /* Replay aggregator node BW (restore aggregator BW) */
+ status = ice_sched_replay_agg_bw(hw, agg_info);
+ if (status)
+ ice_info(hw, "Replay agg bw [id=%d] failed\n",
+ agg_info->agg_id);
+ }
+ }
+ ice_release_lock(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
+ * @hw: pointer to the HW struct
+ *
+ * This function initializes the aggregator(s) TC bitmap to zero, a required
+ * pre-init step for replaying aggregators.
+ */
+void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
+{
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_sched_agg_info *agg_info;
+
+ ice_acquire_lock(&pi->sched_lock);
+ LIST_FOR_EACH_ENTRY(agg_info, &hw->agg_list, ice_sched_agg_info,
+ list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+ agg_info->tc_bitmap[0] = 0;
+ LIST_FOR_EACH_ENTRY(agg_vsi_info, &agg_info->agg_vsi_list,
+ ice_sched_agg_vsi_info, list_entry)
+ agg_vsi_info->tc_bitmap[0] = 0;
+ }
+ ice_release_lock(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_replay_tc_node_bw - replay TC node(s) BW
+ * @pi: port information structure
+ *
+ * This function replays the bandwidth configuration of the TC nodes.
+ */
+enum ice_status
+ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u8 tc;
+
+ if (!pi->hw)
+ return ICE_ERR_PARAM;
+ ice_acquire_lock(&pi->sched_lock);
+ ice_for_each_traffic_class(tc) {
+ struct ice_sched_node *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ continue; /* TC not present */
+ status = ice_sched_replay_node_bw(pi->hw, tc_node,
+ &pi->tc_node_bw_t_info[tc]);
+ if (status)
+ break;
+ }
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_sched_replay_vsi_bw - replay VSI type node(s) BW
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: 8 bits TC bitmap
+ *
+ * This function replays the VSI type nodes' bandwidth. This function needs to be
+ * called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
+ ice_bitmap_t *tc_bitmap)
+{
+ struct ice_sched_node *vsi_node, *tc_node;
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_bw_type_info *bw_t_info;
+ struct ice_vsi_ctx *vsi_ctx;
+ enum ice_status status = ICE_SUCCESS;
+ u8 tc;
+
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ ice_for_each_traffic_class(tc) {
+ if (!ice_is_tc_ena(*tc_bitmap, tc))
+ continue;
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ continue;
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ continue;
+ bw_t_info = &vsi_ctx->sched.bw_t_info[tc];
+ status = ice_sched_replay_node_bw(hw, vsi_node, bw_t_info);
+ if (status)
+ break;
+ }
+ return status;
+}
+
+/**
+ * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ *
+ * This function replays the aggregator node, the VSI to aggregator type nodes,
+ * and their node bandwidth information. This function needs to be called with
+ * scheduler lock held.
+ */
+static enum ice_status
+ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+{
+ ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status;
+
+ ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
+ if (!agg_info)
+ return ICE_SUCCESS; /* Not present in list - default Agg case */
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (!agg_vsi_info)
+ return ICE_SUCCESS; /* Not present in list - default Agg case */
+ ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
+ replay_bitmap);
+ /* Replay aggregator node associated to vsi_handle */
+ status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
+ ICE_AGG_TYPE_AGG, replay_bitmap);
+ if (status)
+ return status;
+ /* Replay aggregator node BW (restore aggregator BW) */
+ status = ice_sched_replay_agg_bw(hw, agg_info);
+ if (status)
+ return status;
+
+ ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
+ replay_bitmap);
+ /* Move this VSI (vsi_handle) to above aggregator */
+ status = ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
+ replay_bitmap);
+ if (status)
+ return status;
+ /* Replay VSI BW (restore VSI BW) */
+ return ice_sched_replay_vsi_bw(hw, vsi_handle,
+ agg_vsi_info->tc_bitmap);
+}
+
+/**
+ * ice_replay_vsi_agg - replay VSI to aggregator node
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ *
+ * This function replays the association of the VSI to aggregator type nodes,
+ * and the node bandwidth information.
+ */
+enum ice_status
+ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status status;
+
+ ice_acquire_lock(&pi->sched_lock);
+ status = ice_sched_replay_vsi_agg(hw, vsi_handle);
+ ice_release_lock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_sched_replay_q_bw - replay queue type node BW
+ * @pi: port information structure
+ * @q_ctx: queue context structure
+ *
+ * This function replays queue type node bandwidth. This function needs to be
+ * called with scheduler lock held.
+ */
+enum ice_status
+ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
+{
+ struct ice_sched_node *q_node;
+
+ /* Following also checks the presence of node in tree */
+ q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
+ if (!q_node)
+ return ICE_ERR_PARAM;
+ return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
+}
Index: sys/dev/ice/ice_sriov.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_sriov.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_SRIOV_H_
+#define _ICE_SRIOV_H_
+
+#include "ice_common.h"
+
+enum ice_status
+ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen,
+ struct ice_sq_cd *cd);
+
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+#endif /* _ICE_SRIOV_H_ */
Index: sys/dev/ice/ice_sriov.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_sriov.c
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_common.h"
+#include "ice_adminq_cmd.h"
+#include "ice_sriov.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) using the mailbox
+ * queue. The message is sent asynchronously via the
+ * ice_sq_send_cmd() function.
+ */
+enum ice_status
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = CPU_TO_LE32(vfid);
+
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+
+ if (msglen)
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send message to PF driver using mailbox queue. By default, this
+ * message is sent asynchronously, i.e. ice_sq_send_cmd()
+ * does not wait for completion before returning.
+ */
+enum ice_status
+ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
+ enum ice_status v_retval, u8 *msg, u16 msglen,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_pf);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+
+ if (msglen)
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ u32 speed;
+
+ if (adv_link_support)
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ speed = ICE_LINK_SPEED_10MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = ICE_LINK_SPEED_100MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ speed = ICE_LINK_SPEED_1000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ speed = ICE_LINK_SPEED_2500MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = ICE_LINK_SPEED_5000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = ICE_LINK_SPEED_10000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = ICE_LINK_SPEED_20000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = ICE_LINK_SPEED_25000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ speed = ICE_LINK_SPEED_40000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ speed = ICE_LINK_SPEED_50000MBPS;
+ break;
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = ICE_LINK_SPEED_100000MBPS;
+ break;
+ default:
+ speed = ICE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ else
+ /* Virtchnl speeds are not defined for every speed supported by
+ * the hardware. To maintain compatibility with older AVF
+ * drivers, the new speed values are resolved to the closest
+ * known virtchnl speed when the speed is reported.
+ */
+ switch (link_speed) {
+ case ICE_AQ_LINK_SPEED_10MB:
+ case ICE_AQ_LINK_SPEED_100MB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
+ break;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ case ICE_AQ_LINK_SPEED_2500MB:
+ case ICE_AQ_LINK_SPEED_5GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ case ICE_AQ_LINK_SPEED_50GB:
+ case ICE_AQ_LINK_SPEED_100GB:
+ speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
+ break;
+ default:
+ speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed;
+}
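A brief caller-side sketch of the cast-back behavior described in the comment above; the link_speed value is an arbitrary example and the surrounding code is illustrative, not part of this patch:

u16 link_speed = ICE_AQ_LINK_SPEED_25GB;	/* example input value */
enum virtchnl_link_speed vc_speed;
u32 mbps;

/* Legacy AVF: the returned u32 actually holds a VIRTCHNL_LINK_SPEED_* value */
vc_speed = (enum virtchnl_link_speed)
	ice_conv_link_speed_to_virtchnl(false, link_speed);

/* VF that supports advanced link speed reporting: value is already in Mbps */
mbps = ice_conv_link_speed_to_virtchnl(true, link_speed);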
Index: sys/dev/ice/ice_status.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_status.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_STATUS_H_
+#define _ICE_STATUS_H_
+
+/* Error Codes */
+enum ice_status {
+ ICE_SUCCESS = 0,
+
+ /* Generic codes : Range -1..-49 */
+ ICE_ERR_PARAM = -1,
+ ICE_ERR_NOT_IMPL = -2,
+ ICE_ERR_NOT_READY = -3,
+ ICE_ERR_NOT_SUPPORTED = -4,
+ ICE_ERR_BAD_PTR = -5,
+ ICE_ERR_INVAL_SIZE = -6,
+ ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
+ ICE_ERR_RESET_FAILED = -9,
+ ICE_ERR_FW_API_VER = -10,
+ ICE_ERR_NO_MEMORY = -11,
+ ICE_ERR_CFG = -12,
+ ICE_ERR_OUT_OF_RANGE = -13,
+ ICE_ERR_ALREADY_EXISTS = -14,
+ ICE_ERR_DOES_NOT_EXIST = -15,
+ ICE_ERR_IN_USE = -16,
+ ICE_ERR_MAX_LIMIT = -17,
+ ICE_ERR_RESET_ONGOING = -18,
+ ICE_ERR_HW_TABLE = -19,
+ ICE_ERR_FW_DDP_MISMATCH = -20,
+
+ /* NVM specific error codes: Range -50..-59 */
+ ICE_ERR_NVM = -50,
+ ICE_ERR_NVM_CHECKSUM = -51,
+ ICE_ERR_BUF_TOO_SHORT = -52,
+ ICE_ERR_NVM_BLANK_MODE = -53,
+
+ /* ARQ/ASQ specific error codes. Range -100..-109 */
+ ICE_ERR_AQ_ERROR = -100,
+ ICE_ERR_AQ_TIMEOUT = -101,
+ ICE_ERR_AQ_FULL = -102,
+ ICE_ERR_AQ_NO_WORK = -103,
+ ICE_ERR_AQ_EMPTY = -104,
+};
+
+#endif /* _ICE_STATUS_H_ */
Index: sys/dev/ice/ice_strings.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_strings.c
@@ -0,0 +1,1034 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file ice_strings.c
+ * @brief functions to convert enumerated values to human readable strings
+ *
+ * Contains various functions which convert enumerated values into human
+ * readable strings. Primarily this is used for error values, such as the
+ * ice_status enum, the ice_aq_err values, or standard sys/errno.h values.
+ *
+ * Additionally, various other driver enumerations which are displayed via
+ * sysctl have converter functions.
+ *
+ * Some of the functions return struct ice_str_buf, instead of a character
+ * string pointer. This is a trick to allow the function to create a struct
+ * with space to convert unknown numeric values into a string, and return the
+ * contents via copying the struct memory back. The functions then have an
+ * associated macro to access the string value immediately. This allows the
+ * functions to return static strings for known values, and convert unknown
+ * values into a numeric representation. It also does not require
+ * pre-allocating storage at each callsite, or using a local static value
+ * which wouldn't be re-entrant, and could collide if multiple threads call
+ * the function. The extra copies are somewhat annoying, but generally the
+ * error functions aren't expected to be in a hot path so this is an
+ * acceptable trade off.
+ */
+
+#include "ice_lib.h"
+
+/**
+ * ice_aq_str - Convert an AdminQ error into a string
+ * @aq_err: the AQ error code to convert
+ *
+ * Convert the AdminQ status into its string name, if known. Otherwise, format
+ * the error as an integer.
+ */
+struct ice_str_buf
+_ice_aq_str(enum ice_aq_err aq_err)
+{
+ struct ice_str_buf buf = { .str = "" };
+ const char *str = NULL;
+
+ switch (aq_err) {
+ case ICE_AQ_RC_OK:
+ str = "OK";
+ break;
+ case ICE_AQ_RC_EPERM:
+ str = "AQ_RC_EPERM";
+ break;
+ case ICE_AQ_RC_ENOENT:
+ str = "AQ_RC_ENOENT";
+ break;
+ case ICE_AQ_RC_ESRCH:
+ str = "AQ_RC_ESRCH";
+ break;
+ case ICE_AQ_RC_EINTR:
+ str = "AQ_RC_EINTR";
+ break;
+ case ICE_AQ_RC_EIO:
+ str = "AQ_RC_EIO";
+ break;
+ case ICE_AQ_RC_ENXIO:
+ str = "AQ_RC_ENXIO";
+ break;
+ case ICE_AQ_RC_E2BIG:
+ str = "AQ_RC_E2BIG";
+ break;
+ case ICE_AQ_RC_EAGAIN:
+ str = "AQ_RC_EAGAIN";
+ break;
+ case ICE_AQ_RC_ENOMEM:
+ str = "AQ_RC_ENOMEM";
+ break;
+ case ICE_AQ_RC_EACCES:
+ str = "AQ_RC_EACCES";
+ break;
+ case ICE_AQ_RC_EFAULT:
+ str = "AQ_RC_EFAULT";
+ break;
+ case ICE_AQ_RC_EBUSY:
+ str = "AQ_RC_EBUSY";
+ break;
+ case ICE_AQ_RC_EEXIST:
+ str = "AQ_RC_EEXIST";
+ break;
+ case ICE_AQ_RC_EINVAL:
+ str = "AQ_RC_EINVAL";
+ break;
+ case ICE_AQ_RC_ENOTTY:
+ str = "AQ_RC_ENOTTY";
+ break;
+ case ICE_AQ_RC_ENOSPC:
+ str = "AQ_RC_ENOSPC";
+ break;
+ case ICE_AQ_RC_ENOSYS:
+ str = "AQ_RC_ENOSYS";
+ break;
+ case ICE_AQ_RC_ERANGE:
+ str = "AQ_RC_ERANGE";
+ break;
+ case ICE_AQ_RC_EFLUSHED:
+ str = "AQ_RC_EFLUSHED";
+ break;
+ case ICE_AQ_RC_BAD_ADDR:
+ str = "AQ_RC_BAD_ADDR";
+ break;
+ case ICE_AQ_RC_EMODE:
+ str = "AQ_RC_EMODE";
+ break;
+ case ICE_AQ_RC_EFBIG:
+ str = "AQ_RC_EFBIG";
+ break;
+ case ICE_AQ_RC_ESBCOMP:
+ str = "AQ_RC_ESBCOMP";
+ break;
+ case ICE_AQ_RC_ENOSEC:
+ str = "AQ_RC_ENOSEC";
+ break;
+ case ICE_AQ_RC_EBADSIG:
+ str = "AQ_RC_EBADSIG";
+ break;
+ case ICE_AQ_RC_ESVN:
+ str = "AQ_RC_ESVN";
+ break;
+ case ICE_AQ_RC_EBADMAN:
+ str = "AQ_RC_EBADMAN";
+ break;
+ case ICE_AQ_RC_EBADBUF:
+ str = "AQ_RC_EBADBUF";
+ break;
+ case ICE_AQ_RC_EACCES_BMCU:
+ str = "AQ_RC_EACCES_BMCU";
+ break;
+ }
+
+ if (str)
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%s", str);
+ else
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%d", aq_err);
+
+ return buf;
+}
+
+/**
+ * ice_status_str - convert status err code to a string
+ * @status: the status error code to convert
+ *
+ * Convert the status code into its string name if known.
+ *
+ * Otherwise, use the scratch space to format the status code into a number.
+ */
+struct ice_str_buf
+_ice_status_str(enum ice_status status)
+{
+ struct ice_str_buf buf = { .str = "" };
+ const char *str = NULL;
+
+ switch (status) {
+ case ICE_SUCCESS:
+ str = "OK";
+ break;
+ case ICE_ERR_PARAM:
+ str = "ICE_ERR_PARAM";
+ break;
+ case ICE_ERR_NOT_IMPL:
+ str = "ICE_ERR_NOT_IMPL";
+ break;
+ case ICE_ERR_NOT_READY:
+ str = "ICE_ERR_NOT_READY";
+ break;
+ case ICE_ERR_NOT_SUPPORTED:
+ str = "ICE_ERR_NOT_SUPPORTED";
+ break;
+ case ICE_ERR_BAD_PTR:
+ str = "ICE_ERR_BAD_PTR";
+ break;
+ case ICE_ERR_INVAL_SIZE:
+ str = "ICE_ERR_INVAL_SIZE";
+ break;
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ str = "ICE_ERR_DEVICE_NOT_SUPPORTED";
+ break;
+ case ICE_ERR_RESET_FAILED:
+ str = "ICE_ERR_RESET_FAILED";
+ break;
+ case ICE_ERR_FW_API_VER:
+ str = "ICE_ERR_FW_API_VER";
+ break;
+ case ICE_ERR_NO_MEMORY:
+ str = "ICE_ERR_NO_MEMORY";
+ break;
+ case ICE_ERR_CFG:
+ str = "ICE_ERR_CFG";
+ break;
+ case ICE_ERR_OUT_OF_RANGE:
+ str = "ICE_ERR_OUT_OF_RANGE";
+ break;
+ case ICE_ERR_ALREADY_EXISTS:
+ str = "ICE_ERR_ALREADY_EXISTS";
+ break;
+ case ICE_ERR_NVM:
+ str = "ICE_ERR_NVM";
+ break;
+ case ICE_ERR_NVM_CHECKSUM:
+ str = "ICE_ERR_NVM_CHECKSUM";
+ break;
+ case ICE_ERR_BUF_TOO_SHORT:
+ str = "ICE_ERR_BUF_TOO_SHORT";
+ break;
+ case ICE_ERR_NVM_BLANK_MODE:
+ str = "ICE_ERR_NVM_BLANK_MODE";
+ break;
+ case ICE_ERR_IN_USE:
+ str = "ICE_ERR_IN_USE";
+ break;
+ case ICE_ERR_MAX_LIMIT:
+ str = "ICE_ERR_MAX_LIMIT";
+ break;
+ case ICE_ERR_RESET_ONGOING:
+ str = "ICE_ERR_RESET_ONGOING";
+ break;
+ case ICE_ERR_HW_TABLE:
+ str = "ICE_ERR_HW_TABLE";
+ break;
+ case ICE_ERR_DOES_NOT_EXIST:
+ str = "ICE_ERR_DOES_NOT_EXIST";
+ break;
+ case ICE_ERR_AQ_ERROR:
+ str = "ICE_ERR_AQ_ERROR";
+ break;
+ case ICE_ERR_AQ_TIMEOUT:
+ str = "ICE_ERR_AQ_TIMEOUT";
+ break;
+ case ICE_ERR_AQ_FULL:
+ str = "ICE_ERR_AQ_FULL";
+ break;
+ case ICE_ERR_AQ_NO_WORK:
+ str = "ICE_ERR_AQ_NO_WORK";
+ break;
+ case ICE_ERR_AQ_EMPTY:
+ str = "ICE_ERR_AQ_EMPTY";
+ break;
+ case ICE_ERR_FW_DDP_MISMATCH:
+ str = "ICE_ERR_FW_DDP_MISMATCH";
+ break;
+ }
+
+ if (str)
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%s", str);
+ else
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%d", status);
+
+ return buf;
+}
+
+/**
+ * ice_err_str - convert error code to a string
+ * @err: the error code to convert
+ *
+ * Convert an error code into its string/macro name if known. Note that it
+ * does not handle negated errors.
+ *
+ * Otherwise, use the scratch space to format the error into a number.
+ */
+struct ice_str_buf
+_ice_err_str(int err)
+{
+ struct ice_str_buf buf = { .str = "" };
+ const char *str = NULL;
+
+ switch (err) {
+ case 0:
+ str = "OK";
+ break;
+ case EPERM:
+ str = "EPERM";
+ break;
+ case ENOENT:
+ str = "ENOENT";
+ break;
+ case ESRCH:
+ str = "ESRCH";
+ break;
+ case EINTR:
+ str = "EINTR";
+ break;
+ case EIO:
+ str = "EIO";
+ break;
+ case ENXIO:
+ str = "ENXIO";
+ break;
+ case E2BIG:
+ str = "E2BIG";
+ break;
+ case ENOEXEC:
+ str = "ENOEXEC";
+ break;
+ case EBADF:
+ str = "EBADF";
+ break;
+ case ECHILD:
+ str = "ECHILD";
+ break;
+ case EDEADLK:
+ str = "EDEADLK";
+ break;
+ case ENOMEM:
+ str = "ENOMEM";
+ break;
+ case EACCES:
+ str = "EACCES";
+ break;
+ case EFAULT:
+ str = "EFAULT";
+ break;
+ case ENOTBLK:
+ str = "ENOTBLK";
+ break;
+ case EBUSY:
+ str = "EBUSY";
+ break;
+ case EEXIST:
+ str = "EEXIST";
+ break;
+ case EXDEV:
+ str = "EXDEV";
+ break;
+ case ENODEV:
+ str = "ENODEV";
+ break;
+ case ENOTDIR:
+ str = "ENOTDIR";
+ break;
+ case EISDIR:
+ str = "EISDIR";
+ break;
+ case EINVAL:
+ str = "EINVAL";
+ break;
+ case ENFILE:
+ str = "ENFILE";
+ break;
+ case EMFILE:
+ str = "EMFILE";
+ break;
+ case ENOTTY:
+ str = "ENOTTY";
+ break;
+ case ETXTBSY:
+ str = "ETXTBSY";
+ break;
+ case EFBIG:
+ str = "EFBIG";
+ break;
+ case ENOSPC:
+ str = "ENOSPC";
+ break;
+ case ESPIPE:
+ str = "ESPIPE";
+ break;
+ case EROFS:
+ str = "EROFS";
+ break;
+ case EMLINK:
+ str = "EMLINK";
+ break;
+ case EPIPE:
+ str = "EPIPE";
+ break;
+ case EDOM:
+ str = "EDOM";
+ break;
+ case ERANGE:
+ str = "ERANGE";
+ break;
+ case EAGAIN:
+ /* EWOULDBLOCK */
+ str = "EAGAIN";
+ break;
+ case EINPROGRESS:
+ str = "EINPROGRESS";
+ break;
+ case EALREADY:
+ str = "EALREADY";
+ break;
+ case ENOTSOCK:
+ str = "ENOTSOCK";
+ break;
+ case EDESTADDRREQ:
+ str = "EDESTADDRREQ";
+ break;
+ case EMSGSIZE:
+ str = "EMSGSIZE";
+ break;
+ case EPROTOTYPE:
+ str = "EPROTOTYPE";
+ break;
+ case ENOPROTOOPT:
+ str = "ENOPROTOOPT";
+ break;
+ case EPROTONOSUPPORT:
+ str = "EPROTONOSUPPORT";
+ break;
+ case ESOCKTNOSUPPORT:
+ str = "ESOCKTNOSUPPORT";
+ break;
+ case EOPNOTSUPP:
+ str = "EOPNOTSUPP";
+ break;
+ case EPFNOSUPPORT:
+ /* ENOTSUP */
+ str = "EPFNOSUPPORT";
+ break;
+ case EAFNOSUPPORT:
+ str = "EAFNOSUPPORT";
+ break;
+ case EADDRINUSE:
+ str = "EADDRINUSE";
+ break;
+ case EADDRNOTAVAIL:
+ str = "EADDRNOTAVAIL";
+ break;
+ case ENETDOWN:
+ str = "ENETDOWN";
+ break;
+ case ENETUNREACH:
+ str = "ENETUNREACH";
+ break;
+ case ENETRESET:
+ str = "ENETRESET";
+ break;
+ case ECONNABORTED:
+ str = "ECONNABORTED";
+ break;
+ case ECONNRESET:
+ str = "ECONNRESET";
+ break;
+ case ENOBUFS:
+ str = "ENOBUFS";
+ break;
+ case EISCONN:
+ str = "EISCONN";
+ break;
+ case ENOTCONN:
+ str = "ENOTCONN";
+ break;
+ case ESHUTDOWN:
+ str = "ESHUTDOWN";
+ break;
+ case ETOOMANYREFS:
+ str = "ETOOMANYREFS";
+ break;
+ case ETIMEDOUT:
+ str = "ETIMEDOUT";
+ break;
+ case ECONNREFUSED:
+ str = "ECONNREFUSED";
+ break;
+ case ELOOP:
+ str = "ELOOP";
+ break;
+ case ENAMETOOLONG:
+ str = "ENAMETOOLONG";
+ break;
+ case EHOSTDOWN:
+ str = "EHOSTDOWN";
+ break;
+ case EHOSTUNREACH:
+ str = "EHOSTUNREACH";
+ break;
+ case ENOTEMPTY:
+ str = "ENOTEMPTY";
+ break;
+ case EPROCLIM:
+ str = "EPROCLIM";
+ break;
+ case EUSERS:
+ str = "EUSERS";
+ break;
+ case EDQUOT:
+ str = "EDQUOT";
+ break;
+ case ESTALE:
+ str = "ESTALE";
+ break;
+ case EREMOTE:
+ str = "EREMOTE";
+ break;
+ case EBADRPC:
+ str = "EBADRPC";
+ break;
+ case ERPCMISMATCH:
+ str = "ERPCMISMATCH";
+ break;
+ case EPROGUNAVAIL:
+ str = "EPROGUNAVAIL";
+ break;
+ case EPROGMISMATCH:
+ str = "EPROGMISMATCH";
+ break;
+ case EPROCUNAVAIL:
+ str = "EPROCUNAVAIL";
+ break;
+ case ENOLCK:
+ str = "ENOLCK";
+ break;
+ case ENOSYS:
+ str = "ENOSYS";
+ break;
+ case EFTYPE:
+ str = "EFTYPE";
+ break;
+ case EAUTH:
+ str = "EAUTH";
+ break;
+ case ENEEDAUTH:
+ str = "ENEEDAUTH";
+ break;
+ case EIDRM:
+ str = "EIDRM";
+ break;
+ case ENOMSG:
+ str = "ENOMSG";
+ break;
+ case EOVERFLOW:
+ str = "EOVERFLOW";
+ break;
+ case ECANCELED:
+ str = "ECANCELED";
+ break;
+ case EILSEQ:
+ str = "EILSEQ";
+ break;
+ case ENOATTR:
+ str = "ENOATTR";
+ break;
+ case EDOOFUS:
+ str = "EDOOFUS";
+ break;
+ case EBADMSG:
+ str = "EBADMSG";
+ break;
+ case EMULTIHOP:
+ str = "EMULTIHOP";
+ break;
+ case ENOLINK:
+ str = "ENOLINK";
+ break;
+ case EPROTO:
+ str = "EPROTO";
+ break;
+ case ENOTCAPABLE:
+ str = "ENOTCAPABLE";
+ break;
+ case ECAPMODE:
+ str = "ECAPMODE";
+ break;
+ case ENOTRECOVERABLE:
+ str = "ENOTRECOVERABLE";
+ break;
+ case EOWNERDEAD:
+ str = "EOWNERDEAD";
+ break;
+ }
+
+ if (str)
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%s", str);
+ else
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%d", err);
+
+ return buf;
+}
+
+/**
+ * ice_fec_str - convert fec mode enum to a string
+ * @mode: the enum value to convert
+ *
+ * Convert an FEC mode enum to a string for display in a sysctl or log message.
+ * Returns "Unknown" if the mode is not one of currently known FEC modes.
+ */
+const char *
+ice_fec_str(enum ice_fec_mode mode)
+{
+ switch (mode) {
+ case ICE_FEC_AUTO:
+ return ICE_FEC_STRING_AUTO;
+ case ICE_FEC_RS:
+ return ICE_FEC_STRING_RS;
+ case ICE_FEC_BASER:
+ return ICE_FEC_STRING_BASER;
+ case ICE_FEC_NONE:
+ return ICE_FEC_STRING_NONE;
+ }
+
+ /* The compiler generates errors on unhandled enum values if we omit
+ * the default case.
+ */
+ return "Unknown";
+}
+
+/**
+ * ice_fc_str - convert flow control mode enum to a string
+ * @mode: the enum value to convert
+ *
+ * Convert a flow control mode enum to a string for display in a sysctl or log
+ * message. Returns "Unknown" if the mode is not one of the currently
+ * supported or known flow control modes.
+ */
+const char *
+ice_fc_str(enum ice_fc_mode mode)
+{
+ switch (mode) {
+ case ICE_FC_FULL:
+ return ICE_FC_STRING_FULL;
+ case ICE_FC_TX_PAUSE:
+ return ICE_FC_STRING_TX;
+ case ICE_FC_RX_PAUSE:
+ return ICE_FC_STRING_RX;
+ case ICE_FC_NONE:
+ return ICE_FC_STRING_NONE;
+ case ICE_FC_AUTO:
+ case ICE_FC_PFC:
+ case ICE_FC_DFLT:
+ break;
+ }
+
+ /* The compiler generates errors on unhandled enum values if we omit
+ * the default case.
+ */
+ return "Unknown";
+}
+
+/**
+ * ice_fltr_flag_str - Convert filter flags to a string
+ * @flag: the filter flags to convert
+ *
+ * Convert the u16 flag value of a filter into a readable string for
+ * outputting in a sysctl.
+ */
+struct ice_str_buf
+_ice_fltr_flag_str(u16 flag)
+{
+ struct ice_str_buf buf = { .str = "" };
+ const char *str = NULL;
+
+ switch (flag) {
+ case ICE_FLTR_RX:
+ str = "RX";
+ break;
+ case ICE_FLTR_TX:
+ str = "TX";
+ break;
+ case ICE_FLTR_TX_RX:
+ str = "TX_RX";
+ break;
+ default:
+ break;
+ }
+
+ if (str)
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%s", str);
+ else
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%u", flag);
+
+ return buf;
+}
+
+/**
+ * ice_fwd_act_str - convert filter action enum to a string
+ * @action: the filter action to convert
+ *
+ * Convert an enum value of type enum ice_sw_fwd_act_type into a string, for
+ * display in a sysctl filter list. Returns "Unknown" for actions outside the
+ * enumeration type.
+ */
+const char *
+ice_fwd_act_str(enum ice_sw_fwd_act_type action)
+{
+ switch (action) {
+ case ICE_FWD_TO_VSI:
+ return "FWD_TO_VSI";
+ case ICE_FWD_TO_VSI_LIST:
+ return "FWD_TO_VSI_LIST";
+ case ICE_FWD_TO_Q:
+ return "FWD_TO_Q";
+ case ICE_FWD_TO_QGRP:
+ return "FWD_TO_QGRP";
+ case ICE_DROP_PACKET:
+ return "DROP_PACKET";
+ case ICE_INVAL_ACT:
+ return "INVAL_ACT";
+ }
+
+ /* The compiler generates errors on unhandled enum values if we omit
+ * the default case.
+ */
+ return "Unknown";
+}
+
+/**
+ * ice_mdd_tx_tclan_str - Convert MDD Tx TCLAN event to a string
+ * @event: the MDD event number to convert
+ *
+ * Convert the Tx TCLAN event value from the GL_MDET_TX_TCLAN register into
+ * a human readable string for logging of MDD events.
+ */
+struct ice_str_buf
+_ice_mdd_tx_tclan_str(u8 event)
+{
+ struct ice_str_buf buf = { .str = "" };
+ const char *str = NULL;
+
+ switch (event) {
+ case 0:
+ str = "Wrong descriptor format/order";
+ break;
+ case 1:
+ str = "Descriptor fetch failed";
+ break;
+ case 2:
+ str = "Tail descriptor not EOP/NOP";
+ break;
+ case 3:
+ str = "False scheduling error";
+ break;
+ case 4:
+ str = "Tail value larger than ring len";
+ break;
+ case 5:
+ str = "Too many data commands";
+ break;
+ case 6:
+ str = "Zero packets sent in quanta";
+ break;
+ case 7:
+ str = "Packet too small or too big";
+ break;
+ case 8:
+ str = "TSO length doesn't match sum";
+ break;
+ case 9:
+ str = "TSO tail reached before TLEN";
+ break;
+ case 10:
+ str = "TSO max 3 descs for headers";
+ break;
+ case 11:
+ str = "EOP on header descriptor";
+ break;
+ case 12:
+ str = "MSS is 0 or TLEN is 0";
+ break;
+ case 13:
+ str = "CTX desc invalid IPSec fields";
+ break;
+ case 14:
+ str = "Quanta invalid # of SSO packets";
+ break;
+ case 15:
+ str = "Quanta bytes exceeds pkt_len*64";
+ break;
+ case 16:
+ str = "Quanta exceeds max_cmds_in_sq";
+ break;
+ case 17:
+ str = "incoherent last_lso_quanta";
+ break;
+ case 18:
+ str = "incoherent TSO TLEN";
+ break;
+ case 19:
+ str = "Quanta: too many descriptors";
+ break;
+ case 20:
+ str = "Quanta: # of packets mismatch";
+ break;
+ default:
+ break;
+ }
+
+ if (str)
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%s", str);
+ else
+ snprintf(buf.str, ICE_STR_BUF_LEN, "Unknown Tx TCLAN event %u", event);
+
+ return buf;
+}
+
+/**
+ * ice_mdd_tx_pqm_str - Convert MDD Tx PQM event to a string
+ * @event: the MDD event number to convert
+ *
+ * Convert the Tx PQM event value from the GL_MDET_TX_PQM register into
+ * a human readable string for logging of MDD events.
+ */
+struct ice_str_buf
+_ice_mdd_tx_pqm_str(u8 event)
+{
+ struct ice_str_buf buf = { .str = "" };
+ const char *str = NULL;
+
+ switch (event) {
+ case 0:
+ str = "PCI_DUMMY_COMP";
+ break;
+ case 1:
+ str = "PCI_UR_COMP";
+ break;
+ /* Index 2 is unused */
+ case 3:
+ str = "RCV_SH_BE_LSO";
+ break;
+ case 4:
+ str = "Q_FL_MNG_EPY_CH";
+ break;
+ case 5:
+ str = "Q_EPY_MNG_FL_CH";
+ break;
+ case 6:
+ str = "LSO_NUMDESCS_ZERO";
+ break;
+ case 7:
+ str = "LSO_LENGTH_ZERO";
+ break;
+ case 8:
+ str = "LSO_MSS_BELOW_MIN";
+ break;
+ case 9:
+ str = "LSO_MSS_ABOVE_MAX";
+ break;
+ case 10:
+ str = "LSO_HDR_SIZE_ZERO";
+ break;
+ case 11:
+ str = "RCV_CNT_BE_LSO";
+ break;
+ case 12:
+ str = "SKIP_ONE_QT_ONLY";
+ break;
+ case 13:
+ str = "LSO_PKTCNT_ZERO";
+ break;
+ case 14:
+ str = "SSO_LENGTH_ZERO";
+ break;
+ case 15:
+ str = "SSO_LENGTH_EXCEED";
+ break;
+ case 16:
+ str = "SSO_PKTCNT_ZERO";
+ break;
+ case 17:
+ str = "SSO_PKTCNT_EXCEED";
+ break;
+ case 18:
+ str = "SSO_NUMDESCS_ZERO";
+ break;
+ case 19:
+ str = "SSO_NUMDESCS_EXCEED";
+ break;
+ case 20:
+ str = "TAIL_GT_RING_LENGTH";
+ break;
+ case 21:
+ str = "RESERVED_DBL_TYPE";
+ break;
+ case 22:
+ str = "ILLEGAL_HEAD_DROP_DBL";
+ break;
+ case 23:
+ str = "LSO_OVER_COMMS_Q";
+ break;
+ case 24:
+ str = "ILLEGAL_VF_QNUM";
+ break;
+ case 25:
+ str = "QTAIL_GT_RING_LENGTH";
+ break;
+ default:
+ break;
+ }
+
+ if (str)
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%s", str);
+ else
+ snprintf(buf.str, ICE_STR_BUF_LEN, "Unknown Tx PQM event %u", event);
+
+ return buf;
+}
+
+/**
+ * ice_mdd_rx_str - Convert MDD Rx queue event to a string
+ * @event: the MDD event number to convert
+ *
+ * Convert the Rx queue event value from the GL_MDET_RX register into a human
+ * readable string for logging of MDD events.
+ */
+struct ice_str_buf
+_ice_mdd_rx_str(u8 event)
+{
+ struct ice_str_buf buf = { .str = "" };
+ const char *str = NULL;
+
+ switch (event) {
+ case 1:
+ str = "Descriptor fetch failed";
+ break;
+ default:
+ break;
+ }
+
+ if (str)
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%s", str);
+ else
+ snprintf(buf.str, ICE_STR_BUF_LEN, "Unknown Rx event %u", event);
+
+ return buf;
+}
+
+/**
+ * ice_state_to_str - Convert the state enum to a string value
+ * @state: the state bit to convert
+ *
+ * Converts a given state bit to its human readable string name. If the enum
+ * value is unknown, returns NULL.
+ */
+const char *
+ice_state_to_str(enum ice_state state)
+{
+ switch (state) {
+ case ICE_STATE_CONTROLQ_EVENT_PENDING:
+ return "CONTROLQ_EVENT_PENDING";
+ case ICE_STATE_VFLR_PENDING:
+ return "VFLR_PENDING";
+ case ICE_STATE_MDD_PENDING:
+ return "MDD_PENDING";
+ case ICE_STATE_RESET_OICR_RECV:
+ return "RESET_OICR_RECV";
+ case ICE_STATE_RESET_PFR_REQ:
+ return "RESET_PFR_REQ";
+ case ICE_STATE_PREPARED_FOR_RESET:
+ return "PREPARED_FOR_RESET";
+ case ICE_STATE_RESET_FAILED:
+ return "RESET_FAILED";
+ case ICE_STATE_DRIVER_INITIALIZED:
+ return "DRIVER_INITIALIZED";
+ case ICE_STATE_NO_MEDIA:
+ return "NO_MEDIA";
+ case ICE_STATE_RECOVERY_MODE:
+ return "RECOVERY_MODE";
+ case ICE_STATE_ROLLBACK_MODE:
+ return "ROLLBACK_MODE";
+ case ICE_STATE_LINK_STATUS_REPORTED:
+ return "LINK_STATUS_REPORTED";
+ case ICE_STATE_DETACHING:
+ return "DETACHING";
+ case ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING:
+ return "LINK_DEFAULT_OVERRIDE_PENDING";
+ case ICE_STATE_LAST:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_fw_lldp_status - Convert FW LLDP status to a string
+ * @lldp_status: firmware LLDP status value to convert
+ *
+ * Given the FW LLDP status, convert it to a human readable string.
+ */
+struct ice_str_buf
+_ice_fw_lldp_status(u32 lldp_status)
+{
+ struct ice_str_buf buf = { .str = "" };
+ const char *str = NULL;
+
+ switch (lldp_status)
+ {
+ case ICE_LLDP_ADMINSTATUS_DIS:
+ str = "DISABLED";
+ break;
+ case ICE_LLDP_ADMINSTATUS_ENA_RX:
+ str = "ENA_RX";
+ break;
+ case ICE_LLDP_ADMINSTATUS_ENA_TX:
+ str = "ENA_TX";
+ break;
+ case ICE_LLDP_ADMINSTATUS_ENA_RXTX:
+ str = "ENA_RXTX";
+ break;
+ case 0xF:
+ str = "NVM_DEFAULT";
+ break;
+ }
+
+ if (str)
+ snprintf(buf.str, ICE_STR_BUF_LEN, "%s", str);
+ else
+ snprintf(buf.str, ICE_STR_BUF_LEN, "Unknown LLDP status %u", lldp_status);
+
+ return buf;
+}
Index: sys/dev/ice/ice_switch.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_switch.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_SWITCH_H_
+#define _ICE_SWITCH_H_
+
+#include "ice_common.h"
+#include "ice_protocol_type.h"
+
+#define ICE_SW_CFG_MAX_BUF_LEN 2048
+#define ICE_MAX_SW 256
+#define ICE_DFLT_VSI_INVAL 0xff
+#define ICE_FLTR_RX BIT(0)
+#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX)
+
+/* Worst case buffer length for ice_aqc_opc_get_res_alloc */
+#define ICE_MAX_RES_TYPES 0x80
+#define ICE_AQ_GET_RES_ALLOC_BUF_LEN \
+ (ICE_MAX_RES_TYPES * sizeof(struct ice_aqc_get_res_resp_elem))
+
+#define ICE_VSI_INVAL_ID 0xFFFF
+#define ICE_INVAL_Q_HANDLE 0xFFFF
+
+/* VSI context structure for add/get/update/free operations */
+struct ice_vsi_ctx {
+ u16 vsi_num;
+ u16 vsis_allocd;
+ u16 vsis_unallocated;
+ u16 flags;
+ struct ice_aqc_vsi_props info;
+ struct ice_sched_vsi_info sched;
+ u8 alloc_from_pool;
+ u8 vf_num;
+ u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
+};
+
+/* This is to be used by add/update mirror rule Admin Queue command */
+struct ice_mir_rule_buf {
+ u16 vsi_idx; /* VSI index */
+
+ /* For each VSI, user can specify whether corresponding VSI
+ * should be added/removed to/from mirror rule
+ *
+ * add mirror rule: this should always be TRUE.
+ * update mirror rule: add(true) or remove(false) VSI to/from
+ * mirror rule
+ */
+ u8 add;
+};
+
+/* Switch recipe ID enum values are specific to hardware */
+enum ice_sw_lkup_type {
+ ICE_SW_LKUP_ETHERTYPE = 0,
+ ICE_SW_LKUP_MAC = 1,
+ ICE_SW_LKUP_MAC_VLAN = 2,
+ ICE_SW_LKUP_PROMISC = 3,
+ ICE_SW_LKUP_VLAN = 4,
+ ICE_SW_LKUP_DFLT = 5,
+ ICE_SW_LKUP_ETHERTYPE_MAC = 8,
+ ICE_SW_LKUP_PROMISC_VLAN = 9,
+ ICE_SW_LKUP_LAST
+};
+
+/* type of filter src ID */
+enum ice_src_id {
+ ICE_SRC_ID_UNKNOWN = 0,
+ ICE_SRC_ID_VSI,
+ ICE_SRC_ID_QUEUE,
+ ICE_SRC_ID_LPORT,
+};
+
+struct ice_fltr_info {
+ /* Look up information: how to look up packet */
+ enum ice_sw_lkup_type lkup_type;
+ /* Forward action: filter action to do after lookup */
+ enum ice_sw_fwd_act_type fltr_act;
+ /* rule ID returned by firmware once filter rule is created */
+ u16 fltr_rule_id;
+ u16 flag;
+
+ /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
+ u16 src;
+ enum ice_src_id src_id;
+
+ union {
+ struct {
+ u8 mac_addr[ETH_ALEN];
+ } mac;
+ struct {
+ u8 mac_addr[ETH_ALEN];
+ u16 vlan_id;
+ } mac_vlan;
+ struct {
+ u16 vlan_id;
+ } vlan;
+ /* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
+ * if just using ethertype as filter. Set lkup_type as
+ * ICE_SW_LKUP_ETHERTYPE_MAC if MAC also needs to be
+ * passed in as filter.
+ */
+ struct {
+ u16 ethertype;
+ u8 mac_addr[ETH_ALEN]; /* optional */
+ } ethertype_mac;
+ } l_data; /* Make sure to zero out the memory of l_data before using
+ * it, or only set the data associated with the lookup match;
+ * everything else should be zero
+ */
+
+ /* Depending on filter action */
+ union {
+ /* queue ID in case of ICE_FWD_TO_Q and starting
+ * queue ID in case of ICE_FWD_TO_QGRP.
+ */
+ u16 q_id:11;
+ u16 hw_vsi_id:10;
+ u16 vsi_id:10;
+ u16 vsi_list_id:10;
+ } fwd_id;
+
+ /* Sw VSI handle */
+ u16 vsi_handle;
+
+ /* Set to num_queues if action is ICE_FWD_TO_QGRP. This field
+ * determines the range of queues the packet needs to be forwarded to.
+ * Note that qgrp_size must be set to a power of 2.
+ */
+ u8 qgrp_size;
+
+ /* Rule creations populate these indicators based on the switch type */
+ u8 lb_en; /* Indicate if packet can be looped back */
+ u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
+};
+
+struct ice_adv_lkup_elem {
+ enum ice_protocol_type type;
+ union ice_prot_hdr h_u; /* Header values */
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+};
+
+struct ice_sw_act_ctrl {
+ /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
+ u16 src;
+ u16 flag;
+ enum ice_sw_fwd_act_type fltr_act;
+ /* Depending on filter action */
+ union {
+ /* This is a queue ID in case of ICE_FWD_TO_Q and starting
+ * queue ID in case of ICE_FWD_TO_QGRP.
+ */
+ u16 q_id:11;
+ u16 vsi_id:10;
+ u16 hw_vsi_id:10;
+ u16 vsi_list_id:10;
+ } fwd_id;
+ /* software VSI handle */
+ u16 vsi_handle;
+ u8 qgrp_size;
+};
+
+struct ice_rule_query_data {
+ /* Recipe ID for which the requested rule was added */
+ u16 rid;
+ /* Rule ID that was added or is supposed to be removed */
+ u16 rule_id;
+ /* vsi_handle for which Rule was added or is supposed to be removed */
+ u16 vsi_handle;
+};
+
+struct ice_adv_rule_info {
+ enum ice_sw_tunnel_type tun_type;
+ struct ice_sw_act_ctrl sw_act;
+ u32 priority;
+ u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
+ u16 fltr_rule_id;
+};
+
+/* A collection of one or more four-word recipes */
+struct ice_sw_recipe {
+ /* For a chained recipe the root recipe is what should be used for
+ * programming rules
+ */
+ u8 is_root;
+ u8 root_rid;
+ u8 recp_created;
+
+ /* Number of extraction words */
+ u8 n_ext_words;
+ /* Protocol ID and Offset pair (extraction word) to describe the
+ * recipe
+ */
+ struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
+ u16 word_masks[ICE_MAX_CHAIN_WORDS];
+
+ /* if this recipe is a collection of other recipes */
+ u8 big_recp;
+
+ /* if this recipe is part of another, bigger recipe, this is the chain
+ * index corresponding to this recipe
+ */
+ u8 chain_idx;
+
+ /* if this recipe is a collection of other recipes, this is the count of
+ * those recipes and the recipe IDs of those recipes
+ */
+ u8 n_grp_count;
+
+ /* Bit map specifying the IDs associated with this group of recipes */
+ ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
+
+ enum ice_sw_tunnel_type tun_type;
+
+ /* List of type ice_fltr_mgmt_list_entry or adv_rule */
+ u8 adv_rule;
+ struct LIST_HEAD_TYPE filt_rules;
+ struct LIST_HEAD_TYPE filt_replay_rules;
+
+ struct ice_lock filt_rule_lock; /* protect filter rule structure */
+
+ /* Profiles this recipe should be associated with */
+ struct LIST_HEAD_TYPE fv_list;
+
+ /* Profiles this recipe is associated with */
+ u8 num_profs, *prof_ids;
+
+ /* Possible result indexes are 44, 45, 46 and 47 */
+#define ICE_POSSIBLE_RES_IDX 0x0000F00000000000ULL
+ ice_declare_bitmap(res_idxs, ICE_MAX_FV_WORDS);
+
+ /* This allows the user to specify the recipe priority.
+ * For now, this becomes 'fwd_priority' when the recipe
+ * is created; usually recipes can have 'fwd' and 'join'
+ * priority.
+ */
+ u8 priority;
+
+ struct LIST_HEAD_TYPE rg_list;
+
+ /* AQ buffer associated with this recipe */
+ struct ice_aqc_recipe_data_elem *root_buf;
+ /* This struct saves the fv_words for a given lookup */
+ struct ice_prot_lkup_ext lkup_exts;
+};
+
+/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
+struct ice_vsi_list_map_info {
+ struct LIST_ENTRY_TYPE list_entry;
+ ice_declare_bitmap(vsi_map, ICE_MAX_VSI);
+ u16 vsi_list_id;
+ /* counter to track how many rules are reusing this VSI list */
+ u16 ref_cnt;
+};
+
+struct ice_fltr_list_entry {
+ struct LIST_ENTRY_TYPE list_entry;
+ enum ice_status status;
+ struct ice_fltr_info fltr_info;
+};
+
+/**
+ * enum ice_fltr_marker - Marker for syncing OS and driver filter lists
+ * @ICE_FLTR_NOT_FOUND: initial state, indicates filter has not been found
+ * @ICE_FLTR_FOUND: set when a filter has been found in both lists
+ *
+ * This enumeration is used to help sync an operating system provided filter
+ * list with the filters previously added.
+ *
+ * This is required for FreeBSD because the operating system does not provide
+ * individual indications of whether a filter has been added or deleted, but
+ * instead just notifies the driver with the entire new list.
+ *
+ * To use this marker state, the driver shall initially reset all filters to
+ * the ICE_FLTR_NOT_FOUND state. Then, for each filter in the OS list, it
+ * shall search the driver list for the filter. If found, the filter state
+ * will be set to ICE_FLTR_FOUND. If not found, that filter will be added.
+ * Finally, the driver shall search the internal filter list for all filters
+ * still marked as ICE_FLTR_NOT_FOUND and remove them.
+ */
+enum ice_fltr_marker {
+ ICE_FLTR_NOT_FOUND,
+ ICE_FLTR_FOUND,
+};
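A minimal sketch of the mark-and-sweep flow described above. The helpers ice_example_find_fltr(), ice_example_add_fltr() and ice_example_del_fltr() are hypothetical placeholders for whatever lookup/add/remove routines the caller uses; they are not part of this patch:

static void
ice_example_sync_fltrs(struct LIST_HEAD_TYPE *drv_list, const u8 *os_mac)
{
	struct ice_fltr_mgmt_list_entry *entry;

	/* 1) Assume no previously added filter is still wanted */
	LIST_FOR_EACH_ENTRY(entry, drv_list, ice_fltr_mgmt_list_entry,
			    list_entry)
		entry->marker = ICE_FLTR_NOT_FOUND;

	/* 2) For each filter in the OS list (one shown): mark a match as
	 * found, otherwise add the new filter
	 */
	entry = ice_example_find_fltr(drv_list, os_mac);
	if (entry)
		entry->marker = ICE_FLTR_FOUND;
	else
		ice_example_add_fltr(os_mac);

	/* 3) Anything still marked ICE_FLTR_NOT_FOUND was removed by the OS;
	 * delete it with a deletion-safe list walk, e.g. ice_example_del_fltr()
	 */
}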
+
+/* This defines an entry in the list that maintains the mapping of MAC or VLAN
+ * membership to the HW list, since multiple VSIs can subscribe to the same MAC
+ * or VLAN. As an optimization, the VSI list should be created only when a
+ * second VSI becomes a subscriber to the same MAC address. VSI lists are always
+ * used for VLAN membership.
+ */
+struct ice_fltr_mgmt_list_entry {
+ /* back pointer to VSI list ID to VSI list mapping */
+ struct ice_vsi_list_map_info *vsi_list_info;
+ u16 vsi_count;
+#define ICE_INVAL_LG_ACT_INDEX 0xffff
+ u16 lg_act_idx;
+#define ICE_INVAL_SW_MARKER_ID 0xffff
+ u16 sw_marker_id;
+ struct LIST_ENTRY_TYPE list_entry;
+ struct ice_fltr_info fltr_info;
+#define ICE_INVAL_COUNTER_ID 0xff
+ u8 counter_index;
+ enum ice_fltr_marker marker;
+};
+
+struct ice_adv_fltr_mgmt_list_entry {
+ struct LIST_ENTRY_TYPE list_entry;
+
+ struct ice_adv_lkup_elem *lkups;
+ struct ice_adv_rule_info rule_info;
+ u16 lkups_cnt;
+ struct ice_vsi_list_map_info *vsi_list_info;
+ u16 vsi_count;
+};
+
+enum ice_promisc_flags {
+ ICE_PROMISC_UCAST_RX = 0x1,
+ ICE_PROMISC_UCAST_TX = 0x2,
+ ICE_PROMISC_MCAST_RX = 0x4,
+ ICE_PROMISC_MCAST_TX = 0x8,
+ ICE_PROMISC_BCAST_RX = 0x10,
+ ICE_PROMISC_BCAST_TX = 0x20,
+ ICE_PROMISC_VLAN_RX = 0x40,
+ ICE_PROMISC_VLAN_TX = 0x80,
+};
+
+/* VSI related commands */
+enum ice_status
+ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd);
+enum ice_status
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+void ice_clear_all_vsi_ctx(struct ice_hw *hw);
+enum ice_status
+ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
+ u16 count, struct ice_mir_rule_buf *mr_buf,
+ struct ice_sq_cd *cd, u16 *rule_id);
+enum ice_status
+ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
+ u32 *ctl_bitmask);
+enum ice_status
+ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
+ u32 ctl_bitmask);
+/* Switch config */
+enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+
+enum ice_status
+ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id);
+enum ice_status
+ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id);
+
+/* Switch/bridge related commands */
+enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
+enum ice_status
+ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
+ u16 *counter_id);
+enum ice_status
+ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id);
+enum ice_status
+ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
+ u16 buf_size, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_get_allocd_res_desc_resp *buf,
+ u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
+ struct ice_sq_cd *cd);
+enum ice_status
+ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list);
+enum ice_status
+ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list);
+void ice_rem_all_sw_rules_info(struct ice_hw *hw);
+enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);
+enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list);
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list);
+
+enum ice_status
+ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
+ u16 sw_marker);
+enum ice_status
+ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info);
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
+
+/* Promisc/defport setup for VSIs */
+enum ice_status
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction);
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc);
+
+/* Get VSIs Promisc/defport settings */
+enum ice_status
+ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
+ u16 *vid);
+enum ice_status
+ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
+ u16 *vid);
+
+enum ice_status ice_replay_all_fltr(struct ice_hw *hw);
+
+enum ice_status
+ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list);
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
+
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
+
+#endif /* _ICE_SWITCH_H_ */
Index: sys/dev/ice/ice_switch.c
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_switch.c
@@ -0,0 +1,4126 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#include "ice_switch.h"
+#include "ice_flex_type.h"
+#include "ice_flow.h"
+
+#define ICE_ETH_DA_OFFSET 0
+#define ICE_ETH_ETHTYPE_OFFSET 12
+#define ICE_ETH_VLAN_TCI_OFFSET 14
+#define ICE_MAX_VLAN_ID 0xFFF
+
+/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
+ * struct to configure any switch filter rules.
+ * {DA (6 bytes), SA(6 bytes),
+ * Ether type (2 bytes for header without VLAN tag) OR
+ * VLAN tag (4 bytes for header with VLAN tag) }
+ *
+ * Word on Hardcoded values
+ * byte 0 = 0x2: to identify it as locally administered DA MAC
+ * byte 6 = 0x2: to identify it as locally administered SA MAC
+ * byte 12 = 0x81 & byte 13 = 0x00:
+ * In case of a VLAN filter, the first two bytes define the ether type (0x8100)
+ * and the remaining two bytes are a placeholder for programming a given VLAN ID.
+ * In case of an Ether type filter, it is treated as a header without a VLAN tag,
+ * and bytes 12 and 13 are used to program a given Ether type instead.
+ */
+#define DUMMY_ETH_HDR_LEN 16
+static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
+ 0x2, 0, 0, 0, 0, 0,
+ 0x81, 0, 0, 0};
+
+#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
+ (sizeof(struct ice_aqc_sw_rules_elem) - \
+ sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
+ sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
+ (sizeof(struct ice_aqc_sw_rules_elem) - \
+ sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
+ sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
+#define ICE_SW_RULE_LG_ACT_SIZE(n) \
+ (sizeof(struct ice_aqc_sw_rules_elem) - \
+ sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
+ sizeof(struct ice_sw_rule_lg_act) - \
+ sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
+ ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
+#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
+ (sizeof(struct ice_aqc_sw_rules_elem) - \
+ sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
+ sizeof(struct ice_sw_rule_vsi_list) - \
+ sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
+ ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
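+
+/* A note on the size macros above: each ICE_SW_RULE_*_SIZE computes an admin
+ * queue buffer length as the fixed part of struct ice_aqc_sw_rules_elem
+ * (minus its variable-length pdata member) plus the rule-specific payload,
+ * sized for either the dummy Ethernet header or for (n) action/VSI entries
+ * as appropriate.
+ */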
+
+/**
+ * ice_init_def_sw_recp - initialize the recipe book keeping tables
+ * @hw: pointer to the HW struct
+ * @recp_list: pointer to sw recipe list
+ *
+ * Allocate memory for the entire recipe table and initialize the structures/
+ * entries corresponding to basic recipes.
+ */
+enum ice_status
+ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
+{
+ struct ice_sw_recipe *recps;
+ u8 i;
+
+ recps = (struct ice_sw_recipe *)
+ ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
+ if (!recps)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ recps[i].root_rid = i;
+ INIT_LIST_HEAD(&recps[i].filt_rules);
+ INIT_LIST_HEAD(&recps[i].filt_replay_rules);
+ INIT_LIST_HEAD(&recps[i].rg_list);
+ ice_init_lock(&recps[i].filt_rule_lock);
+ }
+
+ *recp_list = recps;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_aq_get_sw_cfg - get switch configuration
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of the buffer available for response
+ * @req_desc: pointer to requested descriptor
+ * @num_elems: pointer to number of elements
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get switch configuration (0x0200) to be placed in 'buf'.
+ * This admin command returns information such as initial VSI/port number
+ * and switch ID it belongs to.
+ *
+ * NOTE: *req_desc is both an input and an output parameter.
+ * The caller first calls this function with *req_desc set to 0. If the
+ * response from f/w has *req_desc set to 0, all the switch configuration
+ * information has been returned; if it is non-zero (meaning not all of the
+ * information was returned), the caller should call this function again
+ * with *req_desc set to the previous value returned by f/w to get the
+ * next block of switch configuration information.
+ *
+ * *num_elems is an output-only parameter. It reflects the number of elements
+ * in the response buffer. The caller should use *num_elems when parsing the
+ * response buffer.
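+ *
+ * ice_get_initial_sw_cfg() below is one caller that follows this pattern.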
+ */
+static enum ice_status
+ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
+ u16 buf_size, u16 *req_desc, u16 *num_elems,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_sw_cfg *cmd;
+ enum ice_status status;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
+ cmd = &desc.params.get_sw_conf;
+ cmd->element = CPU_TO_LE16(*req_desc);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status) {
+ *req_desc = LE16_TO_CPU(cmd->element);
+ *num_elems = LE16_TO_CPU(cmd->num_elems);
+ }
+
+ return status;
+}
+
+/**
+ * ice_alloc_sw - allocate resources specific to switch
+ * @hw: pointer to the HW struct
+ * @ena_stats: true to turn on VEB stats
+ * @shared_res: true for shared resource, false for dedicated resource
+ * @sw_id: switch ID returned
+ * @counter_id: VEB counter ID returned
+ *
+ * allocates switch resources (SWID and VEB counter) (0x0208)
+ */
+enum ice_status
+ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
+ u16 *counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ struct ice_aqc_res_elem *sw_ele;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = sizeof(*sw_buf);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer for switch ID.
+ * The number of resource entries in buffer is passed as 1 since only a
+ * single switch/VEB instance is allocated, and hence a single sw_id
+ * is requested.
+ */
+ sw_buf->num_elems = CPU_TO_LE16(1);
+ sw_buf->res_type =
+ CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
+ (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED));
+
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+
+ if (status)
+ goto ice_alloc_sw_exit;
+
+ sw_ele = &sw_buf->elem[0];
+ *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
+
+ if (ena_stats) {
+ /* Prepare buffer for VEB Counter */
+ enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
+ struct ice_aqc_alloc_free_res_elem *counter_buf;
+ struct ice_aqc_res_elem *counter_ele;
+
+ counter_buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!counter_buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_alloc_sw_exit;
+ }
+
+ /* The number of resource entries in buffer is passed as 1 since
+ * only a single switch/VEB instance is allocated, and hence a
+ * single VEB counter is requested.
+ */
+ counter_buf->num_elems = CPU_TO_LE16(1);
+ counter_buf->res_type =
+ CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED);
+ status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
+ opc, NULL);
+
+ if (status) {
+ ice_free(hw, counter_buf);
+ goto ice_alloc_sw_exit;
+ }
+ counter_ele = &counter_buf->elem[0];
+ *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
+ ice_free(hw, counter_buf);
+ }
+
+ice_alloc_sw_exit:
+ ice_free(hw, sw_buf);
+ return status;
+}
+
+/**
+ * ice_free_sw - free resources specific to switch
+ * @hw: pointer to the HW struct
+ * @sw_id: switch ID returned
+ * @counter_id: VEB counter ID returned
+ *
+ * free switch resources (SWID and VEB counter) (0x0209)
+ *
+ * NOTE: This function frees multiple resources. It continues
+ * releasing other resources even after it encounters an error.
+ * The error code returned is the last error it encountered.
+ */
+enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
+ enum ice_status status, ret_status;
+ u16 buf_len;
+
+ buf_len = sizeof(*sw_buf);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Prepare buffer to free for switch ID res.
+ * The number of resource entries in buffer is passed as 1 since only a
+ * single switch/VEB instance is freed, and hence a single sw_id
+ * is released.
+ */
+ sw_buf->num_elems = CPU_TO_LE16(1);
+ sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
+ sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
+
+ ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+
+ if (ret_status)
+ ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
+
+ /* Prepare buffer to free for VEB Counter resource */
+ counter_buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!counter_buf) {
+ ice_free(hw, sw_buf);
+ return ICE_ERR_NO_MEMORY;
+ }
+
+ /* The number of resource entries in buffer is passed as 1 since only a
+ * single switch/VEB instance is freed, and hence a single VEB counter
+ * is released
+ */
+ counter_buf->num_elems = CPU_TO_LE16(1);
+ counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
+ counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
+
+ status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "VEB counter resource could not be freed\n");
+ ret_status = status;
+ }
+
+ ice_free(hw, counter_buf);
+ ice_free(hw, sw_buf);
+ return ret_status;
+}
+
+/**
+ * ice_aq_add_vsi
+ * @hw: pointer to the HW struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware (0x0210)
+ */
+enum ice_status
+ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_update_free_vsi_resp *res;
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.vsi_cmd;
+ res = &desc.params.add_update_free_vsi_res;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
+
+ if (!vsi_ctx->alloc_from_pool)
+ cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
+ ICE_AQ_VSI_IS_VALID);
+ cmd->vf_id = vsi_ctx->vf_num;
+
+ cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cd);
+
+ if (!status) {
+ vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
+ vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_free_vsi
+ * @hw: pointer to the HW struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
+ * @cd: pointer to command details structure or NULL
+ *
+ * Free VSI context info from hardware (0x0213)
+ */
+enum ice_status
+ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_update_free_vsi_resp *resp;
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.vsi_cmd;
+ resp = &desc.params.add_update_free_vsi_res;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
+
+ cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
+ if (keep_vsi_alloc)
+ cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status) {
+ vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_update_vsi
+ * @hw: pointer to the HW struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update VSI context in the hardware (0x0211)
+ */
+enum ice_status
+ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_update_free_vsi_resp *resp;
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.vsi_cmd;
+ resp = &desc.params.add_update_free_vsi_res;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
+
+ cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cd);
+
+ if (!status) {
+ vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+ }
+
+ return status;
+}
+
+/**
+ * ice_is_vsi_valid - check whether the VSI is valid or not
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ *
+ * check whether the VSI is valid or not
+ */
+bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
+{
+ return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
+}
+
+/**
+ * ice_get_hw_vsi_num - return the HW VSI number
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ *
+ * return the HW VSI number
+ * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
+ */
+u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
+{
+ return hw->vsi_ctx[vsi_handle]->vsi_num;
+}
+
+/**
+ * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ *
+ * return the VSI context entry for a given VSI handle
+ */
+struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
+}
+
+/**
+ * ice_save_vsi_ctx - save the VSI context for a given VSI handle
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @vsi: VSI context pointer
+ *
+ * save the VSI context entry for a given VSI handle
+ */
+static void
+ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
+{
+ hw->vsi_ctx[vsi_handle] = vsi;
+}
+
+/**
+ * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ */
+static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_vsi_ctx *vsi;
+ u8 i;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi)
+ return;
+ ice_for_each_traffic_class(i) {
+ if (vsi->lan_q_ctx[i]) {
+ ice_free(hw, vsi->lan_q_ctx[i]);
+ vsi->lan_q_ctx[i] = NULL;
+ }
+ }
+}
+
+/**
+ * ice_clear_vsi_ctx - clear the VSI context entry
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ *
+ * clear the VSI context entry
+ */
+static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_vsi_ctx *vsi;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (vsi) {
+ ice_clear_vsi_q_ctx(hw, vsi_handle);
+ ice_free(hw, vsi);
+ hw->vsi_ctx[vsi_handle] = NULL;
+ }
+}
+
+/**
+ * ice_clear_all_vsi_ctx - clear all the VSI context entries
+ * @hw: pointer to the HW struct
+ */
+void ice_clear_all_vsi_ctx(struct ice_hw *hw)
+{
+ u16 i;
+
+ for (i = 0; i < ICE_MAX_VSI; i++)
+ ice_clear_vsi_ctx(hw, i);
+}
+
+/**
+ * ice_add_vsi - add VSI context to the hardware and VSI handle list
+ * @hw: pointer to the HW struct
+ * @vsi_handle: unique VSI handle provided by drivers
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware and also add it into the VSI handle list.
+ * If this function gets called after reset for existing VSIs then update
+ * with the new HW VSI number in the corresponding VSI handle list entry.
+ */
+enum ice_status
+ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ struct ice_vsi_ctx *tmp_vsi_ctx;
+ enum ice_status status;
+
+ if (vsi_handle >= ICE_MAX_VSI)
+ return ICE_ERR_PARAM;
+ status = ice_aq_add_vsi(hw, vsi_ctx, cd);
+ if (status)
+ return status;
+ tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!tmp_vsi_ctx) {
+ /* Create a new VSI context */
+ tmp_vsi_ctx = (struct ice_vsi_ctx *)
+ ice_malloc(hw, sizeof(*tmp_vsi_ctx));
+ if (!tmp_vsi_ctx) {
+ ice_aq_free_vsi(hw, vsi_ctx, false, cd);
+ return ICE_ERR_NO_MEMORY;
+ }
+ *tmp_vsi_ctx = *vsi_ctx;
+
+ ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
+ } else {
+ /* update with new HW VSI num */
+ tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_free_vsi - free VSI context from hardware and VSI handle list
+ * @hw: pointer to the HW struct
+ * @vsi_handle: unique VSI handle
+ * @vsi_ctx: pointer to a VSI context struct
+ * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
+ * @cd: pointer to command details structure or NULL
+ *
+ * Free VSI context info from hardware as well as from VSI handle list
+ */
+enum ice_status
+ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ bool keep_vsi_alloc, struct ice_sq_cd *cd)
+{
+ enum ice_status status;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+ status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
+ if (!status)
+ ice_clear_vsi_ctx(hw, vsi_handle);
+ return status;
+}
+
+/**
+ * ice_update_vsi
+ * @hw: pointer to the HW struct
+ * @vsi_handle: unique VSI handle
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update VSI context in the hardware
+ */
+enum ice_status
+ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
+ return ice_aq_update_vsi(hw, vsi_ctx, cd);
+}
+
+/**
+ * ice_aq_get_vsi_params
+ * @hw: pointer to the HW struct
+ * @vsi_ctx: pointer to a VSI context struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get VSI context info from hardware (0x0212)
+ */
+enum ice_status
+ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_get_update_free_vsi *cmd;
+ struct ice_aqc_get_vsi_resp *resp;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.vsi_cmd;
+ resp = &desc.params.get_vsi_resp;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
+
+ cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
+
+ status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cd);
+ if (!status) {
+ vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
+ ICE_AQ_VSI_NUM_M;
+ vsi_ctx->vf_num = resp->vf_id;
+ vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_add_update_mir_rule - add/update a mirror rule
+ * @hw: pointer to the HW struct
+ * @rule_type: Rule Type
+ * @dest_vsi: VSI number to which packets will be mirrored
+ * @count: length of the list
+ * @mr_buf: buffer for list of mirrored VSI numbers
+ * @cd: pointer to command details structure or NULL
+ * @rule_id: Rule ID
+ *
+ * Add/Update Mirror Rule (0x260).
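+ *
+ * Note that *rule_id is both an input and an output: pass
+ * ICE_INVAL_MIRROR_RULE_ID to add a new rule, or pass an existing rule ID to
+ * update that rule. On success, the rule ID assigned by firmware is written
+ * back through *rule_id.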
+ */
+enum ice_status
+ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
+ u16 count, struct ice_mir_rule_buf *mr_buf,
+ struct ice_sq_cd *cd, u16 *rule_id)
+{
+ struct ice_aqc_add_update_mir_rule *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ __le16 *mr_list = NULL;
+ u16 buf_size = 0;
+
+ switch (rule_type) {
+ case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
+ case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
+ /* Make sure count and mr_buf are set for these rule_types */
+ if (!(count && mr_buf))
+ return ICE_ERR_PARAM;
+
+ buf_size = count * sizeof(__le16);
+ mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
+ if (!mr_list)
+ return ICE_ERR_NO_MEMORY;
+ break;
+ case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
+ case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
+ /* Make sure count and mr_buf are not set for these
+ * rule_types
+ */
+ if (count || mr_buf)
+ return ICE_ERR_PARAM;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_SW,
+ "Error due to unsupported rule_type %u\n", rule_type);
+ return ICE_ERR_OUT_OF_RANGE;
+ }
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
+
+ /* Pre-process 'mr_buf' items for add/update of virtual port
+ * ingress/egress mirroring (but not physical port ingress/egress
+ * mirroring)
+ */
+ if (mr_buf) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ u16 id;
+
+ id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
+
+ /* Validate specified VSI number, make sure it is less
+ * than ICE_MAX_VSI, if not return with error.
+ */
+ if (id >= ICE_MAX_VSI) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Error VSI index (%u) out-of-range\n",
+ id);
+ ice_free(hw, mr_list);
+ return ICE_ERR_OUT_OF_RANGE;
+ }
+
+ /* add VSI to mirror rule */
+ if (mr_buf[i].add)
+ mr_list[i] =
+ CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
+ else /* remove VSI from mirror rule */
+ mr_list[i] = CPU_TO_LE16(id);
+ }
+ }
+
+ cmd = &desc.params.add_update_rule;
+ if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
+ cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
+ ICE_AQC_RULE_ID_VALID_M);
+ cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
+ cmd->num_entries = CPU_TO_LE16(count);
+ cmd->dest = CPU_TO_LE16(dest_vsi);
+
+ status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
+ if (!status)
+ *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
+
+ ice_free(hw, mr_list);
+
+ return status;
+}
+
+/**
+ * ice_aq_delete_mir_rule - delete a mirror rule
+ * @hw: pointer to the HW struct
+ * @rule_id: Mirror rule ID (to be deleted)
+ * @keep_allocd: if set, the VSI stays part of the PF allocated res,
+ * otherwise it is returned to the shared pool
+ * @cd: pointer to command details structure or NULL
+ *
+ * Delete Mirror Rule (0x261).
+ */
+enum ice_status
+ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_delete_mir_rule *cmd;
+ struct ice_aq_desc desc;
+
+ /* rule_id should be in the range 0...63 */
+ if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
+ return ICE_ERR_OUT_OF_RANGE;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
+
+ cmd = &desc.params.del_rule;
+ rule_id |= ICE_AQC_RULE_ID_VALID_M;
+ cmd->rule_id = CPU_TO_LE16(rule_id);
+
+ if (keep_allocd)
+ cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_alloc_free_vsi_list
+ * @hw: pointer to the HW struct
+ * @vsi_list_id: VSI list ID returned or used for lookup
+ * @lkup_type: switch rule filter lookup type
+ * @opc: switch rules population command type - pass in the command opcode
+ *
+ * allocates or frees a VSI list resource
+ */
+static enum ice_status
+ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
+ enum ice_sw_lkup_type lkup_type,
+ enum ice_adminq_opc opc)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ struct ice_aqc_res_elem *vsi_ele;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = sizeof(*sw_buf);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+ sw_buf->num_elems = CPU_TO_LE16(1);
+
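+	/* VLAN lookups are backed by a VSI prune list resource; all other
+	 * supported lookup types use a regular VSI list resource.
+	 */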
+ if (lkup_type == ICE_SW_LKUP_MAC ||
+ lkup_type == ICE_SW_LKUP_MAC_VLAN ||
+ lkup_type == ICE_SW_LKUP_ETHERTYPE ||
+ lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ lkup_type == ICE_SW_LKUP_PROMISC ||
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_LAST) {
+ sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
+ } else if (lkup_type == ICE_SW_LKUP_VLAN) {
+ sw_buf->res_type =
+ CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
+ } else {
+ status = ICE_ERR_PARAM;
+ goto ice_aq_alloc_free_vsi_list_exit;
+ }
+
+ if (opc == ice_aqc_opc_free_res)
+ sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
+
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
+ if (status)
+ goto ice_aq_alloc_free_vsi_list_exit;
+
+ if (opc == ice_aqc_opc_alloc_res) {
+ vsi_ele = &sw_buf->elem[0];
+ *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
+ }
+
+ice_aq_alloc_free_vsi_list_exit:
+ ice_free(hw, sw_buf);
+ return status;
+}
+
+/**
+ * ice_aq_set_storm_ctrl - Sets storm control configuration
+ * @hw: pointer to the HW struct
+ * @bcast_thresh: represents the upper threshold for broadcast storm control
+ * @mcast_thresh: represents the upper threshold for multicast storm control
+ * @ctl_bitmask: bitmask of storm control knobs
+ *
+ * Sets the storm control configuration (0x0280)
+ */
+enum ice_status
+ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
+ u32 ctl_bitmask)
+{
+ struct ice_aqc_storm_cfg *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.storm_conf;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
+
+ cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
+ cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
+ cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_storm_ctrl - gets storm control configuration
+ * @hw: pointer to the HW struct
+ * @bcast_thresh: represents the upper threshold for broadcast storm control
+ * @mcast_thresh: represents the upper threshold for multicast storm control
+ * @ctl_bitmask: bitmask of storm control knobs
+ *
+ * Gets the storm control configuration (0x0281)
+ */
+enum ice_status
+ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
+ u32 *ctl_bitmask)
+{
+ enum ice_status status;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
+
+ if (bcast_thresh)
+ *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
+ ICE_AQ_THRESHOLD_M;
+ if (mcast_thresh)
+ *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
+ ICE_AQ_THRESHOLD_M;
+ if (ctl_bitmask)
+ *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_sw_rules - add/update/remove switch rules
+ * @hw: pointer to the HW struct
+ * @rule_list: pointer to switch rule population list
+ * @rule_list_sz: total size of the rule list in bytes
+ * @num_rules: number of switch rules in the rule_list
+ * @opc: switch rules population command type - pass in the command opcode
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
+ */
+static enum ice_status
+ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
+ u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (opc != ice_aqc_opc_add_sw_rules &&
+ opc != ice_aqc_opc_update_sw_rules &&
+ opc != ice_aqc_opc_remove_sw_rules)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opc);
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+ desc.params.sw_rules.num_rules_fltr_entry_index =
+ CPU_TO_LE16(num_rules);
+ return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
+}
+
+/**
+ * ice_init_port_info - Initialize port_info with switch configuration data
+ * @pi: pointer to port_info
+ * @vsi_port_num: VSI number or port number
+ * @type: Type of switch element (port or VSI)
+ * @swid: switch ID of the switch the element is attached to
+ * @pf_vf_num: PF or VF number
+ * @is_vf: true if the element is a VF, false otherwise
+ */
+static void
+ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
+ u16 swid, u16 pf_vf_num, bool is_vf)
+{
+ switch (type) {
+ case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
+ pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
+ pi->sw_id = swid;
+ pi->pf_vf_num = pf_vf_num;
+ pi->is_vf = is_vf;
+ pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
+ pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
+ break;
+ default:
+ ice_debug(pi->hw, ICE_DBG_SW,
+ "incorrect VSI/port type received\n");
+ break;
+ }
+}
+
+/**
+ * ice_get_initial_sw_cfg - Get initial port and default VSI data
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
+{
+ struct ice_aqc_get_sw_cfg_resp *rbuf;
+ enum ice_status status;
+ u8 num_total_ports;
+ u16 req_desc = 0;
+ u16 num_elems;
+ u8 j = 0;
+ u16 i;
+
+ num_total_ports = 1;
+
+ rbuf = (struct ice_aqc_get_sw_cfg_resp *)
+ ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
+
+ if (!rbuf)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Multiple calls to ice_aq_get_sw_cfg may be required
+ * to get all the switch configuration information. The need
+ * for additional calls is indicated by ice_aq_get_sw_cfg
+ * writing a non-zero value in req_desc
+ */
+ do {
+ status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
+ &req_desc, &num_elems, NULL);
+
+ if (status)
+ break;
+
+ for (i = 0; i < num_elems; i++) {
+ struct ice_aqc_get_sw_cfg_resp_elem *ele;
+ u16 pf_vf_num, swid, vsi_port_num;
+ bool is_vf = false;
+ u8 res_type;
+
+ ele = rbuf[i].elements;
+ vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
+ ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
+
+ pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
+ ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
+
+ swid = LE16_TO_CPU(ele->swid);
+
+ if (LE16_TO_CPU(ele->pf_vf_num) &
+ ICE_AQC_GET_SW_CONF_RESP_IS_VF)
+ is_vf = true;
+
+ res_type = (u8)(LE16_TO_CPU(ele->vsi_port_num) >>
+ ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
+
+ switch (res_type) {
+ case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
+ case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
+ if (j == num_total_ports) {
+ ice_debug(hw, ICE_DBG_SW,
+ "more ports than expected\n");
+ status = ICE_ERR_CFG;
+ goto out;
+ }
+ ice_init_port_info(hw->port_info,
+ vsi_port_num, res_type, swid,
+ pf_vf_num, is_vf);
+ j++;
+ break;
+ default:
+ break;
+ }
+ }
+ } while (req_desc && !status);
+
+out:
+ ice_free(hw, (void *)rbuf);
+ return status;
+}
+
+/**
+ * ice_fill_sw_info - Helper function to populate lb_en and lan_en
+ * @hw: pointer to the hardware structure
+ * @fi: filter info structure to fill/update
+ *
+ * This helper function populates the lb_en and lan_en elements of the provided
+ * ice_fltr_info struct using the switch's type and characteristics of the
+ * switch rule being configured.
+ */
+static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
+{
+ fi->lb_en = false;
+ fi->lan_en = false;
+ if ((fi->flag & ICE_FLTR_TX) &&
+ (fi->fltr_act == ICE_FWD_TO_VSI ||
+ fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+ fi->fltr_act == ICE_FWD_TO_Q ||
+ fi->fltr_act == ICE_FWD_TO_QGRP)) {
+ /* Setting LB for prune actions will result in replicated
+ * packets to the internal switch that will be dropped.
+ */
+ if (fi->lkup_type != ICE_SW_LKUP_VLAN)
+ fi->lb_en = true;
+
+ /* Set lan_en to TRUE if
+ * 1. The switch is a VEB AND
+		 * 2. Any one of the following is true:
+ * 2.1 The lookup is a directional lookup like ethertype,
+ * promiscuous, ethertype-MAC, promiscuous-VLAN
+ * and default-port OR
+ * 2.2 The lookup is VLAN, OR
+ * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
+ * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
+ *
+ * OR
+ *
+ * The switch is a VEPA.
+ *
+ * In all other cases, the LAN enable has to be set to false.
+ */
+ if (hw->evb_veb) {
+ if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC ||
+ fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ fi->lkup_type == ICE_SW_LKUP_DFLT ||
+ fi->lkup_type == ICE_SW_LKUP_VLAN ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC &&
+ !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+ !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
+ fi->lan_en = true;
+ } else {
+ fi->lan_en = true;
+ }
+ }
+}
+
+/**
+ * ice_fill_sw_rule - Helper function to fill switch rule structure
+ * @hw: pointer to the hardware structure
+ * @f_info: entry containing packet forwarding information
+ * @s_rule: switch rule structure to be filled in based on mac_entry
+ * @opc: switch rules population command type - pass in the command opcode
+ */
+static void
+ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
+ struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
+{
+ u16 vlan_id = ICE_MAX_VLAN_ID + 1;
+ void *daddr = NULL;
+ u16 eth_hdr_sz;
+ u8 *eth_hdr;
+ u32 act = 0;
+ __be16 *off;
+ u8 q_rgn;
+
+ if (opc == ice_aqc_opc_remove_sw_rules) {
+ s_rule->pdata.lkup_tx_rx.act = 0;
+ s_rule->pdata.lkup_tx_rx.index =
+ CPU_TO_LE16(f_info->fltr_rule_id);
+ s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ return;
+ }
+
+ eth_hdr_sz = sizeof(dummy_eth_header);
+ eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
+
+ /* initialize the ether header with a dummy header */
+ ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
+ ice_fill_sw_info(hw, f_info);
+
+ switch (f_info->fltr_act) {
+ case ICE_FWD_TO_VSI:
+ act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
+ ICE_SINGLE_ACT_VSI_ID_M;
+ if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ case ICE_FWD_TO_VSI_LIST:
+ act |= ICE_SINGLE_ACT_VSI_LIST;
+ act |= (f_info->fwd_id.vsi_list_id <<
+ ICE_SINGLE_ACT_VSI_LIST_ID_S) &
+ ICE_SINGLE_ACT_VSI_LIST_ID_M;
+ if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ case ICE_FWD_TO_Q:
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ case ICE_FWD_TO_QGRP:
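+		/* The queue region is encoded as log2 of the queue group size */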
+ q_rgn = f_info->qgrp_size > 0 ?
+ (u8)ice_ilog2(f_info->qgrp_size) : 0;
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
+ ICE_SINGLE_ACT_Q_REGION_M;
+ break;
+ default:
+ return;
+ }
+
+ if (f_info->lb_en)
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ if (f_info->lan_en)
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+
+ switch (f_info->lkup_type) {
+ case ICE_SW_LKUP_MAC:
+ daddr = f_info->l_data.mac.mac_addr;
+ break;
+ case ICE_SW_LKUP_VLAN:
+ vlan_id = f_info->l_data.vlan.vlan_id;
+ if (f_info->fltr_act == ICE_FWD_TO_VSI ||
+ f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
+ act |= ICE_SINGLE_ACT_PRUNE;
+ act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
+ }
+ break;
+ case ICE_SW_LKUP_ETHERTYPE_MAC:
+ daddr = f_info->l_data.ethertype_mac.mac_addr;
+ /* fall-through */
+ case ICE_SW_LKUP_ETHERTYPE:
+ off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
+ *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
+ break;
+ case ICE_SW_LKUP_MAC_VLAN:
+ daddr = f_info->l_data.mac_vlan.mac_addr;
+ vlan_id = f_info->l_data.mac_vlan.vlan_id;
+ break;
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ vlan_id = f_info->l_data.mac_vlan.vlan_id;
+ /* fall-through */
+ case ICE_SW_LKUP_PROMISC:
+ daddr = f_info->l_data.mac_vlan.mac_addr;
+ break;
+ default:
+ break;
+ }
+
+ s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
+ CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
+ CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
+
+ /* Recipe set depending on lookup type */
+ s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
+ s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
+ s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
+
+ if (daddr)
+ ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
+ ICE_NONDMA_TO_NONDMA);
+
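+	/* Program the VLAN ID only if one within the valid range was set by
+	 * one of the VLAN-aware lookup types above.
+	 */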
+ if (!(vlan_id > ICE_MAX_VLAN_ID)) {
+ off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
+ *off = CPU_TO_BE16(vlan_id);
+ }
+
+ /* Create the switch rule with the final dummy Ethernet header */
+ if (opc != ice_aqc_opc_update_sw_rules)
+ s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
+}
+
+/**
+ * ice_add_marker_act
+ * @hw: pointer to the hardware structure
+ * @m_ent: the management entry for which sw marker needs to be added
+ * @sw_marker: sw marker to tag the Rx descriptor with
+ * @l_id: large action resource ID
+ *
+ * Create a large action to hold software marker and update the switch rule
+ * entry pointed by m_ent with newly created large action
+ */
+static enum ice_status
+ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
+ u16 sw_marker, u16 l_id)
+{
+ struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
+ /* For software marker we need 3 large actions
+ * 1. FWD action: FWD TO VSI or VSI LIST
+ * 2. GENERIC VALUE action to hold the profile ID
+ * 3. GENERIC VALUE action to hold the software marker ID
+ */
+ const u16 num_lg_acts = 3;
+ enum ice_status status;
+ u16 lg_act_size;
+ u16 rules_size;
+ u32 act;
+ u16 id;
+
+ if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+
+ /* Create two back-to-back switch rules and submit them to the HW using
+ * one memory buffer:
+ * 1. Large Action
+ * 2. Look up Tx Rx
+ */
+ lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
+ rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+ lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
+ if (!lg_act)
+ return ICE_ERR_NO_MEMORY;
+
+ rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
+
+ /* Fill in the first switch rule i.e. large action */
+ lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
+ lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
+ lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
+
+ /* First action VSI forwarding or VSI list forwarding depending on how
+ * many VSIs
+ */
+ id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
+ m_ent->fltr_info.fwd_id.hw_vsi_id;
+
+ act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
+ act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
+ ICE_LG_ACT_VSI_LIST_ID_M;
+ if (m_ent->vsi_count > 1)
+ act |= ICE_LG_ACT_VSI_LIST;
+ lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
+
+ /* Second action descriptor type */
+ act = ICE_LG_ACT_GENERIC;
+
+ act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+ lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
+
+ act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
+ ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
+
+ /* Third action Marker value */
+ act |= ICE_LG_ACT_GENERIC;
+ act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
+ ICE_LG_ACT_GENERIC_VALUE_M;
+
+ lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
+
+ /* call the fill switch rule to fill the lookup Tx Rx structure */
+ ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
+ ice_aqc_opc_update_sw_rules);
+
+ /* Update the action to point to the large action ID */
+ rx_tx->pdata.lkup_tx_rx.act =
+ CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
+ ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
+ ICE_SINGLE_ACT_PTR_VAL_M));
+
+ /* Use the filter rule ID of the previously created rule with single
+ * act. Once the update happens, hardware will treat this as large
+ * action
+ */
+ rx_tx->pdata.lkup_tx_rx.index =
+ CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
+
+ status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
+ ice_aqc_opc_update_sw_rules, NULL);
+ if (!status) {
+ m_ent->lg_act_idx = l_id;
+ m_ent->sw_marker_id = sw_marker;
+ }
+
+ ice_free(hw, lg_act);
+ return status;
+}
+
+/**
+ * ice_add_counter_act - add/update filter rule with counter action
+ * @hw: pointer to the hardware structure
+ * @m_ent: the management entry for which counter needs to be added
+ * @counter_id: VLAN counter ID returned as part of allocate resource
+ * @l_id: large action resource ID
+ */
+static enum ice_status
+ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
+ u16 counter_id, u16 l_id)
+{
+ struct ice_aqc_sw_rules_elem *lg_act;
+ struct ice_aqc_sw_rules_elem *rx_tx;
+ enum ice_status status;
+ /* 2 actions will be added while adding a large action counter */
+ const int num_acts = 2;
+ u16 lg_act_size;
+ u16 rules_size;
+ u16 f_rule_id;
+ u32 act;
+ u16 id;
+
+ if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+
+ /* Create two back-to-back switch rules and submit them to the HW using
+ * one memory buffer:
+ * 1. Large Action
+ * 2. Look up Tx Rx
+ */
+ lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
+ rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+ lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
+ rules_size);
+ if (!lg_act)
+ return ICE_ERR_NO_MEMORY;
+
+ rx_tx = (struct ice_aqc_sw_rules_elem *)
+ ((u8 *)lg_act + lg_act_size);
+
+ /* Fill in the first switch rule i.e. large action */
+ lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
+ lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
+ lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
+
+ /* First action VSI forwarding or VSI list forwarding depending on how
+ * many VSIs
+ */
+ id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
+ m_ent->fltr_info.fwd_id.hw_vsi_id;
+
+ act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
+ act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
+ ICE_LG_ACT_VSI_LIST_ID_M;
+ if (m_ent->vsi_count > 1)
+ act |= ICE_LG_ACT_VSI_LIST;
+ lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
+
+ /* Second action counter ID */
+ act = ICE_LG_ACT_STAT_COUNT;
+ act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
+ ICE_LG_ACT_STAT_COUNT_M;
+ lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
+
+ /* call the fill switch rule to fill the lookup Tx Rx structure */
+ ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
+ ice_aqc_opc_update_sw_rules);
+
+ act = ICE_SINGLE_ACT_PTR;
+ act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
+ rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
+
+ /* Use the filter rule ID of the previously created rule with single
+ * act. Once the update happens, hardware will treat this as large
+ * action
+ */
+ f_rule_id = m_ent->fltr_info.fltr_rule_id;
+ rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
+
+ status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
+ ice_aqc_opc_update_sw_rules, NULL);
+ if (!status) {
+ m_ent->lg_act_idx = l_id;
+ m_ent->counter_index = counter_id;
+ }
+
+ ice_free(hw, lg_act);
+ return status;
+}
+
+/**
+ * ice_create_vsi_list_map
+ * @hw: pointer to the hardware structure
+ * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
+ * @num_vsi: number of VSI handles in the array
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
+ *
+ * Helper function to create a new entry of VSI list ID to VSI mapping
+ * using the given VSI list ID
+ */
+static struct ice_vsi_list_map_info *
+ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
+ u16 vsi_list_id)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_vsi_list_map_info *v_map;
+ int i;
+
+ v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
+ sizeof(*v_map));
+ if (!v_map)
+ return NULL;
+
+ v_map->vsi_list_id = vsi_list_id;
+ v_map->ref_cnt = 1;
+ for (i = 0; i < num_vsi; i++)
+ ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
+
+ LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
+ return v_map;
+}
+
+/**
+ * ice_update_vsi_list_rule
+ * @hw: pointer to the hardware structure
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
+ * @remove: Boolean value to indicate if this is a remove action
+ * @opc: switch rules population command type - pass in the command opcode
+ * @lkup_type: lookup type of the filter
+ *
+ * Call AQ command to add a new switch rule or update existing switch rule
+ * using the given VSI list ID
+ */
+static enum ice_status
+ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
+ u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
+ enum ice_sw_lkup_type lkup_type)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status status;
+ u16 s_rule_size;
+ u16 rule_type;
+ int i;
+
+ if (!num_vsi)
+ return ICE_ERR_PARAM;
+
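+	/* VLAN lookups are tracked with a prune list rule; all other supported
+	 * lookup types use a VSI list set/clear rule.
+	 */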
+ if (lkup_type == ICE_SW_LKUP_MAC ||
+ lkup_type == ICE_SW_LKUP_MAC_VLAN ||
+ lkup_type == ICE_SW_LKUP_ETHERTYPE ||
+ lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ lkup_type == ICE_SW_LKUP_PROMISC ||
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_LAST)
+ rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_VSI_LIST_SET;
+ else if (lkup_type == ICE_SW_LKUP_VLAN)
+ rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
+ ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
+ else
+ return ICE_ERR_PARAM;
+
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
+ s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ for (i = 0; i < num_vsi; i++) {
+ if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
+ status = ICE_ERR_PARAM;
+ goto exit;
+ }
+ /* AQ call requires hw_vsi_id(s) */
+ s_rule->pdata.vsi_list.vsi[i] =
+ CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
+ }
+
+ s_rule->type = CPU_TO_LE16(rule_type);
+ s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
+ s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
+
+ status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
+
+exit:
+ ice_free(hw, s_rule);
+ return status;
+}
+
+/**
+ * ice_create_vsi_list_rule - Creates and populates a VSI list rule
+ * @hw: pointer to the HW struct
+ * @vsi_handle_arr: array of VSI handles to form a VSI list
+ * @num_vsi: number of VSI handles in the array
+ * @vsi_list_id: stores the ID of the VSI list to be created
+ * @lkup_type: switch rule filter's lookup type
+ */
+static enum ice_status
+ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
+ u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
+{
+ enum ice_status status;
+
+ status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
+ ice_aqc_opc_alloc_res);
+ if (status)
+ return status;
+
+ /* Update the newly created VSI list to include the specified VSIs */
+ return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
+ *vsi_list_id, false,
+ ice_aqc_opc_add_sw_rules, lkup_type);
+}
+
+/**
+ * ice_create_pkt_fwd_rule
+ * @hw: pointer to the hardware structure
+ * @recp_list: corresponding filter management list
+ * @f_entry: entry containing packet forwarding information
+ *
+ * Create switch rule with given filter information and add an entry
+ * to the corresponding filter management list to track this switch rule
+ * and VSI mapping
+ */
+static enum ice_status
+ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
+ struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status status;
+
+ s_rule = (struct ice_aqc_sw_rules_elem *)
+ ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ fm_entry = (struct ice_fltr_mgmt_list_entry *)
+ ice_malloc(hw, sizeof(*fm_entry));
+ if (!fm_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_create_pkt_fwd_rule_exit;
+ }
+
+ fm_entry->fltr_info = f_entry->fltr_info;
+
+ /* Initialize all the fields for the management entry */
+ fm_entry->vsi_count = 1;
+ fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
+ fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
+ fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
+
+ ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
+ ice_aqc_opc_add_sw_rules);
+
+ status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ ice_aqc_opc_add_sw_rules, NULL);
+ if (status) {
+ ice_free(hw, fm_entry);
+ goto ice_create_pkt_fwd_rule_exit;
+ }
+
+ f_entry->fltr_info.fltr_rule_id =
+ LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
+ fm_entry->fltr_info.fltr_rule_id =
+ LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
+
+ /* The book keeping entries will get removed when base driver
+ * calls remove filter AQ command
+ */
+ LIST_ADD(&fm_entry->list_entry, &recp_list->filt_rules);
+
+ice_create_pkt_fwd_rule_exit:
+ ice_free(hw, s_rule);
+ return status;
+}
+
+/**
+ * ice_update_pkt_fwd_rule
+ * @hw: pointer to the hardware structure
+ * @f_info: filter information for switch rule
+ *
+ * Call AQ command to update a previously created switch rule with a
+ * VSI list ID
+ */
+static enum ice_status
+ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status status;
+
+ s_rule = (struct ice_aqc_sw_rules_elem *)
+ ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
+
+ s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
+
+ /* Update switch rule with new rule set to forward VSI list */
+ status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ ice_aqc_opc_update_sw_rules, NULL);
+
+ ice_free(hw, s_rule);
+ return status;
+}
+
+/**
+ * ice_update_sw_rule_bridge_mode
+ * @hw: pointer to the HW struct
+ *
+ * Updates unicast switch filter rules based on VEB/VEPA mode
+ */
+enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ enum ice_status status = ICE_SUCCESS;
+ struct LIST_HEAD_TYPE *rule_head;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+
+ ice_acquire_lock(rule_lock);
+ LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
+ list_entry) {
+ struct ice_fltr_info *fi = &fm_entry->fltr_info;
+ u8 *addr = fi->l_data.mac.mac_addr;
+
+ /* Update unicast Tx rules to reflect the selected
+ * VEB/VEPA mode
+ */
+ if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
+ (fi->fltr_act == ICE_FWD_TO_VSI ||
+ fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+ fi->fltr_act == ICE_FWD_TO_Q ||
+ fi->fltr_act == ICE_FWD_TO_QGRP)) {
+ status = ice_update_pkt_fwd_rule(hw, fi);
+ if (status)
+ break;
+ }
+ }
+
+ ice_release_lock(rule_lock);
+
+ return status;
+}
+
+/**
+ * ice_add_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @m_entry: pointer to current filter management list entry
+ * @cur_fltr: filter information from the book keeping entry
+ * @new_fltr: filter information with the new VSI to be added
+ *
+ * Call AQ command to add or update previously created VSI list with new VSI.
+ *
+ * Helper function to do the bookkeeping associated with adding filter
+ * information. The bookkeeping algorithm is described below:
+ * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
+ *	if only one VSI has been added so far
+ *		Allocate a new VSI list and add the old and new VSIs
+ *		to this list using the switch rule command
+ *		Update the previously created switch rule with the
+ *		newly created VSI list ID
+ *	if a VSI list was previously created
+ *		Add the new VSI to the previously created VSI list set
+ *		using the update switch rule command
+ */
+static enum ice_status
+ice_add_update_vsi_list(struct ice_hw *hw,
+ struct ice_fltr_mgmt_list_entry *m_entry,
+ struct ice_fltr_info *cur_fltr,
+ struct ice_fltr_info *new_fltr)
+{
+ enum ice_status status = ICE_SUCCESS;
+ u16 vsi_list_id = 0;
+
+ if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
+ cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
+ return ICE_ERR_NOT_IMPL;
+
+ if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
+ new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
+ (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
+ cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
+ return ICE_ERR_NOT_IMPL;
+
+ if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
+ /* Only one entry existed in the mapping and it was not already
+ * a part of a VSI list. So, create a VSI list with the old and
+ * new VSIs.
+ */
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
+ return ICE_ERR_ALREADY_EXISTS;
+
+ vsi_handle_arr[0] = cur_fltr->vsi_handle;
+ vsi_handle_arr[1] = new_fltr->vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+ &vsi_list_id,
+ new_fltr->lkup_type);
+ if (status)
+ return status;
+
+ tmp_fltr = *new_fltr;
+ tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ /* Update the previous switch rule of "MAC forward to VSI" to
+ * "MAC fwd to VSI list"
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status)
+ return status;
+
+ cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
+ cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
+ m_entry->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ vsi_list_id);
+
+ /* If this entry was large action then the large action needs
+ * to be updated to point to FWD to VSI list
+ */
+ if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
+ status =
+ ice_add_marker_act(hw, m_entry,
+ m_entry->sw_marker_id,
+ m_entry->lg_act_idx);
+ } else {
+ u16 vsi_handle = new_fltr->vsi_handle;
+ enum ice_adminq_opc opcode;
+
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_CFG;
+
+ /* A rule already exists with the new VSI being added */
+ if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
+ return ICE_SUCCESS;
+
+ /* Update the previously created VSI list set with
+ * the new VSI ID passed in
+ */
+ vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
+ opcode = ice_aqc_opc_update_sw_rules;
+
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
+ vsi_list_id, false, opcode,
+ new_fltr->lkup_type);
+ /* update VSI list mapping info with new VSI ID */
+ if (!status)
+ ice_set_bit(vsi_handle,
+ m_entry->vsi_list_info->vsi_map);
+ }
+ if (!status)
+ m_entry->vsi_count++;
+ return status;
+}
+
+/**
+ * ice_find_rule_entry - Search a rule entry
+ * @list_head: head of rule list
+ * @f_info: rule information
+ *
+ * Helper function to search for a given rule entry
+ * Returns pointer to entry storing the rule if found
+ */
+static struct ice_fltr_mgmt_list_entry *
+ice_find_rule_entry(struct LIST_HEAD_TYPE *list_head,
+ struct ice_fltr_info *f_info)
+{
+ struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
+
+ LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
+ list_entry) {
+ if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
+ sizeof(f_info->l_data)) &&
+ f_info->flag == list_itr->fltr_info.flag) {
+ ret = list_itr;
+ break;
+ }
+ }
+ return ret;
+}
+
+/**
+ * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
+ * @recp_list: recipe list whose VSI lists are to be searched
+ * @vsi_handle: VSI handle to be found in VSI list
+ * @vsi_list_id: VSI list ID found containing vsi_handle
+ *
+ * Helper function to search for a VSI list with a single entry that contains
+ * the given VSI handle. This can be extended further to search VSI lists with
+ * more than 1 vsi_count. Returns a pointer to the VSI list entry if found.
+ */
+static struct ice_vsi_list_map_info *
+ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
+ u16 *vsi_list_id)
+{
+ struct ice_vsi_list_map_info *map_info = NULL;
+ struct LIST_HEAD_TYPE *list_head;
+
+ list_head = &recp_list->filt_rules;
+ if (recp_list->adv_rule) {
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+
+ LIST_FOR_EACH_ENTRY(list_itr, list_head,
+ ice_adv_fltr_mgmt_list_entry,
+ list_entry) {
+ if (list_itr->vsi_list_info) {
+ map_info = list_itr->vsi_list_info;
+ if (ice_is_bit_set(map_info->vsi_map,
+ vsi_handle)) {
+ *vsi_list_id = map_info->vsi_list_id;
+ return map_info;
+ }
+ }
+ }
+ } else {
+ struct ice_fltr_mgmt_list_entry *list_itr;
+
+ LIST_FOR_EACH_ENTRY(list_itr, list_head,
+ ice_fltr_mgmt_list_entry,
+ list_entry) {
+ if (list_itr->vsi_count == 1 &&
+ list_itr->vsi_list_info) {
+ map_info = list_itr->vsi_list_info;
+ if (ice_is_bit_set(map_info->vsi_map,
+ vsi_handle)) {
+ *vsi_list_id = map_info->vsi_list_id;
+ return map_info;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * ice_add_rule_internal - add rule for a given lookup type
+ * @hw: pointer to the hardware structure
+ * @recp_list: recipe list for which rule has to be added
+ * @lport: logical port number on which the rule is added
+ * @f_entry: structure containing MAC forwarding information
+ *
+ * Adds or updates the rule lists for a given recipe
+ */
+static enum ice_status
+ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
+ u8 lport, struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_fltr_info *new_fltr, *cur_fltr;
+ struct ice_fltr_mgmt_list_entry *m_entry;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = ICE_SUCCESS;
+
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+
+ /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
+ if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+
+ rule_lock = &recp_list->filt_rule_lock;
+
+ ice_acquire_lock(rule_lock);
+ new_fltr = &f_entry->fltr_info;
+ if (new_fltr->flag & ICE_FLTR_RX)
+ new_fltr->src = lport;
+ else if (new_fltr->flag & ICE_FLTR_TX)
+ new_fltr->src =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+
+ m_entry = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
+ if (!m_entry) {
+ status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
+ goto exit_add_rule_internal;
+ }
+
+ cur_fltr = &m_entry->fltr_info;
+ status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
+
+exit_add_rule_internal:
+ ice_release_lock(rule_lock);
+ return status;
+}
+
+/**
+ * ice_remove_vsi_list_rule
+ * @hw: pointer to the hardware structure
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
+ * @lkup_type: switch rule filter lookup type
+ *
+ * The VSI list should be emptied before this function is called to remove the
+ * VSI list.
+ */
+static enum ice_status
+ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
+ enum ice_sw_lkup_type lkup_type)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status status;
+ u16 s_rule_size;
+
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
+ s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
+ s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
+
+ /* Free the vsi_list resource that we allocated. It is assumed that the
+ * list is empty at this point.
+ */
+ status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
+ ice_aqc_opc_free_res);
+
+ ice_free(hw, s_rule);
+ return status;
+}
+
+/**
+ * ice_rem_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle of the VSI to remove
+ * @fm_list: filter management entry for which the VSI list management needs to
+ * be done
+ */
+static enum ice_status
+ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_fltr_mgmt_list_entry *fm_list)
+{
+ enum ice_sw_lkup_type lkup_type;
+ enum ice_status status = ICE_SUCCESS;
+ u16 vsi_list_id;
+
+ if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
+ fm_list->vsi_count == 0)
+ return ICE_ERR_PARAM;
+
+ /* A rule with the VSI being removed does not exist */
+ if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ lkup_type = fm_list->fltr_info.lkup_type;
+ vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ fm_list->vsi_count--;
+ ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
+
+ if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
+ struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list->vsi_list_info;
+ u16 rem_vsi_handle;
+
+ rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (!ice_is_vsi_valid(hw, rem_vsi_handle))
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Make sure VSI list is empty before removing it below */
+ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
+ vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ tmp_fltr_info.vsi_handle = rem_vsi_handle;
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ tmp_fltr_info.fwd_id.hw_vsi_id, status);
+ return status;
+ }
+
+ fm_list->fltr_info = tmp_fltr_info;
+ }
+
+ if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
+ (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
+ struct ice_vsi_list_map_info *vsi_list_info =
+ fm_list->vsi_list_info;
+
+ /* Remove the VSI list since it is no longer used */
+ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Failed to remove VSI list %d, error %d\n",
+ vsi_list_id, status);
+ return status;
+ }
+
+ LIST_DEL(&vsi_list_info->list_entry);
+ ice_free(hw, vsi_list_info);
+ fm_list->vsi_list_info = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * ice_remove_rule_internal - Remove a filter rule of a given type
+ *
+ * @hw: pointer to the hardware structure
+ * @recp_list: recipe list from which the rule needs to be removed
+ * @f_entry: rule entry containing filter information
+ */
+static enum ice_status
+ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
+ struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_fltr_mgmt_list_entry *list_elem;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = ICE_SUCCESS;
+ bool remove_rule = false;
+ u16 vsi_handle;
+
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+
+ rule_lock = &recp_list->filt_rule_lock;
+ ice_acquire_lock(rule_lock);
+ list_elem = ice_find_rule_entry(&recp_list->filt_rules,
+ &f_entry->fltr_info);
+ if (!list_elem) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ }
+
+ if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
+ remove_rule = true;
+ } else if (!list_elem->vsi_list_info) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ } else if (list_elem->vsi_list_info->ref_cnt > 1) {
+ /* a ref_cnt > 1 indicates that the vsi_list is being
+ * shared by multiple rules. Decrement the ref_cnt and
+ * remove this rule, but do not modify the list, as it
+ * is in-use by other rules.
+ */
+ list_elem->vsi_list_info->ref_cnt--;
+ remove_rule = true;
+ } else {
+ /* a ref_cnt of 1 indicates the vsi_list is only used
+ * by one rule. However, the original removal request is only
+ * for a single VSI. Update the vsi_list first, and only
+ * remove the rule if there are no further VSIs in this list.
+ */
+ vsi_handle = f_entry->fltr_info.vsi_handle;
+ status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ if (status)
+ goto exit;
+ /* if VSI count goes to zero after updating the VSI list */
+ if (list_elem->vsi_count == 0)
+ remove_rule = true;
+ }
+
+ if (remove_rule) {
+ /* Remove the lookup rule */
+ struct ice_aqc_sw_rules_elem *s_rule;
+
+ s_rule = (struct ice_aqc_sw_rules_elem *)
+ ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
+ if (!s_rule) {
+ status = ICE_ERR_NO_MEMORY;
+ goto exit;
+ }
+
+ ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
+ ice_aqc_opc_remove_sw_rules);
+
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+
+ /* Remove the bookkeeping entry from the list */
+ ice_free(hw, s_rule);
+
+ if (status)
+ goto exit;
+
+ LIST_DEL(&list_elem->list_entry);
+ ice_free(hw, list_elem);
+ }
+exit:
+ ice_release_lock(rule_lock);
+ return status;
+}
+
+/**
+ * ice_aq_get_res_alloc - get allocated resources
+ * @hw: pointer to the HW struct
+ * @num_entries: pointer to u16 to store the number of resource entries returned
+ * @buf: pointer to user-supplied buffer
+ * @buf_size: size of buf
+ * @cd: pointer to command details structure or NULL
+ *
+ * The user-supplied buffer must be large enough to store the resource
+ * information for all resource types. Each resource type is an
+ * ice_aqc_get_res_resp_data_elem structure.
+ */
+enum ice_status
+ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_res_alloc *resp;
+ enum ice_status status;
+ struct ice_aq_desc desc;
+
+ if (!buf)
+ return ICE_ERR_BAD_PTR;
+
+ if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
+ return ICE_ERR_INVAL_SIZE;
+
+ resp = &desc.params.get_res;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+
+ if (!status && num_entries)
+ *num_entries = LE16_TO_CPU(resp->resp_elem_num);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_res_descs - get allocated resource descriptors
+ * @hw: pointer to the hardware structure
+ * @num_entries: number of resource entries in buffer
+ * @buf: Indirect buffer to hold data parameters and response
+ * @buf_size: size of buffer for indirect commands
+ * @res_type: resource type
+ * @res_shared: is resource shared
+ * @desc_id: input - first desc ID to start; output - next desc ID
+ * @cd: pointer to command details structure or NULL
+ */
+enum ice_status
+ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
+ struct ice_aqc_get_allocd_res_desc_resp *buf,
+ u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_get_allocd_res_desc *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ cmd = &desc.params.get_res_desc;
+
+ if (!buf)
+ return ICE_ERR_PARAM;
+
+ if (buf_size != (num_entries * sizeof(*buf)))
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
+
+ cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | (res_shared ?
+ ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
+ cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status)
+ *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
+
+ return status;
+}
+
+/**
+ * ice_add_mac_rule - Add a MAC address based filter rule
+ * @hw: pointer to the hardware structure
+ * @m_list: list of MAC addresses and forwarding information
+ * @sw: pointer to the switch info struct for which the function adds the rule
+ * @lport: logical port number on which the function adds the rule
+ *
+ * IMPORTANT: When the ucast_shared flag is set to false and m_list has
+ * multiple unicast addresses, the function assumes that all the
+ * addresses are unique in a given add_mac call. It doesn't check for
+ * duplicates in this case; removing duplicates from a given list is the
+ * responsibility of the caller of this function.
+ */
+static enum ice_status
+ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
+ struct ice_switch_info *sw, u8 lport)
+{
+ struct ice_sw_recipe *recp_list = &sw->recp_list[ICE_SW_LKUP_MAC];
+ struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
+ struct ice_fltr_list_entry *m_list_itr;
+ struct LIST_HEAD_TYPE *rule_head;
+ u16 total_elem_left, s_rule_size;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = ICE_SUCCESS;
+ u16 num_unicast = 0;
+ u8 elem_sent;
+
+ s_rule = NULL;
+ rule_lock = &recp_list->filt_rule_lock;
+ rule_head = &recp_list->filt_rules;
+
+ LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
+ list_entry) {
+ u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
+ u16 vsi_handle;
+ u16 hw_vsi_id;
+
+ m_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ vsi_handle = m_list_itr->fltr_info.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ /* update the src in case it is VSI num */
+ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
+ return ICE_ERR_PARAM;
+ m_list_itr->fltr_info.src = hw_vsi_id;
+ if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
+ IS_ZERO_ETHER_ADDR(add))
+ return ICE_ERR_PARAM;
+ if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
+ /* Don't overwrite the unicast address */
+ ice_acquire_lock(rule_lock);
+ if (ice_find_rule_entry(rule_head,
+ &m_list_itr->fltr_info)) {
+ ice_release_lock(rule_lock);
+ return ICE_ERR_ALREADY_EXISTS;
+ }
+ ice_release_lock(rule_lock);
+ num_unicast++;
+ } else if (IS_MULTICAST_ETHER_ADDR(add) ||
+ (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
+ m_list_itr->status =
+ ice_add_rule_internal(hw, recp_list, lport,
+ m_list_itr);
+ if (m_list_itr->status)
+ return m_list_itr->status;
+ }
+ }
+
+ ice_acquire_lock(rule_lock);
+ /* Exit if no suitable entries were found for adding bulk switch rule */
+ if (!num_unicast) {
+ status = ICE_SUCCESS;
+ goto ice_add_mac_exit;
+ }
+
+ /* Allocate switch rule buffer for the bulk update for unicast */
+ s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+ s_rule = (struct ice_aqc_sw_rules_elem *)
+ ice_calloc(hw, num_unicast, s_rule_size);
+ if (!s_rule) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_add_mac_exit;
+ }
+
+ r_iter = s_rule;
+ LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
+ list_entry) {
+ struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
+
+ if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
+ ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
+ ice_aqc_opc_add_sw_rules);
+ r_iter = (struct ice_aqc_sw_rules_elem *)
+ ((u8 *)r_iter + s_rule_size);
+ }
+ }
+
+ /* Call AQ bulk switch rule update for all unicast addresses */
+ r_iter = s_rule;
+ /* Call AQ switch rule in AQ_MAX chunk */
+ for (total_elem_left = num_unicast; total_elem_left > 0;
+ total_elem_left -= elem_sent) {
+ struct ice_aqc_sw_rules_elem *entry = r_iter;
+
+ elem_sent = MIN_T(u8, total_elem_left,
+ (ICE_AQ_MAX_BUF_LEN / s_rule_size));
+ status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
+ elem_sent, ice_aqc_opc_add_sw_rules,
+ NULL);
+ if (status)
+ goto ice_add_mac_exit;
+ r_iter = (struct ice_aqc_sw_rules_elem *)
+ ((u8 *)r_iter + (elem_sent * s_rule_size));
+ }
+
+ /* Fill up rule ID based on the value returned from FW */
+ r_iter = s_rule;
+ LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
+ list_entry) {
+ struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+
+ if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
+ f_info->fltr_rule_id =
+ LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
+ f_info->fltr_act = ICE_FWD_TO_VSI;
+ /* Create an entry to track this MAC address */
+ fm_entry = (struct ice_fltr_mgmt_list_entry *)
+ ice_malloc(hw, sizeof(*fm_entry));
+ if (!fm_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto ice_add_mac_exit;
+ }
+ fm_entry->fltr_info = *f_info;
+ fm_entry->vsi_count = 1;
+ /* The book keeping entries will get removed when
+ * base driver calls remove filter AQ command
+ */
+
+ LIST_ADD(&fm_entry->list_entry, rule_head);
+ r_iter = (struct ice_aqc_sw_rules_elem *)
+ ((u8 *)r_iter + s_rule_size);
+ }
+ }
+
+ice_add_mac_exit:
+ ice_release_lock(rule_lock);
+ if (s_rule)
+ ice_free(hw, s_rule);
+ return status;
+}
+
+/**
+ * ice_add_mac - Add a MAC address based filter rule
+ * @hw: pointer to the hardware structure
+ * @m_list: list of MAC addresses and forwarding information
+ *
+ * Adds a MAC rule for the logical port from the HW struct
+ */
+enum ice_status
+ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
+{
+ if (!m_list || !hw)
+ return ICE_ERR_PARAM;
+
+ return ice_add_mac_rule(hw, m_list, hw->switch_info,
+ hw->port_info->lport);
+}
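+
+/* Caller-side usage sketch for ice_add_mac(); this is illustrative only, and
+ * the local variable names (mac_list, entry, addr) are assumptions:
+ *
+ *	struct ice_fltr_list_entry entry = { 0 };
+ *	struct LIST_HEAD_TYPE mac_list;
+ *
+ *	INIT_LIST_HEAD(&mac_list);
+ *	entry.fltr_info.lkup_type = ICE_SW_LKUP_MAC;
+ *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ *	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
+ *	entry.fltr_info.vsi_handle = vsi_handle;
+ *	ice_memcpy(entry.fltr_info.l_data.mac.mac_addr, addr, ETH_ALEN,
+ *		   ICE_NONDMA_TO_NONDMA);
+ *	LIST_ADD(&entry.list_entry, &mac_list);
+ *	status = ice_add_mac(hw, &mac_list);
+ */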
+
+/**
+ * ice_add_vlan_internal - Add one VLAN based filter rule
+ * @hw: pointer to the hardware structure
+ * @recp_list: recipe list for which rule has to be added
+ * @f_entry: filter entry containing one VLAN information
+ */
+static enum ice_status
+ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
+ struct ice_fltr_list_entry *f_entry)
+{
+ struct ice_fltr_mgmt_list_entry *v_list_itr;
+ struct ice_fltr_info *new_fltr, *cur_fltr;
+ enum ice_sw_lkup_type lkup_type;
+ u16 vsi_list_id = 0, vsi_handle;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = ICE_SUCCESS;
+
+ if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
+ return ICE_ERR_PARAM;
+
+ f_entry->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
+ new_fltr = &f_entry->fltr_info;
+
+ /* VLAN ID should only be 12 bits */
+ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
+ return ICE_ERR_PARAM;
+
+ if (new_fltr->src_id != ICE_SRC_ID_VSI)
+ return ICE_ERR_PARAM;
+
+ new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
+ lkup_type = new_fltr->lkup_type;
+ vsi_handle = new_fltr->vsi_handle;
+ rule_lock = &recp_list->filt_rule_lock;
+ ice_acquire_lock(rule_lock);
+ v_list_itr = ice_find_rule_entry(&recp_list->filt_rules, new_fltr);
+ if (!v_list_itr) {
+ struct ice_vsi_list_map_info *map_info = NULL;
+
+ if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
+ /* All VLAN pruning rules use a VSI list. Check if
+ * there is already a VSI list containing VSI that we
+ * want to add. If found, use the same vsi_list_id for
+ * this new VLAN rule or else create a new list.
+ */
+ map_info = ice_find_vsi_list_entry(recp_list,
+ vsi_handle,
+ &vsi_list_id);
+ if (!map_info) {
+ status = ice_create_vsi_list_rule(hw,
+ &vsi_handle,
+ 1,
+ &vsi_list_id,
+ lkup_type);
+ if (status)
+ goto exit;
+ }
+ /* Convert the action to forwarding to a VSI list. */
+ new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
+ new_fltr->fwd_id.vsi_list_id = vsi_list_id;
+ }
+
+ status = ice_create_pkt_fwd_rule(hw, recp_list, f_entry);
+ if (!status) {
+ v_list_itr = ice_find_rule_entry(&recp_list->filt_rules,
+ new_fltr);
+ if (!v_list_itr) {
+ status = ICE_ERR_DOES_NOT_EXIST;
+ goto exit;
+ }
+ /* reuse VSI list for new rule and increment ref_cnt */
+ if (map_info) {
+ v_list_itr->vsi_list_info = map_info;
+ map_info->ref_cnt++;
+ } else {
+ v_list_itr->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle,
+ 1, vsi_list_id);
+ }
+ }
+ } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
+ /* Update the existing VSI list to add the new VSI ID only if it is
+ * used by one VLAN rule.
+ */
+ cur_fltr = &v_list_itr->fltr_info;
+ status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
+ new_fltr);
+ } else {
+ /* The VLAN rule exists and the VSI list used by this rule is
+ * referenced by more than one VLAN rule. Create a new VSI list
+ * that appends the new VSI to the previous one, and update the
+ * existing VLAN rule to point to the new VSI list ID.
+ */
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
+ u16 cur_handle;
+
+ /* The current implementation only supports reusing a VSI list with
+ * a VSI count of one. We should never hit the condition below.
+ */
+ if (v_list_itr->vsi_count > 1 &&
+ v_list_itr->vsi_list_info->ref_cnt > 1) {
+ ice_debug(hw, ICE_DBG_SW,
+ "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
+ status = ICE_ERR_CFG;
+ goto exit;
+ }
+
+ cur_handle =
+ ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_handle == vsi_handle) {
+ status = ICE_ERR_ALREADY_EXISTS;
+ goto exit;
+ }
+
+ vsi_handle_arr[0] = cur_handle;
+ vsi_handle_arr[1] = vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+ &vsi_list_id, lkup_type);
+ if (status)
+ goto exit;
+
+ tmp_fltr = v_list_itr->fltr_info;
+ tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ /* Update the previous switch rule to a new VSI list which
+ * includes current VSI that is requested
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status)
+ goto exit;
+
+ /* Before overriding the VSI list map info, decrement the ref_cnt of
+ * the previous VSI list.
+ */
+ v_list_itr->vsi_list_info->ref_cnt--;
+
+ /* now update to newly created list */
+ v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
+ v_list_itr->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ vsi_list_id);
+ v_list_itr->vsi_count++;
+ }
+
+exit:
+ ice_release_lock(rule_lock);
+ return status;
+}
+
+/**
+ * ice_add_vlan_rule - Add VLAN based filter rule
+ * @hw: pointer to the hardware structure
+ * @v_list: list of VLAN entries and forwarding information
+ * @sw: pointer to the switch info struct for which the function adds the rule
+ */
+static enum ice_status
+ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
+ struct ice_switch_info *sw)
+{
+ struct ice_fltr_list_entry *v_list_itr;
+ struct ice_sw_recipe *recp_list;
+
+ recp_list = &sw->recp_list[ICE_SW_LKUP_VLAN];
+ LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
+ list_entry) {
+ if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
+ return ICE_ERR_PARAM;
+ v_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ v_list_itr->status = ice_add_vlan_internal(hw, recp_list,
+ v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_add_vlan - Add a VLAN based filter rule
+ * @hw: pointer to the hardware structure
+ * @v_list: list of VLAN and forwarding information
+ *
+ * Adds a VLAN rule for the logical port from the HW struct
+ */
+enum ice_status
+ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
+{
+ if (!v_list || !hw)
+ return ICE_ERR_PARAM;
+
+ return ice_add_vlan_rule(hw, v_list, hw->switch_info);
+}
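+
+/* Caller-side usage sketch for ice_add_vlan(); illustrative only, and the
+ * local variable names (vlan_list, entry, vlan_id) are assumptions. Note the
+ * VLAN ID must fit in 12 bits and src_id must be ICE_SRC_ID_VSI:
+ *
+ *	struct ice_fltr_list_entry entry = { 0 };
+ *	struct LIST_HEAD_TYPE vlan_list;
+ *
+ *	INIT_LIST_HEAD(&vlan_list);
+ *	entry.fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
+ *	entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ *	entry.fltr_info.src_id = ICE_SRC_ID_VSI;
+ *	entry.fltr_info.vsi_handle = vsi_handle;
+ *	entry.fltr_info.l_data.vlan.vlan_id = vlan_id;
+ *	LIST_ADD(&entry.list_entry, &vlan_list);
+ *	status = ice_add_vlan(hw, &vlan_list);
+ */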
+
+/**
+ * ice_add_eth_mac_rule - Add ethertype and MAC based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype MAC filters; the MAC is optional
+ * @sw: pointer to the switch info struct for which the function adds the rule
+ * @lport: logical port number on which the function adds the rule
+ *
+ * This function requires the caller to populate the entries in
+ * the filter list with the necessary fields (including flags to
+ * indicate Tx or Rx rules).
+ */
+static enum ice_status
+ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
+ struct ice_switch_info *sw, u8 lport)
+{
+ struct ice_fltr_list_entry *em_list_itr;
+
+ LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
+ list_entry) {
+ struct ice_sw_recipe *recp_list;
+ enum ice_sw_lkup_type l_type;
+
+ l_type = em_list_itr->fltr_info.lkup_type;
+ recp_list = &sw->recp_list[l_type];
+
+ if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+ l_type != ICE_SW_LKUP_ETHERTYPE)
+ return ICE_ERR_PARAM;
+
+ em_list_itr->status = ice_add_rule_internal(hw, recp_list,
+ lport,
+ em_list_itr);
+ if (em_list_itr->status)
+ return em_list_itr->status;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_add_eth_mac - Add an ethertype based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype and forwarding information
+ *
+ * Adds an ethertype rule for the logical port from the HW struct
+ */
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
+{
+ if (!em_list || !hw)
+ return ICE_ERR_PARAM;
+
+ return ice_add_eth_mac_rule(hw, em_list, hw->switch_info,
+ hw->port_info->lport);
+}
+
+/**
+ * ice_remove_eth_mac_rule - Remove an ethertype (or MAC) based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype or ethertype MAC entries
+ * @sw: pointer to the switch info struct for which the function removes the rule
+ */
+static enum ice_status
+ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
+ struct ice_switch_info *sw)
+{
+ struct ice_fltr_list_entry *em_list_itr, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
+ list_entry) {
+ struct ice_sw_recipe *recp_list;
+ enum ice_sw_lkup_type l_type;
+
+ l_type = em_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+ l_type != ICE_SW_LKUP_ETHERTYPE)
+ return ICE_ERR_PARAM;
+
+ recp_list = &sw->recp_list[l_type];
+ em_list_itr->status = ice_remove_rule_internal(hw, recp_list,
+ em_list_itr);
+ if (em_list_itr->status)
+ return em_list_itr->status;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_remove_eth_mac - remove an ethertype based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype and forwarding information
+ *
+ */
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
+{
+ if (!em_list || !hw)
+ return ICE_ERR_PARAM;
+
+ return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info);
+}
+
+/**
+ * ice_rem_sw_rule_info
+ * @hw: pointer to the hardware structure
+ * @rule_head: pointer to the switch list structure that we want to delete
+ */
+static void
+ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
+{
+ if (!LIST_EMPTY(rule_head)) {
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct ice_fltr_mgmt_list_entry *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
+ ice_fltr_mgmt_list_entry, list_entry) {
+ LIST_DEL(&entry->list_entry);
+ ice_free(hw, entry);
+ }
+ }
+}
+
+/**
+ * ice_rem_all_sw_rules_info
+ * @hw: pointer to the hardware structure
+ */
+void ice_rem_all_sw_rules_info(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ struct LIST_HEAD_TYPE *rule_head;
+
+ rule_head = &sw->recp_list[i].filt_rules;
+ if (!sw->recp_list[i].adv_rule)
+ ice_rem_sw_rule_info(hw, rule_head);
+ }
+}
+
+/**
+ * ice_cfg_dflt_vsi - change state of VSI to set/clear default
+ * @pi: pointer to the port_info structure
+ * @vsi_handle: VSI handle to set as default
+ * @set: true to add the above mentioned switch rule, false to remove it
+ * @direction: ICE_FLTR_RX or ICE_FLTR_TX
+ *
+ * add filter rule to set/unset given VSI as default VSI for the switch
+ * (represented by swid)
+ */
+enum ice_status
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_fltr_info f_info;
+ struct ice_hw *hw = pi->hw;
+ enum ice_adminq_opc opcode;
+ enum ice_status status;
+ u16 s_rule_size;
+ u16 hw_vsi_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
+
+ f_info.lkup_type = ICE_SW_LKUP_DFLT;
+ f_info.flag = direction;
+ f_info.fltr_act = ICE_FWD_TO_VSI;
+ f_info.fwd_id.hw_vsi_id = hw_vsi_id;
+
+ if (f_info.flag & ICE_FLTR_RX) {
+ f_info.src = pi->lport;
+ f_info.src_id = ICE_SRC_ID_LPORT;
+ if (!set)
+ f_info.fltr_rule_id =
+ pi->dflt_rx_vsi_rule_id;
+ } else if (f_info.flag & ICE_FLTR_TX) {
+ f_info.src_id = ICE_SRC_ID_VSI;
+ f_info.src = hw_vsi_id;
+ if (!set)
+ f_info.fltr_rule_id =
+ pi->dflt_tx_vsi_rule_id;
+ }
+
+ if (set)
+ opcode = ice_aqc_opc_add_sw_rules;
+ else
+ opcode = ice_aqc_opc_remove_sw_rules;
+
+ ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
+
+ status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
+ if (status || !(f_info.flag & ICE_FLTR_TX_RX))
+ goto out;
+ if (set) {
+ u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
+
+ if (f_info.flag & ICE_FLTR_TX) {
+ pi->dflt_tx_vsi_num = hw_vsi_id;
+ pi->dflt_tx_vsi_rule_id = index;
+ } else if (f_info.flag & ICE_FLTR_RX) {
+ pi->dflt_rx_vsi_num = hw_vsi_id;
+ pi->dflt_rx_vsi_rule_id = index;
+ }
+ } else {
+ if (f_info.flag & ICE_FLTR_TX) {
+ pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
+ pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
+ } else if (f_info.flag & ICE_FLTR_RX) {
+ pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
+ pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
+ }
+ }
+
+out:
+ ice_free(hw, s_rule);
+ return status;
+}
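+
+/* Usage sketch for ice_cfg_dflt_vsi(); illustrative only. A VSI is made the
+ * default destination for received traffic on the port, and later cleared
+ * again using the same direction flag:
+ *
+ *	status = ice_cfg_dflt_vsi(pi, vsi_handle, true, ICE_FLTR_RX);
+ *	...
+ *	status = ice_cfg_dflt_vsi(pi, vsi_handle, false, ICE_FLTR_RX);
+ */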
+
+/**
+ * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
+ * @list_head: head of rule list
+ * @f_info: rule information
+ *
+ * Helper function to search for a unicast rule entry - this is to be used
+ * to remove a unicast MAC filter that is not shared with other VSIs on the
+ * PF switch.
+ *
+ * Returns pointer to entry storing the rule if found
+ */
+static struct ice_fltr_mgmt_list_entry *
+ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
+ struct ice_fltr_info *f_info)
+{
+ struct ice_fltr_mgmt_list_entry *list_itr;
+
+ LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
+ list_entry) {
+ if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
+ sizeof(f_info->l_data)) &&
+ f_info->fwd_id.hw_vsi_id ==
+ list_itr->fltr_info.fwd_id.hw_vsi_id &&
+ f_info->flag == list_itr->fltr_info.flag)
+ return list_itr;
+ }
+ return NULL;
+}
+
+/**
+ * ice_remove_mac_rule - remove a MAC based filter rule
+ * @hw: pointer to the hardware structure
+ * @m_list: list of MAC addresses and forwarding information
+ * @recp_list: list from which the function removes the MAC address
+ *
+ * This function removes either a MAC filter rule or a specific VSI from a
+ * VSI list for a multicast MAC address.
+ *
+ * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
+ * ice_add_mac. Caller should be aware that this call will only work if all
+ * the entries passed into m_list were added previously. It will not attempt to
+ * do a partial remove of entries that were found.
+ */
+static enum ice_status
+ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
+ struct ice_sw_recipe *recp_list)
+{
+ struct ice_fltr_list_entry *list_itr, *tmp;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+
+ if (!m_list)
+ return ICE_ERR_PARAM;
+
+ rule_lock = &recp_list->filt_rule_lock;
+ LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
+ list_entry) {
+ enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
+ u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+ u16 vsi_handle;
+
+ if (l_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+
+ vsi_handle = list_itr->fltr_info.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ list_itr->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, vsi_handle);
+ if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
+ /* Don't remove the unicast address that belongs to
+ * another VSI on the switch, since it is not being
+ * shared...
+ */
+ ice_acquire_lock(rule_lock);
+ if (!ice_find_ucast_rule_entry(&recp_list->filt_rules,
+ &list_itr->fltr_info)) {
+ ice_release_lock(rule_lock);
+ return ICE_ERR_DOES_NOT_EXIST;
+ }
+ ice_release_lock(rule_lock);
+ }
+ list_itr->status = ice_remove_rule_internal(hw, recp_list,
+ list_itr);
+ if (list_itr->status)
+ return list_itr->status;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_remove_mac - remove a MAC address based filter rule
+ * @hw: pointer to the hardware structure
+ * @m_list: list of MAC addresses and forwarding information
+ *
+ */
+enum ice_status
+ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
+{
+ struct ice_sw_recipe *recp_list;
+
+ recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
+ return ice_remove_mac_rule(hw, m_list, recp_list);
+}
+
+/**
+ * ice_remove_vlan_rule - Remove VLAN based filter rule
+ * @hw: pointer to the hardware structure
+ * @v_list: list of VLAN entries and forwarding information
+ * @recp_list: list from which the function removes the VLAN
+ */
+static enum ice_status
+ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
+ struct ice_sw_recipe *recp_list)
+{
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
+
+ LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
+ list_entry) {
+ enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_VLAN)
+ return ICE_ERR_PARAM;
+ v_list_itr->status = ice_remove_rule_internal(hw, recp_list,
+ v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_remove_vlan - remove a VLAN based filter rule
+ * @hw: pointer to the hardware structure
+ * @v_list: list of VLAN and forwarding information
+ *
+ */
+enum ice_status
+ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
+{
+ struct ice_sw_recipe *recp_list;
+
+ if (!v_list || !hw)
+ return ICE_ERR_PARAM;
+
+ recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_VLAN];
+ return ice_remove_vlan_rule(hw, v_list, recp_list);
+}
+
+/**
+ * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
+ * @fm_entry: filter entry to inspect
+ * @vsi_handle: VSI handle to compare with filter info
+ */
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
+{
+ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ fm_entry->fltr_info.vsi_handle == vsi_handle) ||
+ (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
+ vsi_handle))));
+}
+
+/**
+ * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to remove filters from
+ * @vsi_list_head: pointer to the list to add entry to
+ * @fi: pointer to fltr_info of filter entry to copy & add
+ *
+ * Helper function, used when creating a list of filters to remove from
+ * a specific VSI. The entry added to vsi_list_head is a COPY of the
+ * original filter entry, with the exception of fltr_info.fltr_act and
+ * fltr_info.fwd_id fields. These are set such that later logic can
+ * extract which VSI to remove the fltr from, and pass on that information.
+ */
+static enum ice_status
+ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
+ struct LIST_HEAD_TYPE *vsi_list_head,
+ struct ice_fltr_info *fi)
+{
+ struct ice_fltr_list_entry *tmp;
+
+ /* this memory is freed up in the caller function
+ * once filters for this VSI are removed
+ */
+ tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ tmp->fltr_info = *fi;
+
+ /* Overwrite these fields to indicate which VSI to remove filter from,
+ * so find and remove logic can extract the information from the
+ * list entries. Note that original entries will still have proper
+ * values.
+ */
+ tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ tmp->fltr_info.vsi_handle = vsi_handle;
+ tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ LIST_ADD(&tmp->list_entry, vsi_list_head);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_add_to_vsi_fltr_list - Add VSI filters to the list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to remove filters from
+ * @lkup_list_head: pointer to the list that has certain lookup type filters
+ * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
+ *
+ * Locates all filters in lkup_list_head that are used by the given VSI,
+ * and adds COPIES of those entries to vsi_list_head (intended to be used
+ * to remove the listed filters).
+ * Note that this means all entries in vsi_list_head must be explicitly
+ * deallocated by the caller when done with the list.
+ */
+static enum ice_status
+ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
+ struct LIST_HEAD_TYPE *lkup_list_head,
+ struct LIST_HEAD_TYPE *vsi_list_head)
+{
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ enum ice_status status = ICE_SUCCESS;
+
+ /* check to make sure VSI ID is valid and within boundary */
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
+ ice_fltr_mgmt_list_entry, list_entry) {
+ struct ice_fltr_info *fi;
+
+ fi = &fm_entry->fltr_info;
+ if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+ continue;
+
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
+ vsi_list_head, fi);
+ if (status)
+ return status;
+ }
+ return status;
+}
+
+/**
+ * ice_determine_promisc_mask
+ * @fi: filter info to parse
+ *
+ * Helper function to determine which ICE_PROMISC_ mask corresponds
+ * to a given filter info.
+ */
+static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
+{
+ u16 vid = fi->l_data.mac_vlan.vlan_id;
+ u8 *macaddr = fi->l_data.mac.mac_addr;
+ bool is_tx_fltr = false;
+ u8 promisc_mask = 0;
+
+ if (fi->flag == ICE_FLTR_TX)
+ is_tx_fltr = true;
+
+ if (IS_BROADCAST_ETHER_ADDR(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
+ else if (IS_MULTICAST_ETHER_ADDR(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
+ else if (IS_UNICAST_ETHER_ADDR(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
+ if (vid)
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
+
+ return promisc_mask;
+}
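+
+/* Worked example of the mapping above (illustrative only): a Tx filter whose
+ * MAC address is the broadcast address and whose VLAN ID is non-zero yields
+ * (ICE_PROMISC_BCAST_TX | ICE_PROMISC_VLAN_TX); the same filter in the Rx
+ * direction yields (ICE_PROMISC_BCAST_RX | ICE_PROMISC_VLAN_RX).
+ */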
+
+/**
+ * ice_get_vsi_promisc - get promiscuous mode of given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to retrieve info from
+ * @promisc_mask: pointer to mask to be filled in
+ * @vid: VLAN ID of promisc VLAN VSI
+ */
+enum ice_status
+ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
+ u16 *vid)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct LIST_HEAD_TYPE *rule_head;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ *vid = 0;
+ *promisc_mask = 0;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
+
+ ice_acquire_lock(rule_lock);
+ LIST_FOR_EACH_ENTRY(itr, rule_head,
+ ice_fltr_mgmt_list_entry, list_entry) {
+ /* Continue if this filter doesn't apply to this VSI or the
+ * VSI ID is not in the VSI map for this filter
+ */
+ if (!ice_vsi_uses_fltr(itr, vsi_handle))
+ continue;
+
+ *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
+ }
+ ice_release_lock(rule_lock);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to retrieve info from
+ * @promisc_mask: pointer to mask to be filled in
+ * @vid: VLAN ID of promisc VLAN VSI
+ */
+enum ice_status
+ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
+ u16 *vid)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct LIST_HEAD_TYPE *rule_head;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ *vid = 0;
+ *promisc_mask = 0;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
+
+ ice_acquire_lock(rule_lock);
+ LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
+ list_entry) {
+ /* Continue if this filter doesn't apply to this VSI or the
+ * VSI ID is not in the VSI map for this filter
+ */
+ if (!ice_vsi_uses_fltr(itr, vsi_handle))
+ continue;
+
+ *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
+ }
+ ice_release_lock(rule_lock);
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_remove_promisc - Remove promisc based filter rules
+ * @hw: pointer to the hardware structure
+ * @recp_id: recipe ID for which the rule needs to be removed
+ * @v_list: list of promisc entries
+ */
+static enum ice_status
+ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
+ struct LIST_HEAD_TYPE *v_list)
+{
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
+ struct ice_sw_recipe *recp_list;
+
+ recp_list = &hw->switch_info->recp_list[recp_id];
+ LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
+ list_entry) {
+ v_list_itr->status =
+ ice_remove_rule_internal(hw, recp_list, v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
+ }
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *fm_entry, *tmp;
+ struct LIST_HEAD_TYPE remove_list_head;
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct LIST_HEAD_TYPE *rule_head;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = ICE_SUCCESS;
+ u8 recipe_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ else
+ recipe_id = ICE_SW_LKUP_PROMISC;
+
+ rule_head = &sw->recp_list[recipe_id].filt_rules;
+ rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
+
+ INIT_LIST_HEAD(&remove_list_head);
+
+ ice_acquire_lock(rule_lock);
+ LIST_FOR_EACH_ENTRY(itr, rule_head,
+ ice_fltr_mgmt_list_entry, list_entry) {
+ struct ice_fltr_info *fltr_info;
+ u8 fltr_promisc_mask = 0;
+
+ if (!ice_vsi_uses_fltr(itr, vsi_handle))
+ continue;
+ fltr_info = &itr->fltr_info;
+
+ if (recipe_id == ICE_SW_LKUP_PROMISC_VLAN &&
+ vid != fltr_info->l_data.mac_vlan.vlan_id)
+ continue;
+
+ fltr_promisc_mask |= ice_determine_promisc_mask(fltr_info);
+
+ /* Skip if filter is not completely specified by given mask */
+ if (fltr_promisc_mask & ~promisc_mask)
+ continue;
+
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
+ &remove_list_head,
+ fltr_info);
+ if (status) {
+ ice_release_lock(rule_lock);
+ goto free_fltr_list;
+ }
+ }
+ ice_release_lock(rule_lock);
+
+ status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
+
+free_fltr_list:
+ LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
+ ice_fltr_list_entry, list_entry) {
+ LIST_DEL(&fm_entry->list_entry);
+ ice_free(hw, fm_entry);
+ }
+
+ return status;
+}
+
+/**
+ * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
+{
+ enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
+ struct ice_fltr_list_entry f_list_entry;
+ struct ice_fltr_info new_fltr;
+ enum ice_status status = ICE_SUCCESS;
+ bool is_tx_fltr;
+ u16 hw_vsi_id;
+ int pkt_type;
+ u8 recipe_id;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
+
+ if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
+ new_fltr.l_data.mac_vlan.vlan_id = vid;
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ } else {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
+ recipe_id = ICE_SW_LKUP_PROMISC;
+ }
+
+ /* Separate filters must be set for each direction/packet type
+ * combination, so we will loop over the mask value, store the
+ * individual type, and clear it out in the input mask as it
+ * is found.
+ */
+ while (promisc_mask) {
+ struct ice_sw_recipe *recp_list;
+ u8 *mac_addr;
+
+ pkt_type = 0;
+ is_tx_fltr = false;
+
+ if (promisc_mask & ICE_PROMISC_UCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_RX;
+ pkt_type = UCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_TX;
+ pkt_type = UCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_RX;
+ pkt_type = MCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_TX;
+ pkt_type = MCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_RX;
+ pkt_type = BCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_TX;
+ pkt_type = BCAST_FLTR;
+ is_tx_fltr = true;
+ }
+
+ /* Check for VLAN promiscuous flag */
+ if (promisc_mask & ICE_PROMISC_VLAN_RX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_RX;
+ } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_TX;
+ is_tx_fltr = true;
+ }
+
+ /* Set filter DA based on packet type */
+ mac_addr = new_fltr.l_data.mac.mac_addr;
+ if (pkt_type == BCAST_FLTR) {
+ ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
+ } else if (pkt_type == MCAST_FLTR ||
+ pkt_type == UCAST_FLTR) {
+ /* Use the dummy ether header DA */
+ ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
+ ICE_NONDMA_TO_NONDMA);
+ if (pkt_type == MCAST_FLTR)
+ mac_addr[0] |= 0x1; /* Set multicast bit */
+ }
+
+ /* Need to reset this to zero for all iterations */
+ new_fltr.flag = 0;
+ if (is_tx_fltr) {
+ new_fltr.flag |= ICE_FLTR_TX;
+ new_fltr.src = hw_vsi_id;
+ } else {
+ new_fltr.flag |= ICE_FLTR_RX;
+ new_fltr.src = hw->port_info->lport;
+ }
+
+ new_fltr.fltr_act = ICE_FWD_TO_VSI;
+ new_fltr.vsi_handle = vsi_handle;
+ new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_list_entry.fltr_info = new_fltr;
+ recp_list = &hw->switch_info->recp_list[recipe_id];
+
+ status = ice_add_rule_internal(hw, recp_list,
+ hw->port_info->lport,
+ &f_list_entry);
+ if (status != ICE_SUCCESS)
+ goto set_promisc_exit;
+ }
+
+set_promisc_exit:
+ return status;
+}
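+
+/* Usage sketch for ice_set_vsi_promisc(); illustrative only. Enable unicast
+ * and multicast Rx promiscuous mode on a VSI without VLAN qualification
+ * (vid of 0); the same mask and vid can later be passed to
+ * ice_clear_vsi_promisc() to remove the rules again:
+ *
+ *	status = ice_set_vsi_promisc(hw, vsi_handle,
+ *				     ICE_PROMISC_UCAST_RX |
+ *				     ICE_PROMISC_MCAST_RX, 0);
+ */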
+
+/**
+ * ice_set_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @rm_vlan_promisc: Clear VLANs VSI promisc mode
+ *
+ * Configure VSI with all associated VLANs to given promiscuous mode(s)
+ */
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *list_itr, *tmp;
+ struct LIST_HEAD_TYPE vsi_list_head;
+ struct LIST_HEAD_TYPE *vlan_head;
+ struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
+ enum ice_status status;
+ u16 vlan_id;
+
+ INIT_LIST_HEAD(&vsi_list_head);
+ vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+ ice_acquire_lock(vlan_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
+ &vsi_list_head);
+ ice_release_lock(vlan_lock);
+ if (status)
+ goto free_fltr_list;
+
+ LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
+ list_entry) {
+ vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
+ if (rm_vlan_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ else
+ status = ice_set_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ if (status)
+ break;
+ }
+
+free_fltr_list:
+ LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
+ ice_fltr_list_entry, list_entry) {
+ LIST_DEL(&list_itr->list_entry);
+ ice_free(hw, list_itr);
+ }
+ return status;
+}
+
+/**
+ * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to remove filters from
+ * @recp_list: recipe list from which the function removes the filter
+ * @lkup: switch rule filter lookup type
+ */
+static void
+ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_sw_recipe *recp_list,
+ enum ice_sw_lkup_type lkup)
+{
+ struct ice_fltr_list_entry *fm_entry;
+ struct LIST_HEAD_TYPE remove_list_head;
+ struct LIST_HEAD_TYPE *rule_head;
+ struct ice_fltr_list_entry *tmp;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status;
+
+ INIT_LIST_HEAD(&remove_list_head);
+ rule_lock = &recp_list[lkup].filt_rule_lock;
+ rule_head = &recp_list[lkup].filt_rules;
+ ice_acquire_lock(rule_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
+ &remove_list_head);
+ ice_release_lock(rule_lock);
+ if (status)
+ return;
+
+ switch (lkup) {
+ case ICE_SW_LKUP_MAC:
+ ice_remove_mac_rule(hw, &remove_list_head, &recp_list[lkup]);
+ break;
+ case ICE_SW_LKUP_VLAN:
+ ice_remove_vlan_rule(hw, &remove_list_head, &recp_list[lkup]);
+ break;
+ case ICE_SW_LKUP_PROMISC:
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ ice_remove_promisc(hw, lkup, &remove_list_head);
+ break;
+ case ICE_SW_LKUP_MAC_VLAN:
+ ice_debug(hw, ICE_DBG_SW, "MAC VLAN look up is not supported yet\n");
+ break;
+ case ICE_SW_LKUP_ETHERTYPE:
+ case ICE_SW_LKUP_ETHERTYPE_MAC:
+ ice_remove_eth_mac(hw, &remove_list_head);
+ break;
+ case ICE_SW_LKUP_DFLT:
+ ice_debug(hw, ICE_DBG_SW,
+ "Remove filters for this lookup type hasn't been implemented yet\n");
+ break;
+ case ICE_SW_LKUP_LAST:
+ ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
+ break;
+ }
+
+ LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
+ ice_fltr_list_entry, list_entry) {
+ LIST_DEL(&fm_entry->list_entry);
+ ice_free(hw, fm_entry);
+ }
+}
+
+/**
+ * ice_remove_vsi_fltr_rule - Remove all filters for a VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to remove filters from
+ * @sw: pointer to switch info struct
+ */
+static void
+ice_remove_vsi_fltr_rule(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_switch_info *sw)
+{
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle,
+ sw->recp_list, ICE_SW_LKUP_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle,
+ sw->recp_list, ICE_SW_LKUP_MAC_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle,
+ sw->recp_list, ICE_SW_LKUP_PROMISC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle,
+ sw->recp_list, ICE_SW_LKUP_VLAN);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle,
+ sw->recp_list, ICE_SW_LKUP_DFLT);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle,
+ sw->recp_list, ICE_SW_LKUP_ETHERTYPE);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle,
+ sw->recp_list, ICE_SW_LKUP_ETHERTYPE_MAC);
+ ice_remove_vsi_lkup_fltr(hw, vsi_handle,
+ sw->recp_list, ICE_SW_LKUP_PROMISC_VLAN);
+}
+
+/**
+ * ice_remove_vsi_fltr - Remove all filters for a VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to remove filters from
+ */
+void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
+{
+ ice_remove_vsi_fltr_rule(hw, vsi_handle, hw->switch_info);
+}
+
+/**
+ * ice_alloc_res_cntr - allocate a resource counter
+ * @hw: pointer to the hardware structure
+ * @type: type of resource
+ * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
+ * @num_items: number of entries requested for FD resource type
+ * @counter_id: counter index returned by AQ call
+ */
+static enum ice_status
+ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 *counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ /* Allocate resource */
+ buf_len = sizeof(*buf);
+ buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->num_elems = CPU_TO_LE16(num_items);
+ buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | alloc_shared);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (status)
+ goto exit;
+
+ *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
+
+exit:
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_free_res_cntr - free resource counter
+ * @hw: pointer to the hardware structure
+ * @type: type of resource
+ * @alloc_shared: if set, the resource is shared; otherwise it is dedicated
+ * @num_items: number of entries to be freed for FD resource type
+ * @counter_id: counter ID resource which needs to be freed
+ */
+static enum ice_status
+ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
+ u16 counter_id)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ /* Free resource */
+ buf_len = sizeof(*buf);
+ buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ buf->num_elems = CPU_TO_LE16(num_items);
+ buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) | alloc_shared);
+ buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
+
+ status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
+ ice_aqc_opc_free_res, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW,
+ "counter resource could not be freed\n");
+
+ ice_free(hw, buf);
+ return status;
+}
+
+/**
+ * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
+ * @hw: pointer to the hardware structure
+ * @counter_id: returns counter index
+ */
+enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
+{
+ return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
+ counter_id);
+}
+
+/**
+ * ice_free_vlan_res_counter - Free counter resource for VLAN type
+ * @hw: pointer to the hardware structure
+ * @counter_id: counter index to be freed
+ */
+enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
+{
+ return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
+ ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
+ counter_id);
+}
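+
+/* Usage sketch; illustrative only. A VLAN counter allocated with the helper
+ * above is released with the matching free call once it is no longer used:
+ *
+ *	u16 counter_id;
+ *
+ *	status = ice_alloc_vlan_res_counter(hw, &counter_id);
+ *	if (!status) {
+ *		...
+ *		status = ice_free_vlan_res_counter(hw, counter_id);
+ *	}
+ */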
+
+/**
+ * ice_alloc_res_lg_act - add large action resource
+ * @hw: pointer to the hardware structure
+ * @l_id: large action ID to fill it in
+ * @num_acts: number of actions to hold with a large action entry
+ */
+static enum ice_status
+ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
+ return ICE_ERR_PARAM;
+
+ /* Allocate resource for large action */
+ buf_len = sizeof(*sw_buf);
+ sw_buf = (struct ice_aqc_alloc_free_res_elem *)
+ ice_malloc(hw, buf_len);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+
+ sw_buf->num_elems = CPU_TO_LE16(1);
+
+ /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
+ * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
+ * If num_acts is greater than 2, then use
+ * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
+ * The num_acts cannot exceed 4. This was ensured at the
+ * beginning of the function.
+ */
+ if (num_acts == 1)
+ sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
+ else if (num_acts == 2)
+ sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
+ else
+ sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
+
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (!status)
+ *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
+
+ ice_free(hw, sw_buf);
+ return status;
+}
+
+/**
+ * ice_add_mac_with_sw_marker - add filter with sw marker
+ * @hw: pointer to the hardware structure
+ * @f_info: filter info structure containing the MAC filter information
+ * @sw_marker: sw marker to tag the Rx descriptor with
+ */
+enum ice_status
+ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
+ u16 sw_marker)
+{
+ struct ice_fltr_mgmt_list_entry *m_entry;
+ struct ice_fltr_list_entry fl_info;
+ struct ice_sw_recipe *recp_list;
+ struct LIST_HEAD_TYPE l_head;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status ret;
+ bool entry_exists;
+ u16 lg_act_id;
+
+ if (f_info->fltr_act != ICE_FWD_TO_VSI)
+ return ICE_ERR_PARAM;
+
+ if (f_info->lkup_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+
+ if (sw_marker == ICE_INVAL_SW_MARKER_ID)
+ return ICE_ERR_PARAM;
+
+ if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
+ return ICE_ERR_PARAM;
+ f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
+
+ /* Add the filter if it doesn't exist, so that adding the large
+ * action always results in an update
+ */
+
+ INIT_LIST_HEAD(&l_head);
+ fl_info.fltr_info = *f_info;
+ LIST_ADD(&fl_info.list_entry, &l_head);
+
+ entry_exists = false;
+ ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
+ hw->port_info->lport);
+ if (ret == ICE_ERR_ALREADY_EXISTS)
+ entry_exists = true;
+ else if (ret)
+ return ret;
+
+ recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
+ rule_lock = &recp_list->filt_rule_lock;
+ ice_acquire_lock(rule_lock);
+ /* Get the book keeping entry for the filter */
+ m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
+ if (!m_entry)
+ goto exit_error;
+
+ /* If counter action was enabled for this rule then don't enable
+ * sw marker large action
+ */
+ if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
+ ret = ICE_ERR_PARAM;
+ goto exit_error;
+ }
+
+ /* if same marker was added before */
+ if (m_entry->sw_marker_id == sw_marker) {
+ ret = ICE_ERR_ALREADY_EXISTS;
+ goto exit_error;
+ }
+
+ /* Allocate a hardware table entry to hold the large action. Three
+ * actions are needed for a marker-based large action.
+ */
+ ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
+ if (ret)
+ goto exit_error;
+
+ if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
+ goto exit_error;
+
+ /* Update the switch rule to add the marker action */
+ ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
+ if (!ret) {
+ ice_release_lock(rule_lock);
+ return ret;
+ }
+
+exit_error:
+ ice_release_lock(rule_lock);
+ /* only remove entry if it did not exist previously */
+ if (!entry_exists)
+ ret = ice_remove_mac(hw, &l_head);
+
+ return ret;
+}
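+
+/* Caller-side usage sketch for ice_add_mac_with_sw_marker(); illustrative
+ * only, and the marker value and variable names are assumptions. The caller
+ * fills an ice_fltr_info exactly as for ice_add_mac() and supplies the
+ * software marker to tag matching Rx descriptors with:
+ *
+ *	f_info.lkup_type = ICE_SW_LKUP_MAC;
+ *	f_info.fltr_act = ICE_FWD_TO_VSI;
+ *	f_info.vsi_handle = vsi_handle;
+ *	ice_memcpy(f_info.l_data.mac.mac_addr, addr, ETH_ALEN,
+ *		   ICE_NONDMA_TO_NONDMA);
+ *	status = ice_add_mac_with_sw_marker(hw, &f_info, 0x1234);
+ */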
+
+/**
+ * ice_add_mac_with_counter - add filter with counter enabled
+ * @hw: pointer to the hardware structure
+ * @f_info: pointer to filter info structure containing the MAC filter
+ * information
+ */
+enum ice_status
+ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
+{
+ struct ice_fltr_mgmt_list_entry *m_entry;
+ struct ice_fltr_list_entry fl_info;
+ struct ice_sw_recipe *recp_list;
+ struct LIST_HEAD_TYPE l_head;
+ struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status ret;
+ bool entry_exist;
+ u16 counter_id;
+ u16 lg_act_id;
+
+ if (f_info->fltr_act != ICE_FWD_TO_VSI)
+ return ICE_ERR_PARAM;
+
+ if (f_info->lkup_type != ICE_SW_LKUP_MAC)
+ return ICE_ERR_PARAM;
+
+ if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
+ return ICE_ERR_PARAM;
+ f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
+ recp_list = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC];
+
+ entry_exist = false;
+
+ rule_lock = &recp_list->filt_rule_lock;
+
+ /* Add the filter if it doesn't already exist so that adding the
+ * large action always results in an update
+ */
+ INIT_LIST_HEAD(&l_head);
+
+ fl_info.fltr_info = *f_info;
+ LIST_ADD(&fl_info.list_entry, &l_head);
+
+ ret = ice_add_mac_rule(hw, &l_head, hw->switch_info,
+ hw->port_info->lport);
+ if (ret == ICE_ERR_ALREADY_EXISTS)
+ entry_exist = true;
+ else if (ret)
+ return ret;
+
+ ice_acquire_lock(rule_lock);
+ m_entry = ice_find_rule_entry(&recp_list->filt_rules, f_info);
+ if (!m_entry) {
+ ret = ICE_ERR_BAD_PTR;
+ goto exit_error;
+ }
+
+ /* Don't enable counter for a filter for which sw marker was enabled */
+ if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
+ ret = ICE_ERR_PARAM;
+ goto exit_error;
+ }
+
+ /* If a counter was already enabled, there is no need to add it again */
+ if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
+ ret = ICE_ERR_ALREADY_EXISTS;
+ goto exit_error;
+ }
+
+ /* Allocate a hardware table entry for the VLAN counter */
+ ret = ice_alloc_vlan_res_counter(hw, &counter_id);
+ if (ret)
+ goto exit_error;
+
+ /* Allocate a hardware table entry to hold large act. Two actions for
+ * counter based large action
+ */
+ ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
+ if (ret)
+ goto exit_error;
+
+ if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
+ goto exit_error;
+
+ /* Update the switch rule to add the counter action */
+ ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
+ if (!ret) {
+ ice_release_lock(rule_lock);
+ return ret;
+ }
+
+exit_error:
+ ice_release_lock(rule_lock);
+ /* only remove entry if it did not exist previously */
+ if (!entry_exist)
+ ret = ice_remove_mac(hw, &l_head);
+
+ return ret;
+}
+
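+/* Illustrative usage sketch: assuming the caller holds a valid vsi_handle
+ * and a marker_id other than ICE_INVAL_SW_MARKER_ID, a software-marker MAC
+ * rule could be requested roughly as
+ *
+ *	enum ice_status status;
+ *	struct ice_fltr_info f_info = { 0 };
+ *
+ *	f_info.lkup_type = ICE_SW_LKUP_MAC;
+ *	f_info.fltr_act = ICE_FWD_TO_VSI;
+ *	f_info.vsi_handle = vsi_handle;
+ *	status = ice_add_mac_with_sw_marker(hw, &f_info, marker_id);
+ *
+ * (the MAC address itself is carried in the filter's lookup data).
+ * ice_add_mac_with_counter() takes the same ice_fltr_info, but the marker
+ * and counter large actions are mutually exclusive for a given rule.
+ */
+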
+/**
+ * ice_replay_fltr - Replay all the filters stored by a specific list head
+ * @hw: pointer to the hardware structure
+ * @recp_id: Recipe ID for which rules need to be replayed
+ * @list_head: list for which filters need to be replayed
+ */
+static enum ice_status
+ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
+{
+ struct ice_fltr_mgmt_list_entry *itr;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_sw_recipe *recp_list;
+ u8 lport = hw->port_info->lport;
+ struct LIST_HEAD_TYPE l_head;
+
+ if (LIST_EMPTY(list_head))
+ return status;
+
+ recp_list = &hw->switch_info->recp_list[recp_id];
+ /* Move entries from the given list_head to a temporary l_head so that
+ * they can be replayed. Otherwise, when trying to re-add the same
+ * filter, the function would return ICE_ERR_ALREADY_EXISTS
+ */
+ LIST_REPLACE_INIT(list_head, &l_head);
+
+ /* Mark the given list_head empty by reinitializing it so filters
+ * could be added again by *handler
+ */
+ LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
+ list_entry) {
+ struct ice_fltr_list_entry f_entry;
+
+ f_entry.fltr_info = itr->fltr_info;
+ if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
+ status = ice_add_rule_internal(hw, recp_list, lport,
+ &f_entry);
+ if (status != ICE_SUCCESS)
+ goto end;
+ continue;
+ }
+
+ /* Add a filter per VSI separately */
+ while (1) {
+ u16 vsi_handle;
+
+ vsi_handle =
+ ice_find_first_bit(itr->vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ break;
+
+ ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
+ f_entry.fltr_info.vsi_handle = vsi_handle;
+ f_entry.fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, vsi_handle);
+ f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ if (recp_id == ICE_SW_LKUP_VLAN)
+ status = ice_add_vlan_internal(hw, recp_list,
+ &f_entry);
+ else
+ status = ice_add_rule_internal(hw, recp_list,
+ lport,
+ &f_entry);
+ if (status != ICE_SUCCESS)
+ goto end;
+ }
+ }
+end:
+ /* Clear the filter management list */
+ ice_rem_sw_rule_info(hw, &l_head);
+ return status;
+}
+
+/**
+ * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
+ * @hw: pointer to the hardware structure
+ *
+ * NOTE: This function does not clean up partially added filters on error.
+ * It is up to caller of the function to issue a reset or fail early.
+ */
+enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ enum ice_status status = ICE_SUCCESS;
+ u8 i;
+
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
+
+ status = ice_replay_fltr(hw, i, head);
+ if (status != ICE_SUCCESS)
+ return status;
+ }
+ return status;
+}
+
+/**
+ * ice_replay_vsi_fltr - Replay filters for requested VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
+ * @recp_id: Recipe ID for which rules need to be replayed
+ * @list_head: list for which filters need to be replayed
+ *
+ * Replays the filters of recipe recp_id for a VSI represented via vsi_handle.
+ * A valid VSI handle must be passed.
+ */
+static enum ice_status
+ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
+ struct LIST_HEAD_TYPE *list_head)
+{
+ struct ice_fltr_mgmt_list_entry *itr;
+ enum ice_status status = ICE_SUCCESS;
+ struct ice_sw_recipe *recp_list;
+ u16 hw_vsi_id;
+
+ if (LIST_EMPTY(list_head))
+ return status;
+ recp_list = &hw->switch_info->recp_list[recp_id];
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
+ list_entry) {
+ struct ice_fltr_list_entry f_entry;
+
+ f_entry.fltr_info = itr->fltr_info;
+ if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
+ itr->fltr_info.vsi_handle == vsi_handle) {
+ /* update the src in case it is VSI num */
+ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+ f_entry.fltr_info.src = hw_vsi_id;
+ status = ice_add_rule_internal(hw, recp_list,
+ hw->port_info->lport,
+ &f_entry);
+ if (status != ICE_SUCCESS)
+ goto end;
+ continue;
+ }
+ if (!itr->vsi_list_info ||
+ !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
+ continue;
+ /* Clearing it so that the logic can add it back */
+ ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
+ f_entry.fltr_info.vsi_handle = vsi_handle;
+ f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
+ /* update the src in case it is VSI num */
+ if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
+ f_entry.fltr_info.src = hw_vsi_id;
+ if (recp_id == ICE_SW_LKUP_VLAN)
+ status = ice_add_vlan_internal(hw, recp_list, &f_entry);
+ else
+ status = ice_add_rule_internal(hw, recp_list,
+ hw->port_info->lport,
+ &f_entry);
+ if (status != ICE_SUCCESS)
+ goto end;
+ }
+end:
+ return status;
+}
+
+/**
+ * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
+ *
+ * Replays filters for requested VSI via vsi_handle.
+ */
+enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ enum ice_status status = ICE_SUCCESS;
+ u8 i;
+
+ /* Update the recipes that were created */
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ struct LIST_HEAD_TYPE *head;
+
+ head = &sw->recp_list[i].filt_replay_rules;
+ if (!sw->recp_list[i].adv_rule)
+ status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ if (status != ICE_SUCCESS)
+ return status;
+ }
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
+ * @hw: pointer to the HW struct
+ *
+ * Deletes the filter replay rules.
+ */
+void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ u8 i;
+
+ if (!sw)
+ return;
+
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
+ struct LIST_HEAD_TYPE *l_head;
+
+ l_head = &sw->recp_list[i].filt_replay_rules;
+ if (!sw->recp_list[i].adv_rule)
+ ice_rem_sw_rule_info(hw, l_head);
+ }
+ }
+}
Index: sys/dev/ice/ice_type.h
===================================================================
--- /dev/null
+++ sys/dev/ice/ice_type.h
@@ -0,0 +1,1072 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _ICE_TYPE_H_
+#define _ICE_TYPE_H_
+
+#define ETH_ALEN 6
+
+#define ETH_HEADER_LEN 14
+
+#define BIT(a) (1UL << (a))
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+
+#define BITS_PER_BYTE 8
+
+#define _FORCE_
+
+#define ICE_BYTES_PER_WORD 2
+#define ICE_BYTES_PER_DWORD 4
+#define ICE_MAX_TRAFFIC_CLASS 8
+
+#ifndef MIN_T
+#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
+#endif
+
+#define IS_ASCII(_ch) ((_ch) < 0x80)
+
+#define ice_struct_size(ptr, field, num) \
+ (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
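+
+/* Usage sketch for ice_struct_size(): for a hypothetical structure that ends
+ * in a flexible array member, e.g. "struct foo { u16 count; u16 elem[]; }",
+ * space for four elements could be allocated as
+ *
+ *	struct foo *f = (struct foo *)ice_malloc(hw, ice_struct_size(f, elem, 4));
+ *
+ * where ice_malloc() is assumed to be the osdep allocation wrapper.
+ */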
+
+#include "ice_status.h"
+#include "ice_hw_autogen.h"
+#include "ice_devids.h"
+#include "ice_osdep.h"
+#include "ice_bitops.h" /* Must come before ice_controlq.h */
+#include "ice_controlq.h"
+#include "ice_lan_tx_rx.h"
+#include "ice_flex_type.h"
+#include "ice_protocol_type.h"
+
+static inline bool ice_is_tc_ena(ice_bitmap_t bitmap, u8 tc)
+{
+ return !!(bitmap & BIT(tc));
+}
+
+#define DIV_64BIT(n, d) ((n) / (d))
+
+static inline u64 round_up_64bit(u64 a, u32 b)
+{
+ return DIV_64BIT(((a) + (b) / 2), (b));
+}
+
+static inline u32 ice_round_to_num(u32 N, u32 R)
+{
+ return ((((N) % (R)) < ((R) / 2)) ? (((N) / (R)) * (R)) :
+ ((((N) + (R) - 1) / (R)) * (R)));
+}
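+
+/* Worked examples (illustrative): ice_round_to_num() rounds N to the nearest
+ * multiple of R, e.g. ice_round_to_num(5, 4) == 4 and ice_round_to_num(7, 4)
+ * == 8, while round_up_64bit() is a round-half-up division, e.g.
+ * round_up_64bit(7, 4) == 2.
+ */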
+
+/* Driver always calls main vsi_handle first */
+#define ICE_MAIN_VSI_HANDLE 0
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define ICE_MS_TO_GTIME(time) ((time) * 1000)
+
+/* Data type manipulation macros. */
+#define ICE_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define ICE_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+#define ICE_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define ICE_LO_WORD(x) ((u16)((x) & 0xFFFF))
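+
+/* Illustrative values: for x = 0x1122334455667788ULL, ICE_HI_DWORD(x) is
+ * 0x11223344 and ICE_LO_DWORD(x) is 0x55667788; for a 32-bit value
+ * 0x55667788, ICE_HI_WORD() is 0x5566 and ICE_LO_WORD() is 0x7788.
+ */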
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+#define ICE_DBG_TRACE BIT_ULL(0) /* for function-trace only */
+#define ICE_DBG_INIT BIT_ULL(1)
+#define ICE_DBG_RELEASE BIT_ULL(2)
+#define ICE_DBG_FW_LOG BIT_ULL(3)
+#define ICE_DBG_LINK BIT_ULL(4)
+#define ICE_DBG_PHY BIT_ULL(5)
+#define ICE_DBG_QCTX BIT_ULL(6)
+#define ICE_DBG_NVM BIT_ULL(7)
+#define ICE_DBG_LAN BIT_ULL(8)
+#define ICE_DBG_FLOW BIT_ULL(9)
+#define ICE_DBG_DCB BIT_ULL(10)
+#define ICE_DBG_DIAG BIT_ULL(11)
+#define ICE_DBG_FD BIT_ULL(12)
+#define ICE_DBG_SW BIT_ULL(13)
+#define ICE_DBG_SCHED BIT_ULL(14)
+
+#define ICE_DBG_PKG BIT_ULL(16)
+#define ICE_DBG_RES BIT_ULL(17)
+#define ICE_DBG_AQ_MSG BIT_ULL(24)
+#define ICE_DBG_AQ_DESC BIT_ULL(25)
+#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
+#define ICE_DBG_AQ_CMD BIT_ULL(27)
+#define ICE_DBG_AQ (ICE_DBG_AQ_MSG | \
+ ICE_DBG_AQ_DESC | \
+ ICE_DBG_AQ_DESC_BUF | \
+ ICE_DBG_AQ_CMD)
+
+#define ICE_DBG_USER BIT_ULL(31)
+#define ICE_DBG_ALL 0xFFFFFFFFFFFFFFFFULL
+
+#define IS_UNICAST_ETHER_ADDR(addr) \
+ ((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 0))
+
+#define IS_MULTICAST_ETHER_ADDR(addr) \
+ ((bool)((((u8 *)(addr))[0] % ((u8)0x2)) == 1))
+
+/* Check whether an address is broadcast. */
+#define IS_BROADCAST_ETHER_ADDR(addr) \
+ ((bool)((((u16 *)(addr))[0] == ((u16)0xffff))))
+
+#define IS_ZERO_ETHER_ADDR(addr) \
+ (((bool)((((u16 *)(addr))[0] == ((u16)0x0)))) && \
+ ((bool)((((u16 *)(addr))[1] == ((u16)0x0)))) && \
+ ((bool)((((u16 *)(addr))[2] == ((u16)0x0)))))
+
+#ifndef IS_ETHER_ADDR_EQUAL
+#define IS_ETHER_ADDR_EQUAL(addr1, addr2) \
+ (((bool)((((u16 *)(addr1))[0] == ((u16 *)(addr2))[0]))) && \
+ ((bool)((((u16 *)(addr1))[1] == ((u16 *)(addr2))[1]))) && \
+ ((bool)((((u16 *)(addr1))[2] == ((u16 *)(addr2))[2]))))
+#endif
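+
+/* Illustrative note: these helpers test the Ethernet I/G bit and common
+ * address patterns, e.g. 01:00:5e:00:00:01 is multicast while
+ * 00:1b:21:aa:bb:cc is unicast (example addresses). The u16-based checks
+ * assume the address buffer is at least 2-byte aligned.
+ */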
+
+enum ice_aq_res_ids {
+ ICE_NVM_RES_ID = 1,
+ ICE_SPD_RES_ID,
+ ICE_CHANGE_LOCK_RES_ID,
+ ICE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* FW update timeout definitions are in milliseconds */
+#define ICE_NVM_TIMEOUT 180000
+#define ICE_CHANGE_LOCK_TIMEOUT 1000
+#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+
+enum ice_aq_res_access_type {
+ ICE_RES_READ = 1,
+ ICE_RES_WRITE
+};
+
+struct ice_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 driver_string[32];
+};
+
+enum ice_fc_mode {
+ ICE_FC_NONE = 0,
+ ICE_FC_RX_PAUSE,
+ ICE_FC_TX_PAUSE,
+ ICE_FC_FULL,
+ ICE_FC_AUTO,
+ ICE_FC_PFC,
+ ICE_FC_DFLT
+};
+
+enum ice_phy_cache_mode {
+ ICE_FC_MODE = 0,
+ ICE_SPEED_MODE,
+ ICE_FEC_MODE
+};
+
+enum ice_fec_mode {
+ ICE_FEC_NONE = 0,
+ ICE_FEC_RS,
+ ICE_FEC_BASER,
+ ICE_FEC_AUTO
+};
+
+struct ice_phy_cache_mode_data {
+ union {
+ enum ice_fec_mode curr_user_fec_req;
+ enum ice_fc_mode curr_user_fc_req;
+ u16 curr_user_speed_req;
+ } data;
+};
+
+enum ice_set_fc_aq_failures {
+ ICE_SET_FC_AQ_FAIL_NONE = 0,
+ ICE_SET_FC_AQ_FAIL_GET,
+ ICE_SET_FC_AQ_FAIL_SET,
+ ICE_SET_FC_AQ_FAIL_UPDATE
+};
+
+/* These are structs for managing the hardware information and the operations */
+/* MAC types */
+enum ice_mac_type {
+ ICE_MAC_UNKNOWN = 0,
+ ICE_MAC_VF,
+ ICE_MAC_E810,
+ ICE_MAC_GENERIC,
+};
+
+/* Media Types */
+enum ice_media_type {
+ ICE_MEDIA_UNKNOWN = 0,
+ ICE_MEDIA_FIBER,
+ ICE_MEDIA_BASET,
+ ICE_MEDIA_BACKPLANE,
+ ICE_MEDIA_DA,
+};
+
+/* Software VSI types. */
+enum ice_vsi_type {
+ ICE_VSI_PF = 0,
+ ICE_VSI_VF = 1,
+ ICE_VSI_LB = 6,
+};
+
+struct ice_link_status {
+ /* Refer to ice_aq_phy_type for bits definition */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u8 topo_media_conflict;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to the #define values for module_type[ICE_MODULE_TYPE_TOTAL_BYTE]
+ * in the ice_aqc_get_phy_caps structure
+ */
+ u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Different data queue types: These are mainly for SW consumption. */
+enum ice_q {
+ ICE_DATA_Q_DOORBELL,
+ ICE_DATA_Q_CMPL,
+ ICE_DATA_Q_QUANTA,
+ ICE_DATA_Q_RX,
+ ICE_DATA_Q_TX,
+};
+
+/* Different reset sources for which a disable queue AQ call has to be made in
+ * order to clean the Tx scheduler as a part of the reset
+ */
+enum ice_disq_rst_src {
+ ICE_NO_RESET = 0,
+ ICE_VM_RESET,
+ ICE_VF_RESET,
+};
+
+/* PHY info such as phy_type, etc... */
+struct ice_phy_info {
+ struct ice_link_status link_info;
+ struct ice_link_status link_info_old;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ enum ice_media_type media_type;
+ u8 get_link_info;
+ /* Refer to struct ice_aqc_get_link_status_data for details of the
+ * enable bit in curr_user_speed_req
+ */
+ u16 curr_user_speed_req;
+ enum ice_fec_mode curr_user_fec_req;
+ enum ice_fc_mode curr_user_fc_req;
+ struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg;
+};
+
+#define ICE_MAX_NUM_MIRROR_RULES 64
+
+/* Common HW capabilities for SW use */
+struct ice_hw_common_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define ICE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define ICE_MGMT_MODE_PASS_THRU_MODE_M 0xF
+#define ICE_MGMT_MODE_CTL_INTERFACE_M 0xF0
+#define ICE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00
+
+ u32 mgmt_protocols_mctp;
+#define ICE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define ICE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define ICE_MGMT_MODE_PROTO_OEM BIT(2)
+#define ICE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define ICE_MAX_SUPPORTED_GPIO_LED 12
+#define ICE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[ICE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[ICE_MAX_SUPPORTED_GPIO_SDP];
+
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define ICE_WOL_SUPPORT_M BIT(0)
+#define ICE_ACPI_PROG_MTHD_M BIT(1)
+#define ICE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool nvm_unified_update;
+#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+};
+
+/* Function specific capabilities */
+struct ice_hw_func_caps {
+ struct ice_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+};
+
+/* Device wide capabilities */
+struct ice_hw_dev_caps {
+ struct ice_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_funcs;
+};
+
+/* Information about MAC such as address, etc... */
+struct ice_mac_info {
+ u8 lan_addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 port_addr[ETH_ALEN];
+ u8 wol_addr[ETH_ALEN];
+};
+
+/* PCI bus types */
+enum ice_bus_type {
+ ice_bus_unknown = 0,
+ ice_bus_pci_express,
+ ice_bus_embedded, /* device is embedded rather than an add-in card */
+ ice_bus_reserved
+};
+
+/* PCI bus speeds */
+enum ice_pcie_bus_speed {
+ ice_pcie_speed_unknown = 0xff,
+ ice_pcie_speed_2_5GT = 0x14,
+ ice_pcie_speed_5_0GT = 0x15,
+ ice_pcie_speed_8_0GT = 0x16,
+ ice_pcie_speed_16_0GT = 0x17
+};
+
+/* PCI bus widths */
+enum ice_pcie_link_width {
+ ice_pcie_lnk_width_resrv = 0x00,
+ ice_pcie_lnk_x1 = 0x01,
+ ice_pcie_lnk_x2 = 0x02,
+ ice_pcie_lnk_x4 = 0x04,
+ ice_pcie_lnk_x8 = 0x08,
+ ice_pcie_lnk_x12 = 0x0C,
+ ice_pcie_lnk_x16 = 0x10,
+ ice_pcie_lnk_x32 = 0x20,
+ ice_pcie_lnk_width_unknown = 0xff,
+};
+
+/* Reset types used to determine which kind of reset was requested. These
+ * defines match the values of the RESET_TYPE field of the GLGEN_RSTAT
+ * register. ICE_RESET_PFR does not match any RESET_TYPE field in the
+ * GLGEN_RSTAT register because its reset source is different from the other
+ * types listed.
+ */
+enum ice_reset_req {
+ ICE_RESET_POR = 0,
+ ICE_RESET_INVAL = 0,
+ ICE_RESET_CORER = 1,
+ ICE_RESET_GLOBR = 2,
+ ICE_RESET_EMPR = 3,
+ ICE_RESET_PFR = 4,
+};
+
+/* Bus parameters */
+struct ice_bus_info {
+ enum ice_pcie_bus_speed speed;
+ enum ice_pcie_link_width width;
+ enum ice_bus_type type;
+ u16 domain_num;
+ u16 device;
+ u8 func;
+ u8 bus_num;
+};
+
+/* Flow control (FC) parameters */
+struct ice_fc_info {
+ enum ice_fc_mode current_mode; /* FC mode in effect */
+ enum ice_fc_mode req_mode; /* FC mode requested by caller */
+};
+
+/* Option ROM version information */
+struct ice_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+};
+
+/* NVM Information */
+struct ice_nvm_info {
+ struct ice_orom_info orom; /* Option ROM version info */
+ u32 eetrack; /* NVM data version */
+ u16 sr_words; /* Shadow RAM size in words */
+ u32 flash_size; /* Size of available flash in bytes */
+ u8 major_ver; /* major version of dev starter */
+ u8 minor_ver; /* minor version of dev starter */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+struct ice_link_default_override_tlv {
+ u8 options;
+#define ICE_LINK_OVERRIDE_OPT_M 0x3F
+#define ICE_LINK_OVERRIDE_STRICT_MODE BIT(0)
+#define ICE_LINK_OVERRIDE_EPCT_DIS BIT(1)
+#define ICE_LINK_OVERRIDE_PORT_DIS BIT(2)
+#define ICE_LINK_OVERRIDE_EN BIT(3)
+#define ICE_LINK_OVERRIDE_AUTO_LINK_DIS BIT(4)
+#define ICE_LINK_OVERRIDE_EEE_EN BIT(5)
+ u8 phy_config;
+#define ICE_LINK_OVERRIDE_PHY_CFG_S 8
+#define ICE_LINK_OVERRIDE_PHY_CFG_M (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S)
+#define ICE_LINK_OVERRIDE_PAUSE_M 0x3
+#define ICE_LINK_OVERRIDE_LESM_EN BIT(6)
+#define ICE_LINK_OVERRIDE_AUTO_FEC_EN BIT(7)
+ u8 fec_options;
+#define ICE_LINK_OVERRIDE_FEC_OPT_M 0xFF
+ u8 rsvd1;
+ u64 phy_type_low;
+ u64 phy_type_high;
+};
+
+#define ICE_NVM_VER_LEN 32
+
+/* netlist version information */
+struct ice_netlist_ver_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
+/* Max number of port to queue branches w.r.t topology */
+#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
+
+#define ice_for_each_traffic_class(_i) \
+ for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
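+
+/* Usage sketch (illustrative): to visit each enabled TC given a hypothetical
+ * bitmap ena_tcmap and a hypothetical per-TC handler configure_tc():
+ *
+ *	u8 tc;
+ *
+ *	ice_for_each_traffic_class(tc)
+ *		if (ice_is_tc_ena(ena_tcmap, tc))
+ *			configure_tc(tc);
+ */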
+
+/* ICE_DFLT_AGG_ID means that all new VM/VSI nodes connect to the
+ * driver-defined default aggregator policy
+ */
+#define ICE_INVAL_TEID 0xFFFFFFFF
+#define ICE_DFLT_AGG_ID 0
+
+struct ice_sched_node {
+ struct ice_sched_node *parent;
+ struct ice_sched_node *sibling; /* next sibling in the same layer */
+ struct ice_sched_node **children;
+ struct ice_aqc_txsched_elem_data info;
+ u32 agg_id; /* aggregator group ID */
+ u16 vsi_handle;
+ u8 in_use; /* suspended or in use */
+ u8 tx_sched_layer; /* Logical Layer (1-9) */
+ u8 num_children;
+ u8 tc_num;
+ u8 owner;
+#define ICE_SCHED_NODE_OWNER_LAN 0
+#define ICE_SCHED_NODE_OWNER_AE 1
+#define ICE_SCHED_NODE_OWNER_RDMA 2
+};
+
+/* Access Macros for Tx Sched Elements data */
+#define ICE_TXSCHED_GET_NODE_TEID(x) LE32_TO_CPU((x)->info.node_teid)
+#define ICE_TXSCHED_GET_PARENT_TEID(x) LE32_TO_CPU((x)->info.parent_teid)
+#define ICE_TXSCHED_GET_CIR_RL_ID(x) \
+ LE16_TO_CPU((x)->info.cir_bw.bw_profile_idx)
+#define ICE_TXSCHED_GET_EIR_RL_ID(x) \
+ LE16_TO_CPU((x)->info.eir_bw.bw_profile_idx)
+#define ICE_TXSCHED_GET_SRL_ID(x) LE16_TO_CPU((x)->info.srl_id)
+#define ICE_TXSCHED_GET_CIR_BWALLOC(x) \
+ LE16_TO_CPU((x)->info.cir_bw.bw_alloc)
+#define ICE_TXSCHED_GET_EIR_BWALLOC(x) \
+ LE16_TO_CPU((x)->info.eir_bw.bw_alloc)
+
+struct ice_sched_rl_profile {
+ u32 rate; /* In Kbps */
+ struct ice_aqc_rl_profile_elem info;
+};
+
+/* The aggregator type determines if the identifier is for a VSI group,
+ * aggregator group, aggregator of queues, or queue group.
+ */
+enum ice_agg_type {
+ ICE_AGG_TYPE_UNKNOWN = 0,
+ ICE_AGG_TYPE_TC,
+ ICE_AGG_TYPE_AGG, /* aggregator */
+ ICE_AGG_TYPE_VSI,
+ ICE_AGG_TYPE_QG,
+ ICE_AGG_TYPE_Q
+};
+
+/* Rate limit types */
+enum ice_rl_type {
+ ICE_UNKNOWN_BW = 0,
+ ICE_MIN_BW, /* for CIR profile */
+ ICE_MAX_BW, /* for EIR profile */
+ ICE_SHARED_BW /* for shared profile */
+};
+
+#define ICE_SCHED_MIN_BW 500 /* in Kbps */
+#define ICE_SCHED_MAX_BW 100000000 /* in Kbps */
+#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
+#define ICE_SCHED_NO_PRIORITY 0
+#define ICE_SCHED_NO_BW_WT 0
+#define ICE_SCHED_DFLT_RL_PROF_ID 0
+#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
+#define ICE_SCHED_DFLT_BW_WT 1
+#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
+#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
+
+/* Access Macros for Tx Sched RL Profile data */
+#define ICE_TXSCHED_GET_RL_PROF_ID(p) LE16_TO_CPU((p)->info.profile_id)
+#define ICE_TXSCHED_GET_RL_MBS(p) LE16_TO_CPU((p)->info.max_burst_size)
+#define ICE_TXSCHED_GET_RL_MULTIPLIER(p) LE16_TO_CPU((p)->info.rl_multiply)
+#define ICE_TXSCHED_GET_RL_WAKEUP_MV(p) LE16_TO_CPU((p)->info.wake_up_calc)
+#define ICE_TXSCHED_GET_RL_ENCODE(p) LE16_TO_CPU((p)->info.rl_encode)
+
+/* The following tree example shows the naming conventions followed under
+ * ice_port_info struct for default scheduler tree topology.
+ *
+ * A tree on a port
+ * * ---> root node
+ * (TC0)/ / / / \ \ \ \(TC7) ---> num_branches (range:1- 8)
+ * * * * * * * * * |
+ * / |
+ * * |
+ * / |-> num_elements (range:1 - 9)
+ * * | implies num_of_layers
+ * / |
+ * (a)* |
+ *
+ * (a) is the last_node_teid (not of type leaf). A leaf node is created under
+ * (a) as a child node where queues get added; the add Tx/Rx queue admin
+ * commands need the TEID of (a) to add queues.
+ *
+ * This tree
+ * -> has 8 branches (one for each TC)
+ * -> First branch (TC0) has 4 elements
+ * -> has 4 layers
+ * -> (a) is the topmost layer node created by firmware on branch 0
+ *
+ * Note: Above asterisk tree covers only basic terminology and scenario.
+ * Refer to the documentation for more info.
+ */
+
+ /* Data structure for saving BW information */
+enum ice_bw_type {
+ ICE_BW_TYPE_PRIO,
+ ICE_BW_TYPE_CIR,
+ ICE_BW_TYPE_CIR_WT,
+ ICE_BW_TYPE_EIR,
+ ICE_BW_TYPE_EIR_WT,
+ ICE_BW_TYPE_SHARED,
+ ICE_BW_TYPE_CNT /* This must be last */
+};
+
+struct ice_bw {
+ u32 bw;
+ u16 bw_alloc;
+};
+
+struct ice_bw_type_info {
+ ice_declare_bitmap(bw_t_bitmap, ICE_BW_TYPE_CNT);
+ u8 generic;
+ struct ice_bw cir_bw;
+ struct ice_bw eir_bw;
+ u32 shared_bw;
+};
+
+/* VSI queue context structure for given TC */
+struct ice_q_ctx {
+ u16 q_handle;
+ u32 q_teid;
+ /* bw_t_info saves queue BW information */
+ struct ice_bw_type_info bw_t_info;
+};
+
+/* VSI type list entry to locate corresponding VSI/aggregator nodes */
+struct ice_sched_vsi_info {
+ struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
+ u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
+ /* bw_t_info saves VSI BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct ice_dcb_ets_cfg {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prio_table[ICE_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[ICE_MAX_TRAFFIC_CLASS];
+ u8 tsatable[ICE_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct ice_dcb_pfc_cfg {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcena;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct ice_dcb_app_priority_table {
+ u16 prot_id;
+ u8 priority;
+ u8 selector;
+};
+
+#define ICE_MAX_USER_PRIORITY 8
+#define ICE_DCBX_MAX_APPS 32
+#define ICE_LLDPDU_SIZE 1500
+#define ICE_TLV_STATUS_OPER 0x1
+#define ICE_TLV_STATUS_SYNC 0x2
+#define ICE_TLV_STATUS_ERR 0x4
+#define ICE_APP_PROT_ID_FCOE 0x8906
+#define ICE_APP_PROT_ID_ISCSI 0x0cbc
+#define ICE_APP_PROT_ID_FIP 0x8914
+#define ICE_APP_SEL_ETHTYPE 0x1
+#define ICE_APP_SEL_TCPIP 0x2
+#define ICE_CEE_APP_SEL_ETHTYPE 0x0
+#define ICE_CEE_APP_SEL_TCPIP 0x1
+
+struct ice_dcbx_cfg {
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct ice_dcb_ets_cfg etscfg;
+ struct ice_dcb_ets_cfg etsrec;
+ struct ice_dcb_pfc_cfg pfc;
+ struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
+ u8 dcbx_mode;
+#define ICE_DCBX_MODE_CEE 0x1
+#define ICE_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define ICE_DCBX_APPS_NON_WILLING 0x1
+};
+
+struct ice_port_info {
+ struct ice_sched_node *root; /* Root Node per Port */
+ struct ice_hw *hw; /* back pointer to HW instance */
+ u32 last_node_teid; /* scheduler last node info */
+ u16 sw_id; /* Initial switch ID belongs to port */
+ u16 pf_vf_num;
+ u8 port_state;
+#define ICE_SCHED_PORT_STATE_INIT 0x0
+#define ICE_SCHED_PORT_STATE_READY 0x1
+ u8 lport;
+#define ICE_LPORT_MASK 0xff
+ u16 dflt_tx_vsi_rule_id;
+ u16 dflt_tx_vsi_num;
+ u16 dflt_rx_vsi_rule_id;
+ u16 dflt_rx_vsi_num;
+ struct ice_fc_info fc;
+ struct ice_mac_info mac;
+ struct ice_phy_info phy;
+ struct ice_lock sched_lock; /* protect access to TXSched tree */
+ struct ice_sched_node *
+ sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ /* List contain profile ID(s) and other params per layer */
+ struct LIST_HEAD_TYPE rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ struct ice_bw_type_info tc_node_bw_t_info[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
+ /* DCBX info */
+ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
+ struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
+ /* LLDP/DCBX Status */
+ u8 dcbx_status:3; /* see ICE_DCBX_STATUS_DIS */
+ u8 is_sw_lldp:1;
+ u8 is_vf:1;
+};
+
+struct ice_switch_info {
+ struct LIST_HEAD_TYPE vsi_list_map_head;
+ struct ice_sw_recipe *recp_list;
+ u16 prof_res_bm_init;
+
+ ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
+};
+
+/* Port hardware description */
+struct ice_hw {
+ u8 *hw_addr;
+ void *back;
+ struct ice_aqc_layer_props *layer_info;
+ struct ice_port_info *port_info;
+ /* 2D Array for each Tx Sched RL Profile type */
+ struct ice_sched_rl_profile **cir_profiles;
+ struct ice_sched_rl_profile **eir_profiles;
+ struct ice_sched_rl_profile **srl_profiles;
+ /* PSM clock frequency for calculating RL profile params */
+ u32 psm_clk_freq;
+ u64 debug_mask; /* BITMAP for debug mask */
+ enum ice_mac_type mac_type;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+
+ u8 pf_id; /* device profile info */
+
+ u16 max_burst_size; /* driver sets this value */
+
+ /* Tx Scheduler values */
+ u8 num_tx_sched_layers;
+ u8 num_tx_sched_phys_layers;
+ u8 flattened_layers;
+ u8 max_cgds;
+ u8 sw_entry_point_layer;
+ u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM];
+ struct LIST_HEAD_TYPE agg_list; /* lists all aggregators */
+ struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
+ u8 evb_veb; /* true for VEB, false for VEPA */
+ u8 reset_ongoing; /* true if HW is in reset, false otherwise */
+ struct ice_bus_info bus;
+ struct ice_nvm_info nvm;
+ struct ice_hw_dev_caps dev_caps; /* device capabilities */
+ struct ice_hw_func_caps func_caps; /* function capabilities */
+ struct ice_netlist_ver_info netlist_ver; /* netlist version info */
+
+ struct ice_switch_info *switch_info; /* switch filter lists */
+
+ /* Control Queue info */
+ struct ice_ctl_q_info adminq;
+ struct ice_ctl_q_info mailboxq;
+
+ u8 api_branch; /* API branch version */
+ u8 api_maj_ver; /* API major version */
+ u8 api_min_ver; /* API minor version */
+ u8 api_patch; /* API patch version */
+ u8 fw_branch; /* firmware branch version */
+ u8 fw_maj_ver; /* firmware major version */
+ u8 fw_min_ver; /* firmware minor version */
+ u8 fw_patch; /* firmware patch version */
+ u32 fw_build; /* firmware build number */
+
+/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
+ * register. Used for determining the ITR/INTRL granularity during
+ * initialization.
+ */
+#define ICE_MAX_AGG_BW_200G 0x0
+#define ICE_MAX_AGG_BW_100G 0x1
+#define ICE_MAX_AGG_BW_50G 0x2
+#define ICE_MAX_AGG_BW_25G 0x3
+ /* ITR granularity for different speeds */
+#define ICE_ITR_GRAN_ABOVE_25 2
+#define ICE_ITR_GRAN_MAX_25 4
+ /* ITR granularity in 1 us */
+ u8 itr_gran;
+ /* INTRL granularity for different speeds */
+#define ICE_INTRL_GRAN_ABOVE_25 4
+#define ICE_INTRL_GRAN_MAX_25 8
+ /* INTRL granularity in 1 us */
+ u8 intrl_gran;
+
+ u8 ucast_shared; /* true if VSIs can share unicast addr */
+
+#define ICE_PHY_PER_NAC 1
+#define ICE_MAX_QUAD 2
+#define ICE_NUM_QUAD_TYPE 2
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_PHY_0_LAST_QUAD 1
+#define ICE_PORTS_PER_PHY 8
+#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
+
+ /* Active package version (currently active) */
+ struct ice_pkg_ver active_pkg_ver;
+ u32 active_track_id;
+ u8 active_pkg_name[ICE_PKG_NAME_SIZE];
+ u8 active_pkg_in_nvm;
+
+ enum ice_aq_err pkg_dwnld_status;
+
+ /* Driver's package ver - (from the Metadata seg) */
+ struct ice_pkg_ver pkg_ver;
+ u8 pkg_name[ICE_PKG_NAME_SIZE];
+
+ /* Driver's Ice package version (from the Ice seg) */
+ struct ice_pkg_ver ice_pkg_ver;
+ u8 ice_pkg_name[ICE_PKG_NAME_SIZE];
+
+ /* Pointer to the ice segment */
+ struct ice_seg *seg;
+
+ /* Pointer to allocated copy of pkg memory */
+ u8 *pkg_copy;
+ u32 pkg_size;
+
+ /* tunneling info */
+ struct ice_lock tnl_lock;
+ struct ice_tunnel_table tnl;
+
+ /* HW block tables */
+ struct ice_blk_info blk[ICE_BLK_COUNT];
+ struct ice_lock fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
+ struct LIST_HEAD_TYPE fl_profs[ICE_BLK_COUNT];
+ struct ice_lock rss_locks; /* protect RSS configuration */
+ struct LIST_HEAD_TYPE rss_list_head;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct ice_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+ u64 rx_no_desc; /* repc */
+ u64 rx_errors; /* repc */
+};
+
+#define ICE_MAX_UP 8
+
+/* Statistics collected per VEB per User Priority (UP) for up to 8 UPs */
+struct ice_veb_up_stats {
+ u64 up_rx_pkts[ICE_MAX_UP];
+ u64 up_rx_bytes[ICE_MAX_UP];
+ u64 up_tx_pkts[ICE_MAX_UP];
+ u64 up_tx_bytes[ICE_MAX_UP];
+};
+
+/* Statistics collected by the MAC */
+struct ice_hw_port_stats {
+ /* eth stats collected by the port */
+ struct ice_eth_stats eth;
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_len_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_pkt_dropped; /* mspdc */
+ /* EEE LPI */
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
+ u64 tx_lpi_count; /* etlpic */
+ u64 rx_lpi_count; /* erlpic */
+};
+
+enum ice_sw_fwd_act_type {
+ ICE_FWD_TO_VSI = 0,
+ ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
+ ICE_FWD_TO_Q,
+ ICE_FWD_TO_QGRP,
+ ICE_DROP_PACKET,
+ ICE_INVAL_ACT
+};
+
+/* Checksum and Shadow RAM pointers */
+#define ICE_SR_NVM_CTRL_WORD 0x00
+#define ICE_SR_PHY_ANALOG_PTR 0x04
+#define ICE_SR_OPTION_ROM_PTR 0x05
+#define ICE_SR_RO_PCIR_REGS_AUTO_LOAD_PTR 0x06
+#define ICE_SR_AUTO_GENERATED_POINTERS_PTR 0x07
+#define ICE_SR_PCIR_REGS_AUTO_LOAD_PTR 0x08
+#define ICE_SR_EMP_GLOBAL_MODULE_PTR 0x09
+#define ICE_SR_EMP_IMAGE_PTR 0x0B
+#define ICE_SR_PE_IMAGE_PTR 0x0C
+#define ICE_SR_CSR_PROTECTED_LIST_PTR 0x0D
+#define ICE_SR_MNG_CFG_PTR 0x0E
+#define ICE_SR_EMP_MODULE_PTR 0x0F
+#define ICE_SR_PBA_BLOCK_PTR 0x16
+#define ICE_SR_BOOT_CFG_PTR 0x132
+#define ICE_SR_NVM_WOL_CFG 0x19
+#define ICE_NVM_OROM_VER_OFF 0x02
+#define ICE_SR_NVM_DEV_STARTER_VER 0x18
+#define ICE_SR_ALTERNATE_SAN_MAC_ADDR_PTR 0x27
+#define ICE_SR_PERMANENT_SAN_MAC_ADDR_PTR 0x28
+#define ICE_SR_NVM_MAP_VER 0x29
+#define ICE_SR_NVM_IMAGE_VER 0x2A
+#define ICE_SR_NVM_STRUCTURE_VER 0x2B
+#define ICE_SR_NVM_EETRACK_LO 0x2D
+#define ICE_SR_NVM_EETRACK_HI 0x2E
+#define ICE_NVM_VER_LO_SHIFT 0
+#define ICE_NVM_VER_LO_MASK (0xff << ICE_NVM_VER_LO_SHIFT)
+#define ICE_NVM_VER_HI_SHIFT 12
+#define ICE_NVM_VER_HI_MASK (0xf << ICE_NVM_VER_HI_SHIFT)
+#define ICE_OEM_EETRACK_ID 0xffffffff
+#define ICE_OROM_VER_PATCH_SHIFT 0
+#define ICE_OROM_VER_PATCH_MASK (0xff << ICE_OROM_VER_PATCH_SHIFT)
+#define ICE_OROM_VER_BUILD_SHIFT 8
+#define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT)
+#define ICE_OROM_VER_SHIFT 24
+#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
+#define ICE_SR_VPD_PTR 0x2F
+#define ICE_SR_PXE_SETUP_PTR 0x30
+#define ICE_SR_PXE_CFG_CUST_OPTIONS_PTR 0x31
+#define ICE_SR_NVM_ORIGINAL_EETRACK_LO 0x34
+#define ICE_SR_NVM_ORIGINAL_EETRACK_HI 0x35
+#define ICE_SR_VLAN_CFG_PTR 0x37
+#define ICE_SR_POR_REGS_AUTO_LOAD_PTR 0x38
+#define ICE_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
+#define ICE_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
+#define ICE_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define ICE_SR_PHY_CFG_SCRIPT_PTR 0x3D
+#define ICE_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define ICE_SR_SW_CHECKSUM_WORD 0x3F
+#define ICE_SR_PFA_PTR 0x40
+#define ICE_SR_1ST_SCRATCH_PAD_PTR 0x41
+#define ICE_SR_1ST_NVM_BANK_PTR 0x42
+#define ICE_SR_NVM_BANK_SIZE 0x43
+#define ICE_SR_1ND_OROM_BANK_PTR 0x44
+#define ICE_SR_OROM_BANK_SIZE 0x45
+#define ICE_SR_NETLIST_BANK_PTR 0x46
+#define ICE_SR_NETLIST_BANK_SIZE 0x47
+#define ICE_SR_EMP_SR_SETTINGS_PTR 0x48
+#define ICE_SR_CONFIGURATION_METADATA_PTR 0x4D
+#define ICE_SR_IMMEDIATE_VALUES_PTR 0x4E
+#define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134
+#define ICE_SR_POR_REGISTERS_AUTOLOAD_PTR 0x118
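+
+/* Illustrative decode sketch: the ICE_NVM_VER_* masks above are assumed to
+ * split a device starter/map version word roughly as
+ *
+ *	major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+ *	minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
+ */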
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define ICE_SR_VPD_SIZE_WORDS 512
+#define ICE_SR_PCIE_ALT_SIZE_WORDS 512
+#define ICE_SR_CTRL_WORD_1_S 0x06
+#define ICE_SR_CTRL_WORD_1_M (0x03 << ICE_SR_CTRL_WORD_1_S)
+
+/* Shadow RAM related */
+#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define ICE_SR_BUF_ALIGNMENT 4096
+#define ICE_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define ICE_SR_SW_CHECKSUM_BASE 0xBABA
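+
+/* Illustrative sketch: a conforming checksum word can be produced as
+ *
+ *	checksum = (u16)(ICE_SR_SW_CHECKSUM_BASE - sum_of_other_sr_words);
+ *
+ * so that the 16-bit sum of all words, including the checksum word itself,
+ * equals 0xBABA (sum_of_other_sr_words being a hypothetical running total).
+ */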
+
+/* Link override related */
+#define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10
+#define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4
+#define ICE_SR_PFA_LINK_OVERRIDE_OFFSET 2
+#define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET 1
+#define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET 2
+#define ICE_FW_API_LINK_OVERRIDE_MAJ 1
+#define ICE_FW_API_LINK_OVERRIDE_MIN 5
+#define ICE_FW_API_LINK_OVERRIDE_PATCH 2
+
+#define ICE_PBA_FLAG_DFLT 0xFAFA
+/* Hash redirection LUT for VSI - maximum array size */
+#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
+
+/*
+ * Defines for values in the VF_PE_DB_SIZE bits in the GLPCI_LBARCTRL register.
+ * This is needed to determine the BAR0 space for the VFs
+ */
+#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_0KB 0x0
+#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_8KB 0x1
+#define GLPCI_LBARCTRL_VF_PE_DB_SIZE_64KB 0x2
+
+#endif /* _ICE_TYPE_H_ */
Index: sys/dev/ice/if_ice_iflib.c
===================================================================
--- /dev/null
+++ sys/dev/ice/if_ice_iflib.c
@@ -0,0 +1,2874 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+/**
+ * @file if_ice_iflib.c
+ * @brief iflib driver implementation
+ *
+ * Contains the main entry point for the iflib driver implementation. It
+ * implements the various ifdi driver methods, and sets up the module and
+ * driver values to load an iflib driver.
+ */
+
+#include "ice_iflib.h"
+#include "ice_drv_info.h"
+#include "ice_switch.h"
+#include "ice_sched.h"
+
+#include <sys/module.h>
+#include <sys/sockio.h>
+#include <sys/smp.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+/*
+ * Device method prototypes
+ */
+
+static void *ice_register(device_t);
+static int ice_if_attach_pre(if_ctx_t);
+static int ice_attach_pre_recovery_mode(struct ice_softc *sc);
+static int ice_if_attach_post(if_ctx_t);
+static void ice_attach_post_recovery_mode(struct ice_softc *sc);
+static int ice_if_detach(if_ctx_t);
+static int ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
+static int ice_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets);
+static int ice_if_msix_intr_assign(if_ctx_t ctx, int msix);
+static void ice_if_queues_free(if_ctx_t ctx);
+static int ice_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
+static void ice_if_intr_enable(if_ctx_t ctx);
+static void ice_if_intr_disable(if_ctx_t ctx);
+static int ice_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
+static int ice_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
+static int ice_if_promisc_set(if_ctx_t ctx, int flags);
+static void ice_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr);
+static int ice_if_media_change(if_ctx_t ctx);
+static void ice_if_init(if_ctx_t ctx);
+static void ice_if_timer(if_ctx_t ctx, uint16_t qid);
+static void ice_if_update_admin_status(if_ctx_t ctx);
+static void ice_if_multi_set(if_ctx_t ctx);
+static void ice_if_vlan_register(if_ctx_t ctx, u16 vtag);
+static void ice_if_vlan_unregister(if_ctx_t ctx, u16 vtag);
+static void ice_if_stop(if_ctx_t ctx);
+static uint64_t ice_if_get_counter(if_ctx_t ctx, ift_counter counter);
+static int ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
+static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
+
+static int ice_msix_que(void *arg);
+static int ice_msix_admin(void *arg);
+
+/*
+ * Helper function prototypes
+ */
+static int ice_pci_mapping(struct ice_softc *sc);
+static void ice_free_pci_mapping(struct ice_softc *sc);
+static void ice_update_link_status(struct ice_softc *sc, bool update_media);
+static void ice_init_device_features(struct ice_softc *sc);
+static void ice_init_tx_tracking(struct ice_vsi *vsi);
+static void ice_handle_reset_event(struct ice_softc *sc);
+static void ice_handle_pf_reset_request(struct ice_softc *sc);
+static void ice_prepare_for_reset(struct ice_softc *sc);
+static int ice_rebuild_pf_vsi_qmap(struct ice_softc *sc);
+static void ice_rebuild(struct ice_softc *sc);
+static void ice_rebuild_recovery_mode(struct ice_softc *sc);
+static void ice_free_irqvs(struct ice_softc *sc);
+static void ice_update_rx_mbuf_sz(struct ice_softc *sc);
+static void ice_poll_for_media_avail(struct ice_softc *sc);
+static void ice_setup_scctx(struct ice_softc *sc);
+static int ice_allocate_msix(struct ice_softc *sc);
+static void ice_admin_timer(void *arg);
+static void ice_transition_recovery_mode(struct ice_softc *sc);
+static void ice_transition_safe_mode(struct ice_softc *sc);
+
+/*
+ * Device Interface Declaration
+ */
+
+/**
+ * @var ice_methods
+ * @brief ice driver method entry points
+ *
+ * List of device methods implementing the generic device interface used by
+ * the device stack to interact with the ice driver. Since this is an iflib
+ * driver, most of the methods point to the generic iflib implementation.
+ */
+static device_method_t ice_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_register, ice_register),
+ DEVMETHOD(device_probe, iflib_device_probe_vendor),
+ DEVMETHOD(device_attach, iflib_device_attach),
+ DEVMETHOD(device_detach, iflib_device_detach),
+ DEVMETHOD(device_shutdown, iflib_device_shutdown),
+ DEVMETHOD(device_suspend, iflib_device_suspend),
+ DEVMETHOD(device_resume, iflib_device_resume),
+ DEVMETHOD_END
+};
+
+/**
+ * @var ice_iflib_methods
+ * @brief iflib method entry points
+ *
+ * List of device methods used by the iflib stack to interact with this
+ * driver. These are the real main entry points for driver operation.
+ */
+static device_method_t ice_iflib_methods[] = {
+ DEVMETHOD(ifdi_attach_pre, ice_if_attach_pre),
+ DEVMETHOD(ifdi_attach_post, ice_if_attach_post),
+ DEVMETHOD(ifdi_detach, ice_if_detach),
+ DEVMETHOD(ifdi_tx_queues_alloc, ice_if_tx_queues_alloc),
+ DEVMETHOD(ifdi_rx_queues_alloc, ice_if_rx_queues_alloc),
+ DEVMETHOD(ifdi_msix_intr_assign, ice_if_msix_intr_assign),
+ DEVMETHOD(ifdi_queues_free, ice_if_queues_free),
+ DEVMETHOD(ifdi_mtu_set, ice_if_mtu_set),
+ DEVMETHOD(ifdi_intr_enable, ice_if_intr_enable),
+ DEVMETHOD(ifdi_intr_disable, ice_if_intr_disable),
+ DEVMETHOD(ifdi_rx_queue_intr_enable, ice_if_rx_queue_intr_enable),
+ DEVMETHOD(ifdi_tx_queue_intr_enable, ice_if_tx_queue_intr_enable),
+ DEVMETHOD(ifdi_promisc_set, ice_if_promisc_set),
+ DEVMETHOD(ifdi_media_status, ice_if_media_status),
+ DEVMETHOD(ifdi_media_change, ice_if_media_change),
+ DEVMETHOD(ifdi_init, ice_if_init),
+ DEVMETHOD(ifdi_stop, ice_if_stop),
+ DEVMETHOD(ifdi_timer, ice_if_timer),
+ DEVMETHOD(ifdi_update_admin_status, ice_if_update_admin_status),
+ DEVMETHOD(ifdi_multi_set, ice_if_multi_set),
+ DEVMETHOD(ifdi_vlan_register, ice_if_vlan_register),
+ DEVMETHOD(ifdi_vlan_unregister, ice_if_vlan_unregister),
+ DEVMETHOD(ifdi_get_counter, ice_if_get_counter),
+ DEVMETHOD(ifdi_priv_ioctl, ice_if_priv_ioctl),
+ DEVMETHOD(ifdi_i2c_req, ice_if_i2c_req),
+ DEVMETHOD_END
+};
+
+/**
+ * @var ice_driver
+ * @brief driver structure for the generic device stack
+ *
+ * driver_t definition used to setup the generic device methods.
+ */
+static driver_t ice_driver = {
+ .name = "ice",
+ .methods = ice_methods,
+ .size = sizeof(struct ice_softc),
+};
+
+/**
+ * @var ice_iflib_driver
+ * @brief driver structure for the iflib stack
+ *
+ * driver_t definition used to setup the iflib device methods.
+ */
+static driver_t ice_iflib_driver = {
+ .name = "ice",
+ .methods = ice_iflib_methods,
+ .size = sizeof(struct ice_softc),
+};
+
+extern struct if_txrx ice_txrx;
+extern struct if_txrx ice_recovery_txrx;
+
+/**
+ * @var ice_sctx
+ * @brief ice driver shared context
+ *
+ * Structure defining shared values (context) that are used by all instances
+ * of the device. Primarily used to set up details about how the iflib stack
+ * should treat this driver. Also defines the default, minimum, and maximum
+ * number of descriptors in each ring.
+ */
+static struct if_shared_ctx ice_sctx = {
+ .isc_magic = IFLIB_MAGIC,
+ .isc_q_align = PAGE_SIZE,
+
+ .isc_tx_maxsize = ICE_MAX_FRAME_SIZE,
+ /* We could technically set this as high as ICE_MAX_DMA_SEG_SIZE, but
+ * that doesn't make sense since that would be larger than the maximum
+ * size of a single packet.
+ */
+ .isc_tx_maxsegsize = ICE_MAX_FRAME_SIZE,
+
+ /* XXX: This is only used by iflib to ensure that
+ * scctx->isc_tx_tso_size_max + the VLAN header is a valid size.
+ */
+ .isc_tso_maxsize = ICE_TSO_SIZE + sizeof(struct ether_vlan_header),
+ /* XXX: This is used by iflib to set the number of segments in the TSO
+ * DMA tag. However, scctx->isc_tx_tso_segsize_max is used to set the
+ * related ifnet parameter.
+ */
+ .isc_tso_maxsegsize = ICE_MAX_DMA_SEG_SIZE,
+
+ .isc_rx_maxsize = ICE_MAX_FRAME_SIZE,
+ .isc_rx_nsegments = ICE_MAX_RX_SEGS,
+ .isc_rx_maxsegsize = ICE_MAX_FRAME_SIZE,
+
+ .isc_nfl = 1,
+ .isc_ntxqs = 1,
+ .isc_nrxqs = 1,
+
+ .isc_admin_intrcnt = 1,
+ .isc_vendor_info = ice_vendor_info_array,
+ .isc_driver_version = __DECONST(char *, ice_driver_version),
+ .isc_driver = &ice_iflib_driver,
+
+ /*
+ * IFLIB_NEED_SCRATCH ensures that mbufs have scratch space available
+ * for hardware checksum offload
+ *
+ * IFLIB_TSO_INIT_IP ensures that the TSO packets have zeroed out the
+ * IP sum field, required by our hardware to calculate valid TSO
+ * checksums.
+ *
+ * IFLIB_ADMIN_ALWAYS_RUN ensures that the administrative task runs
+ * even when the interface is down.
+ *
+ * IFLIB_SKIP_MSIX allows the driver to handle allocating MSI-X
+ * vectors manually instead of relying on iflib code to do this.
+ */
+ .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP |
+ IFLIB_ADMIN_ALWAYS_RUN | IFLIB_SKIP_MSIX,
+
+ .isc_nrxd_min = {ICE_MIN_DESC_COUNT},
+ .isc_ntxd_min = {ICE_MIN_DESC_COUNT},
+ .isc_nrxd_max = {ICE_IFLIB_MAX_DESC_COUNT},
+ .isc_ntxd_max = {ICE_IFLIB_MAX_DESC_COUNT},
+ .isc_nrxd_default = {ICE_DEFAULT_DESC_COUNT},
+ .isc_ntxd_default = {ICE_DEFAULT_DESC_COUNT},
+};
+
+/**
+ * @var ice_devclass
+ * @brief ice driver device class
+ *
+ * device class used to setup the ice driver module kobject class.
+ */
+devclass_t ice_devclass;
+DRIVER_MODULE(ice, pci, ice_driver, ice_devclass, ice_module_event_handler, 0);
+
+MODULE_VERSION(ice, 1);
+MODULE_DEPEND(ice, pci, 1, 1, 1);
+MODULE_DEPEND(ice, ether, 1, 1, 1);
+MODULE_DEPEND(ice, iflib, 1, 1, 1);
+MODULE_DEPEND(ice, firmware, 1, 1, 1);
+
+IFLIB_PNP_INFO(pci, ice, ice_vendor_info_array);
+
+/* Static driver-wide sysctls */
+#include "ice_iflib_sysctls.h"
+
+/**
+ * ice_pci_mapping - Map PCI BAR memory
+ * @sc: device private softc
+ *
+ * Map PCI BAR 0 for device operation.
+ */
+static int
+ice_pci_mapping(struct ice_softc *sc)
+{
+ int rc;
+
+ /* Map BAR0 */
+ rc = ice_map_bar(sc->dev, &sc->bar0, 0);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/**
+ * ice_free_pci_mapping - Release PCI BAR memory
+ * @sc: device private softc
+ *
+ * Release PCI BARs which were previously mapped by ice_pci_mapping().
+ */
+static void
+ice_free_pci_mapping(struct ice_softc *sc)
+{
+ /* Free BAR0 */
+ ice_free_bar(sc->dev, &sc->bar0);
+}
+
+/*
+ * Device methods
+ */
+
+/**
+ * ice_register - register device method callback
+ * @dev: the device being registered
+ *
+ * Returns a pointer to the shared context structure, which is used by iflib.
+ */
+static void *
+ice_register(device_t dev __unused)
+{
+ return &ice_sctx;
+} /* ice_register */
+
+/**
+ * ice_setup_scctx - Setup the iflib softc context structure
+ * @sc: the device private structure
+ *
+ * Setup the parameters in if_softc_ctx_t structure used by the iflib stack
+ * when loading.
+ */
+static void
+ice_setup_scctx(struct ice_softc *sc)
+{
+ if_softc_ctx_t scctx = sc->scctx;
+ struct ice_hw *hw = &sc->hw;
+ bool safe_mode, recovery_mode;
+
+ safe_mode = ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE);
+ recovery_mode = ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE);
+
+ /*
+ * If the driver loads in Safe mode or Recovery mode, limit iflib to
+ * a single queue pair.
+ */
+ if (safe_mode || recovery_mode) {
+ scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
+ scctx->isc_ntxqsets_max = 1;
+ scctx->isc_nrxqsets_max = 1;
+ } else {
+ /*
+ * iflib initially sets the isc_ntxqsets and isc_nrxqsets to
+ * the values of the override sysctls. Cache these initial
+ * values so that the driver can be aware of what the iflib
+ * sysctl value is when setting up MSI-X vectors.
+ */
+ sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets;
+ sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets;
+
+ if (scctx->isc_ntxqsets == 0)
+ scctx->isc_ntxqsets = hw->func_caps.common_cap.rss_table_size;
+ if (scctx->isc_nrxqsets == 0)
+ scctx->isc_nrxqsets = hw->func_caps.common_cap.rss_table_size;
+
+ scctx->isc_ntxqsets_max = hw->func_caps.common_cap.num_txq;
+ scctx->isc_nrxqsets_max = hw->func_caps.common_cap.num_rxq;
+
+ /*
+ * Sanity check that the iflib sysctl values are within the
+ * maximum supported range.
+ */
+ if (sc->ifc_sysctl_ntxqs > scctx->isc_ntxqsets_max)
+ sc->ifc_sysctl_ntxqs = scctx->isc_ntxqsets_max;
+ if (sc->ifc_sysctl_nrxqs > scctx->isc_nrxqsets_max)
+ sc->ifc_sysctl_nrxqs = scctx->isc_nrxqsets_max;
+ }
+
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
+ * sizeof(struct ice_tx_desc), DBA_ALIGN);
+ scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
+ * sizeof(union ice_32b_rx_flex_desc), DBA_ALIGN);
+
+ scctx->isc_tx_nsegments = ICE_MAX_TX_SEGS;
+ scctx->isc_tx_tso_segments_max = ICE_MAX_TSO_SEGS;
+ scctx->isc_tx_tso_size_max = ICE_TSO_SIZE;
+ scctx->isc_tx_tso_segsize_max = ICE_MAX_DMA_SEG_SIZE;
+
+ scctx->isc_msix_bar = PCIR_BAR(ICE_MSIX_BAR);
+ scctx->isc_rss_table_size = hw->func_caps.common_cap.rss_table_size;
+
+ /*
+ * If the driver loads in recovery mode, disable Tx/Rx functionality
+ */
+ if (recovery_mode)
+ scctx->isc_txrx = &ice_recovery_txrx;
+ else
+ scctx->isc_txrx = &ice_txrx;
+
+ /*
+ * If the driver loads in Safe mode or Recovery mode, disable
+ * advanced features including hardware offloads.
+ */
+ if (safe_mode || recovery_mode) {
+ scctx->isc_capenable = ICE_SAFE_CAPS;
+ scctx->isc_tx_csum_flags = 0;
+ } else {
+ scctx->isc_capenable = ICE_FULL_CAPS;
+ scctx->isc_tx_csum_flags = ICE_CSUM_OFFLOAD;
+ }
+
+ scctx->isc_capabilities = scctx->isc_capenable;
+} /* ice_setup_scctx */
+
+/**
+ * ice_if_attach_pre - Early device attach logic
+ * @ctx: the iflib context structure
+ *
+ * Called by iflib during the attach process. Earliest main driver entry
+ * point which performs necessary hardware and driver initialization. Called
+ * before the Tx and Rx queues are allocated.
+ */
+static int
+ice_if_attach_pre(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ enum ice_fw_modes fw_mode;
+ enum ice_status status;
+ if_softc_ctx_t scctx;
+ struct ice_hw *hw;
+ device_t dev;
+ int err;
+
+ device_printf(iflib_get_dev(ctx), "Loading the iflib ice driver\n");
+
+ sc->ctx = ctx;
+ sc->media = iflib_get_media(ctx);
+ sc->sctx = iflib_get_sctx(ctx);
+ sc->iflib_ctx_lock = iflib_ctx_lock_get(ctx);
+
+ dev = sc->dev = iflib_get_dev(ctx);
+ scctx = sc->scctx = iflib_get_softc_ctx(ctx);
+
+ hw = &sc->hw;
+ hw->back = sc;
+
+ snprintf(sc->admin_mtx_name, sizeof(sc->admin_mtx_name),
+ "%s:admin", device_get_nameunit(dev));
+ mtx_init(&sc->admin_mtx, sc->admin_mtx_name, NULL, MTX_DEF);
+ callout_init_mtx(&sc->admin_timer, &sc->admin_mtx, 0);
+
+ ASSERT_CTX_LOCKED(sc);
+
+ if (ice_pci_mapping(sc)) {
+ err = (ENXIO);
+ goto destroy_admin_timer;
+ }
+
+ /* Save off the PCI information */
+ ice_save_pci_info(hw, dev);
+
+ /* create tunables as early as possible */
+ ice_add_device_tunables(sc);
+
+ /* Setup ControlQ lengths */
+ ice_set_ctrlq_len(hw);
+
+ fw_mode = ice_get_fw_mode(hw);
+ if (fw_mode == ICE_FW_MODE_REC) {
+ device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+
+ err = ice_attach_pre_recovery_mode(sc);
+ if (err)
+ goto free_pci_mapping;
+
+ return (0);
+ }
+
+ /* Initialize the hw data structure */
+ status = ice_init_hw(hw);
+ if (status) {
+ if (status == ICE_ERR_FW_API_VER) {
+ /* Enter recovery mode, so that the driver remains
+ * loaded. This way, if the system administrator
+ * cannot update the driver, they may still attempt to
+ * downgrade the NVM.
+ */
+ err = ice_attach_pre_recovery_mode(sc);
+ if (err)
+ goto free_pci_mapping;
+
+ return (0);
+ } else {
+ err = EIO;
+ device_printf(dev, "Unable to initialize hw, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+ goto free_pci_mapping;
+ }
+
+ /* Notify firmware of the device driver version */
+ err = ice_send_version(sc);
+ if (err)
+ goto deinit_hw;
+
+ ice_load_pkg_file(sc);
+
+ err = ice_init_link_events(sc);
+ if (err) {
+ device_printf(dev, "ice_init_link_events failed: %s\n",
+ ice_err_str(err));
+ goto deinit_hw;
+ }
+
+ ice_print_nvm_version(sc);
+
+ ice_init_device_features(sc);
+
+ /* Setup the MAC address */
+ iflib_set_mac(ctx, hw->port_info->mac.lan_addr);
+
+ /* Setup the iflib softc context structure */
+ ice_setup_scctx(sc);
+
+ /* Initialize the Tx queue manager */
+ err = ice_resmgr_init(&sc->tx_qmgr, hw->func_caps.common_cap.num_txq);
+ if (err) {
+ device_printf(dev, "Unable to initialize Tx queue manager: %s\n",
+ ice_err_str(err));
+ goto deinit_hw;
+ }
+
+ /* Initialize the Rx queue manager */
+ err = ice_resmgr_init(&sc->rx_qmgr, hw->func_caps.common_cap.num_rxq);
+ if (err) {
+ device_printf(dev, "Unable to initialize Rx queue manager: %s\n",
+ ice_err_str(err));
+ goto free_tx_qmgr;
+ }
+
+ /* Initialize the interrupt resource manager */
+ err = ice_alloc_intr_tracking(sc);
+ if (err)
+ /* Errors are already printed */
+ goto free_rx_qmgr;
+
+ /* Determine maximum number of VSIs we'll prepare for */
+ sc->num_available_vsi = min(ICE_MAX_VSI_AVAILABLE,
+ hw->func_caps.guar_num_vsi);
+
+ if (!sc->num_available_vsi) {
+ err = EIO;
+ device_printf(dev, "No VSIs allocated to host\n");
+ goto free_intr_tracking;
+ }
+
+ /* Allocate storage for the VSI pointers */
+ sc->all_vsi = (struct ice_vsi **)
+ malloc(sizeof(struct ice_vsi *) * sc->num_available_vsi,
+ M_ICE, M_WAITOK | M_ZERO);
+ if (!sc->all_vsi) {
+ err = ENOMEM;
+ device_printf(dev, "Unable to allocate VSI array\n");
+ goto free_intr_tracking;
+ }
+
+ /*
+ * Prepare the statically allocated primary PF VSI in the softc
+ * structure. Other VSIs will be dynamically allocated as needed.
+ */
+ ice_setup_pf_vsi(sc);
+
+ err = ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max,
+ scctx->isc_nrxqsets_max);
+ if (err) {
+ device_printf(dev, "Unable to allocate VSI Queue maps\n");
+ goto free_main_vsi;
+ }
+
+ /* Allocate MSI-X vectors (due to isc_flags IFLIB_SKIP_MSIX) */
+ err = ice_allocate_msix(sc);
+ if (err)
+ goto free_main_vsi;
+
+ return 0;
+
+free_main_vsi:
+ /* ice_release_vsi will free the queue maps if they were allocated */
+ ice_release_vsi(&sc->pf_vsi);
+ free(sc->all_vsi, M_ICE);
+ sc->all_vsi = NULL;
+free_intr_tracking:
+ ice_free_intr_tracking(sc);
+free_rx_qmgr:
+ ice_resmgr_destroy(&sc->rx_qmgr);
+free_tx_qmgr:
+ ice_resmgr_destroy(&sc->tx_qmgr);
+deinit_hw:
+ ice_deinit_hw(hw);
+free_pci_mapping:
+ ice_free_pci_mapping(sc);
+destroy_admin_timer:
+ mtx_lock(&sc->admin_mtx);
+ callout_stop(&sc->admin_timer);
+ mtx_unlock(&sc->admin_mtx);
+ mtx_destroy(&sc->admin_mtx);
+ return err;
+} /* ice_if_attach_pre */
+
+/**
+ * ice_attach_pre_recovery_mode - Limited driver attach_pre for FW recovery
+ * @sc: the device private softc
+ *
+ * Loads the device driver in limited Firmware Recovery mode, intended to
+ * allow users to update the firmware to attempt to recover the device.
+ *
+ * @remark We may enter recovery mode in case either (a) the firmware is
+ * detected to be in an invalid state and must be re-programmed, or (b) the
+ * driver detects that the loaded firmware has a non-compatible API version
+ * that the driver cannot operate with.
+ */
+static int
+ice_attach_pre_recovery_mode(struct ice_softc *sc)
+{
+ ice_set_state(&sc->state, ICE_STATE_RECOVERY_MODE);
+
+ /* Setup the iflib softc context */
+ ice_setup_scctx(sc);
+
+ /* Setup the PF VSI back pointer */
+ sc->pf_vsi.sc = sc;
+
+ /*
+ * We still need to allocate MSI-X vectors since we need one vector to
+	 * run the administrative interrupt.
+ */
+ return ice_allocate_msix(sc);
+}
+
+/**
+ * ice_update_link_status - notify OS of link state change
+ * @sc: device private softc structure
+ * @update_media: true if we should update media even if link didn't change
+ *
+ * Called to notify iflib core of link status changes. Should be called once
+ * during attach_post, and whenever link status changes during runtime.
+ *
+ * This call only updates the currently supported media types if the link
+ * status changed, or if update_media is set to true.
+ */
+static void
+ice_update_link_status(struct ice_softc *sc, bool update_media)
+{
+ struct ice_hw *hw = &sc->hw;
+
+ /* Never report link up when in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ /* Report link status to iflib only once each time it changes */
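+	/*
+	 * ice_testandset_state() atomically sets the bit and returns its
+	 * previous value, so this block runs only on the first pass after
+	 * the bit has been cleared elsewhere (e.g. while rebuilding after
+	 * a reset).
+	 */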
+ if (!ice_testandset_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED)) {
+ if (sc->link_up) { /* link is up */
+ uint64_t baudrate = ice_aq_speed_to_rate(sc->hw.port_info);
+
+ iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate);
+
+ ice_link_up_msg(sc);
+
+ update_media = true;
+ } else { /* link is down */
+ iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
+
+ update_media = true;
+ }
+ }
+
+ /* Update the supported media types */
+ if (update_media) {
+ enum ice_status status = ice_add_media_types(sc, sc->media);
+ if (status)
+ device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+
+ /* TODO: notify VFs of link state change */
+}
+
+/**
+ * ice_if_attach_post - Late device attach logic
+ * @ctx: the iflib context structure
+ *
+ * Called by iflib to finish up attaching the device. Performs any attach
+ * logic which must wait until after the Tx and Rx queues have been
+ * allocated.
+ */
+static int
+ice_if_attach_post(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+ int err;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* We don't yet support loading if MSI-X is not supported */
+ if (sc->scctx->isc_intr != IFLIB_INTR_MSIX) {
+ device_printf(sc->dev, "The ice driver does not support loading without MSI-X\n");
+ return (ENOTSUP);
+ }
+
+ /* The ifnet structure hasn't yet been initialized when the attach_pre
+	 * handler is called, so wait until attach_post to set up the
+ * isc_max_frame_size.
+ */
+
+ sc->ifp = ifp;
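+	/*
+	 * The maximum frame size is the MTU plus the 14-byte Ethernet header,
+	 * 4-byte CRC, and 4-byte VLAN tag; e.g. the default 1500-byte MTU
+	 * yields a 1522-byte maximum frame.
+	 */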
+ sc->scctx->isc_max_frame_size = ifp->if_mtu +
+ ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
+
+ /*
+ * If we are in recovery mode, only perform a limited subset of
+ * initialization to support NVM recovery.
+ */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
+ ice_attach_post_recovery_mode(sc);
+ return (0);
+ }
+
+ sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size;
+
+ err = ice_initialize_vsi(&sc->pf_vsi);
+ if (err) {
+ device_printf(sc->dev, "Unable to initialize Main VSI: %s\n",
+ ice_err_str(err));
+ return err;
+ }
+
+ /* Configure the main PF VSI for RSS */
+ err = ice_config_rss(&sc->pf_vsi);
+ if (err) {
+ device_printf(sc->dev,
+ "Unable to configure RSS for the main VSI, err %s\n",
+ ice_err_str(err));
+ return err;
+ }
+
+ /* Configure switch to drop transmitted LLDP and PAUSE frames */
+ err = ice_cfg_pf_ethertype_filters(sc);
+ if (err)
+ return err;
+
+ ice_get_and_print_bus_info(sc);
+
+ ice_set_link_management_mode(sc);
+
+ ice_init_saved_phy_cfg(sc);
+
+ ice_add_device_sysctls(sc);
+
+ /* Get DCBX/LLDP state and start DCBX agent */
+ ice_init_dcb_setup(sc);
+
+ /* Setup link configuration parameters */
+ ice_init_link_configuration(sc);
+ ice_update_link_status(sc, true);
+
+ /* Configure interrupt causes for the administrative interrupt */
+ ice_configure_misc_interrupts(sc);
+
+ /* Enable ITR 0 right away, so that we can handle admin interrupts */
+ ice_enable_intr(&sc->hw, sc->irqvs[0].me);
+
+ /* Start the admin timer */
+ mtx_lock(&sc->admin_mtx);
+ callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
+ mtx_unlock(&sc->admin_mtx);
+
+ return 0;
+} /* ice_if_attach_post */
+
+/**
+ * ice_attach_post_recovery_mode - Limited driver attach_post for FW recovery
+ * @sc: the device private softc
+ *
+ * Performs minimal work to prepare the driver to recover an NVM in case the
+ * firmware is in recovery mode.
+ */
+static void
+ice_attach_post_recovery_mode(struct ice_softc *sc)
+{
+ /* Configure interrupt causes for the administrative interrupt */
+ ice_configure_misc_interrupts(sc);
+
+ /* Enable ITR 0 right away, so that we can handle admin interrupts */
+ ice_enable_intr(&sc->hw, sc->irqvs[0].me);
+
+ /* Start the admin timer */
+ mtx_lock(&sc->admin_mtx);
+ callout_reset(&sc->admin_timer, hz/2, ice_admin_timer, sc);
+ mtx_unlock(&sc->admin_mtx);
+}
+
+/**
+ * ice_free_irqvs - Free IRQ vector memory
+ * @sc: the device private softc structure
+ *
+ * Free IRQ vector memory allocated during ice_if_msix_intr_assign.
+ */
+static void
+ice_free_irqvs(struct ice_softc *sc)
+{
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ if_ctx_t ctx = sc->ctx;
+ int i;
+
+ /* If the irqvs array is NULL, then there are no vectors to free */
+ if (sc->irqvs == NULL)
+ return;
+
+ /* Free the IRQ vectors */
+ for (i = 0; i < sc->num_irq_vectors; i++)
+ iflib_irq_free(ctx, &sc->irqvs[i].irq);
+
+ /* Clear the irqv pointers */
+ for (i = 0; i < vsi->num_rx_queues; i++)
+ vsi->rx_queues[i].irqv = NULL;
+
+ for (i = 0; i < vsi->num_tx_queues; i++)
+ vsi->tx_queues[i].irqv = NULL;
+
+ /* Release the vector array memory */
+ free(sc->irqvs, M_ICE);
+ sc->irqvs = NULL;
+ sc->num_irq_vectors = 0;
+}
+
+/**
+ * ice_if_detach - Device driver detach logic
+ * @ctx: iflib context structure
+ *
+ * Perform device shutdown logic to detach the device driver.
+ *
+ * Note that there is no guarantee of the ordering of ice_if_queues_free() and
+ * ice_if_detach(). It is possible for the functions to be called in either
+ * order, and they must not assume a strict ordering.
+ */
+static int
+ice_if_detach(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ int i;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Indicate that we're detaching */
+ ice_set_state(&sc->state, ICE_STATE_DETACHING);
+
+ /* Stop the admin timer */
+ mtx_lock(&sc->admin_mtx);
+ callout_stop(&sc->admin_timer);
+ mtx_unlock(&sc->admin_mtx);
+ mtx_destroy(&sc->admin_mtx);
+
+ /* Free allocated media types */
+ ifmedia_removeall(sc->media);
+
+ /* Free the Tx and Rx sysctl contexts, and assign NULL to the node
+ * pointers. Note, the calls here and those in ice_if_queues_free()
+ * are *BOTH* necessary, as we cannot guarantee which path will be
+ * run first
+ */
+ ice_vsi_del_txqs_ctx(vsi);
+ ice_vsi_del_rxqs_ctx(vsi);
+
+ /* Release MSI-X resources */
+ ice_free_irqvs(sc);
+
+ for (i = 0; i < sc->num_available_vsi; i++) {
+ if (sc->all_vsi[i])
+ ice_release_vsi(sc->all_vsi[i]);
+ }
+
+ if (sc->all_vsi) {
+ free(sc->all_vsi, M_ICE);
+ sc->all_vsi = NULL;
+ }
+
+ /* Release MSI-X memory */
+ pci_release_msi(sc->dev);
+
+ if (sc->msix_table != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ rman_get_rid(sc->msix_table),
+ sc->msix_table);
+ sc->msix_table = NULL;
+ }
+
+ ice_free_intr_tracking(sc);
+
+ /* Destroy the queue managers */
+ ice_resmgr_destroy(&sc->tx_qmgr);
+ ice_resmgr_destroy(&sc->rx_qmgr);
+
+ if (!ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ ice_deinit_hw(&sc->hw);
+
+ ice_free_pci_mapping(sc);
+
+ return 0;
+} /* ice_if_detach */
+
+/**
+ * ice_if_tx_queues_alloc - Allocate Tx queue memory
+ * @ctx: iflib context structure
+ * @vaddrs: virtual addresses for the queue memory
+ * @paddrs: physical addresses for the queue memory
+ * @ntxqs: the number of Tx queues per set (should always be 1)
+ * @ntxqsets: the number of Tx queue sets to allocate
+ *
+ * Called by iflib to allocate Tx queues for the device. Allocates driver
+ * memory to track each queue, the status arrays used for descriptor
+ * status reporting, and Tx queue sysctls.
+ */
+static int
+ice_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int __invariant_only ntxqs, int ntxqsets)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_tx_queue *txq;
+ int err, i, j;
+
+ MPASS(ntxqs == 1);
+ MPASS(sc->scctx->isc_ntxd[0] <= ICE_MAX_DESC_COUNT);
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Do not bother allocating queues if we're in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (0);
+
+ /* Allocate queue structure memory */
+ if (!(vsi->tx_queues =
+ (struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_WAITOK | M_ZERO))) {
+ device_printf(sc->dev, "Unable to allocate Tx queue memory\n");
+ return (ENOMEM);
+ }
+
+ /* Allocate report status arrays */
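+	/* Each tx_rsq entry records the index of a descriptor that was queued
+	 * with the Report Status bit set; the Tx credits update path polls
+	 * the DD bit of those descriptors to detect completions. Unused
+	 * entries hold QIDX_INVALID.
+	 */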
+ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
+ if (!(txq->tx_rsq =
+ (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_WAITOK))) {
+ device_printf(sc->dev, "Unable to allocate tx_rsq memory\n");
+ err = ENOMEM;
+ goto free_tx_queues;
+ }
+ /* Initialize report status array */
+ for (j = 0; j < sc->scctx->isc_ntxd[0]; j++)
+ txq->tx_rsq[j] = QIDX_INVALID;
+ }
+
+ /* Assign queues from PF space to the main VSI */
+ err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap, ntxqsets);
+ if (err) {
+ device_printf(sc->dev, "Unable to assign PF queues: %s\n",
+ ice_err_str(err));
+ goto free_tx_queues;
+ }
+ vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
+
+ /* Add Tx queue sysctls context */
+ ice_vsi_add_txqs_ctx(vsi);
+
+ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
+ txq->me = i;
+ txq->vsi = vsi;
+
+ /* store the queue size for easier access */
+ txq->desc_count = sc->scctx->isc_ntxd[0];
+
+ /* get the virtual and physical address of the hardware queues */
+ txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
+ txq->tx_base = (struct ice_tx_desc *)vaddrs[i];
+ txq->tx_paddr = paddrs[i];
+
+ ice_add_txq_sysctls(txq);
+ }
+
+ vsi->num_tx_queues = ntxqsets;
+
+ return (0);
+
+free_tx_queues:
+ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) {
+ if (txq->tx_rsq != NULL) {
+ free(txq->tx_rsq, M_ICE);
+ txq->tx_rsq = NULL;
+ }
+ }
+ free(vsi->tx_queues, M_ICE);
+ vsi->tx_queues = NULL;
+ return err;
+}
+
+/**
+ * ice_if_rx_queues_alloc - Allocate Rx queue memory
+ * @ctx: iflib context structure
+ * @vaddrs: virtual addresses for the queue memory
+ * @paddrs: physical addresses for the queue memory
+ * @nrxqs: number of Rx queues per set (should always be 1)
+ * @nrxqsets: number of Rx queue sets to allocate
+ *
+ * Called by iflib to allocate Rx queues for the device. Allocates driver
+ * memory to track each queue, as well as sets up the Rx queue sysctls.
+ */
+static int
+ice_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int __invariant_only nrxqs, int nrxqsets)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_rx_queue *rxq;
+ int err, i;
+
+ MPASS(nrxqs == 1);
+ MPASS(sc->scctx->isc_nrxd[0] <= ICE_MAX_DESC_COUNT);
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Do not bother allocating queues if we're in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (0);
+
+ /* Allocate queue structure memory */
+ if (!(vsi->rx_queues =
+ (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_WAITOK | M_ZERO))) {
+ device_printf(sc->dev, "Unable to allocate Rx queue memory\n");
+ return (ENOMEM);
+ }
+
+ /* Assign queues from PF space to the main VSI */
+ err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap, nrxqsets);
+ if (err) {
+ device_printf(sc->dev, "Unable to assign PF queues: %s\n",
+ ice_err_str(err));
+ goto free_rx_queues;
+ }
+ vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
+
+ /* Add Rx queue sysctls context */
+ ice_vsi_add_rxqs_ctx(vsi);
+
+ for (i = 0, rxq = vsi->rx_queues; i < nrxqsets; i++, rxq++) {
+ rxq->me = i;
+ rxq->vsi = vsi;
+
+ /* store the queue size for easier access */
+ rxq->desc_count = sc->scctx->isc_nrxd[0];
+
+ /* get the virtual and physical address of the hardware queues */
+ rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
+ rxq->rx_base = (union ice_32b_rx_flex_desc *)vaddrs[i];
+ rxq->rx_paddr = paddrs[i];
+
+ ice_add_rxq_sysctls(rxq);
+ }
+
+ vsi->num_rx_queues = nrxqsets;
+
+ return (0);
+
+free_rx_queues:
+ free(vsi->rx_queues, M_ICE);
+ vsi->rx_queues = NULL;
+ return err;
+}
+
+/**
+ * ice_if_queues_free - Free queue memory
+ * @ctx: the iflib context structure
+ *
+ * Free queue memory allocated by ice_if_tx_queues_alloc() and
+ * ice_if_rx_queues_alloc().
+ *
+ * There is no guarantee that ice_if_queues_free() and ice_if_detach() will be
+ * called in the same order. It's possible for ice_if_queues_free() to be
+ * called prior to ice_if_detach(), and vice versa.
+ *
+ * For this reason, the main VSI is a static member of the ice_softc, which is
+ * not freed until after iflib finishes calling both of these functions.
+ *
+ * Thus, care must be taken in how we manage the memory being freed by this
+ * function, and in what tasks it can and must perform.
+ */
+static void
+ice_if_queues_free(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_tx_queue *txq;
+ int i;
+
+ /* Free the Tx and Rx sysctl contexts, and assign NULL to the node
+ * pointers. Note, the calls here and those in ice_if_detach()
+ * are *BOTH* necessary, as we cannot guarantee which path will be
+ * run first
+ */
+ ice_vsi_del_txqs_ctx(vsi);
+ ice_vsi_del_rxqs_ctx(vsi);
+
+ /* Release MSI-X IRQ vectors, if not yet released in ice_if_detach */
+ ice_free_irqvs(sc);
+
+ if (vsi->tx_queues != NULL) {
+ /* free the tx_rsq arrays */
+ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
+ if (txq->tx_rsq != NULL) {
+ free(txq->tx_rsq, M_ICE);
+ txq->tx_rsq = NULL;
+ }
+ }
+ free(vsi->tx_queues, M_ICE);
+ vsi->tx_queues = NULL;
+ vsi->num_tx_queues = 0;
+ }
+ if (vsi->rx_queues != NULL) {
+ free(vsi->rx_queues, M_ICE);
+ vsi->rx_queues = NULL;
+ vsi->num_rx_queues = 0;
+ }
+}
+
+/**
+ * ice_msix_que - Fast interrupt handler for MSI-X receive queues
+ * @arg: The Rx queue memory
+ *
+ * Interrupt filter function for iflib MSI-X interrupts. Called by iflib when
+ * an MSI-X interrupt for a given queue is triggered. Currently this just asks
+ * iflib to schedule the main Rx thread.
+ */
+static int
+ice_msix_que(void *arg)
+{
+ struct ice_rx_queue __unused *rxq = (struct ice_rx_queue *)arg;
+
+ /* TODO: dynamic ITR algorithm?? */
+
+ return (FILTER_SCHEDULE_THREAD);
+}
+
+/**
+ * ice_msix_admin - Fast interrupt handler for MSI-X admin interrupt
+ * @arg: pointer to device softc memory
+ *
+ * Called by iflib when an administrative interrupt occurs. Should perform any
+ * fast logic for handling the interrupt cause, and then indicate whether the
+ * admin task needs to be queued.
+ */
+static int
+ice_msix_admin(void *arg)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ u32 oicr;
+
+ /* There is no safe way to modify the enabled miscellaneous causes of
+ * the OICR vector at runtime, as doing so would be prone to race
+ * conditions. Reading PFINT_OICR will unmask the associated interrupt
+ * causes and allow future interrupts to occur. The admin interrupt
+ * vector will not be re-enabled until after we exit this function,
+ * but any delayed tasks must be resilient against possible "late
+ * arrival" interrupts that occur while we're already handling the
+ * task. This is done by using state bits and serializing these
+ * delayed tasks via the admin status task function.
+ */
+ oicr = rd32(hw, PFINT_OICR);
+
+ /* Processing multiple controlq interrupts on a single vector does not
+ * provide an indication of which controlq triggered the interrupt.
+ * We might try reading the INTEVENT bit of the respective PFINT_*_CTL
+ * registers. However, the INTEVENT bit is not guaranteed to be set as
+ * it gets automatically cleared when the hardware acknowledges the
+ * interrupt.
+ *
+	 * This means we don't have a reliable indication of which controlq,
+	 * if any, triggered this interrupt. We'll just notify the
+ * admin task that it should check all the controlqs.
+ */
+ ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
+
+ if (oicr & PFINT_OICR_VFLR_M) {
+ ice_set_state(&sc->state, ICE_STATE_VFLR_PENDING);
+ }
+
+ if (oicr & PFINT_OICR_MAL_DETECT_M) {
+ ice_set_state(&sc->state, ICE_STATE_MDD_PENDING);
+ }
+
+ if (oicr & PFINT_OICR_GRST_M) {
+ u32 reset;
+
+ reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
+ GLGEN_RSTAT_RESET_TYPE_S;
+
+ if (reset == ICE_RESET_CORER)
+ sc->soft_stats.corer_count++;
+ else if (reset == ICE_RESET_GLOBR)
+ sc->soft_stats.globr_count++;
+ else
+ sc->soft_stats.empr_count++;
+
+ /* There are a couple of bits at play for handling resets.
+ * First, the ICE_STATE_RESET_OICR_RECV bit is used to
+ * indicate that the driver has received an OICR with a reset
+ * bit active, indicating that a CORER/GLOBR/EMPR is about to
+ * happen. Second, we set hw->reset_ongoing to indicate that
+ * the hardware is in reset. We will set this back to false as
+ * soon as the driver has determined that the hardware is out
+ * of reset.
+ *
+	 * If the driver wishes to request a reset, it can set one of
+ * the ICE_STATE_RESET_*_REQ bits, which will trigger the
+ * correct type of reset.
+ */
+ if (!ice_testandset_state(&sc->state, ICE_STATE_RESET_OICR_RECV))
+ hw->reset_ongoing = true;
+ }
+
+ if (oicr & PFINT_OICR_ECC_ERR_M) {
+ device_printf(dev, "ECC Error detected!\n");
+ ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
+ }
+
+ if (oicr & PFINT_OICR_PE_CRITERR_M) {
+ device_printf(dev, "Critical Protocol Engine Error detected!\n");
+ ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
+ }
+
+ if (oicr & PFINT_OICR_PCI_EXCEPTION_M) {
+ device_printf(dev, "PCI Exception detected!\n");
+ ice_set_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
+ }
+
+ if (oicr & PFINT_OICR_HMC_ERR_M) {
+ /* Log the HMC errors, but don't disable the interrupt cause */
+ ice_log_hmc_error(hw, dev);
+ }
+
+ return (FILTER_SCHEDULE_THREAD);
+}
+
+/**
+ * ice_allocate_msix - Allocate MSI-X vectors for the interface
+ * @sc: the device private softc
+ *
+ * Map the MSI-X bar, and then request MSI-X vectors in a two-stage process.
+ *
+ * First, determine a suitable total number of vectors based on the number
+ * of CPUs, RSS buckets, the administrative vector, and other demands such as
+ * RDMA.
+ *
+ * Request the desired amount of vectors, and see how many we obtain. If we
+ * don't obtain as many as desired, reduce the demands by lowering the number
+ * of requested queues or reducing the demand from other features such as
+ * RDMA.
+ *
+ * @remark This function is required because the driver sets the
+ * IFLIB_SKIP_MSIX flag indicating that the driver will manage MSI-X vectors
+ * manually.
+ *
+ * @remark This driver will only use MSI-X vectors. If this is not possible,
+ * neither MSI nor legacy interrupts will be tried.
+ *
+ * @post on success this function must set the following scctx parameters:
+ * isc_vectors, isc_nrxqsets, isc_ntxqsets, and isc_intr.
+ *
+ * @returns zero on success or an error code on failure.
+ */
+static int
+ice_allocate_msix(struct ice_softc *sc)
+{
+ bool iflib_override_queue_count = false;
+ if_softc_ctx_t scctx = sc->scctx;
+ device_t dev = sc->dev;
+ cpuset_t cpus;
+ int bar, queues, vectors, requested;
+ int err = 0;
+
+ /* Allocate the MSI-X bar */
+ bar = scctx->isc_msix_bar;
+ sc->msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE);
+ if (!sc->msix_table) {
+ device_printf(dev, "Unable to map MSI-X table\n");
+ return (ENOMEM);
+ }
+
+ /* Check if the iflib queue count sysctls have been set */
+ if (sc->ifc_sysctl_ntxqs || sc->ifc_sysctl_nrxqs)
+ iflib_override_queue_count = true;
+
+ err = bus_get_cpus(dev, INTR_CPUS, sizeof(cpus), &cpus);
+ if (err) {
+ device_printf(dev, "%s: Unable to fetch the CPU list: %s\n",
+ __func__, ice_err_str(err));
+ CPU_COPY(&all_cpus, &cpus);
+ }
+
+ /* Attempt to mimic behavior of iflib_msix_init */
+ if (iflib_override_queue_count) {
+ /*
+ * If the override sysctls have been set, limit the queues to
+ * the number of logical CPUs.
+ */
+ queues = mp_ncpus;
+ } else {
+ /*
+ * Otherwise, limit the queue count to the CPUs associated
+ * with the NUMA node the device is associated with.
+ */
+ queues = CPU_COUNT(&cpus);
+ }
+
+ /* Clamp to the number of RSS buckets */
+ queues = imin(queues, rss_getnumbuckets());
+
+ /*
+ * Clamp the number of queue pairs to the minimum of the requested Tx
+ * and Rx queues.
+ */
+ queues = imin(queues, sc->ifc_sysctl_ntxqs ?: scctx->isc_ntxqsets);
+ queues = imin(queues, sc->ifc_sysctl_nrxqs ?: scctx->isc_nrxqsets);
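+	/*
+	 * Note that "x ?: y" is the GCC/Clang shorthand for "x ? x : y",
+	 * so the sysctl override is used only when it has been set to a
+	 * non-zero value.
+	 */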
+
+ /*
+ * Determine the number of vectors to request. Note that we also need
+ * to allocate one vector for administrative tasks.
+ */
+ requested = queues + 1;
+
+ vectors = requested;
+
+ err = pci_alloc_msix(dev, &vectors);
+ if (err) {
+ device_printf(dev, "Failed to allocate %d MSI-X vectors, err %s\n",
+ vectors, ice_err_str(err));
+ goto err_free_msix_table;
+ }
+
+ /* If we don't receive enough vectors, reduce demands */
+ if (vectors < requested) {
+ int diff = requested - vectors;
+
+ device_printf(dev, "Requested %d MSI-X vectors, but got only %d\n",
+ requested, vectors);
+
+ /*
+ * If we still have a difference, we need to reduce the number
+ * of queue pairs.
+ *
+ * However, we still need at least one vector for the admin
+ * interrupt and one queue pair.
+ */
+ if (queues <= diff) {
+ device_printf(dev, "Unable to allocate sufficient MSI-X vectors\n");
+ err = (ERANGE);
+ goto err_pci_release_msi;
+ }
+
+ queues -= diff;
+ }
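+	/*
+	 * For example, a request for 8 queue pairs plus the admin vector (9
+	 * total) that is only granted 5 vectors ends up with diff == 4, and
+	 * the driver falls back to 4 queue pairs plus the admin vector.
+	 */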
+
+ device_printf(dev, "Using %d Tx and Rx queues\n", queues);
+ device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
+ vectors);
+
+ scctx->isc_vectors = vectors;
+ scctx->isc_nrxqsets = queues;
+ scctx->isc_ntxqsets = queues;
+ scctx->isc_intr = IFLIB_INTR_MSIX;
+
+ /* Interrupt allocation tracking isn't required in recovery mode,
+ * since neither RDMA nor VFs are enabled.
+ */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (0);
+
+ /* Keep track of which interrupt indices are being used for what */
+ sc->lan_vectors = vectors;
+ err = ice_resmgr_assign_contiguous(&sc->imgr, sc->pf_imap, sc->lan_vectors);
+ if (err) {
+ device_printf(dev, "Unable to assign PF interrupt mapping: %s\n",
+ ice_err_str(err));
+ goto err_pci_release_msi;
+ }
+
+ return (0);
+
+err_pci_release_msi:
+ pci_release_msi(dev);
+err_free_msix_table:
+ if (sc->msix_table != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ rman_get_rid(sc->msix_table),
+ sc->msix_table);
+ sc->msix_table = NULL;
+ }
+
+ return (err);
+}
+
+/**
+ * ice_if_msix_intr_assign - Assign MSI-X interrupt vectors to queues
+ * @ctx: the iflib context structure
+ * @msix: the number of vectors we were assigned
+ *
+ * Called by iflib to assign MSI-X vectors to queues. Currently requires that
+ * we get at least the same number of vectors as we have queues, and that we
+ * always have the same number of Tx and Rx queues.
+ *
+ * Tx queues use a softirq instead of using their own hardware interrupt.
+ */
+static int
+ice_if_msix_intr_assign(if_ctx_t ctx, int msix)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ int err, i, vector;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ if (vsi->num_rx_queues != vsi->num_tx_queues) {
+ device_printf(sc->dev,
+ "iflib requested %d Tx queues, and %d Rx queues, but the driver isn't able to support a differing number of Tx and Rx queues\n",
+ vsi->num_tx_queues, vsi->num_rx_queues);
+ return (EOPNOTSUPP);
+ }
+
+ if (msix < (vsi->num_rx_queues + 1)) {
+ device_printf(sc->dev,
+ "Not enough MSI-X vectors to assign one vector to each queue pair\n");
+ return (EOPNOTSUPP);
+ }
+
+ /* Save the number of vectors for future use */
+ sc->num_irq_vectors = vsi->num_rx_queues + 1;
+
+ /* Allocate space to store the IRQ vector data */
+ if (!(sc->irqvs =
+ (struct ice_irq_vector *) malloc(sizeof(struct ice_irq_vector) * (sc->num_irq_vectors),
+ M_ICE, M_NOWAIT))) {
+ device_printf(sc->dev,
+ "Unable to allocate irqv memory\n");
+ return (ENOMEM);
+ }
+
+ /* Administrative interrupt events will use vector 0 */
+ err = iflib_irq_alloc_generic(ctx, &sc->irqvs[0].irq, 1, IFLIB_INTR_ADMIN,
+ ice_msix_admin, sc, 0, "admin");
+ if (err) {
+ device_printf(sc->dev,
+ "Failed to register Admin queue handler: %s\n",
+ ice_err_str(err));
+ goto free_irqvs;
+ }
+ sc->irqvs[0].me = 0;
+
+ /* Do not allocate queue interrupts when in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (0);
+
+ for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++) {
+ struct ice_rx_queue *rxq = &vsi->rx_queues[i];
+ struct ice_tx_queue *txq = &vsi->tx_queues[i];
+ int rid = vector + 1;
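+		/* MSI-X resource IDs are 1-based and rid 1 was used for the
+		 * admin vector above, so queue vector N maps to rid N + 1.
+		 */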
+ char irq_name[16];
+
+ snprintf(irq_name, sizeof(irq_name), "rxq%d", i);
+ err = iflib_irq_alloc_generic(ctx, &sc->irqvs[vector].irq, rid,
+ IFLIB_INTR_RX, ice_msix_que,
+ rxq, rxq->me, irq_name);
+ if (err) {
+ device_printf(sc->dev,
+ "Failed to allocate q int %d err: %s\n",
+ i, ice_err_str(err));
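+			/* Step back so the unwind loop below frees only the
+			 * queue vectors that were successfully allocated.
+			 */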
+ vector--;
+ i--;
+ goto fail;
+ }
+ sc->irqvs[vector].me = vector;
+ rxq->irqv = &sc->irqvs[vector];
+
+ bzero(irq_name, sizeof(irq_name));
+
+ snprintf(irq_name, sizeof(irq_name), "txq%d", i);
+ iflib_softirq_alloc_generic(ctx, &sc->irqvs[vector].irq,
+ IFLIB_INTR_TX, txq,
+ txq->me, irq_name);
+ txq->irqv = &sc->irqvs[vector];
+ }
+
+ return (0);
+fail:
+ for (; i >= 0; i--, vector--)
+ iflib_irq_free(ctx, &sc->irqvs[vector].irq);
+ iflib_irq_free(ctx, &sc->irqvs[0].irq);
+free_irqvs:
+ free(sc->irqvs, M_ICE);
+ sc->irqvs = NULL;
+ return err;
+}
+
+/**
+ * ice_if_mtu_set - Set the device MTU
+ * @ctx: iflib context structure
+ * @mtu: the MTU requested
+ *
+ * Called by iflib to configure the device's Maximum Transmission Unit (MTU).
+ *
+ * @pre assumes the caller holds the iflib CTX lock
+ */
+static int
+ice_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Do not support configuration when in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (ENOSYS);
+
+ if (mtu < ICE_MIN_MTU || mtu > ICE_MAX_MTU)
+ return (EINVAL);
+
+ sc->scctx->isc_max_frame_size = mtu +
+ ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
+
+ sc->pf_vsi.max_frame_size = sc->scctx->isc_max_frame_size;
+
+ return (0);
+}
+
+/**
+ * ice_if_intr_enable - Enable device interrupts
+ * @ctx: iflib context structure
+ *
+ * Called by iflib to request enabling device interrupts.
+ */
+static void
+ice_if_intr_enable(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_hw *hw = &sc->hw;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Enable ITR 0 */
+ ice_enable_intr(hw, sc->irqvs[0].me);
+
+ /* Do not enable queue interrupts in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ /* Enable all queue interrupts */
+ for (int i = 0; i < vsi->num_rx_queues; i++)
+ ice_enable_intr(hw, vsi->rx_queues[i].irqv->me);
+}
+
+/**
+ * ice_if_intr_disable - Disable device interrupts
+ * @ctx: iflib context structure
+ *
+ * Called by iflib to request disabling device interrupts.
+ */
+static void
+ice_if_intr_disable(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_hw *hw = &sc->hw;
+ unsigned int i;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* IFDI_INTR_DISABLE may be called prior to interrupts actually being
+ * assigned to queues. Instead of assuming that the interrupt
+ * assignment in the rx_queues structure is valid, just disable all
+ * possible interrupts
+ *
+ * Note that we choose not to disable ITR 0 because this handles the
+ * AdminQ interrupts, and we want to keep processing these even when
+ * the interface is offline.
+ */
+ for (i = 1; i < hw->func_caps.common_cap.num_msix_vectors; i++)
+ ice_disable_intr(hw, i);
+}
+
+/**
+ * ice_if_rx_queue_intr_enable - Enable a specific Rx queue interrupt
+ * @ctx: iflib context structure
+ * @rxqid: the Rx queue to enable
+ *
+ * Enable a specific Rx queue interrupt.
+ *
+ * This function is not protected by the iflib CTX lock.
+ */
+static int
+ice_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_hw *hw = &sc->hw;
+
+ /* Do not enable queue interrupts in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (ENOSYS);
+
+ ice_enable_intr(hw, vsi->rx_queues[rxqid].irqv->me);
+ return (0);
+}
+
+/**
+ * ice_if_tx_queue_intr_enable - Enable a specific Tx queue interrupt
+ * @ctx: iflib context structure
+ * @txqid: the Tx queue to enable
+ *
+ * Enable a specific Tx queue interrupt.
+ *
+ * This function is not protected by the iflib CTX lock.
+ */
+static int
+ice_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_hw *hw = &sc->hw;
+
+ /* Do not enable queue interrupts in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (ENOSYS);
+
+ ice_enable_intr(hw, vsi->tx_queues[txqid].irqv->me);
+ return (0);
+}
+
+/**
+ * ice_if_promisc_set - Set device promiscuous mode
+ * @ctx: iflib context structure
+ * @flags: promiscuous flags to configure
+ *
+ * Called by iflib to configure device promiscuous mode.
+ *
+ * @remark Calls to this function will always overwrite the previous setting
+ */
+static int
+ice_if_promisc_set(if_ctx_t ctx, int flags)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ bool promisc_enable = flags & IFF_PROMISC;
+ bool multi_enable = flags & IFF_ALLMULTI;
+
+ /* Do not support configuration when in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return (ENOSYS);
+
+ if (multi_enable)
+ return (EOPNOTSUPP);
+
+ if (promisc_enable) {
+ status = ice_set_vsi_promisc(hw, sc->pf_vsi.idx,
+ ICE_VSI_PROMISC_MASK, 0);
+ if (status && status != ICE_ERR_ALREADY_EXISTS) {
+ device_printf(dev,
+ "Failed to enable promiscuous mode for PF VSI, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+ } else {
+ status = ice_clear_vsi_promisc(hw, sc->pf_vsi.idx,
+ ICE_VSI_PROMISC_MASK, 0);
+ if (status) {
+ device_printf(dev,
+ "Failed to disable promiscuous mode for PF VSI, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ return (EIO);
+ }
+ }
+
+ return (0);
+}
+
+/**
+ * ice_if_media_change - Change device media
+ * @ctx: device ctx structure
+ *
+ * Called by iflib when a media change is requested. This operation is not
+ * supported by the hardware, so we just return an error code.
+ */
+static int
+ice_if_media_change(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ device_printf(sc->dev, "Media change is not supported.\n");
+ return (ENODEV);
+}
+
+/**
+ * ice_if_media_status - Report current device media
+ * @ctx: iflib context structure
+ * @ifmr: ifmedia request structure to update
+ *
+ * Updates the provided ifmr with current device media status, including link
+ * status and media type.
+ */
+static void
+ice_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ice_link_status *li = &sc->hw.port_info->phy.link_info;
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ /* Never report link up or media types when in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ if (!sc->link_up)
+ return;
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+ ifmr->ifm_active |= IFM_FDX;
+
+ if (li->phy_type_low)
+ ifmr->ifm_active |= ice_get_phy_type_low(li->phy_type_low);
+ else if (li->phy_type_high)
+ ifmr->ifm_active |= ice_get_phy_type_high(li->phy_type_high);
+ else
+ ifmr->ifm_active |= IFM_UNKNOWN;
+
+ /* Report flow control status as well */
+ if (li->an_info & ICE_AQ_LINK_PAUSE_TX)
+ ifmr->ifm_active |= IFM_ETH_TXPAUSE;
+ if (li->an_info & ICE_AQ_LINK_PAUSE_RX)
+ ifmr->ifm_active |= IFM_ETH_RXPAUSE;
+}
+
+/**
+ * ice_init_tx_tracking - Initialize Tx queue software tracking values
+ * @vsi: the VSI to initialize
+ *
+ * Initialize Tx queue software tracking values, including the Report Status
+ * queue, and related software tracking values.
+ */
+static void
+ice_init_tx_tracking(struct ice_vsi *vsi)
+{
+ struct ice_tx_queue *txq;
+ size_t j;
+ int i;
+
+ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
+
+ txq->tx_rs_cidx = txq->tx_rs_pidx = 0;
+
+ /* Initialize the last processed descriptor to be the end of
+ * the ring, rather than the start, so that we avoid an
+ * off-by-one error in ice_ift_txd_credits_update for the
+ * first packet.
+ */
+ txq->tx_cidx_processed = txq->desc_count - 1;
+
+ for (j = 0; j < txq->desc_count; j++)
+ txq->tx_rsq[j] = QIDX_INVALID;
+ }
+}
+
+/**
+ * ice_update_rx_mbuf_sz - Update the Rx buffer size for all queues
+ * @sc: the device softc
+ *
+ * Called to update the Rx queue mbuf_sz parameter for configuring the receive
+ * buffer sizes when programming hardware.
+ */
+static void
+ice_update_rx_mbuf_sz(struct ice_softc *sc)
+{
+ uint32_t mbuf_sz = iflib_get_rx_mbuf_sz(sc->ctx);
+ struct ice_vsi *vsi = &sc->pf_vsi;
+
+ MPASS(mbuf_sz <= UINT16_MAX);
+ vsi->mbuf_sz = mbuf_sz;
+}
+
+/**
+ * ice_if_init - Initialize the device
+ * @ctx: iflib ctx structure
+ *
+ * Called by iflib to bring the device up, i.e. ifconfig ice0 up. Initializes
+ * device filters and prepares the Tx and Rx engines.
+ *
+ * @pre assumes the caller holds the iflib CTX lock
+ */
+static void
+ice_if_init(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ device_t dev = sc->dev;
+ int err;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
+ device_printf(sc->dev, "request to start interface cannot be completed as the device failed to reset\n");
+ return;
+ }
+
+ if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
+ device_printf(sc->dev, "request to start interface while device is prepared for impending reset\n");
+ return;
+ }
+
+ ice_update_rx_mbuf_sz(sc);
+
+ /* Update the MAC address... User might use a LAA */
+ err = ice_update_laa_mac(sc);
+ if (err) {
+ device_printf(dev,
+ "LAA address change failed, err %s\n",
+ ice_err_str(err));
+ return;
+ }
+
+ /* Initialize software Tx tracking values */
+ ice_init_tx_tracking(&sc->pf_vsi);
+
+ err = ice_cfg_vsi_for_tx(&sc->pf_vsi);
+ if (err) {
+ device_printf(dev,
+ "Unable to configure the main VSI for Tx: %s\n",
+ ice_err_str(err));
+ return;
+ }
+
+ err = ice_cfg_vsi_for_rx(&sc->pf_vsi);
+ if (err) {
+ device_printf(dev,
+ "Unable to configure the main VSI for Rx: %s\n",
+ ice_err_str(err));
+ goto err_cleanup_tx;
+ }
+
+ err = ice_control_rx_queues(&sc->pf_vsi, true);
+ if (err) {
+ device_printf(dev,
+		    "Unable to enable Rx rings: %s\n",
+ ice_err_str(err));
+ goto err_cleanup_tx;
+ }
+
+ err = ice_cfg_pf_default_mac_filters(sc);
+ if (err) {
+ device_printf(dev,
+ "Unable to configure default MAC filters: %s\n",
+ ice_err_str(err));
+ goto err_stop_rx;
+ }
+
+ /* We use software interrupts for Tx, so we only program the hardware
+ * interrupts for Rx.
+ */
+ ice_configure_rxq_interrupts(&sc->pf_vsi);
+ ice_configure_rx_itr(&sc->pf_vsi);
+
+ /* Configure promiscuous mode */
+ ice_if_promisc_set(ctx, if_getflags(sc->ifp));
+
+ ice_set_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED);
+ return;
+
+err_stop_rx:
+ ice_control_rx_queues(&sc->pf_vsi, false);
+err_cleanup_tx:
+ ice_vsi_disable_tx(&sc->pf_vsi);
+}
+
+/**
+ * ice_poll_for_media_avail - Re-enable link if media is detected
+ * @sc: device private structure
+ *
+ * Intended to be called from the driver's timer function, this function
+ * sends the Get Link Status AQ command and re-enables HW link if the
+ * command says that media is available.
+ *
+ * If the driver doesn't have the "NO_MEDIA" state set, then this does nothing,
+ * since media removal events are supposed to be sent to the driver through
+ * a link status event.
+ */
+static void
+ice_poll_for_media_avail(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct ice_port_info *pi = hw->port_info;
+
+ if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) {
+ pi->phy.get_link_info = true;
+ ice_get_link_status(pi, &sc->link_up);
+
+ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ enum ice_status status;
+
+ /* Re-enable link and re-apply user link settings */
+ ice_apply_saved_phy_cfg(sc);
+
+ /* Update the OS about changes in media capability */
+ status = ice_add_media_types(sc, sc->media);
+ if (status)
+ device_printf(sc->dev, "Error adding device media types: %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+
+ ice_clear_state(&sc->state, ICE_STATE_NO_MEDIA);
+ }
+ }
+}
+
+/**
+ * ice_if_timer - called by iflib periodically
+ * @ctx: iflib ctx structure
+ * @qid: the queue this timer was called for
+ *
+ * This callback is triggered by iflib periodically. We use it to update the
+ * hw statistics.
+ *
+ * @remark this function is not protected by the iflib CTX lock.
+ */
+static void
+ice_if_timer(if_ctx_t ctx, uint16_t qid)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ uint64_t prev_link_xoff_rx = sc->stats.cur.link_xoff_rx;
+
+ if (qid != 0)
+ return;
+
+ /* Do not attempt to update stats when in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ /* Update device statistics */
+ ice_update_pf_stats(sc);
+
+ /*
+ * For proper watchdog management, the iflib stack needs to know if
+ * we've been paused during the last interval. Check if the
+ * link_xoff_rx stat changed, and set the isc_pause_frames, if so.
+ */
+ if (sc->stats.cur.link_xoff_rx != prev_link_xoff_rx)
+ sc->scctx->isc_pause_frames = 1;
+
+ /* Update the primary VSI stats */
+ ice_update_vsi_hw_stats(&sc->pf_vsi);
+}
+
+/**
+ * ice_admin_timer - called periodically to trigger the admin task
+ * @arg: callout(9) argument pointing to the device private softc structure
+ *
+ * Timer function used as part of a callout(9) timer that will periodically
+ * trigger the admin task, even when the interface is down.
+ *
+ * @remark this function is not called by iflib and is not protected by the
+ * iflib CTX lock.
+ *
+ * @remark because this is a callout function, it cannot sleep and should not
+ * attempt taking the iflib CTX lock.
+ */
+static void
+ice_admin_timer(void *arg)
+{
+ struct ice_softc *sc = (struct ice_softc *)arg;
+
+ /* Fire off the admin task */
+ iflib_admin_intr_deferred(sc->ctx);
+
+ /* Reschedule the admin timer */
+ callout_schedule(&sc->admin_timer, hz/2);
+}
+
+/**
+ * ice_transition_recovery_mode - Transition to recovery mode
+ * @sc: the device private softc
+ *
+ * Called when the driver detects that the firmware has entered recovery mode
+ * at run time.
+ */
+static void
+ice_transition_recovery_mode(struct ice_softc *sc)
+{
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ int i;
+
+ device_printf(sc->dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+
+ /* Tell the stack that the link has gone down */
+ iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
+
+ /* Request that the device be re-initialized */
+ ice_request_stack_reinit(sc);
+
+ ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+ ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
+
+ ice_vsi_del_txqs_ctx(vsi);
+ ice_vsi_del_rxqs_ctx(vsi);
+
+ for (i = 0; i < sc->num_available_vsi; i++) {
+ if (sc->all_vsi[i])
+ ice_release_vsi(sc->all_vsi[i]);
+ }
+ sc->num_available_vsi = 0;
+
+ if (sc->all_vsi) {
+ free(sc->all_vsi, M_ICE);
+ sc->all_vsi = NULL;
+ }
+
+ /* Destroy the interrupt manager */
+ ice_resmgr_destroy(&sc->imgr);
+ /* Destroy the queue managers */
+ ice_resmgr_destroy(&sc->tx_qmgr);
+ ice_resmgr_destroy(&sc->rx_qmgr);
+
+ ice_deinit_hw(&sc->hw);
+}
+
+/**
+ * ice_transition_safe_mode - Transition to safe mode
+ * @sc: the device private softc
+ *
+ * Called when the driver attempts to reload the DDP package during a device
+ * reset, and the new download fails. If so, we must transition to safe mode
+ * at run time.
+ *
+ * @remark although safe mode normally allocates only a single queue, we can't
+ * change the number of queues dynamically when using iflib. Due to this, we
+ * do not attempt to reduce the number of queues.
+ */
+static void
+ice_transition_safe_mode(struct ice_softc *sc)
+{
+ /* Indicate that we are in Safe mode */
+ ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en);
+
+ ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+ ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
+
+ ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
+ ice_clear_bit(ICE_FEATURE_RSS, sc->feat_en);
+}
+
+/**
+ * ice_if_update_admin_status - update admin status
+ * @ctx: iflib ctx structure
+ *
+ * Called by iflib to update the admin status. For our purposes, this means
+ * checking the adminq and updating the link status. It's ultimately triggered by
+ * our admin interrupt, or by the ice_if_timer periodically.
+ *
+ * @pre assumes the caller holds the iflib CTX lock
+ */
+static void
+ice_if_update_admin_status(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ enum ice_fw_modes fw_mode;
+ bool reschedule = false;
+ u16 pending = 0;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Check if the firmware entered recovery mode at run time */
+ fw_mode = ice_get_fw_mode(&sc->hw);
+ if (fw_mode == ICE_FW_MODE_REC) {
+ if (!ice_testandset_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
+ /* If we just entered recovery mode, log a warning to
+ * the system administrator and deinit driver state
+ * that is no longer functional.
+ */
+ ice_transition_recovery_mode(sc);
+ }
+ } else if (fw_mode == ICE_FW_MODE_ROLLBACK) {
+ if (!ice_testandset_state(&sc->state, ICE_STATE_ROLLBACK_MODE)) {
+ /* Rollback mode isn't fatal, but we don't want to
+ * repeatedly post a message about it.
+ */
+ ice_print_rollback_msg(&sc->hw);
+ }
+ }
+
+ /* Handle global reset events */
+ ice_handle_reset_event(sc);
+
+ /* Handle PF reset requests */
+ ice_handle_pf_reset_request(sc);
+
+ /* Handle MDD events */
+ ice_handle_mdd_event(sc);
+
+ if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED) ||
+ ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET) ||
+ ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
+ /*
+ * If we know the control queues are disabled, skip processing
+ * the control queues entirely.
+ */
+ ;
+ } else if (ice_testandclear_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING)) {
+ ice_process_ctrlq(sc, ICE_CTL_Q_ADMIN, &pending);
+ if (pending > 0)
+ reschedule = true;
+
+ ice_process_ctrlq(sc, ICE_CTL_Q_MAILBOX, &pending);
+ if (pending > 0)
+ reschedule = true;
+ }
+
+ /* Poll for link up */
+ ice_poll_for_media_avail(sc);
+
+ /* Check and update link status */
+ ice_update_link_status(sc, false);
+
+ /*
+ * If there are still messages to process, we need to reschedule
+ * ourselves. Otherwise, we can just re-enable the interrupt. We'll be
+ * woken up at the next interrupt or timer event.
+ */
+ if (reschedule) {
+ ice_set_state(&sc->state, ICE_STATE_CONTROLQ_EVENT_PENDING);
+ iflib_admin_intr_deferred(ctx);
+ } else {
+ ice_enable_intr(&sc->hw, sc->irqvs[0].me);
+ }
+}
+
+/**
+ * ice_prepare_for_reset - Prepare device for an impending reset
+ * @sc: The device private softc
+ *
+ * Prepare the driver for an impending reset, shutting down VSIs, clearing the
+ * scheduler setup, and shutting down controlqs. Uses the
+ * ICE_STATE_PREPARED_FOR_RESET to indicate whether we've already prepared the
+ * driver for reset or not.
+ */
+static void
+ice_prepare_for_reset(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+
+ /* If we're already prepared, there's nothing to do */
+ if (ice_testandset_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET))
+ return;
+
+ log(LOG_INFO, "%s: preparing to reset device logic\n", sc->ifp->if_xname);
+
+ /* In recovery mode, hardware is not initialized */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ /* Release the main PF VSI queue mappings */
+ ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
+ sc->pf_vsi.num_tx_queues);
+ ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
+ sc->pf_vsi.num_rx_queues);
+
+ ice_clear_hw_tbls(hw);
+
+ if (hw->port_info)
+ ice_sched_clear_port(hw->port_info);
+
+ ice_shutdown_all_ctrlq(hw);
+}
+
+/**
+ * ice_rebuild_pf_vsi_qmap - Rebuild the main PF VSI queue mapping
+ * @sc: the device softc pointer
+ *
+ * Loops over the Tx and Rx queues for the main PF VSI and reassigns the queue
+ * mapping after a reset occurred.
+ */
+static int
+ice_rebuild_pf_vsi_qmap(struct ice_softc *sc)
+{
+ struct ice_vsi *vsi = &sc->pf_vsi;
+ struct ice_tx_queue *txq;
+ struct ice_rx_queue *rxq;
+ int err, i;
+
+ /* Re-assign Tx queues from PF space to the main VSI */
+ err = ice_resmgr_assign_contiguous(&sc->tx_qmgr, vsi->tx_qmap,
+ vsi->num_tx_queues);
+ if (err) {
+ device_printf(sc->dev, "Unable to re-assign PF Tx queues: %s\n",
+ ice_err_str(err));
+ return (err);
+ }
+
+ /* Re-assign Rx queues from PF space to this VSI */
+ err = ice_resmgr_assign_contiguous(&sc->rx_qmgr, vsi->rx_qmap,
+ vsi->num_rx_queues);
+ if (err) {
+ device_printf(sc->dev, "Unable to re-assign PF Rx queues: %s\n",
+ ice_err_str(err));
+ goto err_release_tx_queues;
+ }
+
+ vsi->qmap_type = ICE_RESMGR_ALLOC_CONTIGUOUS;
+
+ /* Re-assign Tx queue tail pointers */
+ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++)
+ txq->tail = QTX_COMM_DBELL(vsi->tx_qmap[i]);
+
+ /* Re-assign Rx queue tail pointers */
+ for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++)
+ rxq->tail = QRX_TAIL(vsi->rx_qmap[i]);
+
+ return (0);
+
+err_release_tx_queues:
+ ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
+ sc->pf_vsi.num_tx_queues);
+
+ return (err);
+}
+
+/* determine if the iflib context is active */
+#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
+
+/**
+ * ice_rebuild_recovery_mode - Rebuild driver state while in recovery mode
+ * @sc: The device private softc
+ *
+ * Handle a driver rebuild while in recovery mode. This will only rebuild the
+ * limited functionality supported while in recovery mode.
+ */
+static void
+ice_rebuild_recovery_mode(struct ice_softc *sc)
+{
+ device_t dev = sc->dev;
+
+ /* enable PCIe bus master */
+ pci_enable_busmaster(dev);
+
+ /* Configure interrupt causes for the administrative interrupt */
+ ice_configure_misc_interrupts(sc);
+
+ /* Enable ITR 0 right away, so that we can handle admin interrupts */
+ ice_enable_intr(&sc->hw, sc->irqvs[0].me);
+
+ /* Now that the rebuild is finished, we're no longer prepared to reset */
+ ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
+
+ log(LOG_INFO, "%s: device rebuild successful\n", sc->ifp->if_xname);
+
+ /* In order to completely restore device functionality, the iflib core
+ * needs to be reset. We need to request an iflib reset. Additionally,
+ * because the state of IFC_DO_RESET is cached within task_fn_admin in
+	 * the iflib core, we also want to re-run the admin task so that iflib
+ * resets immediately instead of waiting for the next interrupt.
+ */
+ ice_request_stack_reinit(sc);
+
+ return;
+}
+
+/**
+ * ice_rebuild - Rebuild driver state post reset
+ * @sc: The device private softc
+ *
+ * Restore driver state after a reset occurred. Restart the controlqs, setup
+ * the hardware port, and re-enable the VSIs.
+ */
+static void
+ice_rebuild(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ enum ice_status status;
+ int err;
+
+ sc->rebuild_ticks = ticks;
+
+ /* If we're rebuilding, then a reset has succeeded. */
+ ice_clear_state(&sc->state, ICE_STATE_RESET_FAILED);
+
+ /*
+ * If the firmware is in recovery mode, only restore the limited
+ * functionality supported by recovery mode.
+ */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE)) {
+ ice_rebuild_recovery_mode(sc);
+ return;
+ }
+
+ /* enable PCIe bus master */
+ pci_enable_busmaster(dev);
+
+ status = ice_init_all_ctrlq(hw);
+ if (status) {
+ device_printf(dev, "failed to re-init controlqs, err %s\n",
+ ice_status_str(status));
+ goto err_shutdown_ctrlq;
+ }
+
+ /* Query the allocated resources for Tx scheduler */
+ status = ice_sched_query_res_alloc(hw);
+ if (status) {
+ device_printf(dev,
+ "Failed to query scheduler resources, err %s aq_err %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ goto err_shutdown_ctrlq;
+ }
+
+ err = ice_send_version(sc);
+ if (err)
+ goto err_shutdown_ctrlq;
+
+ err = ice_init_link_events(sc);
+ if (err) {
+ device_printf(dev, "ice_init_link_events failed: %s\n",
+ ice_err_str(err));
+ goto err_shutdown_ctrlq;
+ }
+
+ status = ice_clear_pf_cfg(hw);
+ if (status) {
+ device_printf(dev, "failed to clear PF configuration, err %s\n",
+ ice_status_str(status));
+ goto err_shutdown_ctrlq;
+ }
+
+ ice_clear_pxe_mode(hw);
+
+ status = ice_get_caps(hw);
+ if (status) {
+ device_printf(dev, "failed to get capabilities, err %s\n",
+ ice_status_str(status));
+ goto err_shutdown_ctrlq;
+ }
+
+ status = ice_sched_init_port(hw->port_info);
+ if (status) {
+ device_printf(dev, "failed to initialize port, err %s\n",
+ ice_status_str(status));
+ goto err_sched_cleanup;
+ }
+
+ /* If we previously loaded the package, it needs to be reloaded now */
+ if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) {
+ status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
+ if (status) {
+ ice_log_pkg_init(sc, &status);
+
+ ice_transition_safe_mode(sc);
+ }
+ }
+
+ ice_reset_pf_stats(sc);
+
+ err = ice_rebuild_pf_vsi_qmap(sc);
+ if (err) {
+ device_printf(sc->dev, "Unable to re-assign main VSI queues, err %s\n",
+ ice_err_str(err));
+ goto err_sched_cleanup;
+ }
+ err = ice_initialize_vsi(&sc->pf_vsi);
+ if (err) {
+ device_printf(sc->dev, "Unable to re-initialize Main VSI, err %s\n",
+ ice_err_str(err));
+ goto err_release_queue_allocations;
+ }
+
+ /* Replay all VSI configuration */
+ err = ice_replay_all_vsi_cfg(sc);
+ if (err)
+ goto err_deinit_pf_vsi;
+
+ /* Reconfigure the main PF VSI for RSS */
+ err = ice_config_rss(&sc->pf_vsi);
+ if (err) {
+ device_printf(sc->dev,
+ "Unable to reconfigure RSS for the main VSI, err %s\n",
+ ice_err_str(err));
+ goto err_deinit_pf_vsi;
+ }
+
+ /* Refresh link status */
+ ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);
+ sc->hw.port_info->phy.get_link_info = true;
+ ice_get_link_status(sc->hw.port_info, &sc->link_up);
+ ice_update_link_status(sc, true);
+
+ /* Configure interrupt causes for the administrative interrupt */
+ ice_configure_misc_interrupts(sc);
+
+ /* Enable ITR 0 right away, so that we can handle admin interrupts */
+ ice_enable_intr(&sc->hw, sc->irqvs[0].me);
+
+ /* Now that the rebuild is finished, we're no longer prepared to reset */
+ ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
+
+ log(LOG_INFO, "%s: device rebuild successful\n", sc->ifp->if_xname);
+
+ /* In order to completely restore device functionality, the iflib core
+ * needs to be reset. We need to request an iflib reset. Additionally,
+ * because the state of IFC_DO_RESET is cached within task_fn_admin in
+	 * the iflib core, we also want to re-run the admin task so that iflib
+ * resets immediately instead of waiting for the next interrupt.
+ */
+ ice_request_stack_reinit(sc);
+
+ return;
+
+err_deinit_pf_vsi:
+ ice_deinit_vsi(&sc->pf_vsi);
+err_release_queue_allocations:
+ ice_resmgr_release_map(&sc->tx_qmgr, sc->pf_vsi.tx_qmap,
+ sc->pf_vsi.num_tx_queues);
+ ice_resmgr_release_map(&sc->rx_qmgr, sc->pf_vsi.rx_qmap,
+ sc->pf_vsi.num_rx_queues);
+err_sched_cleanup:
+ ice_sched_cleanup_all(hw);
+err_shutdown_ctrlq:
+ ice_shutdown_all_ctrlq(hw);
+ ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
+ device_printf(dev, "Driver rebuild failed, please reload the device driver\n");
+}
+
+/**
+ * ice_handle_reset_event - Handle reset events triggered by OICR
+ * @sc: The device private softc
+ *
+ * Handle reset events triggered by an OICR notification. This includes CORER,
+ * GLOBR, and EMPR resets triggered by software on this or any other PF or by
+ * firmware.
+ *
+ * @pre assumes the iflib context lock is held, and will unlock it while
+ * waiting for the hardware to finish reset.
+ */
+static void
+ice_handle_reset_event(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+ device_t dev = sc->dev;
+
+ /* When a CORER, GLOBR, or EMPR is about to happen, the hardware will
+ * trigger an OICR interrupt. Our OICR handler will determine when
+ * this occurs and set the ICE_STATE_RESET_OICR_RECV bit as
+ * appropriate.
+ */
+ if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_OICR_RECV))
+ return;
+
+ ice_prepare_for_reset(sc);
+
+ /*
+ * Release the iflib context lock and wait for the device to finish
+ * resetting.
+ */
+ IFLIB_CTX_UNLOCK(sc);
+ status = ice_check_reset(hw);
+ IFLIB_CTX_LOCK(sc);
+ if (status) {
+ device_printf(dev, "Device never came out of reset, err %s\n",
+ ice_status_str(status));
+ ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
+ return;
+ }
+
+ /* We're done with the reset, so we can rebuild driver state */
+ sc->hw.reset_ongoing = false;
+ ice_rebuild(sc);
+
+ /* In the unlikely event that a PF reset request occurs at the same
+ * time as a global reset, clear the request now. This avoids
+ * resetting a second time right after we reset due to a global event.
+ */
+ if (ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))
+ device_printf(dev, "Ignoring PFR request that occurred while a reset was ongoing\n");
+}
+
+/**
+ * ice_handle_pf_reset_request - Initiate PF reset requested by software
+ * @sc: The device private softc
+ *
+ * Initiate a PF reset requested by software. We handle this in the admin task
+ * so that only one thread actually handles driver preparation and cleanup,
+ * rather than having multiple threads possibly attempt to run this code
+ * simultaneously.
+ *
+ * @pre assumes the iflib context lock is held and will unlock it while
+ * waiting for the PF reset to complete.
+ */
+static void
+ice_handle_pf_reset_request(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ enum ice_status status;
+
+ /* Check for PF reset requests */
+ if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))
+ return;
+
+ /* Make sure we're prepared for reset */
+ ice_prepare_for_reset(sc);
+
+ /*
+ * Release the iflib context lock and wait for the device to finish
+ * resetting.
+ */
+ IFLIB_CTX_UNLOCK(sc);
+ status = ice_reset(hw, ICE_RESET_PFR);
+ IFLIB_CTX_LOCK(sc);
+ if (status) {
+ device_printf(sc->dev, "device PF reset failed, err %s\n",
+ ice_status_str(status));
+ ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
+ return;
+ }
+
+ sc->soft_stats.pfr_count++;
+ ice_rebuild(sc);
+}
+
+/**
+ * ice_init_device_features - Init device driver features
+ * @sc: driver softc structure
+ *
+ * @pre assumes that the function capabilities bits have been set up by
+ * ice_init_hw().
+ */
+static void
+ice_init_device_features(struct ice_softc *sc)
+{
+ /*
+ * A failed pkg file download triggers safe mode, disabling advanced
+ * device feature support
+ */
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE))
+ return;
+
+ /* Set capabilities that the driver supports */
+ ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_LENIENT_LINK_MODE, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_DEFAULT_OVERRIDE, sc->feat_cap);
+
+ /* Disable features due to hardware limitations... */
+ if (!sc->hw.func_caps.common_cap.rss_table_size)
+ ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
+
+ /* Disable capabilities not supported by the OS */
+ ice_disable_unsupported_features(sc->feat_cap);
+
+ /* RSS is always enabled for iflib */
+ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RSS))
+ ice_set_bit(ICE_FEATURE_RSS, sc->feat_en);
+}
+
+/**
+ * ice_if_multi_set - Callback to update Multicast filters in HW
+ * @ctx: iflib ctx structure
+ *
+ * Called by iflib in response to SIOCDELMULTI and SIOCADDMULTI. Must search
+ * the if_multiaddrs list and determine which filters have been added or
+ * removed from the list, and update HW programming to reflect the new list.
+ *
+ * @pre assumes the caller holds the iflib CTX lock
+ */
+static void
+ice_if_multi_set(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ int err;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Do not handle multicast configuration in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ err = ice_sync_multicast_filters(sc);
+ if (err) {
+ device_printf(sc->dev,
+ "Failed to synchronize multicast filter list: %s\n",
+ ice_err_str(err));
+ return;
+ }
+}
+
+/**
+ * ice_if_vlan_register - Register a VLAN with the hardware
+ * @ctx: iflib ctx pointer
+ * @vtag: VLAN to add
+ *
+ * Programs the main PF VSI with a hardware filter for the given VLAN.
+ *
+ * @pre assumes the caller holds the iflib CTX lock
+ */
+static void
+ice_if_vlan_register(if_ctx_t ctx, u16 vtag)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ enum ice_status status;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Do not handle VLAN configuration in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ status = ice_add_vlan_hw_filter(&sc->pf_vsi, vtag);
+ if (status) {
+ device_printf(sc->dev,
+ "Failure adding VLAN %d to main VSI, err %s aq_err %s\n",
+ vtag, ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
+ }
+}
+
+/**
+ * ice_if_vlan_unregister - Remove a VLAN filter from the hardware
+ * @ctx: iflib ctx pointer
+ * @vtag: VLAN to remove
+ *
+ * Removes the previously programmed VLAN filter from the main PF VSI.
+ *
+ * @pre assumes the caller holds the iflib CTX lock
+ */
+static void
+ice_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ enum ice_status status;
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Do not handle VLAN configuration in recovery mode */
+ if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
+ return;
+
+ status = ice_remove_vlan_hw_filter(&sc->pf_vsi, vtag);
+ if (status) {
+ device_printf(sc->dev,
+ "Failure removing VLAN %d from main VSI, err %s aq_err %s\n",
+ vtag, ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
+ }
+}
+
+/**
+ * ice_if_stop - Stop the device
+ * @ctx: iflib context structure
+ *
+ * Called by iflib to stop the device and bring it down. (i.e. ifconfig ice0
+ * down)
+ *
+ * @pre assumes the caller holds the iflib CTX lock
+ */
+static void
+ice_if_stop(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /*
+ * The iflib core may call IFDI_STOP prior to the first call to
+ * IFDI_INIT. This will cause us to attempt to remove MAC filters we
+ * don't have, and disable Tx queues which aren't yet configured.
+ * Although it is likely these extra operations are harmless, they do
+ * cause spurious warning messages to be displayed, which may confuse
+ * users.
+ *
+ * To avoid these messages, we use a state bit indicating if we've
+ * been initialized. It will be set when ice_if_init is called, and
+ * cleared here in ice_if_stop.
+ */
+ if (!ice_testandclear_state(&sc->state, ICE_STATE_DRIVER_INITIALIZED))
+ return;
+
+ if (ice_test_state(&sc->state, ICE_STATE_RESET_FAILED)) {
+ device_printf(sc->dev, "request to stop interface cannot be completed as the device failed to reset\n");
+ return;
+ }
+
+ if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) {
+ device_printf(sc->dev, "request to stop interface while device is prepared for impending reset\n");
+ return;
+ }
+
+ /* Remove the MAC filters, stop Tx, and stop Rx. We don't check the
+ * return of these functions because there's nothing we can really do
+ * if they fail, and the functions already print error messages.
+ * Just try to shut down as much as we can.
+ */
+ ice_rm_pf_default_mac_filters(sc);
+
+ /* Dissociate the Tx and Rx queues from the interrupts */
+ ice_flush_txq_interrupts(&sc->pf_vsi);
+ ice_flush_rxq_interrupts(&sc->pf_vsi);
+
+ /* Disable the Tx and Rx queues */
+ ice_vsi_disable_tx(&sc->pf_vsi);
+ ice_control_rx_queues(&sc->pf_vsi, false);
+}
+
+/**
+ * ice_if_get_counter - Get current value of an ifnet statistic
+ * @ctx: iflib context pointer
+ * @counter: ifnet counter to read
+ *
+ * Reads the current value of an ifnet counter for the device.
+ *
+ * This function is not protected by the iflib CTX lock.
+ */
+static uint64_t
+ice_if_get_counter(if_ctx_t ctx, ift_counter counter)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ /* Return the counter for the main PF VSI */
+ return ice_get_ifnet_counter(&sc->pf_vsi, counter);
+}
+
+/**
+ * ice_request_stack_reinit - Request that iflib re-initialize
+ * @sc: the device private softc
+ *
+ * Request that the device be brought down and up, to re-initialize. For
+ * example, this may be called when a device reset occurs, or when Tx and Rx
+ * queues need to be re-initialized.
+ *
+ * This is required because the iflib state is outside the driver, and must be
+ * re-initialized if we need to restart Tx and Rx queues.
+ */
+void
+ice_request_stack_reinit(struct ice_softc *sc)
+{
+ if (CTX_ACTIVE(sc->ctx)) {
+ iflib_request_reset(sc->ctx);
+ iflib_admin_intr_deferred(sc->ctx);
+ }
+}
+
+/**
+ * ice_driver_is_detaching - Check if the driver is detaching/unloading
+ * @sc: device private softc
+ *
+ * Returns true if the driver is detaching, false otherwise.
+ *
+ * @remark on newer kernels, take advantage of iflib_in_detach in order to
+ * report detachment correctly as early as possible.
+ *
+ * @remark this function is used by various code paths that want to avoid
+ * running if the driver is about to be removed. This includes sysctls and
+ * other driver access points. Note that it does not fully resolve
+ * detach-based race conditions as it is possible for a thread to race with
+ * iflib_in_detach.
+ */
+bool
+ice_driver_is_detaching(struct ice_softc *sc)
+{
+ return (ice_test_state(&sc->state, ICE_STATE_DETACHING) ||
+ iflib_in_detach(sc->ctx));
+}
+
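As a usage sketch (not part of this diff): callers such as sysctl handlers can use this helper to bail out early while the driver is being torn down. The handler shape and the ESHUTDOWN error choice below are assumptions for illustration.

    /* Illustrative only: a sysctl handler bailing out during detach. */
    static int
    example_sysctl_handler(SYSCTL_HANDLER_ARGS)
    {
        struct ice_softc *sc = (struct ice_softc *)arg1;

        if (ice_driver_is_detaching(sc))
            return (ESHUTDOWN);

        /* ... otherwise read hardware state and report it ... */
        return (0);
    }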
+/**
+ * ice_if_priv_ioctl - Device private ioctl handler
+ * @ctx: iflib context pointer
+ * @command: The ioctl command issued
+ * @data: ioctl specific data
+ *
+ * iflib callback for handling custom driver specific ioctls.
+ *
+ * @pre Assumes that the iflib context lock is held.
+ */
+static int
+ice_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ struct ifdrv *ifd;
+ device_t dev = sc->dev;
+
+ if (data == NULL)
+ return (EINVAL);
+
+ ASSERT_CTX_LOCKED(sc);
+
+ /* Make sure the command type is valid */
+ switch (command) {
+ case SIOCSDRVSPEC:
+ case SIOCGDRVSPEC:
+ /* Accepted commands */
+ break;
+ case SIOCGPRIVATE_0:
+ /*
+ * Although we do not support this ioctl command, it's
+ * expected that iflib will forward it to the IFDI_PRIV_IOCTL
+ * handler. Do not print a message in this case
+ */
+ return (ENOTSUP);
+ default:
+ /*
+ * If we get a different command for this function, it's
+ * definitely unexpected, so log a message indicating what
+ * command we got for debugging purposes.
+ */
+ device_printf(dev, "%s: unexpected ioctl command %08lx\n",
+ __func__, command);
+ return (EINVAL);
+ }
+
+ ifd = (struct ifdrv *)data;
+
+ switch (ifd->ifd_cmd) {
+ case ICE_NVM_ACCESS:
+ return ice_handle_nvm_access_ioctl(sc, ifd);
+ default:
+ return EINVAL;
+ }
+}
+
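For orientation, this handler is reached from userspace through the generic SIOCSDRVSPEC/SIOCGDRVSPEC interface ioctls carrying a struct ifdrv. The sketch below is illustrative only; the payload format behind the buffer is defined by the driver's NVM access structures and is left opaque here.

    /* Hedged userspace sketch of issuing a driver-private ioctl to an ice port. */
    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <sys/sockio.h>
    #include <net/if.h>
    #include <string.h>
    #include <unistd.h>

    static int
    ice_drv_ioctl(const char *ifname, unsigned long drv_cmd, void *buf, size_t len)
    {
        struct ifdrv ifd;
        int s, ret;

        if ((s = socket(AF_LOCAL, SOCK_DGRAM, 0)) < 0)
            return (-1);
        memset(&ifd, 0, sizeof(ifd));
        strlcpy(ifd.ifd_name, ifname, sizeof(ifd.ifd_name));
        ifd.ifd_cmd = drv_cmd;              /* e.g. ICE_NVM_ACCESS */
        ifd.ifd_len = len;
        ifd.ifd_data = buf;
        ret = ioctl(s, SIOCGDRVSPEC, &ifd); /* dispatched to ice_if_priv_ioctl() */
        close(s);
        return (ret);
    }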
+/**
+ * ice_if_i2c_req - I2C request handler for iflib
+ * @ctx: iflib context pointer
+ * @req: The I2C parameters to use
+ *
+ * Read from the port's I2C eeprom using the parameters from the ioctl.
+ *
+ * @remark The iflib-only part is pretty simple.
+ */
+static int
+ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ return ice_handle_i2c_req(sc, req);
+}
+
Index: sys/dev/ice/virtchnl.h
===================================================================
--- /dev/null
+++ sys/dev/ice/virtchnl.h
@@ -0,0 +1,923 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _VIRTCHNL_H_
+#define _VIRTCHNL_H_
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the drivers for all devices starting from our 40G product line
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * The Firmware copies the cookie fields when sending messages between the
+ * PF and VF, but uses all other fields internally. Due to this limitation,
+ * we must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of status_code type, defined in the shared type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of
+ * these opcodes. The VF driver must first validate the API version of the
+ * PF driver, then request a reset, then get resources, then configure
+ * queues and interrupts. After these operations are complete, the VF
+ * driver may start its queues, optionally add MAC and VLAN filters, and
+ * process traffic.
+ */
+
+/* START GENERIC DEFINES
+ * Need to ensure the following enums and defines hold the same meaning and
+ * value in current and future projects
+ */
+
+/* Error Codes */
+enum virtchnl_status_code {
+ VIRTCHNL_STATUS_SUCCESS = 0,
+ VIRTCHNL_STATUS_ERR_PARAM = -5,
+ VIRTCHNL_STATUS_ERR_NO_MEMORY = -18,
+ VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+ VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+ VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+ VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64,
+};
+
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
+#define VIRTCHNL_LINK_SPEED_2_5GB_SHIFT 0x0
+#define VIRTCHNL_LINK_SPEED_100MB_SHIFT 0x1
+#define VIRTCHNL_LINK_SPEED_1000MB_SHIFT 0x2
+#define VIRTCHNL_LINK_SPEED_10GB_SHIFT 0x3
+#define VIRTCHNL_LINK_SPEED_40GB_SHIFT 0x4
+#define VIRTCHNL_LINK_SPEED_20GB_SHIFT 0x5
+#define VIRTCHNL_LINK_SPEED_25GB_SHIFT 0x6
+#define VIRTCHNL_LINK_SPEED_5GB_SHIFT 0x7
+
+enum virtchnl_link_speed {
+ VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+ VIRTCHNL_LINK_SPEED_100MB = BIT(VIRTCHNL_LINK_SPEED_100MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_1GB = BIT(VIRTCHNL_LINK_SPEED_1000MB_SHIFT),
+ VIRTCHNL_LINK_SPEED_10GB = BIT(VIRTCHNL_LINK_SPEED_10GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_40GB = BIT(VIRTCHNL_LINK_SPEED_40GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_20GB = BIT(VIRTCHNL_LINK_SPEED_20GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_25GB = BIT(VIRTCHNL_LINK_SPEED_25GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_2_5GB = BIT(VIRTCHNL_LINK_SPEED_2_5GB_SHIFT),
+ VIRTCHNL_LINK_SPEED_5GB = BIT(VIRTCHNL_LINK_SPEED_5GB_SHIFT),
+};
+
+/* for hsplit_0 field of Rx HMC context */
+/* deprecated with AVF 1.0 */
+enum virtchnl_rx_hsplit {
+ VIRTCHNL_RX_HSPLIT_NO_SPLIT = 0,
+ VIRTCHNL_RX_HSPLIT_SPLIT_L2 = 1,
+ VIRTCHNL_RX_HSPLIT_SPLIT_IP = 2,
+ VIRTCHNL_RX_HSPLIT_SPLIT_TCP_UDP = 4,
+ VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8,
+};
+
+#define VIRTCHNL_ETH_LENGTH_OF_ADDRESS 6
+/* END GENERIC DEFINES */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ * Use of "advanced opcode" features must be negotiated as part of capabilities
+ * exchange and are not considered part of base mode feature set.
+ */
+ VIRTCHNL_OP_UNKNOWN = 0,
+ VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ VIRTCHNL_OP_RESET_VF = 2,
+ VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+ VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+ VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+ VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+ VIRTCHNL_OP_ENABLE_QUEUES = 8,
+ VIRTCHNL_OP_DISABLE_QUEUES = 9,
+ VIRTCHNL_OP_ADD_ETH_ADDR = 10,
+ VIRTCHNL_OP_DEL_ETH_ADDR = 11,
+ VIRTCHNL_OP_ADD_VLAN = 12,
+ VIRTCHNL_OP_DEL_VLAN = 13,
+ VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+ VIRTCHNL_OP_GET_STATS = 15,
+ VIRTCHNL_OP_RSVD = 16,
+ VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ /* opcode 19 is reserved */
+ /* opcodes 20, 21, and 22 are reserved */
+ VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ VIRTCHNL_OP_SET_RSS_HENA = 26,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28,
+ VIRTCHNL_OP_REQUEST_QUEUES = 29,
+ VIRTCHNL_OP_ENABLE_CHANNELS = 30,
+ VIRTCHNL_OP_DISABLE_CHANNELS = 31,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
+ /* opcodes 34, 35, 36, 37 and 38 are reserved */
+ /* opcodes 39, 40, 41 and 42 are reserved */
+ /* opcode 42 is reserved */
+};
+
+/* These macros are used to generate compilation errors if a structure/union
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure/union is not of the correct size, otherwise it creates an enum
+ * that is never used.
+ */
+#define VIRTCHNL_CHECK_STRUCT_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+#define VIRTCHNL_CHECK_UNION_LEN(n, X) enum virtchnl_static_assert_enum_##X \
+ { virtchnl_static_assert_##X = (n)/((sizeof(union X) == (n)) ? 1 : 0) }
+
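To see how these compile-time checks fire, consider a deliberately mismatched example (illustrative only, not part of the header; it assumes a typical ABI where the struct pads to 8 bytes):

    struct example_bad {
        u32 a;
        u16 b;  /* sizeof(struct example_bad) is 8 after tail padding */
    };

    /* Expands to an enum whose initializer is
     *   (6) / ((sizeof(struct example_bad) == 6) ? 1 : 0)
     * The comparison is false, the divisor becomes 0, and the compiler
     * rejects the constant expression, failing the build.
     */
    VIRTCHNL_CHECK_STRUCT_LEN(6, example_bad);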
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ enum virtchnl_status_code v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
+
+/* Message descriptions and data structures. */
+
+/* VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+#define VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
+struct virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_version_info);
+
+#define VF_IS_V10(_v) (((_v)->major == 1) && ((_v)->minor == 0))
+#define VF_IS_V11(_ver) (((_ver)->major == 1) && ((_ver)->minor == 1))
+
+/* VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* VSI types that use VIRTCHNL interface for VF-PF communication. VSI_SRIOV
+ * vsi_type should always be 6 for backward compatibility. Add other fields
+ * as needed.
+ */
+enum virtchnl_vsi_type {
+ VIRTCHNL_VSI_TYPE_INVALID = 0,
+ VIRTCHNL_VSI_SRIOV = 6,
+};
+
+/* VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * virtchnl_vf_resource and one or more
+ * virtchnl_vsi_resource structures.
+ */
+
+struct virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum virtchnl_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
+
+/* VF capability flags
+ * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including
+ * TX/RX Checksum offloading and TSO for non-tunnelled packets.
+ */
+#define VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define VIRTCHNL_VF_OFFLOAD_IWARP 0x00000002
+#define VIRTCHNL_VF_OFFLOAD_RSVD 0x00000004
+#define VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008
+#define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010
+#define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020
+#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040
+#define VIRTCHNL_VF_OFFLOAD_CRC 0x00000080
+#define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+#define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
+#define VIRTCHNL_VF_OFFLOAD_ADQ_V2 0X01000000
+#define VIRTCHNL_VF_OFFLOAD_USO 0X02000000
+ /* 0X40000000 is reserved */
+ /* 0X80000000 is reserved */
+
+/* Define below the capability flags that are not offloads */
+#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
+#define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \
+ VIRTCHNL_VF_OFFLOAD_VLAN | \
+ VIRTCHNL_VF_OFFLOAD_RSS_PF)
+
+struct virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_cap_flags;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+
+ struct virtchnl_vsi_resource vsi_res[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_vf_resource);
+
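The trailing vsi_res[1] member follows the usual variable-length reply pattern: the PF sizes the indirect buffer for num_vsis entries, with the first entry already counted in the base structure. A hedged sketch of that sizing (the helper name is illustrative):

    /* Illustrative sizing of a GET_VF_RESOURCES reply carrying num_vsis VSIs. */
    static inline size_t
    example_vf_resource_msg_len(u16 num_vsis)
    {
        size_t len = sizeof(struct virtchnl_vf_resource);

        if (num_vsis > 1)
            len += (size_t)(num_vsis - 1) *
                sizeof(struct virtchnl_vsi_resource);
        return (len);
    }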
+/* VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled; /* deprecated with AVF 1.0 */
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr; /* deprecated with AVF 1.0 */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
+
+/* VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code. The
+ * crc_disable flag disables CRC stripping on the VF. Setting
+ * the crc_disable flag to 1 will disable CRC stripping for each
+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
+ * offload must have been set prior to sending this info or the PF
+ * will ignore the request. This flag should be set the same for
+ * all of the queues for a VF.
+ */
+
+/* Rx queue config info */
+struct virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled; /* deprecated with AVF 1.0 */
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u8 crc_disable;
+ u8 pad1[3];
+ u64 dma_ring_addr;
+ enum virtchnl_rx_hsplit rx_split_pos; /* deprecated with AVF 1.0 */
+ u32 pad2;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_rxq_info);
+
+/* VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ * NOTE: The VF is not required to configure all queues in a single request.
+ * It may send multiple messages. PF drivers must correctly handle all VF
+ * requests.
+ */
+struct virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct virtchnl_txq_info txq;
+ struct virtchnl_rxq_info rxq;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(64, virtchnl_queue_pair_info);
+
+struct virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ u32 pad;
+ struct virtchnl_queue_pair_info qpair[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
+/* VIRTCHNL_OP_REQUEST_QUEUES
+ * VF sends this message to request the PF to allocate additional queues to
+ * this VF. Each VF gets a guaranteed number of queues on init but asking for
+ * additional queues must be negotiated. This is a best effort request as it
+ * is possible the PF does not have enough queues left to support the request.
+ * If the PF cannot support the number requested it will respond with the
+ * maximum number it is able to support. If the request is successful, PF will
+ * then reset the VF to institute required changes.
+ */
+
+/* VF resource request */
+struct virtchnl_vf_res_request {
+ u16 num_queue_pairs;
+};
+
+/* VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0. The VF may not request
+ * that vector 0 be used for traffic.
+ * PF configures interrupt mapping and returns status.
+ * NOTE: due to hardware requirements, all active queues (both TX and RX)
+ * should be mapped to interrupts, even if the driver intends to operate
+ * only in polling mode. In this case the interrupt may be disabled, but
+ * the ITR timer will still run to trigger writebacks.
+ */
+struct virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_vector_map);
+
+struct virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct virtchnl_vector_map vecmap[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(14, virtchnl_irq_map_info);
+
+/* VIRTCHNL_OP_ENABLE_QUEUES
+ * VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ * NOTE: The VF is not required to enable/disable all queues in a single
+ * request. It may send multiple messages.
+ * PF drivers must correctly handle all VF requests.
+ */
+struct virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_select);
+
+/* VIRTCHNL_OP_ADD_ETH_ADDR
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* VIRTCHNL_OP_DEL_ETH_ADDR
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct virtchnl_ether_addr {
+ u8 addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_ether_addr);
+
+struct virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct virtchnl_ether_addr list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_ether_addr_list);
+
+/* VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_vlan_filter_list);
+
+/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_promisc_info);
+
+#define FLAG_VF_UNICAST_PROMISC 0x00000001
+#define FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct virtchnl_eth_stats in an external buffer.
+ */
+
+struct virtchnl_eth_stats {
+ u64 rx_bytes; /* received bytes */
+ u64 rx_unicast; /* received unicast pkts */
+ u64 rx_multicast; /* received multicast pkts */
+ u64 rx_broadcast; /* received broadcast pkts */
+ u64 rx_discards;
+ u64 rx_unknown_protocol;
+ u64 tx_bytes; /* transmitted bytes */
+ u64 tx_unicast; /* transmitted unicast pkts */
+ u64 tx_multicast; /* transmitted multicast pkts */
+ u64 tx_broadcast; /* transmitted broadcast pkts */
+ u64 tx_discards;
+ u64 tx_errors;
+};
+
+/* VIRTCHNL_OP_CONFIG_RSS_KEY
+ * VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
+
+struct virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
+
+/* VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ */
+struct virtchnl_rss_hena {
+ u64 hena;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_rss_hena);
+
+/* This is used by the PF driver to enforce how many channels can be supported.
+ * When the ADQ_V2 capability is negotiated, up to 16 channels are allowed;
+ * otherwise the PF driver allows a maximum of 4 channels.
+ */
+#define VIRTCHNL_MAX_ADQ_CHANNELS 4
+#define VIRTCHNL_MAX_ADQ_V2_CHANNELS 16
+
+/* VIRTCHNL_OP_ENABLE_CHANNELS
+ * VIRTCHNL_OP_DISABLE_CHANNELS
+ * VF sends these messages to enable or disable channels based on
+ * the user specified queue count and queue offset for each traffic class.
+ * This struct encompasses all the information that the PF needs from
+ * VF to create a channel.
+ */
+struct virtchnl_channel_info {
+ u16 count; /* number of queues in a channel */
+ u16 offset; /* queues in a channel start from 'offset' */
+ u32 pad;
+ u64 max_tx_rate;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_channel_info);
+
+struct virtchnl_tc_info {
+ u32 num_tc;
+ u32 pad;
+ struct virtchnl_channel_info list[1];
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_tc_info);
+
+/* VIRTCHNL_ADD_CLOUD_FILTER
+ * VIRTCHNL_DEL_CLOUD_FILTER
+ * VF sends these messages to add or delete a cloud filter based on the
+ * user specified match and action filters. These structures encompass
+ * all the information that the PF needs from the VF to add/delete a
+ * cloud filter.
+ */
+
+struct virtchnl_l4_spec {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ /* vlan_prio is part of this 16-bit field even from the OS perspective:
+ * bits 11..0 hold the actual vlan_id and bits 14..12 hold vlan_prio.
+ * In the future, if vlan_prio offload is added, that information will be
+ * passed as part of the "vlan_id" field, in bits 14..12.
+ */
+ __be16 vlan_id;
+ __be16 pad; /* reserved for future use */
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(52, virtchnl_l4_spec);
+
+union virtchnl_flow_spec {
+ struct virtchnl_l4_spec tcp_spec;
+ u8 buffer[128]; /* reserved for future use */
+};
+
+VIRTCHNL_CHECK_UNION_LEN(128, virtchnl_flow_spec);
+
+enum virtchnl_action {
+ /* action types */
+ VIRTCHNL_ACTION_DROP = 0,
+ VIRTCHNL_ACTION_TC_REDIRECT,
+};
+
+enum virtchnl_flow_type {
+ /* flow types */
+ VIRTCHNL_TCP_V4_FLOW = 0,
+ VIRTCHNL_TCP_V6_FLOW,
+ VIRTCHNL_UDP_V4_FLOW,
+ VIRTCHNL_UDP_V6_FLOW,
+};
+
+struct virtchnl_filter {
+ union virtchnl_flow_spec data;
+ union virtchnl_flow_spec mask;
+ enum virtchnl_flow_type flow_type;
+ enum virtchnl_action action;
+ u32 action_meta;
+ u8 field_flags;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter);
+
+/* VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum virtchnl_event_codes {
+ VIRTCHNL_EVENT_UNKNOWN = 0,
+ VIRTCHNL_EVENT_LINK_CHANGE,
+ VIRTCHNL_EVENT_RESET_IMPENDING,
+ VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+
+#define PF_EVENT_SEVERITY_INFO 0
+#define PF_EVENT_SEVERITY_ATTENTION 1
+#define PF_EVENT_SEVERITY_ACTION_REQUIRED 2
+#define PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct virtchnl_pf_event {
+ enum virtchnl_event_codes event;
+ union {
+ /* If the PF driver does not support the new speed reporting
+ * capabilities then use link_event else use link_event_adv to
+ * get the speed and link information. The ability to understand
+ * new speeds is indicated by setting the capability flag
+ * VIRTCHNL_VF_CAP_ADV_LINK_SPEED in vf_cap_flags parameter
+ * in virtchnl_vf_resource struct and can be used to determine
+ * which link event struct to use below.
+ */
+ struct {
+ enum virtchnl_link_speed link_speed;
+ u8 link_status;
+ } link_event;
+ struct {
+ /* link_speed provided in Mbps */
+ u32 link_speed;
+ u8 link_status;
+ } link_event_adv;
+ } event_data;
+
+ int severity;
+};
+
+VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_pf_event);
+
+/* Since VF messages are limited by u16 size, precalculate the maximum possible
+ * values of nested elements in virtchnl structures that virtual channel can
+ * possibly handle in a single message.
+ */
+enum virtchnl_vector_limits {
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vsi_queue_config_info)) /
+ sizeof(struct virtchnl_queue_pair_info),
+
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_irq_map_info)) /
+ sizeof(struct virtchnl_vector_map),
+
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_ether_addr_list)) /
+ sizeof(struct virtchnl_ether_addr),
+
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) /
+ sizeof(u16),
+
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX =
+ ((u16)(~0) - sizeof(struct virtchnl_tc_info)) /
+ sizeof(struct virtchnl_channel_info),
+};
+
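As a worked instance of this arithmetic (a restatement of the enum above, nothing new): the message length is bounded by a u16, i.e. 65535 bytes, so for VLAN filters:

    /* VIRTCHNL_OP_ADD_DEL_VLAN_MAX
     *   = ((u16)(~0) - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16)
     *   = (65535 - 6) / 2
     *   = 32764 VLAN IDs in a single ADD_VLAN/DEL_VLAN message
     */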
+/* VF reset states - these are written into the RSTAT register:
+ * VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return DEADBEEF, which, when masked
+ * will result in 3.
+ */
+enum virtchnl_vfr_states {
+ VIRTCHNL_VFR_INPROGRESS = 0,
+ VIRTCHNL_VFR_COMPLETED,
+ VIRTCHNL_VFR_VFACTIVE,
+};
+
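A minimal VF-side sketch of how these states are consumed (the register read itself is driver-specific and assumed to have happened already; only the low two bits of VFGEN_RSTAT carry the state, which is why a 0xDEADBEEF read during reset masks to 3 and matches neither completion state):

    /* Illustrative only: caller is assumed to have read VFGEN_RSTAT. */
    static inline bool
    example_vf_reset_complete(u32 vfgen_rstat)
    {
        u32 state = vfgen_rstat & 0x3;

        return (state == VIRTCHNL_VFR_COMPLETED ||
            state == VIRTCHNL_VFR_VFACTIVE);
    }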
+/**
+ * virtchnl_vc_validate_vf_msg
+ * @ver: Virtchnl version info
+ * @v_opcode: Opcode for the message
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg format against struct for each opcode
+ */
+static inline int
+virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
+ u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len = 0;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct virtchnl_version_info);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ if (VF_IS_V11(ver))
+ valid_len = sizeof(u32);
+ break;
+ case VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct virtchnl_txq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct virtchnl_rxq_info);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_vsi_queue_config_info *vqc =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+
+ if (vqc->num_queue_pairs == 0 || vqc->num_queue_pairs >
+ VIRTCHNL_OP_CONFIG_VSI_QUEUES_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ virtchnl_queue_pair_info));
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_irq_map_info *vimi =
+ (struct virtchnl_irq_map_info *)msg;
+
+ if (vimi->num_vectors == 0 || vimi->num_vectors >
+ VIRTCHNL_OP_CONFIG_IRQ_MAP_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vimi->num_vectors *
+ sizeof(struct virtchnl_vector_map));
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ valid_len = sizeof(struct virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_ether_addr_list *veal =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ if (veal->num_elements == 0 || veal->num_elements >
+ VIRTCHNL_OP_ADD_DEL_ETH_ADDR_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += veal->num_elements *
+ sizeof(struct virtchnl_ether_addr);
+ }
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ case VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+
+ if (vfl->num_elements == 0 || vfl->num_elements >
+ VIRTCHNL_OP_ADD_DEL_VLAN_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += vfl->num_elements * sizeof(u16);
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct virtchnl_promisc_info);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct virtchnl_queue_select);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+
+ if (vrk->key_len == 0) {
+ /* zero length is allowed as input */
+ break;
+ }
+
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct virtchnl_rss_lut *vrl =
+ (struct virtchnl_rss_lut *)msg;
+
+ if (vrl->lut_entries == 0) {
+ /* zero entries is allowed as input */
+ break;
+ }
+
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct virtchnl_rss_hena);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ valid_len = sizeof(struct virtchnl_vf_res_request);
+ break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ valid_len = sizeof(struct virtchnl_tc_info);
+ if (msglen >= valid_len) {
+ struct virtchnl_tc_info *vti =
+ (struct virtchnl_tc_info *)msg;
+
+ if (vti->num_tc == 0 || vti->num_tc >
+ VIRTCHNL_OP_ENABLE_CHANNELS_MAX) {
+ err_msg_format = true;
+ break;
+ }
+
+ valid_len += (vti->num_tc - 1) *
+ sizeof(struct virtchnl_channel_info);
+ }
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ valid_len = sizeof(struct virtchnl_filter);
+ break;
+ /* These are always errors coming from the VF. */
+ case VIRTCHNL_OP_EVENT:
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ /* few more checks */
+ if (err_msg_format || valid_len != msglen)
+ return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH;
+
+ return 0;
+}
+#endif /* _VIRTCHNL_H_ */
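A usage sketch for the validator above (PF-side dispatch; the handler name and surrounding plumbing are placeholders, not part of this diff):

    static int
    example_pf_handle_vf_msg(struct virtchnl_version_info *vf_ver,
        u32 v_opcode, u8 *msg, u16 msglen)
    {
        int err;

        /* Reject malformed or truncated messages before dispatching. */
        err = virtchnl_vc_validate_vf_msg(vf_ver, v_opcode, msg, msglen);
        if (err)
            return (err);   /* e.g. VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH */

        switch (v_opcode) {
        case VIRTCHNL_OP_ADD_VLAN:
            /* msg is now known to hold a complete
             * struct virtchnl_vlan_filter_list with num_elements entries.
             */
            break;
        default:
            break;
        }
        return (0);
    }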
Index: sys/dev/ice/virtchnl_inline_ipsec.h
===================================================================
--- /dev/null
+++ sys/dev/ice/virtchnl_inline_ipsec.h
@@ -0,0 +1,449 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2020, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*$FreeBSD$*/
+
+#ifndef _VIRTCHNL_INLINE_IPSEC_H_
+#define _VIRTCHNL_INLINE_IPSEC_H_
+
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM 3
+#define VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM 16
+#define VIRTCHNL_IPSEC_MAX_TX_DESC_NUM 128
+#define VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER 2
+#define VIRTCHNL_IPSEC_MAX_KEY_LEN 128
+#define VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM 8
+#define VIRTCHNL_IPSEC_SELECTED_SA_DESTROY 0
+#define VIRTCHNL_IPSEC_ALL_SA_DESTROY 1
+
+/* crypto type */
+#define VIRTCHNL_AUTH 1
+#define VIRTCHNL_CIPHER 2
+#define VIRTCHNL_AEAD 3
+
+/* algorithm type */
+/* Hash Algorithm */
+#define VIRTCHNL_NO_ALG 0 /* NULL algorithm */
+#define VIRTCHNL_AES_CBC_MAC 1 /* AES-CBC-MAC algorithm */
+#define VIRTCHNL_AES_CMAC 2 /* AES CMAC algorithm */
+#define VIRTCHNL_AES_GMAC 3 /* AES GMAC algorithm */
+#define VIRTCHNL_AES_XCBC_MAC 4 /* AES XCBC algorithm */
+#define VIRTCHNL_MD5_HMAC 5 /* HMAC using MD5 algorithm */
+#define VIRTCHNL_SHA1_HMAC 6 /* HMAC using 128 bit SHA algorithm */
+#define VIRTCHNL_SHA224_HMAC 7 /* HMAC using 224 bit SHA algorithm */
+#define VIRTCHNL_SHA256_HMAC 8 /* HMAC using 256 bit SHA algorithm */
+#define VIRTCHNL_SHA384_HMAC 9 /* HMAC using 384 bit SHA algorithm */
+#define VIRTCHNL_SHA512_HMAC 10 /* HMAC using 512 bit SHA algorithm */
+#define VIRTCHNL_SHA3_224_HMAC 11 /* HMAC using 224 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_256_HMAC 12 /* HMAC using 256 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_384_HMAC 13 /* HMAC using 384 bit SHA3 algorithm */
+#define VIRTCHNL_SHA3_512_HMAC 14 /* HMAC using 512 bit SHA3 algorithm */
+/* Cipher Algorithm */
+#define VIRTCHNL_3DES_CBC 15 /* Triple DES algorithm in CBC mode */
+#define VIRTCHNL_AES_CBC 16 /* AES algorithm in CBC mode */
+#define VIRTCHNL_AES_CTR 17 /* AES algorithm in Counter mode */
+/* AEAD Algorithm */
+#define VIRTCHNL_AES_CCM 18 /* AES algorithm in CCM mode */
+#define VIRTCHNL_AES_GCM 19 /* AES algorithm in GCM mode */
+#define VIRTCHNL_CHACHA20_POLY1305 20 /* algorithm of ChaCha20-Poly1305 */
+
+/* protocol type */
+#define VIRTCHNL_PROTO_ESP 1
+#define VIRTCHNL_PROTO_AH 2
+#define VIRTCHNL_PROTO_RSVD1 3
+
+/* sa mode */
+#define VIRTCHNL_SA_MODE_TRANSPORT 1
+#define VIRTCHNL_SA_MODE_TUNNEL 2
+#define VIRTCHNL_SA_MODE_TRAN_TUN 3
+#define VIRTCHNL_SA_MODE_UNKNOWN 4
+
+/* sa direction */
+#define VIRTCHNL_DIR_INGRESS 1
+#define VIRTCHNL_DIR_EGRESS 2
+#define VIRTCHNL_DIR_INGRESS_EGRESS 3
+
+/* sa termination */
+#define VIRTCHNL_TERM_SOFTWARE 1
+#define VIRTCHNL_TERM_HARDWARE 2
+
+/* sa ip type */
+#define VIRTCHNL_IPV4 1
+#define VIRTCHNL_IPV6 2
+
+/* Not all fields are valid; if a certain field is invalid, set all of its bits to 1 */
+struct virtchnl_algo_cap {
+ u32 algo_type;
+
+ u16 block_size;
+
+ u16 min_key_size;
+ u16 max_key_size;
+ u16 inc_key_size;
+
+ u16 min_iv_size;
+ u16 max_iv_size;
+ u16 inc_iv_size;
+
+ u16 min_digest_size;
+ u16 max_digest_size;
+ u16 inc_digest_size;
+
+ u16 min_aad_size;
+ u16 max_aad_size;
+ u16 inc_aad_size;
+};
+
+/* VF records the crypto capabilities received via virtchnl */
+struct virtchnl_sym_crypto_cap {
+ u8 crypto_type;
+ u8 algo_cap_num;
+ struct virtchnl_algo_cap algo_cap_list[VIRTCHNL_IPSEC_MAX_ALGO_CAP_NUM];
+};
+
+/* VIRTCHNL_OP_GET_IPSEC_CAP
+ * VF passes virtchnl_ipsec_cap to the PF,
+ * and the PF returns its IPsec capabilities over virtchnl.
+ */
+struct virtchnl_ipsec_cap {
+ /* max number of SA per VF */
+ u16 max_sa_num;
+
+ /* IPsec SA Protocol - value ref VIRTCHNL_PROTO_XXX */
+ u8 virtchnl_protocol_type;
+
+ /* IPsec SA Mode - value ref VIRTCHNL_SA_MODE_XXX */
+ u8 virtchnl_sa_mode;
+
+ /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+ u8 virtchnl_direction;
+
+ /* type of esn - !0:enable/0:disable */
+ u8 esn_enabled;
+
+ /* type of udp_encap - !0:enable/0:disable */
+ u8 udp_encap_enabled;
+
+ /* termination mode - value ref VIRTCHNL_TERM_XXX */
+ u8 termination_mode;
+
+ /* SA index mode - !0:enable/0:disable */
+ u8 sa_index_sw_enabled;
+
+ /* auditing mode - !0:enable/0:disable */
+ u8 audit_enabled;
+
+ /* lifetime byte limit - !0:enable/0:disable */
+ u8 byte_limit_enabled;
+
+ /* drop on authentication failure - !0:enable/0:disable */
+ u8 drop_on_auth_fail_enabled;
+
+ /* anti-replay window check - !0:enable/0:disable */
+ u8 arw_check_enabled;
+
+ /* number of supported crypto capability */
+ u8 crypto_cap_num;
+
+ /* descriptor ID */
+ u16 desc_id;
+
+ /* crypto capabilities */
+ struct virtchnl_sym_crypto_cap cap[VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM];
+};
+
+/* using desc_id to record the format of rx descriptor */
+struct virtchnl_rx_desc_fmt {
+ u16 desc_id;
+};
+
+/* using desc_id to record the format of tx descriptor */
+struct virtchnl_tx_desc_fmt {
+ u8 desc_num;
+ u16 desc_ids[VIRTCHNL_IPSEC_MAX_TX_DESC_NUM];
+};
+
+/* configuration of crypto function */
+struct virtchnl_ipsec_crypto_cfg_item {
+ u8 crypto_type;
+
+ u32 algo_type;
+
+ /* Length of valid IV data. */
+ u16 iv_len;
+
+ /* Length of digest */
+ u16 digest_len;
+
+ /* The length of the symmetric key */
+ u16 key_len;
+
+ /* key data buffer */
+ u8 key_data[VIRTCHNL_IPSEC_MAX_KEY_LEN];
+};
+
+struct virtchnl_ipsec_sym_crypto_cfg {
+ struct virtchnl_ipsec_crypto_cfg_item
+ items[VIRTCHNL_IPSEC_MAX_CRYPTO_ITEM_NUMBER];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_CREATE
+ * VF sends this SA configuration to the PF using virtchnl;
+ * the PF creates the SA as configured and the PF driver returns
+ * a unique index (sa_idx) for the created SA.
+ */
+struct virtchnl_ipsec_sa_cfg {
+ /* IPsec SA Protocol - AH/ESP */
+ u8 virtchnl_protocol_type;
+
+ /* termination mode - value ref VIRTCHNL_TERM_XXX */
+ u8 virtchnl_termination;
+
+ /* type of outer IP - IPv4/IPv6 */
+ u8 virtchnl_ip_type;
+
+ /* type of esn - !0:enable/0:disable */
+ u8 esn_enabled;
+
+ /* udp encap - !0:enable/0:disable */
+ u8 udp_encap_enabled;
+
+ /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+ u8 virtchnl_direction;
+
+ /* reserved */
+ u8 reserved1;
+
+ /* SA security parameter index */
+ u32 spi;
+
+ /* outer src ip address */
+ u8 src_addr[16];
+
+ /* outer dst ip address */
+ u8 dst_addr[16];
+
+ /* SA salt */
+ u32 salt;
+
+ /* SPD reference. Used to link an SA with its policy.
+ * PF drivers may ignore this field.
+ */
+ u16 spd_ref;
+
+ /* high 32 bits of esn */
+ u32 esn_hi;
+
+ /* low 32 bits of esn */
+ u32 esn_low;
+
+ /* When enabled, sa_index must be valid */
+ u8 sa_index_en;
+
+ /* SA index when sa_index_en is true */
+ u32 sa_index;
+
+ /* auditing mode - enable/disable */
+ u8 audit_en;
+
+ /* lifetime byte limit - enable/disable
+ * When enabled, byte_limit_hard and byte_limit_soft
+ * must be valid.
+ */
+ u8 byte_limit_en;
+
+ /* hard byte limit count */
+ u64 byte_limit_hard;
+
+ /* soft byte limit count */
+ u64 byte_limit_soft;
+
+ /* drop on authentication failure - enable/disable */
+ u8 drop_on_auth_fail_en;
+
+ /* anti-replay window check - enable/disable
+ * When enabled, arw_size must be valid.
+ */
+ u8 arw_check_en;
+
+ /* size of arw window, offset by 1. Setting to 0
+ * represents ARW window size of 1. Setting to 127
+ * represents ARW window size of 128
+ */
+ u8 arw_size;
+
+ /* no ip offload mode - enable/disable
+ * When enabled, ip type and address must not be valid.
+ */
+ u8 no_ip_offload_en;
+
+ /* SA Domain. Used to logically separate an SADB into groups.
+ * PF drivers supporting a single group ignore this field.
+ */
+ u16 sa_domain;
+
+ /* crypto configuration */
+ struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_UPDATE
+ * VF sends the index and new configuration of an SA to the PF;
+ * the PF updates the SA according to this configuration.
+ */
+struct virtchnl_ipsec_sa_update {
+ u32 sa_index; /* SA to update */
+ u32 esn_hi; /* high 32 bits of esn */
+ u32 esn_low; /* low 32 bits of esn */
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_DESTROY
+ * VF sends the index of the SA to destroy to the PF;
+ * the PF destroys the SA according to this configuration.
+ * The flag bitmap indicates whether all SAs or just the
+ * selected SAs will be destroyed.
+ */
+struct virtchnl_ipsec_sa_destroy {
+ /* VIRTCHNL_SELECTED_SA_DESTROY: selected SA will be destroyed.
+ * VIRTCHNL_ALL_SA_DESTROY: all SA will be destroyed.
+ */
+ u8 flag;
+
+ u8 pad1; /* padding */
+ u16 pad2; /* padding */
+
+ /* selected SA index */
+ u32 sa_index[VIRTCHNL_IPSEC_MAX_SA_DESTROY_NUM];
+};
+
+/* VIRTCHNL_OP_IPSEC_SA_READ
+ * VF sends this SA configuration to the PF using virtchnl;
+ * the PF reads the SA and returns the configuration for the created SA.
+ */
+struct virtchnl_ipsec_sa_read {
+ /* SA valid - invalid/valid */
+ u8 valid;
+
+ /* SA active - inactive/active */
+ u8 active;
+
+ /* SA SN rollover - not_rollover/rollover */
+ u8 sn_rollover;
+
+ /* IPsec SA Protocol - AH/ESP */
+ u8 virtchnl_protocol_type;
+
+ /* termination mode - value ref VIRTCHNL_TERM_XXX */
+ u8 virtchnl_termination;
+
+ /* auditing mode - enable/disable */
+ u8 audit_en;
+
+ /* lifetime byte limit - enable/disable
+ * When set to limit, byte_limit_hard and byte_limit_soft
+ * must be valid.
+ */
+ u8 byte_limit_en;
+
+ /* hard byte limit count */
+ u64 byte_limit_hard;
+
+ /* soft byte limit count */
+ u64 byte_limit_soft;
+
+ /* drop on authentication failure - enable/disable */
+ u8 drop_on_auth_fail_en;
+
+ /* anti-replay window check - enable/disable
+ * When set to check, arw_size, arw_top, and arw must be valid
+ */
+ u8 arw_check_en;
+
+ /* size of arw window, offset by 1. Setting to 0
+ * represents ARW window size of 1. Setting to 127
+ * represents ARW window size of 128
+ */
+ u8 arw_size;
+
+ /* reserved */
+ u8 reserved1;
+
+ /* top of anti-replay-window */
+ u64 arw_top;
+
+ /* anti-replay-window */
+ u8 arw[16];
+
+ /* packets processed */
+ u64 packets_processed;
+
+ /* bytes processed */
+ u64 bytes_processed;
+
+ /* packets dropped */
+ u32 packets_dropped;
+
+ /* authentication failures */
+ u32 auth_fails;
+
+ /* ARW check failures */
+ u32 arw_fails;
+
+ /* type of esn - enable/disable */
+ u8 esn;
+
+ /* IPSec SA Direction - value ref VIRTCHNL_DIR_XXX */
+ u8 virtchnl_direction;
+
+ /* SA security parameter index */
+ u32 spi;
+
+ /* SA salt */
+ u32 salt;
+
+ /* high 32 bits of esn */
+ u32 esn_hi;
+
+ /* low 32 bits of esn */
+ u32 esn_low;
+
+ /* SA Domain. Used to logically separate an SADB into groups.
+ * PF drivers supporting a single group ignore this field.
+ */
+ u16 sa_domain;
+
+ /* SPD reference. Used to link an SA with its policy.
+ * PF drivers may ignore this field.
+ */
+ u16 spd_ref;
+
+ /* crypto configuration. Salt and keys are set to 0 */
+ struct virtchnl_ipsec_sym_crypto_cfg crypto_cfg;
+};
+
+#endif /* _VIRTCHNL_INLINE_IPSEC_H_ */
Index: sys/modules/Makefile
===================================================================
--- sys/modules/Makefile
+++ sys/modules/Makefile
@@ -142,6 +142,8 @@
${_iavf} \
${_ibcore} \
${_ichwd} \
+ ${_ice} \
+ ${_ice_ddp} \
${_ida} \
if_bridge \
if_disc \
@@ -532,6 +534,13 @@
_cxgbe= cxgbe
.endif
+.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "aarch64"
+_ice= ice
+.if ${MK_SOURCELESS_UCODE} != "no"
+_ice_ddp= ice_ddp
+.endif
+.endif
+
# These rely on 64bit atomics
.if ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \
${MACHINE_CPUARCH} != "mips"
Index: sys/modules/ice/Makefile
===================================================================
--- /dev/null
+++ sys/modules/ice/Makefile
@@ -0,0 +1,15 @@
+#$FreeBSD$
+
+.PATH: ${SRCTOP}/sys/dev/ice
+
+KMOD = if_ice
+SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h
+SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_iflib.h
+SRCS += ice_lib.c ice_osdep.c ice_resmgr.c ice_strings.c
+SRCS += ice_iflib_recovery_txrx.c ice_iflib_txrx.c if_ice_iflib.c
+
+# Shared source
+SRCS += ice_common.c ice_controlq.c ice_dcb.c ice_flex_pipe.c ice_flow.c
+SRCS += ice_nvm.c ice_sched.c ice_sriov.c ice_switch.c
+
+.include <bsd.kmod.mk>
Index: sys/modules/ice_ddp/Makefile
===================================================================
--- /dev/null
+++ sys/modules/ice_ddp/Makefile
@@ -0,0 +1,24 @@
+# $FreeBSD$
+
+# Find the highest version DDP package file and build a .ko for it
+PKG_FILE != find ${SRCTOP}/sys/contrib/dev/ice -name 'ice-*.pkg' | sort -V | tail -1
+
+.if empty(PKG_FILE)
+.error Unable to locate the DDP package binary file
+.endif
+
+.info Found ${PKG_FILE}
+
+PKG_NAME != basename ${PKG_FILE}
+PKG_VER_STR != basename -s .pkg ${PKG_NAME}
+PKG_VER_STR := ${PKG_VER_STR:S/^ice-//}
+PKG_VER_STR := ${PKG_VER_STR:S/-signed$//}
+PKG_VER_MAJ != echo ${PKG_VER_STR} | cut -d. -f1
+PKG_VER_MIN != echo ${PKG_VER_STR} | cut -d. -f2
+PKG_VER_UPD != echo ${PKG_VER_STR} | cut -d. -f3
+PKG_VER_DFT != echo ${PKG_VER_STR} | cut -d. -f4
+PKG_VERSION != printf "0x%02x%02x%02x%02x" "${PKG_VER_MAJ}" "${PKG_VER_MIN}" "${PKG_VER_UPD}" "${PKG_VER_DFT}"
+
+KMOD := ice_ddp
+FIRMWS := ${PKG_FILE}:ice_ddp:${PKG_VERSION}
+.include <bsd.kmod.mk>
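To make the version packing concrete: the four dot-separated fields of the package file name are packed one byte each into a 32-bit value, so a hypothetical ice-1.3.16.0.pkg yields PKG_VERSION = 0x01031000. The same arithmetic restated in C, purely for illustration:

    #include <stdio.h>

    int
    main(void)
    {
        /* Assumed example fields from a hypothetical ice-1.3.16.0.pkg. */
        unsigned int maj = 1, min = 3, upd = 16, dft = 0;
        unsigned int pkg_version =
            (maj << 24) | (min << 16) | (upd << 8) | dft;

        printf("0x%08x\n", pkg_version); /* prints 0x01031000 */
        return (0);
    }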
Index: tools/kerneldoc/subsys/Doxyfile-dev_ice
===================================================================
--- /dev/null
+++ tools/kerneldoc/subsys/Doxyfile-dev_ice
@@ -0,0 +1,21 @@
+# Doxyfile 1.5.2
+
+# $FreeBSD$
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+PROJECT_NAME = "FreeBSD kernel ice device code"
+OUTPUT_DIRECTORY = $(DOXYGEN_DEST_PATH)/dev_ice/
+EXTRACT_ALL = YES # TODO: ice has @file comments.. disable this?
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+INPUT = $(DOXYGEN_SRC_PATH)/dev/ice/ \
+ $(NOTREVIEWED)
+
+GENERATE_TAGFILE = dev_ice/dev_ice.tag
+
+@INCLUDE_PATH = $(DOXYGEN_INCLUDE_PATH)
+@INCLUDE = common-Doxyfile
+
