diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64 --- a/sys/conf/files.amd64 +++ b/sys/conf/files.amd64 @@ -179,6 +179,8 @@ compile-with "${NORMAL_M} -I$S/dev/ice" dev/ice/irdma_di_if.m optional ice pci \ compile-with "${NORMAL_M} -I$S/dev/ice" +dev/ice/ice_ddp_common.c optional ice pci \ + compile-with "${NORMAL_C} -I$S/dev/ice" ice_ddp.c optional ice_ddp \ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031b00 -mice_ddp -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ @@ -189,8 +191,8 @@ no-implicit-rule \ clean "ice_ddp.fwo" ice_ddp.fw optional ice_ddp \ - dependency "$S/contrib/dev/ice/ice-1.3.27.0.pkg" \ - compile-with "${CP} $S/contrib/dev/ice/ice-1.3.27.0.pkg ice_ddp.fw" \ + dependency "$S/contrib/dev/ice/ice-1.3.30.0.pkg" \ + compile-with "${CP} $S/contrib/dev/ice/ice-1.3.30.0.pkg ice_ddp.fw" \ no-obj no-implicit-rule \ clean "ice_ddp.fw" dev/ioat/ioat.c optional ioat pci diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64 --- a/sys/conf/files.arm64 +++ b/sys/conf/files.arm64 @@ -294,6 +294,8 @@ compile-with "${NORMAL_M} -I$S/dev/ice" dev/ice/irdma_di_if.m optional ice pci \ compile-with "${NORMAL_M} -I$S/dev/ice" +dev/ice/ice_ddp_common.c optional ice pci \ + compile-with "${NORMAL_C} -I$S/dev/ice" ice_ddp.c optional ice_ddp \ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031b00 -mice_ddp -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ @@ -304,8 +306,8 @@ no-implicit-rule \ clean "ice_ddp.fwo" ice_ddp.fw optional ice_ddp \ - dependency "$S/contrib/dev/ice/ice-1.3.27.0.pkg" \ - compile-with "${CP} $S/contrib/dev/ice/ice-1.3.27.0.pkg ice_ddp.fw" \ + dependency "$S/contrib/dev/ice/ice-1.3.30.0.pkg" \ + compile-with "${CP} $S/contrib/dev/ice/ice-1.3.30.0.pkg ice_ddp.fw" \ no-obj no-implicit-rule \ clean "ice_ddp.fw" diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc --- a/sys/conf/files.powerpc +++ b/sys/conf/files.powerpc @@ -42,60 +42,62 @@ 
dev/iicbus/ofw_iicbus_if.m optional iicbus aim dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_opal.c optional powernv ipmi -dev/ice/if_ice_iflib.c optional ice pci powerpc64 \ +dev/ice/if_ice_iflib.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_lib.c optional ice pci powerpc64 \ +dev/ice/ice_lib.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_osdep.c optional ice pci powerpc64 \ +dev/ice/ice_osdep.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_resmgr.c optional ice pci powerpc64 \ +dev/ice/ice_resmgr.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_strings.c optional ice pci powerpc64 \ +dev/ice/ice_strings.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_iflib_recovery_txrx.c optional ice pci powerpc64 \ +dev/ice/ice_iflib_recovery_txrx.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_iflib_txrx.c optional ice pci powerpc64 \ +dev/ice/ice_iflib_txrx.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_common.c optional ice pci powerpc64 \ +dev/ice/ice_common.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_controlq.c optional ice pci powerpc64 \ +dev/ice/ice_controlq.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_dcb.c optional ice pci powerpc64 \ +dev/ice/ice_dcb.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_flex_pipe.c optional ice pci powerpc64 \ +dev/ice/ice_flex_pipe.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_flow.c optional ice pci 
powerpc64 \ +dev/ice/ice_flow.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_nvm.c optional ice pci powerpc64 \ +dev/ice/ice_nvm.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_sched.c optional ice pci powerpc64 \ +dev/ice/ice_sched.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_switch.c optional ice pci powerpc64 \ +dev/ice/ice_switch.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_vlan_mode.c optional ice pci powerpc64 \ +dev/ice/ice_vlan_mode.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_fw_logging.c optional ice pci powerpc64 \ +dev/ice/ice_fw_logging.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_fwlog.c optional ice pci powerpc64 \ +dev/ice/ice_fwlog.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/ice_rdma.c optional ice pci powerpc64 \ +dev/ice/ice_rdma.c optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_C} -I$S/dev/ice" -dev/ice/irdma_if.m optional ice pci powerpc64 \ +dev/ice/irdma_if.m optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_M} -I$S/dev/ice" -dev/ice/irdma_di_if.m optional ice pci powerpc64 \ +dev/ice/irdma_di_if.m optional ice pci powerpc64 | ice pci powerpc64le \ compile-with "${NORMAL_M} -I$S/dev/ice" -ice_ddp.c optional ice_ddp powerpc64 \ +dev/ice/ice_ddp_common.c optional ice pci powerpc64 | ice pci powerpc64le \ + compile-with "${NORMAL_C} -I$S/dev/ice" +ice_ddp.c optional ice_ddp powerpc64 | ice pci powerpc64le \ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031b00 -mice_ddp -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "ice_ddp.c" 
-ice_ddp.fwo optional ice_ddp powerpc64 \ +ice_ddp.fwo optional ice_ddp powerpc64 | ice pci powerpc64le \ dependency "ice_ddp.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ice_ddp.fwo" -ice_ddp.fw optional ice_ddp powerpc64 \ - dependency "$S/contrib/dev/ice/ice-1.3.27.0.pkg" \ - compile-with "${CP} $S/contrib/dev/ice/ice-1.3.27.0.pkg ice_ddp.fw" \ +ice_ddp.fw optional ice_ddp powerpc64 | ice pci powerpc64le \ + dependency "$S/contrib/dev/ice/ice-1.3.30.0.pkg" \ + compile-with "${CP} $S/contrib/dev/ice/ice-1.3.30.0.pkg ice_ddp.fw" \ no-obj no-implicit-rule \ clean "ice_ddp.fw" dev/ixl/if_ixl.c optional ixl pci powerpc64 \ diff --git a/sys/contrib/dev/ice/README b/sys/contrib/dev/ice/README --- a/sys/contrib/dev/ice/README +++ b/sys/contrib/dev/ice/README @@ -1,14 +1,16 @@ -OS Default Dynamic Device Personalization (DDP) Package +OS Default Dynamic Device Personalization (DDP) Package ====================================================================== -July 7, 2020 +May 12, 2022 Contents ======== - Overview +- Supported Operating Systems - Safe Mode - Notes -- Installation & Troubleshooting +- Installation +- Troubleshooting - Legal @@ -24,12 +26,13 @@ default settings. DDP profiles can also be rolled back without rebooting the system. -The DDP package loads during device initialization. The driver checks to see if -the DDP package is present and compatible. If this file exists, the driver will -load it into the device. If the DDP package file is missing or incompatible -with the driver, the driver will go into Safe Mode where it will use the -configuration contained in the device's NVM. Refer to the Intel(R) Ethernet -Adapters and Devices User Guide for more information on Safe Mode. +The DDP package loads during device initialization or driver runtime, depending +on the operating system. The driver checks to see if the DDP package is present +and compatible. If this file exists, the driver will load it into the device. 
+If the DDP package file is missing or incompatible with the driver, the driver +will go into Safe Mode where it will use the configuration contained in the +device's NVM. Refer to the Intel(R) Ethernet Adapters and Devices User Guide +for more information on Safe Mode. A general-purpose, default DDP package is automatically installed with all supported Intel Ethernet 800 Series drivers on supported operating systems. @@ -59,6 +62,18 @@ - MPLS (up to 5 consecutive MPLS labels in the outermost Layer 2 header group) +Supported Operating Systems +=========================== +This DDP package is supported on the following operating systems: +- Microsoft* Windows Server* +- Linux* +- FreeBSD* +- VMware* ESXi* + +Refer to the Intel(R) Ethernet Adapters and Devices User Guide for currently +supported versions of these operating systems. + + Safe Mode ========= Safe Mode disables advanced and performance features, and supports only basic @@ -71,40 +86,39 @@ Notes ===== -- You cannot update the DDP package if any PF drivers are already loaded. To -overwrite a package, unload all PFs and then reload the driver with the new -package. +- In Linux, FreeBSD, and Windows, you cannot update the DDP package if any PF +drivers are already loaded. To overwrite a package, unload all PFs and then +reload the driver with the new package. -- Except for Linux, you can only use one DDP package per driver, even if you -have more than one device installed that uses the driver. +- In ESXi, use esxcli to load and unload DDP packages for specific market +segments during driver runtime. + +- In FreeBSD and Windows, you can only use one DDP package per driver, even if +you have more than one device installed that uses the driver. -- Only the first loaded PF per device can download a package for that device. +- In Linux, FreeBSD, and Windows, only the first loaded PF per device can +download a package for that device. 
In ESXi, you can load different DDP +packages for different PFs associated with a device. - If you are using DPDK, see the DPDK documentation at https://www.dpdk.org/ for installation instructions and more information. -Installation and Troubleshooting -================================ - -Microsoft* Windows* -------------------- -The DDP package is installed as part of the driver binary. You don't need to -take additional steps to install the DDP package file. +Installation +============ -If you encounter issues with the DDP package file, download the latest driver. - - -ESX ---- -The DDP package is installed as part of the driver binary. You don't need to -take additional steps to install the DDP package file. +Microsoft Windows +----------------- +TO INSTALL THE OS DEFAULT DDP PACKAGE: -If you encounter issues with the DDP package file, download the latest driver. +The default DDP package is installed as part of the driver binary. You don't +need to take additional steps to install the DDP package file. FreeBSD ------- +TO INSTALL THE OS DEFAULT DDP PACKAGE: + The FreeBSD driver automatically installs the default DDP package file during driver installation. See the base driver README for general installation and building instructions. @@ -112,15 +126,14 @@ The DDP package loads during device initialization. The driver looks for the ice_ddp module and checks that it contains a valid DDP package file. -If you encounter issues with the DDP package file, you may need to download an -updated driver or ice_ddp module. See the log messages for more information. - NOTE: It's important to do 'make install' during initial ice driver installation so that the driver loads the DDP package automatically. Linux ----- +TO INSTALL THE OS DEFAULT DDP PACKAGE: + The Linux driver automatically installs the default DDP package file during driver installation. Read the base driver README for general installation and building instructions. 
@@ -131,9 +144,8 @@ The ice.pkg file is a symbolic link to the default DDP package file installed by the linux-firmware software package or the out-of-tree driver installation. -If you encounter issues with the DDP package file, you may need to download an -updated driver or DDP package file. Refer to the log messages for more -information. + +TO INSTALL A DDP PACKAGE FOR SPECIFIC MARKET SEGMENTS: You can install specific DDP package files for different physical devices in the same system. To install a specific DDP package: @@ -177,9 +189,87 @@ of the default DDP package file. +ESX +--- +TO INSTALL THE OS DEFAULT DDP PACKAGE: + +The default DDP package is installed as part of the driver binary. You don't +need to take additional steps to install the DDP package file. + +TO INSTALL A DDP PACKAGE FOR SPECIFIC MARKET SEGMENTS: + +You must first install the Intel(R) ESXCLI Plug-In for Managing Intel(R) +Ethernet Network Adapters to be able to install and load market-specific DDP +packages. Download it from: +https://www.intel.com/content/www/us/en/download/19380/intel-esxcli-plug-in-for- +managing-intel-ethernet-network-adapters.html + +NOTE: ESXi support for DDP packages for specific market segments requires the +following: +- OS: ESXi 6.7 or higher +- Driver: icen 1.9.1.x or higher +- Tool: intnet 1.8.3.x or higher + +To install and load this DDP package: + +1. Download and install the esxcli plug-in from the URL above. + +2. Download the DDP package. + +3. Copy the DDP package file to the following location: /store/intel/icen/ddp/. + If the directory does not yet exist, create it before copying the file. + +4. From the command prompt, run the following command to load the DDP package: + + # esxcli intnet ddp load -n -p -f + + Where: + = the name of the NIC + = the name of the DDP package to load + -f = forces the package to load + + NOTE: This operation will cause the driver to reset. + +5. Wait for the load result status. 
+ + +To list all active DDP packages for all virtual NICs, run the following: + +# esxcli intnet ddp list + +To unload (roll back) a DDP package, run the following: + +# esxcli intnet ddp rollback -n -f + +NOTE: This operation will cause the driver to reset. + + +Troubleshooting +=============== + +Microsoft Windows +----------------- +If you encounter issues with the DDP package file, download the latest driver. + +FreeBSD +------- +If you encounter issues with the DDP package file, you may need to download an +updated driver or ice_ddp module. See the log messages for more information. + +Linux +----- +If you encounter issues with the DDP package file, you may need to download an +updated driver or DDP package file. Refer to the log messages for more +information. + +ESX +--- +If you encounter issues with the DDP package file, download the latest driver. + + Legal / Disclaimers =================== -Copyright (c) 2019 - 2020, Intel Corporation. +Copyright (c) 2019 - 2022, Intel Corporation. Intel and the Intel logo are trademarks of Intel Corporation or its subsidiaries in the U.S. and/or other countries. diff --git a/sys/contrib/dev/ice/ice-1.3.27.0.pkg b/sys/contrib/dev/ice/ice-1.3.30.0.pkg rename from sys/contrib/dev/ice/ice-1.3.27.0.pkg rename to sys/contrib/dev/ice/ice-1.3.30.0.pkg index 0000000000000000000000000000000000000000..454a2a6ea1930c0abcce83b1f2bf7a58061fa3b6 GIT binary patch literal 692660 zc%1EB2YeJoADz2gdb>a>1V{rC2#|!(TS5{@AS59Pp%Z%VC3FY`kS0Yzkgj6=P*DLv zQHrRjG^L10QxHLE7J9jF%iZ4p?)HDV-LSbJ^Y{&$_jc~>?9A-!%G@KtE$VwcfKY6Ea4O?Kj54$%wS!WBUz_%#I348Zo^8paJ7X_v$^g zUr5%Vk^P1a8s0A?cf^RHA$k2qj~O&#ct~9B*xE6*W9!v*BRZ9q-^R!oLUINT7~X5_ zxY7MWa{3Jz)^GS&vu$Yg>2c79saDb>1_$o<(GKi>d5{nXCuaEypCjD=a|#*V@sjg? 
z=I;c3Pzh@-(^690`r<&9ieH@WzaxF$=Pyr z>n{G2-Yk08`cH-@P3o57GAXcUOvtI4&3CkR3tm_0{R*`j5A%xN8d%}OPHSddiSuuy z^8Kpd>VR)uTfX{MUeNuW9y`DPR`Yw#g4_vjee8N|*w%}g&mZ@R3#|Rsz}jmYRJlDl z=e_I&v2`l8c=OY1pG;VGaYT=r)5z!9n{KRj8FM$Qe$$BF-;XSRe(VbEzUVEBUs)Q_ zWa^zZCq9`wwf5n*H}ik{EA_=zleVmW`CQ7cm0||=|M<}5t!sL1^{Ta_)0DLrs?Taa zcl?j9r?pwNeagYW5568Z@lkO5@pr3xu6cRkw@#jc1(W`_MaAUoclB<4>e#=cPvbM8 zLpJuSUz8L+S+mst4UzxMyam4NSd+*MhS+MWcwT$_{b(!3@`~04T zhx6W;{L#o|!`6S{)~oj8>6dOD{HoUaRjXgp_3GXG-1vyA>fsk&J<)aZ;oIBJuD^O_ zOUwqpZ+B}o9;v@Szdr7pDbF35yzYaY@x9NTe*E_tZQPor^UwYJx!-RQZsq^E*RZb&6oA&R~x?iqXpI7njo%h_k+^v-N_teN?y(YG8H{hFd zhi<<&XAV-=AEqBx5dhTn#?%9 zzT3ge?b5DadS%-GF4Z=#6AP!!=$3XRdj7##gEs!T`u=-k8vpD&YMs-_z+XERt*Ft~ z^$Aovo1vqiprD|jprD|jprCk)p(2~T+3NrvWTQ8G{h+b$ichbGZJIynSc}AhdqMT$ zhy8NB;_ABPd&g}_UEO3v?1EQToIG`YW#F9m@?RSJ%br)_wr)HAwnLSp*X}2@-CVc* zip<96$~&I9|9YLS$v^bZUC{sM#UEa-*J=50bM~%Y&@(W9=K-=YVn(`e%E8}#Cm#IZ za{2I!yPUf%ueL??#ZB)si>7@ub#KA*U0%D=*gGMhVoZz2F9yA~=&rBZ;D*~yy2j1f zd8_@e2b;CdnU?zAw2)d2E}fb7)ubmG;gt>-#ce}BNa_}AXOd^7gj zbyI(PT&Z2Rn4UL=e%G=5{1;MBT?{zzf^(Pi3vN#cjvP2(U6&z;V_Wwf9^clp==Qy9 zAcc-)wj(*{_)b^pJ?kWJ2b42xJuglv)Kbo-i(WA}P8gKUMK5Fu# zQAPiTUmI6ve#WsJ*DKvU7r*Ne)xFYdMaL&Ucm4Tmzr4D*@xt>N)nCg!ujv@t;(zPz zf7bYiWrL3|>G8tS%Ff?SC+?0}<+i)OdSk&$6<)iaIDW^w9WQ*pGuCnY_Me<9r!U+7 z(qCho+Gl+5z$K+i-Ujm5@cl)Dk}AA0_15Iv^E+OP*?Y(D;oM99M&Ues#3V);6i0;U8x?kM29iclC&WHoSb|(8ko?UK{sHw^2o-wvzr=T#91O zo{OtD|8Tc3ch@tuSGccQFg{e<$9>(2!!O_XbJ5t&L)rz;?wZ$ZlKb9jBchu}FKB+~ zQgCpE)iaOR?s>J`=dryUE_6y8e(BAccJ{`mg;TCRF^?S-IiQ?i!4;pEZ& z;&;ayoGt&{pYe_6#BFNRvZ|w>>eLGLnu!hfH%wJ8-}mbJ->1zjn%=outwpYhcfNn* zdf>|Gi3P8Y%I*1jpOvXgf4$o6J^!~~yR>+8<~Pl%?*8WF!uI9*eV_L6irop<%l|#9 zdiU%m^@7h=2^iGm(B+qkrkz_gX!4k|c{jfDi)gs^uRgzx%ATIrG|hj5X3?$A-R|8! 
z*8k$puRr{9Yr|hcyL`8((#Ly(zWCF5*`wNCfe*&D_%oGk=+f-q?(H>uT)Y(Dam%9j zuKc)X(@W#JzJKqR<9=!q`~JEpF7nzZua9ne|L*3~@y~~U5qtV!xh{pv+<*R|RqH0--fY}$-l63Mch4_< zw{!f~4zFLIcyXKW%0~S>KaZIELLawJeENqkxV5Rr$Nk^#;8*nhOPU@3RBh_JXKDAr zvl?gB-a7B-M@v5p4>*6YNBbs&yRQ2t=7dui`F=&}j`yGM-r!JD`ofkU-3eOx-79{5 z|7g$L_64{pr)-^j|*w=3nG{)^#-z2yE`{|EWo;%*LUwx;9eYN~5J*DkyB)BfSXw{F%6IdtcX zj6Sy?zxr@_a2w~4oXuane(vQ%Q~vmC_wZfUHaWcV=J=-d>O}s2yIsfHw+{pi(`qWr z9PqGDrTRBB(^`zK^NmBBW4WEuo9^cj@?R>Yt-rDWo<()c@mt8w_szbml z1CRD!|69t>4{jgIS?-;3N0YG0{g3$}KaM}~cMH86qV-qS|M}U@)oPcIPP@O;wcmou>8S~%b2ZQ1KI$2s`^x{baYbBwb{D^n&(BWo zJZ53|d(OWl3>`kTVC(bg*>`?wa=TlNzos=z|Ki5%pa@17gO|j&W$fR#&*1Rq0_1>HEZM@ zZa3n^SK}X_YF7QlUI{x+xMoH)T=edP84=&ajhcS1R_f@Dr|yq#-FIxO;2xuw9}IiT zV}WXYjW5sajefl5N|URNfBn<>VtvnM?>ZfcJ~ClZ?d7_nOCEes{m(%Sy_UUmw3*}O{%_6<=(m5%iAJ|0 z)8E_t)t2g&62>l?_nT_S>73dpEE4wE-C&9d;INzLHua>6_g2%z~S5Ue*om zIe+l6%q|xT&Thv(FYSZ4iJNUU@ zliuE#e=4v`+Tnkiw;CQ;ZPow2`Yvhi+BYtrT{X&k+OB~~lX{ka?ozMVzgzvWVpRX} z%P0QS#_LP%hxHOJhQ3v6!aet{z1_cd`gok@?m;n!CpSv?=~~~_4<9&QfANiP>mIA{ z!@gS!{l}jzm-)lsEV8HD4rlKL&r7 zvoEuL?9Y{2t=oCD@st`pR(AfN@Iu6|h@amW9@eIUVnGhrtz+VA&!OY-08z3ZKqH(iQzT|Peb zP{XL0EeB$YLVO3k{?({O@j>nz4?eg0R>YJJZ}tvZTe0HB-@khC_N%w!wr350@z-g` z=Z5dxKC|kFvtD<6Kgh``+@)SZ-+vrhR{QCr*Q+!S_t=njKIgJ-*2R@yr}<3#Eu!<_ zrW1Xd6waG{Ypr*Km)%GF)8W14OCBXGd3^rtu7zKI+UJ$)gPZh?Z0zt^mHXZMj(p+I zdmREkyW#7+`9}R=N5B91e)M;%SHF6}`yR>vVC=D$qi=oK_u!Szw|jYbMW1`^LEk^d z-pld$#dFz!=XJjfsuDYGRZ+7^_a@%>+iCP0|08ogeCg|(KD)dZ<~>;VVxRpp7c~9o zkLrU)f4pjo@1`1eYt-!;^mEFU`ZxZLUVUf4jLasT+idRi$%h%+KA-+trwK*Z54aBge$|Jx1yCeQs;azDINxe9zv3HgD*~bQsJ~*~ohd)Nw zYQ4?p%5#Tr{BpDE{qzR!?SAvqzt$~Kzf|*;wTok?z8LbtarKV}9?$Y_zG_A4hgW-L z&bYs?-kVK+i}*3G-OHhoJ2o{xo?JC+(VBld4GeDn@ItK-6Mj7ru_D7~N$78%-net% zlY}0}Ty%GK``nzHqt$-kMtrQ2>tHaWG^eT8;w#9z0Ye_JrK(!aybP8=L|b9uEnuf8AkX^XtS zZ#Dh&=KjUU{`t7?`!N?^d9CB%h2zFfx{>a9vwDkl9p85K9P#Pb+8rkYnuZM?b3Ja} z`i1waY`XWI|M1LyFU$)0dQtZL8vBR*Id$>4&=#}(=GC1P92j-y{RMUUhb`*k-ZZ|; z$oZZVs$Tf|Lv6h|%Xj7m9(<=y&y!Pq?|HobO6vT-+~4v}*)r|KjxLj`T^Y5rR%mMQ 
zf?-1+b?x*Zs#pC%zx|%ScF`vb3P0PeThpPMPvy(U8gIV5q1#~J+W&m{i`%NPqfdU? z%46`+=QSO*uHVgH=-Yq#|K8m_Wg_|cUf+uzD~^x5%`I(%LAjOSkqodXxVzxS<|({`K~-+t$- zyEkVZbNM+ckogw z@zeaTzs~=rTf;GRdI0PhGBmV`iq?yzjRk-W7IxdyPfu%Rle!bM~{)yr8!eUs&$!^WvW?kKb_l;9%?r zkrhrZZlU`5z^%vS!aTlN(D=lT4_|-zTHjf}KG+xeXYW0?UT-=5Qo)xiLbl%jeD0^u ztvvMh=L`0B9yBtk={b$V*091YU%htfLe(D&-+B8)hTn(hGu{nL+Mo4%U0;VL*^Ljr zGW)r}5Qop}Ej^rf^u$jq?`k%jPta&*UHx})ry~nKXr^v5c=GLDuhqP;R_C4CdQkfx z*Sel{P<35fEu^Tq@4okw4nFr>1Gg*d-<-Mde$v#lyWXkzb2+y|!T-K?-EDKx#l(x?Sf3hrf^gIM^|9Q=f&-D{~`jRc!UmvY9I%^jUmK z{crE8eMfxyqF>Cq@Qb$_1}~e^Wb2Z333dCt9Pd(Y($V4d?uG6C^5w9r?J_2B*u3k* zwWn_NN_5%t!?)ICvADk7}eQm+zzTqGL6!LEShcz9}938y1u1{*fZ!K2^bn;vE-L?0MUU{|6 zZ~aaM1m?Z;)$K8(wXgVprTgLe<-_WadaYWO1%*{Ynz}Wc^xQ$$*F#s|tnluq*_YOR ze_VIt@wxQC-5;FjaK!&;o3_slYUC8&aL0Hr&v@sDEq|La@aDJQG=2N*%=dp=uKuF; z7r*+auKfD%%iShcx|&?R=El+OdyZ@U-l<((auQYsxAQ#}{eGW;om=#dA3J;R^=hu= z`(&mi^;k9Q;qKPSvs62x??2ue{qmp&T{e8Sy-HQ^ZOPiyq)&s$NyTwkN-9F9w!9_1qB5K1qB5K1qB7ge-v86&gWN=h6!$j zef~jsTwyHxWJ0po;}T>eq0bG_=LYC=1Gd86$2bKA1qB5K1qB5K1w~0fpBtdh4bbNX z=yL<~xdHmz0DW$NJ~u$08=%h((B}r|a|86b0s7nkeQtn0H$Xu_K|w)5K|w)5K|w)5 z@$^GWl0Ds-asMoGfcP-WER_%OW0rkY2S`O`8Kd?gRhVV6`T(iQEVT|kB#K!^I2<5# znB`4|*JG9%jfS*fmM+Y)J$pStbAWVYmM1knqzkjWstF`r*?5kD-17s$juFI-StdAY zT-fUlj`}&)&MH#4yX%%sxAzbG&i}oxhN;(cpB7zg(m15fgcMrNEKgYQwCEO+e*gIL zbL7e|q0{Ex$NSft>|d*9LYhkNC#vsWh{Rtk&F9JKBUH(%vCKaeQ4?1NYLWdbJhiF+ z;aB@PMn-}B64m+r6N$fAn$MHd`MqQJk1+d3h>B|i+#J}zeo4fUcqFj3l>I%)`>@xZ z`6=J}jp=`Ib>~6t++28HOP$|Ok@$QPZv9qQGq7avx)UcI=u*sz$m zIID3>7v}jv@n6Sj=4h~Y8Yn0zC@3f>C@3f>D4qmVB!V0Fdyqf}Ux$m{_D&B4MOlNr zuhCIZP*6}%P*6}%P*6Mxs7RnEKmPadbn(1f&fa4!3JMAe3JMAe3JMB}(uTdqWfT;p z0X2E(?a9sm*SL5w%RrZ3ob|5)q@bXnprD|5#;}?Ry6J{q0#T7&?D8D@pM#pL7y1tl z4n+4@AR02=xWwWYI^82(4V})-jUmTaQd6gM zcW1~^mPG4x9v%!iz>>N;oz{~f-?1cKr}Ob*$hR!1r_+`5X2=gLsio8TYZ>xAOKR(M z0X_^l!jd{VU0^we9A-(3P8U?3AqQC!r_)vRWym3x#Oibv{Kz9VZ8|n>p8gCu#u6rN zUI7d_$`U4R-hm7`z!D~H<%1aV9ZQ(B`Bq@aw=7}O=2wv+Kd^*JTW~N#zGn%Own~*4 za)c#J+A3FO$YGW+X{%C&A%|GPBqt<m;8 
zVo5cf&MAT+Cs|Tmr*p2#kRMqRsnfYsW5@}XMCo*{)d^Rm+<&S_NTMLA0ZC0rq9LgT zNo`2#KoSEbVTBt0SN1xas6`asebl75i%hhzXG10fj%$zVu^Kr$4PVUP@mWCSE5 zAsGe9Xh_CDG8U3?kQ6{N9+C-=OoU_-B$FYT0?AZJra>|tl0rykKr$1OB1mRIG8>XP zkUR&;TuA0YG9Qu!kSv7cc}NyPvKW#jkSvAd1xQ|mWEmvOA$bXs6_Biil7*$K&KkbDlwE=YDm zvImmAkbD8jK1jZVyx+=JvkBo82Y2+1Qz9-B0jkPsCh zDoE6jI6$I-#1RrFNSq;Yfy5OOH%Qze@qok=5-&)+A<;tO14%hZ%0uD{i612XkOV*y z2uTnm6(FeyNiZapAgK&V6-YuL356sKl5j{OAgKyTHAt#M5(!BZBsCzZ2}v|0wIHbt zNgYUHAc=(}4wAZ%#6waKlKPM|fTSTLjUZ_ZNfSt#LedP9=8&|2Bmt5{NLoUY1W7U^ zDUh^+q%|a|kfcG<29marq(jmUk_<>PA<2TIJtWzXy) z8ImiI`~k_IkX(i28YI^txdF*dNdAK4Z%F=uNn1`ZlLWazNrBSwxIJ!b5a^*FGiDaenmy;a zx%1{PSor*+#Y>jH@Zz%NFRfVl@~YKq)~;K>;gwfk+xYq$Z@%^RJOA7C?tAZV{@}xp zwrt(@@%9~`e7f_q&v)(Kv-gXAUw-xV{%^kh?!duAhmU;!!_i~MPyBfDr&B+lK6Ccm z`Cop$aPhZGzhA!c$DdcPUB7YjufPBK_txz@ckkVQ@bJ-N9eeyRx$*ReNM=83Fd>== z)rh`mnV?~U6BAsR;6@0$zwEa|T+7;iKkey%+Jl1mAIrpzWxPU|WeDRF;=u&wL>9uB zg_L_j<)~o#*>C35dy3O<2v2MJ4fTiF1c_vqj8`P%70K*JB(oopjAFKRMwx++V z)*oBbuYCP^I-nv)yxIKoAp5-ih_=+)O)THadb?w&_fKp3i`#ZtKW@uv{gK*kKCS65 ztMx}}`@?<`F^XL>>!X+zQOxh7nBPY+rcq3KqnLhY6w~jFV)~s?OusXV>32pk{mv-n zgb~H`JENF>XOtcKore1Jw5H#%ji)vJwEocg^CZq+Piy+iYW7@q@M;FXrO@x8fc(_1{!Fffd(3AC_a>A{NJ|>q@M;FXrO@x8fc(_ z1{!Fffd(3AC_a>A{6Db_q@M;FXrO@x8fc(_1{!Fffd(3AC_a>A{J*FSq@M;FXrO@x z8fc(_1{!Fffd(3AC_a>A{Qp`RNIwlU&_DwXG|)f;4K&a|0}V9LP<$xK_6C@3f>C@7vmxTvr-8b>uzIXHT|dk{}AcVDd!DOX-= zotingx;Yak7q=ike-aSrSGht(5?rZ*b*k*n76T?LwCp(FgZ%4#(;Lqj3JMAe3JMAe z3JMAe3JQv+7b+6y&5!>*yj{HSYVDmK3JMAe3JMAe3JMAe3X0N%)OqwzE7atncL1}$ zzC_~^$Seb0esLxmd*?m{1qB5K1%*AZnhB5D4g&X|N(G4;5(h{$kT^o(1c@^wE|9oF z;s%L3Bp#4>LgEF9HzZm}d>|4qyi)rAqj@05+s!&sRBs| zB%zRmK@tv01SC}IA!!0hQ%IUY(j1Z&kR(8o2uVvwk|0TjBn6UIkhF#*6_PYa+Cb74l5|Mg zL6QMUCL~#qw1*@ck{n2KA?W}~9wZ$h=>$n9l^grpZFy&>rX zNnc3%LDC;q zNR~jd6p|Mpc@dIjkSvGfB}i64vJ#S)Az1~9jt4vcpB?wJ^Zy&GR>K%;AXy8^I!M+- zvH_A;AbAy%*C5#l$?K530m++?yambIkh}xQ{~*}}$-9ue2g&=8Y=-0mNIrz*BS^MD zvK5kTkbDfuc1U(W@(CoLLb4N*&mj36l3kGOhGY*Udm;G(l6{bT3CUNGd=1HdNWOvO zTS&fxW|BoLAyNGd>55t3j?DnU{ik}8mdKoSZ` 
z7$o74L_ks%l4_7tha?h`C`f8RQWKJBNNPb+8 zNE$)X7?LKCG=-!YB+Vgd0Z9TRiIB8}BngsaNKzna1xag2QXxr$qzxo(AxVd%9V8i$ zWI~b!Nqb1LA<2Ox7m^N;6MnN(fk};5sg=8Eg1(1w~WCA1;A(;fp zWJsn!G8K|(kW7c95Rw^?%!H%}l39?x=Br73#8Io0ytcGL_Bx@mA2g!O!HbC+UB(Fm98YCMbc^#5B zAbAs#w;*{Nl6N5aA0(R~c^8uRAbB5>&5(Qm$%l}91j!aiwnDNEl8+(T4#^HkK7r&@ zNOnT<86=-WvI~;kknDkEFC)5Gtm#*Eq_sH+rt9PHi{rV3WIB4*Y zp~Hrc7&&V6n6cvu#!r|yY4Vh*)26faS62q!IYt8wG|)f;4K&a|0}V9LP&&ajsK^rU z3f%qwS>9Eck-z=ZO#=-y&_DwXG|)f;4K&a|0}anYC|#J2{~MKo{ilHj8fc(_1{!Ff zfd(3Apn-;GA=q>L->D4jKMgd{Km!dl&_DwXG|)f;4KzFpp>*L)h?X7yHzl()!OU{E z#)VX7misjtHM>0E#4nGy@XKRv8g&S>Jn0eX;7Z1qf&8L@1{$8#AV6IzuSKUQ%TN=w zisG3DQwKC^if0;39dL4>c%~8IfSn{Xj%uQEaP)TfAf8_CzFHqruDsSdHFI!vb0$tM zZb5$jBp}eQa)pW{xKah{RN3<$1%*to@Ba@W{QLiJdgDh81qB5K1qB5K1qB5K1qH>^ z3l(|j5klDU|4omp9=_%6ogNAb3JMAe3JMAe3JN7q+Hu7?Mhm zREDGqBq5N5LJ|f^I3y8}RE4A(B-J5_gd_@*8j#e4BpQ-hkkp2x4kR&<#6l7WNnJ?d zA*lyReMlNW(h!nHkTiy*2_#J+X$DDiNLoOW07)VwEg?yQBpH$vNLoSC8j@5<(jaLA zNn1$LA!!Fm1|*q~WI@s%l59wFAjyTK10;EnbcCc6B%LAY0!dd$x(AkW2}K> zEhOt8Sr5quNM3>DRY+cgWFsW6L-GbBZ$k1GByU6V4kZ7BWD_LsLh>FY??bW~k`EyH z5R#7|*#gN{NVY-pF(lg|*#XHXkbDZsPDnn3N%x29j?f`3{l;kQ{{M5G02oIReS|ko*A2QAmzKavYKqko*YANl1Qz{w$wf$hgX9t0m+|`T!rKsB-bIi0m)5B{(|Ih zNdAH3Ur262avPF6klcmj9whf6c>u{nNFG7**rcI^gs1>fL86Al0TK-)j*vJ(;tYuk zB(9LSLE;XH2PB@5ctPR~i53zcNXkJ{9ui+j{2=j%Bmj~?NP-}#07*qif+48{No7c? 
zKoSB;C?sK!ghLVmNmWRyK~f!(NJyd}sR2n%NTMOB1xam4>Oc|$Nh~CBkko}F9+G;H z)Q6-2Bn=^H1W98^nn2PNl4g)Jhol8036La0(h`y+NRlB*fut2AtszN;Bn^@_khFy) z9g=pCWI&P$NfspSA<2d$2a;S!IzW;KNk>RJLDCtLE|7GEq#GpNA?X20J|sOM=>1G9Ho% zkW7SR5+su$nF7gFNTxwD9g;#wWmk_y$t#e&3dw7b zY=q=>NZx?tO-SB?wnhGaV= zJ0ST4l20Mo3CU-Wd=AMjNOnWA2a>&zd;!TmNWO&RD@eYEWIrU|K=Lgl-$8N!l7o;O zg5)qHM)^>Il!8yx+=JvkBo82Y2+1Qz9z!BrNaBc2SIFKxQmGv@ zj!w=lu5Rugo?hNspK|4W{rm$0gDO-Eu2i{7NN8AiMAd55Bcp26jILF?PE2fE-S~R- z8#HXxxJlDy&08cSwoFP+Y1KM4txenXb{Uyj?Xz=oJLGlj)VWL7Zryw2_w3cXPv3t1 z2MioEc*xLU!$*u9HG0h0aRuWiOq?`%%G7Do*?O!i1MjAzfd(3Apn(P&XrO@x8fYjR zK|f|AN4z7r`~UmAYmo3VkS-c%pn(P&XrO@x8fc(_1{!Ffq4*#TrsMy1Wgz`D&_DwX zG|)f;4K&a|0}V9LKtu6C8XO7Hvg7|~($}FHvs~ifLL!;vN(YUGU9NG~XlgLa4emWP zu4H@}*gqO*`0oa7TyDY2PfkCk@8G1Mc*dcjtpEkZvy5(N3qZePPeDOZGO+LeW5@UG z`~Plwy3Z*foewRd_bC@3f>C@3f>C@8E$ z>YQH+3JMAqYVy##Cew!a5{*kVvkY|k#hK1Jq@bXnprCj*v6>0G>4shcQITCjmw~9s zIU{l4Kaa^lrGekdbcDnS5@$$UAaRAn4H9=qJRtFe#0wH{NVJgnKvE8p@{ssK;s=R8 zBms~FLJ|Z?1xPAF5)4TtNGd~81(Fa*LLmu*Bpi|mNUB0o4U+1RL_!h;NexJ9LJ|#0 zEl6rZQU{V4NMa$0gQPAb@sQMmq&_4KAZZ9mBS;!U(gc#GkTip&IV3G0Nq{5~l9rGp zL6Qtf3M8!{X$?s#Bx#Vefut=Y>5#O8Bmy7G6a&LkPL%lI3yz= z841ZKNJc|429mLmjDw^AlJSsCfMg;hlOUN4$rMPYLNX1K>5vpcG6RyCkQ6~O3zFH8 z%z@-NNajK^50d$iEP!MoB+o;#2$IE+EP-SxBribnA|%ToSq{ldkgR}YB_uCHvI-I% z4|e`PJML%a|2I~xhB4MavKEqckgSJf10=6N@+u^+L9!8&*CBZWk~blF3zD}Xc?XjJ zL9z*wcOiKXlJ_Cm49N$Od`52PzknDiu6G%RVWG5t_LGn2yyCB&O z$sS1dLh=P9`ylxelCL278j}5xd;`h1kbDQp0Z0x)atM;ckQ{;Jdq{qO>dlHVb@49OKp{($69 zNUlP14U+4S+<@dJB!5BjHzfZ+@-HN}Ah`|69Z2p%au1UGkUW6oAta9=d2G^9LPFrT zFIAAJO}?rMi3So!NSq*XhQtLDS4i9-afieM5>H6HAn}Go3yBXTOv9^Nj*sFL(%|}hLALZq%kB-AZZFoGf0|4(gKnMND?7w2}u$p z$&jQ#(h8E+kfcJA21y%8+Cq{JNjpd~AjyOz3zGJbWJ8hzNiHNEAjyNIBP5+5=?qC1 zNV-DO4U+DV^nfHElAe(Cf}}SjeIV%zNk2&XLoxu8fshP>WH2N{AQ=kDFi3_&G6Is3 zkc@(4G$dmn84JlcND3es56J{bCPFd^lF5)vfn+Kq(;%4+Ng*UNAejkC5hSx9nGMMt zNS=daE+q3HnGeYVNESl!JS2-CSq#Y%NR~qK0wga&vJ8^tkh}!R3P@H$@-if=AXyE` z8c5bcvJR5RQY)OFfI! 
zW|qg)5uP>4aTxUkBtJrO5|W=FIR(kjker6(3?yeEIS0vkNPdCjS4b{EauJf>Ah`s| z?~q)EKynk3zaaS=l7Ari7m{0$+=k>1BzGaX2g!X%9zgOC zl1Gp{hD5lK#1WmYkiB`NQaflIot#}<-P}Dqy}Y$P<;wf|`3D3BRj3$TsdAN&(6I1` zs@1ATM%AbpU8{DTnAo_w@%8FAXxON6lcvp@w@65AnUtK;swo3`ogGBUH;XXoU0 z$m`gtbC<5&y7$QM*{gS-zWw?S7&vI~kfFndj~F>>^q8^Z3dT>EIBD{fsne#1v-hsb zz`Oitpn(P&XrO@x8fc(_1{%sn(2v>35%1c>@cqB=GLSABXrO@x8fc(_1{!Fffd(3A zprQC64W{G&c4Z*_G|)f;4K&a|0}V9LKm!dl&_F}+K^oMg(19KQ`;xv6G0bv;g9~vb z3s!z|`mxmGm4bqTf`WpAf`WpALN3_%|HpCT|9#x@q;~{)Sn6*Z zRvtklfRHfej}WaY%FEuVQ65!EMMCnJKO$m?ZjrrHBOS4ZKdRkyA6rAM`1ZK^;i6)~ z5mFZGhaa0hFD89XStQcQ-l<`~8|p}iR;wa0#D{p=J2eza!`1LdQ{vFGLg{bcnQhPX z_5W#X&nTWOni>9J&JVgp#h)MiK>J=6+RrDAAjA0~BFm=p7uP@N#QYH!LXRh+uODpx zP}{Ri)Q?J}9{)$Rn-$$e>US-}A58rp7hUrHk&12M7cuAa=1Y9k9Czne)VNtuO_c9{ zJ?p5%_IUa6iI0oNlh1GN{HbsM=<#0q;L4ZJf2}R$t0E4Btn^WlXyT)CmcBi;4WoF| z@&7SydD+{SJR*-R;&Syz-#&`fA7{h)qym3F@gy|~`NUI20ymI^a?;apZ}^Z<{*U1E zT6gch_DxJFAb^x-9|B=2j+TVFmHzg<4Sz7_v%Zz-`KGi{$M6S}{{?X+&;P2=*!hO* zKkD1R(&#^O*I)GgH95a8&mQk=F}oF#5Vih%bDcTgq%-Fm8-8Eaa6Smps-A`NEe_oH zOMkt!EM7mfH=GUEqn+ekk9IKp!L(1Yf%N=P+6Vycg%bS(Zv3ggo~%~jdNS9(d1wi# z&9rZ>#YQhNxc1eB`NMw+*S;19?)d1z{NXXOOr9^f^IJCa2h*P~3)e?VicnI8|D(oD z_j>-ywD_zW`i z55q!izdozKp8F))0~L8-(XC_0|0;5m8~1yVi{6jPLtDCjPcwP@Xx{aA3~+A(|(DUg6r83Ctf63ABBA0^sVGzTZwtq2I1z zHlx3eqjhpretXXvxcue&QKgc<0pNHKRp7pwRB-iKf1StE095 z@mJd5u50P<&FB9!1AZ+({{PooNA4GUfVLo= z`9I1nvFW-*C3dC~KMz3b{6wx}_!#DaK9%`T@pOY5-|^Q)%jEcu>tE{6H$>!oqhcrP z^Y?Dh9b8(jZVKb~ZBn`@s~L}Zo8_Nh2<`=h_! 
zqqgCCPk1ShFSz^2`uo%>MebAk80J}W{TmhAh_n4C-woO_c(_wY1XsTLdBA0{|LRGi z2-*BCH}BQ+KREBT57n>c*T0bZ#o-DZcxUusE{FjKn$BnxlqMxtyG+wXd`Um>?5>I3QprkMz|KBOL zhr!Pu=Fi7ZV|i033+}xk`uBLS?+xMKJ4(3rL(ctI?!6%T_mGsvdsDdQRrvPKrsq|- z{bNDjOI|AA>X-gKEc|=xoQ3Bd97^H&I;r5_J5IRz|1`Y!#~j>yYYpS)GVuKLvj(nx z(YJqPv3*e)-rQQ_W<^Jpny>FHO6fZ#LE!vA-@B-PZxsLjkuq6dC@7w0aK{(_{;+4_ z_zE(#=g4Yul=cUnpVrsMzHWxj{XJ-?zaA0=uV*MHF;-%9jf zoDJt^rzdlM=Egtz=OrCV;r&*2; zBf!ueglJXv_>NCmM-b@WJ&o6k0>JV7B+9p{;r_)Fde68yd`NZSeF7eI{`*r8?)=5h z5)a%^D(A1#2G?KHzfY$u+}^IL@b!P=_y4wVZw}f|;=SF;tul8fEy&Psj7Sh|x0B0a zXOH5EB9{HH+D-SdHE2gxNpR21*Rbuo+UDTqPt>?s(OJfCZ2@<$UG7awA;`Dt0Cg5g~iG2H;8;>(>n!_{Rrg7ir7WpEx*9uSc@q){(J&3W~CZ^7{H5qQ>hpcU_QvhnhkJ z-25ck2S>qucbSfHC4>7OavAv+@_zx`yj8Z}^dFeBN?{3Z9+S*#*c2rP_nlpZzOhU3 z-v{?x8GNRUsY{VgWxFl~=<9r0_|B}YFdhG|e0ma2Aq(7mEd9MU?%S;T?kaqrcm{cZkx zie>a$iq8Vn0{Au)cw#Q2#KbQK6HF!` z?dEdUBK{guXow#ICJea?ADm*?!0x8`6JSkjZ?thOIo=Wd_YLo}OYmWSqtg(7)nnZ` z!6!uT<46L?Lt(qZR4$zp2_z5i2)c_wcP16c!yAIGHRvv+B6)aD&_fKmD+wkKcL{oe zL3bmS$!TG`;lg&^NfmNh*lxJ6T@MmUP7B)&7q;t3!pLc1yWzriy+}AYEo?Vj*seFJ zN~Q?gEfTh?CDq6jVY@}bc6~@BnIderNZ4*U5=Ev6+bt5dTb|S)Q>F`gp+Wa0HOa$A zLOvMoHtPIHEpl4W!v)=+)F!8I3A*9!-MRo0Lrz~4bi-Sfb%7+7oIWGyVFo>j#F5jV z33^L|UV+4uDZ+M(gzZ)&^~e-qyG6owgGmE2Mc8hUu-!_eA(4i!OvTC8q^FT+l;FGjjSbK{ve3R~JTFkkeNL-QS>xGqwHnPl8^} zphu8Ia(aiL8)hcys*)r!Mc8hUu-$4TnM@J3TO@3^I%!3w2-__Zwi`)WlPSV>i-he) zkyJ8eqM*+)=ru@Za@!y?M~OkNNxG2RLOpa9>R~kLMsD8_bge;o8%O$*6T)_*gzeTP{m6-3 zf}UW|GJ5ShdeKMF72-}?}Y_|a!LJEZK&J(uVkPIaS!v4+^ z_O}rkMs5rB+*PROjmZddLeQfGy$KmfP6+isN~r%$$!KyysQ*zy{clFbkP|}vj}q#C zb265k5RSVj;kau-3P^#l-Fd=x6UcZ{AZ&M@u-!y5krW8qohNL!C7DDDgze4~wwpvI zlLFzmnB&g?jEP)bkWFjhqnlC_!&Urjrvw{TJS%t!qtYkP|}vj}q#CDw#=6 z2=zZosQ+oCh@23PyC~tfYeQy}0%5!JgzdH^b4Y=(-Fd=x)5%;?AZ&M@u-$fK9w`vE zJ5Shd2ANL^gyU|WaNK1Qe{$a->x563>#|5oGUcv8FEYH-T-Tm7Av=Y1CJE`xCUwaa z;lx>FIC1K7NHp0gly8zyzPTiVOc74-MTQf+t^=t=b_(f964H}LI+06)F7zaH9myFh@G$u=Q9 zX+nCsGyfU;Ekw|JAbLKc_eAtwh~68~`yhH>MDK^_{Skctq7Ov$L5MyW(T5=VP(&Yw 
z=))0x1fq{bbO`qTKRfPc@BhC;_xljaqQ8mgZz1~Ii2e?u{}0hOA^N+B{vM*gkLa5b{R2e*5YazE^eu?K716gL z`p1aA9np6n`X`9~DWdO0^v@9eb41^T=(`bp52Eiy^e+&7AEJMW=wBiF*NDC!(Z50T zZxQ`FL_dJ&2NC@cq8~=|BZ&SzqW^&CM-lxPq8~@}6NvsJqMt$@3ZjP~dMKiYA$mBXM<9AtM6ZVE)e$`s(W4N(2BOzQ z^k_t{h3K^ry$+(sAbKpK$02%MM2|=GdWc>h(HkIoLqu?iPeSx$L{CBVR*2pj(NhsU4bj^mdRs(KNAz}xo`L9@h@OS$?GZg2(Q^s3Mf4XC{Y6AyhUm)?{UtvCTZn!e(eEJoT|~c!==Tx*0ir)d^hb#P7}0fz&d&W8ZLbks zjpz=Du0eE1M0Y}TXGC{FbXP=oLv(jU_ds+{ME62;Z$#H3x(}k4L-g{9?u+Pti0+T* z0f-)m=s}2H0nsZWdN86_LiEarUIo!Z5Iq#p!w@|j(IXJODxz0I^y-KniRe*?UIWo< zB6>8U*FyB#h+YTLV-P(S(c=)kE~3XHdObw1kLV2$y&4KZ;t3K z5Iq6W6A`^7q9-AGGNPv-dMiY4jp(U}o`&dc5WOv;rz3hhM9)CgXqs8`dmbxhv@SWeF35`MD*to zeG#HBM)W0!z7)}4K=c<8eHo%JNA#BveFdVgMD&*teHEgwM)Wm^z82BfA^Lhm-+<_^ zAo{C_{u-ihMD*7Y{S8Ea6Vcy7^tTcH9Yp^hqHjXe}w2; z5Pd76Z$tEt5q&$N??CiV5dBj`--+m-A^PWtz6;TJBl;dh-;3y9Ao@N;{}R!^LiDc@ zeLtdqgXrHP`ge$a0MQR3`XNL=jOa%Y{d+|J0nv{l`Y}X5j_4;4{YOMUiReEe`YA;J z8PQK8`WZw&i|FSN{XC-og6O{@`UOP4i0Ho|`XxmF9nmi%`V~a~1JVCP^s9(|4biV7 z`VBncQ7 zBf0~kYY^QL(VY<88PQ!3-4)T@5ZxWoJrLa!(Y+Ae8_~6h?t|#%5WPI2`y#p@qWdFy z0HOzC^~Y@dAB4oKfanzwJs8m|A$ny*uY%|yh#rdQVTc}%=n;rs71669dUZsPMD!>` zuYu?_5j`5wYax1VM6ZMBF^C?E=y8Z%7t!Mpy&j_1NAw1W-Vo6nA$ns(Z-VGe5xp6r zH%Igqh@ODxiHP13(UTB88PQV^y%nOjM)XufPeb%Jh~5^_(-FNLqGup_CZcB{dV56A zM)Vv+&qeeOh@OY&9TB|~qIX90E{NV0(YqmfcSP@j==q4=6VZDidT&JUgXnz`y&t0Y zNAv-RJ`m9dA^KoMAA;yZ5q%h<4@dM7h&~e0MT45PcS+&qnk)i2fX+&qefkh&~_D7a;mVM1LO9 z7a{s$L|=mFOA-AAM1K*{mm&IcM1KjorwM!qJNI)yAXXhqVGZUy@;+R2fW$wzc1P8*pOKs zaeV07j9Gqx#NUVLUn2Tfi2gOA???1+5dB+3{|?a)Ao@W>KZNLq5&a0Fe~;)tAo@{6 zKZfYX5&ZAXKZEFJ5&ayZpGWjx5dBv~zkui$5&btrzl7+& zBl=}Tzk=w0Ao`z(eihNLA^LShzk%pC5&bVj{~OW&LG*tS{T8C%M)W&~eiza2A^Lqp ze}L!@5&aROKSp#NqF2)YVO+;z8W&77GJ_2>)DG;Mf(ZKxTvEvXm$8RHs6q-qsu;wa z9%_HVzJf@^cese}Qrmsw_Og#OSo(GxZ|VEiTa^# zl3ou|dk@iw`I@Jf1(RN@zQ*)=u+)(&Sj+r1ODmGUiD>>hiTN_=weS_qU*Kz&zs?rE zhBRB|uY*NCh^E(r#I;npi20hQ7tLSM^m6&@D(1Ug#JBa_JvGM{I2n8`&{*(gs{(No z&4(M768knb{%!c;zGiS&$k#(5Ur&X6y+nP7TS{c<9!F4ag(6!+-2r~P 
zxkV;%+2d5izB!rvPBiMCMT9T+#WP8WA7A`r#!T%7u0A~8L$t;D>cRRtoBQ_PgZlc2 z_^uC|XyH5J%$z+g4zqY)C(bt)$%kBWe-9}qn!lF$pfl!!N&Yfkb^Y{TtAXh)U!1St zFXV%`FOw(G*H_F}Bp-Cfd_dC+^TAJ{`s*+1t2=BiFz1h$+Sb~gmil%_#cw^p!gm+z zVdk63{udK%%W=W_GHqL+h3|S1-w~W|EeXCsqWQ23X0YLi)4CSv>#v%tzZ@19g5#o) zsSjL-WDluOyssfIv~K3U?D2;9RunHc5nr8XdQFZC;2SLJyZ*YRHp0iS%IIp$rPSymF?XOXn&MAK$*Z0XZhS=9IFeG7?1J9>y5vnKmY zUjAscmV}VrdsS7$(<|bubM$m5wC<}4V#`QNdhg-Br6rQydzQY@PEyhvDxO{uU!9Yb z^oH4*4?5F)5c7fvw>7;w)ASbS8zJs{S44C6C@vqE{N=X69LrV3eIJNu8ms9wLp4#~ z8zQ#S&JEQS@{Lr;H%cMj8VdQ=RLD14A>UdG`PNp*w~j)-F$(#{D&!ldkZ)aueB%}J zt*4N0eT94*DCFBv#FyzqKNQg%wB~*2RwCD)*w1_VCp$J$$hWaVzD*SJZK{xOGlhJc zE9BckA>Rard=nM&ZK;rNl0v@83i+leTF%`HC2rzRyzd@~jD%~Hs>y+Xd(3i;+3eVI1oA9k5`&Dfqe=r3b#E64opV}8xK z)tY0V`Sv3g=b_YVmi^C<*>>H_@?KY=tCPAXd;f{OprUWrwPf@j(%;fo{ZMzq z-1m`#?uI1a0hYeHCvn~!sF3dbK)K_;#L<{q_wf$udWBBP`GUa&OJDXp_otcGso%X!Utb56 z`T5Oc--S6B{>e@v=`w-&_?u=qP~Yke5c#$ z8_4=j5%rxe;(J(1J~&Sm^&KJNdrm@nlbxrD`sQ2u-n}6uADpL)`eurxS7)m)+wXEN zwDe_R?yJ)=_1(|h*9@?}&k*&ssK1PFzOBAo{hcZ5Yf*n0-w_gg_c;}b`dZXq#&^1{ zzFhsCCF*NYe;MDyQu4uhwy3W~{bhX5Nl0(9^BhrMi~7s>-jI?H&d-VZTGU^5MvJY! zT>YIZ>MK%zb&Rjt(${RBR9#>FoYHxszSFK)NMwe;zpZoDTkTcN7xCStyJPe<&UN(< zuyw90b9^r-&Q}lCH_+C(u8i+O5#RM;6D@p4oSEYvWb0g4#`k&A{I$#noiQJZpOeb? 
zE-KDf@E7u-_&KzU?_x1uk$lh@^P%{;u8i*z@%n4&tJC>cux(B?m)@n~zGB#;av;SY zZ~8g*FNpfy5V4ikPkd1!-(?EWr3@?E8n?`nm7*C^z>Rw3VY z3i+;A$ajN6zON|c`>H~|uPNlaQ6b;g74m&UAzu*#6PSSE9ARhA>VHl z^8HpJ-|rOiJ)n^9L4|w|Ddc-tA>Shk`F^jE?+*(39#zQqm_oiH1}0FGtK9hCmmKh7 zpDjD@<)LDqnLVyhdQK?h`=dg>Cl&JjNg>};3iZSIGB zvhPq;GrwpUqAr(893QI+r<3zl$X#bnFVw{F#7t-7tF)GSp{NdsxsR5K?;)c5is=#O ziDQC@PfJD@OJ_Es`|cJ`k4R>6-~VIB{rva;+}Y>A)M9BEND|ay+bTqKEKjITb*fXH z>QtvX)u~Q(s#BfnRHu6B>keeI6FdG7CLSvG9k7wASt|COt((*ujXT*|=F)2~UCV?* z4<=}RTHM~9NCvukanB{PS3tydEI!rc>fDo{Dz$^g(aD)E>5?v=y`|VSYzhhr3JMAe z3JTkyCJ(*YZ>ji_ecbY-cLX;l<-dj?_G_8!NAg0nswgjer$%{HWxrm@z63rZhUgaA zJ2lb~YxtwuP4}@i)QWG9s~;{ZCLAGUv3~fm>GNXJ=afYvo$Q?&_M3~2glM%Y5<`5r z4?k1LNBLRR{yNHpG=N`x4i7_OCE7gd6 zMR1+FJ^?VE>|klIZ9=2~p@5`$}B+2@q~mvHTCao~;*_M@jBBg^FZ zk~_b#A8ls(^JU@sNJ$Y&s_=i*xanTcU-|aL&v1TV+P|kE|H^~$fGzd6q~P*LU;o+s zaW|}B+WX$Y_HXaG<5OS%#wufJ1rCxN~0<>^5MW7e|5C>KmJM^+;uJeeO$GS`?%7#=VKU0)AN@C;PRKhj`LLJ zulf9c_WghS`2SyT9l2lZ0osCe=KmYa%3}FB!~UMjv)A7%ZE*cd z{dkhiZLWP{5s_6U+o$5d?T`L?kJ^UoJ>jK1zToa7>+e&m6uD3BW0+^j^>0*cBhL1p zd^c#z;Neao5nTD|=K+_+{;Ma6B4qQo+`L!M|KPmWO6*KW!#r#m^H?gD@$(zkKj7!- zi1rV-_EmqsrtGEO{^06|;W-RD%!4j%n2!JN(Nhcsg#>W@GyOd5XW`)C`Y+u4nZh#J ze=$c3(w+aK`4T!$y%fNWKlyoXvc{iW{bJ`&I3+w4^^1a{B=BL57!LY*$Xx$jfB&v5 zUf-jThIWQOs@-%SKa`HRo_27@r+)sDEyt%131`k_)W5F z@p?XgzmRbAb)Log$+chl`*pOxNMR1He(3Ml(fUC_@npfh$BmsY;J-xlJ#O6f5dD0m zr}26v*FVtDmv|cc2PK8+`2S9^Jq&*SFn>ON8q1qPS#a+S(Z9!oeQyZ=-ciD}A9C)$ za_#JUh+}_SHJY{Vd39f=PW$$;7|(B*GUEc-f_a! 
z|EJ--Kjz@xTWc6Umx1S}pEYppi@yCUi|vcb@aEPUH!C`-)O>wsQA*z_2?FN_`rbwT zd!zXGkCe&!LP7B~gFC+X_lG?b$5)V{Jx9JP!u1af`b+*xNPH=L7l(fj zIsYAw(s+;ae;qAISN;!QdGCQ%8Ucp(AVjOO$9H_nI)Xs|?rFSU6abFrCsDpt4fije z(0j(s;X|qm?-TH#^WUF(aOW>}mU!TXQaOK>Hn{$p{(U-S;r4b_g|Gh`zyG&|`=;)G z67TI!Zk4$^X+eg5V?=^zyPaGXJ9`vQ6tV1o)o!|vtwB4oN`iZ4zJ_hz)iwt=f1<|C ziq0~AYYVvRvy49}mJde$N(_Q3y?zQykDd~6Ixo@`9@3m7(QO(#b+e?5}@wvLSL zQ&5yOl-Jki5H(((x$A=TJJb{+;N~aMJ~#^QyUTQpD;eDPkju!okpBzd=B={*rvJd4 zRSHXR^O$5_!=@-XxbN&L^o?DL|30|q%HT6)OkIj}D%*7_Kwsy}!gpqEh3WWz<aKy?hEsG!c%$=?nxm?-_}HVJ=^*4w1E2_>3^`F$GvBTJ^wlU zxAu#I;#q*R;oNHDH^R8*R+!^m^^9CMb5X-Jms>G^6mCdlU$s76B)&EO5fbC) zON^f>F@6iF{(DNEpQce=&R=b1ocZl7(Z7nq>1pu<{6kcQWCl}XbkmLghR38wD3pG= z@$2j@d3spSO@z#2_jiM;Fu`O3(rzwiE#j{sg@*VcF#axy=~?^)(r+4-&mT=;#~3N` zg~#A+iSGpJ{}JO1}2 z$&QK4vXkQ)w_vid%%zjYr}1fg8vkjDKZ^TbMIOT|tvr~&oEUwVpeN{cHF0O-KN8~e z_pfy9yKi`(U4jqu8=Z#us~+pl2|giuA4d{E9tztPrgG_=NFaH5N6=jix-+Rj9^Mdi ztwDDo70JVMf*xYfT}d!`xJ%Fz47wYsOil~i4HvfSPO6a8!gj-j?Rt<qWxJX<@tJ!gjq$RWe1`ZjrEEEvZJP2-__Zw(CP8$rNF`MZ$K=kti}n z*lv-q-SVUcnKE6_3k|w2sYxC_67s=tw^8RuYLU}|9xmwqq&7KyOVAB(@74v77;^fW zpc~$*tP3QuRaNB#xZ^Owd~z^a>=NOcAzQBy6`LsYj*=+bt5d8%!FIDZ+M( zgzZ)$4apQ?yG6owE0acK%2Yw0Y0#^X#^m8WAs-CWS#%+!DLF0Z;esAYnvv6g3A*8J zzPd2df}Fl0=>7&hoT=@ne-iX+20el#lG8f`-7qsrSCu4@DZ+M(gzZ)%$z+PK-6CPT z)k!NdMc8hUu-!<~noJS4TO@2ZilmY$69s*aL9anNliLQFIZ6zAP11$j7V4p^P!FR? 
zH*))iplc0!Ez+IbJ}2lQ2E8`vL2geM^g@GPhx8;TgzZKN+l?W;$O&P)QNng(Ngr}T z*lv`t-8j;hoDjAfC2Y4Y=|@iN67&Rv9#0040%5!JgzeTN14)6f-Fd=x>yyExK-lg) zVY>~;5K4_hGZxy5cYSTu)mGSFmhX{=dMCMZ%jsz6M`Nk=uOB-azd#8Q9}K1 zN=B0tLj8{t>VGpbhMW-Uf0R)Fo0GBRgmBzN3CCRvQa}oX?amXnn?S~s0%5!JgzYAh ziKIZ-?mS_;Ey*NOAZ&M@u-zmwnG^`e-8|vAV`?+GE!1;Yp`NFZY2<{UM+tf>GM$_d z>c8+7ZCz_JgPaiRf0R)FQ^`znLa6^yLj6x8MdXBV+(ik;T^ll+6bRd$Cv3MZnL`SM z?amXnn@;AE0%5!JgzdH?^GJcP-Fd=xGst{WARKq|gySxg_>=nvStoqLT$e>!k|}o$ zdXeFs=DPNz3E3&6Gf7BiHmOUd2q(@W!--RuL!!w}p?s5s^35d?WQuTtFEX6qbsb10 zvQtP;l8~M}(urIWbfG7q>qusi0wJCAgmiWyQ^__VooPZkJCkvwKuG62A)Q^waI#HE zXPS`CuB1OH5YjnMNM|>aPqqo^NfXl3o%zq$Zy|!-1JUyly(gmgLiFB<-Ure9B6>eW z?~mvM5Pcw`4?^_8h&}|-ha&ngL?4dmBM^NgqC>Fv|JiXrd;k9(!uMOnelr@;#~}Jx zL?4If1&BT#(I+7KL`0v2=#vqB3ZhR%^l6Ab9nlLBeFmb>MD!v=pM~hN5q%D#KZodZ z5q%z_&qwqHh`tcfpGWjXh`t!nmmvC5M1KL%Uqtj}h`t=rUqbX1h`tihUqk)keqQ8RZuOj+uh`tfgUq|#e5dBR=e+$vyM)Y?O{eOtQ3DMt0^!E_` zeMH}k=pP{Zhlu_WqHjU;t%$x2(LYA??TEeu(LX`-PZ515qJM_ypCkG%MBk0*dk}pu zqJM$t`w;z0ME?rWzee=^i2eT5dAEopF{NXi2e(r|BC1r5d9*e|Ay$75dC*Vzl`Wt z5d9BC{}a)#BKkE%zmDiP5d9{i|Apv(BlUqtsqbbmw-K=eRF4?^?`SbZ}a|5rrf1tWSTM6ZnKRS-P{(L)hE4AH|8 zJp$3IB6>AMua4-Eh#rOLH4wcfqDLcoEkv)4=yec12GL^?Jr2?9B6>Wc*F*IBh~5Cv z8zOomL~o4fO%S~)qBleI=7`<`(Gw6o5z$*BdJ>{1BYFy=w?g#Rh@Oh*X^7qi(c2<= zI-<8j^bADLMD#2~Z;$BNh@OM!xrp8Y(en_!BcgXg^v;Oh1<|`AdN)Mxj_5rQJs;6~ zB6=@G?~Ukv5WO#=_e1plh&}+(2O|0)L?4XkLlAu^q7Os#;fOv0(MKZsC`2EP=wlFl zETWG?^a4a5kLVK+eIlYyLiEXqJ_XUIBKkB$pN{B-h&}_+XCitLqR&F~*@!*|(Vs)~ zxrjaw(dQ%j0z_Yk=+7hiB1B(|=t~fNDWboC=r1DrGDKgF=r1Aq3PfLt=r1GsDnwt6 z=xY#tEuybO^!13o0nuMU^j8u6HALTt=&vLC8;JfUqQ8acZzK9Ui2grB--PJzBKmuX z{yw5_M)VI5{X<0m2+_A7`c_2WhUgz7`gTO$f#{zg`lpD#6VX3I^v@A}7ozV*^gW2a z7ty~!^nHk~G8_MYiNyN~(Z5FY{fPbzqJN9%-y!+|L_dh=hY&baw8)XnT$5YD9NHbPb|ABDxczJ0rRaqPrrx8=|`-x(A|rBDxo% zdn39Q(R~oT9HN&;bYDdGLv(*c4?y%lL=Qsr3W#12(Ss4a5~5c|^eTuRg6N@$9){@Q zh#rCHRS~@!qE|=sNJNi9^csj>6Vam)y%wU^M)W#}9)swyh#rUNbrC%t(d!|4eME17 z=nWCQ5u!In^d^Yj6w#X@dUHf?f#?Z{o`~oz5j_dflMy`y(OV&UYeY{)^fW|ogXnD$ 
zJsr{8A$kU)XCitQqPIu%Y(&pN^jt*mfarOM-VxC|A$n&-?}F%E5xpCtcSrOdh@Ow= zJrTVZqW4DhK8W5I(fc8Ke?%XE=mQab5TXx8^dX2o6w!wv`f#j2X*T{Jfy5h$=%WyQ zG@_3|^s$IO4$%t`eLSL1K=g@-J_*q$Bl;9XpNim`UXUQ1<_wc^w$u5Bci{K=x-qUn~44vqQ8yk?;!gB5PcJ( zzl-SaA^Q7>z8TR!K=cn0{Ub!*g6LZjeH)^GjOg1DeFvg{g6N+j`c6du4ADPF^j(O) z8`1Y5`d&o;0@3#&`j?3Q6{3HQ==%}<8$|yW(Z56V1BiYQ(GMZ|VMITI=-(sy4~Tvg z(T^eeaYR3X=szO*Nksn%(N7`z&xn2+(a#|ISwugF=;smr7exOR(JvtSMMVD%(JvwT z?}&aG(XSx-ABg@ZqF+VyYlwaw(QhF7O+^0-(f>yDe-Qm&M8AdTw-NmgqTfaIdx(A? z(H|iCLqvas=#LRyhv>vXTvs8w8qpmPU4!V3i0*{w&WP@U=&p$FhUo5y?t$o@i0*~x z-iWS6bRR@7hv?-I-51gQ5Zxcq0}wqBt3PJr{~#n@1w^li=)s6y3DGMfdKE+uLG(~W z4@2~DM2|r9s)$|<(W@hRB%((ldJROciRjUYUJKD{BYGV~k3sZUM2|!Cx`-Z+==Bi2 zKB6~3^oEGu2+HlpVsdM=`OK=eFB?}+G~5WO>^cR}>7h~5p+yCZrJ zM9)X`o`~KH(R(9$A4Knq==~7AKcWvn^nr*z2+;>4`Vd4Pis-`-eK?|zK=hG_J_^xC zBl;LbAB*VY5WN7=$0Pa#M4yQ0lMsC}qEA8esfa!e(WfJNA)?Pf^qGiWgy^#neKw-c zLGx#BKpgS zz6#M-Bl;RdUyJDL5PdzOZ$R`{5dBp|e+|($BKqry{syAIiRf=3`rC;94x;}L(KjLb zyNLcCqQ8&mn-Tp3ME?-cKSJ~^h`trkw;}q+h`t@scOd#Fi2f;}??m*^5dCvR--YPA z5q%G$??rSqIpEEX|9#0$$Ck|Uh~q=oX3X*nB>p}`{}R!^LiDc@eLtdqgXrHP`ge$a z0MQR3`XNL=jOa%Y{d+|J0nv{l`Y}X5j_4;4{YOMUiReEe`YA;J8PQK8`WZw&i|FSN z{XC-og6O{@`UOP4i0Ho|`XxmF9nmi%`V~a~1JVCP^s9(|4biV7`VB|;3 z6hzop;F3c2zl=QuLKRZ@QNm}+t+)^S- z_c(%bD-`iE*0a6DTf}#l?hf$V%`GyC%O0m9_RY!UccM}EEFyflFP=$4{P^N0GiGWx zaP{Hw9-=MIR}a?L+1$7P9@N)I#CLtzL<`>$XXfm2ahS#XI&r?aNIv9}`+G<^(fqZ{ z2c0n=O!AlUs_Uo!S`AEZ`Qm&9e<2^leVIIgzP@6*<^!5um=Asm)n9*6U)^DI zfjNJ~)V9{{wA8mdDt_w$7QVY!4>R9P_P>~DTaF9XmucGqEqvFD_>SOwYf11863vHQ zFoO+8oYu8SUw_qH{pGN@5F8hUOnu-wBzs7O;(ZNyp>;F&Wsf(+x1xBtiTLV7(`#~E z0N-Fy-}TomwGlq<4n@2#-#%m^`J2VoUy*bb3l%x<&5rv$$T9EQ%(9aBK8vJHCz>{k zV@sd5%A&qU?^{SD+R;Pgm^ImF^72QkwIqb}-m9u2o?a1Oouj8ip>nr5jKq23TBEC!?`k{#Cpf&G9w-UMb#D3n> zKiRR7LcWa^@@=A!Z&QVQn(I+A>URC`L(KU=*zSr z|FFxnYsU7(L4O%@TRG-$AMwV6R_A!uDjJCf6jr7QQ;@Th8)YHtO5K z=={U4zH>(tMvIh1#BRGFvOEz0PdxSzYm$oe|wDdgKxA>U34`F2*ww~Io)T@~`} zrjT!Ug?xJ`7Mdnx4GTOr>*3iv+RF<%(m-ZmiM{}U7gfD z+51oI1r>d}t|g=Qkp7mw>W8`;=Dv>{bT=gV4zTpqJ&E(?K!tnSbi`3_ac 
zcbG!H!xi!!p^)!Lg?vXT`uaMk%+GHo`!3A6@K1IUNtX%C$Nv*W({V#29r?DVL(8UP zlBn-F5#JFKeD^s`7WF+W;yc||-$2%Pim2~&5#Pg7^1*qksP707-*Xbuo9sMI)HmPK z_wEfT`QSWV)HhQky*gWc*?yOEp`|Yib6=f~sqcQ~zGi^+eTJy7Mg3)b^KJFz>hDZZ zUyJ(7_>Pd^yU(de)Yqc^GQQJo_2ugCEKy&J`pftpmXZ(7vqgO^>M!GaPC|NfUR>~nd5swalU%6zJa#Rb!B`PiukS%n`q%X;>;ZX zAY13UGQQ7?=C5Tw=#2SL{G3$AcTsV^g1?Xt#m}K-d>4!PisXaNm=DFzb!B{)h}U0B zU!Bgsf^BoEx%4g-_Z7n)l>;gMc+=0Ze?ipuhKQ}Se&UM?`7Tq)cez5oFDc}^LLuLk z3i-aQknbvmd{-;vyG9}3wF>#JQ^Vfu@_kPs-}e>r-K>!B2MYOqsF3eR3i)nP z$akwkzS|V?{a7L2?F#wsP{{Wag?v9%$akkgzMm=N`?*5CyA<->t&r~?g?#rae}(FAo*_%R#8yh6UeDCGOALcSLi^1Y~#?{5nEUQ)>ScZGZ}E984cA>Tg~ z^8HgG->VAwUQ@{Tx`Fp{;QDhEronX?S`QB5= z_r5~D4;1o!sF3d?g?t|?d=C-bS4@vMPaG3Od|EQPSUR&2-FLTmdPFjl`~Dw0?&rV%=gvL{ zrWQ-XK$4&q+g2f>V|hY#s#BfnRHr)CsZMpOQ=RHmr#jV3Uw0)5-t724op`9&cT#pz z%~G-NkUXu{XxMi?o_Erc*39xIvrJ`{#95(k=)?nE-s)r)(Mk-cIfu4D13E?4KC995|uG>%TrbV-+V`Rp!TT;1F~ zJiWZlc_G_Qs9SR&D*e(yzx3BHgZQO$Yd!blm#%KDDJUqOa?mz_g5uc*+XR#+IIwkM z_tc*zsK^o*_TBs*WR^>zmHnrnprD|jprB9+_U;c+P*6}%P&^B8Ad%keH}ry;@jv^` zyA3YM4w=mIqI)v`b#w{}3JMAe3W`!jg1r+!LGhF$!QKg=pm@q*?{T_Pa3VyTMcB^~ zYSiqqp_*Sd)$q#%Cyko@R(rBbs)k)I^cd^Fe@%$unTDFERTR%Om^z?QQ#{jP>VT62 z#WRg$4R(@na#RzQgHs7kGal|P#M#xO1P&Aw6i)+2T4_lrC@7vhRAh+a*4Y}L+Gp=3JMAe3W{eE z34|X{Jo=saPfb)rH{H;MAZnt!BM=9o+a(YU>3a9EPWP~miusWv$!E!9mN+;uYwogZ zoS8m|GqdI{yM`f-E~LucM+|A9*6G|_8FGguE;^mN8)?C=Ia`NW~GjLPM!>5f5WbE_9D|y8rHaYGsLjQRZF^FX4BS!NsgBf$!E!9mUx$A*05=7!6Zjp zo>{}DjU_(5q{?MO+Wh?(Vn|znKWV|PIm@mo7r+oUZS0!zfn@rA!*6_p7-IO1Uj_1* z%_Z(XRYgdGA*lpOWk{+(5&}smBw>(*LlOZ=RYlZV7~kO1~^5o+|#TzYE9TXaCd02yqOr0h~dcog9h5k&yRTQO(C=;~1t7 z%w*#&6gKTmKs*N~xai{*5f!&x;%ra|@lx^eH2VGzi&XFZW4s_6<5jRRUPT+@1=|>}l8x~y+ZeBkjqyTkj2CKSyf7Q% zh1(b}!p3-2ZH!mV#(33jj2CHRyeJ#v)vz&MO&jAy+ZeBwjqz&R7_W|v@nUR@7i(j@ zI2+^DwJ~12jq&Q)7_YvK@fz3|uc3|c8rc}Hv5oPX*ch*=jq#e<7_YgF@mkmzPszR} z1qB5KMcILx+;!XF>_(*#kYcyn;qmN{Q$ZGsB=i^Mgg zp|OOxIzo1r2XV#HYwvy+1qB5K1%*A~K<;|8(KVl0?snB^dNRv>p7H92#D%cu zQn4U*{-trH$Hn?sOi!g8f3KP8u0;614*tqFJ-&kh$~b+jMQ|2}36?tHfmrivJk6zUfUX 
z5twz}4CJh{<|%jGI0mJ;J!|U<7?hQ?l;%3a_+Le?diNykyzukhOWB$vcUyZ131J{w zm#nPQw&d5n~%J#Hs{&%qq%zvky$*=3lz+&BAeqC<{=Id1Yb)gKTtP4#tuCv$= zEBg@YVEmo=euNeo*O~7}=oiLy=KG=2uZv<(hk?nuy~cIeKCtWL>;wBdY#-Ql*gml9 z`Z9p))cSP^Ob^Ff53zOTWnpbyT(applYO+Ze{mYq-@$#1n`OEV?ql3O({-?%)L@-? zIl+A}FDJMU7Ug8)KA4vi+z0b=iZe?mEGL*w4Fi*M*k`&9mW8!-EmJJl3HxYe|5`d) z{?4$EEoWPXO@D{& zgT;B%ES)CjO^{BD^X4(rbtdOcE9+Wzuv}-;ImgfPcfvVmx#c?HoO9fAop9bXTW50K zJZ8BL+XtJ^n|_wR!}h`CyeX1Sdfue#{>$sc?*F@yhn_u|b-CoOYcFP*?v_k?Gs`8+ zvJbO7<~ED;WtN@XLr6bnIe}UBXO=6O2Ow!dq5Z)b7u)nwnh z!M@v~xcBQZDCB$C`hK@ae%W(U+JS!5`) z+{i2oO=^p6Z~lovW@Ax2od^MctBMH@X2I6@wxpprFGEo4gVZz>7th|uDFp=u1qB5K z1qB5KMM=Som9!pe_8F3a-t2OU_bk#=KO@P7f`WpAf`WpAf`WpAf`X#Nn8J*)*|D}g z)1V@=xN*M+nczK=Ny}+Fr-y=qf`WpAf`WpAf`WqLzY2Ta|98^g|Boj-iKm)FJ^lVwMBF=aOhwLXP><>jD%M6ciK`6cpBA zm}hA3bWl)G*bi`?p&P}s3YS7sHs;rv&;Mtq^jF~C|2LO}Fv~ThueUL+_RX|z-t-(w zK|w)5K|w)5K|w)5LGdKO&i@Yq^Z(f~x4qLuK|w)5K|w)5K|w)5K|%3k;iMn`$CKS8 zlv(~uTr^?K@}b5>9nLH@&T~lwvs}n5Yq0YKT&_BACxoO{d3vuC(7AaO6cluB9)(*O znwwWv=G|R%rsqcr3JMAeivKd$`~Nk-{eSk{Z|^iwP*6}%P*6}%P*6}%P*6NsIFec3 z?D#*LG<2!SEc?2+Xrh_rP!AV%EoM2^Yc8qH&Ie$YF)oB0382>pC@3f>C@3h32c2(6 zK|w+1+flgD`F0jCe4&cF;IFITNB%snre+8jYi)lasTvX(Vea%NA%p@EB@`<2TEbkfCLu&7h#5prD|j zprD|jpmNU)Arzag_QIuWS^I91N1qB5K1qB5K1qH>^3>Dew&5!># zd%xvPNGB~F|5H#j8vUtPue>JaOzL#?k4v%Wgq`sxUa)yZ&mRqMa5W_@*a>#HNJua2_5x`y@DHLb6X zw!XTS_0_eludZXUI)ptm469?T|2o$C>NxAG>sntOZ+&$=>#OTqU){j^>W0==H?qFE zvGvtWtglY6zBXsI(3t@q#SpRjZ_0?(CS2I{lOj=|O@gN+fprD|jprD}m??Odx zdh_G|3*N(wLshm91Ug1~gmA0fxSo<)Z934cBJ``=nc!ja^FX$jX!yAY_Vch%ZoQ|) zdbQr38JVyVyf{gxU**Oas@S1im8i$;4srjf_>_2yZi>MejLAYi> ze9VdwltVc~=7P-eRk4K_Vz??`wV3^^7qdV5^H9Te0qeyaXmc?KVa3d^u3)jc5U#Fh zJ)45jY~p`j+4{z-SkI-9Vlp{YG?#=t3bQ$n!mTGW!eVt#xVozKJgQbq9z|N;c$D=# zs!>cH)wG^R(KhE%E$hjwZ9R|bSkI#vG>@tou08;FSLUBHlgugxCw;8CCb1luf2OfA z4c8*fQW|48w$`p?wz)fq*aqR+h1vN)!|z!=!^H_k(CJtBaszEPgX=yP&N6}_^Vb-| zB@6JQNT!scxgWK*JVo6QIc?praR=aTG_uH*Kv;f_vDG1l%OK*lP{o%_Q({Z`H6yl^ zUvrCXm8kq$5Ub^vNMw{>OH%6Pmt>JECgqomtqw6h&JxXYVRd_A 
zwcXFgO3ZLK$6~$OfUf(xIP9v0D};bE0;vD&nddRnXxhClCx{d^$5 zzBjs_x9?-I-lUlN5@L4Hn(SLY>#O@)Up>J3>VeqmaBlS=iVvS68sUI>h?wQ0uG1tgjBY zzBKfQ;k@5d&S1mXGKjg}GaC?!|E=z6_ zm~Jqi5Am{h!%fIZxeo4zy(p+J)4>DOheTPq>B&0eB;{twOaXh@OE@;;^thCaWO*rIFG}fawak5BuUjz=t&*fxBa(;SdW|pip__7XJ zSy`DWvVKn3t7nWu>)fmk`B}*w@>5c!JNR>Zk&`7i1?(j_#vxJ0rUeA@^&vMaudYmo zAl9K>R(g)?G*sa7At@_6O|C;lVK3xX$~uOlo2yoGTw+Wdp!B__!q zdJ+}mP@>BKM9nyqXs|*Y3=VRK;6%eXlxWaK9N9FKWB^T^7>5!K;)%094YG&7#D#Gv z(WMCD%9n-w%K*g9P@1wG+zn}vJ@)i4q(QcWr@=w)#Vz8cKQ?72fp{CzAopSh(dz4i z{51H0e30i-4j$dIEXuPE8F{j{&che9QS$b}k8#LK?3|vND7UNc&p4!~Cuhn0Ibp8@ zWaKAim%xAoG7d?(+3E5fD(hQX`D30c>~T?&3kjqeTNWj0?1|1fqhy0NLeISh&~q;j zdhX>x&%He8xt9k$_wu0UULGYs_u6|bZVxz+)86d+--5|dNA^W;a~+2|vTtm6EUC$^S5AD~?c&t4k6gZwl&8uCGQv+V@)L6(cNAs=L?!3E}n zEEiYCf$2|WB+9TJCqR(HkmKM29U97UaD@(yMZo z;Ai3>rvvEE9S2+wknKFla0oCu$ZX;RjSlR^S{W`uCix(z7FRH)L1qW4qR~NS2P)X; zAhQEi2{_0r(#mEI(hH-CnS=Bq4KX=a%V}9d&C(#fNW;t=q!($pnS=BqjWBVjFQYW8 znmI@>&1z;2(o3_tNomSyCnL?$AiXrB%p9bbW(_k3>En}{poQs>A*Yp#Hg%A3gx3;r zkd~0zA`a5(L>-fUw4Aejj7S=!)rnXU2WfR8PQ*dl5nk8ALFy46FXAArH0y~tNGr|y zCSw{oJW$4MiNJm1ZLm2WdxmV^aqi1KuW<4ib;>rig>>32BBn*j^`^!}?HP z&bhk6i?pAagY+vY{Y@O?9H0Zt9Hf`#Kr;vF zr8&s-u7`|NS`T;r&v@r6nqzpH8ka2{M5OI)}kdYz|((1%0 zllma%2GVGeG)SuxV?-RJ)rqkp4$_YBaTX3zkMIH!2Wh1_Uc^CKX-+V`pBN)y<9bXK zNrSY~oFw8Ptu!Z#I7mCfrxH_a*P}Q`;v7I?5EndID~k3k>^d*BV)+4$kai`Fn+PA zgN&ZQ5>p2m!}z794l-)^3#JY-hVd_&I>;EtFEe$J(dsTYb&yfRUoxGO6C>SWg{gy# zI=Rx+K}MZ?+0;Qso%C0+4NOvIMs9Ydys4Zz9s8xZ(f!8s8(q+^Z;UJt!~DhMl&suT znHFjx4RU7(I|vSP-yWwi?uE?x=8i%dWKCWq?EFQ8gWRdT&Vqxi$%}-Yzi4ofHF=S+ z^B4J7OUkbiZqPwiX}Ak}5i7GaJ&X=AE2XE=L1v}&GCIhbQOnI=EWx~D7Fv>*o|wTt zotu)KktnB~@!=iPGGsfH;~khjRYr=umae=pCuPm3WoIJ_ZB&dr2S1~O%sT0BbdXsm z1B?zb>tvwOL1vu{GCIg?C@UBpWHyu)jSey!%3z~|%!aa((LrXNtZY2%#mIB0VswyM zCqoD~U@gJvKNL8~OGFrOkau8&8}mVCos2L#$gGo9jSe#FWHqCM%sN@!=peIBMiOqw zTY~x!1svqnhZ?{^UVW%3zCArtj zBF30{nYB5Vl~*w~jIZi(4l-ZF%%Q$^cTCzFg0GV5fr(LrXNOffphtdp&b*BxTyIkYx9$gGp8MhBU7 zGR^29vre`#I>>A%+Zr8Y*2#3EgUmYF&gdYsPG%S#WY)<{ql3&knPt3^6(i4~z0pBt 
zoy;~m$gGn&MhBU7GS}!Jvrcv}I>@Y(c}54Bb+V(;L1vxoWOR^OCp#M*WY)5*5mqkx0F=4Lc-kk{Od0S@w-o3W&1Z`@($ z@s;e2JM28ZlD%<7>HhkW)HnM8+!{9csc;`<>}2N?(WVZDR=3%*B8 z9b}~8d)|S2J(|4h2|t)SNZya5<_?ng;~488Tq=~)_8&KKkkeY9U>(?IO7?Y`A59%( z?8Zq`2N}EZ6YIbnbZno^bxM1oaV=pe6I>^|I-&hCzgfn?Na>>F$>W=(w z;UINKE^~EK-;I&A8&@nHB<{u^mJSkkL&rL_OUX^lkCS;r#f1qiGc$8?h3-Rsa$dXq zhOsd*+<&%ssKgu^NpVn%rlDbCLz^8Ou)VP5#~d_R8fR^I!L*LP(#!~%4n;msDqRPJzCU3${ns+77h}A%%QfqgX95c9TSHR z8ErE%J4(A+9Yb<6(z%P1yhm267>Q|!wQ!Jn?IF&>L26dkwQ!JnM=RdKLF&n`o`r+d z;;e7sAobMMz`{Z5J==zyLtZDYJKQQJKStv3+1a3`4$?2FH8ydGm63)fW)9NR(A2~s zPDUD8WKz#;$@^E(ab@58d{n-)RU2hBr^x;X-GD4s4pW8 zPiFj|VwMi+d(z6pp@ED&X>H~pJq@Y)^A_7uwa(2-%FoD6Nt1LDA7&&HSP z;UMMLEIM$LB9;-rL+kavJ)WI>i;E*1do0S(Q^~E*AEF2{50%7OYTBJeZe9RG+4ic}Q zkF<1(FXAArp_!m}V4q}Uda_A5ZKcizod_IqWKAWV1RNx9p2KA5AS)45fJ2h3G)#pK zveGaOI>^e1=_U?x5>aSbA0%EZoncZRzeW}4J4 zIq4}ftz9xKW|`D3Icb<}TDxRe%rUK9G8~>Wt6kD9<^l&PcmIu;XSlbNk&%)uYufpI z3kRw9Zx@ImqzFY_wS{x))N?u*AYae-^VjAC|)TF%o~v zLua@%Wb+jibI^H=2GDtoJmj9iC@3f>C@3f>D5QYB{~tu>|C@o%|K~yH|MQ^p|9Q~) z|2*jYe;#!HKMy+pp9h`)&x6kY=RxQH^PuzpdC>X)Jm~y?9(4Xc5Btvl52Ew`%|Pe> z^PuzpdC>X)Jm~y?9(4Xc4?6#!2c7@VgUYT2k&jlDjp9|nYp9|nY zp9|no^5+5^nUIy8*(xnLKQTEuTh<^#W#S-b5TQ14kn;v~2NMT5UqI2RphG|JOU5d;WiuX9Tm{?HQ+LZ)&zm zXNE3WZJFUjPJVJun&k7d%ETcpD<_5hVoFkW=d9cm+r|cJ6Nh$**=?oGhh+OH(4kd! 
zX1n~1l-!P)*==og(C`k~ouq9Tz>#-ommz(cvK}tHLx-fq9GO$cUG-^@@8HHew3P4Q z&O5~AXLiWtho{n?$>1Dva%If;=HbaZq{(;i;vG`B=P@Kb-E4q2bZ8{ULCZVHPXh7b z9WruKGv(YKBkVI7oP)kB*@podHjI}v8L+&VgQO3*`s!9VeX2beg>_#7k&)bB-(yoVtO`%DJk%4H@C)6XS3RNx)hHy6m+1;Rd2 zp-)4S%qmGL>D$TVto%+n9on%O5+lu_vi{hVb;FiJ6=NFM8^u}=L#V!; z>@3?MOkWmq+j{ni3jWNSBlR%|0TH}IVv^10YI9W8mqoVhGjBEF*o=|sP+f44`Pp|8 zDLBad>^q4v)=BnO0pldqp@zP#PHZPL38bd57wlO_jzhG*R(DLxO_KFlOi~Lv$SRK7 z28S%UZB!kDLw-^_c~{6|^k==qB=)9{9EVu_v6+>f!``jTY}HEYh5k7GvDq>+TV@MU zSD%K&?A)}(^!&t(#PrTxBxPz-;fy0M1?&uZzNJl#IBLz2-nwFd2E=|@gNxchur98V6Ghoa~Vv-|o z^ySQqIowGWQ%CcS3ChovXy(HGUxEVS;L4079SGYyYL%bcS#I;;%8ZE#$!e9K(Lr{d zHNSGMB3Hd5xcUEQy&E%I@M7|wPpABixN-k-6SJ97N={mSUShh$sqcCh59S|hzz+ZA zj-TC_eIi0y$E^G3_Grw$~UDWY8IgIwUkwqQBgN%>i_ z`wr~Y2T$hz^puR&xw6L>>=g)CeOgLT>6p>C1ACn@IX@*YEs34@kSNc^n`vK|GhULc z14wYu5bnI0msKJc5B7>-GSf3k>69XMw9A*-~5m^$Z5(&J0&!u4YHCC!|U2chyq-C%-oMgFheFnCd zz*S#4DPdh$7)x4KL6Zu?ag{vl_cMa=R~#i#LZAvBLKVqpX?IB-T&7(RArWd4mBLznJuy- zRc4F1neAnFm>3tfht0T@q=QW=bM@SqhjDfDCEX`9x^T53r(TM#g40?*_pC0CsbkToU4!) zrrODB(yADG`&?S&+W3+%l9k&!pBZ>3c1V|Z^oMf}5+9i~K?LW(Jxh?AlqJWZs?niC zvMh&cMhD67B;!%t;80)gUPKxlWbQ?j(Lv^3)Bt6nzuzyhWNKm#Ht$n3=3w(a)e?7* zP%^bK2b=ez4(4F0Aha#5FfQ;A1U&Lti_xfmT}=A^69L1s?6m3Rc_ZZhtWQz#xH4$`vJQ^Y}9v3iL(NGlX? 
z5eI37qP1|4S|~mu4$^Y6oQQ+8oGdTmAT1|-MI5B%q@T(7OHQ5i7jcl5lK~lvJ#j7Z`QP2>ATuXDN__secVC-=f`XzfK}9@VBDwkh z#D$Lg4WQ$G9(3H#gO2-o&~ZNxI_~E|$NfD18{>X^_eCfuC@3f>C`t!TWUgxzGyacf z275KwrAsoY$t))@%V=h~lUdecKXKriOlmXBWM)aP6Bt0R6Y!wd33$-!1U%?<0v_}_ z0S|hefX9F1I)RD_c{yn%>Hts=Q`oy7NI^kCLGf%sMFx7*;qL!;^XSf;$<#9)@RJ;2 zN8P3|3N2zdnZ*dPh~aJ)Bf%nuKNmwKnhrI$2(S`^Uj*u7grG4h=wqnGVrbb#MU$2fG*>lz{K{5h@QW(?7$WqVM2EVASj}e2$h|$L=Cl-U-+gOvWsLVWA9%m9mV-|xgxw`t26G`bz zK7PDD$HZdri+cJPBKx9`(Lf(VWM5|T`_j;4D=IS&mNznqp*4%a?n`4*ZjSYmYr@2E zwpwyct;FCL&Ga$wl53%lftOr@$yUTlF3}{0Sjn~IVpuD=BtC|fl1tXdz)P-`J_cTL ztxdKfR&uE(F|=ke*nR25xBN5AH$jAUy)z#}tYOz{UGy+VvhL zF|?*JxN^$pVyMMZs&5N>T8Y6gdg)_`?2A4|AAJnGE$nNu6|uIkpGgd{cD+Bpw?+8A z4B%skouu@J1NAZReHpBef$z%@ldXvD%TSXTT9K^`)0Z6HItmUziNPto<0H$xu-FS(f}TM;X{B9j&txlNB{$zBhSn?wyD$DK(;hvDqc^noIER9Q!V(T-gLe!w zj1MMXxx_Ndi!O7R(F7?R>Kf<7Hi!D}^tS9Q^W~_*_ljj-8yCJ;Ec-=j!uN_(6SI>$ zCT6EFe`F-5+xQ(q78-+t>@T57H~0fkh2%ACJu7;!p%G< z*}GWA7iaOGbF=E;ZpDFpK(crT4=WDbJ{9ZXY2v_sm`Tzu5bi#|xPxuGK)Cz-;tsa$ z0^#oSo7M-(W#MDR!L}VKXT`y`9Vu_(Ag3&RF$bH=!Vhz>xh(wAI$7Vw9SN}FVB3xa zT5+&#M}pAOv~@2kSaGmzFDhDbux&4b#g8mocY$#C`NbV}Hbk1PE1eci9Hb2Lf@2NRhI!OEso5rM6 z<)p#U(m`SxoXp!Q$qvru?Sy0p7mGA-pR;8@vuvZ6t3?{5I#~3lxR0>r#pQR%u<@R{2IGC0_rp`KV8Yok4F42dSSY)LJ_(Y%=q)aFCjYau#JF)uFsa zSx9y8wQ^k8=H+MQxUkK^qCaJ*Q8w>P09Ge$c2Iu&??g6v*5&5^&mr;be1B$HkDceQ z%2n5AmMa}{)eV^CRmbxV4VmQu*Ygf#<$PLJ(!qY^pdJ5BR<^Fln!m1s4$>1r*z>y4 zp}s5!2ZKX;y1d5#*zfxA4vE?F4A^fS8TTT8?6~2>`wf-m;AC(}%FUMd)_rHzf$3_p zpYUdkWPEseI%$`f-InQ}w$9G%kd>bz{i`>$P8mS!6pxbEDfW9IS^R#m-;$gl=ZoSh zy@QOO>Y+BIp9LFLiR5M`Wv0tL!s+pC06o5W(BqrOf8+R8el$q&tiget^lHT2|6k_Tm|1Ri zdr94dS*Cg}QbjR!u2p(s>-?<5>>Q@!nb=bLN4r%f4$LAqP2yL1)g}%d*ef^^)8Js@ z&`ySf#>Ane3eJfeWm<{u`w~Wr@=$Sp^+p9PZI|jNnpPSisnND zn;g7Nj|=wtDDbhxL2Kf`-HfyOw>*5z>m>IHGAZZya$*i_hT7~<-ozm}ubs4?6ZU#F zbci$LVq%ibIpJ?{WU*HYQgX6vPD6lcos{rP0|HGPvNQD;u2VYM+}H(~)`uMF@A9Z% z;*gP(nkn^Lw-rs(Aftr|Hp_=3$)DS;WKo*j2gaEUiLuck#KfUhn)I(#hnlvuouxa3 zndXD!a~}IdnaSBdCsndTglP{XNn*F1uur6!lts4m>!>wMTiQfv_p(^jG;gEWnk~tp 
zsd*bEIStLk&KHsfQ_V#jqzwvNh&xCan6h6^=0Ab0|9J{K4}|?Fe138ovm!Swv)GER zGIe0ba>>%Zd#)C9kaX5?FgRo}bvh+CU)mn9(3qw{>M4t`eNw%H{@$0g?Xz&Q%!iyr z_QFdFQ@}PkIEy-P{UIA|T#Bs^#g(wD)v~Z9f7$z}ge0frs$Cp(EVkax|BEf0f(jL)=la1$J2W8^v5dsO}OKuwN$Cve~Y8|=v( zA%%sWxoWoRw93oR>6n(Al$tN^3aW~A&>v66Ly0}IY;{nx4*KrAl;0WQz&dbE347L& zGdn=Tzo|ovR0kJRhaBm9;cD(6c`w|| z9b%+9xU&w-XeK2)BQafnj*(WmJy?gdj1o9_8uubaUaj^rI)Do>G5I-Jw$@K?QHNLw z4q84RGUV4NA9DxE^`V@(gXB6{-qb-xjq){jkh~Xu<_?ng!k=|W&upER#8k;F=~rmE zD+h)&$gGorTw9%##ZQ7|PTrE!5dcBx>eTW1O z^6EnraFACYY5)g$^`RzkkXIjSu@2cOIqYQ%g{OmAh~)w z3Hcy*x~j8z8YEY57jp;6mra4YJklI@NSU5w+6-E^i2WdqbVsXxv+D?Xwq(NGdhKV>xE7EWg2WdqbVc{V4 zjrdhX9Hf>F4$?}qhKK`Onzr9%u4&f7bjXm}%0*i` zNIb%8Ar7`Dq&DJUd!49b(Vv!j?v6pyV0)d2MI3Ce6LE-x?MHZBQ3okUcs$}@dui4~ z9BePm`W9mwsr|_YNE&P}&4!4B?WNfWaj^XeZ*1uxap2oT+(E(--qebNZ3$^+#lf~Z z(cHW~)R%hhZeb-2w$+IQD-O2Ri9{<7wjJRuQ3qR(@FXh^wv}eG6$jf&GsU7brJlRx z-v2ip|F^QTC$`mz)>a&Bs}reK9BgYR(k!mQNFBYmv2c*u+P1ZDkUCOIw{Va;dT(dp zAhidOVc{Tk^qy(qAhl*^SzHf`k?7Fg!a?fjJ=?-TYHOQg;UKk6=2|#Nt&<%r9HiFC zJPQY@b+V&{gVZ|N3APWiz82Zp)Ir9TlrE+YGOna_wYbtMwN7?3O@oXpDcwySWL!z< zVd@}lpvEEJ%t88qb%x!)Z0O$ku=y|C&nNSw%3WVh=c7%_&8AqDMxq#;$VAejz=79 zFU<*-cL!r^+_)YSku=y|nv)O*+e>pY;$Zs`KE={O;+2%C;tmpy@M%^YY)i;=D-O2R zi9*YJy%JB}GpwY+wmLD>ii2%+LhkrqO*V1ke_wLbu_d$AIKAZ>K#HuS$F}xj7V2PY zdokOJgKc$Vjui*n>c(@HbwgrLV6K%k*j6{@S#hwfZp^phU|V~!z%m~s4&xVE&WDJR z=J33wgTx`zB1;E}!}!IP4ib9;ODr8E4&#?vI!LUOFIYNA9LB$B=^$|!zs%A>VynB{ z(m`UKe96M0UW_z{6_ySX>*PvH2Z?p^WlIN%b+Wv)=!!r^TDk0gG;R8>UFTHf&Vxsf z9#3;NkCUKi3dBBHTDm~KyjzM8zEigI9RJ;}o0g@ZEF4RBb0ToEcLGQ$XSoPm?41Di zc|kk32X2Nx>~<`;m+t07;9>6s5Knt2fOy$E0qpadc0Lxgc29wi-BVD`@W*z2dvLzA z+XJ~{d}~mV1KusU`~SPWtCWGBps&3XK>X~T0OD`&1dsrGCx8UnI{_rf-U%QT?41Bo z(cTFl!S+r7sbud2kjnN>0I6c{1aP0K7S8{6K`8s*d9RVq+nI5CVUyjJ&X;!2Fszif zXNhooCxArQI{~Dsy%RvH**gKGy1f%XA|XM>1Ii+b`=4zNPBSeM+Z>E6Rb?x~b~{4evhlTQ%x-iq=hp}!qFLEqCnU;1Yj?so2sgoKa~t+7UxD&)TV^T|Qp zTVb9g7Ec|eo*cSLdFX^pDrHNh8zV2_RYaPQViw|F^gIE)aR|U4D{~^PPYv3t2C(exm4L 
z?*x!MdnbU%dinMfMJIbFfXI8B?2|+ndnbT&wRZwYH+v_5bhmc`NDq4_faKdd0i-AU zp9_0mVFn=~S{L>nL+PfVSLtp}1ijh+wCsH*cGp9=drqZ`KBc@pOX$C!&Srs%Bp7bH zl_vU??&d_$-`)uzL+qUZGSuD)Aj9mP05aU(2_Pfvod7b@-U%S1?41BI+TIBuW9*%P zCoujWYr|F&6y1k$3i}t20=qx{Homk^=N6b?=j{`+pMBoPF!g%8c$@@9Q}7MHu;cx{ zZ`uD0PZ$E@qKrMo#1oMZnpj9EhLk%IHw|N1M_ z#2#0se<|I~iQrfEKdqi5*dqmU#{Z6Fpcgy-k0#4BDa>-WCYhbtRH$)$LzBiVH@UvS z9^Z>|y?lu)xxj75h44pN2;sXTrHPBByEzg3X72=$OZH9x`Q6?LAeZf(0CL6N2_S#i zI{`#z?*woIv8?ROR%yxk$tkT8JEY5)Jj1@(rDf-=#GIV8jMn)rGc$9fn>Y~eclo(F z+1$HDx$gl>a?k(=?$w+ropPnwIGQ^oXG%$2<*^ z9m-qgWMYaW8(-j%k=Hs~+OJvo1BVpuo6_8Ot|d5xnbk?Dzh|GaHafJ<&FYY!mE0je zMas1aD;0Bxsu&~}OkZKrsYjdn^U{H4Y?C>CiNsy{8=^XA7;u4a;{3BuPU+h1fih)|= zq79L~B&PrDom&(X6ciK`6ciK`6ciK`&lXf9oxN^Q=s^;QnpEMYov_yfG(@dtulDd) p3VQNy)8|*S(be27L&NGSq>x@4qM)FlprD|jprD|jpr9zb_& 0 */ diff --git a/sys/dev/ice/ice_common.h b/sys/dev/ice/ice_common.h --- a/sys/dev/ice/ice_common.h +++ b/sys/dev/ice/ice_common.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -60,7 +60,7 @@ enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); -void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading); void ice_destroy_all_ctrlq(struct ice_hw *hw); enum ice_status ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, @@ -197,6 +197,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); bool ice_fw_supports_link_override(struct ice_hw *hw); +bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw); enum ice_status ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); @@ -301,6 +302,7 @@ void ice_print_rollback_msg(struct ice_hw *hw); bool ice_is_e810(struct ice_hw *hw); bool ice_is_e810t(struct ice_hw *hw); +bool ice_is_e823(struct ice_hw *hw); enum ice_status ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1); @@ -332,6 +334,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); enum ice_status ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw); enum ice_status ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, diff --git a/sys/dev/ice/ice_common.c b/sys/dev/ice/ice_common.c --- a/sys/dev/ice/ice_common.c +++ b/sys/dev/ice/ice_common.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -39,118 +39,110 @@ #define ICE_PF_RESET_WAIT_COUNT 300 -/** - * dump_phy_type - helper function that prints PHY type strings - * @hw: pointer to the HW structure - * @phy: 64 bit PHY type to decipher - * @i: bit index within phy - * @phy_string: string corresponding to bit i in phy - * @prefix: prefix string to differentiate multiple dumps - */ -static void -dump_phy_type(struct ice_hw *hw, u64 phy, u8 i, const char *phy_string, - const char *prefix) -{ - if (phy & BIT_ULL(i)) - ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", prefix, i, - phy_string); -} +static const char * const ice_link_mode_str_low[] = { + [0] = "100BASE_TX", + [1] = "100M_SGMII", + [2] = "1000BASE_T", + [3] = "1000BASE_SX", + [4] = "1000BASE_LX", + [5] = "1000BASE_KX", + [6] = "1G_SGMII", + [7] = "2500BASE_T", + [8] = "2500BASE_X", + [9] = "2500BASE_KX", + [10] = "5GBASE_T", + [11] = "5GBASE_KR", + [12] = "10GBASE_T", + [13] = "10G_SFI_DA", + [14] = "10GBASE_SR", + [15] = "10GBASE_LR", + [16] = "10GBASE_KR_CR1", + [17] = "10G_SFI_AOC_ACC", + [18] = "10G_SFI_C2C", + [19] = "25GBASE_T", + [20] = "25GBASE_CR", + [21] = "25GBASE_CR_S", + [22] = "25GBASE_CR1", + [23] = "25GBASE_SR", + [24] = "25GBASE_LR", + [25] = "25GBASE_KR", + [26] = "25GBASE_KR_S", + [27] = "25GBASE_KR1", + [28] = "25G_AUI_AOC_ACC", + [29] = "25G_AUI_C2C", + [30] = "40GBASE_CR4", + [31] = "40GBASE_SR4", + [32] = "40GBASE_LR4", + [33] = "40GBASE_KR4", + [34] = "40G_XLAUI_AOC_ACC", + [35] = "40G_XLAUI", + [36] = "50GBASE_CR2", + [37] = "50GBASE_SR2", + [38] = "50GBASE_LR2", + [39] = "50GBASE_KR2", + [40] = "50G_LAUI2_AOC_ACC", + [41] = "50G_LAUI2", + [42] = "50G_AUI2_AOC_ACC", + [43] = "50G_AUI2", + [44] = "50GBASE_CP", + [45] = "50GBASE_SR", + [46] = "50GBASE_FR", + [47] = "50GBASE_LR", + [48] = "50GBASE_KR_PAM4", + [49] = "50G_AUI1_AOC_ACC", + [50] = "50G_AUI1", + [51] = "100GBASE_CR4", + [52] = "100GBASE_SR4", + [53] = "100GBASE_LR4", + [54] = 
"100GBASE_KR4", + [55] = "100G_CAUI4_AOC_ACC", + [56] = "100G_CAUI4", + [57] = "100G_AUI4_AOC_ACC", + [58] = "100G_AUI4", + [59] = "100GBASE_CR_PAM4", + [60] = "100GBASE_KR_PAM4", + [61] = "100GBASE_CP2", + [62] = "100GBASE_SR2", + [63] = "100GBASE_DR", +}; + +static const char * const ice_link_mode_str_high[] = { + [0] = "100GBASE_KR2_PAM4", + [1] = "100G_CAUI2_AOC_ACC", + [2] = "100G_CAUI2", + [3] = "100G_AUI2_AOC_ACC", + [4] = "100G_AUI2", +}; /** - * ice_dump_phy_type_low - helper function to dump phy_type_low + * ice_dump_phy_type - helper function to dump phy_type * @hw: pointer to the HW structure * @low: 64 bit value for phy_type_low + * @high: 64 bit value for phy_type_high * @prefix: prefix string to differentiate multiple dumps */ static void -ice_dump_phy_type_low(struct ice_hw *hw, u64 low, const char *prefix) +ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix) { + u32 i; + ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, (unsigned long long)low); - dump_phy_type(hw, low, 0, "100BASE_TX", prefix); - dump_phy_type(hw, low, 1, "100M_SGMII", prefix); - dump_phy_type(hw, low, 2, "1000BASE_T", prefix); - dump_phy_type(hw, low, 3, "1000BASE_SX", prefix); - dump_phy_type(hw, low, 4, "1000BASE_LX", prefix); - dump_phy_type(hw, low, 5, "1000BASE_KX", prefix); - dump_phy_type(hw, low, 6, "1G_SGMII", prefix); - dump_phy_type(hw, low, 7, "2500BASE_T", prefix); - dump_phy_type(hw, low, 8, "2500BASE_X", prefix); - dump_phy_type(hw, low, 9, "2500BASE_KX", prefix); - dump_phy_type(hw, low, 10, "5GBASE_T", prefix); - dump_phy_type(hw, low, 11, "5GBASE_KR", prefix); - dump_phy_type(hw, low, 12, "10GBASE_T", prefix); - dump_phy_type(hw, low, 13, "10G_SFI_DA", prefix); - dump_phy_type(hw, low, 14, "10GBASE_SR", prefix); - dump_phy_type(hw, low, 15, "10GBASE_LR", prefix); - dump_phy_type(hw, low, 16, "10GBASE_KR_CR1", prefix); - dump_phy_type(hw, low, 17, "10G_SFI_AOC_ACC", prefix); - dump_phy_type(hw, low, 18, "10G_SFI_C2C", 
prefix); - dump_phy_type(hw, low, 19, "25GBASE_T", prefix); - dump_phy_type(hw, low, 20, "25GBASE_CR", prefix); - dump_phy_type(hw, low, 21, "25GBASE_CR_S", prefix); - dump_phy_type(hw, low, 22, "25GBASE_CR1", prefix); - dump_phy_type(hw, low, 23, "25GBASE_SR", prefix); - dump_phy_type(hw, low, 24, "25GBASE_LR", prefix); - dump_phy_type(hw, low, 25, "25GBASE_KR", prefix); - dump_phy_type(hw, low, 26, "25GBASE_KR_S", prefix); - dump_phy_type(hw, low, 27, "25GBASE_KR1", prefix); - dump_phy_type(hw, low, 28, "25G_AUI_AOC_ACC", prefix); - dump_phy_type(hw, low, 29, "25G_AUI_C2C", prefix); - dump_phy_type(hw, low, 30, "40GBASE_CR4", prefix); - dump_phy_type(hw, low, 31, "40GBASE_SR4", prefix); - dump_phy_type(hw, low, 32, "40GBASE_LR4", prefix); - dump_phy_type(hw, low, 33, "40GBASE_KR4", prefix); - dump_phy_type(hw, low, 34, "40G_XLAUI_AOC_ACC", prefix); - dump_phy_type(hw, low, 35, "40G_XLAUI", prefix); - dump_phy_type(hw, low, 36, "50GBASE_CR2", prefix); - dump_phy_type(hw, low, 37, "50GBASE_SR2", prefix); - dump_phy_type(hw, low, 38, "50GBASE_LR2", prefix); - dump_phy_type(hw, low, 39, "50GBASE_KR2", prefix); - dump_phy_type(hw, low, 40, "50G_LAUI2_AOC_ACC", prefix); - dump_phy_type(hw, low, 41, "50G_LAUI2", prefix); - dump_phy_type(hw, low, 42, "50G_AUI2_AOC_ACC", prefix); - dump_phy_type(hw, low, 43, "50G_AUI2", prefix); - dump_phy_type(hw, low, 44, "50GBASE_CP", prefix); - dump_phy_type(hw, low, 45, "50GBASE_SR", prefix); - dump_phy_type(hw, low, 46, "50GBASE_FR", prefix); - dump_phy_type(hw, low, 47, "50GBASE_LR", prefix); - dump_phy_type(hw, low, 48, "50GBASE_KR_PAM4", prefix); - dump_phy_type(hw, low, 49, "50G_AUI1_AOC_ACC", prefix); - dump_phy_type(hw, low, 50, "50G_AUI1", prefix); - dump_phy_type(hw, low, 51, "100GBASE_CR4", prefix); - dump_phy_type(hw, low, 52, "100GBASE_SR4", prefix); - dump_phy_type(hw, low, 53, "100GBASE_LR4", prefix); - dump_phy_type(hw, low, 54, "100GBASE_KR4", prefix); - dump_phy_type(hw, low, 55, "100G_CAUI4_AOC_ACC", prefix); - 
dump_phy_type(hw, low, 56, "100G_CAUI4", prefix); - dump_phy_type(hw, low, 57, "100G_AUI4_AOC_ACC", prefix); - dump_phy_type(hw, low, 58, "100G_AUI4", prefix); - dump_phy_type(hw, low, 59, "100GBASE_CR_PAM4", prefix); - dump_phy_type(hw, low, 60, "100GBASE_KR_PAM4", prefix); - dump_phy_type(hw, low, 61, "100GBASE_CP2", prefix); - dump_phy_type(hw, low, 62, "100GBASE_SR2", prefix); - dump_phy_type(hw, low, 63, "100GBASE_DR", prefix); -} - -/** - * ice_dump_phy_type_high - helper function to dump phy_type_high - * @hw: pointer to the HW structure - * @high: 64 bit value for phy_type_high - * @prefix: prefix string to differentiate multiple dumps - */ -static void -ice_dump_phy_type_high(struct ice_hw *hw, u64 high, const char *prefix) -{ + for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) { + if (low & BIT_ULL(i)) + ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", + prefix, i, ice_link_mode_str_low[i]); + } + ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, (unsigned long long)high); - dump_phy_type(hw, high, 0, "100GBASE_KR2_PAM4", prefix); - dump_phy_type(hw, high, 1, "100G_CAUI2_AOC_ACC", prefix); - dump_phy_type(hw, high, 2, "100G_CAUI2", prefix); - dump_phy_type(hw, high, 3, "100G_AUI2_AOC_ACC", prefix); - dump_phy_type(hw, high, 4, "100G_AUI2", prefix); + for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) { + if (high & BIT_ULL(i)) + ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", + prefix, i, ice_link_mode_str_high[i]); + } } /** @@ -227,13 +219,23 @@ { switch (hw->device_id) { case ICE_DEV_ID_E810C_SFP: - if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T || - hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2) + switch (hw->subsystem_device_id) { + case ICE_SUBDEV_ID_E810T: + case ICE_SUBDEV_ID_E810T2: + case ICE_SUBDEV_ID_E810T3: + case ICE_SUBDEV_ID_E810T4: + case ICE_SUBDEV_ID_E810T5: + case ICE_SUBDEV_ID_E810T7: return true; + } break; case ICE_DEV_ID_E810C_QSFP: - if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2) + switch 
(hw->subsystem_device_id) { + case ICE_SUBDEV_ID_E810T2: + case ICE_SUBDEV_ID_E810T5: + case ICE_SUBDEV_ID_E810T6: return true; + } break; default: break; @@ -242,6 +244,31 @@ return false; } +/** + * ice_is_e823 + * @hw: pointer to the hardware structure + * + * returns true if the device is E823-L or E823-C based, false if not. + */ +bool ice_is_e823(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_SGMII: + return true; + default: + return false; + } +} + /** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure @@ -308,10 +335,10 @@ if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { ice_memcpy(hw->port_info->mac.lan_addr, resp[i].mac_addr, ETH_ALEN, - ICE_DMA_TO_NONDMA); + ICE_NONDMA_TO_NONDMA); ice_memcpy(hw->port_info->mac.perm_addr, resp[i].mac_addr, - ETH_ALEN, ICE_DMA_TO_NONDMA); + ETH_ALEN, ICE_NONDMA_TO_NONDMA); break; } return ICE_SUCCESS; @@ -355,23 +382,30 @@ cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM); cmd->param0 |= CPU_TO_LE16(report_mode); + status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd); ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n"); - if (report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) + switch (report_mode) { + case ICE_AQC_REPORT_TOPO_CAP_MEDIA: prefix = "phy_caps_media"; - else if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA) + break; + case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA: prefix = "phy_caps_no_media"; - else if (report_mode == ICE_AQC_REPORT_ACTIVE_CFG) + break; + case ICE_AQC_REPORT_ACTIVE_CFG: prefix = "phy_caps_active"; - else if (report_mode == ICE_AQC_REPORT_DFLT_CFG) + break; + case ICE_AQC_REPORT_DFLT_CFG: prefix = "phy_caps_default"; - else + break; + default: 
prefix = "phy_caps_invalid"; + } - ice_dump_phy_type_low(hw, LE64_TO_CPU(pcaps->phy_type_low), prefix); - ice_dump_phy_type_high(hw, LE64_TO_CPU(pcaps->phy_type_high), prefix); + ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low), + LE64_TO_CPU(pcaps->phy_type_high), prefix); ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n", prefix, report_mode); @@ -444,7 +478,7 @@ * * Find and return the node handle for a given node type and part number in the * netlist. When found ICE_SUCCESS is returned, ICE_ERR_DOES_NOT_EXIST - * otherwise. If @node_handle provided, it would be set to found node handle. + * otherwise. If node_handle provided, it would be set to found node handle. */ enum ice_status ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number, @@ -452,11 +486,12 @@ { struct ice_aqc_get_link_topo cmd; u8 rec_node_part_number; - enum ice_status status; u16 rec_node_handle; u8 idx; for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) { + enum ice_status status; + memset(&cmd, 0, sizeof(cmd)); cmd.addr.topo_params.node_type_ctx = @@ -545,7 +580,6 @@ case ICE_PHY_TYPE_LOW_1000BASE_LX: case ICE_PHY_TYPE_LOW_10GBASE_SR: case ICE_PHY_TYPE_LOW_10GBASE_LR: - case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: case ICE_PHY_TYPE_LOW_40GBASE_SR4: @@ -602,6 +636,7 @@ case ICE_PHY_TYPE_LOW_2500BASE_X: case ICE_PHY_TYPE_LOW_5GBASE_KR: case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_KR: case ICE_PHY_TYPE_LOW_25GBASE_KR1: case ICE_PHY_TYPE_LOW_25GBASE_KR_S: @@ -629,6 +664,8 @@ return ICE_MEDIA_UNKNOWN; } +#define ice_get_link_status_datalen(hw) ICE_GET_LINK_STATUS_DATALEN_V1 + /** * ice_aq_get_link_info * @pi: port information structure @@ -668,8 +705,8 @@ resp->cmd_flags = CPU_TO_LE16(cmd_flags); resp->lport_num = pi->lport; - status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); - + status = ice_aq_send_cmd(hw, &desc, &link_data, + 
ice_get_link_status_datalen(hw), cd); if (status != ICE_SUCCESS) return status; @@ -1255,7 +1292,7 @@ * that is occurring during a download package operation. */ for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT + - ICE_PF_RESET_WAIT_COUNT; cnt++) { + ICE_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, PFGEN_CTRL); if (!(reg & PFGEN_CTRL_PFSWR_M)) break; @@ -2341,8 +2378,6 @@ ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; - case ICE_AQC_CAPS_NVM_VER: - break; case ICE_AQC_CAPS_NVM_MGMT: caps->sec_rev_disabled = (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ? @@ -2369,6 +2404,11 @@ caps->iwarp = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp); break; + case ICE_AQC_CAPS_ROCEV2_LAG: + caps->roce_lag = (number == 1); + ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n", + prefix, caps->roce_lag); + break; case ICE_AQC_CAPS_LED: if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) { caps->led[phys_id] = true; @@ -2425,7 +2465,7 @@ case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2: case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3: { - u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0; + u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0); caps->ext_topo_dev_img_ver_high[index] = number; caps->ext_topo_dev_img_ver_low[index] = logical_id; @@ -2458,6 +2498,14 @@ caps->ext_topo_dev_img_prog_en[index]); break; } + case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: + caps->tx_sched_topo_comp_mode_en = (number == 1); + break; + case ICE_AQC_CAPS_DYN_FLATTENING: + caps->dyn_flattening_en = (number == 1); + ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n", + prefix, caps->dyn_flattening_en); + break; default: /* Not one of the recognized common capabilities */ found = false; @@ -2653,6 +2701,29 @@ dev_p->num_vsi_allocd_to_host); } +/** + * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + 
* + * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities. + */ +static void +ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + dev_p->nac_topo.mode = LE32_TO_CPU(cap->number); + dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M; + + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", + dev_p->nac_topo.id); +} + /** * ice_parse_dev_caps - Parse device capabilities * @hw: pointer to the HW struct @@ -2695,6 +2766,9 @@ case ICE_AQC_CAPS_VSI: ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); break; + case ICE_AQC_CAPS_NAC_TOPOLOGY: + ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); + break; default: /* Don't list common capabilities as unknown */ if (!found) @@ -2999,12 +3073,10 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw) { switch (hw->device_id) { - case ICE_DEV_ID_E822C_10G_BASE_T: case ICE_DEV_ID_E822C_SGMII: - case ICE_DEV_ID_E822L_10G_BASE_T: case ICE_DEV_ID_E822L_SGMII: - case ICE_DEV_ID_E823L_10G_BASE_T: case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823C_SGMII: return true; default: return false; @@ -3349,8 +3421,12 @@ */ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) { - if (caps & ICE_AQC_PHY_EN_AUTO_FEC) - return ICE_FEC_AUTO; + if (caps & ICE_AQC_PHY_EN_AUTO_FEC) { + if (fec_options & ICE_AQC_PHY_FEC_DIS) + return ICE_FEC_DIS_AUTO; + else + return ICE_FEC_AUTO; + } if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | @@ -3641,6 +3717,12 @@ /* Clear all FEC option bits. 
*/ cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; break; + case ICE_FEC_DIS_AUTO: + /* Set No FEC and auto FEC */ + if (!ice_fw_supports_fec_dis_auto(hw)) + return ICE_ERR_NOT_SUPPORTED; + cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS; + /* fall-through */ case ICE_FEC_AUTO: /* AND auto FEC bit, and all caps bits. */ cfg->caps &= ICE_AQC_PHY_CAPS_MASK; @@ -3909,7 +3991,7 @@ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm); - desc.datalen = data_size; + desc.datalen = CPU_TO_LE16(data_size); ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params), ICE_NONDMA_TO_NONDMA); cmd->start_address = CPU_TO_LE32(start_address); @@ -5932,7 +6014,7 @@ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); cmd = &desc.params.read_write_gpio; - cmd->gpio_ctrl_handle = gpio_ctrl_handle; + cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle); cmd->gpio_num = pin_idx; cmd->gpio_val = value ? 1 : 0; @@ -5960,7 +6042,7 @@ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); cmd = &desc.params.read_write_gpio; - cmd->gpio_ctrl_handle = gpio_ctrl_handle; + cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle); cmd->gpio_num = pin_idx; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); @@ -5972,26 +6054,70 @@ } /** - * ice_fw_supports_link_override + * ice_is_fw_api_min_ver * @hw: pointer to the hardware structure + * @maj: major version + * @min: minor version + * @patch: patch version * - * Checks if the firmware supports link override + * Checks if the firmware is minimum version */ -bool ice_fw_supports_link_override(struct ice_hw *hw) +static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch) { - if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { - if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) + if (hw->api_maj_ver == maj) { + if (hw->api_min_ver > min) return true; - if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && - hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH) + if (hw->api_min_ver == min && hw->api_patch >= 
patch) return true; - } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { + } else if (hw->api_maj_ver > maj) { return true; } return false; } +/** + * ice_is_fw_min_ver + * @hw: pointer to the hardware structure + * @branch: branch version + * @maj: major version + * @min: minor version + * @patch: patch version + * + * Checks if the firmware is minimum version + */ +static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min, + u8 patch) +{ + if (hw->fw_branch == branch) { + if (hw->fw_maj_ver > maj) + return true; + if (hw->fw_maj_ver == maj) { + if (hw->fw_min_ver > min) + return true; + if (hw->fw_min_ver == min && hw->fw_patch >= patch) + return true; + } + } else if (hw->fw_branch > branch) { + return true; + } + + return false; +} + +/** + * ice_fw_supports_link_override + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports link override + */ +bool ice_fw_supports_link_override(struct ice_hw *hw) +{ + return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ, + ICE_FW_API_LINK_OVERRIDE_MIN, + ICE_FW_API_LINK_OVERRIDE_PATCH); +} + /** * ice_get_link_default_override * @ldo: pointer to the link default override struct @@ -6254,19 +6380,12 @@ */ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) { - if (hw->mac_type != ICE_MAC_E810) + if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC) return false; - if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) { - if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN) - return true; - if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN && - hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH) - return true; - } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) { - return true; - } - return false; + return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ, + ICE_FW_API_LLDP_FLTR_MIN, + ICE_FW_API_LLDP_FLTR_PATCH); } /** @@ -6295,6 +6414,19 @@ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } +/** + * ice_lldp_execute_pending_mib - execute LLDP pending MIB 
request + * @hw: pointer to HW struct + */ +enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + /** * ice_fw_supports_report_dflt_cfg * @hw: pointer to the hardware structure @@ -6303,18 +6435,24 @@ */ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) { - if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) { - if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN) - return true; - if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN && - hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH) - return true; - } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) { - return true; - } - return false; + return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ, + ICE_FW_API_REPORT_DFLT_CFG_MIN, + ICE_FW_API_REPORT_DFLT_CFG_PATCH); } +/** + * ice_fw_supports_fec_dis_auto + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports FEC disable in Auto FEC mode + */ +bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw) +{ + return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH, + ICE_FW_FEC_DIS_AUTO_MAJ, + ICE_FW_FEC_DIS_AUTO_MIN, + ICE_FW_FEC_DIS_AUTO_PATCH); +} /** * ice_is_fw_auto_drop_supported * @hw: pointer to the hardware structure @@ -6328,3 +6466,4 @@ return true; return false; } + diff --git a/sys/dev/ice/ice_common_sysctls.h b/sys/dev/ice/ice_common_sysctls.h --- a/sys/dev/ice/ice_common_sysctls.h +++ b/sys/dev/ice/ice_common_sysctls.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -94,6 +94,17 @@ */ bool ice_enable_health_events = true; +/** + * @var ice_tx_balance_en + * @brief boolean permitting the 5-layer scheduler topology enablement + * + * Global sysctl variable indicating whether the driver will allow the + * 5-layer scheduler topology feature to be enabled. It's _not_ + * specifically enabling the feature, just allowing it depending on what + * the DDP package allows. + */ +bool ice_tx_balance_en = true; + /** * @var ice_rdma_max_msix * @brief maximum number of MSI-X vectors to reserve for RDMA interface @@ -137,4 +148,8 @@ &ice_enable_tx_lldp_filter, 0, "Drop Ethertype 0x88cc LLDP frames originating from non-HW sources"); +SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, tx_balance_en, CTLFLAG_RWTUN, + &ice_tx_balance_en, 0, + "Enable 5-layer scheduler topology"); + #endif /* _ICE_COMMON_SYSCTLS_H_ */ diff --git a/sys/dev/ice/ice_common_txrx.h b/sys/dev/ice/ice_common_txrx.h --- a/sys/dev/ice/ice_common_txrx.h +++ b/sys/dev/ice/ice_common_txrx.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_controlq.h b/sys/dev/ice/ice_controlq.h --- a/sys/dev/ice/ice_controlq.h +++ b/sys/dev/ice/ice_controlq.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_controlq.c b/sys/dev/ice/ice_controlq.c --- a/sys/dev/ice/ice_controlq.c +++ b/sys/dev/ice/ice_controlq.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -508,12 +508,18 @@ return false; } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) - ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n", + hw->api_maj_ver, hw->api_min_ver, + EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR); else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) - ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR); } else { /* Major API version is older than expected, log a warning */ - ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR); } return true; } @@ -665,10 +671,12 @@ * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. 
*/ -static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type, + bool unloading) { struct ice_ctl_q_info *cq; @@ -678,7 +686,7 @@ case ICE_CTL_Q_ADMIN: cq = &hw->adminq; if (ice_check_sq_alive(hw, cq)) - ice_aq_q_shutdown(hw, true); + ice_aq_q_shutdown(hw, unloading); break; case ICE_CTL_Q_MAILBOX: cq = &hw->mailboxq; @@ -694,18 +702,19 @@ /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. The driver * may call this at runtime to shutdown and later restart control queues, such * as in response to a reset event. */ -void ice_shutdown_all_ctrlq(struct ice_hw *hw) +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Shutdown FW admin queue */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading); /* Shutdown PF-VF Mailbox */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading); } /** @@ -739,7 +748,7 @@ break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true); ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true); } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); @@ -809,7 +818,7 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw) { /* shut down all the control queues first */ - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); ice_destroy_ctrlq_locks(&hw->adminq); ice_destroy_ctrlq_locks(&hw->mailboxq); diff --git a/sys/dev/ice/ice_dcb.h b/sys/dev/ice/ice_dcb.h --- a/sys/dev/ice/ice_dcb.h +++ b/sys/dev/ice/ice_dcb.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright 
(c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -249,6 +249,8 @@ struct ice_dcbx_cfg *dcbcfg); enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event); enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg); enum ice_status diff --git a/sys/dev/ice/ice_dcb.c b/sys/dev/ice/ice_dcb.c --- a/sys/dev/ice/ice_dcb.c +++ b/sys/dev/ice/ice_dcb.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -102,6 +102,9 @@ if (!ena_update) cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS; + else + cmd->command |= ICE_AQ_LLDP_MIB_PENDING_ENABLE << + ICE_AQ_LLDP_MIB_PENDING_S; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -857,9 +860,9 @@ bool *dcbx_agent_status, struct ice_sq_cd *cd) { struct ice_aqc_lldp_stop_start_specific_agent *cmd; - enum ice_status status; + enum ice_adminq_opc opcode; struct ice_aq_desc desc; - u16 opcode; + enum ice_status status; cmd = &desc.params.lldp_agent_ctrl; @@ -1106,8 +1109,8 @@ */ if (!err && sync && oper) { dcbcfg->app[app_index].priority = - (app_prio & ice_aqc_cee_app_mask) >> - ice_aqc_cee_app_shift; + (u8)((app_prio & ice_aqc_cee_app_mask) >> + ice_aqc_cee_app_shift); dcbcfg->app[app_index].selector = ice_app_sel_type; dcbcfg->app[app_index].prot_id = ice_app_prot_id_type; app_index++; @@ -1188,6 +1191,43 @@ return ret; } +/** + * ice_get_dcb_cfg_from_mib_change + * @pi: port information structure + * @event: pointer to the admin queue receive event + * + * Set DCB configuration from received MIB Change event + */ +void 
ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event) +{ + struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; + struct ice_aqc_lldp_get_mib *mib; + u8 change_type, dcbx_mode; + + mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + + change_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; + if (change_type == ICE_AQ_LLDP_MIB_REMOTE) + dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; + + dcbx_mode = ((mib->type & ICE_AQ_LLDP_DCBX_M) >> + ICE_AQ_LLDP_DCBX_S); + + switch (dcbx_mode) { + case ICE_AQ_LLDP_DCBX_IEEE: + dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; + ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg); + break; + + case ICE_AQ_LLDP_DCBX_CEE: + pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg; + ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *) + event->msg_buf, pi); + break; + } +} + /** * ice_init_dcb * @hw: pointer to the HW struct @@ -1597,7 +1637,7 @@ tlv->ouisubtype = HTONL(ouisubtype); buf[0] = dcbcfg->pfc.pfccap & 0xF; - buf[1] = dcbcfg->pfc.pfcena & 0xF; + buf[1] = dcbcfg->pfc.pfcena; } /** diff --git a/sys/dev/ice/ice_ddp_common.h b/sys/dev/ice/ice_ddp_common.h new file mode 100644 --- /dev/null +++ b/sys/dev/ice/ice_ddp_common.h @@ -0,0 +1,478 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (c) 2022, Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +/*$FreeBSD$*/ + +#ifndef _ICE_DDP_H_ +#define _ICE_DDP_H_ + +#include "ice_osdep.h" +#include "ice_adminq_cmd.h" +#include "ice_controlq.h" +#include "ice_status.h" +#include "ice_flex_type.h" +#include "ice_protocol_type.h" + +/* Package minimal version supported */ +#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +enum ice_ddp_state { + /* Indicates that this call to ice_init_pkg + * successfully loaded the requested DDP package + */ + ICE_DDP_PKG_SUCCESS = 0, + + /* Generic error for already loaded errors, it is mapped later to + * the more specific one (one of the next 3) + */ + ICE_DDP_PKG_ALREADY_LOADED = -1, + + /* Indicates that a DDP package of the same version has already been + * loaded onto the device by a previous call or by another PF + */ + 
ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, + + /* The device has a DDP package that is not supported by the driver */ + ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, + + /* The device has a compatible package + * (but different from the request) already loaded + */ + ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, + + /* The firmware loaded on the device is not compatible with + * the DDP package loaded + */ + ICE_DDP_PKG_FW_MISMATCH = -5, + + /* The DDP package file is invalid */ + ICE_DDP_PKG_INVALID_FILE = -6, + + /* The version of the DDP package provided is higher than + * the driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, + + /* The version of the DDP package provided is lower than the + * driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, + + /* Missing security manifest in DDP pkg */ + ICE_DDP_PKG_NO_SEC_MANIFEST = -9, + + /* The RSA signature of the DDP package file provided is invalid */ + ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10, + + /* The DDP package file security revision is too low and not + * supported by firmware + */ + ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11, + + /* Manifest hash mismatch */ + ICE_DDP_PKG_MANIFEST_INVALID = -12, + + /* Buffer hash mismatches manifest */ + ICE_DDP_PKG_BUFFER_INVALID = -13, + + /* Other errors */ + ICE_DDP_PKG_ERR = -14, +}; + +/* Package and segment headers and tables */ +struct ice_pkg_hdr { + struct ice_pkg_ver pkg_format_ver; + __le32 seg_count; + __le32 seg_offset[STRUCT_HACK_VAR_LEN]; +}; + +/* Package signing algorithm types */ +#define SEGMENT_SIGN_TYPE_INVALID 0x00000000 +#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001 +#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002 +#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */ + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_INVALID 0x00000000 +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE_E810 0x00000010 +#define SEGMENT_TYPE_SIGNING 0x00001001 +#define 
SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020 + __le32 seg_type; + struct ice_pkg_ver seg_format_ver; + __le32 seg_size; + char seg_id[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct ice_device_id_entry { + union ice_device_id device; + union ice_device_id sub_device; +}; + +struct ice_seg { + struct ice_generic_seg_hdr hdr; + __le32 device_table_count; + struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN]; +}; + +struct ice_nvm_table { + __le32 table_count; + __le32 vers[STRUCT_HACK_VAR_LEN]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 + u8 buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { + __le32 buf_count; + struct ice_buf buf_array[STRUCT_HACK_VAR_LEN]; +}; + +struct ice_run_time_cfg_seg { + struct ice_generic_seg_hdr hdr; + u8 rsvd[8]; + struct ice_buf_table buf_table; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { + struct ice_generic_seg_hdr hdr; + struct ice_pkg_ver pkg_ver; + __le32 rsvd; + char pkg_name[ICE_PKG_NAME_SIZE]; +}; + +#define ICE_MIN_S_OFF 12 +#define ICE_MAX_S_OFF 4095 +#define ICE_MIN_S_SZ 1 +#define ICE_MAX_S_SZ 4084 + +struct ice_sign_seg { + struct ice_generic_seg_hdr hdr; + __le32 seg_id; + __le32 sign_type; + __le32 signed_seg_idx; + __le32 signed_buf_start; + __le32 signed_buf_count; +#define ICE_SIGN_SEG_RESERVED_COUNT 44 + u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT]; + struct ice_buf_table buf_tbl; +}; + +/* section information */ +struct ice_section_entry { + __le32 type; + __le16 offset; + __le16 size; +}; + +#define ICE_MIN_S_COUNT 1 +#define ICE_MAX_S_COUNT 511 +#define ICE_MIN_S_DATA_END 12 +#define ICE_MAX_S_DATA_END 4096 + +#define ICE_METADATA_BUF 0x80000000 + +struct ice_buf_hdr { + __le16 section_count; + __le16 data_end; + struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN]; +}; + +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) 
((ICE_PKG_BUF_SIZE - \ + ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ + (ent_sz)) + +/* ice package section IDs */ +#define ICE_SID_METADATA 1 +#define ICE_SID_XLT0_SW 10 +#define ICE_SID_XLT_KEY_BUILDER_SW 11 +#define ICE_SID_XLT1_SW 12 +#define ICE_SID_XLT2_SW 13 +#define ICE_SID_PROFID_TCAM_SW 14 +#define ICE_SID_PROFID_REDIR_SW 15 +#define ICE_SID_FLD_VEC_SW 16 +#define ICE_SID_CDID_KEY_BUILDER_SW 17 +#define ICE_SID_CDID_REDIR_SW 18 + +#define ICE_SID_XLT0_ACL 20 +#define ICE_SID_XLT_KEY_BUILDER_ACL 21 +#define ICE_SID_XLT1_ACL 22 +#define ICE_SID_XLT2_ACL 23 +#define ICE_SID_PROFID_TCAM_ACL 24 +#define ICE_SID_PROFID_REDIR_ACL 25 +#define ICE_SID_FLD_VEC_ACL 26 +#define ICE_SID_CDID_KEY_BUILDER_ACL 27 +#define ICE_SID_CDID_REDIR_ACL 28 + +#define ICE_SID_XLT0_FD 30 +#define ICE_SID_XLT_KEY_BUILDER_FD 31 +#define ICE_SID_XLT1_FD 32 +#define ICE_SID_XLT2_FD 33 +#define ICE_SID_PROFID_TCAM_FD 34 +#define ICE_SID_PROFID_REDIR_FD 35 +#define ICE_SID_FLD_VEC_FD 36 +#define ICE_SID_CDID_KEY_BUILDER_FD 37 +#define ICE_SID_CDID_REDIR_FD 38 + +#define ICE_SID_XLT0_RSS 40 +#define ICE_SID_XLT_KEY_BUILDER_RSS 41 +#define ICE_SID_XLT1_RSS 42 +#define ICE_SID_XLT2_RSS 43 +#define ICE_SID_PROFID_TCAM_RSS 44 +#define ICE_SID_PROFID_REDIR_RSS 45 +#define ICE_SID_FLD_VEC_RSS 46 +#define ICE_SID_CDID_KEY_BUILDER_RSS 47 +#define ICE_SID_CDID_REDIR_RSS 48 + +#define ICE_SID_RXPARSER_CAM 50 +#define ICE_SID_RXPARSER_NOMATCH_CAM 51 +#define ICE_SID_RXPARSER_IMEM 52 +#define ICE_SID_RXPARSER_XLT0_BUILDER 53 +#define ICE_SID_RXPARSER_NODE_PTYPE 54 +#define ICE_SID_RXPARSER_MARKER_PTYPE 55 +#define ICE_SID_RXPARSER_BOOST_TCAM 56 +#define ICE_SID_RXPARSER_PROTO_GRP 57 +#define ICE_SID_RXPARSER_METADATA_INIT 58 +#define ICE_SID_RXPARSER_XLT0 59 + +#define ICE_SID_TXPARSER_CAM 60 +#define ICE_SID_TXPARSER_NOMATCH_CAM 61 +#define ICE_SID_TXPARSER_IMEM 62 +#define ICE_SID_TXPARSER_XLT0_BUILDER 63 +#define ICE_SID_TXPARSER_NODE_PTYPE 64 +#define 
ICE_SID_TXPARSER_MARKER_PTYPE 65 +#define ICE_SID_TXPARSER_BOOST_TCAM 66 +#define ICE_SID_TXPARSER_PROTO_GRP 67 +#define ICE_SID_TXPARSER_METADATA_INIT 68 +#define ICE_SID_TXPARSER_XLT0 69 + +#define ICE_SID_RXPARSER_INIT_REDIR 70 +#define ICE_SID_TXPARSER_INIT_REDIR 71 +#define ICE_SID_RXPARSER_MARKER_GRP 72 +#define ICE_SID_TXPARSER_MARKER_GRP 73 +#define ICE_SID_RXPARSER_LAST_PROTO 74 +#define ICE_SID_TXPARSER_LAST_PROTO 75 +#define ICE_SID_RXPARSER_PG_SPILL 76 +#define ICE_SID_TXPARSER_PG_SPILL 77 +#define ICE_SID_RXPARSER_NOMATCH_SPILL 78 +#define ICE_SID_TXPARSER_NOMATCH_SPILL 79 + +#define ICE_SID_XLT0_PE 80 +#define ICE_SID_XLT_KEY_BUILDER_PE 81 +#define ICE_SID_XLT1_PE 82 +#define ICE_SID_XLT2_PE 83 +#define ICE_SID_PROFID_TCAM_PE 84 +#define ICE_SID_PROFID_REDIR_PE 85 +#define ICE_SID_FLD_VEC_PE 86 +#define ICE_SID_CDID_KEY_BUILDER_PE 87 +#define ICE_SID_CDID_REDIR_PE 88 + +#define ICE_SID_RXPARSER_FLAG_REDIR 97 + +/* Label Metadata section IDs */ +#define ICE_SID_LBL_FIRST 0x80000010 +#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010 +#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011 +#define ICE_SID_LBL_RESERVED_12 0x80000012 +#define ICE_SID_LBL_RESERVED_13 0x80000013 +#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014 +#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015 +#define ICE_SID_LBL_PTYPE 0x80000016 +#define ICE_SID_LBL_PROTOCOL_ID 0x80000017 +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 +#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019 +#define ICE_SID_LBL_RXPARSER_PG 0x8000001A +#define ICE_SID_LBL_TXPARSER_PG 0x8000001B +#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C +#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D +#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E +#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F +#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020 +#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021 +#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022 +#define ICE_SID_LBL_FLAG 0x80000023 +#define ICE_SID_LBL_REG 0x80000024 +#define ICE_SID_LBL_SW_PTG 0x80000025 
+#define ICE_SID_LBL_ACL_PTG 0x80000026 +#define ICE_SID_LBL_PE_PTG 0x80000027 +#define ICE_SID_LBL_RSS_PTG 0x80000028 +#define ICE_SID_LBL_FD_PTG 0x80000029 +#define ICE_SID_LBL_SW_VSIG 0x8000002A +#define ICE_SID_LBL_ACL_VSIG 0x8000002B +#define ICE_SID_LBL_PE_VSIG 0x8000002C +#define ICE_SID_LBL_RSS_VSIG 0x8000002D +#define ICE_SID_LBL_FD_VSIG 0x8000002E +#define ICE_SID_LBL_PTYPE_META 0x8000002F +#define ICE_SID_LBL_SW_PROFID 0x80000030 +#define ICE_SID_LBL_ACL_PROFID 0x80000031 +#define ICE_SID_LBL_PE_PROFID 0x80000032 +#define ICE_SID_LBL_RSS_PROFID 0x80000033 +#define ICE_SID_LBL_FD_PROFID 0x80000034 +#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035 +#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036 +#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037 +#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038 +/* The following define MUST be updated to reflect the last label section ID */ +#define ICE_SID_LBL_LAST 0x80000038 + +/* Label ICE runtime configuration section IDs */ +#define ICE_SID_TX_5_LAYER_TOPO 0x10 + +enum ice_block { + ICE_BLK_SW = 0, + ICE_BLK_ACL, + ICE_BLK_FD, + ICE_BLK_RSS, + ICE_BLK_PE, + ICE_BLK_COUNT +}; + +enum ice_sect { + ICE_XLT0 = 0, + ICE_XLT_KB, + ICE_XLT1, + ICE_XLT2, + ICE_PROF_TCAM, + ICE_PROF_REDIR, + ICE_VEC_TBL, + ICE_CDID_KB, + ICE_CDID_REDIR, + ICE_SECT_COUNT +}; + +/* package buffer building */ + +struct ice_buf_build { + struct ice_buf buf; + u16 reserved_section_table_entries; +}; + +struct ice_pkg_enum { + struct ice_buf_table *buf_table; + u32 buf_idx; + + u32 type; + struct ice_buf_hdr *buf; + u32 sect_idx; + void *sect; + u32 sect_type; + + u32 entry_idx; + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +struct ice_hw; + +enum ice_status +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); +void ice_release_change_lock(struct ice_hw *hw); + +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw); +void * +ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 
type, u16 size); +enum ice_status +ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list); +enum ice_status +ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); +u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); + +enum ice_status +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +enum ice_status +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +void ice_release_global_cfg_lock(struct ice_hw *hw); +struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr); +enum ice_ddp_state +ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len); +enum ice_ddp_state +ice_get_pkg_info(struct ice_hw *hw); +void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg); +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg); +enum ice_status +ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access); + +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg); +struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state); +bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state); +void * +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type, u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)); +void * +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type); +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_ddp_state +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); +bool ice_is_init_pkg_successful(enum ice_ddp_state state); +void ice_free_seg(struct ice_hw *hw); + +struct ice_buf_build * 
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, + void **section); +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld); +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld); + +enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len); + +#endif /* _ICE_DDP_H_ */ diff --git a/sys/dev/ice/ice_ddp_common.c b/sys/dev/ice/ice_ddp_common.c new file mode 100644 --- /dev/null +++ b/sys/dev/ice/ice_ddp_common.c @@ -0,0 +1,2532 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (c) 2022, Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +/*$FreeBSD$*/ + +#include "ice_ddp_common.h" +#include "ice_type.h" +#include "ice_common.h" +#include "ice_sched.h" + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +static enum ice_status +ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == ICE_ERR_AQ_ERROR) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = LE32_TO_CPU(resp->error_offset); + if (error_info) + *error_info = 
LE32_TO_CPU(resp->error_info); + } + + return status; +} + +/** + * ice_aq_upload_section + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer which will receive the section + * @buf_size: the size of the package buffer + * @cd: pointer to command details structure or NULL + * + * Upload Section (0x0C41) + */ +enum ice_status +ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +} + +/** + * ice_aq_update_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package cmd buffer + * @buf_size: the size of the package cmd buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Update Package (0x0C42) + */ +static enum ice_status +ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, + bool last_buf, u32 *error_offset, u32 *error_info, + struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == ICE_ERR_AQ_ERROR) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = LE32_TO_CPU(resp->error_offset); + if (error_info) + *error_info = 
LE32_TO_CPU(resp->error_info); + } + + return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) +{ + u32 i; + + ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", + pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, + pkg_hdr->pkg_format_ver.update, + pkg_hdr->pkg_format_ver.draft); + + /* Search all package segments for the requested segment type */ + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { + struct ice_generic_seg_hdr *seg; + + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i])); + + if (LE32_TO_CPU(seg->seg_type) == seg_type) + return seg; + } + + return NULL; +} + +/** + * ice_get_pkg_seg_by_idx + * @pkg_hdr: pointer to the package header to be searched + * @idx: index of segment + */ +static struct ice_generic_seg_hdr * +ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + struct ice_generic_seg_hdr *seg = NULL; + + if (idx < LE32_TO_CPU(pkg_hdr->seg_count)) + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + + LE32_TO_CPU(pkg_hdr->seg_offset[idx])); + + return seg; +} + +/** + * ice_is_signing_seg_at_idx - determine if segment is a signing segment + * @pkg_hdr: pointer to package header + * @idx: segment index + */ +static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + struct ice_generic_seg_hdr *seg; + bool retval = false; + + seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (seg) + retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING; + + return retval; +} + +/** + * 
ice_is_signing_seg_type_at_idx + * @pkg_hdr: pointer to package header + * @idx: segment index + * @seg_id: segment id that is expected + * @sign_type: signing type + * + * Determine if a segment is a signing segment of the correct type + */ +static bool +ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx, + u32 seg_id, u32 sign_type) +{ + bool result = false; + + if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) { + struct ice_sign_seg *seg; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, + idx); + if (seg && LE32_TO_CPU(seg->seg_id) == seg_id && + LE32_TO_CPU(seg->sign_type) == sign_type) + result = true; + } + + return result; +} + +/** + * ice_update_pkg_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + */ +enum ice_status +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status = ICE_SUCCESS; + u32 i; + + for (i = 0; i < count; i++) { + struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); + bool last = ((i + 1) == count); + u32 offset, info; + + status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), + last, &offset, &info, NULL); + + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", + status, offset, info); + break; + } + } + + return status; +} + +/** + * ice_update_pkg + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains change lock and updates package. 
+ */ +enum ice_status +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status; + + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; + + status = ice_update_pkg_no_lock(hw, bufs, count); + + ice_release_change_lock(hw); + + return status; +} + +static enum ice_ddp_state +ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_ENOSEC: + return ICE_DDP_PKG_NO_SEC_MANIFEST; + case ICE_AQ_RC_EBADSIG: + return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; + case ICE_AQ_RC_ESVN: + return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW; + case ICE_AQ_RC_EBADMAN: + return ICE_DDP_PKG_MANIFEST_INVALID; + case ICE_AQ_RC_EBADBUF: + return ICE_DDP_PKG_BUFFER_INVALID; + default: + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_is_buffer_metadata - determine if package buffer is a metadata buffer + * @buf: pointer to buffer header + */ +static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf) +{ + bool metadata = false; + + if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF) + metadata = true; + + return metadata; +} + +/** + * ice_is_last_download_buffer + * @buf: pointer to current buffer header + * @idx: index of the buffer in the current sequence + * @count: the buffer count in the current sequence + * + * Note: this routine should only be called if the buffer is not the last buffer + */ +static bool +ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count) +{ + bool last = ((idx + 1) == count); + + /* A set metadata flag in the next buffer will signal that the current + * buffer will be the last buffer downloaded + */ + if (!last) { + struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1; + + last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf); + } + + return last; +} + +/** + * ice_dwnld_cfg_bufs_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @start: buffer index of first buffer to download + * 
@count: the number of buffers to download + * @indicate_last: if true, then set last buffer flag on last buffer download + * + * Downloads package configuration buffers to the firmware. Metadata buffers + * are skipped, and the first metadata buffer found indicates that the rest + * of the buffers are all metadata buffers. + */ +static enum ice_ddp_state +ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, + u32 count, bool indicate_last) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_buf_hdr *bh; + enum ice_aq_err err; + u32 offset, info, i; + + if (!bufs || !count) + return ICE_DDP_PKG_ERR; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. + */ + bh = (struct ice_buf_hdr *)(bufs + start); + if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + for (i = 0; i < count; i++) { + enum ice_status status; + bool last = false; + + bh = (struct ice_buf_hdr *)(bufs + start + i); + + if (indicate_last) + last = ice_is_last_download_buffer(bh, i, count); + + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, + &offset, &info, NULL); + + /* Save AQ status from download package */ + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", + status, offset, info); + err = hw->adminq.sq_last_status; + state = ice_map_aq_err_to_ddp_state(err); + break; + } + + if (last) + break; + } + + return state; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static enum ice_status +ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct 
ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_has_signing_seg - determine if package has a signing segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + */ +static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr); + + return seg_hdr ? true : false; +} + +/** + * ice_get_pkg_segment_id - get correct package segment id, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type) +{ + u32 seg_id; + + switch (mac_type) { + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K: + default: + seg_id = SEGMENT_TYPE_ICE_E810; + break; + } + + return seg_id; +} + +/** + * ice_get_pkg_sign_type - get package segment sign type, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type) +{ + u32 sign_type; + + switch (mac_type) { + case ICE_MAC_GENERIC_3K: + sign_type = SEGMENT_SIGN_TYPE_RSA3K; + break; + case ICE_MAC_GENERIC: + default: + sign_type = SEGMENT_SIGN_TYPE_RSA2K; + break; + } + + return sign_type; +} + +/** + * ice_get_signing_req - get correct package requirements, based on device + * @hw: pointer to the hardware structure + */ +static void ice_get_signing_req(struct ice_hw *hw) +{ + hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type); + hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type); +} + +/** + * ice_download_pkg_sig_seg - download a signature segment + * @hw: pointer to the hardware structure + * @seg: pointer to signature segment + */ +static enum ice_ddp_state +ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg) +{ + enum ice_ddp_state state; + + state 
= ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0, + LE32_TO_CPU(seg->buf_tbl.buf_count), + false); + + return state; +} + +/** + * ice_download_pkg_config_seg - download a config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index + * @start: starting buffer + * @count: buffer count + * + * Note: idx must reference a ICE segment + */ +static enum ice_ddp_state +ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx, u32 start, u32 count) +{ + struct ice_buf_table *bufs; + enum ice_ddp_state state; + struct ice_seg *seg; + u32 buf_count; + + seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) + return ICE_DDP_PKG_ERR; + + bufs = ice_find_buf_table(seg); + buf_count = LE32_TO_CPU(bufs->buf_count); + + if (start >= buf_count || start + count > buf_count) + return ICE_DDP_PKG_ERR; + + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count, + true); + + return state; +} + +/** + * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index (must be a signature segment) + * + * Note: idx must reference a signature segment + */ +static enum ice_ddp_state +ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx) +{ + enum ice_ddp_state state; + struct ice_sign_seg *seg; + u32 conf_idx; + u32 start; + u32 count; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) { + state = ICE_DDP_PKG_ERR; + goto exit; + } + + conf_idx = LE32_TO_CPU(seg->signed_seg_idx); + start = LE32_TO_CPU(seg->signed_buf_start); + count = LE32_TO_CPU(seg->signed_buf_count); + + state = ice_download_pkg_sig_seg(hw, seg); + if (state) + goto exit; + + state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, + count); + +exit: + return state; +} + +/** + * ice_match_signing_seg 
- determine if a matching signing segment exists + * @pkg_hdr: pointer to package header + * @seg_id: segment id that is expected + * @sign_type: signing type + */ +static bool +ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type) +{ + bool match = false; + u32 i; + + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { + if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id, + sign_type)) { + match = true; + break; + } + } + + return match; +} + +/** + * ice_post_dwnld_pkg_actions - perform post download package actions + * @hw: pointer to the hardware structure + */ +static enum ice_ddp_state +ice_post_dwnld_pkg_actions(struct ice_hw *hw) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + enum ice_status status; + + status = ice_set_vlan_mode(hw); + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", + status); + state = ICE_DDP_PKG_ERR; + } + + return state; +} + +/** + * ice_download_pkg_with_sig_seg - download package using signature segments + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + */ +static enum ice_ddp_state +ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + enum ice_aq_err aq_err = hw->adminq.sq_last_status; + enum ice_ddp_state state = ICE_DDP_PKG_ERR; + enum ice_status status; + u32 i; + + ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); + ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == ICE_ERR_AQ_NO_WORK) + state = ICE_DDP_PKG_ALREADY_LOADED; + else + state = ice_map_aq_err_to_ddp_state(aq_err); + return state; + } + + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { + if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, + hw->pkg_sign_type)) + continue; + + state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i); + if (state) + break; + } + + if (!state) + state = 
ice_post_dwnld_pkg_actions(hw); + + ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. + */ +static enum ice_ddp_state +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + enum ice_status status; + struct ice_buf_hdr *bh; + + if (!bufs || !count) + return ICE_DDP_PKG_ERR; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. + */ + bh = (struct ice_buf_hdr *)bufs; + if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == ICE_ERR_AQ_NO_WORK) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); + } + + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true); + if (!state) + state = ice_post_dwnld_pkg_actions(hw); + + ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_download_pkg_without_sig_seg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package without signature segment. 
+ */ +static enum ice_ddp_state +ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + enum ice_ddp_state state; + + ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_format_ver.major, + ice_seg->hdr.seg_format_ver.minor, + ice_seg->hdr.seg_format_ver.update, + ice_seg->hdr.seg_format_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + LE32_TO_CPU(ice_seg->hdr.seg_type), + LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + LE32_TO_CPU(ice_buf_tbl->buf_count)); + + state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + LE32_TO_CPU(ice_buf_tbl->buf_count)); + + return state; +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. + */ +static enum ice_ddp_state +ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + struct ice_seg *ice_seg) +{ + enum ice_ddp_state state; + + if (hw->pkg_has_signing_seg) + state = ice_download_pkg_with_sig_seg(hw, pkg_hdr); + else + state = ice_download_pkg_without_sig_seg(hw, ice_seg); + + ice_post_pkg_dwnld_vlan_mode_cfg(hw); + + return state; +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. 
+ */ +static enum ice_ddp_state +ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + if (!pkg_hdr) + return ICE_DDP_PKG_ERR; + + hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr); + ice_get_signing_req(hw); + + ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n", + hw->pkg_seg_id); + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); + if (seg_hdr) { + struct ice_meta_sect *meta; + struct ice_pkg_enum state; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + /* Get package information from the Metadata Section */ + meta = (struct ice_meta_sect *) + ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, + ICE_SID_METADATA); + if (!meta) { + ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + hw->pkg_ver = meta->ver; + ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name), + ICE_NONDMA_TO_NONDMA); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta->ver.major, meta->ver.minor, meta->ver.update, + meta->ver.draft, meta->name); + + hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; + ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id, + sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA); + + ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_format_ver.major, + seg_hdr->seg_format_ver.minor, + seg_hdr->seg_format_ver.update, + seg_hdr->seg_format_ver.draft, + seg_hdr->seg_id); + } else { + ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. 
+ */ +enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_aqc_get_pkg_info_resp *pkg_info; + u16 size; + u32 i; + + size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT); + pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); + if (!pkg_info) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { + state = ICE_DDP_PKG_ERR; + goto init_pkg_free_alloc; + } + + for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; + u8 place = 0; + + if (pkg_info->pkg_info[i].is_active) { + flags[place++] = 'A'; + hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + hw->active_track_id = + LE32_TO_CPU(pkg_info->pkg_info[i].track_id); + ice_memcpy(hw->active_pkg_name, + pkg_info->pkg_info[i].name, + sizeof(pkg_info->pkg_info[i].name), + ICE_NONDMA_TO_NONDMA); + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; + } + if (pkg_info->pkg_info[i].is_active_at_boot) + flags[place++] = 'B'; + if (pkg_info->pkg_info[i].is_modified) + flags[place++] = 'M'; + if (pkg_info->pkg_info[i].is_in_nvm) + flags[place++] = 'N'; + + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", + i, pkg_info->pkg_info[i].ver.major, + pkg_info->pkg_info[i].ver.minor, + pkg_info->pkg_info[i].ver.update, + pkg_info->pkg_info[i].ver.draft, + pkg_info->pkg_info[i].name, flags); + } + +init_pkg_free_alloc: + ice_free(hw, pkg_info); + + return state; +} + +/** + * ice_label_enum_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the label entry to be returned + * @offset: pointer to receive absolute offset, always zero for label sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual label entries. 
+ */ +static void * +ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_label_section *labels; + + if (!section) + return NULL; + + if (index > ICE_MAX_LABELS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + labels = (struct ice_label_section *)section; + if (index >= LE16_TO_CPU(labels->count)) + return NULL; + + return labels->label + index; +} + +/** + * ice_enum_labels + * @ice_seg: pointer to the ice segment (NULL on subsequent calls) + * @type: the section type that will contain the label (0 on subsequent calls) + * @state: ice_pkg_enum structure that will hold the state of the enumeration + * @value: pointer to a value that will return the label's value if found + * + * Enumerates a list of labels in the package. The caller will call + * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call + * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL + * the end of the list has been reached. + */ +static char * +ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, + u16 *value) +{ + struct ice_label *label; + + /* Check for valid label section on first call */ + if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) + return NULL; + + label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type, + NULL, + ice_label_enum_handler); + if (!label) + return NULL; + + *value = LE16_TO_CPU(label->value); + return label->name; +} + +/** + * ice_find_label_value + * @ice_seg: pointer to the ice segment (non-NULL) + * @name: name of the label to search for + * @type: the section type that will contain the label + * @value: pointer to a value that will return the label's value if found + * + * Finds a label's value given the label name and the section type to search. + * The ice_seg parameter must not be NULL since the first call to + * ice_enum_labels requires a pointer to an actual ice_seg structure. 
+ */ +enum ice_status +ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, + u16 *value) +{ + struct ice_pkg_enum state; + char *label_name; + u16 val; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ice_seg) + return ICE_ERR_PARAM; + + do { + label_name = ice_enum_labels(ice_seg, type, &state, &val); + if (label_name && !strcmp(label_name, name)) { + *value = val; + return ICE_SUCCESS; + } + + ice_seg = NULL; + } while (label_name); + + return ICE_ERR_CFG; +} + +/** + * ice_verify_pkg - verify package + * @pkg: pointer to the package buffer + * @len: size of the package buffer + * + * Verifies various attributes of the package file, including length, format + * version, and the requirement of at least one segment. + */ +enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +{ + u32 seg_count; + u32 i; + + if (len < ice_struct_size(pkg, seg_offset, 1)) + return ICE_DDP_PKG_INVALID_FILE; + + if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) + return ICE_DDP_PKG_INVALID_FILE; + + /* pkg must have at least one segment */ + seg_count = LE32_TO_CPU(pkg->seg_count); + if (seg_count < 1) + return ICE_DDP_PKG_INVALID_FILE; + + /* make sure segment array fits in package length */ + if (len < ice_struct_size(pkg, seg_offset, seg_count)) + return ICE_DDP_PKG_INVALID_FILE; + + /* all segments must fit within length */ + for (i = 0; i < seg_count; i++) { + u32 off = LE32_TO_CPU(pkg->seg_offset[i]); + struct ice_generic_seg_hdr *seg; + + /* segment header must fit */ + if (len < off + sizeof(*seg)) + return ICE_DDP_PKG_INVALID_FILE; + + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + + /* segment body must fit */ + if (len < off + LE32_TO_CPU(seg->seg_size)) + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * 
ice_free_seg - free package segment pointer + * @hw: pointer to the hardware structure + * + * Frees the package segment pointer in the proper manner, depending on if the + * segment was allocated or just the passed in pointer was stored. + */ +void ice_free_seg(struct ice_hw *hw) +{ + if (hw->pkg_copy) { + ice_free(hw, hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } + hw->seg = NULL; +} + +/** + * ice_chk_pkg_version - check package version for compatibility with driver + * @pkg_ver: pointer to a version structure to check + * + * Check to make sure that the package about to be downloaded is compatible with + * the driver. To be compatible, the major and minor components of the package + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR + * definitions. + */ +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ + if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; + else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_chk_pkg_compat + * @hw: pointer to the hardware structure + * @ospkg: pointer to the package hdr + * @seg: pointer to the package segment hdr + * + * This function checks the package version compatibility with driver and NVM + */ +static enum ice_ddp_state +ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, + struct ice_seg **seg) +{ + struct ice_aqc_get_pkg_info_resp *pkg; + enum ice_ddp_state state; + u16 size; + u32 i; + + /* Check package version compatibility */ + state = ice_chk_pkg_version(&hw->pkg_ver); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); + return state; + } + + /* find ICE segment in given package */ + *seg = (struct ice_seg 
*)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, + ospkg); + if (!*seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + /* Check if FW is compatible with the OS package */ + size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT); + pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); + if (!pkg) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { + state = ICE_DDP_PKG_ERR; + goto fw_ddp_compat_free_alloc; + } + + for (i = 0; i < LE32_TO_CPU(pkg->count); i++) { + /* loop till we find the NVM package */ + if (!pkg->pkg_info[i].is_in_nvm) + continue; + if ((*seg)->hdr.seg_format_ver.major != + pkg->pkg_info[i].ver.major || + (*seg)->hdr.seg_format_ver.minor > + pkg->pkg_info[i].ver.minor) { + state = ICE_DDP_PKG_FW_MISMATCH; + ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); + } + /* done processing NVM package so break */ + break; + } +fw_ddp_compat_free_alloc: + ice_free(hw, pkg); + return state; +} + +/** + * ice_sw_fv_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the field vector entry to be returned + * @offset: ptr to variable that receives the offset in the field vector table + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * This function treats the given section as of type ice_sw_fv_section and + * enumerates offset field. "offset" is an index into the field vector table. + */ +static void * +ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_sw_fv_section *fv_section = + (struct ice_sw_fv_section *)section; + + if (!section || sect_type != ICE_SID_FLD_VEC_SW) + return NULL; + if (index >= LE16_TO_CPU(fv_section->count)) + return NULL; + if (offset) + /* "index" passed in to this function is relative to a given + * 4k block. 
To get to the true index into the field vector + * table need to add the relative index to the base_offset + * field of this section + */ + *offset = LE16_TO_CPU(fv_section->base_offset) + index; + return fv_section->fv + index; +} + +/** + * ice_get_prof_index_max - get the max profile index for used profile + * @hw: pointer to the HW struct + * + * Calling this function will get the max profile index for used profile + * and store the index number in struct ice_switch_info *switch_info + * in hw for following use. + */ +static int ice_get_prof_index_max(struct ice_hw *hw) +{ + u16 prof_index = 0, j, max_prof_index = 0; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + bool flag = false; + struct ice_fv *fv; + u32 offset; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + + do { + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* in the profile that not be used, the prot_id is set to 0xff + * and the off is set to 0x1ff for all the field vectors. 
+ */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id != ICE_PROT_INVALID || + fv->ew[j].off != ICE_FV_OFFSET_INVAL) + flag = true; + if (flag && prof_index > max_prof_index) + max_prof_index = prof_index; + + prof_index++; + flag = false; + } while (fv); + + hw->switch_info->max_used_prof_index = max_prof_index; + + return ICE_SUCCESS; +} + +/** + * ice_get_ddp_pkg_state - get DDP pkg state after download + * @hw: pointer to the HW struct + * @already_loaded: indicates if pkg was already loaded onto the device + * + */ +static enum ice_ddp_state +ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded) +{ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { + if (already_loaded) + return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; + else + return ICE_DDP_PKG_SUCCESS; + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; + } else { + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + + /* setup Switch block input mask, which is 48-bits in two parts */ + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + +/** + * ice_init_pkg - initialize/download package + * @hw: pointer to the 
hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function initializes a package. The package contains HW tables + * required to do packet processing. First, the function extracts package + * information such as version. Then it finds the ice configuration segment + * within the package; this function then saves a copy of the segment pointer + * within the supplied package buffer. Next, the function will cache any hints + * from the package, followed by downloading the package itself. Note, that if + * a previous PF driver has already downloaded the package successfully, then + * the current driver will not have to download the package again. + * + * The local package contents will be used to query default behavior and to + * update specific sections of the HW's version of the package (e.g. to update + * the parse graph to understand new protocols). + * + * This function stores a pointer to the package buffer memory, and it is + * expected that the supplied buffer will not be freed immediately. If the + * package buffer needs to be freed, such as when read from a file, use + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this + * case. 
+ */ +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +{ + bool already_loaded = false; + enum ice_ddp_state state; + struct ice_pkg_hdr *pkg; + struct ice_seg *seg; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + pkg = (struct ice_pkg_hdr *)buf; + state = ice_verify_pkg(pkg, len); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + state); + return state; + } + + /* initialize package info */ + state = ice_init_pkg_info(hw, pkg); + if (state) + return state; + + /* For packages with signing segments, must be a matching segment */ + if (hw->pkg_has_signing_seg) + if (!ice_match_signing_seg(pkg, hw->pkg_seg_id, + hw->pkg_sign_type)) + return ICE_DDP_PKG_ERR; + + /* before downloading the package, check package version for + * compatibility with driver + */ + state = ice_chk_pkg_compat(hw, pkg, &seg); + if (state) + return state; + + /* initialize package hints and then download package */ + ice_init_pkg_hints(hw, seg); + state = ice_download_pkg(hw, pkg, seg); + + if (state == ICE_DDP_PKG_ALREADY_LOADED) { + ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); + already_loaded = true; + } + + /* Get information on the package currently loaded in HW, then make sure + * the driver is compatible with this version. + */ + if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { + state = ice_get_pkg_info(hw); + if (!state) + state = ice_get_ddp_pkg_state(hw, already_loaded); + } + + if (ice_is_init_pkg_successful(state)) { + hw->seg = seg; + /* on successful package download update other required + * registers to support the package and fill HW tables + * with package content. 
+ */ + ice_init_pkg_regs(hw); + ice_fill_blk_tbls(hw); + ice_get_prof_index_max(hw); + } else { + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", + state); + } + + return state; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents. + * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. 
+ */ +enum ice_ddp_state +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +{ + enum ice_ddp_state state; + u8 *buf_copy; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA); + + state = ice_init_pkg(hw, buf_copy, len); + if (!ice_is_init_pkg_successful(state)) { + /* Free the copy, since we failed to initialize the package */ + ice_free(hw, buf_copy); + } else { + /* Track the copied pkg so we can free it later */ + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return state; +} + +/** + * ice_is_init_pkg_successful - check if DDP init was successful + * @state: state of the DDP pkg after download + */ +bool ice_is_init_pkg_successful(enum ice_ddp_state state) +{ + switch (state) { + case ICE_DDP_PKG_SUCCESS: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + return true; + default: + return false; + } +} + +/** + * ice_pkg_buf_alloc + * @hw: pointer to the HW structure + * + * Allocates a package buffer and returns a pointer to the buffer header. + * Note: all package contents must be in Little Endian form. 
+ */ +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) +{ + struct ice_buf_build *bld; + struct ice_buf_hdr *buf; + + bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld)); + if (!bld) + return NULL; + + buf = (struct ice_buf_hdr *)bld; + buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr, + section_entry)); + return bld; +} + +static bool ice_is_gtp_u_profile(u16 prof_idx) +{ + return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && + prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) || + prof_idx == ICE_PROFID_IPV4_GTPU_TEID; +} + +static bool ice_is_gtp_c_profile(u16 prof_idx) +{ + switch (prof_idx) { + case ICE_PROFID_IPV4_GTPC_TEID: + case ICE_PROFID_IPV4_GTPC_NO_TEID: + case ICE_PROFID_IPV6_GTPC_TEID: + case ICE_PROFID_IPV6_GTPC_NO_TEID: + return true; + default: + return false; + } +} + +/** + * ice_get_sw_prof_type - determine switch profile type + * @hw: pointer to the HW structure + * @fv: pointer to the switch field vector + * @prof_idx: profile index to check + */ +static enum ice_prof_type +ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx) +{ + bool valid_prof = false; + u16 i; + + if (ice_is_gtp_c_profile(prof_idx)) + return ICE_PROF_TUN_GTPC; + + if (ice_is_gtp_u_profile(prof_idx)) + return ICE_PROF_TUN_GTPU; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { + if (fv->ew[i].off != ICE_NAN_OFFSET) + valid_prof = true; + + /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && + fv->ew[i].off == ICE_VNI_OFFSET) + return ICE_PROF_TUN_UDP; + + /* GRE tunnel will have GRE protocol */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) + return ICE_PROF_TUN_GRE; + } + + return valid_prof ? 
ICE_PROF_NON_TUN : ICE_PROF_INVALID; +} + +/** + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type + * @hw: pointer to hardware structure + * @req_profs: type of profiles requested + * @bm: pointer to memory for returning the bitmap of field vectors + */ +void +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, + ice_bitmap_t *bm) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); + ice_seg = hw->seg; + do { + enum ice_prof_type prof_type; + u32 offset; + + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + ice_seg = NULL; + + if (fv) { + /* Determine field vector type */ + prof_type = ice_get_sw_prof_type(hw, fv, offset); + + if (req_profs & prof_type) + ice_set_bit((u16)offset, bm); + } + } while (fv); +} + +/** + * ice_get_sw_fv_list + * @hw: pointer to the HW structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @bm: bitmap of field vectors to consider + * @fv_list: Head of a list + * + * Finds all the field vector entries from switch block that contain + * a given protocol ID and offset and returns a list of structures of type + * "ice_sw_fv_list_entry". Every structure in the list has a field vector + * definition and profile ID information + * NOTE: The caller of the function is responsible for freeing the memory + * allocated for every list entry. 
+ */ +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) +{ + struct ice_sw_fv_list_entry *fvl; + struct ice_sw_fv_list_entry *tmp; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + u32 offset; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!lkups->n_val_words || !hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + do { + u16 i; + + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* If field vector is not in the bitmap list, then skip this + * profile. + */ + if (!ice_is_bit_set(bm, (u16)offset)) + continue; + + for (i = 0; i < lkups->n_val_words; i++) { + int j; + + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id == + lkups->fv_words[i].prot_id && + fv->ew[j].off == lkups->fv_words[i].off) + break; + if (j >= hw->blk[ICE_BLK_SW].es.fvw) + break; + if (i + 1 == lkups->n_val_words) { + fvl = (struct ice_sw_fv_list_entry *) + ice_malloc(hw, sizeof(*fvl)); + if (!fvl) + goto err; + fvl->fv_ptr = fv; + fvl->profile_id = offset; + LIST_ADD(&fvl->list_entry, fv_list); + break; + } + } + } while (fv); + if (LIST_EMPTY(fv_list)) { + ice_warn(hw, "Required profiles not found in currently loaded DDP package"); + return ICE_ERR_CFG; + } + return ICE_SUCCESS; + +err: + LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry, + list_entry) { + LIST_DEL(&fvl->list_entry); + ice_free(hw, fvl); + } + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_init_prof_result_bm - Initialize the profile result index bitmap + * @hw: pointer to hardware structure + */ +void ice_init_prof_result_bm(struct ice_hw *hw) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!hw->seg) + return; + + ice_seg = hw->seg; + do { + u32 off; 
+ u16 i; + + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &off, ice_sw_fv_handler); + ice_seg = NULL; + if (!fv) + break; + + ice_zero_bitmap(hw->switch_info->prof_res_bm[off], + ICE_MAX_FV_WORDS); + + /* Determine empty field vector indices, these can be + * used for recipe results. Skip index 0, since it is + * always used for Switch ID. + */ + for (i = 1; i < ICE_MAX_FV_WORDS; i++) + if (fv->ew[i].prot_id == ICE_PROT_INVALID && + fv->ew[i].off == ICE_FV_OFFSET_INVAL) + ice_set_bit(i, + hw->switch_info->prof_res_bm[off]); + } while (fv); +} + +/** + * ice_pkg_buf_free + * @hw: pointer to the HW structure + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Frees a package buffer + */ +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) +{ + ice_free(hw, bld); +} + +/** + * ice_pkg_buf_reserve_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @count: the number of sections to reserve + * + * Reserves one or more section table entries in a package buffer. This routine + * can be called multiple times as long as they are made before calling + * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() + * is called once, the number of sections that can be allocated will not be able + * to be increased; not using all reserved sections is fine, but this will + * result in some wasted space in the buffer. + * Note: all package contents must be in Little Endian form. 
+ */ +enum ice_status +ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) +{ + struct ice_buf_hdr *buf; + u16 section_count; + u16 data_end; + + if (!bld) + return ICE_ERR_PARAM; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* already an active section, can't increase table size */ + section_count = LE16_TO_CPU(buf->section_count); + if (section_count > 0) + return ICE_ERR_CFG; + + if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) + return ICE_ERR_CFG; + bld->reserved_section_table_entries += count; + + data_end = LE16_TO_CPU(buf->data_end) + + FLEX_ARRAY_SIZE(buf, section_entry, count); + buf->data_end = CPU_TO_LE16(data_end); + + return ICE_SUCCESS; +} + +/** + * ice_pkg_buf_alloc_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * + * Reserves memory in the buffer for a section's content and updates the + * buffers' status accordingly. This routine returns a pointer to the first + * byte of the section start within the buffer, which is used to fill in the + * section contents. + * Note: all package contents must be in Little Endian form. 
+ */ +void * +ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) +{ + struct ice_buf_hdr *buf; + u16 sect_count; + u16 data_end; + + if (!bld || !type || !size) + return NULL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* check for enough space left in buffer */ + data_end = LE16_TO_CPU(buf->data_end); + + /* section start must align on 4 byte boundary */ + data_end = ICE_ALIGN(data_end, 4); + + if ((data_end + size) > ICE_MAX_S_DATA_END) + return NULL; + + /* check for more available section table entries */ + sect_count = LE16_TO_CPU(buf->section_count); + if (sect_count < bld->reserved_section_table_entries) { + void *section_ptr = ((u8 *)buf) + data_end; + + buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end); + buf->section_entry[sect_count].size = CPU_TO_LE16(size); + buf->section_entry[sect_count].type = CPU_TO_LE32(type); + + data_end += size; + buf->data_end = CPU_TO_LE16(data_end); + + buf->section_count = CPU_TO_LE16(sect_count + 1); + return section_ptr; + } + + /* no free section table entries */ + return NULL; +} + +/** + * ice_pkg_buf_alloc_single_section + * @hw: pointer to the HW structure + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * @section: returns pointer to the section + * + * Allocates a package buffer with a single section. + * Note: all package contents must be in Little Endian form. 
+ */ +struct ice_buf_build * +ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, + void **section) +{ + struct ice_buf_build *buf; + + if (!section) + return NULL; + + buf = ice_pkg_buf_alloc(hw); + if (!buf) + return NULL; + + if (ice_pkg_buf_reserve_section(buf, 1)) + goto ice_pkg_buf_alloc_single_section_err; + + *section = ice_pkg_buf_alloc_section(buf, type, size); + if (!*section) + goto ice_pkg_buf_alloc_single_section_err; + + return buf; + +ice_pkg_buf_alloc_single_section_err: + ice_pkg_buf_free(hw, buf); + return NULL; +} + +/** + * ice_pkg_buf_unreserve_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @count: the number of sections to unreserve + * + * Unreserves one or more section table entries in a package buffer, releasing + * space that can be used for section data. This routine can be called + * multiple times as long as they are made before calling + * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() + * is called once, the number of sections that can be allocated will not be able + * to be increased; not using all reserved sections is fine, but this will + * result in some wasted space in the buffer. + * Note: all package contents must be in Little Endian form. 
+ */ +enum ice_status +ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) +{ + struct ice_buf_hdr *buf; + u16 section_count; + u16 data_end; + + if (!bld) + return ICE_ERR_PARAM; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* already an active section, can't decrease table size */ + section_count = LE16_TO_CPU(buf->section_count); + if (section_count > 0) + return ICE_ERR_CFG; + + if (count > bld->reserved_section_table_entries) + return ICE_ERR_CFG; + bld->reserved_section_table_entries -= count; + + data_end = LE16_TO_CPU(buf->data_end) - + FLEX_ARRAY_SIZE(buf, section_entry, count); + buf->data_end = CPU_TO_LE16(data_end); + + return ICE_SUCCESS; +} + +/** + * ice_pkg_buf_get_free_space + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of free bytes remaining in the buffer. + * Note: all package contents must be in Little Endian form. + */ +u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld) +{ + struct ice_buf_hdr *buf; + + if (!bld) + return 0; + + buf = (struct ice_buf_hdr *)&bld->buf; + return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end); +} + +/** + * ice_pkg_buf_get_active_sections + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of active sections. Before using the package buffer + * in an update package command, the caller should make sure that there is at + * least one active section - otherwise, the buffer is not legal and should + * not be used. + * Note: all package contents must be in Little Endian form. 
+ */ +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) +{ + struct ice_buf_hdr *buf; + + if (!bld) + return 0; + + buf = (struct ice_buf_hdr *)&bld->buf; + return LE16_TO_CPU(buf->section_count); +} + +/** + * ice_pkg_buf + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Return a pointer to the buffer's header + */ +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +{ + if (bld) + return &bld->buf; + + return NULL; +} + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. + */ +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ + struct ice_nvm_table *nvms; + + nvms = (struct ice_nvm_table *) + (ice_seg->device_table + + LE32_TO_CPU(ice_seg->device_table_count)); + + return (_FORCE_ struct ice_buf_table *) + (nvms->vers + LE32_TO_CPU(nvms->table_count)); +} + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header. + */ +static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ + struct ice_buf_hdr *hdr; + u16 section_count; + u16 data_end; + + hdr = (struct ice_buf_hdr *)buf->buf; + /* verify data */ + section_count = LE16_TO_CPU(hdr->section_count); + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) + return NULL; + + data_end = LE16_TO_CPU(hdr->data_end); + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) + return NULL; + + return hdr; +} + +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. 
When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (ice_seg) { + state->buf_table = ice_find_buf_table(ice_seg); + if (!state->buf_table) + return NULL; + + state->buf_idx = 0; + return ice_pkg_val_buf(state->buf_table->buf_array); + } + + if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count)) + return ice_pkg_val_buf(state->buf_table->buf_array + + state->buf_idx); + else + return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. + */ +bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (!ice_seg && !state->buf) + return false; + + if (!ice_seg && state->buf) + if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count)) + return true; + + state->buf = ice_pkg_enum_buf(ice_seg, state); + if (!state->buf) + return false; + + /* start of new buffer, reset section index */ + state->sect_idx = 0; + return true; +} + +/** + * ice_pkg_enum_section + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * + * This function will enumerate all the sections of a particular type in the + * ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the matching + * sections has been reached. 
+ */ +void * +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type) +{ + u16 offset, size; + + if (ice_seg) + state->type = sect_type; + + if (!ice_pkg_advance_sect(ice_seg, state)) + return NULL; + + /* scan for next matching section */ + while (state->buf->section_entry[state->sect_idx].type != + CPU_TO_LE32(state->type)) + if (!ice_pkg_advance_sect(NULL, state)) + return NULL; + + /* validate section */ + offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); + if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) + return NULL; + + size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) + return NULL; + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) + return NULL; + + state->sect_type = + LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type); + + /* calc pointer to this section */ + state->sect = ((u8 *)state->buf) + + LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); + + return state->sect; +} + +/** + * ice_pkg_enum_entry + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * @offset: pointer to variable that receives the offset in the table (optional) + * @handler: function that handles access to the entries into the section type + * + * This function will enumerate all the entries in particular section type in + * the ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the entries has + * been reached. + * + * Since each section may have a different header and entry size, the handler + * function is needed to determine the number and location entries in each + * section. 
+ * + * The offset parameter is optional, but should be used for sections that + * contain an offset for each section table. For such cases, the section handler + * function must return the appropriate offset + index to give the absolute + * offset for each entry. For example, if the base for a section's header + * indicates a base offset of 10, and the index for the entry is 2, then + * section handler function should set the offset to 10 + 2 = 12. + */ +void * +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type, u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)) +{ + void *entry; + + if (ice_seg) { + if (!handler) + return NULL; + + if (!ice_pkg_enum_section(ice_seg, state, sect_type)) + return NULL; + + state->entry_idx = 0; + state->handler = handler; + } else { + state->entry_idx++; + } + + if (!state->handler) + return NULL; + + /* get entry */ + entry = state->handler(state->sect_type, state->sect, state->entry_idx, + offset); + if (!entry) { + /* end of a section, look for another section of this type */ + if (!ice_pkg_enum_section(NULL, state, 0)) + return NULL; + + state->entry_idx = 0; + entry = state->handler(state->sect_type, state->sect, + state->entry_idx, offset); + } + + return entry; +} + +/** + * ice_boost_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the boost TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual boost TCAM entries. 
+ */ +static void * +ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_boost_tcam_section *boost; + + if (!section) + return NULL; + + if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) + return NULL; + + if (index > ICE_MAX_BST_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + boost = (struct ice_boost_tcam_section *)section; + if (index >= LE16_TO_CPU(boost->count)) + return NULL; + + return boost->tcam + index; +} + +/** + * ice_find_boost_entry + * @ice_seg: pointer to the ice segment (non-NULL) + * @addr: Boost TCAM address of entry to search for + * @entry: returns pointer to the entry + * + * Finds a particular Boost TCAM entry and returns a pointer to that entry + * if it is found. The ice_seg parameter must not be NULL since the first call + * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. + */ +static enum ice_status +ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, + struct ice_boost_tcam_entry **entry) +{ + struct ice_boost_tcam_entry *tcam; + struct ice_pkg_enum state; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ice_seg) + return ICE_ERR_PARAM; + + do { + tcam = (struct ice_boost_tcam_entry *) + ice_pkg_enum_entry(ice_seg, &state, + ICE_SID_RXPARSER_BOOST_TCAM, NULL, + ice_boost_tcam_handler); + if (tcam && LE16_TO_CPU(tcam->addr) == addr) { + *entry = tcam; + return ICE_SUCCESS; + } + + ice_seg = NULL; + } while (tcam); + + *entry = NULL; + return ICE_ERR_CFG; +} + +/** + * ice_init_pkg_hints + * @hw: pointer to the HW structure + * @ice_seg: pointer to the segment of the package scan (non-NULL) + * + * This function will scan the package and save off relevant information + * (hints or metadata) for driver use. The ice_seg parameter must not be NULL + * since the first call to ice_enum_labels requires a pointer to an actual + * ice_seg structure. 
+ */ +void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_pkg_enum state; + char *label_name; + u16 val; + int i; + + ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM); + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ice_seg) + return; + + label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, + &val); + + while (label_name) { +/* TODO: Replace !strncmp() with wrappers like match_some_pre() */ + if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) + /* check for a tunnel entry */ + ice_add_tunnel_hint(hw, label_name, val); + + label_name = ice_enum_labels(NULL, 0, &state, &val); + } + + /* Cache the appropriate boost TCAM entry pointers for tunnels */ + for (i = 0; i < hw->tnl.count; i++) { + ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, + &hw->tnl.tbl[i].boost_entry); + if (hw->tnl.tbl[i].boost_entry) + hw->tnl.tbl[i].valid = true; + } +} + +/** + * ice_acquire_global_cfg_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the global config lock for reading + * or writing of the package. When attempting to obtain write access, the + * caller must check for the following two return values: + * + * ICE_SUCCESS - Means the caller has acquired the global config lock + * and can perform writing of the package. + * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. 
+ */ +enum ice_status +ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access) +{ + enum ice_status status; + + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + + if (status == ICE_ERR_AQ_NO_WORK) + ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); + + return status; +} + +/** + * ice_release_global_cfg_lock + * @hw: pointer to the HW structure + * + * This function will release the global config lock. + */ +void ice_release_global_cfg_lock(struct ice_hw *hw) +{ + ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); +} + +/** + * ice_acquire_change_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the change lock. + */ +enum ice_status +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) +{ + return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, + ICE_CHANGE_LOCK_TIMEOUT); +} + +/** + * ice_release_change_lock + * @hw: pointer to the HW structure + * + * This function will release the change lock using the proper Admin Command. 
+ */ +void ice_release_change_lock(struct ice_hw *hw) +{ + ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); +} + +/** + * ice_get_set_tx_topo - get or set tx topology + * @hw: pointer to the HW struct + * @buf: pointer to tx topology buffer + * @buf_size: buffer size + * @cd: pointer to command details structure or NULL + * @flags: pointer to descriptor flags + * @set: 0-get, 1-set topology + * + * The function will get or set tx topology + */ +static enum ice_status +ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, + struct ice_sq_cd *cd, u8 *flags, bool set) +{ + struct ice_aqc_get_set_tx_topo *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_set_tx_topo; + if (set) { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo); + cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED; + /* requested to update a new topology, not a default topolgy */ + if (buf) + cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | + ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; + } else { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); + cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; + } + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (status) + return status; + /* read the return flag values (first byte) for get operation */ + if (!set && flags) + *flags = desc.params.get_set_tx_topo.set_flags; + + return ICE_SUCCESS; +} + +/** + * ice_cfg_tx_topo - Initialize new tx topology if available + * @hw: pointer to the HW struct + * @buf: pointer to Tx topology buffer + * @len: buffer size + * + * The function will apply the new Tx topology from the package buffer + * if available. 
+ */ +enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) +{ + u8 *current_topo, *new_topo = NULL; + struct ice_run_time_cfg_seg *seg; + struct ice_buf_hdr *section; + struct ice_pkg_hdr *pkg_hdr; + enum ice_ddp_state state; + u16 i, size = 0, offset; + enum ice_status status; + u32 reg = 0; + u8 flags; + + if (!buf || !len) + return ICE_ERR_PARAM; + + /* Does FW support new Tx topology mode ? */ + if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { + ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n"); + return ICE_ERR_NOT_SUPPORTED; + } + + current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); + if (!current_topo) + return ICE_ERR_NO_MEMORY; + + /* get the current Tx topology */ + status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL, + &flags, false); + ice_free(hw, current_topo); + + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n"); + return status; + } + + /* Is default topology already applied ? */ + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == 9) { + ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n"); + /* Already default topology is loaded */ + return ICE_ERR_ALREADY_EXISTS; + } + + /* Is new topology already applied ? */ + if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == 5) { + ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n"); + /* Already new topology is loaded */ + return ICE_ERR_ALREADY_EXISTS; + } + + /* Is set topology issued already ? 
*/ + if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) { + ice_debug(hw, ICE_DBG_INIT, "Update tx topology was done by another PF\n"); + /* add a small delay before exiting */ + for (i = 0; i < 20; i++) + ice_msec_delay(100, true); + return ICE_ERR_ALREADY_EXISTS; + } + + /* Change the topology from new to default (5 to 9) */ + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == 5) { + ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n"); + goto update_topo; + } + + pkg_hdr = (struct ice_pkg_hdr *)buf; + state = ice_verify_pkg(pkg_hdr, len); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + state); + return ICE_ERR_CFG; + } + + /* find run time configuration segment */ + seg = (struct ice_run_time_cfg_seg *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr); + if (!seg) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n"); + return ICE_ERR_CFG; + } + + if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n", + seg->buf_table.buf_count); + return ICE_ERR_CFG; + } + + section = ice_pkg_val_buf(seg->buf_table.buf_array); + + if (!section || LE32_TO_CPU(section->section_entry[0].type) != + ICE_SID_TX_5_LAYER_TOPO) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n"); + return ICE_ERR_CFG; + } + + size = LE16_TO_CPU(section->section_entry[0].size); + offset = LE16_TO_CPU(section->section_entry[0].offset); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n"); + return ICE_ERR_CFG; + } + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n"); + return ICE_ERR_CFG; + } + + /* Get the new topology buffer */ + new_topo = ((u8 *)section) + offset; + +update_topo: + /* acquire global lock to make sure 
that set topology issued + * by one PF + */ + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n"); + return status; + } + + /* check reset was triggered already or not */ + reg = rd32(hw, GLGEN_RSTAT); + if (reg & GLGEN_RSTAT_DEVSTATE_M) { + /* Reset is in progress, re-init the hw again */ + ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer topology might be applied already\n"); + ice_check_reset(hw); + return ICE_SUCCESS; + } + + /* set new topology */ + status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Set tx topology is failed\n"); + return status; + } + + /* new topology is updated, delay 1 second before issuing the CORRER */ + for (i = 0; i < 10; i++) + ice_msec_delay(100, true); + ice_reset(hw, ICE_RESET_CORER); + /* CORER will clear the global lock, so no explicit call + * required for release + */ + return ICE_SUCCESS; +} diff --git a/sys/dev/ice/ice_opts.h b/sys/dev/ice/ice_defs.h copy from sys/dev/ice/ice_opts.h copy to sys/dev/ice/ice_defs.h --- a/sys/dev/ice/ice_opts.h +++ b/sys/dev/ice/ice_defs.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,19 +30,42 @@ */ /*$FreeBSD$*/ -/** - * @file ice_opts.h - * @brief header including kernel option files - * - * Contains the various opt_*.h header files which set various macros - * indicating features and functionality which depend on kernel configuration. 
- */ +#ifndef _ICE_DEFS_H_ +#define _ICE_DEFS_H_ + +#define ETH_ALEN 6 + +#define ETH_HEADER_LEN 14 + +#define BIT(a) (1UL << (a)) +#ifndef BIT_ULL +#define BIT_ULL(a) (1ULL << (a)) +#endif /* BIT_ULL */ + +#define BITS_PER_BYTE 8 -#ifndef _ICE_OPTS_H_ -#define _ICE_OPTS_H_ +#define _FORCE_ -#include "opt_inet.h" -#include "opt_inet6.h" -#include "opt_rss.h" +#define ICE_BYTES_PER_WORD 2 +#define ICE_BYTES_PER_DWORD 4 +#define ICE_MAX_TRAFFIC_CLASS 8 +#ifndef MIN_T +#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b)) #endif + +#define IS_ASCII(_ch) ((_ch) < 0x80) + +#define STRUCT_HACK_VAR_LEN +/** + * ice_struct_size - size of struct with C99 flexible array member + * @ptr: pointer to structure + * @field: flexible array member (last member of the structure) + * @num: number of elements of that flexible array member + */ +#define ice_struct_size(ptr, field, num) \ + (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num)) + +#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0])) + +#endif /* _ICE_DEFS_H_ */ diff --git a/sys/dev/ice/ice_devids.h b/sys/dev/ice/ice_devids.h --- a/sys/dev/ice/ice_devids.h +++ b/sys/dev/ice/ice_devids.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -34,6 +34,7 @@ #define _ICE_DEVIDS_H_ /* Device IDs */ +#define ICE_DEV_ID_E822_SI_DFLT 0x1888 /* Intel(R) Ethernet Connection E823-L for backplane */ #define ICE_DEV_ID_E823L_BACKPLANE 0x124C /* Intel(R) Ethernet Connection E823-L for SFP */ @@ -52,6 +53,11 @@ #define ICE_DEV_ID_E810C_SFP 0x1593 #define ICE_SUBDEV_ID_E810T 0x000E #define ICE_SUBDEV_ID_E810T2 0x000F +#define ICE_SUBDEV_ID_E810T3 0x02E9 +#define ICE_SUBDEV_ID_E810T4 0x02EA +#define ICE_SUBDEV_ID_E810T5 0x0010 +#define ICE_SUBDEV_ID_E810T6 0x0012 +#define ICE_SUBDEV_ID_E810T7 0x0011 /* Intel(R) Ethernet Controller E810-XXV for backplane */ #define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599 /* Intel(R) Ethernet Controller E810-XXV for QSFP */ @@ -86,5 +92,4 @@ #define ICE_DEV_ID_E822L_10G_BASE_T 0x1899 /* Intel(R) Ethernet Connection E822-L 1GbE */ #define ICE_DEV_ID_E822L_SGMII 0x189A - #endif /* _ICE_DEVIDS_H_ */ diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h --- a/sys/dev/ice/ice_drv_info.h +++ b/sys/dev/ice/ice_drv_info.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -63,16 +63,16 @@ * @var ice_rc_version * @brief driver release candidate version number */ -const char ice_driver_version[] = "1.34.2-k"; +const char ice_driver_version[] = "1.37.7-k"; const uint8_t ice_major_version = 1; -const uint8_t ice_minor_version = 34; -const uint8_t ice_patch_version = 2; +const uint8_t ice_minor_version = 37; +const uint8_t ice_patch_version = 7; const uint8_t ice_rc_version = 0; #define PVIDV(vendor, devid, name) \ - PVID(vendor, devid, name " - 1.34.2-k") + PVID(vendor, devid, name " - 1.37.7-k") #define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \ - PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.34.2-k") + PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.37.7-k") /** * @var ice_vendor_info_array @@ -130,9 +130,6 @@ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0007, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4"), - PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, - ICE_INTEL_VENDOR_ID, 0x0008, 0, - "Intel(R) Ethernet Network Adapter E810-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x000C, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4 for OCP 3.0"), diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h --- a/sys/dev/ice/ice_features.h +++ b/sys/dev/ice/ice_features.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -68,6 +68,8 @@ ICE_FEATURE_HEALTH_STATUS, ICE_FEATURE_FW_LOGGING, ICE_FEATURE_HAS_PBA, + ICE_FEATURE_DCB, + ICE_FEATURE_TX_BALANCE, /* Must be last entry */ ICE_FEATURE_COUNT }; diff --git a/sys/dev/ice/ice_flex_pipe.h b/sys/dev/ice/ice_flex_pipe.h --- a/sys/dev/ice/ice_flex_pipe.h +++ b/sys/dev/ice/ice_flex_pipe.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -35,20 +35,6 @@ #include "ice_type.h" -/* Package minimal version supported */ -#define ICE_PKG_SUPP_VER_MAJ 1 -#define ICE_PKG_SUPP_VER_MNR 3 - -/* Package format version */ -#define ICE_PKG_FMT_VER_MAJ 1 -#define ICE_PKG_FMT_VER_MNR 0 -#define ICE_PKG_FMT_VER_UPD 0 -#define ICE_PKG_FMT_VER_DFT 0 - -#define ICE_PKG_CNT 4 - -enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); enum ice_status ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off); @@ -61,12 +47,6 @@ void ice_init_prof_result_bm(struct ice_hw *hw); enum ice_status -ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, - ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list); -enum ice_status -ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); -u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); -enum ice_status ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, struct ice_sq_cd *cd); bool @@ -89,8 +69,8 @@ enum ice_status ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig); enum ice_status -ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - struct ice_fv_word *es); +ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, + ice_bitmap_t *ptypes, struct ice_fv_word *es); struct ice_prof_map * 
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); enum ice_status @@ -103,11 +83,7 @@ ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt); enum ice_status ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt); -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); -enum ice_status -ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); enum ice_status ice_init_hw_tbls(struct ice_hw *hw); -void ice_free_seg(struct ice_hw *hw); void ice_fill_blk_tbls(struct ice_hw *hw); void ice_clear_hw_tbls(struct ice_hw *hw); void ice_free_hw_tbls(struct ice_hw *hw); @@ -119,10 +95,14 @@ u64 id); enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); -struct ice_buf_build * -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, - void **section); -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld); -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld); + +void ice_fill_blk_tbls(struct ice_hw *hw); + +/* To support tunneling entries by PF, the package will append the PF number to + * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. + */ +#define ICE_TNL_PRE "TNL_" + +void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val); #endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/sys/dev/ice/ice_flex_pipe.c b/sys/dev/ice/ice_flex_pipe.c --- a/sys/dev/ice/ice_flex_pipe.c +++ b/sys/dev/ice/ice_flex_pipe.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -31,14 +31,11 @@ /*$FreeBSD$*/ #include "ice_common.h" +#include "ice_ddp_common.h" #include "ice_flex_pipe.h" #include "ice_protocol_type.h" #include "ice_flow.h" -/* To support tunneling entries by PF, the package will append the PF number to - * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. - */ -#define ICE_TNL_PRE "TNL_" static const struct ice_tunnel_type_scan tnls[] = { { TNL_VXLAN, "TNL_VXLAN_PF" }, { TNL_GENEVE, "TNL_GENEVE_PF" }, @@ -125,369 +122,13 @@ return ice_sect_lkup[blk][sect]; } -/** - * ice_pkg_val_buf - * @buf: pointer to the ice buffer - * - * This helper function validates a buffer's header. - */ -static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) -{ - struct ice_buf_hdr *hdr; - u16 section_count; - u16 data_end; - - hdr = (struct ice_buf_hdr *)buf->buf; - /* verify data */ - section_count = LE16_TO_CPU(hdr->section_count); - if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) - return NULL; - - data_end = LE16_TO_CPU(hdr->data_end); - if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) - return NULL; - - return hdr; -} - -/** - * ice_find_buf_table - * @ice_seg: pointer to the ice segment - * - * Returns the address of the buffer table within the ice segment. - */ -static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) -{ - struct ice_nvm_table *nvms; - - nvms = (struct ice_nvm_table *) - (ice_seg->device_table + - LE32_TO_CPU(ice_seg->device_table_count)); - - return (_FORCE_ struct ice_buf_table *) - (nvms->vers + LE32_TO_CPU(nvms->table_count)); -} - -/** - * ice_pkg_enum_buf - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This function will enumerate all the buffers in the ice segment. 
The first - * call is made with the ice_seg parameter non-NULL; on subsequent calls, - * ice_seg is set to NULL which continues the enumeration. When the function - * returns a NULL pointer, then the end of the buffers has been reached, or an - * unexpected value has been detected (for example an invalid section count or - * an invalid buffer end value). - */ -static struct ice_buf_hdr * -ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (ice_seg) { - state->buf_table = ice_find_buf_table(ice_seg); - if (!state->buf_table) - return NULL; - - state->buf_idx = 0; - return ice_pkg_val_buf(state->buf_table->buf_array); - } - - if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count)) - return ice_pkg_val_buf(state->buf_table->buf_array + - state->buf_idx); - else - return NULL; -} - -/** - * ice_pkg_advance_sect - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This helper function will advance the section within the ice segment, - * also advancing the buffer if needed. - */ -static bool -ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (!ice_seg && !state->buf) - return false; - - if (!ice_seg && state->buf) - if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count)) - return true; - - state->buf = ice_pkg_enum_buf(ice_seg, state); - if (!state->buf) - return false; - - /* start of new buffer, reset section index */ - state->sect_idx = 0; - return true; -} - -/** - * ice_pkg_enum_section - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * - * This function will enumerate all the sections of a particular type in the - * ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. 
- * When the function returns a NULL pointer, then the end of the matching - * sections has been reached. - */ -static void * -ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type) -{ - u16 offset, size; - - if (ice_seg) - state->type = sect_type; - - if (!ice_pkg_advance_sect(ice_seg, state)) - return NULL; - - /* scan for next matching section */ - while (state->buf->section_entry[state->sect_idx].type != - CPU_TO_LE32(state->type)) - if (!ice_pkg_advance_sect(NULL, state)) - return NULL; - - /* validate section */ - offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); - if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) - return NULL; - - size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size); - if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) - return NULL; - - /* make sure the section fits in the buffer */ - if (offset + size > ICE_PKG_BUF_SIZE) - return NULL; - - state->sect_type = - LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type); - - /* calc pointer to this section */ - state->sect = ((u8 *)state->buf) + - LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); - - return state->sect; -} - -/** - * ice_pkg_enum_entry - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * @offset: pointer to variable that receives the offset in the table (optional) - * @handler: function that handles access to the entries into the section type - * - * This function will enumerate all the entries in particular section type in - * the ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the entries has - * been reached. 
- * - * Since each section may have a different header and entry size, the handler - * function is needed to determine the number and location entries in each - * section. - * - * The offset parameter is optional, but should be used for sections that - * contain an offset for each section table. For such cases, the section handler - * function must return the appropriate offset + index to give the absolution - * offset for each entry. For example, if the base for a section's header - * indicates a base offset of 10, and the index for the entry is 2, then - * section handler function should set the offset to 10 + 2 = 12. - */ -static void * -ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type, u32 *offset, - void *(*handler)(u32 sect_type, void *section, - u32 index, u32 *offset)) -{ - void *entry; - - if (ice_seg) { - if (!handler) - return NULL; - - if (!ice_pkg_enum_section(ice_seg, state, sect_type)) - return NULL; - - state->entry_idx = 0; - state->handler = handler; - } else { - state->entry_idx++; - } - - if (!state->handler) - return NULL; - - /* get entry */ - entry = state->handler(state->sect_type, state->sect, state->entry_idx, - offset); - if (!entry) { - /* end of a section, look for another section of this type */ - if (!ice_pkg_enum_section(NULL, state, 0)) - return NULL; - - state->entry_idx = 0; - entry = state->handler(state->sect_type, state->sect, - state->entry_idx, offset); - } - - return entry; -} - -/** - * ice_boost_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the boost TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual boost TCAM entries. 
- */ -static void * -ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_boost_tcam_section *boost; - - if (!section) - return NULL; - - if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) - return NULL; - - if (index > ICE_MAX_BST_TCAMS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - boost = (struct ice_boost_tcam_section *)section; - if (index >= LE16_TO_CPU(boost->count)) - return NULL; - - return boost->tcam + index; -} - -/** - * ice_find_boost_entry - * @ice_seg: pointer to the ice segment (non-NULL) - * @addr: Boost TCAM address of entry to search for - * @entry: returns pointer to the entry - * - * Finds a particular Boost TCAM entry and returns a pointer to that entry - * if it is found. The ice_seg parameter must not be NULL since the first call - * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. - */ -static enum ice_status -ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, - struct ice_boost_tcam_entry **entry) -{ - struct ice_boost_tcam_entry *tcam; - struct ice_pkg_enum state; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!ice_seg) - return ICE_ERR_PARAM; - - do { - tcam = (struct ice_boost_tcam_entry *) - ice_pkg_enum_entry(ice_seg, &state, - ICE_SID_RXPARSER_BOOST_TCAM, NULL, - ice_boost_tcam_handler); - if (tcam && LE16_TO_CPU(tcam->addr) == addr) { - *entry = tcam; - return ICE_SUCCESS; - } - - ice_seg = NULL; - } while (tcam); - - *entry = NULL; - return ICE_ERR_CFG; -} - -/** - * ice_label_enum_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the label entry to be returned - * @offset: pointer to receive absolute offset, always zero for label sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual label entries. 
- */ -static void * -ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index, - u32 *offset) -{ - struct ice_label_section *labels; - - if (!section) - return NULL; - - if (index > ICE_MAX_LABELS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - labels = (struct ice_label_section *)section; - if (index >= LE16_TO_CPU(labels->count)) - return NULL; - - return labels->label + index; -} - -/** - * ice_enum_labels - * @ice_seg: pointer to the ice segment (NULL on subsequent calls) - * @type: the section type that will contain the label (0 on subsequent calls) - * @state: ice_pkg_enum structure that will hold the state of the enumeration - * @value: pointer to a value that will return the label's value if found - * - * Enumerates a list of labels in the package. The caller will call - * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call - * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL - * the end of the list has been reached. 
- */ -static char * -ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, - u16 *value) -{ - struct ice_label *label; - - /* Check for valid label section on first call */ - if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) - return NULL; - - label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type, - NULL, - ice_label_enum_handler); - if (!label) - return NULL; - - *value = LE16_TO_CPU(label->value); - return label->name; -} - /** * ice_add_tunnel_hint * @hw: pointer to the HW structure * @label_name: label text * @val: value of the tunnel port boost entry */ -static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) +void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) { if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { u16 i; @@ -517,49 +158,6 @@ } } -/** - * ice_init_pkg_hints - * @hw: pointer to the HW structure - * @ice_seg: pointer to the segment of the package scan (non-NULL) - * - * This function will scan the package and save off relevant information - * (hints or metadata) for driver use. The ice_seg parameter must not be NULL - * since the first call to ice_enum_labels requires a pointer to an actual - * ice_seg structure. 
- */ -static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_pkg_enum state; - char *label_name; - u16 val; - int i; - - ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM); - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!ice_seg) - return; - - label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, - &val); - - while (label_name) { - if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) - /* check for a tunnel entry */ - ice_add_tunnel_hint(hw, label_name, val); - - label_name = ice_enum_labels(NULL, 0, &state, &val); - } - - /* Cache the appropriate boost TCAM entry pointers for tunnels */ - for (i = 0; i < hw->tnl.count; i++) { - ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, - &hw->tnl.tbl[i].boost_entry); - if (hw->tnl.tbl[i].boost_entry) - hw->tnl.tbl[i].valid = true; - } -} - /* Key creation */ #define ICE_DC_KEY 0x1 /* don't care */ @@ -673,1491 +271,63 @@ /* count the bits in this byte, checking threshold */ count += ice_hweight8(mask[i]); - if (count > max) - return false; - } - - return true; -} - -/** - * ice_set_key - generate a variable sized key with multiples of 16-bits - * @key: pointer to where the key will be stored - * @size: the size of the complete key in bytes (must be even) - * @val: array of 8-bit values that makes up the value portion of the key - * @upd: array of 8-bit masks that determine what key portion to update - * @dc: array of 8-bit masks that make up the don't care mask - * @nm: array of 8-bit masks that make up the never match mask - * @off: the offset of the first byte in the key to update - * @len: the number of bytes in the key update - * - * This function generates a key from a value, a don't care mask and a never - * match mask. 
- * upd, dc, and nm are optional parameters, and can be NULL: - * upd == NULL --> upd mask is all 1's (update all bits) - * dc == NULL --> dc mask is all 0's (no don't care bits) - * nm == NULL --> nm mask is all 0's (no never match bits) - */ -static enum ice_status -ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, - u16 len) -{ - u16 half_size; - u16 i; - - /* size must be a multiple of 2 bytes. */ - if (size % 2) - return ICE_ERR_CFG; - half_size = size / 2; - - if (off + len > half_size) - return ICE_ERR_CFG; - - /* Make sure at most one bit is set in the never match mask. Having more - * than one never match mask bit set will cause HW to consume excessive - * power otherwise; this is a power management efficiency check. - */ -#define ICE_NVR_MTCH_BITS_MAX 1 - if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) - return ICE_ERR_CFG; - - for (i = 0; i < len; i++) - if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, - dc ? dc[i] : 0, nm ? nm[i] : 0, - key + off + i, key + half_size + off + i)) - return ICE_ERR_CFG; - - return ICE_SUCCESS; -} - -/** - * ice_acquire_global_cfg_lock - * @hw: pointer to the HW structure - * @access: access type (read or write) - * - * This function will request ownership of the global config lock for reading - * or writing of the package. When attempting to obtain write access, the - * caller must check for the following two return values: - * - * ICE_SUCCESS - Means the caller has acquired the global config lock - * and can perform writing of the package. - * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the - * package or has found that no update was necessary; in - * this case, the caller can just skip performing any - * update of the package. 
- */ -static enum ice_status -ice_acquire_global_cfg_lock(struct ice_hw *hw, - enum ice_aq_res_access_type access) -{ - enum ice_status status; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, - ICE_GLOBAL_CFG_LOCK_TIMEOUT); - - if (status == ICE_ERR_AQ_NO_WORK) - ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); - - return status; -} - -/** - * ice_release_global_cfg_lock - * @hw: pointer to the HW structure - * - * This function will release the global config lock. - */ -static void ice_release_global_cfg_lock(struct ice_hw *hw) -{ - ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); -} - -/** - * ice_acquire_change_lock - * @hw: pointer to the HW structure - * @access: access type (read or write) - * - * This function will request ownership of the change lock. - */ -static enum ice_status -ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) -{ - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, - ICE_CHANGE_LOCK_TIMEOUT); -} - -/** - * ice_release_change_lock - * @hw: pointer to the HW structure - * - * This function will release the change lock using the proper Admin Command. 
- */ -static void ice_release_change_lock(struct ice_hw *hw) -{ - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); -} - -/** - * ice_aq_download_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer to transfer - * @buf_size: the size of the package buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Download Package (0x0C40) - */ -static enum ice_status -ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, bool last_buf, u32 *error_offset, - u32 *error_info, struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - enum ice_status status; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = LE32_TO_CPU(resp->error_offset); - if (error_info) - *error_info = LE32_TO_CPU(resp->error_info); - } - - return status; -} - -/** - * ice_aq_upload_section - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer which will receive the section - * @buf_size: the size of the package buffer - * @cd: pointer to command details structure or NULL - * - * Upload Section (0x0C41) - */ -enum ice_status -ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, struct ice_sq_cd *cd) -{ - 
struct ice_aq_desc desc; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); - - return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); -} - -/** - * ice_aq_update_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package cmd buffer - * @buf_size: the size of the package cmd buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Update Package (0x0C42) - */ -static enum ice_status -ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, - bool last_buf, u32 *error_offset, u32 *error_info, - struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - enum ice_status status; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = LE32_TO_CPU(resp->error_offset); - if (error_info) - *error_info = LE32_TO_CPU(resp->error_info); - } - - return status; -} - -/** - * ice_find_seg_in_pkg - * @hw: pointer to the hardware structure - * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. 
On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. - */ -static struct ice_generic_seg_hdr * -ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, - struct ice_pkg_hdr *pkg_hdr) -{ - u32 i; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", - pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, - pkg_hdr->pkg_format_ver.update, - pkg_hdr->pkg_format_ver.draft); - - /* Search all package segments for the requested segment type */ - for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { - struct ice_generic_seg_hdr *seg; - - seg = (struct ice_generic_seg_hdr *) - ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i])); - - if (LE32_TO_CPU(seg->seg_type) == seg_type) - return seg; - } - - return NULL; -} - -/** - * ice_update_pkg_no_lock - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - */ -static enum ice_status -ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_status status = ICE_SUCCESS; - u32 i; - - for (i = 0; i < count; i++) { - struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); - bool last = ((i + 1) == count); - u32 offset, info; - - status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), - last, &offset, &info, NULL); - - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", - status, offset, info); - break; - } - } - - return status; -} - -/** - * ice_update_pkg - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains change lock and updates package. 
- */ -enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_status status; - - status = ice_acquire_change_lock(hw, ICE_RES_WRITE); - if (status) - return status; - - status = ice_update_pkg_no_lock(hw, bufs, count); - - ice_release_change_lock(hw); - - return status; -} - -/** - * ice_dwnld_cfg_bufs - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains global config lock and downloads the package configuration buffers - * to the firmware. Metadata buffers are skipped, and the first metadata buffer - * found indicates that the rest of the buffers are all metadata buffers. - */ -static enum ice_status -ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_status status; - struct ice_buf_hdr *bh; - u32 offset, info, i; - - if (!bufs || !count) - return ICE_ERR_PARAM; - - /* If the first buffer's first section has its metadata bit set - * then there are no buffers to be downloaded, and the operation is - * considered a success. 
- */ - bh = (struct ice_buf_hdr *)bufs; - if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) - return ICE_SUCCESS; - - /* reset pkg_dwnld_status in case this function is called in the - * reset/rebuild flow - */ - hw->pkg_dwnld_status = ICE_AQ_RC_OK; - - status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); - if (status) { - if (status == ICE_ERR_AQ_NO_WORK) - hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; - else - hw->pkg_dwnld_status = hw->adminq.sq_last_status; - return status; - } - - for (i = 0; i < count; i++) { - bool last = ((i + 1) == count); - - if (!last) { - /* check next buffer for metadata flag */ - bh = (struct ice_buf_hdr *)(bufs + i + 1); - - /* A set metadata flag in the next buffer will signal - * that the current buffer will be the last buffer - * downloaded - */ - if (LE16_TO_CPU(bh->section_count)) - if (LE32_TO_CPU(bh->section_entry[0].type) & - ICE_METADATA_BUF) - last = true; - } - - bh = (struct ice_buf_hdr *)(bufs + i); - - status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, - &offset, &info, NULL); - - /* Save AQ status from download package */ - hw->pkg_dwnld_status = hw->adminq.sq_last_status; - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", - status, offset, info); - - break; - } - - if (last) - break; - } - - if (!status) { - status = ice_set_vlan_mode(hw); - if (status) - ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", - status); - } - - ice_release_global_cfg_lock(hw); - - return status; -} - -/** - * ice_aq_get_pkg_info_list - * @hw: pointer to the hardware structure - * @pkg_info: the buffer which will receive the information list - * @buf_size: the size of the pkg_info information buffer - * @cd: pointer to command details structure or NULL - * - * Get Package Info List (0x0C43) - */ -static enum ice_status -ice_aq_get_pkg_info_list(struct ice_hw *hw, - struct ice_aqc_get_pkg_info_resp *pkg_info, - u16 buf_size, struct ice_sq_cd *cd) -{ - struct 
ice_aq_desc desc; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); - - return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); -} - -/** - * ice_download_pkg - * @hw: pointer to the hardware structure - * @ice_seg: pointer to the segment of the package to be downloaded - * - * Handles the download of a complete package. - */ -static enum ice_status -ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_buf_table *ice_buf_tbl; - enum ice_status status; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", - ice_seg->hdr.seg_format_ver.major, - ice_seg->hdr.seg_format_ver.minor, - ice_seg->hdr.seg_format_ver.update, - ice_seg->hdr.seg_format_ver.draft); - - ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", - LE32_TO_CPU(ice_seg->hdr.seg_type), - LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); - - ice_buf_tbl = ice_find_buf_table(ice_seg); - - ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", - LE32_TO_CPU(ice_buf_tbl->buf_count)); - - status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - LE32_TO_CPU(ice_buf_tbl->buf_count)); - - ice_post_pkg_dwnld_vlan_mode_cfg(hw); - - return status; -} - -/** - * ice_init_pkg_info - * @hw: pointer to the hardware structure - * @pkg_hdr: pointer to the driver's package hdr - * - * Saves off the package details into the HW structure. 
- */ -static enum ice_status -ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) -{ - struct ice_generic_seg_hdr *seg_hdr; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - if (!pkg_hdr) - return ICE_ERR_PARAM; - - hw->pkg_seg_id = SEGMENT_TYPE_ICE_E810; - - ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n", - hw->pkg_seg_id); - - seg_hdr = (struct ice_generic_seg_hdr *) - ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); - if (seg_hdr) { - struct ice_meta_sect *meta; - struct ice_pkg_enum state; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - /* Get package information from the Metadata Section */ - meta = (struct ice_meta_sect *) - ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, - ICE_SID_METADATA); - if (!meta) { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); - return ICE_ERR_CFG; - } - - hw->pkg_ver = meta->ver; - ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name), - ICE_NONDMA_TO_NONDMA); - - ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", - meta->ver.major, meta->ver.minor, meta->ver.update, - meta->ver.draft, meta->name); - - hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; - ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id, - sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA); - - ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", - seg_hdr->seg_format_ver.major, - seg_hdr->seg_format_ver.minor, - seg_hdr->seg_format_ver.update, - seg_hdr->seg_format_ver.draft, - seg_hdr->seg_id); - } else { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); - return ICE_ERR_CFG; - } - - return ICE_SUCCESS; -} - -/** - * ice_get_pkg_info - * @hw: pointer to the hardware structure - * - * Store details of the package currently loaded in HW into the HW structure. 
- */ -static enum ice_status ice_get_pkg_info(struct ice_hw *hw) -{ - struct ice_aqc_get_pkg_info_resp *pkg_info; - enum ice_status status; - u16 size; - u32 i; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT); - pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); - if (!pkg_info) - return ICE_ERR_NO_MEMORY; - - status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); - if (status) - goto init_pkg_free_alloc; - - for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) { -#define ICE_PKG_FLAG_COUNT 4 - char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; - u8 place = 0; - - if (pkg_info->pkg_info[i].is_active) { - flags[place++] = 'A'; - hw->active_pkg_ver = pkg_info->pkg_info[i].ver; - hw->active_track_id = - LE32_TO_CPU(pkg_info->pkg_info[i].track_id); - ice_memcpy(hw->active_pkg_name, - pkg_info->pkg_info[i].name, - sizeof(pkg_info->pkg_info[i].name), - ICE_NONDMA_TO_NONDMA); - hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; - } - if (pkg_info->pkg_info[i].is_active_at_boot) - flags[place++] = 'B'; - if (pkg_info->pkg_info[i].is_modified) - flags[place++] = 'M'; - if (pkg_info->pkg_info[i].is_in_nvm) - flags[place++] = 'N'; - - ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", - i, pkg_info->pkg_info[i].ver.major, - pkg_info->pkg_info[i].ver.minor, - pkg_info->pkg_info[i].ver.update, - pkg_info->pkg_info[i].ver.draft, - pkg_info->pkg_info[i].name, flags); - } - -init_pkg_free_alloc: - ice_free(hw, pkg_info); - - return status; -} - -/** - * ice_find_label_value - * @ice_seg: pointer to the ice segment (non-NULL) - * @name: name of the label to search for - * @type: the section type that will contain the label - * @value: pointer to a value that will return the label's value if found - * - * Finds a label's value given the label name and the section type to search. 
- * The ice_seg parameter must not be NULL since the first call to - * ice_enum_labels requires a pointer to an actual ice_seg structure. - */ -enum ice_status -ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, - u16 *value) -{ - struct ice_pkg_enum state; - char *label_name; - u16 val; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!ice_seg) - return ICE_ERR_PARAM; - - do { - label_name = ice_enum_labels(ice_seg, type, &state, &val); - if (label_name && !strcmp(label_name, name)) { - *value = val; - return ICE_SUCCESS; - } - - ice_seg = NULL; - } while (label_name); - - return ICE_ERR_CFG; -} - -/** - * ice_verify_pkg - verify package - * @pkg: pointer to the package buffer - * @len: size of the package buffer - * - * Verifies various attributes of the package file, including length, format - * version, and the requirement of at least one segment. - */ -static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) -{ - u32 seg_count; - u32 i; - - if (len < ice_struct_size(pkg, seg_offset, 1)) - return ICE_ERR_BUF_TOO_SHORT; - - if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || - pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || - pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || - pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) - return ICE_ERR_CFG; - - /* pkg must have at least one segment */ - seg_count = LE32_TO_CPU(pkg->seg_count); - if (seg_count < 1) - return ICE_ERR_CFG; - - /* make sure segment array fits in package length */ - if (len < ice_struct_size(pkg, seg_offset, seg_count)) - return ICE_ERR_BUF_TOO_SHORT; - - /* all segments must fit within length */ - for (i = 0; i < seg_count; i++) { - u32 off = LE32_TO_CPU(pkg->seg_offset[i]); - struct ice_generic_seg_hdr *seg; - - /* segment header must fit */ - if (len < off + sizeof(*seg)) - return ICE_ERR_BUF_TOO_SHORT; - - seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); - - /* segment body must fit */ - if (len < off + 
LE32_TO_CPU(seg->seg_size)) - return ICE_ERR_BUF_TOO_SHORT; - } - - return ICE_SUCCESS; -} - -/** - * ice_free_seg - free package segment pointer - * @hw: pointer to the hardware structure - * - * Frees the package segment pointer in the proper manner, depending on if the - * segment was allocated or just the passed in pointer was stored. - */ -void ice_free_seg(struct ice_hw *hw) -{ - if (hw->pkg_copy) { - ice_free(hw, hw->pkg_copy); - hw->pkg_copy = NULL; - hw->pkg_size = 0; - } - hw->seg = NULL; -} - -/** - * ice_init_pkg_regs - initialize additional package registers - * @hw: pointer to the hardware structure - */ -static void ice_init_pkg_regs(struct ice_hw *hw) -{ -#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF -#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF -#define ICE_SW_BLK_IDX 0 - - /* setup Switch block input mask, which is 48-bits in two parts */ - wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); - wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); -} - -/** - * ice_chk_pkg_version - check package version for compatibility with driver - * @pkg_ver: pointer to a version structure to check - * - * Check to make sure that the package about to be downloaded is compatible with - * the driver. To be compatible, the major and minor components of the package - * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR - * definitions. 
- */ -static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) -{ - if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || - pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) - return ICE_ERR_NOT_SUPPORTED; - - return ICE_SUCCESS; -} - -/** - * ice_chk_pkg_compat - * @hw: pointer to the hardware structure - * @ospkg: pointer to the package hdr - * @seg: pointer to the package segment hdr - * - * This function checks the package version compatibility with driver and NVM - */ -static enum ice_status -ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, - struct ice_seg **seg) -{ - struct ice_aqc_get_pkg_info_resp *pkg; - enum ice_status status; - u16 size; - u32 i; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - /* Check package version compatibility */ - status = ice_chk_pkg_version(&hw->pkg_ver); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); - return status; - } - - /* find ICE segment in given package */ - *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, - ospkg); - if (!*seg) { - ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); - return ICE_ERR_CFG; - } - - /* Check if FW is compatible with the OS package */ - size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT); - pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); - if (!pkg) - return ICE_ERR_NO_MEMORY; - - status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); - if (status) - goto fw_ddp_compat_free_alloc; - - for (i = 0; i < LE32_TO_CPU(pkg->count); i++) { - /* loop till we find the NVM package */ - if (!pkg->pkg_info[i].is_in_nvm) - continue; - if ((*seg)->hdr.seg_format_ver.major != - pkg->pkg_info[i].ver.major || - (*seg)->hdr.seg_format_ver.minor > - pkg->pkg_info[i].ver.minor) { - status = ICE_ERR_FW_DDP_MISMATCH; - ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); - } - /* done processing NVM package so break */ - break; - } -fw_ddp_compat_free_alloc: - ice_free(hw, pkg); - return status; -} 
- -/** - * ice_sw_fv_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the field vector entry to be returned - * @offset: ptr to variable that receives the offset in the field vector table - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * This function treats the given section as of type ice_sw_fv_section and - * enumerates offset field. "offset" is an index into the field vector table. - */ -static void * -ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_sw_fv_section *fv_section = - (struct ice_sw_fv_section *)section; - - if (!section || sect_type != ICE_SID_FLD_VEC_SW) - return NULL; - if (index >= LE16_TO_CPU(fv_section->count)) - return NULL; - if (offset) - /* "index" passed in to this function is relative to a given - * 4k block. To get to the true index into the field vector - * table need to add the relative index to the base_offset - * field of this section - */ - *offset = LE16_TO_CPU(fv_section->base_offset) + index; - return fv_section->fv + index; -} - -/** - * ice_get_prof_index_max - get the max profile index for used profile - * @hw: pointer to the HW struct - * - * Calling this function will get the max profile index for used profile - * and store the index number in struct ice_switch_info *switch_info - * in hw for following use. 
- */ -static int ice_get_prof_index_max(struct ice_hw *hw) -{ - u16 prof_index = 0, j, max_prof_index = 0; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - bool flag = false; - struct ice_fv *fv; - u32 offset; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!hw->seg) - return ICE_ERR_PARAM; - - ice_seg = hw->seg; - - do { - fv = (struct ice_fv *) - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* in the profile that not be used, the prot_id is set to 0xff - * and the off is set to 0x1ff for all the field vectors. - */ - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id != ICE_PROT_INVALID || - fv->ew[j].off != ICE_FV_OFFSET_INVAL) - flag = true; - if (flag && prof_index > max_prof_index) - max_prof_index = prof_index; - - prof_index++; - flag = false; - } while (fv); - - hw->switch_info->max_used_prof_index = max_prof_index; - - return ICE_SUCCESS; -} - -/** - * ice_init_pkg - initialize/download package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function initializes a package. The package contains HW tables - * required to do packet processing. First, the function extracts package - * information such as version. Then it finds the ice configuration segment - * within the package; this function then saves a copy of the segment pointer - * within the supplied package buffer. Next, the function will cache any hints - * from the package, followed by downloading the package itself. Note, that if - * a previous PF driver has already downloaded the package successfully, then - * the current driver will not have to download the package again. - * - * The local package contents will be used to query default behavior and to - * update specific sections of the HW's version of the package (e.g. to update - * the parse graph to understand new protocols). 
- * - * This function stores a pointer to the package buffer memory, and it is - * expected that the supplied buffer will not be freed immediately. If the - * package buffer needs to be freed, such as when read from a file, use - * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this - * case. - */ -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) -{ - struct ice_pkg_hdr *pkg; - enum ice_status status; - struct ice_seg *seg; - - if (!buf || !len) - return ICE_ERR_PARAM; - - pkg = (struct ice_pkg_hdr *)buf; - status = ice_verify_pkg(pkg, len); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", - status); - return status; - } - - /* initialize package info */ - status = ice_init_pkg_info(hw, pkg); - if (status) - return status; - - /* before downloading the package, check package version for - * compatibility with driver - */ - status = ice_chk_pkg_compat(hw, pkg, &seg); - if (status) - return status; - - /* initialize package hints and then download package */ - ice_init_pkg_hints(hw, seg); - status = ice_download_pkg(hw, seg); - if (status == ICE_ERR_AQ_NO_WORK) { - ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); - status = ICE_SUCCESS; - } - - /* Get information on the package currently loaded in HW, then make sure - * the driver is compatible with this version. - */ - if (!status) { - status = ice_get_pkg_info(hw); - if (!status) - status = ice_chk_pkg_version(&hw->active_pkg_ver); - } - - if (!status) { - hw->seg = seg; - /* on successful package download update other required - * registers to support the package and fill HW tables - * with package content. 
- */ - ice_init_pkg_regs(hw); - ice_fill_blk_tbls(hw); - ice_get_prof_index_max(hw); - } else { - ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", - status); - } - - return status; -} - -/** - * ice_copy_and_init_pkg - initialize/download a copy of the package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function copies the package buffer, and then calls ice_init_pkg() to - * initialize the copied package contents. - * - * The copying is necessary if the package buffer supplied is constant, or if - * the memory may disappear shortly after calling this function. - * - * If the package buffer resides in the data segment and can be modified, the - * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). - * - * However, if the package buffer needs to be copied first, such as when being - * read from a file, the caller should use ice_copy_and_init_pkg(). - * - * This function will first copy the package buffer, before calling - * ice_init_pkg(). The caller is free to immediately destroy the original - * package buffer, as the new copy will be managed by this function and - * related routines. - */ -enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) -{ - enum ice_status status; - u8 *buf_copy; - - if (!buf || !len) - return ICE_ERR_PARAM; - - buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA); - - status = ice_init_pkg(hw, buf_copy, len); - if (status) { - /* Free the copy, since we failed to initialize the package */ - ice_free(hw, buf_copy); - } else { - /* Track the copied pkg so we can free it later */ - hw->pkg_copy = buf_copy; - hw->pkg_size = len; - } - - return status; -} - -/** - * ice_pkg_buf_alloc - * @hw: pointer to the HW structure - * - * Allocates a package buffer and returns a pointer to the buffer header. - * Note: all package contents must be in Little Endian form. 
- */ -static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) -{ - struct ice_buf_build *bld; - struct ice_buf_hdr *buf; - - bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld)); - if (!bld) - return NULL; - - buf = (struct ice_buf_hdr *)bld; - buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr, - section_entry)); - return bld; -} - -/** - * ice_get_sw_prof_type - determine switch profile type - * @hw: pointer to the HW structure - * @fv: pointer to the switch field vector - */ -static enum ice_prof_type -ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv) -{ - u16 i; - - for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { - /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && - fv->ew[i].off == ICE_VNI_OFFSET) - return ICE_PROF_TUN_UDP; - - /* GRE tunnel will have GRE protocol */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) - return ICE_PROF_TUN_GRE; - } - - return ICE_PROF_NON_TUN; -} - -/** - * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type - * @hw: pointer to hardware structure - * @req_profs: type of profiles requested - * @bm: pointer to memory for returning the bitmap of field vectors - */ -void -ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, - ice_bitmap_t *bm) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - if (req_profs == ICE_PROF_ALL) { - ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); - return; - } - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); - ice_seg = hw->seg; - do { - enum ice_prof_type prof_type; - u32 offset; - - fv = (struct ice_fv *) - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - ice_seg = NULL; - - if (fv) { - /* Determine field vector type */ - prof_type = ice_get_sw_prof_type(hw, fv); - - if (req_profs & prof_type) - ice_set_bit((u16)offset, bm); - } - } while (fv); -} - 
-/** - * ice_get_sw_fv_list - * @hw: pointer to the HW structure - * @prot_ids: field vector to search for with a given protocol ID - * @ids_cnt: lookup/protocol count - * @bm: bitmap of field vectors to consider - * @fv_list: Head of a list - * - * Finds all the field vector entries from switch block that contain - * a given protocol ID and returns a list of structures of type - * "ice_sw_fv_list_entry". Every structure in the list has a field vector - * definition and profile ID information - * NOTE: The caller of the function is responsible for freeing the memory - * allocated for every list entry. - */ -enum ice_status -ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, - ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) -{ - struct ice_sw_fv_list_entry *fvl; - struct ice_sw_fv_list_entry *tmp; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - u32 offset; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!ids_cnt || !hw->seg) - return ICE_ERR_PARAM; - - ice_seg = hw->seg; - do { - u16 i; - - fv = (struct ice_fv *) - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* If field vector is not in the bitmap list, then skip this - * profile. - */ - if (!ice_is_bit_set(bm, (u16)offset)) - continue; - - for (i = 0; i < ids_cnt; i++) { - int j; - - /* This code assumes that if a switch field vector line - * has a matching protocol, then this line will contain - * the entries necessary to represent every field in - * that protocol header. 
- */ - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id == prot_ids[i]) - break; - if (j >= hw->blk[ICE_BLK_SW].es.fvw) - break; - if (i + 1 == ids_cnt) { - fvl = (struct ice_sw_fv_list_entry *) - ice_malloc(hw, sizeof(*fvl)); - if (!fvl) - goto err; - fvl->fv_ptr = fv; - fvl->profile_id = offset; - LIST_ADD(&fvl->list_entry, fv_list); - break; - } - } - } while (fv); - if (LIST_EMPTY(fv_list)) - return ICE_ERR_CFG; - return ICE_SUCCESS; - -err: - LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry, - list_entry) { - LIST_DEL(&fvl->list_entry); - ice_free(hw, fvl); - } - - return ICE_ERR_NO_MEMORY; -} - -/** - * ice_init_prof_result_bm - Initialize the profile result index bitmap - * @hw: pointer to hardware structure - */ -void ice_init_prof_result_bm(struct ice_hw *hw) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!hw->seg) - return; - - ice_seg = hw->seg; - do { - u32 off; - u16 i; - - fv = (struct ice_fv *) - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &off, ice_sw_fv_handler); - ice_seg = NULL; - if (!fv) - break; - - ice_zero_bitmap(hw->switch_info->prof_res_bm[off], - ICE_MAX_FV_WORDS); - - /* Determine empty field vector indices, these can be - * used for recipe results. Skip index 0, since it is - * always used for Switch ID. 
- */ - for (i = 1; i < ICE_MAX_FV_WORDS; i++) - if (fv->ew[i].prot_id == ICE_PROT_INVALID && - fv->ew[i].off == ICE_FV_OFFSET_INVAL) - ice_set_bit(i, - hw->switch_info->prof_res_bm[off]); - } while (fv); -} - -/** - * ice_pkg_buf_free - * @hw: pointer to the HW structure - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Frees a package buffer - */ -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) -{ - ice_free(hw, bld); -} - -/** - * ice_pkg_buf_reserve_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @count: the number of sections to reserve - * - * Reserves one or more section table entries in a package buffer. This routine - * can be called multiple times as long as they are made before calling - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() - * is called once, the number of sections that can be allocated will not be able - * to be increased; not using all reserved sections is fine, but this will - * result in some wasted space in the buffer. - * Note: all package contents must be in Little Endian form. 
- */ -static enum ice_status -ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) -{ - struct ice_buf_hdr *buf; - u16 section_count; - u16 data_end; - - if (!bld) - return ICE_ERR_PARAM; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* already an active section, can't increase table size */ - section_count = LE16_TO_CPU(buf->section_count); - if (section_count > 0) - return ICE_ERR_CFG; - - if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) - return ICE_ERR_CFG; - bld->reserved_section_table_entries += count; - - data_end = LE16_TO_CPU(buf->data_end) + - FLEX_ARRAY_SIZE(buf, section_entry, count); - buf->data_end = CPU_TO_LE16(data_end); - - return ICE_SUCCESS; -} - -/** - * ice_pkg_buf_alloc_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * - * Reserves memory in the buffer for a section's content and updates the - * buffers' status accordingly. This routine returns a pointer to the first - * byte of the section start within the buffer, which is used to fill in the - * section contents. - * Note: all package contents must be in Little Endian form. 
- */ -static void * -ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) -{ - struct ice_buf_hdr *buf; - u16 sect_count; - u16 data_end; - - if (!bld || !type || !size) - return NULL; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* check for enough space left in buffer */ - data_end = LE16_TO_CPU(buf->data_end); - - /* section start must align on 4 byte boundary */ - data_end = ICE_ALIGN(data_end, 4); - - if ((data_end + size) > ICE_MAX_S_DATA_END) - return NULL; - - /* check for more available section table entries */ - sect_count = LE16_TO_CPU(buf->section_count); - if (sect_count < bld->reserved_section_table_entries) { - void *section_ptr = ((u8 *)buf) + data_end; - - buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end); - buf->section_entry[sect_count].size = CPU_TO_LE16(size); - buf->section_entry[sect_count].type = CPU_TO_LE32(type); - - data_end += size; - buf->data_end = CPU_TO_LE16(data_end); - - buf->section_count = CPU_TO_LE16(sect_count + 1); - return section_ptr; - } - - /* no free section table entries */ - return NULL; -} - -/** - * ice_pkg_buf_alloc_single_section - * @hw: pointer to the HW structure - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * @section: returns pointer to the section - * - * Allocates a package buffer with a single section. - * Note: all package contents must be in Little Endian form. 
- */ -struct ice_buf_build * -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, - void **section) -{ - struct ice_buf_build *buf; - - if (!section) - return NULL; - - buf = ice_pkg_buf_alloc(hw); - if (!buf) - return NULL; - - if (ice_pkg_buf_reserve_section(buf, 1)) - goto ice_pkg_buf_alloc_single_section_err; - - *section = ice_pkg_buf_alloc_section(buf, type, size); - if (!*section) - goto ice_pkg_buf_alloc_single_section_err; - - return buf; + if (count > max) + return false; + } -ice_pkg_buf_alloc_single_section_err: - ice_pkg_buf_free(hw, buf); - return NULL; + return true; } /** - * ice_pkg_buf_unreserve_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @count: the number of sections to unreserve + * ice_set_key - generate a variable sized key with multiples of 16-bits + * @key: pointer to where the key will be stored + * @size: the size of the complete key in bytes (must be even) + * @val: array of 8-bit values that makes up the value portion of the key + * @upd: array of 8-bit masks that determine what key portion to update + * @dc: array of 8-bit masks that make up the don't care mask + * @nm: array of 8-bit masks that make up the never match mask + * @off: the offset of the first byte in the key to update + * @len: the number of bytes in the key update * - * Unreserves one or more section table entries in a package buffer, releasing - * space that can be used for section data. This routine can be called - * multiple times as long as they are made before calling - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() - * is called once, the number of sections that can be allocated will not be able - * to be increased; not using all reserved sections is fine, but this will - * result in some wasted space in the buffer. - * Note: all package contents must be in Little Endian form. + * This function generates a key from a value, a don't care mask and a never + * match mask. 
+ * upd, dc, and nm are optional parameters, and can be NULL: + * upd == NULL --> upd mask is all 1's (update all bits) + * dc == NULL --> dc mask is all 0's (no don't care bits) + * nm == NULL --> nm mask is all 0's (no never match bits) */ -enum ice_status -ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) +static enum ice_status +ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, + u16 len) { - struct ice_buf_hdr *buf; - u16 section_count; - u16 data_end; - - if (!bld) - return ICE_ERR_PARAM; + u16 half_size; + u16 i; - buf = (struct ice_buf_hdr *)&bld->buf; + /* size must be a multiple of 2 bytes. */ + if (size % 2) + return ICE_ERR_CFG; + half_size = size / 2; - /* already an active section, can't decrease table size */ - section_count = LE16_TO_CPU(buf->section_count); - if (section_count > 0) + if (off + len > half_size) return ICE_ERR_CFG; - if (count > bld->reserved_section_table_entries) + /* Make sure at most one bit is set in the never match mask. Having more + * than one never match mask bit set will cause HW to consume excessive + * power otherwise; this is a power management efficiency check. + */ +#define ICE_NVR_MTCH_BITS_MAX 1 + if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) return ICE_ERR_CFG; - bld->reserved_section_table_entries -= count; - data_end = LE16_TO_CPU(buf->data_end) - - FLEX_ARRAY_SIZE(buf, section_entry, count); - buf->data_end = CPU_TO_LE16(data_end); + for (i = 0; i < len; i++) + if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, + dc ? dc[i] : 0, nm ? nm[i] : 0, + key + off + i, key + half_size + off + i)) + return ICE_ERR_CFG; return ICE_SUCCESS; } -/** - * ice_pkg_buf_get_free_space - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Returns the number of free bytes remaining in the buffer. - * Note: all package contents must be in Little Endian form. 
- */ -u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld) -{ - struct ice_buf_hdr *buf; - - if (!bld) - return 0; - - buf = (struct ice_buf_hdr *)&bld->buf; - return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end); -} - -/** - * ice_pkg_buf_get_active_sections - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Returns the number of active sections. Before using the package buffer - * in an update package command, the caller should make sure that there is at - * least one active section - otherwise, the buffer is not legal and should - * not be used. - * Note: all package contents must be in Little Endian form. - */ -static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) -{ - struct ice_buf_hdr *buf; - - if (!bld) - return 0; - - buf = (struct ice_buf_hdr *)&bld->buf; - return LE16_TO_CPU(buf->section_count); -} - -/** - * ice_pkg_buf - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Return a pointer to the buffer's header - */ -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) -{ - if (!bld) - return NULL; - - return &bld->buf; -} - /** * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage * @hw: pointer to the HW structure @@ -3528,101 +1698,229 @@ return; } - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + sect = ice_pkg_enum_section(hw->seg, &state, sid); + + while (sect) { + switch (sid) { + case ICE_SID_XLT1_SW: + case ICE_SID_XLT1_FD: + case ICE_SID_XLT1_RSS: + case ICE_SID_XLT1_ACL: + case ICE_SID_XLT1_PE: + xlt1 = (struct ice_xlt1_section *)sect; + src = xlt1->value; + sect_len = LE16_TO_CPU(xlt1->count) * + sizeof(*hw->blk[block_id].xlt1.t); + dst = hw->blk[block_id].xlt1.t; + dst_len = hw->blk[block_id].xlt1.count * + sizeof(*hw->blk[block_id].xlt1.t); + break; + case ICE_SID_XLT2_SW: + case ICE_SID_XLT2_FD: + case ICE_SID_XLT2_RSS: + case ICE_SID_XLT2_ACL: + case ICE_SID_XLT2_PE: + xlt2 = (struct 
ice_xlt2_section *)sect; + src = (_FORCE_ u8 *)xlt2->value; + sect_len = LE16_TO_CPU(xlt2->count) * + sizeof(*hw->blk[block_id].xlt2.t); + dst = (u8 *)hw->blk[block_id].xlt2.t; + dst_len = hw->blk[block_id].xlt2.count * + sizeof(*hw->blk[block_id].xlt2.t); + break; + case ICE_SID_PROFID_TCAM_SW: + case ICE_SID_PROFID_TCAM_FD: + case ICE_SID_PROFID_TCAM_RSS: + case ICE_SID_PROFID_TCAM_ACL: + case ICE_SID_PROFID_TCAM_PE: + pid = (struct ice_prof_id_section *)sect; + src = (u8 *)pid->entry; + sect_len = LE16_TO_CPU(pid->count) * + sizeof(*hw->blk[block_id].prof.t); + dst = (u8 *)hw->blk[block_id].prof.t; + dst_len = hw->blk[block_id].prof.count * + sizeof(*hw->blk[block_id].prof.t); + break; + case ICE_SID_PROFID_REDIR_SW: + case ICE_SID_PROFID_REDIR_FD: + case ICE_SID_PROFID_REDIR_RSS: + case ICE_SID_PROFID_REDIR_ACL: + case ICE_SID_PROFID_REDIR_PE: + pr = (struct ice_prof_redir_section *)sect; + src = pr->redir_value; + sect_len = LE16_TO_CPU(pr->count) * + sizeof(*hw->blk[block_id].prof_redir.t); + dst = hw->blk[block_id].prof_redir.t; + dst_len = hw->blk[block_id].prof_redir.count * + sizeof(*hw->blk[block_id].prof_redir.t); + break; + case ICE_SID_FLD_VEC_SW: + case ICE_SID_FLD_VEC_FD: + case ICE_SID_FLD_VEC_RSS: + case ICE_SID_FLD_VEC_ACL: + case ICE_SID_FLD_VEC_PE: + es = (struct ice_sw_fv_section *)sect; + src = (u8 *)es->fv; + sect_len = (u32)(LE16_TO_CPU(es->count) * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + dst = (u8 *)hw->blk[block_id].es.t; + dst_len = (u32)(hw->blk[block_id].es.count * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + break; + default: + return; + } + + /* if the section offset exceeds destination length, terminate + * table fill. + */ + if (offset > dst_len) + return; + + /* if the sum of section size and offset exceed destination size + * then we are out of bounds of the HW table size for that PF. + * Changing section length to fill the remaining table space + * of that PF. 
+ */ + if ((offset + sect_len) > dst_len) + sect_len = dst_len - offset; + + ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA); + offset += sect_len; + sect = ice_pkg_enum_section(NULL, &state, sid); + } +} + +/** + * ice_init_flow_profs - init flow profile locks and list heads + * @hw: pointer to the hardware structure + * @blk_idx: HW block index + */ +static +void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) +{ + ice_init_lock(&hw->fl_profs_locks[blk_idx]); + INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); +} + +/** + * ice_init_hw_tbls - init hardware table memory + * @hw: pointer to the hardware structure + */ +enum ice_status ice_init_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + ice_init_lock(&hw->rss_locks); + INIT_LIST_HEAD(&hw->rss_list_head); + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + u16 j; + + if (hw->blk[i].is_list_init) + continue; + + ice_init_flow_profs(hw, i); + ice_init_lock(&es->prof_map_lock); + INIT_LIST_HEAD(&es->prof_map); + hw->blk[i].is_list_init = true; + + hw->blk[i].overwrite = blk_sizes[i].overwrite; + es->reverse = blk_sizes[i].reverse; + + xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; + xlt1->count = blk_sizes[i].xlt1; + + xlt1->ptypes = (struct ice_ptg_ptype *) + ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes)); + + if (!xlt1->ptypes) + goto err; + + xlt1->ptg_tbl = (struct ice_ptg_entry *) + ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl)); + + if (!xlt1->ptg_tbl) + goto err; + + xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t)); + if (!xlt1->t) + goto err; + + xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; + xlt2->count = blk_sizes[i].xlt2; + + xlt2->vsis = (struct ice_vsig_vsi *) + ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis)); + + if (!xlt2->vsis) + goto err; + + 
xlt2->vsig_tbl = (struct ice_vsig_entry *) + ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl)); + if (!xlt2->vsig_tbl) + goto err; + + for (j = 0; j < xlt2->count; j++) + INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); + + xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t)); + if (!xlt2->t) + goto err; + + prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; + prof->count = blk_sizes[i].prof_tcam; + prof->max_prof_id = blk_sizes[i].prof_id; + prof->cdid_bits = blk_sizes[i].prof_cdid_bits; + prof->t = (struct ice_prof_tcam_entry *) + ice_calloc(hw, prof->count, sizeof(*prof->t)); + + if (!prof->t) + goto err; + + prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; + prof_redir->count = blk_sizes[i].prof_redir; + prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count, + sizeof(*prof_redir->t)); - sect = ice_pkg_enum_section(hw->seg, &state, sid); + if (!prof_redir->t) + goto err; - while (sect) { - switch (sid) { - case ICE_SID_XLT1_SW: - case ICE_SID_XLT1_FD: - case ICE_SID_XLT1_RSS: - case ICE_SID_XLT1_ACL: - case ICE_SID_XLT1_PE: - xlt1 = (struct ice_xlt1_section *)sect; - src = xlt1->value; - sect_len = LE16_TO_CPU(xlt1->count) * - sizeof(*hw->blk[block_id].xlt1.t); - dst = hw->blk[block_id].xlt1.t; - dst_len = hw->blk[block_id].xlt1.count * - sizeof(*hw->blk[block_id].xlt1.t); - break; - case ICE_SID_XLT2_SW: - case ICE_SID_XLT2_FD: - case ICE_SID_XLT2_RSS: - case ICE_SID_XLT2_ACL: - case ICE_SID_XLT2_PE: - xlt2 = (struct ice_xlt2_section *)sect; - src = (_FORCE_ u8 *)xlt2->value; - sect_len = LE16_TO_CPU(xlt2->count) * - sizeof(*hw->blk[block_id].xlt2.t); - dst = (u8 *)hw->blk[block_id].xlt2.t; - dst_len = hw->blk[block_id].xlt2.count * - sizeof(*hw->blk[block_id].xlt2.t); - break; - case ICE_SID_PROFID_TCAM_SW: - case ICE_SID_PROFID_TCAM_FD: - case ICE_SID_PROFID_TCAM_RSS: - case ICE_SID_PROFID_TCAM_ACL: - case ICE_SID_PROFID_TCAM_PE: - pid = (struct ice_prof_id_section *)sect; - src = (u8 *)pid->entry; - sect_len = LE16_TO_CPU(pid->count) * - 
sizeof(*hw->blk[block_id].prof.t); - dst = (u8 *)hw->blk[block_id].prof.t; - dst_len = hw->blk[block_id].prof.count * - sizeof(*hw->blk[block_id].prof.t); - break; - case ICE_SID_PROFID_REDIR_SW: - case ICE_SID_PROFID_REDIR_FD: - case ICE_SID_PROFID_REDIR_RSS: - case ICE_SID_PROFID_REDIR_ACL: - case ICE_SID_PROFID_REDIR_PE: - pr = (struct ice_prof_redir_section *)sect; - src = pr->redir_value; - sect_len = LE16_TO_CPU(pr->count) * - sizeof(*hw->blk[block_id].prof_redir.t); - dst = hw->blk[block_id].prof_redir.t; - dst_len = hw->blk[block_id].prof_redir.count * - sizeof(*hw->blk[block_id].prof_redir.t); - break; - case ICE_SID_FLD_VEC_SW: - case ICE_SID_FLD_VEC_FD: - case ICE_SID_FLD_VEC_RSS: - case ICE_SID_FLD_VEC_ACL: - case ICE_SID_FLD_VEC_PE: - es = (struct ice_sw_fv_section *)sect; - src = (u8 *)es->fv; - sect_len = (u32)(LE16_TO_CPU(es->count) * - hw->blk[block_id].es.fvw) * - sizeof(*hw->blk[block_id].es.t); - dst = (u8 *)hw->blk[block_id].es.t; - dst_len = (u32)(hw->blk[block_id].es.count * - hw->blk[block_id].es.fvw) * - sizeof(*hw->blk[block_id].es.t); - break; - default: - return; - } + es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; + es->count = blk_sizes[i].es; + es->fvw = blk_sizes[i].fvw; + es->t = (struct ice_fv_word *) + ice_calloc(hw, (u32)(es->count * es->fvw), + sizeof(*es->t)); + if (!es->t) + goto err; - /* if the section offset exceeds destination length, terminate - * table fill. - */ - if (offset > dst_len) - return; + es->ref_count = (u16 *) + ice_calloc(hw, es->count, sizeof(*es->ref_count)); - /* if the sum of section size and offset exceed destination size - * then we are out of bounds of the HW table size for that PF. - * Changing section length to fill the remaining table space - * of that PF. 
- */ - if ((offset + sect_len) > dst_len) - sect_len = dst_len - offset; + if (!es->ref_count) + goto err; + + es->written = (u8 *) + ice_calloc(hw, es->count, sizeof(*es->written)); + + if (!es->written) + goto err; - ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA); - offset += sect_len; - sect = ice_pkg_enum_section(NULL, &state, sid); } + return ICE_SUCCESS; + +err: + ice_free_hw_tbls(hw); + return ICE_ERR_NO_MEMORY; } /** @@ -3755,17 +2053,6 @@ ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM); } -/** - * ice_init_flow_profs - init flow profile locks and list heads - * @hw: pointer to the hardware structure - * @blk_idx: HW block index - */ -static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) -{ - ice_init_lock(&hw->fl_profs_locks[blk_idx]); - INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); -} - /** * ice_clear_hw_tbls - clear HW tables and flow profiles * @hw: pointer to the hardware structure @@ -3788,151 +2075,59 @@ ice_free_vsig_tbl(hw, (enum ice_block)i); - ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes), - ICE_NONDMA_MEM); - ice_memset(xlt1->ptg_tbl, 0, - ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl), - ICE_NONDMA_MEM); - ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t), - ICE_NONDMA_MEM); - - ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis), - ICE_NONDMA_MEM); - ice_memset(xlt2->vsig_tbl, 0, - xlt2->count * sizeof(*xlt2->vsig_tbl), - ICE_NONDMA_MEM); - ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t), - ICE_NONDMA_MEM); - - ice_memset(prof->t, 0, prof->count * sizeof(*prof->t), - ICE_NONDMA_MEM); - ice_memset(prof_redir->t, 0, - prof_redir->count * sizeof(*prof_redir->t), - ICE_NONDMA_MEM); - - ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw, - ICE_NONDMA_MEM); - ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count), - ICE_NONDMA_MEM); - ice_memset(es->written, 0, es->count * sizeof(*es->written), - ICE_NONDMA_MEM); - } -} - -/** - * ice_init_hw_tbls - init hardware table memory - * 
@hw: pointer to the hardware structure - */ -enum ice_status ice_init_hw_tbls(struct ice_hw *hw) -{ - u8 i; - - ice_init_lock(&hw->rss_locks); - INIT_LIST_HEAD(&hw->rss_list_head); - for (i = 0; i < ICE_BLK_COUNT; i++) { - struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; - struct ice_prof_tcam *prof = &hw->blk[i].prof; - struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; - struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; - struct ice_es *es = &hw->blk[i].es; - u16 j; - - if (hw->blk[i].is_list_init) - continue; - - ice_init_flow_profs(hw, i); - ice_init_lock(&es->prof_map_lock); - INIT_LIST_HEAD(&es->prof_map); - hw->blk[i].is_list_init = true; - - hw->blk[i].overwrite = blk_sizes[i].overwrite; - es->reverse = blk_sizes[i].reverse; - - xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; - xlt1->count = blk_sizes[i].xlt1; - - xlt1->ptypes = (struct ice_ptg_ptype *) - ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes)); - - if (!xlt1->ptypes) - goto err; - - xlt1->ptg_tbl = (struct ice_ptg_entry *) - ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl)); - - if (!xlt1->ptg_tbl) - goto err; - - xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t)); - if (!xlt1->t) - goto err; - - xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; - xlt2->count = blk_sizes[i].xlt2; - - xlt2->vsis = (struct ice_vsig_vsi *) - ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis)); - - if (!xlt2->vsis) - goto err; - - xlt2->vsig_tbl = (struct ice_vsig_entry *) - ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl)); - if (!xlt2->vsig_tbl) - goto err; + if (xlt1->ptypes) + ice_memset(xlt1->ptypes, 0, + xlt1->count * sizeof(*xlt1->ptypes), + ICE_NONDMA_MEM); - for (j = 0; j < xlt2->count; j++) - INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); - - xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t)); - if (!xlt2->t) - goto err; + if (xlt1->ptg_tbl) + ice_memset(xlt1->ptg_tbl, 0, + ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl), + ICE_NONDMA_MEM); - prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; - prof->count = 
blk_sizes[i].prof_tcam; - prof->max_prof_id = blk_sizes[i].prof_id; - prof->cdid_bits = blk_sizes[i].prof_cdid_bits; - prof->t = (struct ice_prof_tcam_entry *) - ice_calloc(hw, prof->count, sizeof(*prof->t)); + if (xlt1->t) + ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t), + ICE_NONDMA_MEM); - if (!prof->t) - goto err; + if (xlt2->vsis) + ice_memset(xlt2->vsis, 0, + xlt2->count * sizeof(*xlt2->vsis), + ICE_NONDMA_MEM); - prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; - prof_redir->count = blk_sizes[i].prof_redir; - prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count, - sizeof(*prof_redir->t)); + if (xlt2->vsig_tbl) + ice_memset(xlt2->vsig_tbl, 0, + xlt2->count * sizeof(*xlt2->vsig_tbl), + ICE_NONDMA_MEM); - if (!prof_redir->t) - goto err; + if (xlt2->t) + ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t), + ICE_NONDMA_MEM); - es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; - es->count = blk_sizes[i].es; - es->fvw = blk_sizes[i].fvw; - es->t = (struct ice_fv_word *) - ice_calloc(hw, (u32)(es->count * es->fvw), - sizeof(*es->t)); - if (!es->t) - goto err; + if (prof->t) + ice_memset(prof->t, 0, prof->count * sizeof(*prof->t), + ICE_NONDMA_MEM); - es->ref_count = (u16 *) - ice_calloc(hw, es->count, sizeof(*es->ref_count)); + if (prof_redir->t) + ice_memset(prof_redir->t, 0, + prof_redir->count * sizeof(*prof_redir->t), + ICE_NONDMA_MEM); - if (!es->ref_count) - goto err; + if (es->t) + ice_memset(es->t, 0, + es->count * sizeof(*es->t) * es->fvw, + ICE_NONDMA_MEM); - es->written = (u8 *) - ice_calloc(hw, es->count, sizeof(*es->written)); + if (es->ref_count) + ice_memset(es->ref_count, 0, + es->count * sizeof(*es->ref_count), + ICE_NONDMA_MEM); - if (!es->written) - goto err; + if (es->written) + ice_memset(es->written, 0, + es->count * sizeof(*es->written), + ICE_NONDMA_MEM); } - return ICE_SUCCESS; - -err: - ice_free_hw_tbls(hw); - return ICE_ERR_NO_MEMORY; } /** @@ -4338,7 +2533,7 @@ * @hw: pointer to the HW struct * @blk: hardware block * @id: 
profile tracking ID - * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) + * @ptypes: bitmap indicating ptypes (ICE_FLOW_PTYPE_MAX bits) * @es: extraction sequence (length of array is determined by the block) * * This function registers a profile, which matches a set of PTGs with a @@ -4347,15 +2542,14 @@ * the ID value used here. */ enum ice_status -ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - struct ice_fv_word *es) +ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, + ice_bitmap_t *ptypes, struct ice_fv_word *es) { - u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); struct ice_prof_map *prof; enum ice_status status; - u8 byte = 0; u8 prof_id; + u16 ptype; ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); @@ -4387,42 +2581,24 @@ prof->context = 0; /* build list of ptgs */ - while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { - u8 bit; + ice_for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) { + u8 ptg; - if (!ptypes[byte]) { - bytes--; - byte++; + /* The package should place all ptypes in a non-zero + * PTG, so the following call should never fail. + */ + if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) continue; - } - - /* Examine 8 bits per byte */ - ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte], - BITS_PER_BYTE) { - u16 ptype; - u8 ptg; - - ptype = byte * BITS_PER_BYTE + bit; - - /* The package should place all ptypes in a non-zero - * PTG, so the following call should never fail. 
- */ - if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) - continue; - - /* If PTG is already added, skip and continue */ - if (ice_is_bit_set(ptgs_used, ptg)) - continue; - ice_set_bit(ptg, ptgs_used); - prof->ptg[prof->ptg_cnt] = ptg; + /* If PTG is already added, skip and continue */ + if (ice_is_bit_set(ptgs_used, ptg)) + continue; - if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) - break; - } + ice_set_bit(ptg, ptgs_used); + prof->ptg[prof->ptg_cnt] = ptg; - bytes--; - byte++; + if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) + break; } LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map); @@ -4588,12 +2764,13 @@ u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *vsi_cur; struct ice_vsig_prof *d, *t; - enum ice_status status; /* remove TCAM entries */ LIST_FOR_EACH_ENTRY_SAFE(d, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) { + enum ice_status status; + status = ice_rem_prof_id(hw, blk, d); if (status) return status; @@ -4619,7 +2796,7 @@ p->type = ICE_VSIG_REM; p->orig_vsig = vsig; p->vsig = ICE_DEFAULT_VSIG; - p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis; + p->vsi = (u16)(vsi_cur - hw->blk[blk].xlt2.vsis); LIST_ADD(&p->list_entry, chg); @@ -4643,12 +2820,13 @@ { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *p, *t; - enum ice_status status; LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) if (p->profile_cookie == hdl) { + enum ice_status status; + if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) /* this is the last profile, remove the VSIG */ return ice_rem_vsig(hw, blk, vsig, chg); @@ -5507,10 +3685,11 @@ ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id) { - enum ice_status status; u16 i; for (i = 0; i < count; i++) { + enum ice_status status; + status = ice_add_prof_id_flow(hw, blk, vsi[i], id); if (status) return status; @@ -5689,10 +3868,11 @@ ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id) { - enum ice_status status; u16 i; 
for (i = 0; i < count; i++) { + enum ice_status status; + status = ice_rem_prof_id_flow(hw, blk, vsi[i], id); if (status) return status; diff --git a/sys/dev/ice/ice_flex_type.h b/sys/dev/ice/ice_flex_type.h --- a/sys/dev/ice/ice_flex_type.h +++ b/sys/dev/ice/ice_flex_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -42,6 +42,7 @@ u16 off; /* Offset within the protocol header */ u8 resvrd; }; + #pragma pack() #define ICE_MAX_NUM_PROFILES 256 @@ -51,251 +52,6 @@ struct ice_fv_word ew[ICE_MAX_FV_WORDS]; }; -/* Package and segment headers and tables */ -struct ice_pkg_hdr { - struct ice_pkg_ver pkg_format_ver; - __le32 seg_count; - __le32 seg_offset[STRUCT_HACK_VAR_LEN]; -}; - -/* generic segment */ -struct ice_generic_seg_hdr { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_ICE_E810 0x00000010 - __le32 seg_type; - struct ice_pkg_ver seg_format_ver; - __le32 seg_size; - char seg_id[ICE_PKG_NAME_SIZE]; -}; - -/* ice specific segment */ - -union ice_device_id { - struct { - __le16 device_id; - __le16 vendor_id; - } dev_vend_id; - __le32 id; -}; - -struct ice_device_id_entry { - union ice_device_id device; - union ice_device_id sub_device; -}; - -struct ice_seg { - struct ice_generic_seg_hdr hdr; - __le32 device_table_count; - struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN]; -}; - -struct ice_nvm_table { - __le32 table_count; - __le32 vers[STRUCT_HACK_VAR_LEN]; -}; - -struct ice_buf { -#define ICE_PKG_BUF_SIZE 4096 - u8 buf[ICE_PKG_BUF_SIZE]; -}; - -struct ice_buf_table { - __le32 buf_count; - struct ice_buf buf_array[STRUCT_HACK_VAR_LEN]; -}; - -/* global metadata specific segment */ -struct ice_global_metadata_seg { - struct ice_generic_seg_hdr hdr; - struct ice_pkg_ver pkg_ver; - __le32 rsvd; - char pkg_name[ICE_PKG_NAME_SIZE]; -}; - -#define 
ICE_MIN_S_OFF 12 -#define ICE_MAX_S_OFF 4095 -#define ICE_MIN_S_SZ 1 -#define ICE_MAX_S_SZ 4084 - -/* section information */ -struct ice_section_entry { - __le32 type; - __le16 offset; - __le16 size; -}; - -#define ICE_MIN_S_COUNT 1 -#define ICE_MAX_S_COUNT 511 -#define ICE_MIN_S_DATA_END 12 -#define ICE_MAX_S_DATA_END 4096 - -#define ICE_METADATA_BUF 0x80000000 - -struct ice_buf_hdr { - __le16 section_count; - __le16 data_end; - struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN]; -}; - -#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ - ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ - (ent_sz)) - -/* ice package section IDs */ -#define ICE_SID_METADATA 1 -#define ICE_SID_XLT0_SW 10 -#define ICE_SID_XLT_KEY_BUILDER_SW 11 -#define ICE_SID_XLT1_SW 12 -#define ICE_SID_XLT2_SW 13 -#define ICE_SID_PROFID_TCAM_SW 14 -#define ICE_SID_PROFID_REDIR_SW 15 -#define ICE_SID_FLD_VEC_SW 16 -#define ICE_SID_CDID_KEY_BUILDER_SW 17 -#define ICE_SID_CDID_REDIR_SW 18 - -#define ICE_SID_XLT0_ACL 20 -#define ICE_SID_XLT_KEY_BUILDER_ACL 21 -#define ICE_SID_XLT1_ACL 22 -#define ICE_SID_XLT2_ACL 23 -#define ICE_SID_PROFID_TCAM_ACL 24 -#define ICE_SID_PROFID_REDIR_ACL 25 -#define ICE_SID_FLD_VEC_ACL 26 -#define ICE_SID_CDID_KEY_BUILDER_ACL 27 -#define ICE_SID_CDID_REDIR_ACL 28 - -#define ICE_SID_XLT0_FD 30 -#define ICE_SID_XLT_KEY_BUILDER_FD 31 -#define ICE_SID_XLT1_FD 32 -#define ICE_SID_XLT2_FD 33 -#define ICE_SID_PROFID_TCAM_FD 34 -#define ICE_SID_PROFID_REDIR_FD 35 -#define ICE_SID_FLD_VEC_FD 36 -#define ICE_SID_CDID_KEY_BUILDER_FD 37 -#define ICE_SID_CDID_REDIR_FD 38 - -#define ICE_SID_XLT0_RSS 40 -#define ICE_SID_XLT_KEY_BUILDER_RSS 41 -#define ICE_SID_XLT1_RSS 42 -#define ICE_SID_XLT2_RSS 43 -#define ICE_SID_PROFID_TCAM_RSS 44 -#define ICE_SID_PROFID_REDIR_RSS 45 -#define ICE_SID_FLD_VEC_RSS 46 -#define ICE_SID_CDID_KEY_BUILDER_RSS 47 -#define ICE_SID_CDID_REDIR_RSS 48 - -#define ICE_SID_RXPARSER_CAM 50 -#define 
ICE_SID_RXPARSER_NOMATCH_CAM 51 -#define ICE_SID_RXPARSER_IMEM 52 -#define ICE_SID_RXPARSER_XLT0_BUILDER 53 -#define ICE_SID_RXPARSER_NODE_PTYPE 54 -#define ICE_SID_RXPARSER_MARKER_PTYPE 55 -#define ICE_SID_RXPARSER_BOOST_TCAM 56 -#define ICE_SID_RXPARSER_PROTO_GRP 57 -#define ICE_SID_RXPARSER_METADATA_INIT 58 -#define ICE_SID_RXPARSER_XLT0 59 - -#define ICE_SID_TXPARSER_CAM 60 -#define ICE_SID_TXPARSER_NOMATCH_CAM 61 -#define ICE_SID_TXPARSER_IMEM 62 -#define ICE_SID_TXPARSER_XLT0_BUILDER 63 -#define ICE_SID_TXPARSER_NODE_PTYPE 64 -#define ICE_SID_TXPARSER_MARKER_PTYPE 65 -#define ICE_SID_TXPARSER_BOOST_TCAM 66 -#define ICE_SID_TXPARSER_PROTO_GRP 67 -#define ICE_SID_TXPARSER_METADATA_INIT 68 -#define ICE_SID_TXPARSER_XLT0 69 - -#define ICE_SID_RXPARSER_INIT_REDIR 70 -#define ICE_SID_TXPARSER_INIT_REDIR 71 -#define ICE_SID_RXPARSER_MARKER_GRP 72 -#define ICE_SID_TXPARSER_MARKER_GRP 73 -#define ICE_SID_RXPARSER_LAST_PROTO 74 -#define ICE_SID_TXPARSER_LAST_PROTO 75 -#define ICE_SID_RXPARSER_PG_SPILL 76 -#define ICE_SID_TXPARSER_PG_SPILL 77 -#define ICE_SID_RXPARSER_NOMATCH_SPILL 78 -#define ICE_SID_TXPARSER_NOMATCH_SPILL 79 - -#define ICE_SID_XLT0_PE 80 -#define ICE_SID_XLT_KEY_BUILDER_PE 81 -#define ICE_SID_XLT1_PE 82 -#define ICE_SID_XLT2_PE 83 -#define ICE_SID_PROFID_TCAM_PE 84 -#define ICE_SID_PROFID_REDIR_PE 85 -#define ICE_SID_FLD_VEC_PE 86 -#define ICE_SID_CDID_KEY_BUILDER_PE 87 -#define ICE_SID_CDID_REDIR_PE 88 - -#define ICE_SID_RXPARSER_FLAG_REDIR 97 - -/* Label Metadata section IDs */ -#define ICE_SID_LBL_FIRST 0x80000010 -#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010 -#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011 -#define ICE_SID_LBL_RESERVED_12 0x80000012 -#define ICE_SID_LBL_RESERVED_13 0x80000013 -#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014 -#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015 -#define ICE_SID_LBL_PTYPE 0x80000016 -#define ICE_SID_LBL_PROTOCOL_ID 0x80000017 -#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 -#define ICE_SID_LBL_TXPARSER_TMEM 
0x80000019 -#define ICE_SID_LBL_RXPARSER_PG 0x8000001A -#define ICE_SID_LBL_TXPARSER_PG 0x8000001B -#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C -#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D -#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E -#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F -#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020 -#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021 -#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022 -#define ICE_SID_LBL_FLAG 0x80000023 -#define ICE_SID_LBL_REG 0x80000024 -#define ICE_SID_LBL_SW_PTG 0x80000025 -#define ICE_SID_LBL_ACL_PTG 0x80000026 -#define ICE_SID_LBL_PE_PTG 0x80000027 -#define ICE_SID_LBL_RSS_PTG 0x80000028 -#define ICE_SID_LBL_FD_PTG 0x80000029 -#define ICE_SID_LBL_SW_VSIG 0x8000002A -#define ICE_SID_LBL_ACL_VSIG 0x8000002B -#define ICE_SID_LBL_PE_VSIG 0x8000002C -#define ICE_SID_LBL_RSS_VSIG 0x8000002D -#define ICE_SID_LBL_FD_VSIG 0x8000002E -#define ICE_SID_LBL_PTYPE_META 0x8000002F -#define ICE_SID_LBL_SW_PROFID 0x80000030 -#define ICE_SID_LBL_ACL_PROFID 0x80000031 -#define ICE_SID_LBL_PE_PROFID 0x80000032 -#define ICE_SID_LBL_RSS_PROFID 0x80000033 -#define ICE_SID_LBL_FD_PROFID 0x80000034 -#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035 -#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036 -#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037 -#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038 -/* The following define MUST be updated to reflect the last label section ID */ -#define ICE_SID_LBL_LAST 0x80000038 - -enum ice_block { - ICE_BLK_SW = 0, - ICE_BLK_ACL, - ICE_BLK_FD, - ICE_BLK_RSS, - ICE_BLK_PE, - ICE_BLK_COUNT -}; - -enum ice_sect { - ICE_XLT0 = 0, - ICE_XLT_KB, - ICE_XLT1, - ICE_XLT2, - ICE_PROF_TCAM, - ICE_PROF_REDIR, - ICE_VEC_TBL, - ICE_CDID_KB, - ICE_CDID_REDIR, - ICE_SECT_COUNT -}; - /* Packet Type (PTYPE) values */ #define ICE_PTYPE_MAC_PAY 1 #define ICE_PTYPE_IPV4FRAG_PAY 22 @@ -401,10 +157,18 @@ * fields of the packet are now little endian. 
*/ struct ice_boost_key_value { -#define ICE_BOOST_REMAINING_HV_KEY 15 +#define ICE_BOOST_REMAINING_HV_KEY 15 u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; - __le16 hv_dst_port_key; - __le16 hv_src_port_key; + union { + struct { + __le16 hv_dst_port_key; + __le16 hv_src_port_key; + } /* udp_tunnel */; + struct { + __le16 hv_vlan_id_key; + __le16 hv_etype_key; + } vlan; + }; u8 tcam_search_key; }; #pragma pack() @@ -457,33 +221,15 @@ u8 redir_value[STRUCT_HACK_VAR_LEN]; }; -/* package buffer building */ - -struct ice_buf_build { - struct ice_buf buf; - u16 reserved_section_table_entries; -}; - -struct ice_pkg_enum { - struct ice_buf_table *buf_table; - u32 buf_idx; - - u32 type; - struct ice_buf_hdr *buf; - u32 sect_idx; - void *sect; - u32 sect_type; - - u32 entry_idx; - void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); -}; - /* Tunnel enabling */ enum ice_tunnel_type { TNL_VXLAN = 0, TNL_GENEVE, + TNL_GRETAP, TNL_GTP, + TNL_GTPC, + TNL_GTPU, TNL_LAST = 0xFF, TNL_ALL = 0xFF, }; @@ -726,10 +472,13 @@ #define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT enum ice_prof_type { + ICE_PROF_INVALID = 0x0, ICE_PROF_NON_TUN = 0x1, ICE_PROF_TUN_UDP = 0x2, ICE_PROF_TUN_GRE = 0x4, - ICE_PROF_TUN_ALL = 0x6, + ICE_PROF_TUN_GTPU = 0x8, + ICE_PROF_TUN_GTPC = 0x10, + ICE_PROF_TUN_ALL = 0x1E, ICE_PROF_ALL = 0xFF, }; diff --git a/sys/dev/ice/ice_flow.h b/sys/dev/ice/ice_flow.h --- a/sys/dev/ice/ice_flow.h +++ b/sys/dev/ice/ice_flow.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_flow.c b/sys/dev/ice/ice_flow.c --- a/sys/dev/ice/ice_flow.c +++ b/sys/dev/ice/ice_flow.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -400,6 +400,7 @@ * This will give us the direction flags. */ struct ice_fv_word es[ICE_MAX_FV_WORDS]; + ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); }; @@ -566,8 +567,8 @@ u8 seg, enum ice_flow_field fld) { enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX; + u8 fv_words = (u8)hw->blk[params->blk].es.fvw; enum ice_prot_id prot_id = ICE_PROT_ID_INVAL; - u8 fv_words = hw->blk[params->blk].es.fvw; struct ice_flow_fld_info *flds; u16 cnt, ese_bits, i; u16 off; @@ -593,7 +594,6 @@ case ICE_FLOW_FIELD_IDX_IPV4_TTL: case ICE_FLOW_FIELD_IDX_IPV4_PROT: prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; - /* TTL and PROT share the same extraction seq. entry. * Each is considered a sibling to the other in terms of sharing * the same extraction sequence entry. @@ -606,7 +606,6 @@ case ICE_FLOW_FIELD_IDX_IPV6_TTL: case ICE_FLOW_FIELD_IDX_IPV6_PROT: prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; - /* TTL and PROT share the same extraction seq. entry. * Each is considered a sibling to the other in terms of sharing * the same extraction sequence entry. 
@@ -666,7 +665,7 @@ */ ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE; - flds[fld].xtrct.prot_id = prot_id; + flds[fld].xtrct.prot_id = (u8)prot_id; flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) * ICE_FLOW_FV_EXTRACT_SZ; flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits); @@ -702,7 +701,7 @@ else idx = params->es_cnt; - params->es[idx].prot_id = prot_id; + params->es[idx].prot_id = (u8)prot_id; params->es[idx].off = off; params->es_cnt++; } @@ -952,8 +951,7 @@ } /* Add a HW profile for this flow profile */ - status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, - params->es); + status = ice_add_prof(hw, blk, prof_id, params->ptypes, params->es); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; @@ -1286,13 +1284,13 @@ { struct ice_flow_seg_info *seg; u64 val; - u8 i; + u16 i; /* set inner most segment */ seg = &segs[seg_cnt - 1]; ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds, - ICE_FLOW_FIELD_IDX_MAX) + (u16)ICE_FLOW_FIELD_IDX_MAX) ice_flow_set_fld(seg, (enum ice_flow_field)i, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c --- a/sys/dev/ice/ice_fw_logging.c +++ b/sys/dev/ice/ice_fw_logging.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_fwlog.h b/sys/dev/ice/ice_fwlog.h --- a/sys/dev/ice/ice_fwlog.h +++ b/sys/dev/ice/ice_fwlog.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -72,7 +72,7 @@ /* options used to configure firmware logging */ u16 options; /* minimum number of log events sent per Admin Receive Queue event */ - u8 log_resolution; + u16 log_resolution; }; void ice_fwlog_set_support_ena(struct ice_hw *hw); diff --git a/sys/dev/ice/ice_fwlog.c b/sys/dev/ice/ice_fwlog.c --- a/sys/dev/ice/ice_fwlog.c +++ b/sys/dev/ice/ice_fwlog.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_hw_autogen.h b/sys/dev/ice/ice_hw_autogen.h --- a/sys/dev/ice/ice_hw_autogen.h +++ b/sys/dev/ice/ice_hw_autogen.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_iflib.h b/sys/dev/ice/ice_iflib.h --- a/sys/dev/ice/ice_iflib.h +++ b/sys/dev/ice/ice_iflib.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -285,10 +285,16 @@ /* Ethertype filters enabled */ bool enable_tx_fc_filter; bool enable_tx_lldp_filter; - + /* Other tunable flags */ bool enable_health_events; + /* 5-layer scheduler topology enabled */ + bool tx_balance_en; + + /* Allow additional non-standard FEC mode */ + bool allow_no_fec_mod_in_auto; + int rebuild_ticks; /* driver state flags, only access using atomic functions */ @@ -297,6 +303,8 @@ /* NVM link override settings */ struct ice_link_default_override_tlv ldo_tlv; + u16 fw_debug_dump_cluster_mask; + struct sx *iflib_ctx_lock; /* Tri-state feature flags (capable/enabled) */ diff --git a/sys/dev/ice/ice_iflib_recovery_txrx.c b/sys/dev/ice/ice_iflib_recovery_txrx.c --- a/sys/dev/ice/ice_iflib_recovery_txrx.c +++ b/sys/dev/ice/ice_iflib_recovery_txrx.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_iflib_sysctls.h b/sys/dev/ice/ice_iflib_sysctls.h --- a/sys/dev/ice/ice_iflib_sysctls.h +++ b/sys/dev/ice/ice_iflib_sysctls.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_iflib_txrx.c b/sys/dev/ice/ice_iflib_txrx.c --- a/sys/dev/ice/ice_iflib_txrx.c +++ b/sys/dev/ice/ice_iflib_txrx.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -55,7 +55,7 @@ static int ice_ift_rxd_available(void *arg, uint16_t rxqid, qidx_t pidx, qidx_t budget); static void ice_ift_rxd_flush(void *arg, uint16_t rxqid, uint8_t flidx, qidx_t pidx); static void ice_ift_rxd_refill(void *arg, if_rxd_update_t iru); -static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m); +static qidx_t ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi); /* Macro to help extract the NIC mode flexible Rx descriptor fields from the * advanced 32byte Rx descriptors. @@ -79,7 +79,7 @@ .ift_rxd_pkt_get = ice_ift_rxd_pkt_get, .ift_rxd_refill = ice_ift_rxd_refill, .ift_rxd_flush = ice_ift_rxd_flush, - .ift_txq_select = ice_ift_queue_select, + .ift_txq_select_v2 = ice_ift_queue_select, }; /** @@ -284,7 +284,6 @@ ice_ift_rxd_pkt_get(void *arg, if_rxd_info_t ri) { struct ice_softc *sc = (struct ice_softc *)arg; - if_softc_ctx_t scctx = sc->scctx; struct ice_rx_queue *rxq = &sc->pf_vsi.rx_queues[ri->iri_qsidx]; union ice_32b_rx_flex_desc *cur; u16 status0, plen, ptype; @@ -342,7 +341,7 @@ /* Get packet type and set checksum flags */ ptype = le16toh(cur->wb.ptype_flex_flags0) & ICE_RX_FLEX_DESC_PTYPE_M; - if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0) + if ((iflib_get_ifp(sc->ctx)->if_capenable & IFCAP_RXCSUM) != 0) ice_rx_checksum(rxq, &ri->iri_csum_flags, &ri->iri_csum_data, status0, ptype); @@ -408,12 +407,13 @@ } static qidx_t -ice_ift_queue_select(void *arg, struct mbuf *m) +ice_ift_queue_select(void *arg, struct mbuf *m, if_pkt_info_t pi) { struct ice_softc *sc = (struct ice_softc *)arg; + struct ice_dcbx_cfg *local_dcbx_cfg; struct ice_vsi *vsi = &sc->pf_vsi; u16 tc_base_queue, tc_qcount; - u8 up, tc; + u8 dscp_val, up, tc; #ifdef ALTQ /* Included to match default iflib behavior */ @@ -431,12 +431,21 @@ return (0); } - /* Use default TC unless overridden */ + /* Use default TC unless overridden later */ tc = 0; /* XXX: Get default TC for traffic if >1 
TC? */ - if (m->m_flags & M_VLANTAG) { + local_dcbx_cfg = &sc->hw.port_info->qos_cfg.local_dcbx_cfg; + +#if defined(INET) || defined(INET6) + if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) && + (pi->ipi_flags & (IPI_TX_IPV4 | IPI_TX_IPV6))) { + dscp_val = pi->ipi_ip_tos >> 2; + tc = local_dcbx_cfg->dscp_map[dscp_val]; + } else +#endif /* defined(INET) || defined(INET6) */ + if (m->m_flags & M_VLANTAG) { /* ICE_QOS_MODE_VLAN */ up = EVL_PRIOFTAG(m->m_pkthdr.ether_vtag); - tc = sc->hw.port_info->qos_cfg.local_dcbx_cfg.etscfg.prio_table[up]; + tc = local_dcbx_cfg->etscfg.prio_table[up]; } tc_base_queue = vsi->tc_info[tc].qoffset; diff --git a/sys/dev/ice/ice_lan_tx_rx.h b/sys/dev/ice/ice_lan_tx_rx.h --- a/sys/dev/ice/ice_lan_tx_rx.h +++ b/sys/dev/ice/ice_lan_tx_rx.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -948,10 +948,10 @@ __le64 qw1; }; -#define ICE_TX_GSC_DESC_START 0 /* 7 BITS */ -#define ICE_TX_GSC_DESC_OFFSET 7 /* 4 BITS */ -#define ICE_TX_GSC_DESC_TYPE 11 /* 2 BITS */ -#define ICE_TX_GSC_DESC_ENA 13 /* 1 BIT */ +#define ICE_TX_GCS_DESC_START 0 /* 7 BITS */ +#define ICE_TX_GCS_DESC_OFFSET 7 /* 4 BITS */ +#define ICE_TX_GCS_DESC_TYPE 11 /* 2 BITS */ +#define ICE_TX_GCS_DESC_ENA 13 /* 1 BIT */ #define ICE_TXD_CTX_QW1_DTYPE_S 0 #define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S) diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h --- a/sys/dev/ice/ice_lib.h +++ b/sys/dev/ice/ice_lib.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -119,6 +119,9 @@ /* global sysctl indicating whether FW health status events should be enabled */ extern bool ice_enable_health_events; +/* global sysctl indicating whether to enable 5-layer scheduler topology */ +extern bool ice_tx_balance_en; + /** * @struct ice_bar_info * @brief PCI BAR mapping information @@ -203,6 +206,16 @@ #define ICE_NVM_ACCESS \ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5) +/** + * ICE_DEBUG_DUMP + * @brief Private ioctl command number for retrieving debug dump data + * + * The ioctl command number used by a userspace tool for accessing the driver for + * getting debug dump data from the firmware. + */ +#define ICE_DEBUG_DUMP \ + (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 6) + #define ICE_AQ_LEN 1023 #define ICE_MBXQ_LEN 512 #define ICE_SBQ_LEN 512 @@ -329,6 +342,7 @@ #define ICE_FEC_STRING_RS "RS-FEC" #define ICE_FEC_STRING_BASER "FC-FEC/BASE-R" #define ICE_FEC_STRING_NONE "None" +#define ICE_FEC_STRING_DIS_AUTO "Auto (w/ No-FEC)" /* Strings used for displaying Flow Control mode * @@ -364,6 +378,12 @@ ICE_PROMISC_MCAST_TX | \ ICE_PROMISC_MCAST_RX) +/* + * Only certain cluster IDs are valid for the FW debug dump functionality, + * so define a mask of those here. 
+ */ +#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK 0x1af + struct ice_softc; /** @@ -548,6 +568,20 @@ struct ice_vsi_hw_stats hw_stats; }; +/** + * @struct ice_debug_dump_cmd + * @brief arguments/return value for debug dump ioctl + */ +struct ice_debug_dump_cmd { + u32 offset; /* offset to read/write from table, in bytes */ + u16 cluster_id; + u16 table_id; + u16 data_size; /* size of data field, in bytes */ + u16 reserved1; + u32 reserved2; + u8 data[]; +}; + /** * @enum ice_state * @brief Driver state flags @@ -574,6 +608,7 @@ ICE_STATE_LINK_DEFAULT_OVERRIDE_PENDING, ICE_STATE_LLDP_RX_FLTR_FROM_DRIVER, ICE_STATE_MULTIPLE_TCS, + ICE_STATE_DO_FW_DEBUG_DUMP, /* This entry must be last */ ICE_STATE_LAST, }; @@ -832,8 +867,8 @@ void ice_add_rxq_sysctls(struct ice_rx_queue *rxq); int ice_config_rss(struct ice_vsi *vsi); void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc); -void ice_load_pkg_file(struct ice_softc *sc); -void ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status); +enum ice_status ice_load_pkg_file(struct ice_softc *sc); +void ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status); uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter); void ice_save_pci_info(struct ice_hw *hw, device_t dev); int ice_replay_all_vsi_cfg(struct ice_softc *sc); @@ -865,5 +900,7 @@ void ice_set_default_local_lldp_mib(struct ice_softc *sc); void ice_init_health_events(struct ice_softc *sc); void ice_cfg_pba_num(struct ice_softc *sc); +int ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd); +u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg); #endif /* _ICE_LIB_H_ */ diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c --- a/sys/dev/ice/ice_lib.c +++ b/sys/dev/ice/ice_lib.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -161,20 +161,29 @@ ice_debug_print_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *event); static bool ice_check_ets_bw(u8 *table); +static u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); static bool ice_dcb_needs_reconfig(struct ice_softc *sc, struct ice_dcbx_cfg *old_cfg, struct ice_dcbx_cfg *new_cfg); static void ice_dcb_recfg(struct ice_softc *sc); -static u8 ice_dcb_num_tc(u8 tc_map); +static u8 ice_dcb_tc_contig(u8 tc_map); static int ice_ets_str_to_tbl(const char *str, u8 *table, u8 limit); static int ice_pf_vsi_cfg_tc(struct ice_softc *sc, u8 tc_map); static void ice_sbuf_print_ets_cfg(struct sbuf *sbuf, const char *name, struct ice_dcb_ets_cfg *ets); static void ice_stop_pf_vsi(struct ice_softc *sc); static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt); -static void ice_do_dcb_reconfig(struct ice_softc *sc); +static void ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib); static int ice_config_pfc(struct ice_softc *sc, u8 new_mode); -static u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg); +void +ice_add_dscp2tc_map_sysctls(struct ice_softc *sc, + struct sysctl_ctx_list *ctx, + struct sysctl_oid_list *ctx_list); +static void ice_set_default_local_mib_settings(struct ice_softc *sc); +static bool ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg); +static void ice_start_dcbx_agent(struct ice_softc *sc); +static void ice_fw_debug_dump_print_cluster(struct ice_softc *sc, + struct sbuf *sbuf, u16 cluster_id); static int ice_module_init(void); static int ice_module_exit(void); @@ -228,6 +237,11 @@ static int ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS); static int ice_sysctl_pfc_config(SYSCTL_HANDLER_ARGS); static int ice_sysctl_query_port_ets(SYSCTL_HANDLER_ARGS); +static int ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS); +static int ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS); +static int 
ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS); +static int ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS); +static int ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS); /** * ice_map_bar - Map PCIe BAR memory @@ -567,7 +581,6 @@ MPASS(vsi->rx_qmap != NULL); /* TODO: - * Handle multiple Traffic Classes * Handle scattered queues (for VFs) */ if (vsi->qmap_type != ICE_RESMGR_ALLOC_CONTIGUOUS) @@ -578,7 +591,6 @@ ctx->info.q_mapping[0] = CPU_TO_LE16(vsi->rx_qmap[0]); ctx->info.q_mapping[1] = CPU_TO_LE16(vsi->num_rx_queues); - /* Calculate the next power-of-2 of number of queues */ if (vsi->num_rx_queues) pow = flsl(vsi->num_rx_queues - 1); @@ -587,6 +599,17 @@ qmap = (pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M; ctx->info.tc_mapping[0] = CPU_TO_LE16(qmap); + /* Fill out default driver TC queue info for VSI */ + vsi->tc_info[0].qoffset = 0; + vsi->tc_info[0].qcount_rx = vsi->num_rx_queues; + vsi->tc_info[0].qcount_tx = vsi->num_tx_queues; + for (int i = 1; i < ICE_MAX_TRAFFIC_CLASS; i++) { + vsi->tc_info[i].qoffset = 0; + vsi->tc_info[i].qcount_rx = 1; + vsi->tc_info[i].qcount_tx = 1; + } + vsi->tc_map = 0x1; + return 0; } @@ -1748,7 +1771,7 @@ * Add a MAC address filter for a given VSI. This is a wrapper around * ice_add_mac to simplify the interface. First, it only accepts a single * address, so we don't have to mess around with the list setup in other - * functions. Second, it ignores the ICE_ERR_ALREADY_EXIST error, so that + * functions. Second, it ignores the ICE_ERR_ALREADY_EXISTS error, so that * callers don't need to worry about attempting to add the same filter twice. 
*/ int @@ -1955,8 +1978,8 @@ device_t dev = sc->dev; enum ice_status status; - /* Sanity check that the data length matches */ - MPASS(le16toh(e->desc.datalen) == sizeof(struct ice_aqc_get_link_status_data)); + /* Sanity check that the data length isn't too small */ + MPASS(le16toh(e->desc.datalen) >= ICE_GET_LINK_STATUS_DATALEN_V1); /* * Even though the adapter gets link status information inside the @@ -3085,7 +3108,10 @@ if (strcmp(req_fec, "auto") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_AUTO)) == 0) { - new_mode = ICE_FEC_AUTO; + if (sc->allow_no_fec_mod_in_auto) + new_mode = ICE_FEC_DIS_AUTO; + else + new_mode = ICE_FEC_AUTO; } else if (strcmp(req_fec, "fc") == 0 || strcmp(req_fec, ice_fec_str(ICE_FEC_BASER)) == 0) { new_mode = ICE_FEC_BASER; @@ -3641,6 +3667,23 @@ return (0); } +/** + * ice_dscp_is_mapped - Check for non-zero DSCP to TC mappings + * @dcbcfg: Configuration struct to check for mappings in + * + * @return true if there exists a non-zero DSCP to TC mapping + * inside the input DCB configuration struct. 
+ */ +static bool +ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg) +{ + for (int i = 0; i < ICE_DSCP_NUM_VAL; i++) + if (dcbcfg->dscp_map[i] != 0) + return (true); + + return (false); +} + #define ICE_SYSCTL_HELP_FW_LLDP_AGENT \ "\nDisplay or change FW LLDP agent state:" \ "\n\t0 - disabled" \ @@ -3660,6 +3703,7 @@ ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; + struct ice_dcbx_cfg *local_dcbx_cfg; struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; enum ice_status status; @@ -3706,6 +3750,15 @@ if (old_state != 0 && fw_lldp_enabled == true) return (0); + /* Block transition to FW LLDP if DSCP mode is enabled */ + local_dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg; + if ((local_dcbx_cfg->pfc_mode == ICE_QOS_MODE_DSCP) && + ice_dscp_is_mapped(local_dcbx_cfg)) { + device_printf(dev, + "Cannot enable FW-LLDP agent while DSCP QoS is active.\n"); + return (EOPNOTSUPP); + } + if (fw_lldp_enabled == false) { status = ice_aq_stop_lldp(hw, true, true, NULL); /* EPERM is returned if the LLDP agent is already shutdown */ @@ -3744,6 +3797,7 @@ return (EIO); } } + ice_start_dcbx_agent(sc); hw->port_info->qos_cfg.is_sw_lldp = false; } @@ -3855,7 +3909,7 @@ return (EIO); } - ice_do_dcb_reconfig(sc); + ice_do_dcb_reconfig(sc, false); return (0); } @@ -3937,9 +3991,11 @@ return (ret); } - /* Prepare updated ETS TLV */ + /* Prepare updated ETS CFG/REC TLVs */ memcpy(local_dcbx_cfg->etscfg.prio_table, new_up2tc, sizeof(new_up2tc)); + memcpy(local_dcbx_cfg->etsrec.prio_table, new_up2tc, + sizeof(new_up2tc)); status = ice_set_dcb_cfg(pi); if (status) { @@ -3950,7 +4006,7 @@ return (EIO); } - ice_do_dcb_reconfig(sc); + ice_do_dcb_reconfig(sc, false); return (0); } @@ -3998,7 +4054,7 @@ return (EIO); } - ice_do_dcb_reconfig(sc); + ice_do_dcb_reconfig(sc, false); return (0); } @@ -4070,6 +4126,97 @@ return ice_config_pfc(sc, user_pfc); } +#define ICE_SYSCTL_HELP_PFC_MODE \ +"\nDisplay and set the current QoS mode for the firmware" \ 
+"\n\t0: VLAN UP mode" \ +"\n\t1: DSCP mode" + +/** + * ice_sysctl_pfc_mode + * @oidp: sysctl oid structure + * @arg1: pointer to private data structure + * @arg2: unused + * @req: sysctl request pointer + * + * Gets and sets whether the port is in DSCP or VLAN PCP-based + * PFC mode. This is also used to set whether DSCP or VLAN PCP + * -based settings are configured for DCB. + */ +static int +ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS) +{ + struct ice_softc *sc = (struct ice_softc *)arg1; + struct ice_dcbx_cfg *local_dcbx_cfg; + struct ice_port_info *pi; + struct ice_hw *hw = &sc->hw; + device_t dev = sc->dev; + enum ice_status status; + u8 user_pfc_mode, aq_pfc_mode; + int ret; + + UNREFERENCED_PARAMETER(arg2); + + if (ice_driver_is_detaching(sc)) + return (ESHUTDOWN); + + if (req->oldptr == NULL && req->newptr == NULL) { + ret = SYSCTL_OUT(req, 0, sizeof(u8)); + return (ret); + } + + pi = hw->port_info; + local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; + + user_pfc_mode = local_dcbx_cfg->pfc_mode; + + /* Read in the new mode */ + ret = sysctl_handle_8(oidp, &user_pfc_mode, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); + + /* Don't allow setting changes in FW DCB mode */ + if (!hw->port_info->qos_cfg.is_sw_lldp) + return (EPERM); + + /* Currently, there are only two modes */ + switch (user_pfc_mode) { + case 0: + aq_pfc_mode = ICE_AQC_PFC_VLAN_BASED_PFC; + break; + case 1: + aq_pfc_mode = ICE_AQC_PFC_DSCP_BASED_PFC; + break; + default: + device_printf(dev, + "%s: Valid input range is 0-1 (input %d)\n", + __func__, user_pfc_mode); + return (EINVAL); + } + + status = ice_aq_set_pfc_mode(hw, aq_pfc_mode, NULL); + if (status == ICE_ERR_NOT_SUPPORTED) { + device_printf(dev, + "%s: Failed to set PFC mode; DCB not supported\n", + __func__); + return (ENODEV); + } + if (status) { + device_printf(dev, + "%s: Failed to set PFC mode; status %s, aq_err %s\n", + __func__, ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + return (EIO); + } + + /* 
Reset settings to default when mode is changed */ + ice_set_default_local_mib_settings(sc); + /* Cache current settings and reconfigure */ + local_dcbx_cfg->pfc_mode = user_pfc_mode; + ice_do_dcb_reconfig(sc, false); + + return (0); +} + /** * ice_add_device_sysctls - add device specific dynamic sysctls * @sc: device private structure @@ -4141,6 +4288,18 @@ OID_AUTO, "pfc", CTLTYPE_U8 | CTLFLAG_RW, sc, 0, ice_sysctl_pfc_config, "CU", ICE_SYSCTL_HELP_PFC_CONFIG); + SYSCTL_ADD_PROC(ctx, ctx_list, + OID_AUTO, "pfc_mode", CTLTYPE_U8 | CTLFLAG_RWTUN, + sc, 0, ice_sysctl_pfc_mode, "CU", ICE_SYSCTL_HELP_PFC_MODE); + + SYSCTL_ADD_PROC(ctx, ctx_list, + OID_AUTO, "allow_no_fec_modules_in_auto", + CTLTYPE_U8 | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, + sc, 0, ice_sysctl_allow_no_fec_mod_in_auto, "CU", + "Allow \"No FEC\" mode in FEC auto-negotiation"); + + ice_add_dscp2tc_map_sysctls(sc, ctx, ctx_list); + /* Differentiate software and hardware statistics, by keeping hw stats * in their own node. This isn't in ice_add_device_tunables, because * we won't have any CTLFLAG_TUN sysctls under this node. @@ -5206,6 +5365,55 @@ } } +/** + * ice_add_dscp2tc_map_sysctls - Add sysctl tree for DSCP to TC mapping + * @sc: pointer to device private softc + * @ctx: the sysctl ctx to use + * @ctx_list: list of sysctl children for device (to add sysctl tree to) + * + * Add a sysctl tree for individual dscp2tc_map sysctls. Each child of this + * node can map 8 DSCPs to TC values; there are 8 of these in turn for a total + * of 64 DSCP to TC map values that the user can configure. 
+ */ +void +ice_add_dscp2tc_map_sysctls(struct ice_softc *sc, + struct sysctl_ctx_list *ctx, + struct sysctl_oid_list *ctx_list) +{ + struct sysctl_oid_list *node_list; + struct sysctl_oid *node; + struct sbuf *namebuf, *descbuf; + int first_dscp_val, last_dscp_val; + + node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "dscp2tc_map", CTLFLAG_RD, + NULL, "Map of DSCP values to DCB TCs"); + node_list = SYSCTL_CHILDREN(node); + + namebuf = sbuf_new_auto(); + descbuf = sbuf_new_auto(); + for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + sbuf_clear(namebuf); + sbuf_clear(descbuf); + + first_dscp_val = i * 8; + last_dscp_val = first_dscp_val + 7; + + sbuf_printf(namebuf, "%d-%d", first_dscp_val, last_dscp_val); + sbuf_printf(descbuf, "Map DSCP values %d to %d to TCs", + first_dscp_val, last_dscp_val); + + sbuf_finish(namebuf); + sbuf_finish(descbuf); + + SYSCTL_ADD_PROC(ctx, node_list, + OID_AUTO, sbuf_data(namebuf), CTLTYPE_STRING | CTLFLAG_RW, + sc, i, ice_sysctl_dscp2tc_map, "A", sbuf_data(descbuf)); + } + + sbuf_delete(namebuf); + sbuf_delete(descbuf); +} + /** * ice_add_device_tunables - Add early tunable sysctls and sysctl nodes * @sc: device private structure @@ -5584,6 +5792,39 @@ return (0); } +#define ICE_SYSCTL_DEBUG_MASK_HELP \ +"\nSelect debug statements to print to kernel messages" \ +"\nFlags:" \ +"\n\t 0x1 - Function Tracing" \ +"\n\t 0x2 - Driver Initialization" \ +"\n\t 0x4 - Release" \ +"\n\t 0x8 - FW Logging" \ +"\n\t 0x10 - Link" \ +"\n\t 0x20 - PHY" \ +"\n\t 0x40 - Queue Context" \ +"\n\t 0x80 - NVM" \ +"\n\t 0x100 - LAN" \ +"\n\t 0x200 - Flow" \ +"\n\t 0x400 - DCB" \ +"\n\t 0x800 - Diagnostics" \ +"\n\t 0x1000 - Flow Director" \ +"\n\t 0x2000 - Switch" \ +"\n\t 0x4000 - Scheduler" \ +"\n\t 0x8000 - RDMA" \ +"\n\t 0x10000 - DDP Package" \ +"\n\t 0x20000 - Resources" \ +"\n\t 0x40000 - ACL" \ +"\n\t 0x80000 - PTP" \ +"\n\t 0x100000 - Admin Queue messages" \ +"\n\t 0x200000 - Admin Queue descriptors" \ +"\n\t 0x400000 - Admin Queue descriptor buffers" 
\ +"\n\t 0x800000 - Admin Queue commands" \ +"\n\t 0x1000000 - Parser" \ +"\n\t ..." \ +"\n\t 0x8000000 - (Reserved for user)" \ +"\n\t" \ +"\nUse \"sysctl -x\" to view flags properly." + /** * ice_add_debug_tunables - Add tunables helpful for debugging the device driver * @sc: device private structure @@ -5613,7 +5854,7 @@ SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "debug_mask", ICE_CTLFLAG_DEBUG | CTLFLAG_RW | CTLFLAG_TUN, &sc->hw.debug_mask, 0, - "Debug message enable/disable mask"); + ICE_SYSCTL_DEBUG_MASK_HELP); /* Load the default value from the global sysctl first */ sc->enable_tx_fc_filter = ice_enable_tx_fc_filter; @@ -5623,6 +5864,12 @@ &sc->enable_tx_fc_filter, 0, "Drop Ethertype 0x8808 control frames originating from software on this PF"); + sc->tx_balance_en = ice_tx_balance_en; + SYSCTL_ADD_BOOL(ctx, debug_list, OID_AUTO, "tx_balance", + ICE_CTLFLAG_DEBUG | CTLFLAG_RWTUN, + &sc->tx_balance_en, 0, + "Enable 5-layer scheduler topology"); + /* Load the default value from the global sysctl first */ sc->enable_tx_lldp_filter = ice_enable_tx_lldp_filter; @@ -5768,6 +6015,300 @@ return (0); } +#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING \ +"\nSelect clusters to dump with \"dump\" sysctl" \ +"\nFlags:" \ +"\n\t 0x1 - Switch" \ +"\n\t 0x2 - ACL" \ +"\n\t 0x4 - Tx Scheduler" \ +"\n\t 0x8 - Profile Configuration" \ +"\n\t 0x20 - Link" \ +"\n\t 0x80 - DCB" \ +"\n\t 0x100 - L2P" \ +"\n\t" \ +"\nUse \"sysctl -x\" to view flags properly." 
+ +/** + * ice_sysctl_fw_debug_dump_cluster_setting - Set which clusters to dump + * from FW when FW debug dump occurs + * @oidp: sysctl oid structure + * @arg1: pointer to private data structure + * @arg2: unused + * @req: sysctl request pointer + */ +static int +ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS) +{ + struct ice_softc *sc = (struct ice_softc *)arg1; + device_t dev = sc->dev; + u16 clusters; + int ret; + + UNREFERENCED_PARAMETER(arg2); + + ret = priv_check(curthread, PRIV_DRIVER); + if (ret) + return (ret); + + if (ice_driver_is_detaching(sc)) + return (ESHUTDOWN); + + clusters = sc->fw_debug_dump_cluster_mask; + + ret = sysctl_handle_16(oidp, &clusters, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); + + if (!clusters || + (clusters & ~(ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK))) { + device_printf(dev, + "%s: ERROR: Incorrect settings requested\n", + __func__); + return (EINVAL); + } + + sc->fw_debug_dump_cluster_mask = clusters; + + return (0); +} + +#define ICE_FW_DUMP_AQ_COUNT_LIMIT (10000) + +/** + * ice_fw_debug_dump_print_cluster - Print formatted cluster data from FW + * @sc: the device softc + * @sbuf: initialized sbuf to print data to + * @cluster_id: FW cluster ID to print data from + * + * Reads debug data from the specified cluster id in the FW and prints it to + * the input sbuf. This function issues multiple AQ commands to the FW in + * order to get all of the data in the cluster. 
+ * + * @remark Only intended to be used by the sysctl handler + * ice_sysctl_fw_debug_dump_do_dump + */ +static void +ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 cluster_id) +{ + struct ice_hw *hw = &sc->hw; + device_t dev = sc->dev; + u16 data_buf_size = ICE_AQ_MAX_BUF_LEN; + const u8 reserved_buf[8] = {}; + enum ice_status status; + int counter = 0; + u8 *data_buf; + + /* Other setup */ + data_buf = (u8 *)malloc(data_buf_size, M_ICE, M_NOWAIT | M_ZERO); + if (!data_buf) + return; + + /* Input parameters / loop variables */ + u16 table_id = 0; + u32 offset = 0; + + /* Output from the Get Internal Data AQ command */ + u16 ret_buf_size = 0; + u16 ret_next_table = 0; + u32 ret_next_index = 0; + + ice_debug(hw, ICE_DBG_DIAG, "%s: dumping cluster id %d\n", __func__, + cluster_id); + + for (;;) { + /* Do not trust the FW behavior to be completely correct */ + if (counter++ >= ICE_FW_DUMP_AQ_COUNT_LIMIT) { + device_printf(dev, + "%s: Exceeded counter limit for cluster %d\n", + __func__, cluster_id); + break; + } + + ice_debug(hw, ICE_DBG_DIAG, "---\n"); + ice_debug(hw, ICE_DBG_DIAG, + "table_id 0x%04x offset 0x%08x buf_size %d\n", + table_id, offset, data_buf_size); + + status = ice_aq_get_internal_data(hw, cluster_id, table_id, + offset, data_buf, data_buf_size, &ret_buf_size, + &ret_next_table, &ret_next_index, NULL); + if (status) { + device_printf(dev, + "%s: ice_aq_get_internal_data in cluster %d: err %s aq_err %s\n", + __func__, cluster_id, ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + break; + } + + ice_debug(hw, ICE_DBG_DIAG, + "ret_table_id 0x%04x ret_offset 0x%08x ret_buf_size %d\n", + ret_next_table, ret_next_index, ret_buf_size); + + /* Print cluster id */ + u32 print_cluster_id = (u32)cluster_id; + sbuf_bcat(sbuf, &print_cluster_id, sizeof(print_cluster_id)); + /* Print table id */ + u32 print_table_id = (u32)table_id; + sbuf_bcat(sbuf, &print_table_id, sizeof(print_table_id)); + /* Print table length 
*/ + u32 print_table_length = (u32)ret_buf_size; + sbuf_bcat(sbuf, &print_table_length, sizeof(print_table_length)); + /* Print current offset */ + u32 print_curr_offset = offset; + sbuf_bcat(sbuf, &print_curr_offset, sizeof(print_curr_offset)); + /* Print reserved bytes */ + sbuf_bcat(sbuf, reserved_buf, sizeof(reserved_buf)); + /* Print data */ + sbuf_bcat(sbuf, data_buf, ret_buf_size); + + /* Adjust loop variables */ + memset(data_buf, 0, data_buf_size); + bool same_table_next = (table_id == ret_next_table); + bool last_table_next = (ret_next_table == 0xff || ret_next_table == 0xffff); + bool last_offset_next = (ret_next_index == 0xffffffff || ret_next_index == 0); + + if ((!same_table_next && !last_offset_next) || + (same_table_next && last_table_next)) { + device_printf(dev, + "%s: Unexpected conditions for same_table_next(%d) last_table_next(%d) last_offset_next(%d), ending cluster (%d)\n", + __func__, same_table_next, last_table_next, last_offset_next, cluster_id); + break; + } + + if (!same_table_next && !last_table_next && last_offset_next) { + /* We've hit the end of the table */ + table_id = ret_next_table; + offset = 0; + } + else if (!same_table_next && last_table_next && last_offset_next) { + /* We've hit the end of the cluster */ + break; + } + else if (same_table_next && !last_table_next && last_offset_next) { + if (cluster_id == 0x1 && table_id < 39) + table_id += 1; + else + break; + } + else { /* if (same_table_next && !last_table_next && !last_offset_next) */ + /* More data left in the table */ + offset = ret_next_index; + } + } + + free(data_buf, M_ICE); +} + +#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP \ +"\nWrite 1 to output a FW debug dump containing the clusters specified by the \"clusters\" sysctl" \ +"\nThe \"-b\" flag must be used in order to dump this data as binary data because" \ +"\nthis data is opaque and not a string." 
+ +#define ICE_FW_DUMP_BASE_TEXT_SIZE (1024 * 1024) +#define ICE_FW_DUMP_CLUST0_TEXT_SIZE (2 * 1024 * 1024) +#define ICE_FW_DUMP_CLUST1_TEXT_SIZE (128 * 1024) +#define ICE_FW_DUMP_CLUST2_TEXT_SIZE (2 * 1024 * 1024) + +/** + * ice_sysctl_fw_debug_dump_do_dump - Dump data from FW to sysctl output + * @oidp: sysctl oid structure + * @arg1: pointer to private data structure + * @arg2: unused + * @req: sysctl request pointer + * + * Sysctl handler for the debug.dump.dump sysctl. Prints out a specially- + * formatted dump of some debug FW data intended to be processed by a special + * Intel tool. Prints out the cluster data specified by the "clusters" + * sysctl. + * + * @remark The actual AQ calls and printing are handled by a helper + * function above. + */ +static int +ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS) +{ + struct ice_softc *sc = (struct ice_softc *)arg1; + device_t dev = sc->dev; + struct sbuf *sbuf; + int bit, ret; + + UNREFERENCED_PARAMETER(arg2); + + ret = priv_check(curthread, PRIV_DRIVER); + if (ret) + return (ret); + + if (ice_driver_is_detaching(sc)) + return (ESHUTDOWN); + + /* If the user hasn't written "1" to this sysctl yet: */ + if (!ice_test_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP)) { + /* Avoid output on the first set of reads to this sysctl in + * order to prevent a null byte from being written to the + * end result when called via sysctl(8). + */ + if (req->oldptr == NULL && req->newptr == NULL) { + ret = SYSCTL_OUT(req, 0, 0); + return (ret); + } + + char input_buf[2] = ""; + ret = sysctl_handle_string(oidp, input_buf, sizeof(input_buf), req); + if ((ret) || (req->newptr == NULL)) + return (ret); + + /* If we get '1', then indicate we'll do a dump in the next + * sysctl read call. 
+ */ + if (input_buf[0] == '1') { + ice_set_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP); + return (0); + } + + return (EINVAL); + } + + /* --- FW debug dump state is set --- */ + + if (!sc->fw_debug_dump_cluster_mask) { + device_printf(dev, + "%s: Debug Dump failed because no cluster was specified.\n", + __func__); + ret = EINVAL; + goto out; + } + + /* Caller just wants the upper bound for size */ + if (req->oldptr == NULL && req->newptr == NULL) { + size_t est_output_len = ICE_FW_DUMP_BASE_TEXT_SIZE; + if (sc->fw_debug_dump_cluster_mask & 0x1) + est_output_len += ICE_FW_DUMP_CLUST0_TEXT_SIZE; + if (sc->fw_debug_dump_cluster_mask & 0x2) + est_output_len += ICE_FW_DUMP_CLUST1_TEXT_SIZE; + if (sc->fw_debug_dump_cluster_mask & 0x4) + est_output_len += ICE_FW_DUMP_CLUST2_TEXT_SIZE; + + ret = SYSCTL_OUT(req, 0, est_output_len); + return (ret); + } + + sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); + sbuf_clear_flags(sbuf, SBUF_INCLUDENUL); + + ice_debug(&sc->hw, ICE_DBG_DIAG, "%s: Debug Dump running...\n", __func__); + + for_each_set_bit(bit, &sc->fw_debug_dump_cluster_mask, + sizeof(sc->fw_debug_dump_cluster_mask) * 8) + ice_fw_debug_dump_print_cluster(sc, sbuf, bit); + + sbuf_finish(sbuf); + sbuf_delete(sbuf); + +out: + ice_clear_state(&sc->state, ICE_STATE_DO_FW_DEBUG_DUMP); + return (ret); +} + /** * ice_add_debug_sysctls - Add sysctls helpful for debugging the device driver * @sc: device private structure @@ -5779,8 +6320,8 @@ static void ice_add_debug_sysctls(struct ice_softc *sc) { - struct sysctl_oid *sw_node; - struct sysctl_oid_list *debug_list, *sw_list; + struct sysctl_oid *sw_node, *dump_node; + struct sysctl_oid_list *debug_list, *sw_list, *dump_list; device_t dev = sc->dev; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); @@ -5929,7 +6470,21 @@ ice_sysctl_dump_ethertype_mac_filters, "A", "Ethertype/MAC Filters"); -} + dump_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "dump", + ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL, + "Internal FW 
Dump"); + dump_list = SYSCTL_CHILDREN(dump_node); + + SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "clusters", + ICE_CTLFLAG_DEBUG | CTLTYPE_U16 | CTLFLAG_RW, sc, 0, + ice_sysctl_fw_debug_dump_cluster_setting, "SU", + ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING); + + SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "dump", + ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, + ice_sysctl_fw_debug_dump_do_dump, "", + ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP); +} /** * ice_vsi_disable_tx - Disable (unconfigure) Tx queues for a VSI @@ -5972,21 +6527,26 @@ } ice_for_each_traffic_class(tc) { + struct ice_tc_info *tc_info = &vsi->tc_info[tc]; + u16 start_idx, end_idx; + + /* Skip rest of disabled TCs once the first + * disabled TC is found */ + if (!(vsi->tc_map & BIT(tc))) + break; + + /* Fill out TX queue information for this TC */ + start_idx = tc_info->qoffset; + end_idx = start_idx + tc_info->qcount_tx; buf_idx = 0; - for (j = 0; j < vsi->num_tx_queues; j++) { + for (j = start_idx; j < end_idx; j++) { struct ice_tx_queue *txq = &vsi->tx_queues[j]; - if (txq->tc != tc) - continue; - q_ids[buf_idx] = vsi->tx_qmap[j]; q_handles[buf_idx] = txq->q_handle; q_teids[buf_idx] = txq->q_teid; buf_idx++; } - /* Skip TC if no queues belong to it */ - if (buf_idx == 0) - continue; status = ice_dis_vsi_txq(hw->port_info, vsi->idx, tc, buf_idx, q_handles, q_ids, q_teids, ICE_NO_RESET, 0, NULL); @@ -6005,9 +6565,9 @@ } /* Clear buffers */ - memset(q_teids, 0, q_teids_size); - memset(q_ids, 0, q_ids_size); - memset(q_handles, 0, q_handles_size); + memset(q_teids, 0, q_teids_size); + memset(q_ids, 0, q_ids_size); + memset(q_handles, 0, q_handles_size); } /* free_q_handles: */ @@ -6463,15 +7023,15 @@ * @pkg_status: the status result of ice_copy_and_init_pkg * * Called by ice_load_pkg after an attempt to download the DDP package - * contents to the device. Determines whether the download was successful or - * not and logs an appropriate message for the system administrator. 
+ * contents to the device to log an appropriate message for the system + * administrator about download status. * - * @post if a DDP package was previously downloaded on another port and it - * is not compatible with this driver, pkg_status will be updated to reflect - * this, and the driver will transition to safe mode. + * @post ice_is_init_pkg_successful function is used to determine + * whether the download was successful and DDP package is compatible + * with this driver. Otherwise driver will transition to Safe Mode. */ void -ice_log_pkg_init(struct ice_softc *sc, enum ice_status *pkg_status) +ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; @@ -6485,60 +7045,37 @@ ice_os_pkg_version_str(hw, os_pkg); sbuf_finish(os_pkg); - switch (*pkg_status) { - case ICE_SUCCESS: - /* The package download AdminQ command returned success because - * this download succeeded or ICE_ERR_AQ_NO_WORK since there is - * already a package loaded on the device. - */ - if (hw->pkg_ver.major == hw->active_pkg_ver.major && - hw->pkg_ver.minor == hw->active_pkg_ver.minor && - hw->pkg_ver.update == hw->active_pkg_ver.update && - hw->pkg_ver.draft == hw->active_pkg_ver.draft && - !memcmp(hw->pkg_name, hw->active_pkg_name, - sizeof(hw->pkg_name))) { - switch (hw->pkg_dwnld_status) { - case ICE_AQ_RC_OK: - device_printf(dev, - "The DDP package was successfully loaded: %s.\n", - sbuf_data(active_pkg)); - break; - case ICE_AQ_RC_EEXIST: - device_printf(dev, - "DDP package already present on device: %s.\n", - sbuf_data(active_pkg)); - break; - default: - /* We do not expect this to occur, but the - * extra messaging is here in case something - * changes in the ice_init_pkg flow. - */ - device_printf(dev, - "DDP package already present on device: %s. 
An unexpected error occurred, pkg_dwnld_status %s.\n", - sbuf_data(active_pkg), - ice_aq_str(hw->pkg_dwnld_status)); - break; - } - } else if (pkg_ver_compatible(&hw->active_pkg_ver) == 0) { - device_printf(dev, - "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n", - sbuf_data(active_pkg), - sbuf_data(os_pkg)); - } else if (pkg_ver_compatible(&hw->active_pkg_ver) > 0) { - device_printf(dev, - "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", - sbuf_data(active_pkg), - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); - *pkg_status = ICE_ERR_NOT_SUPPORTED; - } else { - device_printf(dev, - "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", - sbuf_data(active_pkg), - ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); - *pkg_status = ICE_ERR_NOT_SUPPORTED; - } + switch (pkg_status) { + case ICE_DDP_PKG_SUCCESS: + device_printf(dev, + "The DDP package was successfully loaded: %s.\n", + sbuf_data(active_pkg)); + break; + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case ICE_DDP_PKG_ALREADY_LOADED: + device_printf(dev, + "DDP package already present on device: %s.\n", + sbuf_data(active_pkg)); break; - case ICE_ERR_NOT_SUPPORTED: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + device_printf(dev, + "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package %s. The ice_ddp module has package: %s.\n", + sbuf_data(active_pkg), + sbuf_data(os_pkg)); + break; + case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH: + device_printf(dev, + "The device has a DDP package that is higher than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. 
Entering Safe Mode.\n", + sbuf_data(active_pkg), + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_DDP_PKG_FILE_VERSION_TOO_LOW: + device_printf(dev, + "The device has a DDP package that is lower than the driver supports. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", + sbuf_data(active_pkg), + ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); + break; + case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED: /* * This assumes that the active_pkg_ver will not be * initialized if the ice_ddp package version is not @@ -6558,9 +7095,7 @@ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); } else { device_printf(dev, - "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", - ice_status_str(*pkg_status), - ice_aq_str(hw->pkg_dwnld_status), + "An unknown error occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", sbuf_data(os_pkg), sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); @@ -6578,54 +7113,41 @@ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); } else { device_printf(dev, - "An unknown error (%s aq_err %s) occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. Entering Safe Mode.\n", - ice_status_str(*pkg_status), - ice_aq_str(hw->pkg_dwnld_status), + "An unknown error occurred when loading the DDP package. The ice_ddp module has package %s. The device has package %s. The driver requires version %d.%d.x.x. 
Entering Safe Mode.\n", sbuf_data(os_pkg), sbuf_data(active_pkg), ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR); } } break; - case ICE_ERR_CFG: - case ICE_ERR_BUF_TOO_SHORT: - case ICE_ERR_PARAM: + case ICE_DDP_PKG_INVALID_FILE: device_printf(dev, "The DDP package in the ice_ddp module is invalid. Entering Safe Mode\n"); break; - case ICE_ERR_FW_DDP_MISMATCH: + case ICE_DDP_PKG_FW_MISMATCH: device_printf(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n"); break; - case ICE_ERR_AQ_ERROR: - switch (hw->pkg_dwnld_status) { - case ICE_AQ_RC_ENOSEC: - case ICE_AQ_RC_EBADSIG: - device_printf(dev, - "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n"); - goto free_sbufs; - case ICE_AQ_RC_ESVN: - device_printf(dev, - "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n"); - goto free_sbufs; - case ICE_AQ_RC_EBADMAN: - case ICE_AQ_RC_EBADBUF: - device_printf(dev, - "An error occurred on the device while loading the DDP package. Entering Safe Mode.\n"); - goto free_sbufs; - default: - break; - } - /* fall-through */ + case ICE_DDP_PKG_NO_SEC_MANIFEST: + case ICE_DDP_PKG_FILE_SIGNATURE_INVALID: + device_printf(dev, + "The DDP package in the ice_ddp module cannot be loaded because its signature is not valid. Please use a valid ice_ddp module. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW: + device_printf(dev, + "The DDP package in the ice_ddp module could not be loaded because its security revision is too low. Please use an updated ice_ddp module. Entering Safe Mode.\n"); + break; + case ICE_DDP_PKG_MANIFEST_INVALID: + case ICE_DDP_PKG_BUFFER_INVALID: + device_printf(dev, + "An error occurred on the device while loading the DDP package. 
Entering Safe Mode.\n"); + break; default: device_printf(dev, - "An unknown error (%s aq_err %s) occurred when loading the DDP package. Entering Safe Mode.\n", - ice_status_str(*pkg_status), - ice_aq_str(hw->pkg_dwnld_status)); + "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n"); break; } -free_sbufs: sbuf_delete(active_pkg); sbuf_delete(os_pkg); } @@ -6643,39 +7165,71 @@ * ice_deinit_hw(). This allows the firmware reference to be immediately * released using firmware_put. */ -void +enum ice_status ice_load_pkg_file(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; - enum ice_status status; + enum ice_ddp_state state; const struct firmware *pkg; + enum ice_status status = ICE_SUCCESS; + u8 cached_layer_count; + u8 *buf_copy; pkg = firmware_get("ice_ddp"); if (!pkg) { - device_printf(dev, "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n"); + device_printf(dev, + "The DDP package module (ice_ddp) failed to load or could not be found. Entering Safe Mode.\n"); if (cold) device_printf(dev, - "The DDP package module cannot be automatically loaded while booting. You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n"); - ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); - ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); - return; + "The DDP package module cannot be automatically loaded while booting. 
You may want to specify ice_ddp_load=\"YES\" in your loader.conf\n"); + status = ICE_ERR_CFG; + goto err_load_pkg; + } + + /* Check for topology change */ + if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_TX_BALANCE)) { + cached_layer_count = hw->num_tx_sched_layers; + buf_copy = (u8 *)malloc(pkg->datasize, M_ICE, M_NOWAIT); + if (buf_copy == NULL) + return ICE_ERR_NO_MEMORY; + memcpy(buf_copy, pkg->data, pkg->datasize); + status = ice_cfg_tx_topo(&sc->hw, buf_copy, pkg->datasize); + free(buf_copy, M_ICE); + /* Success indicates a change was made */ + if (status == ICE_SUCCESS) { + /* 9 -> 5 */ + if (cached_layer_count == 9) + device_printf(dev, + "Transmit balancing feature enabled\n"); + else + device_printf(dev, + "Transmit balancing feature disabled\n"); + ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_en); + return (status); + } } /* Copy and download the pkg contents */ - status = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize); + state = ice_copy_and_init_pkg(hw, (const u8 *)pkg->data, pkg->datasize); /* Release the firmware reference */ firmware_put(pkg, FIRMWARE_UNLOAD); /* Check the active DDP package version and log a message */ - ice_log_pkg_init(sc, &status); + ice_log_pkg_init(sc, state); /* Place the driver into safe mode */ - if (status != ICE_SUCCESS) { - ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); - ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); - } + if (ice_is_init_pkg_successful(state)) + return (ICE_ERR_ALREADY_EXISTS); + +err_load_pkg: + ice_zero_bitmap(sc->feat_cap, ICE_FEATURE_COUNT); + ice_zero_bitmap(sc->feat_en, ICE_FEATURE_COUNT); + ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_cap); + ice_set_bit(ICE_FEATURE_SAFE_MODE, sc->feat_en); + + return (status); } /** @@ -7333,37 +7887,30 @@ } /** - * ice_init_dcb_setup - Initialize DCB settings for HW + * ice_start_dcbx_agent - Start DCBX agent in FW via AQ command * @sc: the device softc * - * This needs to be called after the fw_lldp_agent sysctl is added, since that - * can 
update the device's LLDP agent status if a tunable value is set. + * @pre device is DCB capable and the FW LLDP agent has started * - * Get and store the initial state of DCB settings on driver load. Print out - * informational messages as well. + * Checks DCBX status and starts the DCBX agent if it is not in + * a valid state via an AQ command. */ -void -ice_init_dcb_setup(struct ice_softc *sc) +static void +ice_start_dcbx_agent(struct ice_softc *sc) { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; bool dcbx_agent_status; enum ice_status status; - /* Don't do anything if DCB isn't supported */ - if (!hw->func_caps.common_cap.dcb) { - device_printf(dev, "%s: No DCB support\n", - __func__); - return; - } - hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw); + if (hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_DONE && hw->port_info->qos_cfg.dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) { /* * Start DCBX agent, but not LLDP. The return value isn't * checked here because a more detailed dcbx agent status is - * retrieved and checked in ice_init_dcb() and below. + * retrieved and checked in ice_init_dcb() and elsewhere. */ status = ice_aq_start_stop_dcbx(hw, true, &dcbx_agent_status, NULL); if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EPERM) @@ -7372,6 +7919,35 @@ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status)); } +} + +/** + * ice_init_dcb_setup - Initialize DCB settings for HW + * @sc: the device softc + * + * This needs to be called after the fw_lldp_agent sysctl is added, since that + * can update the device's LLDP agent status if a tunable value is set. + * + * Get and store the initial state of DCB settings on driver load. Print out + * informational messages as well. 
+ */ +void +ice_init_dcb_setup(struct ice_softc *sc) +{ + struct ice_dcbx_cfg *local_dcbx_cfg; + struct ice_hw *hw = &sc->hw; + device_t dev = sc->dev; + enum ice_status status; + u8 pfcmode_ret; + + /* Don't do anything if DCB isn't supported */ + if (!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_DCB)) { + device_printf(dev, "%s: No DCB support\n", __func__); + return; + } + + /* Starts DCBX agent if it needs starting */ + ice_start_dcbx_agent(sc); /* This sets hw->port_info->qos_cfg.is_sw_lldp */ status = ice_init_dcb(hw, true); @@ -7410,6 +7986,31 @@ ice_add_rx_lldp_filter(sc); device_printf(dev, "Firmware LLDP agent disabled\n"); } + + /* Query and cache PFC mode */ + status = ice_aq_query_pfc_mode(hw, &pfcmode_ret, NULL); + if (status) { + device_printf(dev, "PFC mode query failed, err %s aq_err %s\n", + ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + } + local_dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg; + switch (pfcmode_ret) { + case ICE_AQC_PFC_VLAN_BASED_PFC: + local_dcbx_cfg->pfc_mode = ICE_QOS_MODE_VLAN; + break; + case ICE_AQC_PFC_DSCP_BASED_PFC: + local_dcbx_cfg->pfc_mode = ICE_QOS_MODE_DSCP; + break; + default: + /* DCB is disabled, but we shouldn't get here */ + break; + } + + /* Set default SW MIB for init */ + ice_set_default_local_mib_settings(sc); + + ice_set_bit(ICE_FEATURE_DCB, sc->feat_en); } /** @@ -7419,7 +8020,7 @@ * Scans a TC mapping table inside dcbcfg to find traffic classes * enabled and @returns a bitmask of enabled TCs */ -static u8 +u8 ice_dcb_get_tc_map(const struct ice_dcbx_cfg *dcbcfg) { u8 tc_map = 0; @@ -7434,6 +8035,10 @@ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) tc_map |= BIT(dcbcfg->etscfg.prio_table[i]); break; + case ICE_QOS_MODE_DSCP: + for (i = 0; i < ICE_DSCP_NUM_VAL; i++) + tc_map |= BIT(dcbcfg->dscp_map[i]); + break; default: /* Invalid Mode */ tc_map = ICE_DFLT_TRAFFIC_CLASS; @@ -7444,32 +8049,22 @@ } /** - * ice_dcb_num_tc - Count the number of TCs in a bitmap - * @tc_map: bitmap of enabled 
traffic classes + * ice_dcb_get_num_tc - Get the number of TCs from DCBX config + * @dcbcfg: config to retrieve number of TCs from * - * @return the number of traffic classes in - * an 8-bit TC bitmap, or 0 if they are noncontiguous + * @return number of contiguous TCs found in dcbcfg's ETS Configuration + * Priority Assignment Table, a value from 1 to 8. If there are + * non-contiguous TCs used (e.g. assigning 1 and 3 without using 2), + * then returns 0. */ static u8 -ice_dcb_num_tc(u8 tc_map) +ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg) { - bool tc_unused = false; - u8 ret = 0; - int i = 0; + u8 tc_map; - ice_for_each_traffic_class(i) { - if (tc_map & BIT(i)) { - if (!tc_unused) { - ret++; - } else { - /* Non-contiguous TCs detected */ - return (0); - } - } else - tc_unused = true; - } + tc_map = ice_dcb_get_tc_map(dcbcfg); - return (ret); + return (ice_dcb_tc_contig(tc_map)); } /** @@ -7541,6 +8136,13 @@ struct ice_hw *hw = &sc->hw; bool needs_reconfig = false; + /* No change detected in DCBX config */ + if (!memcmp(old_cfg, new_cfg, sizeof(*old_cfg))) { + ice_debug(hw, ICE_DBG_DCB, + "No change detected in local DCBX configuration\n"); + return (false); + } + /* Check if ETS config has changed */ if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg, sizeof(new_cfg->etscfg))) { @@ -7555,21 +8157,29 @@ /* These are just informational */ if (memcmp(&new_cfg->etscfg.tcbwtable, &old_cfg->etscfg.tcbwtable, - sizeof(new_cfg->etscfg.tcbwtable))) + sizeof(new_cfg->etscfg.tcbwtable))) { ice_debug(hw, ICE_DBG_DCB, "ETS TCBW table changed\n"); + needs_reconfig = true; + } if (memcmp(&new_cfg->etscfg.tsatable, &old_cfg->etscfg.tsatable, - sizeof(new_cfg->etscfg.tsatable))) + sizeof(new_cfg->etscfg.tsatable))) { ice_debug(hw, ICE_DBG_DCB, "ETS TSA table changed\n"); + needs_reconfig = true; + } } /* Check if PFC config has changed */ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) { - needs_reconfig = true; ice_debug(hw, ICE_DBG_DCB, "PFC config changed\n"); + 
needs_reconfig = true; } + /* Check if APP table has changed */ + if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) + ice_debug(hw, ICE_DBG_DCB, "APP Table changed\n"); + ice_debug(hw, ICE_DBG_DCB, "%s result: %d\n", __func__, needs_reconfig); return (needs_reconfig); @@ -7604,8 +8214,9 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { + u16 qcounts[ICE_MAX_TRAFFIC_CLASS] = {}; u16 offset = 0, qmap = 0, pow = 0; - u16 num_txq_per_tc, num_rxq_per_tc, qcount_rx; + u16 num_q_per_tc, qcount_rx, rem_queues; int i, j, k; if (vsi->num_tcs == 0) { @@ -7615,15 +8226,20 @@ } qcount_rx = vsi->num_rx_queues; - num_rxq_per_tc = min(qcount_rx / vsi->num_tcs, ICE_MAX_RXQS_PER_TC); - if (!num_rxq_per_tc) - num_rxq_per_tc = 1; + num_q_per_tc = min(qcount_rx / vsi->num_tcs, ICE_MAX_RXQS_PER_TC); - /* Have TX queue count match RX queue count */ - num_txq_per_tc = num_rxq_per_tc; + if (!num_q_per_tc) + num_q_per_tc = 1; - /* find the (rounded up) power-of-2 of qcount */ - pow = flsl(num_rxq_per_tc - 1); + /* Set initial values for # of queues to use for each active TC */ + ice_for_each_traffic_class(i) + if (i < vsi->num_tcs) + qcounts[i] = num_q_per_tc; + + /* If any queues are unassigned, add them to TC 0 */ + rem_queues = qcount_rx % vsi->num_tcs; + if (rem_queues > 0) + qcounts[0] += rem_queues; /* TC mapping is a function of the number of Rx queues assigned to the * VSI for each traffic class and the offset of these queues. 
@@ -7649,8 +8265,11 @@ /* TC is enabled */ vsi->tc_info[i].qoffset = offset; - vsi->tc_info[i].qcount_rx = num_rxq_per_tc; - vsi->tc_info[i].qcount_tx = num_txq_per_tc; + vsi->tc_info[i].qcount_rx = qcounts[i]; + vsi->tc_info[i].qcount_tx = qcounts[i]; + + /* find the (rounded up) log-2 of queue count for current TC */ + pow = fls(qcounts[i] - 1); qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & ICE_AQ_VSI_TC_Q_OFFSET_M) | @@ -7659,14 +8278,14 @@ ctxt->info.tc_mapping[i] = CPU_TO_LE16(qmap); /* Store traffic class and handle data in queue structures */ - for (j = offset, k = 0; j < offset + num_txq_per_tc; j++, k++) { + for (j = offset, k = 0; j < offset + qcounts[i]; j++, k++) { vsi->tx_queues[j].q_handle = k; vsi->tx_queues[j].tc = i; - } - for (j = offset; j < offset + num_rxq_per_tc; j++) + vsi->rx_queues[j].tc = i; + } - offset += num_rxq_per_tc; + offset += qcounts[i]; } /* Rx queue mapping */ @@ -7729,6 +8348,13 @@ for (i = 0; i < num_tcs; i++) max_txqs[i] = vsi->tc_info[i].qcount_tx; + if (hw->debug_mask & ICE_DBG_DCB) { + device_printf(dev, "%s: max_txqs:", __func__); + ice_for_each_traffic_class(i) + printf(" %d", max_txqs[i]); + printf("\n"); + } + /* Update LAN Tx queue info in firmware */ status = ice_cfg_vsi_lan(hw->port_info, vsi->idx, vsi->tc_map, max_txqs); @@ -7745,6 +8371,35 @@ return (0); } +/** + * ice_dcb_tc_contig - Count TCs if they're contiguous + * @tc_map: pointer to priority table + * + * @return The number of traffic classes in + * an 8-bit TC bitmap, or if there is a gap, then returns 0. 
+ */ +static u8 +ice_dcb_tc_contig(u8 tc_map) +{ + bool tc_unused = false; + u8 ret = 0; + + /* Scan bitmask for contiguous TCs starting with TC0 */ + for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + if (tc_map & BIT(i)) { + if (!tc_unused) { + ret++; + } else { + /* Non-contiguous TCs detected */ + return (0); + } + } else + tc_unused = true; + } + + return (ret); +} + /** * ice_dcb_recfg - Reconfigure VSI with new DCB settings * @sc: the device private softc @@ -7768,7 +8423,7 @@ * the default TC instead. There's no support for * non-contiguous TCs being used. */ - if (ice_dcb_num_tc(tc_map) == 0) { + if (ice_dcb_tc_contig(tc_map) == 0) { tc_map = ICE_DFLT_TRAFFIC_CLASS; ice_set_default_local_lldp_mib(sc); } @@ -7782,9 +8437,58 @@ } +/** + * ice_set_default_local_mib_settings - Set Local LLDP MIB to default settings + * @sc: device softc structure + * + * Overwrites the driver's SW local LLDP MIB with default settings. This + * ensures the driver has a valid MIB when it next uses the Set Local LLDP MIB + * admin queue command. + */ +static void +ice_set_default_local_mib_settings(struct ice_softc *sc) +{ + struct ice_dcbx_cfg *dcbcfg; + struct ice_hw *hw = &sc->hw; + struct ice_port_info *pi; + u8 maxtcs, maxtcs_ets, old_pfc_mode; + + pi = hw->port_info; + + dcbcfg = &pi->qos_cfg.local_dcbx_cfg; + + maxtcs = hw->func_caps.common_cap.maxtc; + /* This value is only 3 bits; 8 TCs maps to 0 */ + maxtcs_ets = maxtcs & ICE_IEEE_ETS_MAXTC_M; + + /* VLAN vs DSCP mode needs to be preserved */ + old_pfc_mode = dcbcfg->pfc_mode; + + /** + * Setup the default settings used by the driver for the Set Local + * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no + * PFC, TSA=2). 
+ */ + memset(dcbcfg, 0, sizeof(*dcbcfg)); + + dcbcfg->etscfg.willing = 1; + dcbcfg->etscfg.tcbwtable[0] = 100; + dcbcfg->etscfg.maxtcs = maxtcs_ets; + dcbcfg->etscfg.tsatable[0] = 2; + + dcbcfg->etsrec = dcbcfg->etscfg; + dcbcfg->etsrec.willing = 0; + + dcbcfg->pfc.willing = 1; + dcbcfg->pfc.pfccap = maxtcs; + + dcbcfg->pfc_mode = old_pfc_mode; +} + /** * ice_do_dcb_reconfig - notify RDMA and reconfigure PF LAN VSI * @sc: the device private softc + * @pending_mib: FW has a pending MIB change to execute * * @pre Determined that the DCB configuration requires a change * @@ -7792,7 +8496,7 @@ * found in the hw struct's/port_info's/ local dcbx configuration. */ static void -ice_do_dcb_reconfig(struct ice_softc *sc) +ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib) { struct ice_aqc_port_ets_elem port_ets = { 0 }; struct ice_dcbx_cfg *local_dcbx_cfg; @@ -7800,16 +8504,31 @@ struct ice_port_info *pi; device_t dev = sc->dev; enum ice_status status; - u8 tc_map; pi = sc->hw.port_info; local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; ice_rdma_notify_dcb_qos_change(sc); + /* If there's a pending MIB, tell the FW to execute the MIB change + * now. 
+ */ + if (pending_mib) { + status = ice_lldp_execute_pending_mib(hw); + if ((status == ICE_ERR_AQ_ERROR) && + (hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)) { + device_printf(dev, + "Execute Pending LLDP MIB AQ call failed, no pending MIB\n"); + } else if (status) { + device_printf(dev, + "Execute Pending LLDP MIB AQ call failed, err %s aq_err %s\n", + ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + /* This won't break traffic, but QoS will not work as expected */ + } + } /* Set state when there's more than one TC */ - tc_map = ice_dcb_get_tc_map(local_dcbx_cfg); - if (ice_dcb_num_tc(tc_map) > 1) { + if (ice_dcb_get_num_tc(local_dcbx_cfg) > 1) { device_printf(dev, "Multiple traffic classes enabled\n"); ice_set_state(&sc->state, ICE_STATE_MULTIPLE_TCS); } else { @@ -7857,7 +8576,7 @@ struct ice_port_info *pi; device_t dev = sc->dev; struct ice_hw *hw = &sc->hw; - bool needs_reconfig; + bool needs_reconfig, mib_is_pending; enum ice_status status; u8 mib_type, bridge_type; @@ -7871,6 +8590,8 @@ ICE_AQ_LLDP_MIB_TYPE_S; bridge_type = (params->type & ICE_AQ_LLDP_BRID_TYPE_M) >> ICE_AQ_LLDP_BRID_TYPE_S; + mib_is_pending = (params->state & ICE_AQ_LLDP_MIB_CHANGE_STATE_M) >> + ICE_AQ_LLDP_MIB_CHANGE_STATE_S; /* Ignore if event is not for Nearest Bridge */ if (bridge_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID) @@ -7897,32 +8618,32 @@ tmp_dcbx_cfg = *local_dcbx_cfg; memset(local_dcbx_cfg, 0, sizeof(*local_dcbx_cfg)); - /* Get updated DCBX data from firmware */ - status = ice_get_dcb_cfg(pi); - if (status) { - device_printf(dev, - "%s: Failed to get Local DCB config; status %s, aq_err %s\n", - __func__, ice_status_str(status), - ice_aq_str(hw->adminq.sq_last_status)); - return; - } - - /* No change detected in DCBX config */ - if (!memcmp(&tmp_dcbx_cfg, local_dcbx_cfg, - sizeof(tmp_dcbx_cfg))) { - ice_debug(hw, ICE_DBG_DCB, "No change detected in local DCBX configuration\n"); - return; + /* Update the current local_dcbx_cfg with new data */ + if 
(mib_is_pending) { + ice_get_dcb_cfg_from_mib_change(pi, event); + } else { + /* Get updated DCBX data from firmware */ + status = ice_get_dcb_cfg(pi); + if (status) { + device_printf(dev, + "%s: Failed to get Local DCB config; status %s, aq_err %s\n", + __func__, ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + return; + } } /* Check to see if DCB needs reconfiguring */ needs_reconfig = ice_dcb_needs_reconfig(sc, &tmp_dcbx_cfg, local_dcbx_cfg); - if (!needs_reconfig) + if (!needs_reconfig && !mib_is_pending) return; - /* Reconfigure */ - ice_do_dcb_reconfig(sc); + /* Reconfigure -- this will also notify FW that configuration is done, + * if the FW MIB change is only pending instead of executed. + */ + ice_do_dcb_reconfig(sc, mib_is_pending); } /** @@ -8745,6 +9466,12 @@ if (err) return (err); + if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { + device_printf(dev, "%s: Driver must rebuild data structures after a reset. Operation aborted.\n", + __func__); + return (EBUSY); + } + if (ifd_len < sizeof(struct ice_nvm_access_cmd)) { device_printf(dev, "%s: ifdrv length is too small. Got %zu, but expected %zu\n", __func__, ifd_len, sizeof(struct ice_nvm_access_cmd)); @@ -9155,7 +9882,7 @@ u8 health_mask; if ((!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HEALTH_STATUS)) || - (!sc->enable_health_events)) + (!sc->enable_health_events)) return; health_mask = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK | @@ -9349,43 +10076,34 @@ } /** - * ice_set_default_local_lldp_mib - Set Local LLDP MIB to default settings + * ice_set_default_local_lldp_mib - Possibly apply local LLDP MIB to FW * @sc: device softc structure * - * This function needs to be called after link up; it makes sure the FW - * has certain PFC/DCB settings. This is intended to workaround a FW behavior - * where these settings seem to be cleared on link up. + * This function needs to be called after link up; it makes sure the FW has + * certain PFC/DCB settings. 
In certain configurations this will re-apply a + * default local LLDP MIB configuration; this is intended to workaround a FW + * behavior where these settings seem to be cleared on link up. */ void ice_set_default_local_lldp_mib(struct ice_softc *sc) { - struct ice_dcbx_cfg *dcbcfg; struct ice_hw *hw = &sc->hw; struct ice_port_info *pi; device_t dev = sc->dev; enum ice_status status; - u8 maxtcs, maxtcs_ets; - pi = hw->port_info; - - dcbcfg = &pi->qos_cfg.local_dcbx_cfg; + /* Set Local MIB can disrupt flow control settings for + * non-DCB-supported devices. + */ + if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_DCB)) + return; - maxtcs = hw->func_caps.common_cap.maxtc; - /* This value is only 3 bits; 8 TCs maps to 0 */ - maxtcs_ets = maxtcs & ICE_IEEE_ETS_MAXTC_M; + pi = hw->port_info; - /** - * Setup the default settings used by the driver for the Set Local - * LLDP MIB Admin Queue command (0x0A08). (1TC w/ 100% BW, ETS, no - * PFC). - */ - memset(dcbcfg, 0, sizeof(*dcbcfg)); - dcbcfg->etscfg.willing = 1; - dcbcfg->etscfg.tcbwtable[0] = 100; - dcbcfg->etscfg.maxtcs = maxtcs_ets; - dcbcfg->etsrec = dcbcfg->etscfg; - dcbcfg->pfc.willing = 1; - dcbcfg->pfc.pfccap = maxtcs; + /* Don't overwrite a custom SW configuration */ + if (!pi->qos_cfg.is_sw_lldp && + !ice_test_state(&sc->state, ICE_STATE_MULTIPLE_TCS)) + ice_set_default_local_mib_settings(sc); status = ice_set_dcb_cfg(pi); @@ -9488,6 +10206,10 @@ dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE; else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) dcbcfg->dcbx_mode = ICE_DCBX_MODE_IEEE; + else + device_printf(dev, "Get CEE DCB Cfg AQ cmd err %s aq_err %s\n", + ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); maxtcs = hw->func_caps.common_cap.maxtc; dcbx_status = ice_get_dcbx_status(hw); @@ -9518,6 +10240,14 @@ sbuf_printf(sbuf, "pfc.pfcena: 0x%0x\n", dcbcfg->pfc.pfcena); if (arg2 == ICE_AQ_LLDP_MIB_LOCAL) { + sbuf_printf(sbuf, "dscp_map:\n"); + for (int i = 0; i < 8; i++) { + for (int j = 0; j < 8; j++) + 
sbuf_printf(sbuf, " %d", + dcbcfg->dscp_map[i * 8 + j]); + sbuf_printf(sbuf, "\n"); + } + sbuf_printf(sbuf, "\nLocal registers:\n"); sbuf_printf(sbuf, "PRTDCB_GENC.NUMTC: %d\n", (rd32(hw, PRTDCB_GENC) & PRTDCB_GENC_NUMTC_M) @@ -9744,3 +10474,290 @@ return (0); } + +/** + * ice_sysctl_dscp2tc_map - Map DSCP to hardware TCs + * @oidp: sysctl oid structure + * @arg1: pointer to private data structure + * @arg2: which eight DSCP to UP mappings to configure (0 - 7) + * @req: sysctl request pointer + * + * Gets or sets the current DSCP to UP table cached by the driver. Since there + * are 64 possible DSCP values to configure, this sysctl only configures + * chunks of 8 in that space at a time. + * + * This sysctl is only relevant in DSCP mode, and will only function in SW DCB + * mode. + */ +static int +ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS) +{ + struct ice_softc *sc = (struct ice_softc *)arg1; + struct ice_dcbx_cfg *local_dcbx_cfg; + struct ice_port_info *pi; + struct ice_hw *hw = &sc->hw; + device_t dev = sc->dev; + enum ice_status status; + struct sbuf *sbuf; + int ret; + + /* Store input rates from user */ + char dscp_user_buf[128] = ""; + u8 new_dscp_table_seg[ICE_MAX_TRAFFIC_CLASS] = {}; + + if (ice_driver_is_detaching(sc)) + return (ESHUTDOWN); + + if (req->oldptr == NULL && req->newptr == NULL) { + ret = SYSCTL_OUT(req, 0, 128); + return (ret); + } + + pi = hw->port_info; + local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; + + sbuf = sbuf_new(NULL, dscp_user_buf, 128, SBUF_FIXEDLEN | SBUF_INCLUDENUL); + + /* Format DSCP-to-UP data for output */ + for (int i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + sbuf_printf(sbuf, "%d", local_dcbx_cfg->dscp_map[arg2 * 8 + i]); + if (i != ICE_MAX_TRAFFIC_CLASS - 1) + sbuf_printf(sbuf, ","); + } + + sbuf_finish(sbuf); + sbuf_delete(sbuf); + + /* Read in the new DSCP mapping values */ + ret = sysctl_handle_string(oidp, dscp_user_buf, sizeof(dscp_user_buf), req); + if ((ret) || (req->newptr == NULL)) + return (ret); + + /* Don't 
allow setting changes in FW DCB mode */ + if (!hw->port_info->qos_cfg.is_sw_lldp) { + device_printf(dev, "%s: DSCP mapping is not allowed in FW DCBX mode\n", + __func__); + return (EINVAL); + } + + /* Convert 8 values in a string to a table; this is similar to what + * needs to be done for ETS settings, so this function can be re-used + * for that purpose. + */ + ret = ice_ets_str_to_tbl(dscp_user_buf, new_dscp_table_seg, 8); + if (ret) { + device_printf(dev, "%s: Could not parse input DSCP2TC table: %s\n", + __func__, dscp_user_buf); + return (ret); + } + + memcpy(&local_dcbx_cfg->dscp_map[arg2 * 8], new_dscp_table_seg, + sizeof(new_dscp_table_seg)); + + local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING; + + status = ice_set_dcb_cfg(pi); + if (status) { + device_printf(dev, + "%s: Failed to set DCB config; status %s, aq_err %s\n", + __func__, ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + return (EIO); + } + + ice_do_dcb_reconfig(sc, false); + + return (0); +} + +/** + * ice_handle_debug_dump_ioctl - Handle a debug dump ioctl request + * @sc: the device private softc + * @ifd: ifdrv ioctl request pointer + */ +int +ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd) +{ + size_t ifd_len = ifd->ifd_len; + struct ice_hw *hw = &sc->hw; + device_t dev = sc->dev; + struct ice_debug_dump_cmd *ddc; + enum ice_status status; + int err = 0; + + /* Returned arguments from the Admin Queue */ + u16 ret_buf_size = 0; + u16 ret_next_table = 0; + u32 ret_next_index = 0; + + /* + * ifioctl forwards SIOCxDRVSPEC to iflib without performing + * a privilege check. In turn, iflib forwards the ioctl to the driver + * without performing a privilege check. Perform one here to ensure + * that non-privileged threads cannot access this interface. 
+ */ + err = priv_check(curthread, PRIV_DRIVER); + if (err) + return (err); + + if (ice_test_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET)) { + device_printf(dev, + "%s: Driver must rebuild data structures after a reset. Operation aborted.\n", + __func__); + return (EBUSY); + } + + if (ifd_len < sizeof(*ddc)) { + device_printf(dev, + "%s: ifdrv length is too small. Got %zu, but expected %zu\n", + __func__, ifd_len, sizeof(*ddc)); + return (EINVAL); + } + + if (ifd->ifd_data == NULL) { + device_printf(dev, "%s: ifd data buffer not present.\n", + __func__); + return (EINVAL); + } + + ddc = (struct ice_debug_dump_cmd *)malloc(ifd_len, M_ICE, M_ZERO | M_NOWAIT); + if (!ddc) + return (ENOMEM); + + /* Copy the NVM access command and data in from user space */ + /* coverity[tainted_data_argument] */ + err = copyin(ifd->ifd_data, ddc, ifd_len); + if (err) { + device_printf(dev, "%s: Copying request from user space failed, err %s\n", + __func__, ice_err_str(err)); + goto out; + } + + /* The data_size arg must be at least 1 for the AQ cmd to work */ + if (ddc->data_size == 0) { + device_printf(dev, + "%s: data_size must be greater than 0\n", __func__); + err = EINVAL; + goto out; + } + /* ...and it can't be too long */ + if (ddc->data_size > (ifd_len - sizeof(*ddc))) { + device_printf(dev, + "%s: data_size (%d) is larger than ifd_len space (%zu)?\n", __func__, + ddc->data_size, ifd_len - sizeof(*ddc)); + err = EINVAL; + goto out; + } + + /* Make sure any possible data buffer space is zeroed */ + memset(ddc->data, 0, ifd_len - sizeof(*ddc)); + + status = ice_aq_get_internal_data(hw, ddc->cluster_id, ddc->table_id, ddc->offset, + (u8 *)ddc->data, ddc->data_size, &ret_buf_size, &ret_next_table, &ret_next_index, NULL); + ice_debug(hw, ICE_DBG_DIAG, "%s: ret_buf_size %d, ret_next_table %d, ret_next_index %d\n", + __func__, ret_buf_size, ret_next_table, ret_next_index); + if (status) { + device_printf(dev, + "%s: Get Internal Data AQ command failed, err %s aq_err %s\n", + 
__func__, + ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + goto aq_error; + } + + ddc->table_id = ret_next_table; + ddc->offset = ret_next_index; + ddc->data_size = ret_buf_size; + + /* Copy the possibly modified contents of the handled request out */ + err = copyout(ddc, ifd->ifd_data, ifd->ifd_len); + if (err) { + device_printf(dev, "%s: Copying response back to user space failed, err %s\n", + __func__, ice_err_str(err)); + goto out; + } + +aq_error: + /* Convert private status to an error code for proper ioctl response */ + switch (status) { + case ICE_SUCCESS: + err = (0); + break; + case ICE_ERR_NO_MEMORY: + err = (ENOMEM); + break; + case ICE_ERR_OUT_OF_RANGE: + err = (ENOTTY); + break; + case ICE_ERR_AQ_ERROR: + err = (EIO); + break; + case ICE_ERR_PARAM: + default: + err = (EINVAL); + break; + } + +out: + free(ddc, M_ICE); + return (err); +} + +/** + * ice_sysctl_allow_no_fec_mod_in_auto - Change Auto FEC behavior + * @oidp: sysctl oid structure + * @arg1: pointer to private data structure + * @arg2: unused + * @req: sysctl request pointer + * + * Allows user to let "No FEC" mode to be used in "Auto" + * FEC mode during FEC negotiation. This is only supported + * on newer firmware versions. 
+ */ +static int +ice_sysctl_allow_no_fec_mod_in_auto(SYSCTL_HANDLER_ARGS) +{ + struct ice_softc *sc = (struct ice_softc *)arg1; + struct ice_hw *hw = &sc->hw; + device_t dev = sc->dev; + u8 user_flag; + int ret; + + UNREFERENCED_PARAMETER(arg2); + + ret = priv_check(curthread, PRIV_DRIVER); + if (ret) + return (ret); + + if (ice_driver_is_detaching(sc)) + return (ESHUTDOWN); + + user_flag = (u8)sc->allow_no_fec_mod_in_auto; + + ret = sysctl_handle_bool(oidp, &user_flag, 0, req); + if ((ret) || (req->newptr == NULL)) + return (ret); + + if (!ice_fw_supports_fec_dis_auto(hw)) { + log(LOG_INFO, + "%s: Enabling or disabling of auto configuration of modules that don't support FEC is unsupported by the current firmware\n", + device_get_nameunit(dev)); + return (ENODEV); + } + + if (user_flag == (bool)sc->allow_no_fec_mod_in_auto) + return (0); + + sc->allow_no_fec_mod_in_auto = (u8)user_flag; + + if (sc->allow_no_fec_mod_in_auto) + log(LOG_INFO, "%s: Enabled auto configuration of No FEC modules\n", + device_get_nameunit(dev)); + else + log(LOG_INFO, + "%s: Auto configuration of No FEC modules reset to NVM defaults\n", + device_get_nameunit(dev)); + + return (0); +} + diff --git a/sys/dev/ice/ice_nvm.h b/sys/dev/ice/ice_nvm.h --- a/sys/dev/ice/ice_nvm.h +++ b/sys/dev/ice/ice_nvm.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -155,5 +155,6 @@ enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum); enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw); -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags); +enum ice_status +ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags); #endif /* _ICE_NVM_H_ */ diff --git a/sys/dev/ice/ice_nvm.c b/sys/dev/ice/ice_nvm.c --- a/sys/dev/ice/ice_nvm.c +++ b/sys/dev/ice/ice_nvm.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -406,7 +406,7 @@ status = ice_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true); /* Report the number of words successfully read */ - *words = bytes / 2; + *words = (u16)(bytes / 2); /* Byte swap the words up to the amount we actually read */ for (i = 0; i < *words; i++) @@ -983,7 +983,6 @@ struct ice_orom_civd_info *civd) { struct ice_orom_civd_info tmp; - enum ice_status status; u32 offset; /* The CIVD section is located in the Option ROM aligned to 512 bytes. @@ -992,6 +991,7 @@ * equal 0. 
*/ for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) { + enum ice_status status; u8 sum = 0, i; status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, @@ -1726,22 +1726,41 @@ /** * ice_nvm_write_activate * @hw: pointer to the HW struct - * @cmd_flags: NVM activate admin command bits (banks to be validated) + * @cmd_flags: flags for write activate command + * @response_flags: response indicators from firmware * * Update the control word with the required banks' validity bits * and dumps the Shadow RAM to flash (0x0707) + * + * cmd_flags controls which banks to activate, the preservation level to use + * when activating the NVM bank, and whether an EMP reset is required for + * activation. + * + * Note that the 16bit cmd_flags value is split between two separate 1 byte + * flag values in the descriptor. + * + * On successful return of the firmware command, the response_flags variable + * is updated with the flags reported by firmware indicating certain status, + * such as whether EMP reset is enabled. 
*/ -enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) +enum ice_status +ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags) { struct ice_aqc_nvm *cmd; struct ice_aq_desc desc; + enum ice_status status; cmd = &desc.params.nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate); - cmd->cmd_flags = cmd_flags; + cmd->cmd_flags = ICE_LO_BYTE(cmd_flags); + cmd->offset_high = ICE_HI_BYTE(cmd_flags); - return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (!status && response_flags) + *response_flags = cmd->cmd_flags; + + return status; } /** @@ -1847,12 +1866,12 @@ /* Update flash data */ status = ice_aq_update_nvm(hw, ICE_AQC_NVM_MINSREV_MOD_ID, 0, sizeof(data), &data, - true, ICE_AQC_NVM_SPECIAL_UPDATE, NULL); + false, ICE_AQC_NVM_SPECIAL_UPDATE, NULL); if (status) goto exit_release_res; /* Dump the Shadow RAM to the flash */ - status = ice_nvm_write_activate(hw, 0); + status = ice_nvm_write_activate(hw, 0, NULL); exit_release_res: ice_release_nvm(hw); diff --git a/sys/dev/ice/ice_opts.h b/sys/dev/ice/ice_opts.h --- a/sys/dev/ice/ice_opts.h +++ b/sys/dev/ice/ice_opts.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_osdep.h b/sys/dev/ice/ice_osdep.h --- a/sys/dev/ice/ice_osdep.h +++ b/sys/dev/ice/ice_osdep.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_osdep.c b/sys/dev/ice/ice_osdep.c --- a/sys/dev/ice/ice_osdep.c +++ b/sys/dev/ice/ice_osdep.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_protocol_type.h b/sys/dev/ice/ice_protocol_type.h --- a/sys/dev/ice/ice_protocol_type.h +++ b/sys/dev/ice/ice_protocol_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -59,6 +59,7 @@ ICE_MAC_OFOS = 0, ICE_MAC_IL, ICE_ETYPE_OL, + ICE_ETYPE_IL, ICE_VLAN_OFOS, ICE_IPV4_OFOS, ICE_IPV4_IL, @@ -73,6 +74,8 @@ ICE_VXLAN_GPE, ICE_NVGRE, ICE_GTP, + ICE_GTP_NO_PAY, + ICE_PPPOE, ICE_PROTOCOL_LAST }; @@ -104,6 +107,8 @@ ICE_SW_TUN_GTP_IPV4_UDP, ICE_SW_TUN_GTP_IPV6_TCP, ICE_SW_TUN_GTP_IPV6_UDP, + ICE_SW_TUN_GTPU, + ICE_SW_TUN_GTPC, ICE_SW_TUN_IPV4_GTPU_IPV4, ICE_SW_TUN_IPV4_GTPU_IPV6, ICE_SW_TUN_IPV6_GTPU_IPV4, @@ -141,6 +146,7 @@ ICE_PROT_IPV6_OF_OR_S = 40, ICE_PROT_IPV6_IL = 41, ICE_PROT_IPV6_IL_IL = 42, + ICE_PROT_IPV6_NEXT_PROTO = 43, ICE_PROT_IPV6_FRAG = 47, ICE_PROT_TCP_IL = 49, ICE_PROT_UDP_OF = 52, @@ -165,9 +171,11 @@ #define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */ +#define ICE_NAN_OFFSET 511 #define ICE_MAC_OFOS_HW 1 #define ICE_MAC_IL_HW 4 #define ICE_ETYPE_OL_HW 9 +#define ICE_ETYPE_IL_HW 10 #define ICE_VLAN_OF_HW 16 #define ICE_VLAN_OL_HW 17 #define ICE_IPV4_OFOS_HW 32 @@ -184,12 +192,15 @@ */ #define ICE_UDP_OF_HW 52 /* UDP Tunnels */ #define ICE_GRE_OF_HW 64 /* NVGRE */ +#define ICE_PPPOE_HW 103 #define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */ #define ICE_MDID_SIZE 2 -#define 
ICE_TUN_FLAG_MDID 21 -#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID) +#define ICE_TUN_FLAG_MDID 20 +#define ICE_TUN_FLAG_MDID_OFF(word) \ + (ICE_MDID_SIZE * (ICE_TUN_FLAG_MDID + (word))) #define ICE_TUN_FLAG_MASK 0xFF +#define ICE_DIR_FLAG_MASK 0x10 #define ICE_TUN_FLAG_VLAN_MASK 0x01 #define ICE_TUN_FLAG_FV_IND 2 @@ -287,6 +298,13 @@ u8 qfi; u8 rsvrd; }; +struct ice_pppoe_hdr { + u8 rsrvd_ver_type; + u8 rsrvd_code; + __be16 session_id; + __be16 length; + __be16 ppp_prot_id; /* control and data only */ +}; struct ice_nvgre { __be16 flags; @@ -305,6 +323,7 @@ struct ice_udp_tnl_hdr tnl_hdr; struct ice_nvgre nvgre_hdr; struct ice_udp_gtp_hdr gtp_hdr; + struct ice_pppoe_hdr pppoe_hdr; }; /* This is mapping table entry that maps every word within a given protocol diff --git a/sys/dev/ice/ice_rdma.h b/sys/dev/ice/ice_rdma.h --- a/sys/dev/ice/ice_rdma.h +++ b/sys/dev/ice/ice_rdma.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_rdma.c b/sys/dev/ice/ice_rdma.c --- a/sys/dev/ice/ice_rdma.c +++ b/sys/dev/ice/ice_rdma.c @@ -241,9 +241,7 @@ switch(res->res_type) { case ICE_RDMA_QSET_ALLOC: dcbx_cfg = &hw->port_info->qos_cfg.local_dcbx_cfg; - for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { - ena_tc |= BIT(dcbx_cfg->etscfg.prio_table[i]); - } + ena_tc = ice_dcb_get_tc_map(dcbx_cfg); ice_debug(hw, ICE_DBG_RDMA, "%s:%d ena_tc=%x\n", __func__, __LINE__, ena_tc); status = ice_cfg_vsi_rdma(hw->port_info, vsi->idx, ena_tc, @@ -401,6 +399,10 @@ qos_info->apps[j].prot_id = dcbx_cfg->app[j].prot_id; qos_info->apps[j].selector = dcbx_cfg->app[j].selector; } + + /* Gather DSCP-to-TC mapping and QoS/PFC mode */ + memcpy(qos_info->dscp_map, dcbx_cfg->dscp_map, sizeof(qos_info->dscp_map)); + qos_info->pfc_mode = dcbx_cfg->pfc_mode; } /** @@ -481,6 +483,7 @@ ice_rdma_register(struct ice_rdma_info *info) { struct ice_rdma_entry *entry; + struct ice_softc *sc; int err = 0; sx_xlock(&ice_rdma.mtx); @@ -513,6 +516,12 @@ */ LIST_FOREACH(entry, &ice_rdma.peers, node) { kobj_init((kobj_t)&entry->peer, ice_rdma.peer_class); + /* Gather DCB/QOS info into peer */ + sc = __containerof(entry, struct ice_softc, rdma_entry); + memset(&entry->peer.initial_qos_info, 0, sizeof(entry->peer.initial_qos_info)); + ice_rdma_cp_qos_info(&sc->hw, &sc->hw.port_info->qos_cfg.local_dcbx_cfg, + &entry->peer.initial_qos_info); + IRDMA_PROBE(&entry->peer); if (entry->initiated) IRDMA_OPEN(&entry->peer); diff --git a/sys/dev/ice/ice_resmgr.h b/sys/dev/ice/ice_resmgr.h --- a/sys/dev/ice/ice_resmgr.h +++ b/sys/dev/ice/ice_resmgr.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_resmgr.c b/sys/dev/ice/ice_resmgr.c --- a/sys/dev/ice/ice_resmgr.c +++ b/sys/dev/ice/ice_resmgr.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_rss.h b/sys/dev/ice/ice_rss.h --- a/sys/dev/ice/ice_rss.h +++ b/sys/dev/ice/ice_rss.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_sbq_cmd.h b/sys/dev/ice/ice_sbq_cmd.h --- a/sys/dev/ice/ice_sbq_cmd.h +++ b/sys/dev/ice/ice_sbq_cmd.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_sched.h b/sys/dev/ice/ice_sched.h --- a/sys/dev/ice/ice_sched.h +++ b/sys/dev/ice/ice_sched.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -35,6 +35,9 @@ #include "ice_common.h" +#define ICE_SCHED_5_LAYERS 5 +#define ICE_SCHED_9_LAYERS 9 + #define ICE_QGRP_LAYER_OFFSET 2 #define ICE_VSI_LAYER_OFFSET 4 #define ICE_AGG_LAYER_OFFSET 6 @@ -106,10 +109,18 @@ struct ice_aqc_rl_profile_elem *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status +ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes, + struct ice_aqc_node_attr_elem *buf, u16 buf_size, + struct ice_sq_cd *cd); +enum ice_status ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes, struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status +ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, + struct ice_aqc_move_elem *buf, u16 buf_size, + u16 *grps_movd, struct ice_sq_cd *cd); +enum ice_status ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd); diff --git a/sys/dev/ice/ice_sched.c b/sys/dev/ice/ice_sched.c --- a/sys/dev/ice/ice_sched.c +++ b/sys/dev/ice/ice_sched.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -468,7 +468,7 @@ * * Move scheduling elements (0x0408) */ -static enum ice_status +enum ice_status ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_move_elem *buf, u16 buf_size, u16 *grps_movd, struct ice_sq_cd *cd) @@ -909,6 +909,33 @@ hw->max_cgds = 0; } +/** + * ice_aq_cfg_node_attr - configure nodes' per-cone flattening attributes + * @hw: pointer to the HW struct + * @num_nodes: the number of nodes whose attributes to configure + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @cd: pointer to command details structure or NULL + * + * Configure Node Attributes (0x0417) + */ +enum ice_status +ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes, + struct ice_aqc_node_attr_elem *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_node_attr *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.node_attr; + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_cfg_node_attr); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + cmd->num_entries = CPU_TO_LE16(num_nodes); + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} + /** * ice_aq_cfg_l2_node_cgd - configures L2 node to CGD mapping * @hw: pointer to the HW struct @@ -1173,12 +1200,11 @@ * 5 or less sw_entry_point_layer */ /* calculate the VSI layer based on number of layers. */ - if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; + else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) + /* qgroup and VSI layers are same */ + return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; return hw->sw_entry_point_layer; } @@ -1195,12 +1221,8 @@ * 7 or less sw_entry_point_layer */ /* calculate the aggregator layer based on number of layers. 
*/ - if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; return hw->sw_entry_point_layer; } @@ -1417,9 +1439,10 @@ if (status) goto sched_query_out; - hw->num_tx_sched_layers = LE16_TO_CPU(buf->sched_props.logical_levels); + hw->num_tx_sched_layers = + (u8)LE16_TO_CPU(buf->sched_props.logical_levels); hw->num_tx_sched_phys_layers = - LE16_TO_CPU(buf->sched_props.phys_levels); + (u8)LE16_TO_CPU(buf->sched_props.phys_levels); hw->flattened_layers = buf->sched_props.flattening_bitmap; hw->max_cgds = buf->sched_props.max_pf_cgds; @@ -1585,10 +1608,11 @@ { struct ice_sched_node *vsi_node, *qgrp_node; struct ice_vsi_ctx *vsi_ctx; + u8 qgrp_layer, vsi_layer; u16 max_children; - u8 qgrp_layer; qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); + vsi_layer = ice_sched_get_vsi_layer(pi->hw); max_children = pi->hw->max_children[qgrp_layer]; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); @@ -1599,6 +1623,12 @@ if (!vsi_node) return NULL; + /* If the queue group and vsi layer are same then queues + * are all attached directly to VSI + */ + if (qgrp_layer == vsi_layer) + return vsi_node; + /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { @@ -1748,7 +1778,6 @@ { struct ice_sched_node *parent, *node; struct ice_hw *hw = pi->hw; - enum ice_status status; u32 first_node_teid; u16 num_added = 0; u8 i, qgl, vsil; @@ -1757,6 +1786,8 @@ vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle); for (i = vsil + 1; i <= qgl; i++) { + enum ice_status status; + if (!parent) return ICE_ERR_CFG; @@ -1850,7 +1881,6 @@ struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *parent = tc_node; - enum ice_status status; u32 
first_node_teid; u16 num_added = 0; u8 i, vsil; @@ -1860,6 +1890,8 @@ vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { + enum ice_status status; + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, @@ -3928,7 +3960,7 @@ u16 wakeup = 0; /* Get the wakeup integer value */ - bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE); + bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE); wakeup_int = DIV_S64(hw->psm_clk_freq, bytes_per_sec); if (wakeup_int > 63) { wakeup = (u16)((1 << 15) | wakeup_int); @@ -3937,7 +3969,7 @@ * Convert Integer value to a constant multiplier */ wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; - wakeup_a = DIV_S64(ICE_RL_PROF_MULTIPLIER * + wakeup_a = DIV_S64((s64)ICE_RL_PROF_MULTIPLIER * hw->psm_clk_freq, bytes_per_sec); /* Get Fraction value */ @@ -3980,13 +4012,13 @@ return status; /* Bytes per second from Kbps */ - bytes_per_sec = DIV_S64(bw * 1000, BITS_PER_BYTE); + bytes_per_sec = DIV_S64((s64)bw * 1000, BITS_PER_BYTE); /* encode is 6 bits but really useful are 5 bits */ for (i = 0; i < 64; i++) { u64 pow_result = BIT_ULL(i); - ts_rate = DIV_S64(hw->psm_clk_freq, + ts_rate = DIV_S64((s64)hw->psm_clk_freq, pow_result * ICE_RL_PROF_TS_MULTIPLIER); if (ts_rate <= 0) continue; @@ -4045,7 +4077,7 @@ enum ice_status status; u8 profile_type; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + if (!hw || layer_num >= hw->num_tx_sched_layers) return NULL; switch (rl_type) { case ICE_MIN_BW: @@ -4061,8 +4093,6 @@ return NULL; } - if (!hw) - return NULL; LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num], ice_aqc_rl_profile_info, list_entry) if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == @@ -4264,7 +4294,7 @@ struct ice_aqc_rl_profile_info *rl_prof_elem; enum ice_status status = ICE_SUCCESS; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + if (!hw || layer_num >= hw->num_tx_sched_layers) return ICE_ERR_PARAM; /* Check the 
existing list for RL profile */ LIST_FOR_EACH_ENTRY(rl_prof_elem, &hw->rl_prof_list[layer_num], @@ -4844,7 +4874,6 @@ enum ice_agg_type agg_type, u8 tc) { struct ice_sched_node *node = NULL; - struct ice_sched_node *child_node; switch (agg_type) { case ICE_AGG_TYPE_VSI: { @@ -4872,16 +4901,19 @@ case ICE_AGG_TYPE_Q: /* The current implementation allows single queue to modify */ - node = ice_sched_get_node(pi, id); + node = ice_sched_find_node_by_teid(pi->root, id); break; - case ICE_AGG_TYPE_QG: + case ICE_AGG_TYPE_QG: { + struct ice_sched_node *child_node; + /* The current implementation allows single qg to modify */ - child_node = ice_sched_get_node(pi, id); + child_node = ice_sched_find_node_by_teid(pi->root, id); if (!child_node) break; node = child_node->parent; break; + } default: break; diff --git a/sys/dev/ice/ice_status.h b/sys/dev/ice/ice_status.h --- a/sys/dev/ice/ice_status.h +++ b/sys/dev/ice/ice_status.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_strings.c b/sys/dev/ice/ice_strings.c --- a/sys/dev/ice/ice_strings.c +++ b/sys/dev/ice/ice_strings.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -624,6 +624,8 @@ return ICE_FEC_STRING_BASER; case ICE_FEC_NONE: return ICE_FEC_STRING_NONE; + case ICE_FEC_DIS_AUTO: + return ICE_FEC_STRING_DIS_AUTO; } /* The compiler generates errors on unhandled enum values if we omit @@ -762,6 +764,8 @@ return "FWD_TO_QGRP"; case ICE_DROP_PACKET: return "DROP_PACKET"; + case ICE_LG_ACTION: + return "LG_ACTION"; case ICE_INVAL_ACT: return "INVAL_ACT"; } @@ -1037,6 +1041,8 @@ return "LLDP_RX_FLTR_FROM_DRIVER"; case ICE_STATE_MULTIPLE_TCS: return "MULTIPLE_TCS"; + case ICE_STATE_DO_FW_DEBUG_DUMP: + return "DO_FW_DEBUG_DUMP"; case ICE_STATE_LAST: return NULL; } diff --git a/sys/dev/ice/ice_switch.h b/sys/dev/ice/ice_switch.h --- a/sys/dev/ice/ice_switch.h +++ b/sys/dev/ice/ice_switch.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,7 +33,7 @@ #ifndef _ICE_SWITCH_H_ #define _ICE_SWITCH_H_ -#include "ice_common.h" +#include "ice_type.h" #include "ice_protocol_type.h" #define ICE_SW_CFG_MAX_BUF_LEN 2048 @@ -43,6 +43,14 @@ #define ICE_FLTR_TX BIT(1) #define ICE_FLTR_TX_RX (ICE_FLTR_RX | ICE_FLTR_TX) +#define ICE_PROFID_IPV4_GTPC_TEID 41 +#define ICE_PROFID_IPV4_GTPC_NO_TEID 42 +#define ICE_PROFID_IPV4_GTPU_TEID 43 +#define ICE_PROFID_IPV6_GTPC_TEID 44 +#define ICE_PROFID_IPV6_GTPC_NO_TEID 45 +#define ICE_PROFID_IPV6_GTPU_TEID 46 +#define ICE_PROFID_IPV6_GTPU_IPV6_TCP 70 + #define DUMMY_ETH_HDR_LEN 16 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \ @@ -232,6 +240,7 @@ u32 priority; u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */ u16 fltr_rule_id; + u16 lg_id; struct ice_adv_rule_flags_info flags_info; }; @@ -382,6 +391,42 @@ ICE_PROMISC_VLAN_TX = 0x80, }; +struct ice_dummy_pkt_offsets { + enum 
ice_protocol_type type; + u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */ +}; + +void +ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + enum ice_sw_tunnel_type tun_type, const u8 **pkt, + u16 *pkt_len, + const struct ice_dummy_pkt_offsets **offsets); + +enum ice_status +ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, + struct ice_aqc_sw_rules_elem *s_rule, + const u8 *dummy_pkt, u16 pkt_len, + const struct ice_dummy_pkt_offsets *offsets); + +enum ice_status +ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid); + +struct ice_adv_fltr_mgmt_list_entry * +ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, + u16 lkups_cnt, u16 recp_id, + struct ice_adv_rule_info *rinfo); + +enum ice_status +ice_adv_add_update_vsi_list(struct ice_hw *hw, + struct ice_adv_fltr_mgmt_list_entry *m_entry, + struct ice_adv_rule_info *cur_fltr, + struct ice_adv_rule_info *new_fltr); + +struct ice_vsi_list_map_info * +ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle, + u16 *vsi_list_id); + /* VSI related commands */ enum ice_status ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, @@ -468,6 +513,8 @@ enum ice_status ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, u8 direction); +bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, + bool *rule_exists); enum ice_status ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid); @@ -498,4 +545,7 @@ u16 vsi_handle); void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw); void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw); +enum ice_status +ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, + u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd); #endif /* _ICE_SWITCH_H_ */ diff --git a/sys/dev/ice/ice_switch.c b/sys/dev/ice/ice_switch.c --- 
a/sys/dev/ice/ice_switch.c +++ b/sys/dev/ice/ice_switch.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -30,6 +30,7 @@ */ /*$FreeBSD$*/ +#include "ice_common.h" #include "ice_switch.h" #include "ice_flex_type.h" #include "ice_flow.h" @@ -39,6 +40,7 @@ #define ICE_ETH_VLAN_TCI_OFFSET 14 #define ICE_MAX_VLAN_ID 0xFFF #define ICE_IPV6_ETHER_ID 0x86DD +#define ICE_PPP_IPV6_PROTO_ID 0x0057 #define ICE_ETH_P_8021Q 0x8100 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem @@ -60,6 +62,9 @@ 0x2, 0, 0, 0, 0, 0, 0x81, 0, 0, 0}; +static bool +ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle); + /** * ice_init_def_sw_recp - initialize the recipe book keeping tables * @hw: pointer to the HW struct @@ -819,6 +824,8 @@ else /* remove VSI from mirror rule */ mr_list[i] = CPU_TO_LE16(id); } + + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); } cmd = &desc.params.add_update_rule; @@ -902,6 +909,7 @@ lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || + lkup_type == ICE_SW_LKUP_DFLT || lkup_type == ICE_SW_LKUP_LAST) { sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP); } else if (lkup_type == ICE_SW_LKUP_VLAN) { @@ -1002,7 +1010,7 @@ * * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware */ -static enum ice_status +enum ice_status ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { @@ -1047,8 +1055,6 @@ pi->sw_id = swid; pi->pf_vf_num = pf_vf_num; pi->is_vf = is_vf; - pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; - pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; break; default: ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n"); @@ -1517,7 +1523,7 @@ 
ice_aqc_opc_update_sw_rules, NULL); if (!status) { m_ent->lg_act_idx = l_id; - m_ent->counter_index = counter_id; + m_ent->counter_index = (u8)counter_id; } ice_free(hw, lg_act); @@ -1588,6 +1594,7 @@ lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || + lkup_type == ICE_SW_LKUP_DFLT || lkup_type == ICE_SW_LKUP_LAST) rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : ICE_AQC_SW_RULES_T_VSI_LIST_SET; @@ -1748,11 +1755,12 @@ */ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) { - struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; enum ice_status status = ICE_SUCCESS; struct LIST_HEAD_TYPE *rule_head; struct ice_lock *rule_lock; /* Lock to protect filter rule list */ + struct ice_switch_info *sw; + sw = hw->switch_info; rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; @@ -1811,7 +1819,6 @@ { enum ice_status status = ICE_SUCCESS; u16 vsi_list_id = 0; - if ((cur_fltr->fltr_act == ICE_FWD_TO_Q || cur_fltr->fltr_act == ICE_FWD_TO_QGRP)) return ICE_ERR_NOT_IMPL; @@ -1936,7 +1943,7 @@ * handle element. This can be extended further to search VSI list with more * than 1 vsi_count. Returns pointer to VSI list entry if found. 
 */ -static struct ice_vsi_list_map_info * +struct ice_vsi_list_map_info * ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle, u16 *vsi_list_id) { @@ -2352,7 +2359,8 @@ if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; + if (m_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI) + m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id; /* update the src in case it is VSI num */ if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI) return ICE_ERR_PARAM; @@ -2780,6 +2788,83 @@ return ice_remove_eth_mac_rule(hw, em_list, hw->switch_info); } +/** + * ice_get_lg_act_aqc_res_type - get resource type for a large action + * @res_type: resource type to be filled in case of function success + * @num_acts: number of actions to hold with a large action entry + * + * Get resource type for a large action depending on the number + * of single actions that it contains. + */ +static enum ice_status +ice_get_lg_act_aqc_res_type(u16 *res_type, int num_acts) +{ + if (!res_type) + return ICE_ERR_BAD_PTR; + + /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1. + * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2. + * If num_acts is greater than 2, then use + * ICE_AQC_RES_TYPE_WIDE_TABLE_4. + * The num_acts cannot be equal to 0 or greater than 4. 
+ */ + switch (num_acts) { + case 1: + *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_1; + break; + case 2: + *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_2; + break; + case 3: + case 4: + *res_type = ICE_AQC_RES_TYPE_WIDE_TABLE_4; + break; + default: + return ICE_ERR_PARAM; + } + + return ICE_SUCCESS; +} + +/** + * ice_alloc_res_lg_act - add large action resource + * @hw: pointer to the hardware structure + * @l_id: large action ID to fill it in + * @num_acts: number of actions to hold with a large action entry + */ +static enum ice_status +ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts) +{ + struct ice_aqc_alloc_free_res_elem *sw_buf; + enum ice_status status; + u16 buf_len, res_type; + + if (!l_id) + return ICE_ERR_BAD_PTR; + + status = ice_get_lg_act_aqc_res_type(&res_type, num_acts); + if (status) + return status; + + /* Allocate resource for large action */ + buf_len = ice_struct_size(sw_buf, elem, 1); + sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); + if (!sw_buf) + return ICE_ERR_NO_MEMORY; + + sw_buf->res_type = CPU_TO_LE16(res_type); + sw_buf->num_elems = CPU_TO_LE16(1); + + status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, + ice_aqc_opc_alloc_res, NULL); + if (!status) + *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp); + + ice_free(hw, sw_buf); + + return status; +} + /** * ice_rem_sw_rule_info * @hw: pointer to the hardware structure @@ -2832,24 +2917,19 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, u8 direction) { - struct ice_aqc_sw_rules_elem *s_rule; + struct ice_fltr_list_entry f_list_entry; + struct ice_sw_recipe *recp_list; struct ice_fltr_info f_info; struct ice_hw *hw = pi->hw; - enum ice_adminq_opc opcode; enum ice_status status; - u16 s_rule_size; + u8 lport = pi->lport; u16 hw_vsi_id; + recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT]; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; - hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); - s_rule_size = set ? 
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : - ICE_SW_RULE_RX_TX_NO_HDR_SIZE; - - s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size); - if (!s_rule) - return ICE_ERR_NO_MEMORY; + hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM); @@ -2857,54 +2937,63 @@ f_info.flag = direction; f_info.fltr_act = ICE_FWD_TO_VSI; f_info.fwd_id.hw_vsi_id = hw_vsi_id; + f_info.vsi_handle = vsi_handle; if (f_info.flag & ICE_FLTR_RX) { f_info.src = pi->lport; f_info.src_id = ICE_SRC_ID_LPORT; - if (!set) - f_info.fltr_rule_id = - pi->dflt_rx_vsi_rule_id; } else if (f_info.flag & ICE_FLTR_TX) { f_info.src_id = ICE_SRC_ID_VSI; f_info.src = hw_vsi_id; - if (!set) - f_info.fltr_rule_id = - pi->dflt_tx_vsi_rule_id; } + f_list_entry.fltr_info = f_info; if (set) - opcode = ice_aqc_opc_add_sw_rules; + status = ice_add_rule_internal(hw, recp_list, lport, + &f_list_entry); else - opcode = ice_aqc_opc_remove_sw_rules; - - ice_fill_sw_rule(hw, &f_info, s_rule, opcode); - - status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL); - if (status || !(f_info.flag & ICE_FLTR_TX_RX)) - goto out; - if (set) { - u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index); - - if (f_info.flag & ICE_FLTR_TX) { - pi->dflt_tx_vsi_num = hw_vsi_id; - pi->dflt_tx_vsi_rule_id = index; - } else if (f_info.flag & ICE_FLTR_RX) { - pi->dflt_rx_vsi_num = hw_vsi_id; - pi->dflt_rx_vsi_rule_id = index; - } - } else { - if (f_info.flag & ICE_FLTR_TX) { - pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL; - pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT; - } else if (f_info.flag & ICE_FLTR_RX) { - pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL; - pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT; + status = ice_remove_rule_internal(hw, recp_list, + &f_list_entry); + + return status; +} + +/** + * ice_check_if_dflt_vsi - check if VSI is default VSI + * @pi: pointer to the port_info structure + * @vsi_handle: vsi handle to check for in filter list + * @rule_exists: indicates if there are any 
VSI's in the rule list + * + * checks if the VSI is in a default VSI list, and also indicates + * if the default VSI list is empty + */ +bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, + bool *rule_exists) +{ + struct ice_fltr_mgmt_list_entry *fm_entry; + struct LIST_HEAD_TYPE *rule_head; + struct ice_sw_recipe *recp_list; + struct ice_lock *rule_lock; + bool ret = false; + recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT]; + rule_lock = &recp_list->filt_rule_lock; + rule_head = &recp_list->filt_rules; + + ice_acquire_lock(rule_lock); + + if (rule_exists && !LIST_EMPTY(rule_head)) + *rule_exists = true; + + LIST_FOR_EACH_ENTRY(fm_entry, rule_head, + ice_fltr_mgmt_list_entry, list_entry) { + if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) { + ret = true; + break; } } -out: - ice_free(hw, s_rule); - return status; + ice_release_lock(rule_lock); + return ret; } /** @@ -3546,6 +3635,13 @@ LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry, list_entry) { + /* Avoid enabling or disabling vlan zero twice when in double + * vlan mode + */ + if (ice_is_dvm_ena(hw) && + list_itr->fltr_info.l_data.vlan.tpid == 0) + continue; + vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id; if (rm_vlan_promisc) status = _ice_clear_vsi_promisc(hw, vsi_handle, @@ -3555,7 +3651,7 @@ status = _ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vlan_id, lport, sw); - if (status) + if (status && status != ICE_ERR_ALREADY_EXISTS) break; } @@ -3624,7 +3720,7 @@ break; case ICE_SW_LKUP_PROMISC: case ICE_SW_LKUP_PROMISC_VLAN: - ice_remove_promisc(hw, lkup, &remove_list_head); + ice_remove_promisc(hw, (u8)lkup, &remove_list_head); break; case ICE_SW_LKUP_MAC_VLAN: ice_debug(hw, ICE_DBG_SW, "MAC VLAN look up is not supported yet\n"); @@ -3787,53 +3883,6 @@ counter_id); } -/** - * ice_alloc_res_lg_act - add large action resource - * @hw: pointer to the hardware structure - * @l_id: large action ID to fill it in - * @num_acts: number of actions to hold with a 
large action entry - */ -static enum ice_status -ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts) -{ - struct ice_aqc_alloc_free_res_elem *sw_buf; - enum ice_status status; - u16 buf_len; - - if (num_acts > ICE_MAX_LG_ACT || num_acts == 0) - return ICE_ERR_PARAM; - - /* Allocate resource for large action */ - buf_len = ice_struct_size(sw_buf, elem, 1); - sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); - if (!sw_buf) - return ICE_ERR_NO_MEMORY; - - sw_buf->num_elems = CPU_TO_LE16(1); - - /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1. - * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3. - * If num_acts is greater than 2, then use - * ICE_AQC_RES_TYPE_WIDE_TABLE_4. - * The num_acts cannot exceed 4. This was ensured at the - * beginning of the function. - */ - if (num_acts == 1) - sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1); - else if (num_acts == 2) - sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2); - else - sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4); - - status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, - ice_aqc_opc_alloc_res, NULL); - if (!status) - *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp); - - ice_free(hw, sw_buf); - return status; -} - /** * ice_add_mac_with_sw_marker - add filter with sw marker * @hw: pointer to the hardware structure @@ -4201,10 +4250,12 @@ ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi, u16 vsi_handle) { - struct ice_switch_info *sw = hw->switch_info; +struct ice_switch_info *sw; enum ice_status status = ICE_SUCCESS; u8 i; + sw = hw->switch_info; + /* Update the recipes that were created */ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { struct LIST_HEAD_TYPE *head; diff --git a/sys/dev/ice/ice_type.h b/sys/dev/ice/ice_type.h --- a/sys/dev/ice/ice_type.h +++ b/sys/dev/ice/ice_type.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel 
Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -33,48 +33,15 @@ #ifndef _ICE_TYPE_H_ #define _ICE_TYPE_H_ -#define ETH_ALEN 6 - -#define ETH_HEADER_LEN 14 - -#define BIT(a) (1UL << (a)) -#ifndef BIT_ULL -#define BIT_ULL(a) (1ULL << (a)) -#endif /* BIT_ULL */ - -#define BITS_PER_BYTE 8 - -#define _FORCE_ - -#define ICE_BYTES_PER_WORD 2 -#define ICE_BYTES_PER_DWORD 4 -#define ICE_MAX_TRAFFIC_CLASS 8 - -#ifndef MIN_T -#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b)) -#endif - -#define IS_ASCII(_ch) ((_ch) < 0x80) - -#define STRUCT_HACK_VAR_LEN -/** - * ice_struct_size - size of struct with C99 flexible array member - * @ptr: pointer to structure - * @field: flexible array member (last member of the structure) - * @num: number of elements of that flexible array member - */ -#define ice_struct_size(ptr, field, num) \ - (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num)) - -#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0])) - +#include "ice_defs.h" #include "ice_status.h" #include "ice_hw_autogen.h" #include "ice_devids.h" #include "ice_osdep.h" #include "ice_bitops.h" /* Must come before ice_controlq.h */ -#include "ice_controlq.h" #include "ice_lan_tx_rx.h" +#include "ice_ddp_common.h" +#include "ice_controlq.h" #include "ice_flex_type.h" #include "ice_protocol_type.h" #include "ice_vlan_mode.h" @@ -135,6 +102,8 @@ #define ICE_LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF)) #define ICE_HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF)) #define ICE_LO_WORD(x) ((u16)((x) & 0xFFFF)) +#define ICE_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF)) +#define ICE_LO_BYTE(x) ((u8)((x) & 0xFF)) /* debug masks - set these bits in hw->debug_mask to control output */ #define ICE_DBG_TRACE BIT_ULL(0) /* for function-trace only */ @@ -203,11 +172,6 @@ #define ICE_CHANGE_LOCK_TIMEOUT 1000 #define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000 -enum ice_aq_res_access_type { - ICE_RES_READ = 1, - ICE_RES_WRITE -}; - struct ice_driver_ver { u8 major_ver; 
u8 minor_ver; @@ -236,7 +200,8 @@ ICE_FEC_NONE = 0, ICE_FEC_RS, ICE_FEC_BASER, - ICE_FEC_AUTO + ICE_FEC_AUTO, + ICE_FEC_DIS_AUTO }; struct ice_phy_cache_mode_data { @@ -261,6 +226,7 @@ ICE_MAC_VF, ICE_MAC_E810, ICE_MAC_GENERIC, + ICE_MAC_GENERIC_3K, }; /* Media Types */ @@ -338,6 +304,15 @@ #define ICE_MAX_NUM_MIRROR_RULES 64 +#define ICE_L2TPV2_FLAGS_CTRL 0x8000 +#define ICE_L2TPV2_FLAGS_LEN 0x4000 +#define ICE_L2TPV2_FLAGS_SEQ 0x0800 +#define ICE_L2TPV2_FLAGS_OFF 0x0200 +#define ICE_L2TPV2_FLAGS_VER 0x0002 + +#define ICE_L2TPV2_PKT_LENGTH 6 +#define ICE_PPP_PKT_LENGTH 4 + /* Common HW capabilities for SW use */ struct ice_hw_common_caps { /* Write CSR protection */ @@ -406,6 +381,7 @@ u8 iscsi; u8 mgmt_cem; u8 iwarp; + u8 roce_lag; /* WoL and APM support */ #define ICE_WOL_SUPPORT_M BIT(0) @@ -437,6 +413,17 @@ #define ICE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0) bool ext_topo_dev_img_prog_en[ICE_EXT_TOPO_DEV_IMG_COUNT]; #define ICE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1) + bool tx_sched_topo_comp_mode_en; + bool dyn_flattening_en; +}; + +#define ICE_NAC_TOPO_PRIMARY_M BIT(0) +#define ICE_NAC_TOPO_DUAL_M BIT(1) +#define ICE_NAC_TOPO_ID_M MAKEMASK(0xf, 0) + +struct ice_nac_topology { + u32 mode; + u8 id; }; /* Function specific capabilities */ @@ -453,6 +440,7 @@ u32 num_vfs_exposed; /* Total number of VFs exposed */ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */ u32 num_funcs; + struct ice_nac_topology nac_topo; }; /* Information about MAC such as address, etc... */ @@ -862,10 +850,6 @@ #define ICE_SCHED_PORT_STATE_READY 0x1 u8 lport; #define ICE_LPORT_MASK 0xff - u16 dflt_tx_vsi_rule_id; - u16 dflt_tx_vsi_num; - u16 dflt_rx_vsi_rule_id; - u16 dflt_rx_vsi_num; struct ice_fc_info fc; struct ice_mac_info mac; struct ice_phy_info phy; @@ -887,7 +871,6 @@ ice_declare_bitmap(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; - /* Enum defining the different states of the mailbox snapshot in the * PF-VF mailbox overflow detection algorithm. 
The snapshot can be in * states: @@ -962,6 +945,13 @@ u16 async_watermark_val; }; +/* PHY configuration */ +enum ice_phy_cfg { + ICE_PHY_E810 = 1, + ICE_PHY_E822, + ICE_PHY_ETH56G, +}; + /* Port hardware description */ struct ice_hw { u8 *hw_addr; @@ -985,6 +975,7 @@ u8 revision_id; u8 pf_id; /* device profile info */ + enum ice_phy_cfg phy_cfg; u16 max_burst_size; /* driver sets this value */ @@ -1046,23 +1037,23 @@ /* true if VSIs can share unicast MAC addr */ u8 umac_shared; -#define ICE_PHY_PER_NAC 1 -#define ICE_MAX_QUAD 2 -#define ICE_NUM_QUAD_TYPE 2 -#define ICE_PORTS_PER_QUAD 4 -#define ICE_PHY_0_LAST_QUAD 1 -#define ICE_PORTS_PER_PHY 8 -#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY +#define ICE_PHY_PER_NAC_E822 1 +#define ICE_MAX_QUAD 2 +#define ICE_QUADS_PER_PHY_E822 2 +#define ICE_PORTS_PER_PHY_E822 8 +#define ICE_PORTS_PER_QUAD 4 +#define ICE_PORTS_PER_PHY_E810 4 +#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD) /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; u32 pkg_seg_id; + u32 pkg_sign_type; u32 active_track_id; + u8 pkg_has_signing_seg:1; u8 active_pkg_name[ICE_PKG_NAME_SIZE]; u8 active_pkg_in_nvm; - enum ice_aq_err pkg_dwnld_status; - /* Driver's package ver - (from the Ice Metadata section) */ struct ice_pkg_ver pkg_ver; u8 pkg_name[ICE_PKG_NAME_SIZE]; @@ -1173,6 +1164,7 @@ ICE_FWD_TO_Q, ICE_FWD_TO_QGRP, ICE_DROP_PACKET, + ICE_LG_ACTION, ICE_INVAL_ACT }; @@ -1344,6 +1336,12 @@ #define ICE_FW_API_REPORT_DFLT_CFG_MIN 7 #define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3 +/* FW version for FEC disable in Auto FEC mode */ +#define ICE_FW_FEC_DIS_AUTO_BRANCH 1 +#define ICE_FW_FEC_DIS_AUTO_MAJ 7 +#define ICE_FW_FEC_DIS_AUTO_MIN 0 +#define ICE_FW_FEC_DIS_AUTO_PATCH 5 + /* AQ API version for FW health reports */ #define ICE_FW_API_HEALTH_REPORT_MAJ 1 #define ICE_FW_API_HEALTH_REPORT_MIN 7 diff --git a/sys/dev/ice/ice_vlan_mode.h b/sys/dev/ice/ice_vlan_mode.h --- a/sys/dev/ice/ice_vlan_mode.h +++ 
b/sys/dev/ice/ice_vlan_mode.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/ice_vlan_mode.c b/sys/dev/ice/ice_vlan_mode.c --- a/sys/dev/ice/ice_vlan_mode.c +++ b/sys/dev/ice/ice_vlan_mode.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -32,6 +32,7 @@ #include "ice_common.h" +#include "ice_ddp_common.h" /** * ice_pkg_get_supported_vlan_mode - chk if DDP supports Double VLAN mode (DVM) * @hw: pointer to the HW struct diff --git a/sys/dev/ice/if_ice_iflib.c b/sys/dev/ice/if_ice_iflib.c --- a/sys/dev/ice/if_ice_iflib.c +++ b/sys/dev/ice/if_ice_iflib.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -473,6 +473,8 @@ /* Setup ControlQ lengths */ ice_set_ctrlq_len(hw); +reinit_hw: + fw_mode = ice_get_fw_mode(hw); if (fw_mode == ICE_FW_MODE_REC) { device_printf(dev, "Firmware recovery mode detected. Limiting functionality. 
Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); @@ -507,12 +509,22 @@ goto free_pci_mapping; } + ice_init_device_features(sc); + /* Notify firmware of the device driver version */ err = ice_send_version(sc); if (err) goto deinit_hw; - ice_load_pkg_file(sc); + /* + * Success indicates a change was made that requires a reinitialization + * of the hardware + */ + err = ice_load_pkg_file(sc); + if (err == ICE_SUCCESS) { + ice_deinit_hw(hw); + goto reinit_hw; + } err = ice_init_link_events(sc); if (err) { @@ -521,9 +533,19 @@ goto deinit_hw; } - ice_print_nvm_version(sc); + /* Initialize VLAN mode in FW; if dual VLAN mode is supported by the package + * and firmware, this will force them to use single VLAN mode. + */ + status = ice_set_vlan_mode(hw); + if (status) { + err = EIO; + device_printf(dev, "Unable to initialize VLAN mode, err %s aq_err %s\n", + ice_status_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + goto deinit_hw; + } - ice_init_device_features(sc); + ice_print_nvm_version(sc); /* Setup the MAC address */ iflib_set_mac(ctx, hw->port_info->mac.lan_addr); @@ -971,7 +993,7 @@ /* Allocate queue structure memory */ if (!(vsi->tx_queues = - (struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_WAITOK | M_ZERO))) { + (struct ice_tx_queue *) malloc(sizeof(struct ice_tx_queue) * ntxqsets, M_ICE, M_NOWAIT | M_ZERO))) { device_printf(sc->dev, "Unable to allocate Tx queue memory\n"); return (ENOMEM); } @@ -979,7 +1001,7 @@ /* Allocate report status arrays */ for (i = 0, txq = vsi->tx_queues; i < ntxqsets; i++, txq++) { if (!(txq->tx_rsq = - (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_WAITOK))) { + (uint16_t *) malloc(sizeof(uint16_t) * sc->scctx->isc_ntxd[0], M_ICE, M_NOWAIT))) { device_printf(sc->dev, "Unable to allocate tx_rsq memory\n"); err = ENOMEM; goto free_tx_queues; @@ -1063,7 +1085,7 @@ /* Allocate queue structure memory */ if 
(!(vsi->rx_queues = - (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_WAITOK | M_ZERO))) { + (struct ice_rx_queue *) malloc(sizeof(struct ice_rx_queue) * nrxqsets, M_ICE, M_NOWAIT | M_ZERO))) { device_printf(sc->dev, "Unable to allocate Rx queue memory\n"); return (ENOMEM); } @@ -2296,7 +2318,7 @@ if (hw->port_info) ice_sched_clear_port(hw->port_info); - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); } /** @@ -2403,6 +2425,7 @@ { struct ice_hw *hw = &sc->hw; device_t dev = sc->dev; + enum ice_ddp_state pkg_state; enum ice_status status; int err; @@ -2497,10 +2520,9 @@ /* If we previously loaded the package, it needs to be reloaded now */ if (!ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) { - status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); - if (status) { - ice_log_pkg_init(sc, &status); - + pkg_state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); + if (!ice_is_init_pkg_successful(pkg_state)) { + ice_log_pkg_init(sc, pkg_state); ice_transition_safe_mode(sc); } } @@ -2576,7 +2598,8 @@ err_sched_cleanup: ice_sched_cleanup_all(hw); err_shutdown_ctrlq: - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); + ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET); ice_set_state(&sc->state, ICE_STATE_RESET_FAILED); device_printf(dev, "Driver rebuild failed, please reload the device driver\n"); } @@ -2688,13 +2711,6 @@ static void ice_init_device_features(struct ice_softc *sc) { - /* - * A failed pkg file download triggers safe mode, disabling advanced - * device feature support - */ - if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_SAFE_MODE)) - return; - /* Set capabilities that all devices support */ ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_cap); ice_set_bit(ICE_FEATURE_RSS, sc->feat_cap); @@ -2705,12 +2721,16 @@ ice_set_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap); ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_cap); ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_cap); + ice_set_bit(ICE_FEATURE_DCB, 
sc->feat_cap); + ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap); /* Disable features due to hardware limitations... */ if (!sc->hw.func_caps.common_cap.rss_table_size) ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap); if (!sc->hw.func_caps.common_cap.iwarp || !ice_enable_irdma) ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap); + if (!sc->hw.func_caps.common_cap.dcb) + ice_clear_bit(ICE_FEATURE_DCB, sc->feat_cap); /* Disable features due to firmware limitations... */ if (!ice_is_fw_health_report_supported(&sc->hw)) ice_clear_bit(ICE_FEATURE_HEALTH_STATUS, sc->feat_cap); @@ -2729,6 +2749,10 @@ /* RSS is always enabled for iflib */ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_RSS)) ice_set_bit(ICE_FEATURE_RSS, sc->feat_en); + + /* Disable features based on sysctl settings */ + if (!ice_tx_balance_en) + ice_clear_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap); } /** @@ -2992,6 +3016,8 @@ switch (ifd->ifd_cmd) { case ICE_NVM_ACCESS: return ice_handle_nvm_access_ioctl(sc, ifd); + case ICE_DEBUG_DUMP: + return ice_handle_debug_dump_ioctl(sc, ifd); default: return EINVAL; } diff --git a/sys/dev/ice/irdma_di_if.m b/sys/dev/ice/irdma_di_if.m --- a/sys/dev/ice/irdma_di_if.m +++ b/sys/dev/ice/irdma_di_if.m @@ -1,5 +1,5 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2021, Intel Corporation +# Copyright (c) 2022, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/irdma_if.m b/sys/dev/ice/irdma_if.m --- a/sys/dev/ice/irdma_if.m +++ b/sys/dev/ice/irdma_if.m @@ -1,5 +1,5 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2021, Intel Corporation +# Copyright (c) 2022, Intel Corporation # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without diff --git a/sys/dev/ice/virtchnl.h b/sys/dev/ice/virtchnl.h --- a/sys/dev/ice/virtchnl.h +++ b/sys/dev/ice/virtchnl.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -196,10 +196,12 @@ /* opcodes 60 through 65 are reserved */ VIRTCHNL_OP_GET_QOS_CAPS = 66, VIRTCHNL_OP_CONFIG_QUEUE_TC_MAP = 67, - /* opcode 68, 69 are reserved */ + /* opcode 68 through 70 are reserved */ VIRTCHNL_OP_ENABLE_QUEUES_V2 = 107, VIRTCHNL_OP_DISABLE_QUEUES_V2 = 108, VIRTCHNL_OP_MAP_QUEUE_VECTOR = 111, + VIRTCHNL_OP_CONFIG_QUEUE_BW = 112, + VIRTCHNL_OP_CONFIG_QUANTA = 113, VIRTCHNL_OP_MAX, }; @@ -274,12 +276,6 @@ return "VIRTCHNL_OP_DEL_FDIR_FILTER"; case VIRTCHNL_OP_GET_MAX_RSS_QREGION: return "VIRTCHNL_OP_GET_MAX_RSS_QREGION"; - case VIRTCHNL_OP_ENABLE_QUEUES_V2: - return "VIRTCHNL_OP_ENABLE_QUEUES_V2"; - case VIRTCHNL_OP_DISABLE_QUEUES_V2: - return "VIRTCHNL_OP_DISABLE_QUEUES_V2"; - case VIRTCHNL_OP_MAP_QUEUE_VECTOR: - return "VIRTCHNL_OP_MAP_QUEUE_VECTOR"; case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: return "VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS"; case VIRTCHNL_OP_ADD_VLAN_V2: @@ -298,6 +294,12 @@ return "VIRTCHNL_OP_ENABLE_VLAN_FILTERING_V2"; case VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2: return "VIRTCHNL_OP_DISABLE_VLAN_FILTERING_V2"; + case VIRTCHNL_OP_ENABLE_QUEUES_V2: + return "VIRTCHNL_OP_ENABLE_QUEUES_V2"; + case VIRTCHNL_OP_DISABLE_QUEUES_V2: + return "VIRTCHNL_OP_DISABLE_QUEUES_V2"; + case VIRTCHNL_OP_MAP_QUEUE_VECTOR: + return "VIRTCHNL_OP_MAP_QUEUE_VECTOR"; case VIRTCHNL_OP_MAX: return "VIRTCHNL_OP_MAX"; default: @@ -492,21 +494,14 @@ /* RX descriptor IDs (range from 0 to 63) */ enum virtchnl_rx_desc_ids { VIRTCHNL_RXDID_0_16B_BASE = 0, - /* 32B_BASE and FLEX_SPLITQ share desc ids as default descriptors - * because they 
can be differentiated based on queue model; e.g. single - * queue model can only use 32B_BASE and split queue model can only use - * FLEX_SPLITQ. Having these as 1 allows them to be used as default - * descriptors without negotiation. - */ VIRTCHNL_RXDID_1_32B_BASE = 1, - VIRTCHNL_RXDID_1_FLEX_SPLITQ = 1, VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2, VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3, VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4, VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5, VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6, VIRTCHNL_RXDID_7_HW_RSVD = 7, - /* 9 through 15 are reserved */ + /* 8 through 15 are reserved */ VIRTCHNL_RXDID_16_COMMS_GENERIC = 16, VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17, VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18, @@ -520,7 +515,6 @@ enum virtchnl_rx_desc_id_bitmasks { VIRTCHNL_RXDID_0_16B_BASE_M = BIT(VIRTCHNL_RXDID_0_16B_BASE), VIRTCHNL_RXDID_1_32B_BASE_M = BIT(VIRTCHNL_RXDID_1_32B_BASE), - VIRTCHNL_RXDID_1_FLEX_SPLITQ_M = BIT(VIRTCHNL_RXDID_1_FLEX_SPLITQ), VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC), VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = BIT(VIRTCHNL_RXDID_3_FLEX_SQ_SW), VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = BIT(VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB), @@ -1211,6 +1205,46 @@ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); +/* enum virthcnl_hash_filter + * + * Bits defining the hash filters in the hena field of the virtchnl_rss_hena + * structure. Each bit indicates a specific hash filter for RSS. + * + * Note that not all bits are supported on all hardware. The VF should use + * VIRTCHNL_OP_GET_RSS_HENA_CAPS to determine which bits the PF is capable of + * before using VIRTCHNL_OP_SET_RSS_HENA to enable specific filters. 
+ */ +enum virtchnl_hash_filter { + /* Bits 0 through 28 are reserved for future use */ + /* Bit 29, 30, and 32 are not supported on XL710 and X710 */ + VIRTCHNL_HASH_FILTER_UNICAST_IPV4_UDP = 29, + VIRTCHNL_HASH_FILTER_MULTICAST_IPV4_UDP = 30, + VIRTCHNL_HASH_FILTER_IPV4_UDP = 31, + VIRTCHNL_HASH_FILTER_IPV4_TCP_SYN_NO_ACK = 32, + VIRTCHNL_HASH_FILTER_IPV4_TCP = 33, + VIRTCHNL_HASH_FILTER_IPV4_SCTP = 34, + VIRTCHNL_HASH_FILTER_IPV4_OTHER = 35, + VIRTCHNL_HASH_FILTER_FRAG_IPV4 = 36, + /* Bits 37 and 38 are reserved for future use */ + /* Bit 39, 40, and 42 are not supported on XL710 and X710 */ + VIRTCHNL_HASH_FILTER_UNICAST_IPV6_UDP = 39, + VIRTCHNL_HASH_FILTER_MULTICAST_IPV6_UDP = 40, + VIRTCHNL_HASH_FILTER_IPV6_UDP = 41, + VIRTCHNL_HASH_FILTER_IPV6_TCP_SYN_NO_ACK = 42, + VIRTCHNL_HASH_FILTER_IPV6_TCP = 43, + VIRTCHNL_HASH_FILTER_IPV6_SCTP = 44, + VIRTCHNL_HASH_FILTER_IPV6_OTHER = 45, + VIRTCHNL_HASH_FILTER_FRAG_IPV6 = 46, + /* Bit 47 is reserved for future use */ + VIRTCHNL_HASH_FILTER_FCOE_OX = 48, + VIRTCHNL_HASH_FILTER_FCOE_RX = 49, + VIRTCHNL_HASH_FILTER_FCOE_OTHER = 50, + /* Bits 51 through 62 are reserved for future use */ + VIRTCHNL_HASH_FILTER_L2_PAYLOAD = 63, +}; + +#define VIRTCHNL_HASH_FILTER_INVALID (0) + /* VIRTCHNL_OP_GET_RSS_HENA_CAPS * VIRTCHNL_OP_SET_RSS_HENA * VF sends these messages to get and set the hash filter enable bits for RSS. @@ -1219,6 +1253,7 @@ * traffic types that are hashed by the hardware. 
*/ struct virtchnl_rss_hena { + /* see enum virtchnl_hash_filter */ u64 hena; }; @@ -1378,13 +1413,6 @@ u8 link_status; u8 pad[3]; } link_event_adv; - struct { - /* link_speed provided in Mbps */ - u32 link_speed; - u16 vport_id; - u8 link_status; - u8 pad; - } link_event_adv_vport; } event_data; s32 severity; @@ -1410,6 +1438,7 @@ }; #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32 +#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024 #define PROTO_HDR_SHIFT 5 #define PROTO_HDR_FIELD_START(proto_hdr_type) \ (proto_hdr_type << PROTO_HDR_SHIFT) @@ -1581,6 +1610,10 @@ PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN), VIRTCHNL_PROTO_HDR_GTPU_UP_QFI = PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP), + /* L2TPv2 */ + VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID = + PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV2), + VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID, }; struct virtchnl_proto_hdr { @@ -1601,13 +1634,26 @@ u8 tunnel_level; /** * specify where protocol header start from. + * must be 0 when sending a raw packet request. * 0 - from the outer layer * 1 - from the first inner layer * 2 - from the second inner layer * .... - **/ - int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */ - struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; + */ + int count; + /** + * number of proto layers, must < VIRTCHNL_MAX_NUM_PROTO_HDRS + * must be 0 for a raw packet request. + */ + union { + struct virtchnl_proto_hdr + proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; + struct { + u16 pkt_len; + u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET]; + u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET]; + } raw; + }; }; VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs); @@ -1796,18 +1842,28 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_tc_mapping); -/* TX and RX queue types are valid in legacy as well as split queue models. - * With Split Queue model, 2 additional types are introduced - TX_COMPLETION - * and RX_BUFFER. In split queue model, RX corresponds to the queue where HW - * posts completions. 
- */ +/* VIRTCHNL_OP_CONFIG_QUEUE_BW */ +struct virtchnl_queue_bw { + u16 queue_id; + u8 tc; + u8 pad; + struct virtchnl_shaper_bw shaper; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw); + +struct virtchnl_queues_bw_cfg { + u16 vsi_id; + u16 num_queues; + struct virtchnl_queue_bw cfg[1]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_queues_bw_cfg); + +/* queue types */ enum virtchnl_queue_type { VIRTCHNL_QUEUE_TYPE_TX = 0, VIRTCHNL_QUEUE_TYPE_RX = 1, - VIRTCHNL_QUEUE_TYPE_TX_COMPLETION = 2, - VIRTCHNL_QUEUE_TYPE_RX_BUFFER = 3, - VIRTCHNL_QUEUE_TYPE_CONFIG_TX = 4, - VIRTCHNL_QUEUE_TYPE_CONFIG_RX = 5 }; /* structure to specify a chunk of contiguous queues */ @@ -1831,19 +1887,13 @@ /* VIRTCHNL_OP_ENABLE_QUEUES_V2 * VIRTCHNL_OP_DISABLE_QUEUES_V2 - * VIRTCHNL_OP_DEL_QUEUES - * - * If VIRTCHNL version was negotiated in VIRTCHNL_OP_VERSION as 2.0 - * then all of these ops are available. * - * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES - * then VIRTCHNL_OP_ENABLE_QUEUES_V2 and VIRTCHNL_OP_DISABLE_QUEUES_V2 are - * available. + * These opcodes can be used if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in + * VIRTCHNL_OP_GET_VF_RESOURCES * - * PF sends these messages to enable, disable or delete queues specified in - * chunks. PF sends virtchnl_del_ena_dis_queues struct to specify the queues - * to be enabled/disabled/deleted. Also applicable to single queue RX or - * TX. CP performs requested action and returns status. + * VF sends virtchnl_ena_dis_queues struct to specify the queues to be + * enabled/disabled in chunks. Also applicable to single queue RX or + * TX. PF performs requested action and returns status. */ struct virtchnl_del_ena_dis_queues { u16 vport_id; @@ -1877,13 +1927,13 @@ /* VIRTCHNL_OP_MAP_QUEUE_VECTOR * - * If VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated in VIRTCHNL_OP_GET_VF_RESOURCES - * then only VIRTCHNL_OP_MAP_QUEUE_VECTOR is available. 
+ * This opcode can be used only if VIRTCHNL_VF_LARGE_NUM_QPAIRS was negotiated + * in VIRTCHNL_OP_GET_VF_RESOURCES * - * PF sends this message to map or unmap queues to vectors and ITR index - * registers. External data buffer contains virtchnl_queue_vector_maps structure + * VF sends this message to map queues to vectors and ITR index registers. + * External data buffer contains virtchnl_queue_vector_maps structure * that contains num_qv_maps of virtchnl_queue_vector structures. - * CP maps the requested queue vector maps after validating the queue and vector + * PF maps the requested queue vector maps after validating the queue and vector * ids and returns a status code. */ struct virtchnl_queue_vector_maps { @@ -1895,6 +1945,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_queue_vector_maps); +struct virtchnl_quanta_cfg { + u16 quanta_size; + struct virtchnl_queue_chunk queue_select; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg); + /* Since VF messages are limited by u16 size, precalculate the maximum possible * values of nested elements in virtchnl structures that virtual channel can * possibly handle in a single message. 
@@ -2130,6 +2187,31 @@ sizeof(q_tc->tc[0]); } break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: + valid_len = sizeof(struct virtchnl_queues_bw_cfg); + if (msglen >= valid_len) { + struct virtchnl_queues_bw_cfg *q_bw = + (struct virtchnl_queues_bw_cfg *)msg; + if (q_bw->num_queues == 0) { + err_msg_format = true; + break; + } + valid_len += (q_bw->num_queues - 1) * + sizeof(q_bw->cfg[0]); + } + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + valid_len = sizeof(struct virtchnl_quanta_cfg); + if (msglen >= valid_len) { + struct virtchnl_quanta_cfg *q_quanta = + (struct virtchnl_quanta_cfg *)msg; + if (q_quanta->quanta_size == 0 || + q_quanta->queue_select.num_queues == 0) { + err_msg_format = true; + break; + } + } + break; case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: break; case VIRTCHNL_OP_ADD_VLAN_V2: diff --git a/sys/dev/ice/virtchnl_inline_ipsec.h b/sys/dev/ice/virtchnl_inline_ipsec.h --- a/sys/dev/ice/virtchnl_inline_ipsec.h +++ b/sys/dev/ice/virtchnl_inline_ipsec.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -478,6 +478,15 @@ /* Set TC (congestion domain) if true. For future use. */ u8 set_tc; + + /* 0 for NAT-T unsupported, 1 for NAT-T supported */ + u8 is_udp; + + /* reserved */ + u8 reserved; + + /* NAT-T UDP port number. Only valid in case NAT-T supported */ + u16 udp_port; }; #pragma pack(1) diff --git a/sys/dev/ice/virtchnl_lan_desc.h b/sys/dev/ice/virtchnl_lan_desc.h deleted file mode 100644 --- a/sys/dev/ice/virtchnl_lan_desc.h +++ /dev/null @@ -1,550 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. 
Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * 3. Neither the name of the Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ -/*$FreeBSD$*/ - -#ifndef _VIRTCHNL_LAN_DESC_H_ -#define _VIRTCHNL_LAN_DESC_H_ - -/* Rx */ -/* For splitq virtchnl_rx_flex_desc_adv desc members */ -#define VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_S 0 -#define VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_M \ - MAKEMASK(0xFUL, VIRTCHNL_RX_FLEX_DESC_ADV_RXDID_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_S 0 -#define VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_M \ - MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_ADV_PTYPE_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_S 10 -#define VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_M \ - MAKEMASK(0x3UL, VIRTCHNL_RX_FLEX_DESC_ADV_UMBCAST_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_FF0_S 12 -#define VIRTCHNL_RX_FLEX_DESC_ADV_FF0_M \ - MAKEMASK(0xFUL, VIRTCHNL_RX_FLEX_DESC_ADV_FF0_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_S 0 -#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_M \ - MAKEMASK(0x3FFFUL, VIRTCHNL_RX_FLEX_DESC_ADV_LEN_PBUF_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_GEN_S 14 -#define VIRTCHNL_RX_FLEX_DESC_ADV_GEN_M \ - BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_GEN_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_S 15 -#define VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_M \ - BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_BUFQ_ID_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_S 0 -#define VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_M \ - MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_ADV_LEN_HDR_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_RSC_S 10 -#define VIRTCHNL_RX_FLEX_DESC_ADV_RSC_M \ - BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_RSC_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_SPH_S 11 -#define VIRTCHNL_RX_FLEX_DESC_ADV_SPH_M \ - BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_SPH_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_MISS_S 12 -#define VIRTCHNL_RX_FLEX_DESC_ADV_MISS_M \ - BIT_ULL(VIRTCHNL_RX_FLEX_DESC_ADV_MISS_S) -#define VIRTCHNL_RX_FLEX_DESC_ADV_FF1_S 13 -#define VIRTCHNL_RX_FLEX_DESC_ADV_FF1_M \ - MAKEMASK(0x7UL, VIRTCHNL_RX_FLEX_DESC_ADV_FF1_M) - -enum virtchnl_rx_flex_desc_adv_status_error_0_qw1_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_DD_S = 0, - 
VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_EOF_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_HBO_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S, -}; - -enum virtchnl_rx_flex_desc_adv_status_error_0_qw0_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_LPBK_S = 0, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_RXE_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_CRCP_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_S, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS0_LAST /* this entry must be last!!! */ -}; - -enum virtchnl_rx_flex_desc_adv_status_error_1_bits { - /* Note: These are predefined bit offsets */ - /* 2 bits */ - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_RSVD_S = 0, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_S = 2, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_S = 3, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_S = 4, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_S = 5, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_S = 6, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_S = 7, - VIRTCHNL_RX_FLEX_DESC_ADV_STATUS1_LAST /* this entry must be last!!! 
*/ -}; - -/* for singleq (flex) virtchnl_rx_flex_desc fields */ -/* for virtchnl_rx_flex_desc.ptype_flex_flags0 member */ -#define VIRTCHNL_RX_FLEX_DESC_PTYPE_S 0 -#define VIRTCHNL_RX_FLEX_DESC_PTYPE_M \ - MAKEMASK(0x3FFUL, VIRTCHNL_RX_FLEX_DESC_PTYPE_S) /* 10-bits */ - -/* for virtchnl_rx_flex_desc.pkt_length member */ -#define VIRTCHNL_RX_FLEX_DESC_PKT_LEN_S 0 -#define VIRTCHNL_RX_FLEX_DESC_PKT_LEN_M \ - MAKEMASK(0x3FFFUL, VIRTCHNL_RX_FLEX_DESC_PKT_LEN_S) /* 14-bits */ - -enum virtchnl_rx_flex_desc_status_error_0_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_FLEX_DESC_STATUS0_DD_S = 0, - VIRTCHNL_RX_FLEX_DESC_STATUS0_EOF_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_HBO_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_L3L4P_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_IPE_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_L4E_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_LPBK_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_IPV6EXADD_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_RXE_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_CRCP_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_RSS_VALID_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_L2TAG1P_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S, - VIRTCHNL_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */ -}; - -enum virtchnl_rx_flex_desc_status_error_1_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */ - VIRTCHNL_RX_FLEX_DESC_STATUS1_NAT_S = 4, - VIRTCHNL_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5, - /* [10:6] reserved */ - VIRTCHNL_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11, - VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12, - VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13, - VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14, - VIRTCHNL_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15, - VIRTCHNL_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! 
*/ -}; - -/* For singleq (non flex) virtchnl_singleq_base_rx_desc legacy desc members */ -#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_S 63 -#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_M \ - BIT_ULL(VIRTCHNL_RX_BASE_DESC_QW1_LEN_SPH_S) -#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_S 52 -#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_M \ - MAKEMASK(0x7FFULL, VIRTCHNL_RX_BASE_DESC_QW1_LEN_HBUF_S) -#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_S 38 -#define VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_M \ - MAKEMASK(0x3FFFULL, VIRTCHNL_RX_BASE_DESC_QW1_LEN_PBUF_S) -#define VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_S 30 -#define VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_M \ - MAKEMASK(0xFFULL, VIRTCHNL_RX_BASE_DESC_QW1_PTYPE_S) -#define VIRTCHNL_RX_BASE_DESC_QW1_ERROR_S 19 -#define VIRTCHNL_RX_BASE_DESC_QW1_ERROR_M \ - MAKEMASK(0xFFUL, VIRTCHNL_RX_BASE_DESC_QW1_ERROR_S) -#define VIRTCHNL_RX_BASE_DESC_QW1_STATUS_S 0 -#define VIRTCHNL_RX_BASE_DESC_QW1_STATUS_M \ - MAKEMASK(0x7FFFFUL, VIRTCHNL_RX_BASE_DESC_QW1_STATUS_S) - -enum virtchnl_rx_base_desc_status_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_BASE_DESC_STATUS_DD_S = 0, - VIRTCHNL_RX_BASE_DESC_STATUS_EOF_S = 1, - VIRTCHNL_RX_BASE_DESC_STATUS_L2TAG1P_S = 2, - VIRTCHNL_RX_BASE_DESC_STATUS_L3L4P_S = 3, - VIRTCHNL_RX_BASE_DESC_STATUS_CRCP_S = 4, - VIRTCHNL_RX_BASE_DESC_STATUS_RSVD_S = 5, /* 3 BITS */ - VIRTCHNL_RX_BASE_DESC_STATUS_EXT_UDP_0_S = 8, - VIRTCHNL_RX_BASE_DESC_STATUS_UMBCAST_S = 9, /* 2 BITS */ - VIRTCHNL_RX_BASE_DESC_STATUS_FLM_S = 11, - VIRTCHNL_RX_BASE_DESC_STATUS_FLTSTAT_S = 12, /* 2 BITS */ - VIRTCHNL_RX_BASE_DESC_STATUS_LPBK_S = 14, - VIRTCHNL_RX_BASE_DESC_STATUS_IPV6EXADD_S = 15, - VIRTCHNL_RX_BASE_DESC_STATUS_RSVD1_S = 16, /* 2 BITS */ - VIRTCHNL_RX_BASE_DESC_STATUS_INT_UDP_0_S = 18, - VIRTCHNL_RX_BASE_DESC_STATUS_LAST /* this entry must be last!!! 
*/ -}; - -enum virtchnl_rx_base_desc_ext_status_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_BASE_DESC_EXT_STATUS_L2TAG2P_S = 0 -}; - -enum virtchnl_rx_base_desc_error_bits { - /* Note: These are predefined bit offsets */ - VIRTCHNL_RX_BASE_DESC_ERROR_RXE_S = 0, - VIRTCHNL_RX_BASE_DESC_ERROR_ATRAEFAIL_S = 1, - VIRTCHNL_RX_BASE_DESC_ERROR_HBO_S = 2, - VIRTCHNL_RX_BASE_DESC_ERROR_L3L4E_S = 3, /* 3 BITS */ - VIRTCHNL_RX_BASE_DESC_ERROR_IPE_S = 3, - VIRTCHNL_RX_BASE_DESC_ERROR_L4E_S = 4, - VIRTCHNL_RX_BASE_DESC_ERROR_EIPE_S = 5, - VIRTCHNL_RX_BASE_DESC_ERROR_OVERSIZE_S = 6, - VIRTCHNL_RX_BASE_DESC_ERROR_PPRS_S = 7 -}; - -enum virtchnl_rx_base_desc_fltstat_values { - VIRTCHNL_RX_BASE_DESC_FLTSTAT_NO_DATA = 0, - VIRTCHNL_RX_BASE_DESC_FLTSTAT_FD_ID = 1, - VIRTCHNL_RX_BASE_DESC_FLTSTAT_RSV = 2, - VIRTCHNL_RX_BASE_DESC_FLTSTAT_RSS_HASH = 3, -}; - -/* Receive Descriptors */ -/* splitq buf - | 16| 0| - ---------------------------------------------------------------- - | RSV | Buffer ID | - ---------------------------------------------------------------- - | Rx packet buffer adresss | - ---------------------------------------------------------------- - | Rx header buffer adresss | - ---------------------------------------------------------------- - | RSV | - ---------------------------------------------------------------- - | 0| - */ -struct virtchnl_splitq_rx_buf_desc { - struct { - __le16 buf_id; /* Buffer Identifier */ - __le16 rsvd0; - __le32 rsvd1; - } qword0; - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - __le64 rsvd2; -}; /* read used with buffer queues*/ - -/* singleq buf - | 0| - ---------------------------------------------------------------- - | Rx packet buffer adresss | - ---------------------------------------------------------------- - | Rx header buffer adresss | - ---------------------------------------------------------------- - | RSV | - 
---------------------------------------------------------------- - | RSV | - ---------------------------------------------------------------- - | 0| - */ -struct virtchnl_singleq_rx_buf_desc { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - __le64 rsvd1; - __le64 rsvd2; -}; /* read used with buffer queues*/ - -union virtchnl_rx_buf_desc { - struct virtchnl_singleq_rx_buf_desc read; - struct virtchnl_splitq_rx_buf_desc split_rd; -}; - -/* (0x00) singleq wb(compl) */ -struct virtchnl_singleq_base_rx_desc { - struct { - struct { - __le16 mirroring_status; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fd_id; /* Flow Director filter id */ - } hi_dword; - } qword0; - struct { - /* status/error/PTYPE/length */ - __le64 status_error_ptype_len; - } qword1; - struct { - __le16 ext_status; /* extended status */ - __le16 rsvd; - __le16 l2tag2_1; - __le16 l2tag2_2; - } qword2; - struct { - __le32 reserved; - __le32 fd_id; - } qword3; -}; /* writeback */ - -/* (0x01) singleq flex compl */ -struct virtchnl_rx_flex_desc { - /* Qword 0 */ - u8 rxdid; /* descriptor builder profile id */ - u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */ - __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */ - __le16 pkt_len; /* [15:14] are reserved */ - __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */ - /* sph=[11:11] */ - /* ff1/ext=[15:12] */ - - /* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le16 flex_meta0; - __le16 flex_meta1; - - /* Qword 2 */ - __le16 status_error1; - u8 flex_flags2; - u8 time_stamp_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le16 flex_meta2; - __le16 flex_meta3; - union { - struct { - __le16 flex_meta4; - __le16 flex_meta5; - } flex; - __le32 ts_high; - } flex_ts; -}; - -/* (0x02) */ -struct virtchnl_rx_flex_desc_nic { - /* Qword 0 */ - u8 rxdid; - u8 mir_id_umb_cast; - __le16 ptype_flex_flags0; - __le16 pkt_len; - __le16 hdr_len_sph_flex_flags1; - - 
/* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le32 rss_hash; - - /* Qword 2 */ - __le16 status_error1; - u8 flexi_flags2; - u8 ts_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le32 flow_id; - union { - struct { - __le16 rsvd; - __le16 flow_id_ipv6; - } flex; - __le32 ts_high; - } flex_ts; -}; - -/* Rx Flex Descriptor Switch Profile - * RxDID Profile Id 3 - * Flex-field 0: Source Vsi - */ -struct virtchnl_rx_flex_desc_sw { - /* Qword 0 */ - u8 rxdid; - u8 mir_id_umb_cast; - __le16 ptype_flex_flags0; - __le16 pkt_len; - __le16 hdr_len_sph_flex_flags1; - - /* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le16 src_vsi; /* [10:15] are reserved */ - __le16 flex_md1_rsvd; - - /* Qword 2 */ - __le16 status_error1; - u8 flex_flags2; - u8 ts_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le32 rsvd; /* flex words 2-3 are reserved */ - __le32 ts_high; -}; - -/* Rx Flex Descriptor NIC Profile - * RxDID Profile Id 6 - * Flex-field 0: RSS hash lower 16-bits - * Flex-field 1: RSS hash upper 16-bits - * Flex-field 2: Flow Id lower 16-bits - * Flex-field 3: Source Vsi - * Flex-field 4: reserved, Vlan id taken from L2Tag - */ -struct virtchnl_rx_flex_desc_nic_2 { - /* Qword 0 */ - u8 rxdid; - u8 mir_id_umb_cast; - __le16 ptype_flex_flags0; - __le16 pkt_len; - __le16 hdr_len_sph_flex_flags1; - - /* Qword 1 */ - __le16 status_error0; - __le16 l2tag1; - __le32 rss_hash; - - /* Qword 2 */ - __le16 status_error1; - u8 flexi_flags2; - u8 ts_low; - __le16 l2tag2_1st; - __le16 l2tag2_2nd; - - /* Qword 3 */ - __le16 flow_id; - __le16 src_vsi; - union { - struct { - __le16 rsvd; - __le16 flow_id_ipv6; - } flex; - __le32 ts_high; - } flex_ts; -}; - -/* Rx Flex Descriptor Advanced (Split Queue Model) - * RxDID Profile Id 7 - */ -struct virtchnl_rx_flex_desc_adv { - /* Qword 0 */ - u8 rxdid_ucast; /* profile_id=[3:0] */ - /* rsvd=[5:4] */ - /* ucast=[7:6] */ - u8 status_err0_qw0; - __le16 ptype_err_fflags0; /* ptype=[9:0] */ - /* 
ip_hdr_err=[10:10] */ - /* udp_len_err=[11:11] */ - /* ff0=[15:12] */ - __le16 pktlen_gen_bufq_id; /* plen=[13:0] */ - /* gen=[14:14] only in splitq */ - /* bufq_id=[15:15] only in splitq */ - __le16 hdrlen_flags; /* header=[9:0] */ - /* rsc=[10:10] only in splitq */ - /* sph=[11:11] only in splitq */ - /* ext_udp_0=[12:12] */ - /* int_udp_0=[13:13] */ - /* trunc_mirr=[14:14] */ - /* miss_prepend=[15:15] */ - /* Qword 1 */ - u8 status_err0_qw1; - u8 status_err1; - u8 fflags1; - u8 ts_low; - __le16 fmd0; - __le16 fmd1; - /* Qword 2 */ - __le16 fmd2; - u8 fflags2; - u8 hash3; - __le16 fmd3; - __le16 fmd4; - /* Qword 3 */ - __le16 fmd5; - __le16 fmd6; - __le16 fmd7_0; - __le16 fmd7_1; -}; /* writeback */ - -/* Rx Flex Descriptor Advanced (Split Queue Model) NIC Profile - * RxDID Profile Id 8 - * Flex-field 0: BufferID - * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW) - * Flex-field 2: Hash[15:0] - * Flex-flags 2: Hash[23:16] - * Flex-field 3: L2TAG2 - * Flex-field 5: L2TAG1 - * Flex-field 7: Timestamp (upper 32 bits) - */ -struct virtchnl_rx_flex_desc_adv_nic_3 { - /* Qword 0 */ - u8 rxdid_ucast; /* profile_id=[3:0] */ - /* rsvd=[5:4] */ - /* ucast=[7:6] */ - u8 status_err0_qw0; - __le16 ptype_err_fflags0; /* ptype=[9:0] */ - /* ip_hdr_err=[10:10] */ - /* udp_len_err=[11:11] */ - /* ff0=[15:12] */ - __le16 pktlen_gen_bufq_id; /* plen=[13:0] */ - /* gen=[14:14] only in splitq */ - /* bufq_id=[15:15] only in splitq */ - __le16 hdrlen_flags; /* header=[9:0] */ - /* rsc=[10:10] only in splitq */ - /* sph=[11:11] only in splitq */ - /* ext_udp_0=[12:12] */ - /* int_udp_0=[13:13] */ - /* trunc_mirr=[14:14] */ - /* miss_prepend=[15:15] */ - /* Qword 1 */ - u8 status_err0_qw1; - u8 status_err1; - u8 fflags1; - u8 ts_low; - __le16 buf_id; /* only in splitq */ - union { - __le16 raw_cs; - __le16 l2tag1; - __le16 rscseglen; - } misc; - /* Qword 2 */ - __le16 hash1; - union { - u8 fflags2; - u8 mirrorid; - u8 hash2; - } ff2_mirrid_hash2; - u8 hash3; - __le16 
l2tag2; - __le16 fmd4; - /* Qword 3 */ - __le16 l2tag1; - __le16 fmd6; - __le32 ts_high; -}; /* writeback */ - -union virtchnl_rx_desc { - struct virtchnl_singleq_rx_buf_desc read; - struct virtchnl_singleq_base_rx_desc base_wb; - struct virtchnl_rx_flex_desc flex_wb; - struct virtchnl_rx_flex_desc_nic flex_nic_wb; - struct virtchnl_rx_flex_desc_sw flex_sw_wb; - struct virtchnl_rx_flex_desc_nic_2 flex_nic_2_wb; - struct virtchnl_rx_flex_desc_adv flex_adv_wb; - struct virtchnl_rx_flex_desc_adv_nic_3 flex_adv_nic_3_wb; -}; - -#endif /* _VIRTCHNL_LAN_DESC_H_ */ diff --git a/sys/modules/ice/Makefile b/sys/modules/ice/Makefile --- a/sys/modules/ice/Makefile +++ b/sys/modules/ice/Makefile @@ -14,7 +14,7 @@ # Core source SRCS += ice_lib.c ice_osdep.c ice_resmgr.c ice_strings.c SRCS += ice_iflib_recovery_txrx.c ice_iflib_txrx.c if_ice_iflib.c -SRCS += ice_fw_logging.c +SRCS += ice_fw_logging.c ice_ddp_common.c # RDMA Client interface # TODO: Is this the right way to compile this? diff --git a/sys/modules/ice_ddp/Makefile b/sys/modules/ice_ddp/Makefile --- a/sys/modules/ice_ddp/Makefile +++ b/sys/modules/ice_ddp/Makefile @@ -1,6 +1,6 @@ # $FreeBSD$ KMOD= ice_ddp -FIRMWS= ${SRCTOP}/sys/contrib/dev/ice/ice-1.3.27.0.pkg:ice_ddp:0x01031b00 +FIRMWS= ${SRCTOP}/sys/contrib/dev/ice/ice-1.3.30.0.pkg:ice_ddp:0x01031e00 .include