diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index d8a20e128c84..b967a3be2a4f 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -1,454 +1,456 @@
# This file tells config what files go into building a kernel,
# files marked standard are always included.
#
# $FreeBSD$
#
# common files stuff between i386 and amd64
include "conf/files.x86"
# The long compile-with and dependency lines are required because of
# limitations in config: backslash-newline doesn't work in strings, and
# dependency lines other than the first are silently ignored.
#
#
elf-vdso.so.o	standard \
	dependency "$S/amd64/amd64/sigtramp.S assym.inc $S/conf/vdso_amd64.ldscript $S/tools/amd64_vdso.sh" \
	compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' DEBUG='${DEBUG}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_vdso.sh" \
	no-implicit-rule before-depend \
	clean "elf-vdso.so.o elf-vdso.so.1 vdso_offsets.h sigtramp.pico"
#
elf-vdso32.so.o	optional compat_freebsd32 \
	dependency "$S/amd64/ia32/ia32_sigtramp.S ia32_assym.h $S/conf/vdso_amd64_ia32.ldscript $S/tools/amd64_ia32_vdso.sh" \
	compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' DEBUG='${DEBUG}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_ia32_vdso.sh" \
	no-implicit-rule before-depend \
	clean "elf-vdso32.so.o elf-vdso32.so.1 vdso_ia32_offsets.h ia32_sigtramp.pico"
#
cloudabi32_vdso.o	optional compat_cloudabi32 \
	dependency "$S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S" \
	compile-with "${CC} -x assembler-with-cpp -m32 -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S -o ${.TARGET}" \
	no-obj no-implicit-rule \
	clean "cloudabi32_vdso.o"
#
cloudabi32_vdso_blob.o	optional compat_cloudabi32 \
	dependency "cloudabi32_vdso.o" \
	compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi32_vdso.o ${.TARGET}" \
	no-implicit-rule \
	clean "cloudabi32_vdso_blob.o"
#
cloudabi64_vdso.o	optional compat_cloudabi64 \
	dependency "$S/contrib/cloudabi/cloudabi_vdso_x86_64.S" \
	compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_x86_64.S -o ${.TARGET}" \
	no-obj no-implicit-rule \
	clean "cloudabi64_vdso.o"
#
cloudabi64_vdso_blob.o	optional compat_cloudabi64 \
	dependency "cloudabi64_vdso.o" \
	compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi64_vdso.o ${.TARGET}" \
	no-implicit-rule \
	clean "cloudabi64_vdso_blob.o"
#
ia32_genassym.o	standard \
	dependency "$S/compat/ia32/ia32_genassym.c offset.inc" \
	compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -fcommon -c ${.IMPSRC}" \
	no-obj no-implicit-rule \
	clean "ia32_genassym.o"
#
ia32_assym.h	standard \
	dependency "$S/kern/genassym.sh ia32_genassym.o" \
	compile-with "env NM='${NM}' NMFLAGS='${NMFLAGS}' sh $S/kern/genassym.sh ia32_genassym.o > ${.TARGET}" \
	no-obj no-implicit-rule before-depend \
	clean "ia32_assym.h"
#
amd64/acpica/acpi_machdep.c	optional acpi
amd64/acpica/acpi_wakeup.c	optional acpi
acpi_wakecode.o	optional acpi \
	dependency "$S/amd64/acpica/acpi_wakecode.S assym.inc" \
	compile-with "${NORMAL_S}" \
	no-obj no-implicit-rule before-depend \
	clean "acpi_wakecode.o"
acpi_wakecode.bin	optional acpi \
	dependency "acpi_wakecode.o" \
	compile-with "${OBJCOPY} -S -O binary acpi_wakecode.o ${.TARGET}" \
	no-obj no-implicit-rule before-depend \
	clean "acpi_wakecode.bin"
acpi_wakecode.h	optional acpi \
	dependency "acpi_wakecode.bin" \
	compile-with "file2c -sx 'static char wakecode[] = {' '};' < acpi_wakecode.bin > ${.TARGET}" \
	no-obj no-implicit-rule before-depend \
	clean "acpi_wakecode.h"
acpi_wakedata.h	optional acpi \
	dependency "acpi_wakecode.o" \
	compile-with '${NM} -n --defined-only acpi_wakecode.o | while read offset dummy what; do echo "#define $${what} 0x$${offset}"; done > ${.TARGET}' \
	no-obj no-implicit-rule before-depend \
	clean "acpi_wakedata.h"
#
#amd64/amd64/apic_vector.S	standard
amd64/amd64/bios.c	standard
amd64/amd64/bpf_jit_machdep.c	optional bpf_jitter
amd64/amd64/copyout.c	standard
amd64/amd64/cpu_switch.S	standard
amd64/amd64/db_disasm.c	optional ddb
amd64/amd64/db_interface.c	optional ddb
amd64/amd64/db_trace.c	optional ddb
amd64/amd64/efirt_machdep.c	optional efirt
amd64/amd64/efirt_support.S	optional efirt
amd64/amd64/elf_machdep.c	standard
amd64/amd64/exception.S	standard
amd64/amd64/exec_machdep.c	standard
amd64/amd64/fpu.c	standard
amd64/amd64/gdb_machdep.c	optional gdb
amd64/amd64/initcpu.c	standard
amd64/amd64/io.c	optional io
amd64/amd64/locore.S	standard no-obj
amd64/amd64/xen-locore.S	optional xenhvm \
	compile-with "${NORMAL_S} -g0" \
	no-ctfconvert
amd64/amd64/machdep.c	standard
amd64/amd64/mem.c	optional mem
amd64/amd64/minidump_machdep.c	standard
amd64/amd64/mp_machdep.c	optional smp
amd64/amd64/mpboot.S	optional smp
amd64/amd64/pmap.c	standard
amd64/amd64/prof_machdep.c	optional profiling-routine
amd64/amd64/ptrace_machdep.c	standard
amd64/amd64/support.S	standard
amd64/amd64/sys_machdep.c	standard
amd64/amd64/trap.c	standard
amd64/amd64/uio_machdep.c	standard
amd64/amd64/uma_machdep.c	standard
amd64/amd64/vm_machdep.c	standard
amd64/cloudabi32/cloudabi32_sysvec.c	optional compat_cloudabi32
amd64/cloudabi64/cloudabi64_sysvec.c	optional compat_cloudabi64
amd64/pci/pci_cfgreg.c	optional pci
cddl/dev/dtrace/amd64/dtrace_asm.S	optional dtrace compile-with "${DTRACE_S}"
cddl/dev/dtrace/amd64/dtrace_subr.c	optional dtrace compile-with "${DTRACE_C}"
crypto/aesni/aeskeys_amd64.S	optional aesni
crypto/des/des_enc.c	optional netsmb
crypto/openssl/amd64/chacha-x86_64.S	optional ossl
crypto/openssl/amd64/poly1305-x86_64.S	optional ossl
crypto/openssl/amd64/sha1-x86_64.S	optional ossl
crypto/openssl/amd64/sha256-x86_64.S	optional ossl
crypto/openssl/amd64/sha512-x86_64.S	optional ossl
dev/acpi_support/acpi_wmi_if.m	standard
dev/agp/agp_amd64.c	optional agp
dev/agp/agp_i810.c	optional agp
dev/agp/agp_via.c	optional agp
dev/amdgpio/amdgpio.c	optional amdgpio
dev/axgbe/if_axgbe_pci.c	optional axp
dev/axgbe/xgbe-desc.c	optional axp
dev/axgbe/xgbe-dev.c	optional axp
dev/axgbe/xgbe-drv.c	optional axp
dev/axgbe/xgbe-mdio.c	optional axp
dev/axgbe/xgbe-sysctl.c	optional axp
dev/axgbe/xgbe-txrx.c	optional axp
dev/axgbe/xgbe_osdep.c	optional axp
dev/axgbe/xgbe-i2c.c	optional axp
dev/axgbe/xgbe-phy-v2.c	optional axp
dev/hyperv/vmbus/amd64/hyperv_machdep.c	optional hyperv
dev/hyperv/vmbus/amd64/vmbus_vector.S	optional hyperv
dev/iavf/if_iavf_iflib.c	optional iavf pci \
	compile-with "${NORMAL_C} -I$S/dev/iavf"
dev/iavf/iavf_lib.c	optional iavf pci \
	compile-with "${NORMAL_C} -I$S/dev/iavf"
dev/iavf/iavf_osdep.c	optional iavf pci \
	compile-with "${NORMAL_C} -I$S/dev/iavf"
dev/iavf/iavf_txrx_iflib.c	optional iavf pci \
	compile-with "${NORMAL_C} -I$S/dev/iavf"
dev/iavf/iavf_common.c	optional iavf pci \
	compile-with "${NORMAL_C} -I$S/dev/iavf"
dev/iavf/iavf_adminq.c	optional iavf pci \
	compile-with "${NORMAL_C} -I$S/dev/iavf"
dev/iavf/iavf_vc_common.c	optional iavf pci \
	compile-with "${NORMAL_C} -I$S/dev/iavf"
dev/iavf/iavf_vc_iflib.c	optional iavf pci \
	compile-with "${NORMAL_C} -I$S/dev/iavf"
dev/ice/if_ice_iflib.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_lib.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_osdep.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_resmgr.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_strings.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_recovery_txrx.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_txrx.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_common.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_controlq.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_dcb.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flex_pipe.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flow.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_nvm.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sched.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_vlan_mode.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fw_logging.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fwlog.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_rdma.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/irdma_if.m	optional ice pci \
	compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/irdma_di_if.m	optional ice pci \
	compile-with "${NORMAL_M} -I$S/dev/ice"
+dev/ice/ice_ddp_common.c	optional ice pci \
+	compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c	optional ice_ddp \
	compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \
	no-ctfconvert no-implicit-rule before-depend local \
	clean "ice_ddp.c"
ice_ddp.fwo	optional ice_ddp \
	dependency "ice_ddp.fw" \
	compile-with "${NORMAL_FWO}" \
	no-implicit-rule \
	clean "ice_ddp.fwo"
ice_ddp.fw	optional ice_ddp \
	dependency "$S/contrib/dev/ice/ice-1.3.30.0.pkg" \
	compile-with "${CP} $S/contrib/dev/ice/ice-1.3.30.0.pkg ice_ddp.fw" \
	no-obj no-implicit-rule \
	clean "ice_ddp.fw"
dev/ioat/ioat.c	optional ioat pci
dev/ioat/ioat_test.c	optional ioat pci
dev/ixl/if_ixl.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_main.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iflib.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_qmgr.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iov.c	optional ixl pci pci_iov \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_i2c.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_txrx.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_osdep.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_lan_hmc.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_hmc.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_common.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_nvm.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_adminq.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_dcb.c	optional ixl pci \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/nctgpio/nctgpio.c	optional nctgpio
dev/nfe/if_nfe.c	optional nfe pci
dev/ntb/if_ntb/if_ntb.c	optional if_ntb
dev/ntb/ntb_transport.c	optional ntb_transport | if_ntb
dev/ntb/ntb.c	optional ntb | ntb_transport | if_ntb | ntb_hw_amd | ntb_hw_intel | ntb_hw_plx | ntb_hw
dev/ntb/ntb_if.m	optional ntb | ntb_transport | if_ntb | ntb_hw_amd | ntb_hw_intel | ntb_hw_plx | ntb_hw
dev/ntb/ntb_hw/ntb_hw_amd.c	optional ntb_hw_amd | ntb_hw
dev/ntb/ntb_hw/ntb_hw_intel.c	optional ntb_hw_intel | ntb_hw
dev/ntb/ntb_hw/ntb_hw_plx.c	optional ntb_hw_plx | ntb_hw
dev/ntb/test/ntb_tool.c	optional ntb_tool
dev/nvram/nvram.c	optional nvram isa
dev/random/ivy.c	optional rdrand_rng !random_loadable
dev/random/nehemiah.c	optional padlock_rng !random_loadable
dev/qlxge/qls_dbg.c	optional qlxge pci
dev/qlxge/qls_dump.c	optional qlxge pci
dev/qlxge/qls_hw.c	optional qlxge pci
dev/qlxge/qls_ioctl.c	optional qlxge pci
dev/qlxge/qls_isr.c	optional qlxge pci
dev/qlxge/qls_os.c	optional qlxge pci
dev/qlxgb/qla_dbg.c	optional qlxgb pci
dev/qlxgb/qla_hw.c	optional qlxgb pci
dev/qlxgb/qla_ioctl.c	optional qlxgb pci
dev/qlxgb/qla_isr.c	optional qlxgb pci
dev/qlxgb/qla_misc.c	optional qlxgb pci
dev/qlxgb/qla_os.c	optional qlxgb pci
dev/qlxgbe/ql_dbg.c	optional qlxgbe pci
dev/qlxgbe/ql_hw.c	optional qlxgbe pci
dev/qlxgbe/ql_ioctl.c	optional qlxgbe pci
dev/qlxgbe/ql_isr.c	optional qlxgbe pci
dev/qlxgbe/ql_misc.c	optional qlxgbe pci
dev/qlxgbe/ql_os.c	optional qlxgbe pci
dev/qlxgbe/ql_reset.c	optional qlxgbe pci
dev/qlxgbe/ql_fw.c	optional qlxgbe pci
dev/qlxgbe/ql_boot.c	optional qlxgbe pci
dev/qlxgbe/ql_minidump.c	optional qlxgbe pci
dev/qlnx/qlnxe/ecore_cxt.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_dbg_fw_funcs.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_dcbx.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_dev.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_hw.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_init_fw_funcs.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_init_ops.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_int.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_l2.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_mcp.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_sp_commands.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/ecore_spq.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/qlnx_ioctl.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/qlnx/qlnxe/qlnx_os.c	optional qlnxe pci \
	compile-with "${LINUXKPI_C}"
dev/sfxge/common/ef10_ev.c	optional sfxge pci
dev/sfxge/common/ef10_filter.c	optional sfxge pci
dev/sfxge/common/ef10_image.c	optional sfxge pci
dev/sfxge/common/ef10_intr.c	optional sfxge pci
dev/sfxge/common/ef10_mac.c	optional sfxge pci
dev/sfxge/common/ef10_mcdi.c	optional sfxge pci
dev/sfxge/common/ef10_nic.c	optional sfxge pci
dev/sfxge/common/ef10_nvram.c	optional sfxge pci
dev/sfxge/common/ef10_phy.c	optional sfxge pci
dev/sfxge/common/ef10_rx.c	optional sfxge pci
dev/sfxge/common/ef10_tx.c	optional sfxge pci
dev/sfxge/common/ef10_vpd.c	optional sfxge pci
dev/sfxge/common/efx_bootcfg.c	optional sfxge pci
dev/sfxge/common/efx_crc32.c	optional sfxge pci
dev/sfxge/common/efx_ev.c	optional sfxge pci
dev/sfxge/common/efx_filter.c	optional sfxge pci
dev/sfxge/common/efx_hash.c	optional sfxge pci
dev/sfxge/common/efx_intr.c	optional sfxge pci
dev/sfxge/common/efx_lic.c	optional sfxge pci
dev/sfxge/common/efx_mac.c	optional sfxge pci
dev/sfxge/common/efx_mcdi.c	optional sfxge pci
dev/sfxge/common/efx_mon.c	optional sfxge pci
dev/sfxge/common/efx_nic.c	optional sfxge pci
dev/sfxge/common/efx_nvram.c	optional sfxge pci
dev/sfxge/common/efx_phy.c	optional sfxge pci
dev/sfxge/common/efx_port.c	optional sfxge pci
dev/sfxge/common/efx_rx.c	optional sfxge pci
dev/sfxge/common/efx_sram.c	optional sfxge pci
dev/sfxge/common/efx_tunnel.c	optional sfxge pci
dev/sfxge/common/efx_tx.c	optional sfxge pci
dev/sfxge/common/efx_vpd.c	optional sfxge pci
dev/sfxge/common/hunt_nic.c	optional sfxge pci
dev/sfxge/common/mcdi_mon.c	optional sfxge pci
dev/sfxge/common/medford_nic.c	optional sfxge pci
dev/sfxge/common/medford2_nic.c	optional sfxge pci
dev/sfxge/common/siena_mac.c	optional sfxge pci
dev/sfxge/common/siena_mcdi.c	optional sfxge pci
dev/sfxge/common/siena_nic.c	optional sfxge pci
dev/sfxge/common/siena_nvram.c	optional sfxge pci
dev/sfxge/common/siena_phy.c	optional sfxge pci
dev/sfxge/common/siena_sram.c	optional sfxge pci
dev/sfxge/common/siena_vpd.c	optional sfxge pci
dev/sfxge/sfxge.c	optional sfxge pci
dev/sfxge/sfxge_dma.c	optional sfxge pci
dev/sfxge/sfxge_ev.c	optional sfxge pci
dev/sfxge/sfxge_intr.c	optional sfxge pci
dev/sfxge/sfxge_mcdi.c	optional sfxge pci
dev/sfxge/sfxge_nvram.c	optional sfxge pci
dev/sfxge/sfxge_port.c	optional sfxge pci
dev/sfxge/sfxge_rx.c	optional sfxge pci
dev/sfxge/sfxge_tx.c	optional sfxge pci
dev/smartpqi/smartpqi_cam.c	optional smartpqi
dev/smartpqi/smartpqi_cmd.c	optional smartpqi
dev/smartpqi/smartpqi_discovery.c	optional smartpqi
dev/smartpqi/smartpqi_event.c	optional smartpqi
dev/smartpqi/smartpqi_helper.c	optional smartpqi
dev/smartpqi/smartpqi_init.c	optional smartpqi
dev/smartpqi/smartpqi_intr.c	optional smartpqi
dev/smartpqi/smartpqi_ioctl.c	optional smartpqi
dev/smartpqi/smartpqi_main.c	optional smartpqi
dev/smartpqi/smartpqi_mem.c	optional smartpqi
dev/smartpqi/smartpqi_misc.c	optional smartpqi
dev/smartpqi/smartpqi_queue.c	optional smartpqi
dev/smartpqi/smartpqi_request.c	optional smartpqi
dev/smartpqi/smartpqi_response.c	optional smartpqi
dev/smartpqi/smartpqi_sis.c	optional smartpqi
dev/smartpqi/smartpqi_tag.c	optional smartpqi
dev/speaker/spkr.c	optional speaker
dev/sume/if_sume.c	optional sume
dev/superio/superio.c	optional superio isa
dev/syscons/apm/apm_saver.c	optional apm_saver apm
dev/syscons/scvesactl.c	optional sc vga vesa
dev/syscons/scvgarndr.c	optional sc vga
dev/tpm/tpm.c	optional tpm
dev/tpm/tpm20.c	optional tpm
dev/tpm/tpm_crb.c	optional tpm acpi
dev/tpm/tpm_tis.c	optional tpm acpi
dev/tpm/tpm_acpi.c	optional tpm acpi
dev/tpm/tpm_isa.c	optional tpm isa
dev/uart/uart_cpu_x86.c	optional uart
dev/viawd/viawd.c	optional viawd
dev/vmd/vmd.c	optional vmd | vmd_bus
dev/wbwd/wbwd.c	optional wbwd
dev/xen/pci/xen_acpi_pci.c	optional xenhvm
dev/xen/pci/xen_pci.c	optional xenhvm
isa/syscons_isa.c	optional sc
isa/vga_isa.c	optional vga
kern/imgact_aout.c	optional compat_aout
kern/link_elf_obj.c	standard
#
# IA32 binary support
#
#amd64/ia32/ia32_exception.S	optional compat_freebsd32
amd64/ia32/ia32_reg.c	optional compat_freebsd32
amd64/ia32/ia32_signal.c	optional compat_freebsd32
amd64/ia32/ia32_syscall.c	optional compat_freebsd32
amd64/ia32/ia32_misc.c	optional compat_freebsd32
compat/ia32/ia32_sysvec.c	optional compat_freebsd32
compat/ndis/winx64_wrap.S	optional ndisapi pci
#
# x86 real mode BIOS emulator, required by dpms/pci/vesa
#
compat/x86bios/x86bios.c	optional x86bios | dpms | pci | vesa
contrib/x86emu/x86emu.c	optional x86bios | dpms | pci | vesa
# Common files where we currently configure the system differently, but perhaps shouldn't
# config(8) doesn't have a way to force standard options, so we've been inconsistent
# about marking non-optional things 'standard'.
x86/acpica/madt.c	optional acpi
x86/isa/atpic.c	optional atpic isa
x86/isa/elcr.c	optional atpic isa | mptable
x86/isa/isa.c	standard
x86/isa/isa_dma.c	standard
x86/pci/pci_early_quirks.c	optional pci
x86/x86/io_apic.c	standard
x86/x86/local_apic.c	standard
x86/x86/mptable.c	optional mptable
x86/x86/mptable_pci.c	optional mptable pci
x86/x86/msi.c	optional pci
x86/xen/pv.c	optional xenhvm
x86/xen/pvcpu_enum.c	optional xenhvm
x86/xen/xen_pci_bus.c	optional xenhvm
compat/linuxkpi/common/src/linux_fpu.c	optional compat_linuxkpi \
	compile-with "${LINUXKPI_C}"
contrib/openzfs/module/zcommon/zfs_fletcher_avx512.c	optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_fletcher_intel.c	optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zcommon/zfs_fletcher_sse.c	optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz_math_avx2.c	optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz_math_avx512bw.c	optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz_math_avx512f.c	optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz_math_sse2.c	optional zfs compile-with "${ZFS_C}"
contrib/openzfs/module/zfs/vdev_raidz_math_ssse3.c	optional zfs compile-with "${ZFS_C}"
# Clock calibration subroutine; uses floating-point arithmetic
subr_clockcalib.o	standard \
	dependency "$S/kern/subr_clockcalib.c" \
	compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} -mmmx -msse -msse2 ${.IMPSRC}" \
	no-implicit-rule \
	clean "subr_clockcalib.o"
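Note on the ice_ddp entries in the hunk above: the ice_ddp.c rule runs $S/tools/fw_stub.awk to generate a firmware(9) wrapper module around the DDP package that ice_ddp.fwo embeds (the 0x01031e00 argument encodes package version 1.3.30.0, matching ice-1.3.30.0.pkg). As orientation only, the C below is a hand-written sketch of the general shape of such a stub, not the literal awk output; the symbol names follow the usual binary-object convention but are assumptions here.

/*
 * Hypothetical sketch of a fw_stub.awk-style firmware stub for ice_ddp.
 * The .fwo object exposes start/end symbols around the raw image; the
 * stub registers it with firmware(9) on load.
 */
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/firmware.h>

extern char _binary_ice_ddp_fw_start[];
extern char _binary_ice_ddp_fw_end[];

static int
ice_ddp_fw_modevent(module_t mod, int type, void *unused)
{
	switch (type) {
	case MOD_LOAD:
		/* 0x01031e00 encodes DDP package version 1.3.30.0. */
		if (firmware_register("ice_ddp", _binary_ice_ddp_fw_start,
		    (size_t)(_binary_ice_ddp_fw_end -
		    _binary_ice_ddp_fw_start), 0x01031e00, NULL) == NULL)
			return (ENXIO);
		return (0);
	case MOD_UNLOAD:
		return (firmware_unregister("ice_ddp"));
	}
	return (EOPNOTSUPP);
}

static moduledata_t ice_ddp_fw_mod = {
	"ice_ddp_fw", ice_ddp_fw_modevent, NULL
};
DECLARE_MODULE(ice_ddp_fw, ice_ddp_fw_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);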
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 88cb483f178f..6b96de28128d 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -1,603 +1,605 @@
# $FreeBSD$
##
## Kernel
##
kern/msi_if.m	optional intrng
kern/pic_if.m	optional intrng
kern/subr_devmap.c	standard
kern/subr_intr.c	optional intrng
kern/subr_physmem.c	standard
libkern/bcmp.c	standard
libkern/strlen.c	standard
libkern/arm64/crc32c_armv8.S	standard
arm/arm/generic_timer.c	standard
arm/arm/gic.c	standard
arm/arm/gic_acpi.c	optional acpi
arm/arm/gic_fdt.c	optional fdt
arm/arm/pmu.c	standard
arm/arm/pmu_fdt.c	optional fdt
arm64/acpica/acpi_iort.c	optional acpi
arm64/acpica/acpi_machdep.c	optional acpi
arm64/acpica/OsdEnvironment.c	optional acpi
arm64/acpica/acpi_wakeup.c	optional acpi
arm64/acpica/pci_cfgreg.c	optional acpi pci
arm64/arm64/autoconf.c	standard
arm64/arm64/bus_machdep.c	standard
arm64/arm64/bus_space_asm.S	standard
arm64/arm64/busdma_bounce.c	standard
arm64/arm64/busdma_machdep.c	standard
arm64/arm64/bzero.S	standard
arm64/arm64/clock.c	standard
arm64/arm64/copyinout.S	standard
arm64/arm64/cpu_errata.c	standard
arm64/arm64/cpufunc_asm.S	standard
arm64/arm64/db_disasm.c	optional ddb
arm64/arm64/db_interface.c	optional ddb
arm64/arm64/db_trace.c	optional ddb
arm64/arm64/debug_monitor.c	standard
arm64/arm64/disassem.c	optional ddb
arm64/arm64/dump_machdep.c	standard
arm64/arm64/efirt_machdep.c	optional efirt
arm64/arm64/elf32_machdep.c	optional compat_freebsd32
arm64/arm64/elf_machdep.c	standard
arm64/arm64/exception.S	standard
arm64/arm64/exec_machdep.c	standard
arm64/arm64/freebsd32_machdep.c	optional compat_freebsd32
arm64/arm64/gdb_machdep.c	optional gdb
arm64/arm64/gicv3_its.c	optional intrng fdt
arm64/arm64/gic_v3.c	standard
arm64/arm64/gic_v3_acpi.c	optional acpi
arm64/arm64/gic_v3_fdt.c	optional fdt
arm64/arm64/hyp_stub.S	standard
arm64/arm64/identcpu.c	standard
arm64/arm64/locore.S	standard no-obj
arm64/arm64/machdep.c	standard
arm64/arm64/machdep_boot.c	standard
arm64/arm64/mem.c	standard
arm64/arm64/memcmp.S	standard
arm64/arm64/memcpy.S	standard
arm64/arm64/memset.S	standard
arm64/arm64/minidump_machdep.c	standard
arm64/arm64/mp_machdep.c	optional smp
arm64/arm64/nexus.c	standard
arm64/arm64/ofw_machdep.c	optional fdt
arm64/arm64/pmap.c	standard
arm64/arm64/ptrace_machdep.c	standard
arm64/arm64/sigtramp.S	standard
arm64/arm64/stack_machdep.c	optional ddb | stack
arm64/arm64/support_ifunc.c	standard
arm64/arm64/support.S	standard
arm64/arm64/swtch.S	standard
arm64/arm64/sys_machdep.c	standard
arm64/arm64/trap.c	standard
arm64/arm64/uio_machdep.c	standard
arm64/arm64/uma_machdep.c	standard
arm64/arm64/undefined.c	standard
arm64/arm64/unwind.c	optional ddb | kdtrace_hooks | stack
arm64/arm64/vfp.c	standard
arm64/arm64/vm_machdep.c	standard
arm64/coresight/coresight.c	standard
arm64/coresight/coresight_acpi.c	optional acpi
arm64/coresight/coresight_fdt.c	optional fdt
arm64/coresight/coresight_if.m	standard
arm64/coresight/coresight_cmd.c	standard
arm64/coresight/coresight_cpu_debug.c	optional fdt
arm64/coresight/coresight_etm4x.c	standard
arm64/coresight/coresight_etm4x_acpi.c	optional acpi
arm64/coresight/coresight_etm4x_fdt.c	optional fdt
arm64/coresight/coresight_funnel.c	standard
arm64/coresight/coresight_funnel_acpi.c	optional acpi
arm64/coresight/coresight_funnel_fdt.c	optional fdt
arm64/coresight/coresight_replicator.c	standard
arm64/coresight/coresight_replicator_acpi.c	optional acpi
arm64/coresight/coresight_replicator_fdt.c	optional fdt
arm64/coresight/coresight_tmc.c	standard
arm64/coresight/coresight_tmc_acpi.c	optional acpi
arm64/coresight/coresight_tmc_fdt.c	optional fdt
arm64/iommu/iommu.c	optional iommu
arm64/iommu/iommu_if.m	optional iommu
arm64/iommu/smmu.c	optional iommu
arm64/iommu/smmu_acpi.c	optional acpi iommu
arm64/iommu/smmu_quirks.c	optional iommu
dev/iommu/busdma_iommu.c	optional iommu
dev/iommu/iommu_gas.c	optional iommu
crypto/armv8/armv8_crypto.c	optional armv8crypto
armv8_crypto_wrap.o	optional armv8crypto \
	dependency "$S/crypto/armv8/armv8_crypto_wrap.c" \
	compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ ${WERROR} ${NO_WCAST_QUAL} ${PROF} -march=armv8-a+crypto ${.IMPSRC}" \
	no-implicit-rule \
	clean "armv8_crypto_wrap.o"
aesv8-armx.o	optional armv8crypto \
	dependency "$S/crypto/openssl/aarch64/aesv8-armx.S" \
	compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ ${WERROR} ${NO_WCAST_QUAL} ${PROF} -march=armv8-a+crypto ${.IMPSRC}" \
	no-implicit-rule \
	clean "aesv8-armx.o"
ghashv8-armx.o	optional armv8crypto \
	dependency "$S/crypto/openssl/aarch64/ghashv8-armx.S" \
	compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ ${WERROR} ${NO_WCAST_QUAL} ${PROF} -march=armv8-a+crypto ${.IMPSRC}" \
	no-implicit-rule \
	clean "ghashv8-armx.o"
crypto/des/des_enc.c	optional netsmb
crypto/openssl/ossl_aarch64.c	optional ossl
crypto/openssl/aarch64/chacha-armv8.S	optional ossl \
	compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
crypto/openssl/aarch64/poly1305-armv8.S	optional ossl \
	compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
crypto/openssl/aarch64/sha1-armv8.S	optional ossl \
	compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
crypto/openssl/aarch64/sha256-armv8.S	optional ossl \
	compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
crypto/openssl/aarch64/sha512-armv8.S	optional ossl \
	compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${PROF} ${.IMPSRC}"
dev/acpica/acpi_bus_if.m	optional acpi
dev/acpica/acpi_if.m	optional acpi
dev/acpica/acpi_pci_link.c	optional acpi pci
dev/acpica/acpi_pcib.c	optional acpi pci
dev/acpica/acpi_pxm.c	optional acpi
dev/ahci/ahci_generic.c	optional ahci
cddl/dev/dtrace/aarch64/dtrace_asm.S	optional dtrace compile-with "${DTRACE_S}"
cddl/dev/dtrace/aarch64/dtrace_subr.c	optional dtrace compile-with "${DTRACE_C}"
cddl/dev/fbt/aarch64/fbt_isa.c	optional dtrace_fbt | dtraceall compile-with "${FBT_C}"
##
## Device drivers
##
dev/axgbe/if_axgbe.c	optional axa
dev/axgbe/xgbe-desc.c	optional axa
dev/axgbe/xgbe-dev.c	optional axa
dev/axgbe/xgbe-drv.c	optional axa
dev/axgbe/xgbe-mdio.c	optional axa
dev/axgbe/xgbe-sysctl.c	optional axa
dev/axgbe/xgbe-txrx.c	optional axa
dev/axgbe/xgbe_osdep.c	optional axa
dev/axgbe/xgbe-phy-v1.c	optional axa
dev/cpufreq/cpufreq_dt.c	optional cpufreq fdt
dev/dpaa2/dpaa2_bp.c	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_cmd_if.m	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_con.c	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_io.c	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_mac.c	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_mc.c	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_mc_acpi.c	optional soc_nxp_ls dpaa2 acpi
dev/dpaa2/dpaa2_mc_fdt.c	optional soc_nxp_ls dpaa2 fdt
dev/dpaa2/dpaa2_mc_if.m	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_mcp.c	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_ni.c	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_rc.c	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_swp.c	optional soc_nxp_ls dpaa2
dev/dpaa2/dpaa2_swp_if.m	optional soc_nxp_ls dpaa2
dev/dpaa2/memac_mdio_acpi.c	optional soc_nxp_ls dpaa2 acpi
dev/dpaa2/memac_mdio_common.c	optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt
dev/dpaa2/memac_mdio_fdt.c	optional soc_nxp_ls dpaa2 fdt
dev/dpaa2/memac_mdio_if.m	optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt
dev/dwc/if_dwc.c	optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 | fdt dwc_socfpga soc_intel_stratix10
dev/dwc/if_dwc_if.m	optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 | fdt dwc_socfpga soc_intel_stratix10
dev/gpio/pl061.c	optional pl061 gpio
dev/gpio/pl061_acpi.c	optional pl061 gpio acpi
dev/gpio/pl061_fdt.c	optional pl061 gpio fdt
dev/hwpmc/hwpmc_arm64.c	optional hwpmc
dev/hwpmc/hwpmc_arm64_md.c	optional hwpmc
dev/ice/if_ice_iflib.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_lib.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_osdep.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_resmgr.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_strings.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_recovery_txrx.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_iflib_txrx.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_common.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_controlq.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_dcb.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flex_pipe.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_flow.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_nvm.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_sched.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_switch.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_vlan_mode.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fw_logging.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_fwlog.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/ice_rdma.c	optional ice pci \
	compile-with "${NORMAL_C} -I$S/dev/ice"
dev/ice/irdma_if.m	optional ice pci \
	compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/irdma_di_if.m	optional ice pci \
	compile-with "${NORMAL_M} -I$S/dev/ice"
+dev/ice/ice_ddp_common.c	optional ice pci \
+	compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c	optional ice_ddp \
	compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \
	no-ctfconvert no-implicit-rule before-depend local \
	clean "ice_ddp.c"
ice_ddp.fwo	optional ice_ddp \
	dependency "ice_ddp.fw" \
	compile-with "${NORMAL_FWO}" \
	no-implicit-rule \
	clean "ice_ddp.fwo"
ice_ddp.fw	optional ice_ddp \
	dependency "$S/contrib/dev/ice/ice-1.3.30.0.pkg" \
	compile-with "${CP} $S/contrib/dev/ice/ice-1.3.30.0.pkg ice_ddp.fw" \
	no-obj no-implicit-rule \
	clean "ice_ddp.fw"
dev/iicbus/sy8106a.c	optional sy8106a fdt
dev/iicbus/twsi/mv_twsi.c	optional twsi fdt
dev/iicbus/twsi/a10_twsi.c	optional twsi fdt
dev/iicbus/twsi/twsi.c	optional twsi fdt
dev/ipmi/ipmi.c	optional ipmi
dev/ipmi/ipmi_acpi.c	optional ipmi acpi
dev/ipmi/ipmi_kcs.c	optional ipmi
dev/ipmi/ipmi_smic.c	optional ipmi
dev/mbox/mbox_if.m	optional soc_brcm_bcm2837
dev/mmc/host/dwmmc.c	optional dwmmc fdt
dev/mmc/host/dwmmc_altera.c	optional dwmmc dwmmc_altera fdt
dev/mmc/host/dwmmc_hisi.c	optional dwmmc dwmmc_hisi fdt
dev/mmc/host/dwmmc_rockchip.c	optional dwmmc rk_dwmmc fdt
dev/neta/if_mvneta_fdt.c	optional neta fdt
dev/neta/if_mvneta.c	optional neta mdio mii
dev/ofw/ofw_cpu.c	optional fdt
dev/ofw/ofwpci.c	optional fdt pci
dev/pci/controller/pci_n1sdp.c	optional pci_n1sdp acpi
dev/pci/pci_host_generic.c	optional pci
dev/pci/pci_host_generic_acpi.c	optional pci acpi
dev/pci/pci_host_generic_fdt.c	optional pci fdt
dev/pci/pci_dw_mv.c	optional pci fdt
dev/pci/pci_dw.c	optional pci fdt
dev/pci/pci_dw_if.m	optional pci fdt
dev/psci/psci.c	standard
dev/psci/smccc_arm64.S	standard
dev/psci/smccc.c	standard
dev/safexcel/safexcel.c	optional safexcel fdt
dev/sdhci/sdhci_xenon.c	optional sdhci_xenon sdhci
dev/sdhci/sdhci_xenon_acpi.c	optional sdhci_xenon sdhci acpi
dev/sdhci/sdhci_xenon_fdt.c	optional sdhci_xenon sdhci fdt
dev/uart/uart_cpu_arm64.c	optional uart
dev/uart/uart_dev_mu.c	optional uart uart_mu
dev/uart/uart_dev_pl011.c	optional uart pl011
dev/usb/controller/dwc_otg_hisi.c	optional dwcotg fdt soc_hisi_hi6220
dev/usb/controller/dwc3.c	optional xhci acpi dwc3 | xhci fdt dwc3
dev/usb/controller/ehci_mv.c	optional ehci_mv fdt
dev/usb/controller/generic_ehci.c	optional ehci
dev/usb/controller/generic_ehci_acpi.c	optional ehci acpi
dev/usb/controller/generic_ehci_fdt.c	optional ehci fdt
dev/usb/controller/generic_ohci.c	optional ohci fdt
dev/usb/controller/generic_usb_if.m	optional ohci fdt
dev/usb/controller/musb_otg_allwinner.c	optional musb fdt soc_allwinner_a64
dev/usb/controller/usb_nop_xceiv.c	optional fdt
dev/usb/controller/generic_xhci.c	optional xhci
dev/usb/controller/generic_xhci_acpi.c	optional xhci acpi
dev/usb/controller/generic_xhci_fdt.c	optional xhci fdt
dev/vnic/mrml_bridge.c	optional vnic fdt
dev/vnic/nic_main.c	optional vnic pci
dev/vnic/nicvf_main.c	optional vnic pci pci_iov
dev/vnic/nicvf_queues.c	optional vnic pci pci_iov
dev/vnic/thunder_bgx_fdt.c	optional soc_cavm_thunderx pci vnic fdt
dev/vnic/thunder_bgx.c	optional soc_cavm_thunderx pci vnic pci
dev/vnic/thunder_mdio_fdt.c	optional soc_cavm_thunderx pci vnic fdt
dev/vnic/thunder_mdio.c	optional soc_cavm_thunderx pci vnic
dev/vnic/lmac_if.m	optional inet | inet6 | vnic
##
## SoC Support
##
# Allwinner common files
arm/allwinner/a10_timer.c	optional a10_timer fdt
arm/allwinner/a10_codec.c	optional sound a10_codec
arm/allwinner/a31_dmac.c	optional a31_dmac
arm/allwinner/sunxi_dma_if.m	optional a31_dmac
arm/allwinner/aw_cir.c	optional evdev aw_cir fdt
arm/allwinner/aw_dwc3.c	optional aw_dwc3 fdt
arm/allwinner/aw_gpio.c	optional gpio aw_gpio fdt
arm/allwinner/aw_mmc.c	optional mmc aw_mmc fdt | mmccam aw_mmc fdt
arm/allwinner/aw_nmi.c	optional aw_nmi fdt \
	compile-with "${NORMAL_C} -I$S/contrib/device-tree/include"
arm/allwinner/aw_pwm.c	optional aw_pwm fdt
arm/allwinner/aw_r_intc.c	optional aw_r_intc fdt
arm/allwinner/aw_rsb.c	optional aw_rsb fdt
arm/allwinner/aw_rtc.c	optional aw_rtc fdt
arm/allwinner/aw_sid.c	optional aw_sid nvmem fdt
arm/allwinner/aw_spi.c	optional aw_spi fdt
arm/allwinner/aw_syscon.c	optional aw_syscon syscon fdt
arm/allwinner/aw_thermal.c	optional aw_thermal nvmem fdt
arm/allwinner/aw_usbphy.c	optional ehci aw_usbphy fdt
arm/allwinner/aw_usb3phy.c	optional xhci aw_usbphy fdt
arm/allwinner/aw_wdog.c	optional aw_wdog fdt
arm/allwinner/axp81x.c	optional axp81x fdt
arm/allwinner/if_awg.c	optional awg syscon aw_sid nvmem fdt
# Allwinner clock driver
arm/allwinner/clkng/aw_ccung.c	optional aw_ccu fdt
arm/allwinner/clkng/aw_clk_frac.c	optional aw_ccu fdt
arm/allwinner/clkng/aw_clk_m.c	optional aw_ccu fdt
arm/allwinner/clkng/aw_clk_mipi.c	optional aw_ccu fdt
arm/allwinner/clkng/aw_clk_nkmp.c	optional aw_ccu fdt
arm/allwinner/clkng/aw_clk_nm.c	optional aw_ccu fdt
arm/allwinner/clkng/aw_clk_nmm.c	optional aw_ccu fdt
arm/allwinner/clkng/aw_clk_np.c	optional aw_ccu fdt
arm/allwinner/clkng/aw_clk_prediv_mux.c	optional aw_ccu fdt
arm/allwinner/clkng/ccu_a64.c	optional soc_allwinner_a64 aw_ccu fdt
arm/allwinner/clkng/ccu_h3.c	optional soc_allwinner_h5 aw_ccu fdt
arm/allwinner/clkng/ccu_h6.c	optional soc_allwinner_h6 aw_ccu fdt
arm/allwinner/clkng/ccu_h6_r.c	optional soc_allwinner_h6 aw_ccu fdt
arm/allwinner/clkng/ccu_sun8i_r.c	optional aw_ccu fdt
arm/allwinner/clkng/ccu_de2.c	optional aw_ccu fdt
# Allwinner padconf files
arm/allwinner/a64/a64_padconf.c	optional soc_allwinner_a64 fdt
arm/allwinner/a64/a64_r_padconf.c	optional soc_allwinner_a64 fdt
arm/allwinner/h3/h3_padconf.c	optional soc_allwinner_h5 fdt
arm/allwinner/h3/h3_r_padconf.c	optional soc_allwinner_h5 fdt
arm/allwinner/h6/h6_padconf.c	optional soc_allwinner_h6 fdt
arm/allwinner/h6/h6_r_padconf.c	optional soc_allwinner_h6 fdt
# Altera/Intel
dev/altera/dwc/if_dwc_socfpga.c	optional fdt dwc_socfpga
arm64/intel/firmware.c	optional soc_intel_stratix10
arm64/intel/stratix10-soc-fpga-mgr.c	optional soc_intel_stratix10
arm64/intel/stratix10-svc.c	optional soc_intel_stratix10
# Annapurna
arm/annapurna/alpine/alpine_ccu.c	optional al_ccu fdt
arm/annapurna/alpine/alpine_nb_service.c	optional al_nb_service fdt
arm/annapurna/alpine/alpine_pci.c	optional al_pci fdt
arm/annapurna/alpine/alpine_pci_msix.c	optional al_pci fdt
arm/annapurna/alpine/alpine_serdes.c	optional al_serdes fdt \
	no-depend \
	compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}"
# Broadcom
arm64/broadcom/brcmmdio/mdio_mux_iproc.c	optional soc_brcm_ns2 fdt
arm64/broadcom/brcmmdio/mdio_nexus_iproc.c	optional soc_brcm_ns2 fdt
arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c	optional soc_brcm_ns2 fdt pci
arm64/broadcom/genet/if_genet.c	optional soc_brcm_bcm2838 fdt genet
arm/broadcom/bcm2835/bcm2835_audio.c	optional sound vchiq fdt \
	compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
arm/broadcom/bcm2835/bcm2835_bsc.c	optional bcm2835_bsc fdt
arm/broadcom/bcm2835/bcm2835_clkman.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_cpufreq.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_dma.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_fbd.c	optional vt soc_brcm_bcm2837 fdt | vt soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_firmware.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_ft5406.c	optional evdev bcm2835_ft5406 fdt
arm/broadcom/bcm2835/bcm2835_gpio.c	optional gpio soc_brcm_bcm2837 fdt | gpio soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_intr.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_mbox.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_rng.c	optional !random_loadable soc_brcm_bcm2837 fdt | !random_loadable soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_sdhci.c	optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_sdhost.c	optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_spi.c	optional bcm2835_spi fdt
arm/broadcom/bcm2835/bcm2835_vcbus.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_vcio.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2835_wdog.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm2836.c	optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt
arm/broadcom/bcm2835/bcm283x_dwc_fdt.c	optional dwcotg fdt soc_brcm_bcm2837 | dwcotg fdt soc_brcm_bcm2838
arm/broadcom/bcm2835/bcm2838_pci.c	optional soc_brcm_bcm2838 fdt pci
arm/broadcom/bcm2835/bcm2838_xhci.c	optional soc_brcm_bcm2838 fdt pci xhci
arm/broadcom/bcm2835/raspberrypi_gpio.c	optional soc_brcm_bcm2837 gpio | soc_brcm_bcm2838 gpio
contrib/vchiq/interface/compat/vchi_bsd.c	optional vchiq soc_brcm_bcm2837 \
	compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c	optional vchiq soc_brcm_bcm2837 \
	compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_arm.c	optional vchiq soc_brcm_bcm2837 \
	compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_connected.c	optional vchiq soc_brcm_bcm2837 \
	compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_core.c	optional vchiq soc_brcm_bcm2837 \
	compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c	optional vchiq soc_brcm_bcm2837 \
	compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c	optional vchiq soc_brcm_bcm2837 \
	compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_shim.c	optional vchiq soc_brcm_bcm2837 \
	compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
contrib/vchiq/interface/vchiq_arm/vchiq_util.c	optional vchiq soc_brcm_bcm2837 \
	compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq"
# Cavium
arm64/cavium/thunder_pcie_fdt.c	optional soc_cavm_thunderx pci fdt
arm64/cavium/thunder_pcie_pem.c	optional soc_cavm_thunderx pci
arm64/cavium/thunder_pcie_pem_fdt.c	optional soc_cavm_thunderx pci fdt
arm64/cavium/thunder_pcie_common.c	optional soc_cavm_thunderx pci
# i.MX8 Clock support
arm64/freescale/imx/imx8mq_ccm.c	optional fdt soc_freescale_imx8
arm64/freescale/imx/clk/imx_clk_gate.c	optional fdt soc_freescale_imx8
arm64/freescale/imx/clk/imx_clk_mux.c	optional fdt soc_freescale_imx8
arm64/freescale/imx/clk/imx_clk_composite.c	optional fdt soc_freescale_imx8
arm64/freescale/imx/clk/imx_clk_sscg_pll.c	optional fdt soc_freescale_imx8
arm64/freescale/imx/clk/imx_clk_frac_pll.c	optional fdt soc_freescale_imx8
# iMX drivers
arm/freescale/imx/imx_gpio.c	optional gpio soc_freescale_imx8
arm/freescale/imx/imx_i2c.c	optional fsliic
arm/freescale/imx/imx_machdep.c	optional fdt soc_freescale_imx8
arm64/freescale/imx/imx7gpc.c	optional fdt soc_freescale_imx8
dev/ffec/if_ffec.c	optional ffec
# Marvell
arm/mv/a37x0_gpio.c	optional a37x0_gpio gpio fdt
arm/mv/a37x0_iic.c	optional a37x0_iic iicbus fdt
arm/mv/a37x0_spi.c	optional a37x0_spi spibus fdt
arm/mv/armada38x/armada38x_rtc.c	optional mv_rtc fdt
arm/mv/gpio.c	optional mv_gpio fdt
arm/mv/mvebu_gpio.c	optional mv_gpio fdt
arm/mv/mvebu_pinctrl.c	optional mvebu_pinctrl fdt
arm/mv/mv_ap806_clock.c	optional soc_marvell_8k fdt
arm/mv/mv_ap806_gicp.c	optional mv_ap806_gicp fdt
arm/mv/mv_ap806_sei.c	optional mv_ap806_sei fdt
arm/mv/mv_cp110_clock.c	optional soc_marvell_8k fdt
arm/mv/mv_cp110_icu.c	optional mv_cp110_icu fdt
arm/mv/mv_cp110_icu_bus.c	optional mv_cp110_icu fdt
arm/mv/mv_thermal.c	optional soc_marvell_8k mv_thermal fdt
arm/mv/armada38x/armada38x_rtc.c	optional mv_rtc fdt
# NVidia
arm/nvidia/tegra_abpmisc.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_ahci.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_efuse.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_ehci.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_gpio.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_i2c.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_lic.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_mc.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_pcie.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_sdhci.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_soctherm_if.m	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_soctherm.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_uart.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_usbphy.c	optional fdt soc_nvidia_tegra210
arm/nvidia/tegra_xhci.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/max77620.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/max77620_gpio.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/max77620_regulators.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/max77620_rtc.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/tegra210_car.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/tegra210_clk_per.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/tegra210_clk_pll.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/tegra210_clk_super.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/tegra210_coretemp.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/tegra210_cpufreq.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/tegra210_pinmux.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/tegra210_pmc.c	optional fdt soc_nvidia_tegra210
arm64/nvidia/tegra210/tegra210_xusbpadctl.c	optional fdt soc_nvidia_tegra210
# Nvidia firmware for Tegra
tegra210_xusb_fw.c	optional tegra210_xusb_fw \
	dependency "$S/conf/files.arm64" \
	compile-with "${AWK} -f $S/tools/fw_stub.awk tegra210_xusb.fw:tegra210_xusb_fw -mtegra210_xusb_fw -c${.TARGET}" \
	no-ctfconvert no-implicit-rule before-depend local \
	clean "tegra210_xusb_fw.c"
tegra210_xusb.fwo	optional tegra210_xusb_fw \
	dependency "tegra210_xusb.fw" \
	compile-with "${NORMAL_FWO}" \
	no-implicit-rule \
	clean "tegra210_xusb.fwo"
tegra210_xusb.fw	optional tegra210_xusb_fw \
	dependency "$S/contrib/dev/nvidia/tegra210_xusb.bin.uu" \
	compile-with "${NORMAL_FW}" \
	no-obj no-implicit-rule \
	clean "tegra210_xusb.fw"
# NXP
arm/freescale/vybrid/vf_i2c.c	optional vf_i2c iicbus soc_nxp_ls
arm64/qoriq/ls1046_gpio.c	optional ls1046_gpio gpio fdt soc_nxp_ls
arm64/qoriq/qoriq_dw_pci.c	optional pci fdt soc_nxp_ls
arm64/qoriq/qoriq_therm.c	optional pci fdt soc_nxp_ls
arm64/qoriq/qoriq_therm_if.m	optional pci fdt soc_nxp_ls
arm64/qoriq/clk/ls1046a_clkgen.c	optional clk soc_nxp_ls
arm64/qoriq/clk/ls1088a_clkgen.c	optional clk soc_nxp_ls
arm64/qoriq/clk/lx2160a_clkgen.c	optional clk soc_nxp_ls
arm64/qoriq/clk/qoriq_clk_pll.c	optional clk soc_nxp_ls
arm64/qoriq/clk/qoriq_clkgen.c	optional clk soc_nxp_ls
dev/ahci/ahci_fsl_fdt.c	optional soc_nxp_ls ahci fdt
# Qualcomm
arm64/qualcomm/qcom_gcc.c	optional qcom_gcc fdt
# RockChip Drivers
arm64/rockchip/rk3399_emmcphy.c	optional fdt rk_emmcphy soc_rockchip_rk3399
arm64/rockchip/rk_dwc3.c	optional fdt rk_dwc3 soc_rockchip_rk3399
arm64/rockchip/rk_i2c.c	optional fdt rk_i2c soc_rockchip_rk3328 | fdt rk_i2c soc_rockchip_rk3399
arm64/rockchip/rk805.c	optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399
arm64/rockchip/rk_grf.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399
arm64/rockchip/rk_pinctrl.c	optional fdt rk_pinctrl soc_rockchip_rk3328 | fdt rk_pinctrl soc_rockchip_rk3399
arm64/rockchip/rk_gpio.c	optional fdt rk_gpio soc_rockchip_rk3328 | fdt rk_gpio soc_rockchip_rk3399
arm64/rockchip/rk_iodomain.c	optional fdt rk_iodomain
arm64/rockchip/rk_spi.c	optional fdt rk_spi
arm64/rockchip/rk_usb2phy.c	optional fdt rk_usb2phy soc_rockchip_rk3328 | soc_rockchip_rk3399
arm64/rockchip/rk_typec_phy.c	optional fdt rk_typec_phy soc_rockchip_rk3399
arm64/rockchip/if_dwc_rk.c	optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399
arm64/rockchip/rk_tsadc_if.m	optional fdt soc_rockchip_rk3399
arm64/rockchip/rk_tsadc.c	optional fdt soc_rockchip_rk3399
arm64/rockchip/rk_pwm.c	optional fdt rk_pwm
arm64/rockchip/rk_pcie.c	optional fdt pci soc_rockchip_rk3399
arm64/rockchip/rk_pcie_phy.c	optional fdt pci soc_rockchip_rk3399
# RockChip Clock support
arm64/rockchip/clk/rk_cru.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk_clk_armclk.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk_clk_composite.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk_clk_fract.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk_clk_gate.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk_clk_mux.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk_clk_pll.c	optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk3328_cru.c	optional fdt soc_rockchip_rk3328
arm64/rockchip/clk/rk3399_cru.c	optional fdt soc_rockchip_rk3399
arm64/rockchip/clk/rk3399_pmucru.c	optional fdt soc_rockchip_rk3399
# Xilinx
arm/xilinx/uart_dev_cdnc.c	optional uart soc_xilinx_zynq
# Linuxkpi
compat/linuxkpi/common/src/linux_fpu.c	optional compat_linuxkpi \
	compile-with "${LINUXKPI_C}"
# Cloudabi
arm64/cloudabi32/cloudabi32_sysvec.c	optional compat_cloudabi32
arm64/cloudabi64/cloudabi64_sysvec.c	optional compat_cloudabi64
cloudabi32_vdso.o	optional compat_cloudabi32 \
	dependency "$S/contrib/cloudabi/cloudabi_vdso_armv6_on_64bit.S" \
	compile-with "${CC} -x assembler-with-cpp -m32 -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_armv6_on_64bit.S -o ${.TARGET}" \
	no-obj no-implicit-rule \
	clean "cloudabi32_vdso.o"
cloudabi32_vdso_blob.o	optional compat_cloudabi32 \
	dependency "cloudabi32_vdso.o" \
	compile-with "${OBJCOPY} --input-target binary --output-target elf64-littleaarch64 --binary-architecture aarch64 cloudabi32_vdso.o ${.TARGET}" \
	no-implicit-rule \
	clean "cloudabi32_vdso_blob.o"
cloudabi64_vdso.o	optional compat_cloudabi64 \
	dependency "$S/contrib/cloudabi/cloudabi_vdso_aarch64.S" \
	compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_aarch64.S -o ${.TARGET}" \
	no-obj no-implicit-rule \
	clean "cloudabi64_vdso.o"
cloudabi64_vdso_blob.o	optional compat_cloudabi64 \
	dependency "cloudabi64_vdso.o" \
	compile-with "${OBJCOPY} --input-target binary --output-target elf64-littleaarch64 --binary-architecture aarch64 cloudabi64_vdso.o ${.TARGET}" \
	no-implicit-rule \
	clean "cloudabi64_vdso_blob.o"
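Note on the cloudabi*_vdso_blob.o rules that close the hunk above: ${OBJCOPY} in binary-input mode wraps the raw vdso image in an ELF object and synthesizes _binary_<mangled input name>_start/_end (and _size) symbols around the bytes. Below is a minimal consumer sketch assuming the names objcopy derives from the input file cloudabi64_vdso.o; the function itself is hypothetical, for illustration only.

/*
 * Sketch: consuming an objcopy-embedded binary blob.  Dots in the
 * input file name become underscores in the generated symbols.
 */
#include <sys/types.h>

extern char _binary_cloudabi64_vdso_o_start[];
extern char _binary_cloudabi64_vdso_o_end[];

/* Hypothetical helper: hand the embedded image to whatever maps it. */
static void
map_vdso_blob(void (*mapfn)(const void *src, size_t len))
{
	mapfn(_binary_cloudabi64_vdso_o_start,
	    (size_t)(_binary_cloudabi64_vdso_o_end -
	    _binary_cloudabi64_vdso_o_start));
}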
diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
index c01e0e81ab1a..05f331e2c3e0 100644
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -1,369 +1,371 @@
# This file tells config what files go into building a kernel,
# files marked standard are always included.
#
# $FreeBSD$
#
# The long compile-with and dependency lines are required because of
# limitations in config: backslash-newline doesn't work in strings, and
# dependency lines other than the first are silently ignored.
#
#
# There is only an asm version on ppc64.
cddl/compat/opensolaris/kern/opensolaris_atomic.c	optional zfs powerpc | dtrace powerpc | zfs powerpcspe | dtrace powerpcspe compile-with "${ZFS_C}"
cddl/dev/dtrace/powerpc/dtrace_asm.S	optional dtrace compile-with "${DTRACE_S}"
cddl/dev/dtrace/powerpc/dtrace_subr.c	optional dtrace compile-with "${DTRACE_C}"
cddl/dev/fbt/powerpc/fbt_isa.c	optional dtrace_fbt | dtraceall compile-with "${FBT_C}"
crypto/des/des_enc.c	optional netsmb
dev/aacraid/aacraid_endian.c	optional aacraid
dev/adb/adb_bus.c	optional adb
dev/adb/adb_kbd.c	optional adb
dev/adb/adb_mouse.c	optional adb
dev/adb/adb_hb_if.m	optional adb
dev/adb/adb_if.m	optional adb
dev/adb/adb_buttons.c	optional adb
dev/agp/agp_apple.c	optional agp powermac
dev/fb/fb.c	optional sc
dev/hwpmc/hwpmc_e500.c	optional hwpmc
dev/hwpmc/hwpmc_mpc7xxx.c	optional hwpmc
dev/hwpmc/hwpmc_power8.c	optional hwpmc
dev/hwpmc/hwpmc_powerpc.c	optional hwpmc
dev/hwpmc/hwpmc_ppc970.c	optional hwpmc
dev/iicbus/ad7417.c	optional ad7417 powermac
dev/iicbus/adm1030.c	optional powermac windtunnel | adm1030 powermac
dev/iicbus/adt746x.c	optional adt746x powermac
dev/iicbus/ds1631.c	optional ds1631 powermac
dev/iicbus/ds1775.c	optional ds1775 powermac
dev/iicbus/max6690.c	optional max6690 powermac
dev/iicbus/ofw_iicbus.c	optional iicbus aim
dev/ipmi/ipmi.c	optional ipmi
dev/ipmi/ipmi_opal.c	optional powernv ipmi
-dev/ice/if_ice_iflib.c	optional ice pci powerpc64 \
+dev/ice/if_ice_iflib.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_lib.c	optional ice pci powerpc64 \
+dev/ice/ice_lib.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_osdep.c	optional ice pci powerpc64 \
+dev/ice/ice_osdep.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_resmgr.c	optional ice pci powerpc64 \
+dev/ice/ice_resmgr.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_strings.c	optional ice pci powerpc64 \
+dev/ice/ice_strings.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_iflib_recovery_txrx.c	optional ice pci powerpc64 \
+dev/ice/ice_iflib_recovery_txrx.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_iflib_txrx.c	optional ice pci powerpc64 \
+dev/ice/ice_iflib_txrx.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_common.c	optional ice pci powerpc64 \
+dev/ice/ice_common.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_controlq.c	optional ice pci powerpc64 \
+dev/ice/ice_controlq.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_dcb.c	optional ice pci powerpc64 \
+dev/ice/ice_dcb.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_flex_pipe.c	optional ice pci powerpc64 \
+dev/ice/ice_flex_pipe.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_flow.c	optional ice pci powerpc64 \
+dev/ice/ice_flow.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_nvm.c	optional ice pci powerpc64 \
+dev/ice/ice_nvm.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_sched.c	optional ice pci powerpc64 \
+dev/ice/ice_sched.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_switch.c	optional ice pci powerpc64 \
+dev/ice/ice_switch.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_vlan_mode.c	optional ice pci powerpc64 \
+dev/ice/ice_vlan_mode.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_fw_logging.c	optional ice pci powerpc64 \
+dev/ice/ice_fw_logging.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_fwlog.c	optional ice pci powerpc64 \
+dev/ice/ice_fwlog.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/ice_rdma.c	optional ice pci powerpc64 \
+dev/ice/ice_rdma.c	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_C} -I$S/dev/ice"
-dev/ice/irdma_if.m	optional ice pci powerpc64 \
+dev/ice/irdma_if.m	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_M} -I$S/dev/ice"
-dev/ice/irdma_di_if.m	optional ice pci powerpc64 \
+dev/ice/irdma_di_if.m	optional ice pci powerpc64 | ice pci powerpc64le \
	compile-with "${NORMAL_M} -I$S/dev/ice"
+dev/ice/ice_ddp_common.c	optional ice pci powerpc64 | ice pci powerpc64le \
+	compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c	optional ice_ddp powerpc64 | ice pci powerpc64le \
	compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \
	no-ctfconvert no-implicit-rule before-depend local \
	clean "ice_ddp.c"
ice_ddp.fwo	optional ice_ddp powerpc64 | ice pci powerpc64le \
	dependency "ice_ddp.fw" \
	compile-with "${NORMAL_FWO}" \
	no-implicit-rule \
	clean "ice_ddp.fwo"
ice_ddp.fw	optional ice_ddp powerpc64 | ice pci powerpc64le \
	dependency "$S/contrib/dev/ice/ice-1.3.30.0.pkg" \
	compile-with "${CP} $S/contrib/dev/ice/ice-1.3.30.0.pkg ice_ddp.fw" \
	no-obj no-implicit-rule \
	clean "ice_ddp.fw"
dev/ixl/if_ixl.c	optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_main.c	optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iflib.c	optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_qmgr.c	optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_iov.c	optional ixl pci pci_iov powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_pf_i2c.c	optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/if_iavf.c	optional iavf pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/iavf_vc.c	optional iavf pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/ixl_txrx.c	optional ixl pci powerpc64 | \
	iavf pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_osdep.c	optional ixl pci powerpc64 | \
	iavf pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_lan_hmc.c	optional ixl pci powerpc64 | \
	iavf pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_hmc.c	optional ixl pci powerpc64 | \
	iavf pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_common.c	optional ixl pci powerpc64 | \
	iavf pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_nvm.c	optional ixl pci powerpc64 | \
	iavf pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_adminq.c	optional ixl pci powerpc64 | \
	iavf pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
dev/ixl/i40e_dcb.c	optional ixl pci powerpc64 \
	compile-with "${NORMAL_C} -I$S/dev/ixl"
# Most ofw stuff below is brought in by conf/files for options FDT, but
# we always want it, even on non-FDT platforms.
dev/fdt/simplebus.c	standard
dev/ofw/openfirm.c	standard
dev/ofw/openfirmio.c	standard
dev/ofw/ofw_bus_if.m	standard
dev/ofw/ofw_cpu.c	standard
dev/ofw/ofw_if.m	standard
dev/ofw/ofw_bus_subr.c	standard
dev/ofw/ofw_console.c	optional aim
dev/ofw/ofw_disk.c	optional ofwd aim
dev/ofw/ofwbus.c	standard
dev/ofw/ofwpci.c	optional pci
dev/ofw/ofw_standard.c	optional aim powerpc
dev/ofw/ofw_subr.c	standard
dev/powermac_nvram/powermac_nvram.c	optional powermac_nvram powermac
dev/quicc/quicc_bfe_fdt.c	optional quicc mpc85xx
dev/random/darn.c	optional powerpc64 !random_loadable | powerpc64le !random_loadable
dev/scc/scc_bfe_macio.c	optional scc powermac
dev/sdhci/fsl_sdhci.c	optional mpc85xx sdhci
dev/sec/sec.c	optional sec mpc85xx
dev/sound/macio/aoa.c	optional snd_davbus | snd_ai2s powermac
dev/sound/macio/davbus.c	optional snd_davbus powermac
dev/sound/macio/i2s.c	optional snd_ai2s powermac
dev/sound/macio/onyx.c	optional snd_ai2s iicbus powermac
dev/sound/macio/snapper.c	optional snd_ai2s iicbus powermac
dev/sound/macio/tumbler.c	optional snd_ai2s iicbus powermac
dev/syscons/scgfbrndr.c	optional sc
dev/tsec/if_tsec.c	optional tsec
dev/tsec/if_tsec_fdt.c	optional tsec
dev/uart/uart_cpu_powerpc.c	optional uart
dev/usb/controller/ehci_fsl.c	optional ehci mpc85xx
dev/vt/hw/ofwfb/ofwfb.c	optional vt aim
kern/subr_atomic64.c	optional powerpc | powerpcspe
kern/subr_dummy_vdso_tc.c	standard
kern/syscalls.c	optional ktr
kern/subr_sfbuf.c	standard
libkern/ashldi3.c	optional powerpc | powerpcspe
libkern/ashrdi3.c	optional powerpc | powerpcspe
libkern/bcmp.c	standard
libkern/bcopy.c	standard
libkern/cmpdi2.c	optional powerpc | powerpcspe
libkern/divdi3.c	optional powerpc | powerpcspe
libkern/ffs.c	standard
libkern/ffsl.c	standard
libkern/ffsll.c	standard
libkern/flsll.c	standard
libkern/lshrdi3.c	optional powerpc | powerpcspe
libkern/memcmp.c	standard
libkern/memset.c	standard
libkern/moddi3.c	optional powerpc | powerpcspe
libkern/qdivrem.c	optional powerpc | powerpcspe
libkern/strlen.c	standard
libkern/ucmpdi2.c	optional powerpc | powerpcspe
libkern/udivdi3.c	optional powerpc | powerpcspe
libkern/umoddi3.c	optional powerpc | powerpcspe
powerpc/aim/locore.S	optional aim no-obj
powerpc/aim/aim_machdep.c	optional aim
powerpc/aim/mmu_oea.c	optional aim powerpc
powerpc/aim/mmu_oea64.c	optional aim
powerpc/aim/mmu_radix.c	optional aim powerpc64 | aim powerpc64le
powerpc/aim/moea64_native.c	optional aim
powerpc/aim/mp_cpudep.c	optional aim
powerpc/aim/slb.c	optional aim powerpc64 | aim powerpc64le
powerpc/amigaone/platform_amigaone.c	optional amigaone
powerpc/amigaone/cpld_a1222.c	optional powerpc amigaone | powerpcspe amigaone
powerpc/amigaone/cpld_x5000.c	optional powerpc amigaone | powerpc64 amigaone
powerpc/booke/locore.S	optional booke no-obj
powerpc/booke/booke_machdep.c	optional booke
powerpc/booke/machdep_e500.c	optional booke_e500
powerpc/booke/mp_cpudep.c	optional booke smp
powerpc/booke/platform_bare.c	optional booke
powerpc/booke/pmap.c	optional booke
powerpc/booke/spe.c	optional powerpcspe
powerpc/cpufreq/dfs.c	optional cpufreq
powerpc/cpufreq/mpc85xx_jog.c	optional cpufreq mpc85xx
powerpc/cpufreq/pcr.c	optional cpufreq aim
powerpc/cpufreq/pmcr.c	optional cpufreq aim powerpc64 | cpufreq aim powerpc64le
powerpc/cpufreq/pmufreq.c	optional cpufreq aim pmu
powerpc/fpu/fpu_add.c	optional fpu_emu | powerpcspe
powerpc/fpu/fpu_compare.c	optional fpu_emu | powerpcspe
powerpc/fpu/fpu_div.c	optional fpu_emu | powerpcspe
powerpc/fpu/fpu_emu.c	optional fpu_emu
powerpc/fpu/fpu_explode.c	optional fpu_emu | powerpcspe
powerpc/fpu/fpu_implode.c	optional fpu_emu | powerpcspe
powerpc/fpu/fpu_mul.c	optional fpu_emu | powerpcspe
powerpc/fpu/fpu_sqrt.c	optional fpu_emu
powerpc/fpu/fpu_subr.c	optional fpu_emu | powerpcspe
powerpc/mambo/mambocall.S	optional mambo
powerpc/mambo/mambo.c	optional mambo
powerpc/mambo/mambo_console.c	optional mambo
powerpc/mambo/mambo_disk.c	optional mambo
powerpc/mikrotik/platform_rb.c	optional mikrotik
powerpc/mikrotik/rb_led.c	optional mikrotik
powerpc/mpc85xx/atpic.c	optional mpc85xx isa
powerpc/mpc85xx/ds1553_bus_fdt.c	optional ds1553
powerpc/mpc85xx/ds1553_core.c	optional ds1553
powerpc/mpc85xx/fsl_diu.c	optional mpc85xx diu
powerpc/mpc85xx/fsl_espi.c	optional mpc85xx spibus
powerpc/mpc85xx/fsl_sata.c	optional mpc85xx ata
powerpc/mpc85xx/i2c.c	optional mpc85xx iicbus
powerpc/mpc85xx/isa.c	optional mpc85xx isa
powerpc/mpc85xx/lbc.c	optional mpc85xx
powerpc/mpc85xx/mpc85xx.c	optional mpc85xx
powerpc/mpc85xx/mpc85xx_cache.c	optional mpc85xx
powerpc/mpc85xx/mpc85xx_gpio.c	optional mpc85xx gpio
powerpc/mpc85xx/platform_mpc85xx.c	optional mpc85xx
powerpc/mpc85xx/pci_mpc85xx.c	optional pci mpc85xx
powerpc/mpc85xx/pci_mpc85xx_pcib.c	optional pci mpc85xx
powerpc/mpc85xx/qoriq_gpio.c	optional mpc85xx gpio
powerpc/ofw/ofw_machdep.c	standard
powerpc/ofw/ofw_pcibus.c	optional pci
powerpc/ofw/ofw_pcib_pci.c	optional pci
powerpc/ofw/ofw_real.c	optional aim
powerpc/ofw/ofw_syscons.c	optional sc aim
powerpc/ofw/ofwcall32.S	optional aim powerpc
powerpc/ofw/ofwcall64.S	optional aim powerpc64 | aim powerpc64le
powerpc/ofw/openpic_ofw.c	standard
powerpc/ofw/rtas.c	optional aim
powerpc/ofw/ofw_initrd.c	optional md_root_mem powerpc64 | md_root_mem powerpc64le
powerpc/powermac/ata_kauai.c	optional powermac ata | powermac atamacio
powerpc/powermac/ata_macio.c	optional powermac ata | powermac atamacio
powerpc/powermac/ata_dbdma.c	optional powermac ata | powermac atamacio
powerpc/powermac/atibl.c	optional powermac atibl
powerpc/powermac/cuda.c	optional powermac cuda
powerpc/powermac/cpcht.c	optional powermac pci
powerpc/powermac/dbdma.c	optional powermac pci
powerpc/powermac/fcu.c	optional powermac fcu
powerpc/powermac/grackle.c	optional powermac pci
powerpc/powermac/hrowpic.c	optional powermac pci
powerpc/powermac/kiic.c	optional powermac kiic
powerpc/powermac/macgpio.c	optional powermac pci
powerpc/powermac/macio.c	optional powermac pci
powerpc/powermac/nvbl.c	optional powermac nvbl
powerpc/powermac/platform_powermac.c	optional powermac
powerpc/powermac/powermac_thermal.c	optional powermac
powerpc/powermac/pswitch.c	optional powermac pswitch
powerpc/powermac/pmu.c	optional powermac pmu
powerpc/powermac/smu.c	optional powermac smu
powerpc/powermac/smusat.c	optional powermac smu
powerpc/powermac/uninorth.c	optional powermac
powerpc/powermac/uninorthpci.c	optional powermac pci
powerpc/powermac/vcoregpio.c	optional powermac
powerpc/powernv/opal.c	optional powernv
powerpc/powernv/opal_async.c	optional powernv
powerpc/powernv/opal_console.c	optional powernv
powerpc/powernv/opal_dbg.c	optional powernv gdb
powerpc/powernv/opal_dev.c	optional powernv
powerpc/powernv/opal_flash.c	optional powernv opalflash
powerpc/powernv/opal_hmi.c	optional powernv
powerpc/powernv/opal_i2c.c	optional iicbus fdt powernv
powerpc/powernv/opal_i2cm.c	optional iicbus fdt powernv
powerpc/powernv/opal_nvram.c	optional powernv nvram
powerpc/powernv/opal_pci.c	optional powernv pci
powerpc/powernv/opal_sensor.c	optional powernv
powerpc/powernv/opalcall.S	optional powernv
powerpc/powernv/platform_powernv.c	optional powernv
powerpc/powernv/powernv_centaur.c	optional powernv
powerpc/powernv/powernv_xscom.c	optional powernv
powerpc/powernv/xive.c	optional powernv
powerpc/powerpc/altivec.c	optional !powerpcspe
powerpc/powerpc/autoconf.c	standard
powerpc/powerpc/bus_machdep.c	standard
powerpc/powerpc/busdma_machdep.c	standard
powerpc/powerpc/clock.c	standard
powerpc/powerpc/copyinout.c	optional aim
powerpc/powerpc/cpu.c	standard
powerpc/powerpc/cpu_subr64.S	optional powerpc64 | powerpc64le
powerpc/powerpc/db_disasm.c	optional ddb
powerpc/powerpc/db_hwwatch.c	optional ddb
powerpc/powerpc/db_interface.c	optional ddb
powerpc/powerpc/db_trace.c	optional ddb
powerpc/powerpc/dump_machdep.c	standard
powerpc/powerpc/elf32_machdep.c	optional powerpc | powerpcspe | compat_freebsd32
powerpc/powerpc/elf64_machdep.c	optional powerpc64 | powerpc64le
powerpc/powerpc/exec_machdep.c	standard
powerpc/powerpc/fpu.c	standard
powerpc/powerpc/gdb_machdep.c	optional gdb
powerpc/powerpc/interrupt.c	standard
powerpc/powerpc/intr_machdep.c	standard
powerpc/powerpc/iommu_if.m	standard
powerpc/powerpc/machdep.c	standard
powerpc/powerpc/mem.c	optional mem
powerpc/powerpc/minidump_machdep.c	optional powerpc64 | powerpc64le
powerpc/powerpc/mp_machdep.c	optional smp
powerpc/powerpc/nexus.c	standard
powerpc/powerpc/openpic.c	standard
powerpc/powerpc/pic_if.m	standard
powerpc/powerpc/pmap_dispatch.c	standard
powerpc/powerpc/platform.c	standard
powerpc/powerpc/platform_if.m	standard
powerpc/powerpc/ptrace_machdep.c	standard
powerpc/powerpc/sc_machdep.c	optional sc
powerpc/powerpc/setjmp.S	standard
powerpc/powerpc/sigcode32.S	optional powerpc | powerpcspe | compat_freebsd32
powerpc/powerpc/sigcode64.S	optional powerpc64 | powerpc64le
powerpc/powerpc/swtch32.S	optional powerpc | powerpcspe
powerpc/powerpc/swtch64.S	optional powerpc64 | powerpc64le
powerpc/powerpc/stack_machdep.c	optional ddb | stack
powerpc/powerpc/support.S	optional powerpc64 | powerpc64le | booke
powerpc/powerpc/syncicache.c	standard
powerpc/powerpc/sys_machdep.c	standard
powerpc/powerpc/trap.c	standard
powerpc/powerpc/uio_machdep.c	standard
powerpc/powerpc/uma_machdep.c	standard
powerpc/powerpc/vm_machdep.c	standard
powerpc/ps3/ehci_ps3.c	optional ps3 ehci
powerpc/ps3/ohci_ps3.c	optional ps3 ohci
powerpc/ps3/if_glc.c	optional ps3 glc
powerpc/ps3/mmu_ps3.c	optional ps3
powerpc/ps3/platform_ps3.c	optional ps3
powerpc/ps3/ps3bus.c	optional ps3
powerpc/ps3/ps3cdrom.c	optional ps3 scbus
powerpc/ps3/ps3disk.c	optional ps3
powerpc/ps3/ps3pic.c	optional ps3
powerpc/ps3/ps3_syscons.c	optional ps3 vt
powerpc/ps3/ps3-hvcall.S	optional ps3
powerpc/pseries/phyp-hvcall.S	optional pseries powerpc64 | pseries powerpc64le
powerpc/pseries/mmu_phyp.c	optional pseries powerpc64 | pseries powerpc64le
powerpc/pseries/phyp_console.c	optional pseries powerpc64 uart | pseries powerpc64le uart
powerpc/pseries/phyp_dbg.c	optional pseries powerpc64 gdb | pseries powerpc64le gdb
powerpc/pseries/phyp_llan.c	optional llan
powerpc/pseries/phyp_vscsi.c	optional pseries powerpc64 scbus | pseries powerpc64le scbus
powerpc/pseries/platform_chrp.c	optional pseries
powerpc/pseries/plpar_iommu.c	optional pseries powerpc64 | pseries powerpc64le
powerpc/pseries/plpar_pcibus.c	optional pseries powerpc64 pci | pseries powerpc64le pci
powerpc/pseries/rtas_dev.c	optional pseries
powerpc/pseries/rtas_pci.c	optional pseries pci
powerpc/pseries/vdevice.c optional pseries powerpc64 | pseries powerpc64le powerpc/pseries/xics.c optional pseries powerpc64 | pseries powerpc64le powerpc/psim/iobus.c optional psim powerpc/psim/ata_iobus.c optional ata psim powerpc/psim/openpic_iobus.c optional psim powerpc/psim/uart_iobus.c optional uart psim diff --git a/sys/dev/ice/ice_adminq_cmd.h b/sys/dev/ice/ice_adminq_cmd.h index a07ca6780a3c..92ad8055b666 100644 --- a/sys/dev/ice/ice_adminq_cmd.h +++ b/sys/dev/ice/ice_adminq_cmd.h @@ -1,3243 +1,3349 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_ADMINQ_CMD_H_ #define _ICE_ADMINQ_CMD_H_ /* This header file defines the Admin Queue commands, error codes and * descriptor format. It is shared between Firmware and Software. 
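 *
 * Commands come in two flavors: "direct" commands carry their whole
 * payload in the 16-byte descriptor itself, while "indirect" commands
 * hand the firmware a DMA buffer whose physical address is split across
 * the addr_high/addr_low words that recur in the structures below. As a
 * rough sketch (assuming the CPU_TO_LE32 wrapper from ice_osdep.h; the
 * variable names are illustrative only):
 *
 *	cmd->addr_low  = CPU_TO_LE32((u32)(buf_paddr & 0xFFFFFFFF));
 *	cmd->addr_high = CPU_TO_LE32((u32)(buf_paddr >> 32));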
*/ +#include "ice_osdep.h" +#include "ice_defs.h" +#include "ice_bitops.h" + #define ICE_MAX_VSI 768 #define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 +enum ice_aq_res_access_type { + ICE_RES_READ = 1, + ICE_RES_WRITE +}; + struct ice_aqc_generic { __le32 param0; __le32 param1; __le32 addr_high; __le32 addr_low; }; /* Get version (direct 0x0001) */ struct ice_aqc_get_ver { __le32 rom_ver; __le32 fw_build; u8 fw_branch; u8 fw_major; u8 fw_minor; u8 fw_patch; u8 api_branch; u8 api_major; u8 api_minor; u8 api_patch; }; /* Send driver version (indirect 0x0002) */ struct ice_aqc_driver_ver { u8 major_ver; u8 minor_ver; u8 build_ver; u8 subbuild_ver; u8 reserved[4]; __le32 addr_high; __le32 addr_low; }; /* Queue Shutdown (direct 0x0003) */ struct ice_aqc_q_shutdown { u8 driver_unloading; #define ICE_AQC_DRIVER_UNLOADING BIT(0) u8 reserved[15]; }; /* Get Expanded Error Code (0x0005, direct) */ struct ice_aqc_get_exp_err { __le32 reason; #define ICE_AQC_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF __le32 identifier; u8 rsvd[8]; }; /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ struct ice_aqc_req_res { __le16 res_id; #define ICE_AQC_RES_ID_NVM 1 #define ICE_AQC_RES_ID_SDP 2 #define ICE_AQC_RES_ID_CHNG_LOCK 3 #define ICE_AQC_RES_ID_GLBL_LOCK 4 __le16 access_type; #define ICE_AQC_RES_ACCESS_READ 1 #define ICE_AQC_RES_ACCESS_WRITE 2 /* Upon successful completion, FW writes this value and driver is * expected to release resource before timeout. This value is provided * in milliseconds. */ __le32 timeout; #define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000 #define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000 #define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000 #define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000 /* For SDP: pin ID of the SDP */ __le32 res_number; /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */ __le16 status; #define ICE_AQ_RES_GLBL_SUCCESS 0 #define ICE_AQ_RES_GLBL_IN_PROG 1 #define ICE_AQ_RES_GLBL_DONE 2 u8 reserved[2]; }; /* Get function capabilities (indirect 0x000A) * Get device capabilities (indirect 0x000B) */ struct ice_aqc_list_caps { u8 cmd_flags; u8 pf_index; u8 reserved[2]; __le32 count; __le32 addr_high; __le32 addr_low; }; /* Device/Function buffer entry, repeated per reported capability */ struct ice_aqc_list_caps_elem { __le16 cap; #define ICE_AQC_CAPS_SWITCHING_MODE 0x0001 #define ICE_AQC_CAPS_MANAGEABILITY_MODE 0x0002 #define ICE_AQC_CAPS_OS2BMC 0x0004 #define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005 #define ICE_AQC_MAX_VALID_FUNCTIONS 0x8 #define ICE_AQC_CAPS_ALTERNATE_RAM 0x0006 #define ICE_AQC_CAPS_WOL_PROXY 0x0008 #define ICE_AQC_CAPS_SRIOV 0x0012 #define ICE_AQC_CAPS_VF 0x0013 #define ICE_AQC_CAPS_802_1QBG 0x0015 #define ICE_AQC_CAPS_802_1BR 0x0016 #define ICE_AQC_CAPS_VSI 0x0017 #define ICE_AQC_CAPS_DCB 0x0018 #define ICE_AQC_CAPS_RSVD 0x0021 #define ICE_AQC_CAPS_ISCSI 0x0022 #define ICE_AQC_CAPS_RSS 0x0040 #define ICE_AQC_CAPS_RXQS 0x0041 #define ICE_AQC_CAPS_TXQS 0x0042 #define ICE_AQC_CAPS_MSIX 0x0043 #define ICE_AQC_CAPS_MAX_MTU 0x0047 -#define ICE_AQC_CAPS_NVM_VER 0x0048 -#define ICE_AQC_CAPS_OROM_VER 0x004A -#define ICE_AQC_CAPS_NET_VER 0x004C #define ICE_AQC_CAPS_CEM 0x00F2 #define ICE_AQC_CAPS_IWARP 0x0051 #define ICE_AQC_CAPS_LED 0x0061 #define ICE_AQC_CAPS_SDP 0x0062 #define ICE_AQC_CAPS_WR_CSR_PROT 0x0064 #define ICE_AQC_CAPS_LOGI_TO_PHYSI_PORT_MAP 0x0073 #define ICE_AQC_CAPS_SKU 0x0074 #define ICE_AQC_CAPS_PORT_MAP 0x0075 #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 #define 
ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 #define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0 0x0081 #define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1 0x0082 #define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2 0x0083 #define ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3 0x0084 +#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085 +#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087 +#define ICE_AQC_CAPS_DYN_FLATTENING 0x0090 +#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092 u8 major_ver; u8 minor_ver; /* Number of resources described by this capability */ __le32 number; /* Only meaningful for some types of resources */ __le32 logical_id; /* Only meaningful for some types of resources */ __le32 phys_id; __le64 rsvd1; __le64 rsvd2; }; /* Manage MAC address, read command - indirect (0x0107) * This struct is also used for the response */ struct ice_aqc_manage_mac_read { __le16 flags; /* Zeroed by device driver */ #define ICE_AQC_MAN_MAC_LAN_ADDR_VALID BIT(4) #define ICE_AQC_MAN_MAC_SAN_ADDR_VALID BIT(5) #define ICE_AQC_MAN_MAC_PORT_ADDR_VALID BIT(6) #define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7) #define ICE_AQC_MAN_MAC_MC_MAG_EN BIT(8) #define ICE_AQC_MAN_MAC_WOL_PRESERVE_ON_PFR BIT(9) #define ICE_AQC_MAN_MAC_READ_S 4 #define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S) u8 rsvd[2]; u8 num_addr; /* Used in response */ u8 rsvd1[3]; __le32 addr_high; __le32 addr_low; }; /* Response buffer format for manage MAC read command */ struct ice_aqc_manage_mac_read_resp { u8 lport_num; u8 addr_type; #define ICE_AQC_MAN_MAC_ADDR_TYPE_LAN 0 #define ICE_AQC_MAN_MAC_ADDR_TYPE_WOL 1 u8 mac_addr[ETH_ALEN]; }; /* Manage MAC address, write command - direct (0x0108) */ struct ice_aqc_manage_mac_write { u8 rsvd; u8 flags; #define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0) #define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1) #define ICE_AQC_MAN_MAC_WR_S 6 #define ICE_AQC_MAN_MAC_WR_M MAKEMASK(3, ICE_AQC_MAN_MAC_WR_S) #define ICE_AQC_MAN_MAC_UPDATE_LAA 0 #define ICE_AQC_MAN_MAC_UPDATE_LAA_WOL BIT(ICE_AQC_MAN_MAC_WR_S) /* byte stream in network order */ u8 mac_addr[ETH_ALEN]; __le32 addr_high; __le32 addr_low; }; /* Clear PXE Command and response (direct 0x0110) */ struct ice_aqc_clear_pxe { u8 rx_cnt; #define ICE_AQC_CLEAR_PXE_RX_CNT 0x2 u8 reserved[15]; }; /* Configure No-Drop Policy Command (direct 0x0112) */ struct ice_aqc_config_no_drop_policy { u8 opts; #define ICE_AQC_FORCE_NO_DROP BIT(0) u8 rsvd[15]; }; /* Get switch configuration (0x0200) */ struct ice_aqc_get_sw_cfg { /* Reserved for command and copy of request flags for response */ __le16 flags; /* First desc in case of command and next_elem in case of response * In case of response, if it is not zero, means all the configuration * was not returned and new command shall be sent with this value in * the 'first desc' field */ __le16 element; /* Reserved for command, only used for response */ __le16 num_elems; __le16 rsvd; __le32 addr_high; __le32 addr_low; }; /* Each entry in the response buffer is of the following type: */ struct ice_aqc_get_sw_cfg_resp_elem { /* VSI/Port Number */ __le16 vsi_port_num; #define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S 0 #define ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M \ (0x3FF << ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_S) #define ICE_AQC_GET_SW_CONF_RESP_TYPE_S 14 #define ICE_AQC_GET_SW_CONF_RESP_TYPE_M (0x3 << ICE_AQC_GET_SW_CONF_RESP_TYPE_S) #define ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT 0 #define ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT 1 #define ICE_AQC_GET_SW_CONF_RESP_VSI 2 /* SWID VSI/Port belongs to */ __le16 swid; /* Bit 14..0 : PF/VF number VSI belongs to * Bit 
15 : VF indication bit */ __le16 pf_vf_num; #define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S 0 #define ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M \ (0x7FFF << ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_S) #define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15) }; /* Set Port parameters, (direct, 0x0203) */ struct ice_aqc_set_port_params { __le16 cmd_flags; #define ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS BIT(0) #define ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS BIT(1) #define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2) __le16 bad_frame_vsi; #define ICE_AQC_SET_P_PARAMS_VSI_S 0 #define ICE_AQC_SET_P_PARAMS_VSI_M (0x3FF << ICE_AQC_SET_P_PARAMS_VSI_S) #define ICE_AQC_SET_P_PARAMS_VSI_VALID BIT(15) __le16 swid; #define ICE_AQC_SET_P_PARAMS_SWID_S 0 #define ICE_AQC_SET_P_PARAMS_SWID_M (0xFF << ICE_AQC_SET_P_PARAMS_SWID_S) #define ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S 8 #define ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_M \ (0x3F << ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S) #define ICE_AQC_SET_P_PARAMS_IS_LOGI_PORT BIT(14) #define ICE_AQC_SET_P_PARAMS_SWID_VALID BIT(15) u8 reserved[10]; }; /* These resource type defines are used for all switch resource * commands where a resource type is required, such as: * Get Resource Allocation command (indirect 0x0204) * Allocate Resources command (indirect 0x0208) * Free Resources command (indirect 0x0209) * Get Allocated Resource Descriptors Command (indirect 0x020A) */ #define ICE_AQC_RES_TYPE_VEB_COUNTER 0x00 #define ICE_AQC_RES_TYPE_VLAN_COUNTER 0x01 #define ICE_AQC_RES_TYPE_MIRROR_RULE 0x02 #define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03 #define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04 #define ICE_AQC_RES_TYPE_RECIPE 0x05 #define ICE_AQC_RES_TYPE_PROFILE 0x06 #define ICE_AQC_RES_TYPE_SWID 0x07 #define ICE_AQC_RES_TYPE_VSI 0x08 #define ICE_AQC_RES_TYPE_FLU 0x09 #define ICE_AQC_RES_TYPE_WIDE_TABLE_1 0x0A #define ICE_AQC_RES_TYPE_WIDE_TABLE_2 0x0B #define ICE_AQC_RES_TYPE_WIDE_TABLE_4 0x0C #define ICE_AQC_RES_TYPE_GLOBAL_RSS_HASH 0x20 #define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21 #define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22 #define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23 #define ICE_AQC_RES_TYPE_FLEX_DESC_PROG 0x30 #define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_PROFID 0x48 #define ICE_AQC_RES_TYPE_SWITCH_PROF_BLDR_TCAM 0x49 #define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_PROFID 0x50 #define ICE_AQC_RES_TYPE_ACL_PROF_BLDR_TCAM 0x51 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60 #define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61 /* Resource types 0x62-67 are reserved for Hash profile builder */ #define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID 0x68 #define ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM 0x69 #define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7) #define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12) #define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13) #define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00 #define ICE_AQC_RES_TYPE_S 0 #define ICE_AQC_RES_TYPE_M (0x07F << ICE_AQC_RES_TYPE_S) /* Get Resource Allocation command (indirect 0x0204) */ struct ice_aqc_get_res_alloc { __le16 resp_elem_num; /* Used in response, reserved in command */ u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Get Resource Allocation Response Buffer per response */ struct ice_aqc_get_res_resp_elem { __le16 res_type; /* Types defined above cmd 0x0204 */ __le16 total_capacity; /* Resources available to all PF's */ __le16 total_function; /* Resources allocated for a PF */ __le16 total_shared; /* Resources allocated as shared */ __le16 total_free; /* Resources un-allocated/not reserved by any PF */ }; /* Allocate Resources command (indirect 0x0208) * 
Free Resources command (indirect 0x0209) */ struct ice_aqc_alloc_free_res_cmd { __le16 num_entries; /* Number of Resource entries */ u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Resource descriptor */ struct ice_aqc_res_elem { union { __le16 sw_resp; __le16 flu_resp; } e; }; /* Buffer for Allocate/Free Resources commands */ struct ice_aqc_alloc_free_res_elem { __le16 res_type; /* Types defined above cmd 0x0204 */ #define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S 8 #define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \ (0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S) __le16 num_elems; struct ice_aqc_res_elem elem[STRUCT_HACK_VAR_LEN]; }; /* Get Allocated Resource Descriptors Command (indirect 0x020A) */ struct ice_aqc_get_allocd_res_desc { union { struct { __le16 res; /* Types defined above cmd 0x0204 */ __le16 first_desc; __le32 reserved; } cmd; struct { __le16 res; __le16 next_desc; __le16 num_desc; __le16 reserved; } resp; } ops; __le32 addr_high; __le32 addr_low; }; /* Request buffer for Set VLAN Mode AQ command (indirect 0x020C) */ struct ice_aqc_set_vlan_mode { u8 reserved; u8 l2tag_prio_tagging; #define ICE_AQ_VLAN_PRIO_TAG_S 0 #define ICE_AQ_VLAN_PRIO_TAG_M (0x7 << ICE_AQ_VLAN_PRIO_TAG_S) #define ICE_AQ_VLAN_PRIO_TAG_NOT_SUPPORTED 0x0 #define ICE_AQ_VLAN_PRIO_TAG_STAG 0x1 #define ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG 0x2 #define ICE_AQ_VLAN_PRIO_TAG_OUTER_VLAN 0x3 #define ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG 0x4 #define ICE_AQ_VLAN_PRIO_TAG_MAX 0x4 #define ICE_AQ_VLAN_PRIO_TAG_ERROR 0x7 u8 l2tag_reserved[64]; u8 rdma_packet; #define ICE_AQ_VLAN_RDMA_TAG_S 0 #define ICE_AQ_VLAN_RDMA_TAG_M (0x3F << ICE_AQ_VLAN_RDMA_TAG_S) #define ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING 0x10 #define ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING 0x1A u8 rdma_reserved[2]; u8 mng_vlan_prot_id; #define ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER 0x10 #define ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER 0x11 u8 prot_id_reserved[30]; }; /* Response buffer for Get VLAN Mode AQ command (indirect 0x020D) */ struct ice_aqc_get_vlan_mode { u8 vlan_mode; #define ICE_AQ_VLAN_MODE_DVM_ENA BIT(0) u8 l2tag_prio_tagging; u8 reserved[98]; }; /* Add VSI (indirect 0x0210) * Update VSI (indirect 0x0211) * Get VSI (indirect 0x0212) * Free VSI (indirect 0x0213) */ struct ice_aqc_add_get_update_free_vsi { __le16 vsi_num; #define ICE_AQ_VSI_NUM_S 0 #define ICE_AQ_VSI_NUM_M (0x03FF << ICE_AQ_VSI_NUM_S) #define ICE_AQ_VSI_IS_VALID BIT(15) __le16 cmd_flags; #define ICE_AQ_VSI_KEEP_ALLOC 0x1 u8 vf_id; u8 reserved; __le16 vsi_flags; #define ICE_AQ_VSI_TYPE_S 0 #define ICE_AQ_VSI_TYPE_M (0x3 << ICE_AQ_VSI_TYPE_S) #define ICE_AQ_VSI_TYPE_VF 0x0 #define ICE_AQ_VSI_TYPE_VMDQ2 0x1 #define ICE_AQ_VSI_TYPE_PF 0x2 #define ICE_AQ_VSI_TYPE_EMP_MNG 0x3 __le32 addr_high; __le32 addr_low; }; /* Response descriptor for: * Add VSI (indirect 0x0210) * Update VSI (indirect 0x0211) * Free VSI (indirect 0x0213) */ struct ice_aqc_add_update_free_vsi_resp { __le16 vsi_num; __le16 ext_status; __le16 vsi_used; __le16 vsi_free; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_get_vsi_resp { __le16 vsi_num; u8 vf_id; /* The vsi_flags field uses the ICE_AQ_VSI_TYPE_* defines for values. * These are found above in struct ice_aqc_add_get_update_free_vsi. 
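 * As a sketch, a caller can recover the type from a response with
 * (vsi_flags & ICE_AQ_VSI_TYPE_M) >> ICE_AQ_VSI_TYPE_S and compare the
 * result against ICE_AQ_VSI_TYPE_PF and its siblings.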
*/ u8 vsi_flags; __le16 vsi_used; __le16 vsi_free; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_vsi_props { __le16 valid_sections; #define ICE_AQ_VSI_PROP_SW_VALID BIT(0) #define ICE_AQ_VSI_PROP_SECURITY_VALID BIT(1) #define ICE_AQ_VSI_PROP_VLAN_VALID BIT(2) #define ICE_AQ_VSI_PROP_OUTER_TAG_VALID BIT(3) #define ICE_AQ_VSI_PROP_INGRESS_UP_VALID BIT(4) #define ICE_AQ_VSI_PROP_EGRESS_UP_VALID BIT(5) #define ICE_AQ_VSI_PROP_RXQ_MAP_VALID BIT(6) #define ICE_AQ_VSI_PROP_Q_OPT_VALID BIT(7) #define ICE_AQ_VSI_PROP_OUTER_UP_VALID BIT(8) #define ICE_AQ_VSI_PROP_FLOW_DIR_VALID BIT(11) #define ICE_AQ_VSI_PROP_PASID_VALID BIT(12) /* switch section */ u8 sw_id; u8 sw_flags; #define ICE_AQ_VSI_SW_FLAG_ALLOW_LB BIT(5) #define ICE_AQ_VSI_SW_FLAG_LOCAL_LB BIT(6) #define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7) u8 sw_flags2; #define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0 #define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S) #define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0) +#define ICE_AQ_VSI_SW_FLAG_RX_PASS_PRUNE_ENA BIT(3) #define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4) u8 veb_stat_id; #define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0 #define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S) #define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5) /* security section */ u8 sec_flags; #define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0) #define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2) #define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4 #define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S) #define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0) u8 sec_reserved; /* VLAN section */ __le16 port_based_inner_vlan; /* VLANS include priority bits */ u8 inner_vlan_reserved[2]; u8 inner_vlan_flags; #define ICE_AQ_VSI_INNER_VLAN_TX_MODE_S 0 #define ICE_AQ_VSI_INNER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_TX_MODE_S) #define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1 #define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED 0x2 #define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL 0x3 #define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2) #define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3 #define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) #define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) #define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) #define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) #define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S) #define ICE_AQ_VSI_INNER_VLAN_BLOCK_TX_DESC BIT(5) u8 inner_vlan_reserved2[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ #define ICE_AQ_VSI_UP_TABLE_UP0_S 0 #define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S) #define ICE_AQ_VSI_UP_TABLE_UP1_S 3 #define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S) #define ICE_AQ_VSI_UP_TABLE_UP2_S 6 #define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S) #define ICE_AQ_VSI_UP_TABLE_UP3_S 9 #define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S) #define ICE_AQ_VSI_UP_TABLE_UP4_S 12 #define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S) #define ICE_AQ_VSI_UP_TABLE_UP5_S 15 #define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S) #define ICE_AQ_VSI_UP_TABLE_UP6_S 18 #define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S) #define ICE_AQ_VSI_UP_TABLE_UP7_S 21 #define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S) __le32 egress_table; /* same defines as for 
ingress table */ /* outer tags section */ __le16 port_based_outer_vlan; u8 outer_vlan_flags; #define ICE_AQ_VSI_OUTER_VLAN_EMODE_S 0 #define ICE_AQ_VSI_OUTER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_EMODE_S) #define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH 0x0 #define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_UP 0x1 #define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW 0x2 #define ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING 0x3 #define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2 #define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S) #define ICE_AQ_VSI_OUTER_TAG_NONE 0x0 #define ICE_AQ_VSI_OUTER_TAG_STAG 0x1 #define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2 #define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3 #define ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT BIT(4) #define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S 5 #define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) #define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1 #define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTTAGGED 0x2 #define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL 0x3 #define ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC BIT(7) u8 outer_vlan_reserved; /* queue mapping section */ __le16 mapping_flags; #define ICE_AQ_VSI_Q_MAP_CONTIG 0x0 #define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0) __le16 q_mapping[16]; #define ICE_AQ_VSI_Q_S 0 #define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S) __le16 tc_mapping[8]; #define ICE_AQ_VSI_TC_Q_OFFSET_S 0 #define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S) #define ICE_AQ_VSI_TC_Q_NUM_S 11 #define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S) /* queueing option section */ u8 q_opt_rss; #define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0 #define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) #define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0 #define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2 #define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3 #define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2 #define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S) #define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6 #define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) #define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) #define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) #define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) #define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) u8 q_opt_tc; #define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0 #define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S) #define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7) u8 q_opt_flags; #define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0) u8 q_opt_reserved[3]; /* outer up section */ __le32 outer_up_table; /* same structure and defines as ingress tbl */ /* section 10 */ __le16 sect_10_reserved; /* flow director section */ __le16 fd_options; #define ICE_AQ_VSI_FD_ENABLE BIT(0) #define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1) #define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3) __le16 max_fd_fltr_dedicated; __le16 max_fd_fltr_shared; __le16 fd_def_q; #define ICE_AQ_VSI_FD_DEF_Q_S 0 #define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S) #define ICE_AQ_VSI_FD_DEF_GRP_S 12 #define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S) __le16 fd_report_opt; #define ICE_AQ_VSI_FD_REPORT_Q_S 0 #define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S) #define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12 #define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S) #define ICE_AQ_VSI_FD_DEF_DROP BIT(15) /* PASID section */ __le32 pasid_id; #define ICE_AQ_VSI_PASID_ID_S 0 #define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S) 
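/* The PASID value occupies bits 19:0 of pasid_id; bit 31, defined next,
 * marks it valid. A sketch of setting it (CPU_TO_LE32 comes from
 * ice_osdep.h, not this header):
 *	props->pasid_id = CPU_TO_LE32(pasid | ICE_AQ_VSI_PASID_ID_VALID);
 */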
#define ICE_AQ_VSI_PASID_ID_VALID BIT(31) u8 reserved[24]; }; /* Add/update mirror rule - direct (0x0260) */ #define ICE_AQC_RULE_ID_VALID_S 7 #define ICE_AQC_RULE_ID_VALID_M (0x1 << ICE_AQC_RULE_ID_VALID_S) #define ICE_AQC_RULE_ID_S 0 #define ICE_AQC_RULE_ID_M (0x3F << ICE_AQC_RULE_ID_S) /* Following defines to be used while processing caller specified mirror list * of VSI indexes. */ /* Action: Byte.bit (1.7) * 0 = Remove VSI from mirror rule * 1 = Add VSI to mirror rule */ #define ICE_AQC_RULE_ACT_S 15 #define ICE_AQC_RULE_ACT_M (0x1 << ICE_AQC_RULE_ACT_S) /* Action: 1.2:0.0 = Mirrored VSI */ #define ICE_AQC_RULE_MIRRORED_VSI_S 0 #define ICE_AQC_RULE_MIRRORED_VSI_M (0x7FF << ICE_AQC_RULE_MIRRORED_VSI_S) /* This is to be used by add/update mirror rule Admin Queue command. * In case of add mirror rule - if rule ID is specified as * INVAL_MIRROR_RULE_ID, new rule ID is allocated from shared pool. * If specified rule_id is valid, then it is used. If specified rule_id * is in use then new mirroring rule is added. */ #define ICE_INVAL_MIRROR_RULE_ID 0xFFFF struct ice_aqc_add_update_mir_rule { __le16 rule_id; __le16 rule_type; #define ICE_AQC_RULE_TYPE_S 0 #define ICE_AQC_RULE_TYPE_M (0x7 << ICE_AQC_RULE_TYPE_S) /* VPORT ingress/egress */ #define ICE_AQC_RULE_TYPE_VPORT_INGRESS 0x1 #define ICE_AQC_RULE_TYPE_VPORT_EGRESS 0x2 /* Physical port ingress mirroring. * All traffic received by this port */ #define ICE_AQC_RULE_TYPE_PPORT_INGRESS 0x6 /* Physical port egress mirroring. All traffic sent by this port */ #define ICE_AQC_RULE_TYPE_PPORT_EGRESS 0x7 /* Number of mirrored entries. * The values are in the command buffer */ __le16 num_entries; /* Destination VSI */ __le16 dest; __le32 addr_high; __le32 addr_low; }; /* Delete mirror rule - direct(0x0261) */ struct ice_aqc_delete_mir_rule { __le16 rule_id; __le16 rsvd; /* Byte.bit: 20.0 = Keep allocation. If set VSI stays part of * the PF allocated resources, otherwise it is returned to the * shared pool */ #define ICE_AQC_FLAG_KEEP_ALLOCD_S 0 #define ICE_AQC_FLAG_KEEP_ALLOCD_M (0x1 << ICE_AQC_FLAG_KEEP_ALLOCD_S) __le16 flags; u8 reserved[10]; }; /* Set/Get storm config - (direct 0x0280, 0x0281) */ /* This structure holds get storm configuration response and same structure * is used to perform set_storm_cfg */ struct ice_aqc_storm_cfg { __le32 bcast_thresh_size; __le32 mcast_thresh_size; /* Bit 18:0 - Traffic upper threshold size * Bit 31:19 - Reserved */ #define ICE_AQ_THRESHOLD_S 0 #define ICE_AQ_THRESHOLD_M (0x7FFFF << ICE_AQ_THRESHOLD_S) __le32 storm_ctrl_ctrl; /* Bit 0: MDIPW - Drop Multicast packets in previous window * Bit 1: MDICW - Drop multicast packets in current window * Bit 2: BDIPW - Drop broadcast packets in previous window * Bit 3: BDICW - Drop broadcast packets in current window */ #define ICE_AQ_STORM_CTRL_MDIPW_DROP_MULTICAST BIT(0) #define ICE_AQ_STORM_CTRL_MDICW_DROP_MULTICAST BIT(1) #define ICE_AQ_STORM_CTRL_BDIPW_DROP_MULTICAST BIT(2) #define ICE_AQ_STORM_CTRL_BDICW_DROP_MULTICAST BIT(3) /* Bit 7:5 : Reserved */ /* Bit 27:8 : Interval - BSC/MSC Time-interval specification: The * interval size for applying ingress broadcast or multicast storm * control. */ #define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S 8 #define ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_M \ (0xFFFFF << ICE_AQ_STORM_BSC_MSC_TIME_INTERVAL_S) __le32 reserved; }; #define ICE_MAX_NUM_RECIPES 64 /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ struct ice_aqc_sw_rules { /* ops: add switch rules, referring the number of rules. 
* ops: update switch rules, referring the number of filters * ops: remove switch rules, referring the entry index. * ops: get switch rules, referring to the number of filters. */ __le16 num_rules_fltr_entry_index; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Add/Update/Get/Remove lookup Rx/Tx command/response entry * This structures describes the lookup rules and associated actions. "index" * is returned as part of a response to a successful Add command, and can be * used to identify the rule for Update/Get/Remove commands. */ struct ice_sw_rule_lkup_rx_tx { __le16 recipe_id; #define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10 /* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */ __le16 src; __le32 act; /* Bit 0:1 - Action type */ #define ICE_SINGLE_ACT_TYPE_S 0x00 #define ICE_SINGLE_ACT_TYPE_M (0x3 << ICE_SINGLE_ACT_TYPE_S) /* Bit 2 - Loop back enable * Bit 3 - LAN enable */ #define ICE_SINGLE_ACT_LB_ENABLE BIT(2) #define ICE_SINGLE_ACT_LAN_ENABLE BIT(3) /* Action type = 0 - Forward to VSI or VSI list */ #define ICE_SINGLE_ACT_VSI_FORWARDING 0x0 #define ICE_SINGLE_ACT_VSI_ID_S 4 #define ICE_SINGLE_ACT_VSI_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_ID_S) #define ICE_SINGLE_ACT_VSI_LIST_ID_S 4 #define ICE_SINGLE_ACT_VSI_LIST_ID_M (0x3FF << ICE_SINGLE_ACT_VSI_LIST_ID_S) /* This bit needs to be set if action is forward to VSI list */ #define ICE_SINGLE_ACT_VSI_LIST BIT(14) #define ICE_SINGLE_ACT_VALID_BIT BIT(17) #define ICE_SINGLE_ACT_DROP BIT(18) /* Action type = 1 - Forward to Queue of Queue group */ #define ICE_SINGLE_ACT_TO_Q 0x1 #define ICE_SINGLE_ACT_Q_INDEX_S 4 #define ICE_SINGLE_ACT_Q_INDEX_M (0x7FF << ICE_SINGLE_ACT_Q_INDEX_S) #define ICE_SINGLE_ACT_Q_REGION_S 15 #define ICE_SINGLE_ACT_Q_REGION_M (0x7 << ICE_SINGLE_ACT_Q_REGION_S) #define ICE_SINGLE_ACT_Q_PRIORITY BIT(18) /* Action type = 2 - Prune */ #define ICE_SINGLE_ACT_PRUNE 0x2 #define ICE_SINGLE_ACT_EGRESS BIT(15) #define ICE_SINGLE_ACT_INGRESS BIT(16) #define ICE_SINGLE_ACT_PRUNET BIT(17) /* Bit 18 should be set to 0 for this action */ /* Action type = 2 - Pointer */ #define ICE_SINGLE_ACT_PTR 0x2 #define ICE_SINGLE_ACT_PTR_VAL_S 4 #define ICE_SINGLE_ACT_PTR_VAL_M (0x1FFF << ICE_SINGLE_ACT_PTR_VAL_S) + /* Bit 17 should be set if pointed action includes a FWD cmd */ +#define ICE_SINGLE_ACT_PTR_HAS_FWD BIT(17) /* Bit 18 should be set to 1 */ #define ICE_SINGLE_ACT_PTR_BIT BIT(18) /* Action type = 3 - Other actions. Last two bits * are other action identifier */ #define ICE_SINGLE_ACT_OTHER_ACTS 0x3 #define ICE_SINGLE_OTHER_ACT_IDENTIFIER_S 17 #define ICE_SINGLE_OTHER_ACT_IDENTIFIER_M \ (0x3 << ICE_SINGLE_OTHER_ACT_IDENTIFIER_S) /* Bit 17:18 - Defines other actions */ /* Other action = 0 - Mirror VSI */ #define ICE_SINGLE_OTHER_ACT_MIRROR 0 #define ICE_SINGLE_ACT_MIRROR_VSI_ID_S 4 #define ICE_SINGLE_ACT_MIRROR_VSI_ID_M \ (0x3FF << ICE_SINGLE_ACT_MIRROR_VSI_ID_S) /* Other action = 3 - Set Stat count */ #define ICE_SINGLE_OTHER_ACT_STAT_COUNT 3 #define ICE_SINGLE_ACT_STAT_COUNT_INDEX_S 4 #define ICE_SINGLE_ACT_STAT_COUNT_INDEX_M \ (0x7F << ICE_SINGLE_ACT_STAT_COUNT_INDEX_S) __le16 index; /* The index of the rule in the lookup table */ /* Length and values of the header to be matched per recipe or * lookup-type */ __le16 hdr_len; u8 hdr[STRUCT_HACK_VAR_LEN]; }; /* Add/Update/Remove large action command/response entry * "index" is returned as part of a response to a successful Add command, and * can be used to identify the action for Update/Get/Remove commands. 
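 *
 * A single forward-to-VSI action could be encoded roughly as follows,
 * using the ICE_LG_ACT_* defines below ("vsi_id" is illustrative):
 *
 *	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT |
 *	      ((vsi_id << ICE_LG_ACT_VSI_ID_S) & ICE_LG_ACT_VSI_ID_M);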
*/ struct ice_sw_rule_lg_act { __le16 index; /* Index in large action table */ __le16 size; /* Max number of large actions */ #define ICE_MAX_LG_ACT 4 /* Bit 0:1 - Action type */ #define ICE_LG_ACT_TYPE_S 0 #define ICE_LG_ACT_TYPE_M (0x7 << ICE_LG_ACT_TYPE_S) /* Action type = 0 - Forward to VSI or VSI list */ #define ICE_LG_ACT_VSI_FORWARDING 0 #define ICE_LG_ACT_VSI_ID_S 3 #define ICE_LG_ACT_VSI_ID_M (0x3FF << ICE_LG_ACT_VSI_ID_S) #define ICE_LG_ACT_VSI_LIST_ID_S 3 #define ICE_LG_ACT_VSI_LIST_ID_M (0x3FF << ICE_LG_ACT_VSI_LIST_ID_S) /* This bit needs to be set if action is forward to VSI list */ #define ICE_LG_ACT_VSI_LIST BIT(13) #define ICE_LG_ACT_VALID_BIT BIT(16) /* Action type = 1 - Forward to Queue of Queue group */ #define ICE_LG_ACT_TO_Q 0x1 #define ICE_LG_ACT_Q_INDEX_S 3 #define ICE_LG_ACT_Q_INDEX_M (0x7FF << ICE_LG_ACT_Q_INDEX_S) #define ICE_LG_ACT_Q_REGION_S 14 #define ICE_LG_ACT_Q_REGION_M (0x7 << ICE_LG_ACT_Q_REGION_S) #define ICE_LG_ACT_Q_PRIORITY_SET BIT(17) /* Action type = 2 - Prune */ #define ICE_LG_ACT_PRUNE 0x2 #define ICE_LG_ACT_EGRESS BIT(14) #define ICE_LG_ACT_INGRESS BIT(15) #define ICE_LG_ACT_PRUNET BIT(16) /* Action type = 3 - Mirror VSI */ #define ICE_LG_OTHER_ACT_MIRROR 0x3 #define ICE_LG_ACT_MIRROR_VSI_ID_S 3 #define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) /* Action type = 5 - Generic Value */ #define ICE_LG_ACT_GENERIC 0x5 #define ICE_LG_ACT_GENERIC_VALUE_S 3 #define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) #define ICE_LG_ACT_GENERIC_OFFSET_S 19 #define ICE_LG_ACT_GENERIC_OFFSET_M (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S) #define ICE_LG_ACT_GENERIC_PRIORITY_S 22 #define ICE_LG_ACT_GENERIC_PRIORITY_M (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S) #define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX 7 /* Action = 7 - Set Stat count */ #define ICE_LG_ACT_STAT_COUNT 0x7 #define ICE_LG_ACT_STAT_COUNT_S 3 #define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S) __le32 act[STRUCT_HACK_VAR_LEN]; /* array of size for actions */ }; /* Add/Update/Remove VSI list command/response entry * "index" is returned as part of a response to a successful Add command, and * can be used to identify the VSI list for Update/Get/Remove commands. */ struct ice_sw_rule_vsi_list { __le16 index; /* Index of VSI/Prune list */ __le16 number_vsi; __le16 vsi[STRUCT_HACK_VAR_LEN]; /* Array of number_vsi VSI numbers */ }; #pragma pack(1) /* Query VSI list command/response entry */ struct ice_sw_rule_vsi_list_query { __le16 index; ice_declare_bitmap(vsi_list, ICE_MAX_VSI); }; #pragma pack() #pragma pack(1) /* Add switch rule response: * Content of return buffer is same as the input buffer. The status field and * LUT index are updated as part of the response */ struct ice_aqc_sw_rules_elem { __le16 type; /* Switch rule type, one of T_... 
*/ #define ICE_AQC_SW_RULES_T_LKUP_RX 0x0 #define ICE_AQC_SW_RULES_T_LKUP_TX 0x1 #define ICE_AQC_SW_RULES_T_LG_ACT 0x2 #define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3 #define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4 #define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5 #define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6 __le16 status; union { struct ice_sw_rule_lkup_rx_tx lkup_tx_rx; struct ice_sw_rule_lg_act lg_act; struct ice_sw_rule_vsi_list vsi_list; struct ice_sw_rule_vsi_list_query vsi_list_query; } pdata; }; #pragma pack() /* PFC Ignore (direct 0x0301) * The command and response use the same descriptor structure */ struct ice_aqc_pfc_ignore { u8 tc_bitmap; u8 cmd_flags; /* unused in response */ #define ICE_AQC_PFC_IGNORE_SET BIT(7) #define ICE_AQC_PFC_IGNORE_CLEAR 0 u8 reserved[14]; }; /* Set PFC Mode (direct 0x0303) * Query PFC Mode (direct 0x0302) */ struct ice_aqc_set_query_pfc_mode { u8 pfc_mode; /* For Set Command response, reserved in all other cases */ #define ICE_AQC_PFC_NOT_CONFIGURED 0 /* For Query Command response, reserved in all other cases */ #define ICE_AQC_DCB_DIS 0 #define ICE_AQC_PFC_VLAN_BASED_PFC 1 #define ICE_AQC_PFC_DSCP_BASED_PFC 2 u8 rsvd[15]; }; /* Set DCB Parameters (direct 0x0306) */ struct ice_aqc_set_dcb_params { u8 cmd_flags; /* unused in response */ #define ICE_AQC_LINK_UP_DCB_CFG BIT(0) #define ICE_AQC_PERSIST_DCB_CFG BIT(1) u8 valid_flags; /* unused in response */ #define ICE_AQC_LINK_UP_DCB_CFG_VALID BIT(0) #define ICE_AQC_PERSIST_DCB_CFG_VALID BIT(1) u8 rsvd[14]; }; /* Get Default Topology (indirect 0x0400) */ struct ice_aqc_get_topo { u8 port_num; u8 num_branches; __le16 reserved1; __le32 reserved2; __le32 addr_high; __le32 addr_low; }; +/* Get/Set Tx Topology (indirect 0x0418/0x0417) */ +struct ice_aqc_get_set_tx_topo { + u8 set_flags; +#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0) +#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1) +#define ICE_AQC_TX_TOPO_FLAGS_SET_PSM BIT(2) +#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4) +#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5) + u8 get_flags; +#define ICE_AQC_TX_TOPO_GET_NO_UPDATE 0 +#define ICE_AQC_TX_TOPO_GET_PSM 1 +#define ICE_AQC_TX_TOPO_GET_RAM 2 + __le16 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + /* Update TSE (indirect 0x0403) * Get TSE (indirect 0x0404) * Add TSE (indirect 0x0401) * Delete TSE (indirect 0x040F) * Move TSE (indirect 0x0408) * Suspend Nodes (indirect 0x0409) * Resume Nodes (indirect 0x040A) */ struct ice_aqc_sched_elem_cmd { __le16 num_elem_req; /* Used by commands */ __le16 num_elem_resp; /* Used by responses */ __le32 reserved; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_txsched_move_grp_info_hdr { __le32 src_parent_teid; __le32 dest_parent_teid; __le16 num_elems; u8 flags; u8 reserved; }; struct ice_aqc_move_elem { struct ice_aqc_txsched_move_grp_info_hdr hdr; __le32 teid[STRUCT_HACK_VAR_LEN]; }; struct ice_aqc_elem_info_bw { __le16 bw_profile_idx; __le16 bw_alloc; }; struct ice_aqc_txsched_elem { u8 elem_type; /* Special field, reserved for some aq calls */ #define ICE_AQC_ELEM_TYPE_UNDEFINED 0x0 #define ICE_AQC_ELEM_TYPE_ROOT_PORT 0x1 #define ICE_AQC_ELEM_TYPE_TC 0x2 #define ICE_AQC_ELEM_TYPE_SE_GENERIC 0x3 #define ICE_AQC_ELEM_TYPE_ENTRY_POINT 0x4 #define ICE_AQC_ELEM_TYPE_LEAF 0x5 #define ICE_AQC_ELEM_TYPE_SE_PADDED 0x6 u8 valid_sections; #define ICE_AQC_ELEM_VALID_GENERIC BIT(0) #define ICE_AQC_ELEM_VALID_CIR BIT(1) #define ICE_AQC_ELEM_VALID_EIR BIT(2) #define ICE_AQC_ELEM_VALID_SHARED BIT(3) u8 generic; #define ICE_AQC_ELEM_GENERIC_MODE_M 0x1 #define 
ICE_AQC_ELEM_GENERIC_PRIO_S 0x1 #define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S) #define ICE_AQC_ELEM_GENERIC_SP_S 0x4 #define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S) #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5 #define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \ (0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S) u8 flags; /* Special field, reserved for some aq calls */ #define ICE_AQC_ELEM_FLAG_SUSPEND_M 0x1 struct ice_aqc_elem_info_bw cir_bw; struct ice_aqc_elem_info_bw eir_bw; __le16 srl_id; __le16 reserved2; }; struct ice_aqc_txsched_elem_data { __le32 parent_teid; __le32 node_teid; struct ice_aqc_txsched_elem data; }; struct ice_aqc_txsched_topo_grp_info_hdr { __le32 parent_teid; __le16 num_elems; __le16 reserved2; }; struct ice_aqc_add_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; struct ice_aqc_txsched_elem_data generic[STRUCT_HACK_VAR_LEN]; }; struct ice_aqc_get_topo_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; struct ice_aqc_txsched_elem_data generic[ICE_AQC_TOPO_MAX_LEVEL_NUM]; }; struct ice_aqc_delete_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; __le32 teid[STRUCT_HACK_VAR_LEN]; }; /* Query Port ETS (indirect 0x040E) * * This indirect command is used to query port TC node configuration. */ struct ice_aqc_query_port_ets { __le32 port_teid; __le32 reserved; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_port_ets_elem { u8 tc_valid_bits; u8 reserved[3]; /* 3 bits for UP per TC 0-7, 4th byte reserved */ __le32 up2tc; u8 tc_bw_share[8]; __le32 port_eir_prof_id; __le32 port_cir_prof_id; /* 3 bits per Node priority to TC 0-7, 4th byte reserved */ __le32 tc_node_prio; #define ICE_TC_NODE_PRIO_S 0x4 u8 reserved1[4]; __le32 tc_node_teid[8]; /* Used for response, reserved in command */ }; /* Rate limiting profile for * Add RL profile (indirect 0x0410) * Query RL profile (indirect 0x0411) * Remove RL profile (indirect 0x0415) * These indirect commands acts on single or multiple * RL profiles with specified data. */ struct ice_aqc_rl_profile { __le16 num_profiles; __le16 num_processed; /* Only for response. Reserved in Command. */ u8 reserved[4]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_rl_profile_elem { u8 level; u8 flags; #define ICE_AQC_RL_PROFILE_TYPE_S 0x0 #define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S) #define ICE_AQC_RL_PROFILE_TYPE_CIR 0 #define ICE_AQC_RL_PROFILE_TYPE_EIR 1 #define ICE_AQC_RL_PROFILE_TYPE_SRL 2 /* The following flag is used for Query RL Profile Data */ #define ICE_AQC_RL_PROFILE_INVAL_S 0x7 #define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S) __le16 profile_id; __le16 max_burst_size; __le16 rl_multiply; __le16 wake_up_calc; __le16 rl_encode; }; +/* Config Node Attributes (indirect 0x0419) + * Query Node Attributes (indirect 0x041A) + */ +struct ice_aqc_node_attr { + __le16 num_entries; /* Number of attributes structures in the buffer */ + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_node_attr_elem { + __le32 node_teid; + __le16 max_children; + __le16 children_level; +}; + /* Configure L2 Node CGD (indirect 0x0414) * This indirect command allows configuring a congestion domain for given L2 * node TEIDs in the scheduler topology. 
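 * The indirect buffer is expected to hold num_l2_nodes entries of the
 * ice_aqc_cfg_l2_node_cgd_elem structure defined below, each pairing a
 * node TEID with its target congestion domain.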
*/ struct ice_aqc_cfg_l2_node_cgd { __le16 num_l2_nodes; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_cfg_l2_node_cgd_elem { __le32 node_teid; u8 cgd; u8 reserved[3]; }; /* Query Scheduler Resource Allocation (indirect 0x0412) * This indirect command retrieves the scheduler resources allocated by * EMP Firmware to the given PF. */ struct ice_aqc_query_txsched_res { u8 reserved[8]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_generic_sched_props { __le16 phys_levels; __le16 logical_levels; u8 flattening_bitmap; u8 max_device_cgds; u8 max_pf_cgds; u8 rsvd0; __le16 rdma_qsets; u8 rsvd1[22]; }; struct ice_aqc_layer_props { u8 logical_layer; u8 chunk_size; __le16 max_device_nodes; __le16 max_pf_nodes; u8 rsvd0[4]; __le16 max_sibl_grp_sz; __le16 max_cir_rl_profiles; __le16 max_eir_rl_profiles; __le16 max_srl_profiles; u8 rsvd1[14]; }; struct ice_aqc_query_txsched_res_resp { struct ice_aqc_generic_sched_props sched_props; struct ice_aqc_layer_props layer_props[ICE_AQC_TOPO_MAX_LEVEL_NUM]; }; /* Query Node to Root Topology (indirect 0x0413) * This command uses ice_aqc_get_elem as its data buffer. */ struct ice_aqc_query_node_to_root { __le32 teid; __le32 num_nodes; /* Response only */ __le32 addr_high; __le32 addr_low; }; /* Get PHY capabilities (indirect 0x0600) */ struct ice_aqc_get_phy_caps { u8 lport_num; u8 reserved; __le16 param0; /* 18.0 - Report qualified modules */ #define ICE_AQC_GET_PHY_RQM BIT(0) /* 18.1 - 18.3 : Report mode * 000b - Report topology capabilities, without media * 001b - Report topology capabilities, with media * 010b - Report Active configuration * 011b - Report PHY Type and FEC mode capabilities * 100b - Report Default capabilities */ #define ICE_AQC_REPORT_MODE_S 1 #define ICE_AQC_REPORT_MODE_M (7 << ICE_AQC_REPORT_MODE_S) #define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0 #define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1) #define ICE_AQC_REPORT_ACTIVE_CFG BIT(2) #define ICE_AQC_REPORT_DFLT_CFG BIT(3) __le32 reserved1; __le32 addr_high; __le32 addr_low; }; /* This is #define of PHY type (Extended): * The first set of defines is for phy_type_low. 
 */
#define ICE_PHY_TYPE_LOW_100BASE_TX	BIT_ULL(0)
#define ICE_PHY_TYPE_LOW_100M_SGMII	BIT_ULL(1)
#define ICE_PHY_TYPE_LOW_1000BASE_T	BIT_ULL(2)
#define ICE_PHY_TYPE_LOW_1000BASE_SX	BIT_ULL(3)
#define ICE_PHY_TYPE_LOW_1000BASE_LX	BIT_ULL(4)
#define ICE_PHY_TYPE_LOW_1000BASE_KX	BIT_ULL(5)
#define ICE_PHY_TYPE_LOW_1G_SGMII	BIT_ULL(6)
#define ICE_PHY_TYPE_LOW_2500BASE_T	BIT_ULL(7)
#define ICE_PHY_TYPE_LOW_2500BASE_X	BIT_ULL(8)
#define ICE_PHY_TYPE_LOW_2500BASE_KX	BIT_ULL(9)
#define ICE_PHY_TYPE_LOW_5GBASE_T	BIT_ULL(10)
#define ICE_PHY_TYPE_LOW_5GBASE_KR	BIT_ULL(11)
#define ICE_PHY_TYPE_LOW_10GBASE_T	BIT_ULL(12)
#define ICE_PHY_TYPE_LOW_10G_SFI_DA	BIT_ULL(13)
#define ICE_PHY_TYPE_LOW_10GBASE_SR	BIT_ULL(14)
#define ICE_PHY_TYPE_LOW_10GBASE_LR	BIT_ULL(15)
#define ICE_PHY_TYPE_LOW_10GBASE_KR_CR1	BIT_ULL(16)
#define ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC	BIT_ULL(17)
#define ICE_PHY_TYPE_LOW_10G_SFI_C2C	BIT_ULL(18)
#define ICE_PHY_TYPE_LOW_25GBASE_T	BIT_ULL(19)
#define ICE_PHY_TYPE_LOW_25GBASE_CR	BIT_ULL(20)
#define ICE_PHY_TYPE_LOW_25GBASE_CR_S	BIT_ULL(21)
#define ICE_PHY_TYPE_LOW_25GBASE_CR1	BIT_ULL(22)
#define ICE_PHY_TYPE_LOW_25GBASE_SR	BIT_ULL(23)
#define ICE_PHY_TYPE_LOW_25GBASE_LR	BIT_ULL(24)
#define ICE_PHY_TYPE_LOW_25GBASE_KR	BIT_ULL(25)
#define ICE_PHY_TYPE_LOW_25GBASE_KR_S	BIT_ULL(26)
#define ICE_PHY_TYPE_LOW_25GBASE_KR1	BIT_ULL(27)
#define ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC	BIT_ULL(28)
#define ICE_PHY_TYPE_LOW_25G_AUI_C2C	BIT_ULL(29)
#define ICE_PHY_TYPE_LOW_40GBASE_CR4	BIT_ULL(30)
#define ICE_PHY_TYPE_LOW_40GBASE_SR4	BIT_ULL(31)
#define ICE_PHY_TYPE_LOW_40GBASE_LR4	BIT_ULL(32)
#define ICE_PHY_TYPE_LOW_40GBASE_KR4	BIT_ULL(33)
#define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC	BIT_ULL(34)
#define ICE_PHY_TYPE_LOW_40G_XLAUI	BIT_ULL(35)
#define ICE_PHY_TYPE_LOW_50GBASE_CR2	BIT_ULL(36)
#define ICE_PHY_TYPE_LOW_50GBASE_SR2	BIT_ULL(37)
#define ICE_PHY_TYPE_LOW_50GBASE_LR2	BIT_ULL(38)
#define ICE_PHY_TYPE_LOW_50GBASE_KR2	BIT_ULL(39)
#define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC	BIT_ULL(40)
#define ICE_PHY_TYPE_LOW_50G_LAUI2	BIT_ULL(41)
#define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC	BIT_ULL(42)
#define ICE_PHY_TYPE_LOW_50G_AUI2	BIT_ULL(43)
#define ICE_PHY_TYPE_LOW_50GBASE_CP	BIT_ULL(44)
#define ICE_PHY_TYPE_LOW_50GBASE_SR	BIT_ULL(45)
#define ICE_PHY_TYPE_LOW_50GBASE_FR	BIT_ULL(46)
#define ICE_PHY_TYPE_LOW_50GBASE_LR	BIT_ULL(47)
#define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4	BIT_ULL(48)
#define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC	BIT_ULL(49)
#define ICE_PHY_TYPE_LOW_50G_AUI1	BIT_ULL(50)
#define ICE_PHY_TYPE_LOW_100GBASE_CR4	BIT_ULL(51)
#define ICE_PHY_TYPE_LOW_100GBASE_SR4	BIT_ULL(52)
#define ICE_PHY_TYPE_LOW_100GBASE_LR4	BIT_ULL(53)
#define ICE_PHY_TYPE_LOW_100GBASE_KR4	BIT_ULL(54)
#define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC	BIT_ULL(55)
#define ICE_PHY_TYPE_LOW_100G_CAUI4	BIT_ULL(56)
#define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC	BIT_ULL(57)
#define ICE_PHY_TYPE_LOW_100G_AUI4	BIT_ULL(58)
#define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4	BIT_ULL(59)
#define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4	BIT_ULL(60)
#define ICE_PHY_TYPE_LOW_100GBASE_CP2	BIT_ULL(61)
#define ICE_PHY_TYPE_LOW_100GBASE_SR2	BIT_ULL(62)
#define ICE_PHY_TYPE_LOW_100GBASE_DR	BIT_ULL(63)
#define ICE_PHY_TYPE_LOW_MAX_INDEX	63
/* The second set of defines is for phy_type_high.
 */
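/*
 * phy_type_low and phy_type_high together form a single 128-bit
 * capability bitmap (see ice_aqc_get_phy_caps_data below). A minimal
 * membership test, assuming the LE64_TO_CPU wrapper from ice_osdep.h:
 *
 *	if (LE64_TO_CPU(caps->phy_type_low) & ICE_PHY_TYPE_LOW_25GBASE_SR)
 *		the port supports 25GBASE-SR.
 */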
*/ #define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0) #define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1) #define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2) #define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3) #define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4) -#define ICE_PHY_TYPE_HIGH_MAX_INDEX 5 +#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4 struct ice_aqc_get_phy_caps_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ u8 caps; #define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0) #define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1) #define ICE_AQC_PHY_LOW_POWER_MODE BIT(2) #define ICE_AQC_PHY_EN_LINK BIT(3) #define ICE_AQC_PHY_AN_MODE BIT(4) #define ICE_AQC_PHY_EN_MOD_QUAL BIT(5) #define ICE_AQC_PHY_EN_LESM BIT(6) #define ICE_AQC_PHY_EN_AUTO_FEC BIT(7) #define ICE_AQC_PHY_CAPS_MASK MAKEMASK(0xff, 0) u8 low_power_ctrl_an; #define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) #define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1) #define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2) #define ICE_AQC_PHY_AN_EN_CLAUSE37 BIT(3) __le16 eee_cap; #define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0) #define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1) #define ICE_AQC_PHY_EEE_EN_10GBASE_T BIT(2) #define ICE_AQC_PHY_EEE_EN_1000BASE_KX BIT(3) #define ICE_AQC_PHY_EEE_EN_10GBASE_KR BIT(4) #define ICE_AQC_PHY_EEE_EN_25GBASE_KR BIT(5) #define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6) #define ICE_AQC_PHY_EEE_EN_50GBASE_KR2 BIT(7) #define ICE_AQC_PHY_EEE_EN_50GBASE_KR_PAM4 BIT(8) #define ICE_AQC_PHY_EEE_EN_100GBASE_KR4 BIT(9) #define ICE_AQC_PHY_EEE_EN_100GBASE_KR2_PAM4 BIT(10) __le16 eeer_value; u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */ u8 phy_fw_ver[8]; u8 link_fec_options; #define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0) #define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1) #define ICE_AQC_PHY_FEC_25G_RS_528_REQ BIT(2) #define ICE_AQC_PHY_FEC_25G_KR_REQ BIT(3) #define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4) +#define ICE_AQC_PHY_FEC_DIS BIT(5) #define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) #define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) #define ICE_AQC_PHY_FEC_MASK MAKEMASK(0xdf, 0) u8 module_compliance_enforcement; #define ICE_AQC_MOD_ENFORCE_STRICT_MODE BIT(0) u8 extended_compliance_code; #define ICE_MODULE_TYPE_TOTAL_BYTE 3 u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; #define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0 #define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80 #define ICE_AQC_MOD_TYPE_IDENT 1 #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0) #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1) #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4) #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5) #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6) #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7) #define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0 #define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86 u8 qualified_module_count; u8 rsvd2[7]; /* Bytes 47:41 reserved */ #define ICE_AQC_QUAL_MOD_COUNT_MAX 16 struct { u8 v_oui[3]; u8 rsvd3; u8 v_part[16]; __le32 v_rev; __le64 rsvd4; } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX]; }; /* Set PHY capabilities (direct 0x0601) * NOTE: This command must be followed by setup link and restart auto-neg */ struct ice_aqc_set_phy_cfg { u8 lport_num; u8 reserved[7]; __le32 addr_high; __le32 addr_low; }; /* Set PHY config command data structure */ struct ice_aqc_set_phy_cfg_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ u8 caps; #define ICE_AQ_PHY_ENA_VALID_MASK 
MAKEMASK(0xef, 0) #define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) #define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) #define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) #define ICE_AQ_PHY_ENA_LINK BIT(3) #define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) #define ICE_AQ_PHY_ENA_LESM BIT(6) #define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7) u8 low_power_ctrl_an; __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ __le16 eeer_value; u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */ u8 module_compliance_enforcement; }; /* Set MAC Config command data structure (direct 0x0603) */ struct ice_aqc_set_mac_cfg { __le16 max_frame_size; u8 params; #define ICE_AQ_SET_MAC_PACE_S 3 #define ICE_AQ_SET_MAC_PACE_M (0xF << ICE_AQ_SET_MAC_PACE_S) #define ICE_AQ_SET_MAC_PACE_TYPE_M BIT(7) #define ICE_AQ_SET_MAC_PACE_TYPE_RATE 0 #define ICE_AQ_SET_MAC_PACE_TYPE_FIXED ICE_AQ_SET_MAC_PACE_TYPE_M u8 tx_tmr_priority; __le16 tx_tmr_value; __le16 fc_refresh_threshold; u8 drop_opts; #define ICE_AQ_SET_MAC_AUTO_DROP_MASK BIT(0) #define ICE_AQ_SET_MAC_AUTO_DROP_NONE 0 #define ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS BIT(0) u8 reserved[7]; }; /* Restart AN command data structure (direct 0x0605) * Also used for response, with only the lport_num field present. */ struct ice_aqc_restart_an { u8 lport_num; u8 reserved; u8 cmd_flags; #define ICE_AQC_RESTART_AN_LINK_RESTART BIT(1) #define ICE_AQC_RESTART_AN_LINK_ENABLE BIT(2) u8 reserved2[13]; }; /* Get link status (indirect 0x0607), also used for Link Status Event */ struct ice_aqc_get_link_status { u8 lport_num; u8 reserved; __le16 cmd_flags; #define ICE_AQ_LSE_M 0x3 #define ICE_AQ_LSE_NOP 0x0 #define ICE_AQ_LSE_DIS 0x2 #define ICE_AQ_LSE_ENA 0x3 /* only response uses this flag */ #define ICE_AQ_LSE_IS_ENABLED 0x1 __le32 reserved2; __le32 addr_high; __le32 addr_low; }; +enum ice_get_link_status_data_version { + ICE_GET_LINK_STATUS_DATA_V1 = 1, +}; + +#define ICE_GET_LINK_STATUS_DATALEN_V1 32 + /* Get link status response data structure, also used for Link Status Event */ struct ice_aqc_get_link_status_data { u8 topo_media_conflict; #define ICE_AQ_LINK_TOPO_CONFLICT BIT(0) #define ICE_AQ_LINK_MEDIA_CONFLICT BIT(1) #define ICE_AQ_LINK_TOPO_CORRUPT BIT(2) #define ICE_AQ_LINK_TOPO_UNREACH_PRT BIT(4) #define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5) #define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6) #define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7) u8 link_cfg_err; #define ICE_AQ_LINK_CFG_ERR BIT(0) #define ICE_AQ_LINK_ACT_PORT_OPT_INVAL BIT(2) #define ICE_AQ_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3) #define ICE_AQ_LINK_TOPO_CRITICAL_SDP_ERR BIT(4) #define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5) #define ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6) #define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT BIT(7) u8 link_info; #define ICE_AQ_LINK_UP BIT(0) /* Link Status */ #define ICE_AQ_LINK_FAULT BIT(1) #define ICE_AQ_LINK_FAULT_TX BIT(2) #define ICE_AQ_LINK_FAULT_RX BIT(3) #define ICE_AQ_LINK_FAULT_REMOTE BIT(4) #define ICE_AQ_LINK_UP_PORT BIT(5) /* External Port Link Status */ #define ICE_AQ_MEDIA_AVAILABLE BIT(6) #define ICE_AQ_SIGNAL_DETECT BIT(7) u8 an_info; #define ICE_AQ_AN_COMPLETED BIT(0) #define ICE_AQ_LP_AN_ABILITY BIT(1) #define ICE_AQ_PD_FAULT BIT(2) /* Parallel Detection Fault */ #define ICE_AQ_FEC_EN BIT(3) #define ICE_AQ_PHY_LOW_POWER BIT(4) /* Low Power State */ #define ICE_AQ_LINK_PAUSE_TX BIT(5) #define ICE_AQ_LINK_PAUSE_RX BIT(6) #define ICE_AQ_QUALIFIED_MODULE BIT(7) u8 ext_info; #define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0) #define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */ /* Port Tx 
Suspended */ #define ICE_AQ_LINK_TX_S 2 #define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S) #define ICE_AQ_LINK_TX_ACTIVE 0 #define ICE_AQ_LINK_TX_DRAINED 1 #define ICE_AQ_LINK_TX_FLUSHED 3 u8 lb_status; #define ICE_AQ_LINK_LB_PHY_LCL BIT(0) #define ICE_AQ_LINK_LB_PHY_RMT BIT(1) #define ICE_AQ_LINK_LB_MAC_LCL BIT(2) #define ICE_AQ_LINK_LB_PHY_IDX_S 3 #define ICE_AQ_LINK_LB_PHY_IDX_M (0x7 << ICE_AQ_LINK_LB_PHY_IDX_S) __le16 max_frame_size; u8 cfg; #define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0) #define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1) #define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2) #define ICE_AQ_FEC_MASK MAKEMASK(0x7, 0) /* Pacing Config */ #define ICE_AQ_CFG_PACING_S 3 #define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S) #define ICE_AQ_CFG_PACING_TYPE_M BIT(7) #define ICE_AQ_CFG_PACING_TYPE_AVG 0 #define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M /* External Device Power Ability */ u8 power_desc; #define ICE_AQ_PWR_CLASS_M 0x3F #define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0 #define ICE_AQ_LINK_PWR_BASET_HIGH 1 #define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0 #define ICE_AQ_LINK_PWR_QSFP_CLASS_2 1 #define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2 #define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3 __le16 link_speed; #define ICE_AQ_LINK_SPEED_M 0x7FF #define ICE_AQ_LINK_SPEED_10MB BIT(0) #define ICE_AQ_LINK_SPEED_100MB BIT(1) #define ICE_AQ_LINK_SPEED_1000MB BIT(2) #define ICE_AQ_LINK_SPEED_2500MB BIT(3) #define ICE_AQ_LINK_SPEED_5GB BIT(4) #define ICE_AQ_LINK_SPEED_10GB BIT(5) #define ICE_AQ_LINK_SPEED_20GB BIT(6) #define ICE_AQ_LINK_SPEED_25GB BIT(7) #define ICE_AQ_LINK_SPEED_40GB BIT(8) #define ICE_AQ_LINK_SPEED_50GB BIT(9) #define ICE_AQ_LINK_SPEED_100GB BIT(10) #define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15) __le32 reserved3; /* Aligns next field to 8-byte boundary */ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ }; /* Set event mask command (direct 0x0613) */ struct ice_aqc_set_event_mask { u8 lport_num; u8 reserved[7]; __le16 event_mask; #define ICE_AQ_LINK_EVENT_UPDOWN BIT(1) #define ICE_AQ_LINK_EVENT_MEDIA_NA BIT(2) #define ICE_AQ_LINK_EVENT_LINK_FAULT BIT(3) #define ICE_AQ_LINK_EVENT_PHY_TEMP_ALARM BIT(4) #define ICE_AQ_LINK_EVENT_EXCESSIVE_ERRORS BIT(5) #define ICE_AQ_LINK_EVENT_SIGNAL_DETECT BIT(6) #define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7) #define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8) #define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9) #define ICE_AQ_LINK_EVENT_TOPO_CONFLICT BIT(10) #define ICE_AQ_LINK_EVENT_MEDIA_CONFLICT BIT(11) #define ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12) u8 reserved1[6]; }; /* Set PHY Loopback command (direct 0x0619) */ struct ice_aqc_set_phy_lb { u8 lport_num; u8 lport_num_valid; #define ICE_AQ_PHY_LB_PORT_NUM_VALID BIT(0) u8 phy_index; u8 lb_mode; #define ICE_AQ_PHY_LB_EN BIT(0) #define ICE_AQ_PHY_LB_TYPE_M BIT(1) #define ICE_AQ_PHY_LB_TYPE_LOCAL 0 #define ICE_AQ_PHY_LB_TYPE_REMOTE ICE_AQ_PHY_LB_TYPE_M #define ICE_AQ_PHY_LB_LEVEL_M BIT(2) #define ICE_AQ_PHY_LB_LEVEL_PMD 0 #define ICE_AQ_PHY_LB_LEVEL_PCS ICE_AQ_PHY_LB_LEVEL_M u8 reserved2[12]; }; /* Set MAC Loopback command (direct 0x0620) */ struct ice_aqc_set_mac_lb { u8 lb_mode; #define ICE_AQ_MAC_LB_EN BIT(0) #define ICE_AQ_MAC_LB_OSC_CLK BIT(1) u8 reserved[15]; }; /* DNL Get Status command (indirect 0x0680) * Structure used for the response, the command uses the generic * ice_aqc_generic struct to pass a buffer address to the FW.
*/ struct ice_aqc_dnl_get_status { u8 ctx; u8 status; #define ICE_AQ_DNL_STATUS_IDLE 0x0 #define ICE_AQ_DNL_STATUS_RESERVED 0x1 #define ICE_AQ_DNL_STATUS_STOPPED 0x2 #define ICE_AQ_DNL_STATUS_FATAL 0x3 /* Fatal DNL engine error */ #define ICE_AQ_DNL_SRC_S 3 #define ICE_AQ_DNL_SRC_M (0x3 << ICE_AQ_DNL_SRC_S) #define ICE_AQ_DNL_SRC_NVM (0x0 << ICE_AQ_DNL_SRC_S) #define ICE_AQ_DNL_SRC_NVM_SCRATCH (0x1 << ICE_AQ_DNL_SRC_S) u8 stack_ptr; #define ICE_AQ_DNL_ST_PTR_S 0x0 #define ICE_AQ_DNL_ST_PTR_M (0x7 << ICE_AQ_DNL_ST_PTR_S) u8 engine_flags; #define ICE_AQ_DNL_FLAGS_ERROR BIT(2) #define ICE_AQ_DNL_FLAGS_NEGATIVE BIT(3) #define ICE_AQ_DNL_FLAGS_OVERFLOW BIT(4) #define ICE_AQ_DNL_FLAGS_ZERO BIT(5) #define ICE_AQ_DNL_FLAGS_CARRY BIT(6) #define ICE_AQ_DNL_FLAGS_JUMP BIT(7) __le16 pc; __le16 activity_id; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_dnl_get_status_data { __le16 activity_err_code; __le16 act_err_code; #define ICE_AQ_DNL_ACT_ERR_SUCCESS 0x0000 /* no error */ #define ICE_AQ_DNL_ACT_ERR_PARSE 0x8001 /* NVM parse error */ #define ICE_AQ_DNL_ACT_ERR_UNSUPPORTED 0x8002 /* unsupported action */ #define ICE_AQ_DNL_ACT_ERR_NOT_FOUND 0x8003 /* activity not found */ #define ICE_AQ_DNL_ACT_ERR_BAD_JUMP 0x8004 /* an illegal jump */ #define ICE_AQ_DNL_ACT_ERR_PSTO_OVER 0x8005 /* persistent store overflow */ #define ICE_AQ_DNL_ACT_ERR_ST_OVERFLOW 0x8006 /* stack overflow */ #define ICE_AQ_DNL_ACT_ERR_TIMEOUT 0x8007 /* activity timeout */ #define ICE_AQ_DNL_ACT_ERR_BREAK 0x0008 /* stopped at breakpoint */ #define ICE_AQ_DNL_ACT_ERR_INVAL_ARG 0x0101 /* invalid action argument */ __le32 execution_time; /* in nanoseconds */ __le16 lib_ver; u8 psto_local_sz; u8 psto_global_sz; u8 stack_sz; #define ICE_AQ_DNL_STACK_SZ_S 0 #define ICE_AQ_DNL_STACK_SZ_M (0xF << ICE_AQ_DNL_STACK_SZ_S) u8 port_count; #define ICE_AQ_DNL_PORT_CNT_S 0 #define ICE_AQ_DNL_PORT_CNT_M (0x1F << ICE_AQ_DNL_PORT_CNT_S) __le16 act_cache_cntr; u32 i2c_clk_cntr; u32 mdio_clk_cntr; u32 sb_iosf_clk_cntr; }; /* DNL run command (direct 0x0681) */ struct ice_aqc_dnl_run_command { u8 reserved0; u8 command; #define ICE_AQ_DNL_CMD_S 0 #define ICE_AQ_DNL_CMD_M (0x7 << ICE_AQ_DNL_CMD_S) #define ICE_AQ_DNL_CMD_RESET 0x0 #define ICE_AQ_DNL_CMD_RUN 0x1 #define ICE_AQ_DNL_CMD_STEP 0x3 #define ICE_AQ_DNL_CMD_ABORT 0x4 #define ICE_AQ_DNL_CMD_SET_PC 0x7 #define ICE_AQ_DNL_CMD_SRC_S 3 #define ICE_AQ_DNL_CMD_SRC_M (0x3 << ICE_AQ_DNL_CMD_SRC_S) #define ICE_AQ_DNL_CMD_SRC_DNL 0x0 #define ICE_AQ_DNL_CMD_SRC_SCRATCH 0x1 __le16 new_pc; u8 reserved1[12]; }; /* DNL call command (indirect 0x0682) * Struct is used for both command and response */ struct ice_aqc_dnl_call_command { u8 ctx; /* Used in command, reserved in response */ u8 reserved; __le16 activity_id; __le32 reserved1; __le32 addr_high; __le32 addr_low; }; /* DNL call command/response buffer (indirect 0x0682) */ struct ice_aqc_dnl_call { __le32 stores[4]; }; /* Used for both commands: * DNL read sto command (indirect 0x0683) * DNL write sto command (indirect 0x0684) */ struct ice_aqc_dnl_read_write_command { u8 ctx; u8 sto_sel; /* STORE select */ #define ICE_AQC_DNL_STORE_SELECT_STORE 0x0 #define ICE_AQC_DNL_STORE_SELECT_PSTO 0x1 #define ICE_AQC_DNL_STORE_SELECT_STACK 0x2 __le16 offset; __le32 data; /* Used for write sto only */ __le32 addr_high; /* Used for read sto only */ __le32 addr_low; /* Used for read sto only */ }; /* Used for both command responses: * DNL read sto response (indirect 0x0683) * DNL write sto response (indirect 0x0684) */ struct ice_aqc_dnl_read_write_response { u8 
reserved; u8 status; /* Reserved for read command */ __le16 size; /* Reserved for write command */ __le32 data; /* Reserved for write command */ __le32 addr_high; /* Reserved for write command */ __le32 addr_low; /* Reserved for write command */ }; /* DNL set breakpoints command (indirect 0x0686) */ struct ice_aqc_dnl_set_breakpoints_command { __le32 reserved[2]; __le32 addr_high; __le32 addr_low; }; /* DNL set breakpoints data buffer structure (indirect 0x0686) */ struct ice_aqc_dnl_set_breakpoints { u8 ctx; u8 ena; /* 0- disabled, 1- enabled */ __le16 offset; __le16 activity_id; }; /* DNL read log data command(indirect 0x0687) */ struct ice_aqc_dnl_read_log_command { __le16 reserved0; __le16 offset; __le32 reserved1; __le32 addr_high; __le32 addr_low; }; /* DNL read log data response(indirect 0x0687) */ struct ice_aqc_dnl_read_log_response { __le16 reserved; __le16 size; __le32 data; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0) u8 node_type_ctx; #define ICE_AQC_LINK_TOPO_NODE_TYPE_S 0 #define ICE_AQC_LINK_TOPO_NODE_TYPE_M (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S) #define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY 0 #define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1 #define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL 2 #define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL 3 #define ICE_AQC_LINK_TOPO_NODE_TYPE_LED 4 #define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL 5 #define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6 #define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7 #define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 #define ICE_AQC_LINK_TOPO_NODE_CTX_S 4 #define ICE_AQC_LINK_TOPO_NODE_CTX_M \ (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S) #define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL 0 #define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD 1 #define ICE_AQC_LINK_TOPO_NODE_CTX_PORT 2 #define ICE_AQC_LINK_TOPO_NODE_CTX_NODE 3 #define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4 #define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5 u8 index; }; struct ice_aqc_link_topo_addr { struct ice_aqc_link_topo_params topo_params; __le16 handle; #define ICE_AQC_LINK_TOPO_HANDLE_S 0 #define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) /* Used to decode the handle field */ #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) #define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 #define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0 /* In case of a Mezzanine type */ #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \ (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S 6 #define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S) /* In case of a LOM type */ #define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M \ (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) }; /* Get Link Topology Handle (direct, 0x06E0) */ struct ice_aqc_get_link_topo { struct ice_aqc_link_topo_addr addr; u8 node_part_num; #define ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575 0x21 u8 rsvd[9]; }; /* Read/Write I2C (direct, 0x06E2/0x06E3) */ struct ice_aqc_i2c { struct ice_aqc_link_topo_addr topo_addr; __le16 i2c_addr; u8 i2c_params; #define ICE_AQC_I2C_DATA_SIZE_S 0 #define ICE_AQC_I2C_DATA_SIZE_M (0xF << ICE_AQC_I2C_DATA_SIZE_S) #define ICE_AQC_I2C_ADDR_TYPE_M BIT(4) #define ICE_AQC_I2C_ADDR_TYPE_7BIT 0 #define ICE_AQC_I2C_ADDR_TYPE_10BIT ICE_AQC_I2C_ADDR_TYPE_M #define ICE_AQC_I2C_DATA_OFFSET_S 5 #define ICE_AQC_I2C_DATA_OFFSET_M (0x3 << ICE_AQC_I2C_DATA_OFFSET_S) #define ICE_AQC_I2C_USE_REPEATED_START BIT(7) u8 rsvd; __le16 i2c_bus_addr; #define 
ICE_AQC_I2C_ADDR_7BIT_MASK 0x7F #define ICE_AQC_I2C_ADDR_10BIT_MASK 0x3FF u8 i2c_data[4]; /* Used only by write command, reserved in read. */ }; /* Read I2C Response (direct, 0x06E2) */ struct ice_aqc_read_i2c_resp { u8 i2c_data[16]; }; /* Read/Write MDIO (direct, 0x06E4/0x06E5) */ struct ice_aqc_mdio { struct ice_aqc_link_topo_addr topo_addr; u8 mdio_device_addr; #define ICE_AQC_MDIO_DEV_S 0 #define ICE_AQC_MDIO_DEV_M (0x1F << ICE_AQC_MDIO_DEV_S) #define ICE_AQC_MDIO_CLAUSE_22 BIT(5) #define ICE_AQC_MDIO_CLAUSE_45 BIT(6) u8 mdio_bus_address; #define ICE_AQC_MDIO_BUS_ADDR_S 0 #define ICE_AQC_MDIO_BUS_ADDR_M (0x1F << ICE_AQC_MDIO_BUS_ADDR_S) __le16 offset; __le16 data; /* Input in write cmd, output in read cmd. */ u8 rsvd1[4]; }; /* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */ struct ice_aqc_gpio_by_func { struct ice_aqc_link_topo_addr topo_addr; u8 io_func_num; #define ICE_AQC_GPIO_FUNC_S 0 #define ICE_AQC_GPIO_FUNC_M (0x1F << ICE_AQC_GPIO_FUNC_S) u8 io_value; /* Input in write cmd, output in read cmd. */ #define ICE_AQC_GPIO_ON BIT(0) #define ICE_AQC_GPIO_OFF 0 u8 rsvd[8]; }; /* Set LED (direct, 0x06E8) */ struct ice_aqc_set_led { struct ice_aqc_link_topo_addr topo_addr; u8 color_and_blink; #define ICE_AQC_LED_COLOR_S 0 #define ICE_AQC_LED_COLOR_M (0x7 << ICE_AQC_LED_COLOR_S) #define ICE_AQC_LED_COLOR_SKIP 0 #define ICE_AQC_LED_COLOR_RED 1 #define ICE_AQC_LED_COLOR_ORANGE 2 #define ICE_AQC_LED_COLOR_YELLOW 3 #define ICE_AQC_LED_COLOR_GREEN 4 #define ICE_AQC_LED_COLOR_BLUE 5 #define ICE_AQC_LED_COLOR_PURPLE 6 #define ICE_AQC_LED_BLINK_S 3 #define ICE_AQC_LED_BLINK_M (0x7 << ICE_AQC_LED_BLINK_S) #define ICE_AQC_LED_BLINK_NONE 0 #define ICE_AQC_LED_BLINK_SLOW 1 #define ICE_AQC_LED_BLINK_SLOW_MAC 2 #define ICE_AQC_LED_BLINK_SLOW_FLTR 3 #define ICE_AQC_LED_BLINK_FAST 5 #define ICE_AQC_LED_BLINK_FAST_MAC 6 #define ICE_AQC_LED_BLINK_FAST_FLTR 7 u8 rsvd[9]; }; /* Set Port Identification LED (direct, 0x06E9) */ struct ice_aqc_set_port_id_led { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_PORT_ID_PORT_NUM_VALID BIT(0) u8 ident_mode; #define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0) #define ICE_AQC_PORT_IDENT_LED_ORIG 0 u8 rsvd[13]; }; /* Get Port Options (indirect, 0x06EA) */ struct ice_aqc_get_port_options { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_PORT_OPT_PORT_NUM_VALID BIT(0) u8 port_options_count; #define ICE_AQC_PORT_OPT_COUNT_S 0 #define ICE_AQC_PORT_OPT_COUNT_M (0xF << ICE_AQC_PORT_OPT_COUNT_S) u8 innermost_phy_index; u8 port_options; #define ICE_AQC_PORT_OPT_ACTIVE_S 0 #define ICE_AQC_PORT_OPT_ACTIVE_M (0xF << ICE_AQC_PORT_OPT_ACTIVE_S) #define ICE_AQC_PORT_OPT_FORCED BIT(6) #define ICE_AQC_PORT_OPT_VALID BIT(7) u8 pending_port_option_status; #define ICE_AQC_PENDING_PORT_OPT_IDX_S 0 #define ICE_AQC_PENDING_PORT_OPT_IDX_M (0xF << ICE_AQC_PENDING_PORT_OPT_IDX_S) #define ICE_AQC_PENDING_PORT_OPT_VALID BIT(7) u8 rsvd[2]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_get_port_options_elem { u8 pmd; #define ICE_AQC_PORT_INV_PORT_OPT 4 #define ICE_AQC_PORT_OPT_PMD_COUNT_S 0 #define ICE_AQC_PORT_OPT_PMD_COUNT_M (0xF << ICE_AQC_PORT_OPT_PMD_COUNT_S) #define ICE_AQC_PORT_OPT_PMD_WIDTH_S 4 #define ICE_AQC_PORT_OPT_PMD_WIDTH_M (0xF << ICE_AQC_PORT_OPT_PMD_WIDTH_S) u8 max_lane_speed; #define ICE_AQC_PORT_OPT_MAX_LANE_S 0 #define ICE_AQC_PORT_OPT_MAX_LANE_M (0xF << ICE_AQC_PORT_OPT_MAX_LANE_S) #define ICE_AQC_PORT_OPT_MAX_LANE_100M 0 #define ICE_AQC_PORT_OPT_MAX_LANE_1G 1 #define ICE_AQC_PORT_OPT_MAX_LANE_2500M 2 #define ICE_AQC_PORT_OPT_MAX_LANE_5G 3 #define
ICE_AQC_PORT_OPT_MAX_LANE_10G 4 #define ICE_AQC_PORT_OPT_MAX_LANE_25G 5 #define ICE_AQC_PORT_OPT_MAX_LANE_50G 6 #define ICE_AQC_PORT_OPT_MAX_LANE_100G 7 u8 global_scid[2]; u8 phy_scid[2]; u8 pf2port_cid[2]; }; /* Set Port Option (direct, 0x06EB) */ struct ice_aqc_set_port_option { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_SET_PORT_OPT_PORT_NUM_VALID BIT(0) u8 selected_port_option; u8 rsvd[13]; }; /* Set/Get GPIO (direct, 0x06EC/0x06ED) */ struct ice_aqc_gpio { __le16 gpio_ctrl_handle; #define ICE_AQC_GPIO_HANDLE_S 0 #define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S) u8 gpio_num; u8 gpio_val; u8 rsvd[12]; }; /* Read/Write SFF EEPROM command (indirect 0x06EE) */ struct ice_aqc_sff_eeprom { u8 lport_num; u8 lport_num_valid; #define ICE_AQC_SFF_PORT_NUM_VALID BIT(0) __le16 i2c_bus_addr; #define ICE_AQC_SFF_I2CBUS_7BIT_M 0x7F #define ICE_AQC_SFF_I2CBUS_10BIT_M 0x3FF #define ICE_AQC_SFF_I2CBUS_TYPE_M BIT(10) #define ICE_AQC_SFF_I2CBUS_TYPE_7BIT 0 #define ICE_AQC_SFF_I2CBUS_TYPE_10BIT ICE_AQC_SFF_I2CBUS_TYPE_M #define ICE_AQC_SFF_SET_EEPROM_PAGE_S 11 #define ICE_AQC_SFF_SET_EEPROM_PAGE_M (0x3 << ICE_AQC_SFF_SET_EEPROM_PAGE_S) #define ICE_AQC_SFF_NO_PAGE_CHANGE 0 #define ICE_AQC_SFF_SET_23_ON_MISMATCH 1 #define ICE_AQC_SFF_SET_22_ON_MISMATCH 2 #define ICE_AQC_SFF_IS_WRITE BIT(15) __le16 i2c_mem_addr; __le16 eeprom_page; #define ICE_AQC_SFF_EEPROM_BANK_S 0 #define ICE_AQC_SFF_EEPROM_BANK_M (0xFF << ICE_AQC_SFF_EEPROM_BANK_S) #define ICE_AQC_SFF_EEPROM_PAGE_S 8 #define ICE_AQC_SFF_EEPROM_PAGE_M (0xFF << ICE_AQC_SFF_EEPROM_PAGE_S) __le32 addr_high; __le32 addr_low; }; /* SW Set GPIO command (indirect 0x6EF) * SW Get GPIO command (indirect 0x6F0) */ struct ice_aqc_sw_gpio { __le16 gpio_ctrl_handle; #define ICE_AQC_SW_GPIO_CONTROLLER_HANDLE_S 0 #define ICE_AQC_SW_GPIO_CONTROLLER_HANDLE_M (0x3FF << ICE_AQC_SW_GPIO_CONTROLLER_HANDLE_S) u8 gpio_num; #define ICE_AQC_SW_GPIO_NUMBER_S 0 #define ICE_AQC_SW_GPIO_NUMBER_M (0x1F << ICE_AQC_SW_GPIO_NUMBER_S) u8 gpio_params; #define ICE_AQC_SW_GPIO_PARAMS_DIRECTION BIT(1) #define ICE_AQC_SW_GPIO_PARAMS_VALUE BIT(0) u8 rsvd[12]; }; /* Program Topology Device NVM (direct, 0x06F2) */ struct ice_aqc_prog_topo_dev_nvm { struct ice_aqc_link_topo_params topo_params; u8 rsvd[12]; }; /* Read Topology Device NVM (direct, 0x06F3) */ struct ice_aqc_read_topo_dev_nvm { struct ice_aqc_link_topo_params topo_params; __le32 start_address; #define ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE 8 u8 data_read[ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE]; }; /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Write commands (indirect 0x0703) * NVM Write Activate commands (direct 0x0707) * NVM Shadow RAM Dump commands (direct 0x0707) */ struct ice_aqc_nvm { #define ICE_AQC_NVM_MAX_OFFSET 0xFFFFFF __le16 offset_low; u8 offset_high; /* For Write Activate offset_high is used as flags2 */ u8 cmd_flags; #define ICE_AQC_NVM_LAST_CMD BIT(0) #define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */ #define ICE_AQC_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */ #define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_ALL BIT(1) #define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */ #define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4) #define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5) 
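	/*
	 * Illustrative sketch, not part of the upstream header: a Write
	 * Activate (0x0707) that activates the Option ROM while preserving
	 * all settings could combine the flags above as
	 *
	 *	cmd->cmd_flags = ICE_AQC_NVM_PRESERVE_ALL |
	 *			 ICE_AQC_NVM_ACTIV_SEL_OROM;
	 *
	 * where cmd points at the struct ice_aqc_nvm being built.
	 */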
#define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6) #define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */ #define ICE_AQC_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) #define ICE_AQC_NVM_RESET_LVL_M MAKEMASK(0x3, 0) /* Write reply only */ #define ICE_AQC_NVM_POR_FLAG 0 #define ICE_AQC_NVM_PERST_FLAG 1 #define ICE_AQC_NVM_EMPR_FLAG 2 #define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */ + /* For Write Activate, several flags are sent as part of a separate + * flags2 field using a separate byte. For simplicity of the software + * interface, we pass the flags as a 16 bit value so these flags are + * all offset by 8 bits + */ +#define ICE_AQC_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */ __le16 module_typeid; __le16 length; #define ICE_AQC_NVM_ERASE_LEN 0xFFFF __le32 addr_high; __le32 addr_low; }; /* NVM Module_Type ID, needed offset and read_len for struct ice_aqc_nvm. */ #define ICE_AQC_NVM_SECTOR_UNIT 4096 /* In Bytes */ #define ICE_AQC_NVM_WORD_UNIT 2 /* In Bytes */ #define ICE_AQC_NVM_START_POINT 0 #define ICE_AQC_NVM_EMP_SR_PTR_OFFSET 0x90 #define ICE_AQC_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */ #define ICE_AQC_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0) #define ICE_AQC_NVM_EMP_SR_PTR_TYPE_S 15 #define ICE_AQC_NVM_EMP_SR_PTR_TYPE_M BIT(15) #define ICE_AQC_NVM_EMP_SR_PTR_TYPE_SECTOR 1 #define ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET 0x46 #define ICE_AQC_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */ #define ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */ #define ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID 0x129 #define ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */ #define ICE_AQC_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0) #define ICE_AQC_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */ #define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */ #define ICE_AQC_NVM_MINSREV_MOD_ID 0x130 +#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B /* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the * type field is excluded from the section when reading and writing from * a module using the module_typeid field with these AQ commands. 
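 *
 * Illustrative sketch (hypothetical, not upstream code): a read of the
 * MinSRev section would select the module by type ID on the 0x0701
 * command, using the driver's CPU_TO_LE16 byte-order helper:
 *
 *	cmd->module_typeid = CPU_TO_LE16(ICE_AQC_NVM_MINSREV_MOD_ID);
 *	cmd->length = CPU_TO_LE16(sizeof(struct ice_aqc_nvm_minsrev));
 *
 * with offset_low/offset_high left at zero to read from the start of
 * the module.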
*/ struct ice_aqc_nvm_minsrev { __le16 length; __le16 validity; #define ICE_AQC_NVM_MINSREV_NVM_VALID BIT(0) #define ICE_AQC_NVM_MINSREV_OROM_VALID BIT(1) __le16 nvm_minsrev_l; __le16 nvm_minsrev_h; __le16 orom_minsrev_l; __le16 orom_minsrev_h; }; +struct ice_aqc_nvm_tx_topo_user_sel { + __le16 length; + u8 data; +#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4) + u8 reserved; +}; + /* Used for 0x0704 as well as for 0x0705 commands */ struct ice_aqc_nvm_cfg { u8 cmd_flags; #define ICE_AQC_ANVM_MULTIPLE_ELEMS BIT(0) #define ICE_AQC_ANVM_IMMEDIATE_FIELD BIT(1) #define ICE_AQC_ANVM_NEW_CFG BIT(2) u8 reserved; __le16 count; __le16 id; u8 reserved1[2]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_nvm_cfg_data { __le16 field_id; __le16 field_options; __le16 field_value; }; /* NVM Checksum Command (direct, 0x0706) */ struct ice_aqc_nvm_checksum { u8 flags; #define ICE_AQC_NVM_CHECKSUM_VERIFY BIT(0) #define ICE_AQC_NVM_CHECKSUM_RECALC BIT(1) u8 rsvd; __le16 checksum; /* Used only by response */ #define ICE_AQC_NVM_CHECKSUM_CORRECT 0xBABA u8 rsvd2[12]; }; /* * Send to PF command (indirect 0x0801) ID is only used by PF * * Send to VF command (indirect 0x0802) ID is only used by PF * */ struct ice_aqc_pf_vf_msg { __le32 id; u32 reserved; __le32 addr_high; __le32 addr_low; }; /* Write/Read Alternate - Direct (direct 0x0900/0x0902) */ struct ice_aqc_read_write_alt_direct { __le32 dword0_addr; __le32 dword0_value; __le32 dword1_addr; __le32 dword1_value; }; /* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */ struct ice_aqc_read_write_alt_indirect { __le32 base_dword_addr; __le32 num_dwords; __le32 addr_high; __le32 addr_low; }; /* Done Alternate Write (direct 0x0904) */ struct ice_aqc_done_alt_write { u8 flags; #define ICE_AQC_CMD_UEFI_BIOS_MODE BIT(0) #define ICE_AQC_RESP_RESET_NEEDED BIT(1) u8 reserved[15]; }; /* Clear Port Alternate Write (direct 0x0906) */ struct ice_aqc_clear_port_alt_write { u8 reserved[16]; }; /* Get LLDP MIB (indirect 0x0A00) * Note: This is also used by the LLDP MIB Change Event (0x0A01) * as the format is the same. */ struct ice_aqc_lldp_get_mib { u8 type; #define ICE_AQ_LLDP_MIB_TYPE_S 0 #define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S) #define ICE_AQ_LLDP_MIB_LOCAL 0 #define ICE_AQ_LLDP_MIB_REMOTE 1 #define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2 #define ICE_AQ_LLDP_BRID_TYPE_S 2 #define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S) #define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0 #define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1 /* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */ #define ICE_AQ_LLDP_TX_S 0x4 #define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S) #define ICE_AQ_LLDP_TX_ACTIVE 0 #define ICE_AQ_LLDP_TX_SUSPENDED 1 #define ICE_AQ_LLDP_TX_FLUSHED 3 +/* DCBX mode */ +#define ICE_AQ_LLDP_DCBX_S 6 +#define ICE_AQ_LLDP_DCBX_M (0x3 << ICE_AQ_LLDP_DCBX_S) +#define ICE_AQ_LLDP_DCBX_NA 0 +#define ICE_AQ_LLDP_DCBX_CEE 1 +#define ICE_AQ_LLDP_DCBX_IEEE 2 /* The following bytes are reserved for the Get LLDP MIB command (0x0A00) * and in the LLDP MIB Change Event (0x0A01). They are valid for the * Get LLDP MIB (0x0A00) response only. 
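 *
 * Illustrative sketch (not upstream code): a handler could test the
 * state byte below for a MIB change the firmware has not yet applied:
 *
 *	if ((mib->state & ICE_AQ_LLDP_MIB_CHANGE_STATE_M) ==
 *	    ICE_AQ_LLDP_MIB_CHANGE_PENDING)
 *		handle_pending_mib(mib);
 *
 * where handle_pending_mib is a hypothetical callback.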
*/ - u8 reserved1; + u8 state; +#define ICE_AQ_LLDP_MIB_CHANGE_STATE_S 0 +#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M \ + (0x1 << ICE_AQ_LLDP_MIB_CHANGE_STATE_S) +#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0 +#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1 __le16 local_len; __le16 remote_len; - u8 reserved2[2]; + u8 reserved[2]; __le32 addr_high; __le32 addr_low; }; /* Configure LLDP MIB Change Event (direct 0x0A01) */ /* For MIB Change Event use ice_aqc_lldp_get_mib structure above */ struct ice_aqc_lldp_set_mib_change { u8 command; #define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1 +#define ICE_AQ_LLDP_MIB_PENDING_S 1 +#define ICE_AQ_LLDP_MIB_PENDING_M \ + (0x1 << ICE_AQ_LLDP_MIB_PENDING_S) +#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0 +#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1 u8 reserved[15]; }; /* Add LLDP TLV (indirect 0x0A02) * Delete LLDP TLV (indirect 0x0A04) */ struct ice_aqc_lldp_add_delete_tlv { u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ u8 reserved1[1]; __le16 len; u8 reserved2[4]; __le32 addr_high; __le32 addr_low; }; /* Update LLDP TLV (indirect 0x0A03) */ struct ice_aqc_lldp_update_tlv { u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ u8 reserved; __le16 old_len; __le16 new_offset; __le16 new_len; __le32 addr_high; __le32 addr_low; }; /* Stop LLDP (direct 0x0A05) */ struct ice_aqc_lldp_stop { u8 command; #define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0) #define ICE_AQ_LLDP_AGENT_STOP 0x0 #define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK #define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1) u8 reserved[15]; }; /* Start LLDP (direct 0x0A06) */ struct ice_aqc_lldp_start { u8 command; #define ICE_AQ_LLDP_AGENT_START BIT(0) #define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1) u8 reserved[15]; }; /* Get CEE DCBX Oper Config (0x0A07) * The command uses the generic descriptor struct and * returns the struct below as an indirect response. */ struct ice_aqc_get_cee_dcb_cfg_resp { u8 oper_num_tc; u8 oper_prio_tc[4]; u8 oper_tc_bw[8]; u8 oper_pfc_en; __le16 oper_app_prio; #define ICE_AQC_CEE_APP_FCOE_S 0 #define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S) #define ICE_AQC_CEE_APP_ISCSI_S 3 #define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S) #define ICE_AQC_CEE_APP_FIP_S 8 #define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S) __le32 tlv_status; #define ICE_AQC_CEE_PG_STATUS_S 0 #define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S) #define ICE_AQC_CEE_PFC_STATUS_S 3 #define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S) #define ICE_AQC_CEE_FCOE_STATUS_S 8 #define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S) #define ICE_AQC_CEE_ISCSI_STATUS_S 11 #define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S) #define ICE_AQC_CEE_FIP_STATUS_S 16 #define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S) u8 reserved[12]; }; /* Set Local LLDP MIB (indirect 0x0A08) * Used to replace the local MIB of a given LLDP agent. e.g. 
DCBX */ struct ice_aqc_lldp_set_local_mib { u8 type; #define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0) #define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0 #define SET_LOCAL_MIB_TYPE_CEE_M BIT(1) #define SET_LOCAL_MIB_TYPE_CEE_WILLING 0 #define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M u8 reserved0; __le16 length; u8 reserved1[4]; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_lldp_set_local_mib_resp { u8 status; #define SET_LOCAL_MIB_RESP_EVENT_M BIT(0) #define SET_LOCAL_MIB_RESP_MIB_CHANGE_SILENT 0 #define SET_LOCAL_MIB_RESP_MIB_CHANGE_EVENT SET_LOCAL_MIB_RESP_EVENT_M u8 reserved[15]; }; /* Stop/Start LLDP Agent (direct 0x0A09) * Used for stopping/starting specific LLDP agent. e.g. DCBX. * The same structure is used for the response, with the command field * being used as the status field. */ struct ice_aqc_lldp_stop_start_specific_agent { u8 command; #define ICE_AQC_START_STOP_AGENT_M BIT(0) #define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0 #define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M u8 reserved[15]; }; /* LLDP Filter Control (direct 0x0A0A) */ struct ice_aqc_lldp_filter_ctrl { u8 cmd_flags; #define ICE_AQC_LLDP_FILTER_ACTION_M MAKEMASK(3, 0) #define ICE_AQC_LLDP_FILTER_ACTION_ADD 0x0 #define ICE_AQC_LLDP_FILTER_ACTION_DELETE 0x1 #define ICE_AQC_LLDP_FILTER_ACTION_UPDATE 0x2 u8 reserved1; __le16 vsi_num; u8 reserved2[12]; }; /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ struct ice_aqc_get_set_rss_key { #define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15) #define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0 #define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S) __le16 vsi_id; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; #define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28 #define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC #define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \ (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE) /** * struct ice_aqc_get_set_rss_keys - Get/Set RSS hash key command buffer * @standard_rss_key: 40 most significant bytes of hash key * @extended_hash_key: 12 least significant bytes of hash key * * Set/Get 40 byte hash key using standard_rss_key field, and set * extended_hash_key field to zero. Set/Get 52 byte hash key using * standard_rss_key field for 40 most significant bytes and the * extended_hash_key field for the 12 least significant bytes of hash key. 
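 *
 * Illustrative sketch for a caller-provided 52-byte key (plain memcpy
 * used for brevity; not upstream code):
 *
 *	struct ice_aqc_get_set_rss_keys buf;
 *
 *	memcpy(buf.standard_rss_key, key,
 *	       ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
 *	memcpy(buf.extended_hash_key,
 *	       key + ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE,
 *	       ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE);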
*/ struct ice_aqc_get_set_rss_keys { u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE]; u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE]; }; /* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */ struct ice_aqc_get_set_rss_lut { #define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) #define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0 #define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) __le16 vsi_id; #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \ (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048 #define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2 #define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4 #define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \ (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) __le16 flags; __le32 reserved; __le32 addr_high; __le32 addr_low; }; /* Add Tx LAN Queues (indirect 0x0C30) */ struct ice_aqc_add_txqs { u8 num_qgrps; u8 reserved[3]; __le32 reserved1; __le32 addr_high; __le32 addr_low; }; /* This is the descriptor of each queue entry for the Add Tx LAN Queues * command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp. */ struct ice_aqc_add_txqs_perq { __le16 txq_id; u8 rsvd[2]; __le32 q_teid; u8 txq_ctx[22]; u8 rsvd2[2]; struct ice_aqc_txsched_elem info; }; /* The format of the command buffer for Add Tx LAN Queues (0x0C30) * is an array of the following structs. Please note that the length of * each struct ice_aqc_add_tx_qgrp is variable due * to the variable number of queues in each group! */ struct ice_aqc_add_tx_qgrp { __le32 parent_teid; u8 num_txqs; u8 rsvd[3]; struct ice_aqc_add_txqs_perq txqs[STRUCT_HACK_VAR_LEN]; }; /* Disable Tx LAN Queues (indirect 0x0C31) */ struct ice_aqc_dis_txqs { u8 cmd_type; #define ICE_AQC_Q_DIS_CMD_S 0 #define ICE_AQC_Q_DIS_CMD_M (0x3 << ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_NO_FUNC_RESET (0 << ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_VM_RESET BIT(ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_VF_RESET (2 << ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_PF_RESET (3 << ICE_AQC_Q_DIS_CMD_S) #define ICE_AQC_Q_DIS_CMD_SUBSEQ_CALL BIT(2) #define ICE_AQC_Q_DIS_CMD_FLUSH_PIPE BIT(3) u8 num_entries; __le16 vmvf_and_timeout; #define ICE_AQC_Q_DIS_VMVF_NUM_S 0 #define ICE_AQC_Q_DIS_VMVF_NUM_M (0x3FF << ICE_AQC_Q_DIS_VMVF_NUM_S) #define ICE_AQC_Q_DIS_TIMEOUT_S 10 #define ICE_AQC_Q_DIS_TIMEOUT_M (0x3F << ICE_AQC_Q_DIS_TIMEOUT_S) __le32 blocked_cgds; __le32 addr_high; __le32 addr_low; }; /* The buffer for Disable Tx LAN Queues (indirect 0x0C31) * contains the following structures, arrayed one after the * other. * Note: Since the q_id is 16 bits wide, if the * number of queues is even, then 2 bytes of alignment MUST be * added before the start of the next group, to allow correct * alignment of the parent_teid field. 
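 *
 * Illustrative sketch (not upstream code) of the resulting per-group
 * size, where the 6 bytes cover parent_teid, num_qs and rsvd:
 *
 *	u16 sz = 6 + num_qs * sizeof(__le16);
 *
 *	if ((num_qs % 2) == 0)
 *		sz += 2;
 *
 * with the final two bytes being the alignment pad described above.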
*/ #pragma pack(1) struct ice_aqc_dis_txq_item { __le32 parent_teid; u8 num_qs; u8 rsvd; /* The length of the q_id array varies according to num_qs */ #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15 #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \ (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \ (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) __le16 q_id[STRUCT_HACK_VAR_LEN]; }; #pragma pack() /* Tx LAN Queues Cleanup Event (0x0C31) */ struct ice_aqc_txqs_cleanup { __le16 caller_opc; __le16 cmd_tag; u8 reserved[12]; }; /* Move / Reconfigure Tx Queues (indirect 0x0C32) */ struct ice_aqc_move_txqs { u8 cmd_type; #define ICE_AQC_Q_CMD_TYPE_S 0 #define ICE_AQC_Q_CMD_TYPE_M (0x3 << ICE_AQC_Q_CMD_TYPE_S) #define ICE_AQC_Q_CMD_TYPE_MOVE 1 #define ICE_AQC_Q_CMD_TYPE_TC_CHANGE 2 #define ICE_AQC_Q_CMD_TYPE_MOVE_AND_TC 3 #define ICE_AQC_Q_CMD_SUBSEQ_CALL BIT(2) #define ICE_AQC_Q_CMD_FLUSH_PIPE BIT(3) u8 num_qs; u8 rsvd; u8 timeout; #define ICE_AQC_Q_CMD_TIMEOUT_S 2 #define ICE_AQC_Q_CMD_TIMEOUT_M (0x3F << ICE_AQC_Q_CMD_TIMEOUT_S) __le32 blocked_cgds; __le32 addr_high; __le32 addr_low; }; /* Per-queue data buffer for the Move Tx LAN Queues command/response */ struct ice_aqc_move_txqs_elem { __le16 txq_id; u8 q_cgd; u8 rsvd; __le32 q_teid; }; /* Indirect data buffer for the Move Tx LAN Queues command/response */ struct ice_aqc_move_txqs_data { __le32 src_teid; __le32 dest_teid; struct ice_aqc_move_txqs_elem txqs[STRUCT_HACK_VAR_LEN]; }; /* Add Tx RDMA Queue Set (indirect 0x0C33) */ struct ice_aqc_add_rdma_qset { u8 num_qset_grps; u8 reserved[7]; __le32 addr_high; __le32 addr_low; }; /* This is the descriptor of each qset entry for the Add Tx RDMA Queue Set * command (0x0C33). Only used within struct ice_aqc_add_rdma_qset. */ struct ice_aqc_add_tx_rdma_qset_entry { __le16 tx_qset_id; u8 rsvd[2]; __le32 qset_teid; struct ice_aqc_txsched_elem info; }; /* The format of the command buffer for Add Tx RDMA Queue Set(0x0C33) * is an array of the following structs. Please note that the length of * each struct ice_aqc_add_rdma_qset is variable due to the variable * number of queues in each group! 
*/ struct ice_aqc_add_rdma_qset_data { __le32 parent_teid; __le16 num_qsets; u8 rsvd[2]; struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[STRUCT_HACK_VAR_LEN]; }; /* Move RDMA Queue Set (indirect 0x0C34) */ struct ice_aqc_move_rdma_qset_cmd { u8 num_rdma_qset; /* Used by commands and response */ +#define ICE_AQC_PF_MODE_SAME_PF 0x0 +#define ICE_AQC_PF_MODE_GIVE_OWNERSHIP 0x1 +#define ICE_AQC_PF_MODE_KEEP_OWNERSHIP 0x2 u8 flags; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Buffer */ struct ice_aqc_move_rdma_qset_buffer_desc { __le16 tx_qset_id; __le16 qset_teid; }; struct ice_aqc_move_rdma_qset_buffer { __le32 src_parent_teid; __le32 dest_parent_teid; struct ice_aqc_move_rdma_qset_buffer_desc descs[STRUCT_HACK_VAR_LEN]; }; /* Download Package (indirect 0x0C40) */ /* Also used for Update Package (indirect 0x0C42 and 0x0C41) */ struct ice_aqc_download_pkg { u8 flags; #define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01 u8 reserved[3]; __le32 reserved1; __le32 addr_high; __le32 addr_low; }; struct ice_aqc_download_pkg_resp { __le32 error_offset; __le32 error_info; __le32 addr_high; __le32 addr_low; }; /* Get Package Info List (indirect 0x0C43) */ struct ice_aqc_get_pkg_info_list { __le32 reserved1; __le32 reserved2; __le32 addr_high; __le32 addr_low; }; /* Version format for packages */ struct ice_pkg_ver { u8 major; u8 minor; u8 update; u8 draft; }; #define ICE_PKG_NAME_SIZE 32 #define ICE_SEG_ID_SIZE 28 #define ICE_SEG_NAME_SIZE 28 struct ice_aqc_get_pkg_info { struct ice_pkg_ver ver; char name[ICE_SEG_NAME_SIZE]; __le32 track_id; u8 is_in_nvm; u8 is_active; u8 is_active_at_boot; u8 is_modified; }; /* Get Package Info List response buffer format (0x0C43) */ struct ice_aqc_get_pkg_info_resp { __le32 count; struct ice_aqc_get_pkg_info pkg_info[STRUCT_HACK_VAR_LEN]; }; /* Driver Shared Parameters (direct, 0x0C90) */ struct ice_aqc_driver_shared_params { u8 set_or_get_op; #define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0) -#define ICE_AQC_DRIVER_PARAM_SET 0 -#define ICE_AQC_DRIVER_PARAM_GET 1 +#define ICE_AQC_DRIVER_PARAM_SET ((u8)0) +#define ICE_AQC_DRIVER_PARAM_GET ((u8)1) u8 param_indx; #define ICE_AQC_DRIVER_PARAM_MAX_IDX 15 u8 rsvd[2]; __le32 param_val; __le32 addr_high; __le32 addr_low; }; /* Lan Queue Overflow Event (direct, 0x1001) */ struct ice_aqc_event_lan_overflow { __le32 prtdcb_ruptq; __le32 qtx_ctl; u8 reserved[8]; }; /* Debug Dump Internal Data (indirect 0xFF08) */ struct ice_aqc_debug_dump_internals { u8 cluster_id; -#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0 -#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2 -#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3 /* EMP_DRAM only dumpable in device debug mode */ -#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4 -#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5 /* AUX_REGS only dumpable in device debug mode */ -#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6 -#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7 -#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG 9 +#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 21 u8 reserved; __le16 table_id; /* Used only for non-memory clusters */ __le32 idx; /* In table entries for tables, in bytes for memory */ __le32 addr_high; 
__le32 addr_low; }; enum ice_aqc_fw_logging_mod { ICE_AQC_FW_LOG_ID_GENERAL = 0, ICE_AQC_FW_LOG_ID_CTRL, ICE_AQC_FW_LOG_ID_LINK, ICE_AQC_FW_LOG_ID_LINK_TOPO, ICE_AQC_FW_LOG_ID_DNL, ICE_AQC_FW_LOG_ID_I2C, ICE_AQC_FW_LOG_ID_SDP, ICE_AQC_FW_LOG_ID_MDIO, ICE_AQC_FW_LOG_ID_ADMINQ, ICE_AQC_FW_LOG_ID_HDMA, ICE_AQC_FW_LOG_ID_LLDP, ICE_AQC_FW_LOG_ID_DCBX, ICE_AQC_FW_LOG_ID_DCB, ICE_AQC_FW_LOG_ID_XLR, ICE_AQC_FW_LOG_ID_NVM, ICE_AQC_FW_LOG_ID_AUTH, ICE_AQC_FW_LOG_ID_VPD, ICE_AQC_FW_LOG_ID_IOSF, ICE_AQC_FW_LOG_ID_PARSER, ICE_AQC_FW_LOG_ID_SW, ICE_AQC_FW_LOG_ID_SCHEDULER, ICE_AQC_FW_LOG_ID_TXQ, ICE_AQC_FW_LOG_ID_RSVD, ICE_AQC_FW_LOG_ID_POST, ICE_AQC_FW_LOG_ID_WATCHDOG, ICE_AQC_FW_LOG_ID_TASK_DISPATCH, ICE_AQC_FW_LOG_ID_MNG, ICE_AQC_FW_LOG_ID_SYNCE, ICE_AQC_FW_LOG_ID_HEALTH, ICE_AQC_FW_LOG_ID_TSDRV, ICE_AQC_FW_LOG_ID_PFREG, ICE_AQC_FW_LOG_ID_MDLVER, ICE_AQC_FW_LOG_ID_MAX, }; - /* Set Health Status (direct 0xFF20) */ struct ice_aqc_set_health_status_config { u8 event_source; #define ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK BIT(0) #define ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK BIT(1) #define ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK BIT(2) u8 reserved[15]; }; #define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT 0x101 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE 0x102 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL 0x103 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM 0x104 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT 0x105 #define ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106 #define ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107 #define ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108 +#define ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109 #define ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B #define ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C #define ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D #define ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED 0x10F #define ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT 0x110 #define ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED 0x111 #define ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO 0x112 #define ICE_AQC_HEALTH_STATUS_ERR_NETLIST 0x113 #define ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT 0x114 #define ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS 0x115 #define ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME 0x116 #define ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT 0x117 #define ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG 0x120 #define ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD 0x121 #define ICE_AQC_HEALTH_STATUS_INFO_RECOVERY 0x500 #define ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS 0x501 #define ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH 0x502 #define ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH 0x503 #define ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH 0x504 #define ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT 0x505 #define ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT 0x506 +#define ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507 +#define ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508 #define ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB 0x509 +#define ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A +#define ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET 0x50B +#define ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C +#define ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D +#define ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP 0x1000 +#define ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001 +#define ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002 /* Get Health Status codes (indirect 0xFF21) */ struct ice_aqc_get_supported_health_status_codes { __le16 health_code_count; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Get Health Status (indirect 0xFF22) */ struct 
ice_aqc_get_health_status { __le16 health_status_count; u8 reserved[6]; __le32 addr_high; __le32 addr_low; }; /* Get Health Status event buffer entry, (0xFF22) * repeated per reported health status */ struct ice_aqc_health_status_elem { __le16 health_status_code; __le16 event_source; #define ICE_AQC_HEALTH_STATUS_PF (0x1) #define ICE_AQC_HEALTH_STATUS_PORT (0x2) #define ICE_AQC_HEALTH_STATUS_GLOBAL (0x3) __le32 internal_data1; #define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA (0xDEADBEEF) __le32 internal_data2; }; /* Clear Health Status (direct 0xFF23) */ struct ice_aqc_clear_health_status { __le32 reserved[4]; }; /* Set FW Logging configuration (indirect 0xFF30) * Register for FW Logging (indirect 0xFF31) * Query FW Logging (indirect 0xFF32) * FW Log Event (indirect 0xFF33) * Get FW Log (indirect 0xFF34) * Clear FW Log (indirect 0xFF35) */ struct ice_aqc_fw_log { u8 cmd_flags; #define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0) #define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1) #define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2) #define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3) #define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0) #define ICE_AQC_FW_LOG_AQ_QUERY BIT(2) #define ICE_AQC_FW_LOG_PERSISTENT BIT(0) u8 rsp_flag; #define ICE_AQC_FW_LOG_MORE_DATA BIT(1) __le16 fw_rt_msb; union { struct { __le32 fw_rt_lsb; } sync; struct { __le16 log_resolution; #define ICE_AQC_FW_LOG_MIN_RESOLUTION (1) #define ICE_AQC_FW_LOG_MAX_RESOLUTION (128) __le16 mdl_cnt; } cfg; } ops; __le32 addr_high; __le32 addr_low; }; /* Response Buffer for: * Set Firmware Logging Configuration (0xFF30) * Query FW Logging (0xFF32) */ struct ice_aqc_fw_log_cfg_resp { __le16 module_identifier; u8 log_level; u8 rsvd0; }; /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags * @opcode: AQ command opcode * @datalen: length in bytes of indirect/external data buffer * @retval: return value from firmware * @cookie_high: opaque data high-half * @cookie_low: opaque data low-half * @params: command-specific parameters * * Descriptor format for commands the driver posts on the Admin Transmit Queue * (ATQ). The firmware writes back onto the command descriptor and returns * the result of the command. Asynchronous events that are not an immediate * result of the command are written to the Admin Receive Queue (ARQ) using * the same descriptor format. Descriptors are in little-endian notation with * 32-bit words. 
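 *
 * Illustrative sketch (not upstream code): an indirect command attaches
 * its external buffer by setting the BUF flag (plus RD when the buffer
 * carries data toward the firmware) and the buffer size in datalen:
 *
 *	desc.opcode = CPU_TO_LE16(ice_aqc_opc_get_link_status);
 *	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
 *	desc.datalen = CPU_TO_LE16(ICE_GET_LINK_STATUS_DATALEN_V1);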
*/ struct ice_aq_desc { __le16 flags; __le16 opcode; __le16 datalen; __le16 retval; __le32 cookie_high; __le32 cookie_low; union { u8 raw[16]; struct ice_aqc_generic generic; struct ice_aqc_get_ver get_ver; struct ice_aqc_driver_ver driver_ver; struct ice_aqc_q_shutdown q_shutdown; struct ice_aqc_get_exp_err exp_err; struct ice_aqc_req_res res_owner; struct ice_aqc_manage_mac_read mac_read; struct ice_aqc_manage_mac_write mac_write; struct ice_aqc_clear_pxe clear_pxe; struct ice_aqc_config_no_drop_policy no_drop; struct ice_aqc_add_update_mir_rule add_update_rule; struct ice_aqc_delete_mir_rule del_rule; struct ice_aqc_list_caps get_cap; struct ice_aqc_get_phy_caps get_phy; struct ice_aqc_set_phy_cfg set_phy; struct ice_aqc_restart_an restart_an; struct ice_aqc_dnl_get_status get_status; struct ice_aqc_dnl_run_command dnl_run; struct ice_aqc_dnl_call_command dnl_call; struct ice_aqc_dnl_read_write_command dnl_read_write; struct ice_aqc_dnl_read_write_response dnl_read_write_resp; struct ice_aqc_dnl_set_breakpoints_command dnl_set_brk; struct ice_aqc_dnl_read_log_command dnl_read_log; struct ice_aqc_dnl_read_log_response dnl_read_log_resp; struct ice_aqc_i2c read_write_i2c; struct ice_aqc_read_i2c_resp read_i2c_resp; struct ice_aqc_mdio read_write_mdio; struct ice_aqc_gpio_by_func read_write_gpio_by_func; struct ice_aqc_gpio read_write_gpio; struct ice_aqc_sw_gpio sw_read_write_gpio; struct ice_aqc_set_led set_led; struct ice_aqc_mdio read_mdio; struct ice_aqc_mdio write_mdio; struct ice_aqc_sff_eeprom read_write_sff_param; struct ice_aqc_set_port_id_led set_port_id_led; struct ice_aqc_get_port_options get_port_options; struct ice_aqc_set_port_option set_port_option; struct ice_aqc_get_sw_cfg get_sw_conf; struct ice_aqc_set_port_params set_port_params; struct ice_aqc_sw_rules sw_rules; struct ice_aqc_storm_cfg storm_conf; struct ice_aqc_get_topo get_topo; struct ice_aqc_sched_elem_cmd sched_elem_cmd; struct ice_aqc_query_txsched_res query_sched_res; struct ice_aqc_query_node_to_root query_node_to_root; struct ice_aqc_cfg_l2_node_cgd cfg_l2_node_cgd; struct ice_aqc_query_port_ets port_ets; struct ice_aqc_rl_profile rl_profile; + struct ice_aqc_node_attr node_attr; struct ice_aqc_nvm nvm; struct ice_aqc_nvm_cfg nvm_cfg; struct ice_aqc_nvm_checksum nvm_checksum; struct ice_aqc_pf_vf_msg virt; struct ice_aqc_read_write_alt_direct read_write_alt_direct; struct ice_aqc_read_write_alt_indirect read_write_alt_indirect; struct ice_aqc_done_alt_write done_alt_write; struct ice_aqc_clear_port_alt_write clear_port_alt_write; struct ice_aqc_pfc_ignore pfc_ignore; struct ice_aqc_set_query_pfc_mode set_query_pfc_mode; struct ice_aqc_set_dcb_params set_dcb_params; struct ice_aqc_lldp_get_mib lldp_get_mib; struct ice_aqc_lldp_set_mib_change lldp_set_event; struct ice_aqc_lldp_add_delete_tlv lldp_add_delete_tlv; struct ice_aqc_lldp_update_tlv lldp_update_tlv; struct ice_aqc_lldp_stop lldp_stop; struct ice_aqc_lldp_start lldp_start; struct ice_aqc_lldp_set_local_mib lldp_set_mib; struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl; struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl; struct ice_aqc_get_set_rss_lut get_set_rss_lut; struct ice_aqc_get_set_rss_key get_set_rss_key; struct ice_aqc_add_txqs add_txqs; struct ice_aqc_dis_txqs dis_txqs; struct ice_aqc_move_txqs move_txqs; struct ice_aqc_add_rdma_qset add_rdma_qset; + struct ice_aqc_move_rdma_qset_cmd move_rdma_qset; struct ice_aqc_txqs_cleanup txqs_cleanup; struct ice_aqc_add_get_update_free_vsi vsi_cmd; struct ice_aqc_add_update_free_vsi_resp 
add_update_free_vsi_res; struct ice_aqc_get_vsi_resp get_vsi_resp; struct ice_aqc_download_pkg download_pkg; struct ice_aqc_get_pkg_info_list get_pkg_info_list; struct ice_aqc_driver_shared_params drv_shared_params; struct ice_aqc_fw_log fw_log; struct ice_aqc_debug_dump_internals debug_dump; struct ice_aqc_set_mac_lb set_mac_lb; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_get_res_alloc get_res; struct ice_aqc_get_allocd_res_desc get_res_desc; struct ice_aqc_set_mac_cfg set_mac_cfg; struct ice_aqc_set_event_mask set_event_mask; struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; struct ice_aqc_get_link_topo get_link_topo; struct ice_aqc_set_health_status_config set_health_status_config; struct ice_aqc_get_supported_health_status_codes get_supported_health_status_codes; struct ice_aqc_get_health_status get_health_status; struct ice_aqc_clear_health_status clear_health_status; struct ice_aqc_prog_topo_dev_nvm prog_topo_dev_nvm; struct ice_aqc_read_topo_dev_nvm read_topo_dev_nvm; + struct ice_aqc_get_set_tx_topo get_set_tx_topo; } params; }; /* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ #define ICE_AQ_LG_BUF 512 /* Flags sub-structure * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | */ /* command flags and offsets */ #define ICE_AQ_FLAG_DD_S 0 #define ICE_AQ_FLAG_CMP_S 1 #define ICE_AQ_FLAG_ERR_S 2 #define ICE_AQ_FLAG_VFE_S 3 #define ICE_AQ_FLAG_LB_S 9 #define ICE_AQ_FLAG_RD_S 10 #define ICE_AQ_FLAG_VFC_S 11 #define ICE_AQ_FLAG_BUF_S 12 #define ICE_AQ_FLAG_SI_S 13 #define ICE_AQ_FLAG_EI_S 14 #define ICE_AQ_FLAG_FE_S 15 #define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */ #define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */ #define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */ #define ICE_AQ_FLAG_VFE BIT(ICE_AQ_FLAG_VFE_S) /* 0x8 */ #define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */ #define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */ #define ICE_AQ_FLAG_VFC BIT(ICE_AQ_FLAG_VFC_S) /* 0x800 */ #define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */ #define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */ #define ICE_AQ_FLAG_EI BIT(ICE_AQ_FLAG_EI_S) /* 0x4000 */ #define ICE_AQ_FLAG_FE BIT(ICE_AQ_FLAG_FE_S) /* 0x8000 */ /* error codes */ enum ice_aq_err { ICE_AQ_RC_OK = 0, /* Success */ ICE_AQ_RC_EPERM = 1, /* Operation not permitted */ ICE_AQ_RC_ENOENT = 2, /* No such element */ ICE_AQ_RC_ESRCH = 3, /* Bad opcode */ ICE_AQ_RC_EINTR = 4, /* Operation interrupted */ ICE_AQ_RC_EIO = 5, /* I/O error */ ICE_AQ_RC_ENXIO = 6, /* No such resource */ ICE_AQ_RC_E2BIG = 7, /* Arg too long */ ICE_AQ_RC_EAGAIN = 8, /* Try again */ ICE_AQ_RC_ENOMEM = 9, /* Out of memory */ ICE_AQ_RC_EACCES = 10, /* Permission denied */ ICE_AQ_RC_EFAULT = 11, /* Bad address */ ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ ICE_AQ_RC_EEXIST = 13, /* Object already exists */ ICE_AQ_RC_EINVAL = 14, /* Invalid argument */ ICE_AQ_RC_ENOTTY = 15, /* Not a typewriter */ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */ ICE_AQ_RC_ERANGE = 18, /* Parameter out of range */ ICE_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ ICE_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ ICE_AQ_RC_EFBIG = 22, /* File too big */ ICE_AQ_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */ 
ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */ ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */ ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */ ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */ ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */ ICE_AQ_RC_EACCES_BMCU = 29, /* BMC Update in progress */ }; /* Admin Queue command opcodes */ enum ice_adminq_opc { /* AQ commands */ ice_aqc_opc_get_ver = 0x0001, ice_aqc_opc_driver_ver = 0x0002, ice_aqc_opc_q_shutdown = 0x0003, ice_aqc_opc_get_exp_err = 0x0005, /* resource ownership */ ice_aqc_opc_req_res = 0x0008, ice_aqc_opc_release_res = 0x0009, /* device/function capabilities */ ice_aqc_opc_list_func_caps = 0x000A, ice_aqc_opc_list_dev_caps = 0x000B, /* manage MAC address */ ice_aqc_opc_manage_mac_read = 0x0107, ice_aqc_opc_manage_mac_write = 0x0108, /* PXE */ ice_aqc_opc_clear_pxe_mode = 0x0110, ice_aqc_opc_config_no_drop_policy = 0x0112, /* internal switch commands */ ice_aqc_opc_get_sw_cfg = 0x0200, ice_aqc_opc_set_port_params = 0x0203, /* Alloc/Free/Get Resources */ ice_aqc_opc_get_res_alloc = 0x0204, ice_aqc_opc_alloc_res = 0x0208, ice_aqc_opc_free_res = 0x0209, ice_aqc_opc_get_allocd_res_desc = 0x020A, ice_aqc_opc_set_vlan_mode_parameters = 0x020C, ice_aqc_opc_get_vlan_mode_parameters = 0x020D, /* VSI commands */ ice_aqc_opc_add_vsi = 0x0210, ice_aqc_opc_update_vsi = 0x0211, ice_aqc_opc_get_vsi_params = 0x0212, ice_aqc_opc_free_vsi = 0x0213, /* Mirroring rules - add/update, delete */ ice_aqc_opc_add_update_mir_rule = 0x0260, ice_aqc_opc_del_mir_rule = 0x0261, /* storm configuration */ ice_aqc_opc_set_storm_cfg = 0x0280, ice_aqc_opc_get_storm_cfg = 0x0281, /* switch rules population commands */ ice_aqc_opc_add_sw_rules = 0x02A0, ice_aqc_opc_update_sw_rules = 0x02A1, ice_aqc_opc_remove_sw_rules = 0x02A2, ice_aqc_opc_get_sw_rules = 0x02A3, ice_aqc_opc_clear_pf_cfg = 0x02A4, /* DCB commands */ ice_aqc_opc_pfc_ignore = 0x0301, ice_aqc_opc_query_pfc_mode = 0x0302, ice_aqc_opc_set_pfc_mode = 0x0303, ice_aqc_opc_set_dcb_params = 0x0306, /* transmit scheduler commands */ ice_aqc_opc_get_dflt_topo = 0x0400, ice_aqc_opc_add_sched_elems = 0x0401, ice_aqc_opc_cfg_sched_elems = 0x0403, ice_aqc_opc_get_sched_elems = 0x0404, ice_aqc_opc_move_sched_elems = 0x0408, ice_aqc_opc_suspend_sched_elems = 0x0409, ice_aqc_opc_resume_sched_elems = 0x040A, ice_aqc_opc_query_port_ets = 0x040E, ice_aqc_opc_delete_sched_elems = 0x040F, ice_aqc_opc_add_rl_profiles = 0x0410, ice_aqc_opc_query_rl_profiles = 0x0411, ice_aqc_opc_query_sched_res = 0x0412, ice_aqc_opc_query_node_to_root = 0x0413, ice_aqc_opc_cfg_l2_node_cgd = 0x0414, ice_aqc_opc_remove_rl_profiles = 0x0415, + ice_aqc_opc_set_tx_topo = 0x0417, + ice_aqc_opc_get_tx_topo = 0x0418, + ice_aqc_opc_cfg_node_attr = 0x0419, + ice_aqc_opc_query_node_attr = 0x041A, /* PHY commands */ ice_aqc_opc_get_phy_caps = 0x0600, ice_aqc_opc_set_phy_cfg = 0x0601, ice_aqc_opc_set_mac_cfg = 0x0603, ice_aqc_opc_restart_an = 0x0605, ice_aqc_opc_get_link_status = 0x0607, ice_aqc_opc_set_event_mask = 0x0613, ice_aqc_opc_set_mac_lb = 0x0620, ice_aqc_opc_dnl_get_status = 0x0680, ice_aqc_opc_dnl_run = 0x0681, ice_aqc_opc_dnl_call = 0x0682, ice_aqc_opc_dnl_read_sto = 0x0683, ice_aqc_opc_dnl_write_sto = 0x0684, ice_aqc_opc_dnl_set_breakpoints = 0x0686, ice_aqc_opc_dnl_read_log = 0x0687, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, ice_aqc_opc_read_mdio = 0x06E4, ice_aqc_opc_write_mdio = 0x06E5, ice_aqc_opc_set_gpio_by_func = 0x06E6, 
ice_aqc_opc_get_gpio_by_func = 0x06E7, ice_aqc_opc_set_led = 0x06E8, ice_aqc_opc_set_port_id_led = 0x06E9, ice_aqc_opc_get_port_options = 0x06EA, ice_aqc_opc_set_port_option = 0x06EB, ice_aqc_opc_set_gpio = 0x06EC, ice_aqc_opc_get_gpio = 0x06ED, ice_aqc_opc_sff_eeprom = 0x06EE, ice_aqc_opc_sw_set_gpio = 0x06EF, ice_aqc_opc_sw_get_gpio = 0x06F0, ice_aqc_opc_prog_topo_dev_nvm = 0x06F2, ice_aqc_opc_read_topo_dev_nvm = 0x06F3, /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, ice_aqc_opc_nvm_erase = 0x0702, ice_aqc_opc_nvm_write = 0x0703, ice_aqc_opc_nvm_cfg_read = 0x0704, ice_aqc_opc_nvm_cfg_write = 0x0705, ice_aqc_opc_nvm_checksum = 0x0706, ice_aqc_opc_nvm_write_activate = 0x0707, ice_aqc_opc_nvm_sr_dump = 0x0707, ice_aqc_opc_nvm_save_factory_settings = 0x0708, ice_aqc_opc_nvm_update_empr = 0x0709, ice_aqc_opc_nvm_pkg_data = 0x070A, ice_aqc_opc_nvm_pass_component_tbl = 0x070B, /* PF/VF mailbox commands */ ice_mbx_opc_send_msg_to_pf = 0x0801, ice_mbx_opc_send_msg_to_vf = 0x0802, /* Alternate Structure Commands */ ice_aqc_opc_write_alt_direct = 0x0900, ice_aqc_opc_write_alt_indirect = 0x0901, ice_aqc_opc_read_alt_direct = 0x0902, ice_aqc_opc_read_alt_indirect = 0x0903, ice_aqc_opc_done_alt_write = 0x0904, ice_aqc_opc_clear_port_alt_write = 0x0906, /* LLDP commands */ ice_aqc_opc_lldp_get_mib = 0x0A00, ice_aqc_opc_lldp_set_mib_change = 0x0A01, ice_aqc_opc_lldp_add_tlv = 0x0A02, ice_aqc_opc_lldp_update_tlv = 0x0A03, ice_aqc_opc_lldp_delete_tlv = 0x0A04, ice_aqc_opc_lldp_stop = 0x0A05, ice_aqc_opc_lldp_start = 0x0A06, ice_aqc_opc_get_cee_dcb_cfg = 0x0A07, ice_aqc_opc_lldp_set_local_mib = 0x0A08, ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09, ice_aqc_opc_lldp_filter_ctrl = 0x0A0A, + ice_execute_pending_lldp_mib = 0x0A0B, /* RSS commands */ ice_aqc_opc_set_rss_key = 0x0B02, ice_aqc_opc_set_rss_lut = 0x0B03, ice_aqc_opc_get_rss_key = 0x0B04, ice_aqc_opc_get_rss_lut = 0x0B05, /* Tx queue handling commands/events */ ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, ice_aqc_opc_txqs_cleanup = 0x0C31, ice_aqc_opc_move_recfg_txqs = 0x0C32, ice_aqc_opc_add_rdma_qset = 0x0C33, ice_aqc_opc_move_rdma_qset = 0x0C34, /* package commands */ ice_aqc_opc_download_pkg = 0x0C40, ice_aqc_opc_upload_section = 0x0C41, ice_aqc_opc_update_pkg = 0x0C42, ice_aqc_opc_get_pkg_info_list = 0x0C43, ice_aqc_opc_driver_shared_params = 0x0C90, /* Standalone Commands/Events */ ice_aqc_opc_event_lan_overflow = 0x1001, /* debug commands */ ice_aqc_opc_debug_dump_internals = 0xFF08, /* SystemDiagnostic commands */ ice_aqc_opc_set_health_status_config = 0xFF20, ice_aqc_opc_get_supported_health_status_codes = 0xFF21, ice_aqc_opc_get_health_status = 0xFF22, ice_aqc_opc_clear_health_status = 0xFF23, /* FW Logging Commands */ ice_aqc_opc_fw_logs_config = 0xFF30, ice_aqc_opc_fw_logs_register = 0xFF31, ice_aqc_opc_fw_logs_query = 0xFF32, ice_aqc_opc_fw_logs_event = 0xFF33, ice_aqc_opc_fw_logs_get = 0xFF34, ice_aqc_opc_fw_logs_clear = 0xFF35 }; #endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/sys/dev/ice/ice_alloc.h b/sys/dev/ice/ice_alloc.h index b281958be793..bfcb376d45b2 100644 --- a/sys/dev/ice/ice_alloc.h +++ b/sys/dev/ice/ice_alloc.h @@ -1,50 +1,50 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_ALLOC_H_ #define _ICE_ALLOC_H_ /* Memory types */ enum ice_memset_type { ICE_NONDMA_MEM = 0, ICE_DMA_MEM }; /* Memcpy types */ enum ice_memcpy_type { ICE_NONDMA_TO_NONDMA = 0, ICE_NONDMA_TO_DMA, ICE_DMA_TO_DMA, ICE_DMA_TO_NONDMA }; #endif /* _ICE_ALLOC_H_ */ diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h index 0e04cab87be9..c29963d0a318 100644 --- a/sys/dev/ice/ice_bitops.h +++ b/sys/dev/ice/ice_bitops.h @@ -1,530 +1,540 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _ICE_BITOPS_H_ #define _ICE_BITOPS_H_ +#include "ice_defs.h" +#include "ice_osdep.h" + /* Define the size of the bitmap chunk */ typedef u32 ice_bitmap_t; +/* NOTE! + * Do not use any of the functions declared in this file + * on memory that was not declared with ice_declare_bitmap. + * Not following this rule might cause issues like split + * locks. + */ + /* Number of bits per bitmap chunk */ #define BITS_PER_CHUNK (BITS_PER_BYTE * sizeof(ice_bitmap_t)) /* Determine which chunk a bit belongs in */ #define BIT_CHUNK(nr) ((nr) / BITS_PER_CHUNK) /* How many chunks are required to store this many bits */ -#define BITS_TO_CHUNKS(sz) DIVIDE_AND_ROUND_UP((sz), BITS_PER_CHUNK) +#define BITS_TO_CHUNKS(sz) (((sz) + BITS_PER_CHUNK - 1) / BITS_PER_CHUNK) /* Which bit inside a chunk this bit corresponds to */ #define BIT_IN_CHUNK(nr) ((nr) % BITS_PER_CHUNK) /* How many bits are valid in the last chunk, assumes nr > 0 */ #define LAST_CHUNK_BITS(nr) ((((nr) - 1) % BITS_PER_CHUNK) + 1) /* Generate a bitmask of valid bits in the last chunk, assumes nr > 0 */ #define LAST_CHUNK_MASK(nr) (((ice_bitmap_t)~0) >> \ (BITS_PER_CHUNK - LAST_CHUNK_BITS(nr))) #define ice_declare_bitmap(A, sz) \ ice_bitmap_t A[BITS_TO_CHUNKS(sz)] static inline bool ice_is_bit_set_internal(u16 nr, const ice_bitmap_t *bitmap) { return !!(*bitmap & BIT(nr)); } /* * If atomic version of the bitops are required, each specific OS * implementation will need to implement OS/platform specific atomic * version of the functions below: * * ice_clear_bit_internal * ice_set_bit_internal * ice_test_and_clear_bit_internal * ice_test_and_set_bit_internal * * and define macro ICE_ATOMIC_BITOPS to overwrite the default non-atomic * implementation. */ static inline void ice_clear_bit_internal(u16 nr, ice_bitmap_t *bitmap) { *bitmap &= ~BIT(nr); } static inline void ice_set_bit_internal(u16 nr, ice_bitmap_t *bitmap) { *bitmap |= BIT(nr); } static inline bool ice_test_and_clear_bit_internal(u16 nr, ice_bitmap_t *bitmap) { if (ice_is_bit_set_internal(nr, bitmap)) { ice_clear_bit_internal(nr, bitmap); return true; } return false; } static inline bool ice_test_and_set_bit_internal(u16 nr, ice_bitmap_t *bitmap) { if (ice_is_bit_set_internal(nr, bitmap)) return true; ice_set_bit_internal(nr, bitmap); return false; } /** * ice_is_bit_set - Check state of a bit in a bitmap * @bitmap: the bitmap to check * @nr: the bit to check * * Returns true if bit nr of bitmap is set. False otherwise. Assumes that nr * is less than the size of the bitmap. */ static inline bool ice_is_bit_set(const ice_bitmap_t *bitmap, u16 nr) { return ice_is_bit_set_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /** * ice_clear_bit - Clear a bit in a bitmap * @bitmap: the bitmap to change * @nr: the bit to change * * Clears the bit nr in bitmap. Assumes that nr is less than the size of the * bitmap. */ static inline void ice_clear_bit(u16 nr, ice_bitmap_t *bitmap) { ice_clear_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /** * ice_set_bit - Set a bit in a bitmap * @bitmap: the bitmap to change * @nr: the bit to change * * Sets the bit nr in bitmap. Assumes that nr is less than the size of the * bitmap. */ static inline void ice_set_bit(u16 nr, ice_bitmap_t *bitmap) { ice_set_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /** * ice_test_and_clear_bit - Atomically clear a bit and return the old bit value * @nr: the bit to change * @bitmap: the bitmap to change * * Check and clear the bit nr in bitmap. 
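 * For example (illustrative): if bit 5 is currently set, * ice_test_and_clear_bit(5, bmp) returns true and clears the bit, so an * immediately repeated call returns false.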
The function assumes that nr is less than the size * of the bitmap. */ static inline bool ice_test_and_clear_bit(u16 nr, ice_bitmap_t *bitmap) { return ice_test_and_clear_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /** * ice_test_and_set_bit - Atomically set a bit and return the old bit value * @nr: the bit to change * @bitmap: the bitmap to change * * Check and set the bit nr in bitmap. Assumes that nr is less than the size of * the bitmap. */ static inline bool ice_test_and_set_bit(u16 nr, ice_bitmap_t *bitmap) { return ice_test_and_set_bit_internal(BIT_IN_CHUNK(nr), &bitmap[BIT_CHUNK(nr)]); } /** * ice_zero_bitmap - set bits of bitmap to zero. * @bmp: bitmap to set to zero * @size: Size of the bitmaps in bits * * Set all of the bits in a bitmap to zero. Note that this function assumes it * operates on an ice_bitmap_t which was declared using ice_declare_bitmap. It * will zero every bit in the last chunk, even if those bits are beyond the * size. */ static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size) { ice_memset(bmp, 0, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t), ICE_NONDMA_MEM); } /** * ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap * @dst: Destination bitmap that receives the result of the operation * @bmp1: The first bitmap to intersect * @bmp2: The second bitmap to intersect with the first * @size: Size of the bitmaps in bits * * This function performs a bitwise AND on two "source" bitmaps of the same size * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same * size as the "source" bitmaps to avoid buffer overflows. This function returns * a non-zero value if at least one bit location from both "source" bitmaps is * non-zero. */ static inline int ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, const ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t res = 0, mask; u16 i; /* Handle all but the last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) { dst[i] = bmp1[i] & bmp2[i]; res |= dst[i]; } /* We want to take care not to modify any bits outside of the bitmap * size, even in the destination bitmap. Thus, we won't directly * assign the last chunk, but instead use a bitmask to ensure we only * modify bits which are within the size, and leave any bits above the * size value alone. */ mask = LAST_CHUNK_MASK(size); dst[i] = (dst[i] & ~mask) | ((bmp1[i] & bmp2[i]) & mask); res |= dst[i] & mask; return res != 0; } /** * ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap * @dst: Destination bitmap that receives the result of the operation * @bmp1: The first bitmap of the OR operation * @bmp2: The second bitmap to OR with the first * @size: Size of the bitmaps in bits * * This function performs a bitwise OR on two "source" bitmaps of the same size * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same * size as the "source" bitmaps to avoid buffer overflows. */ static inline void ice_or_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, const ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t mask; u16 i; /* Handle all but last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) dst[i] = bmp1[i] | bmp2[i]; /* We want to only OR bits within the size. Furthermore, we also do * not want to modify destination bits which are beyond the specified * size. Use a bitmask to ensure that we only modify the bits that are * within the specified size. 
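 * * As a worked example (illustrative; ice_bitmap_t chunks are 32 bits wide): * for size = 40, LAST_CHUNK_BITS(40) is 8 and LAST_CHUNK_MASK(40) is * 0x000000FF, so only the low eight bits of the final chunk are rewritten * and any destination bits above bit 39 are left untouched.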
*/ mask = LAST_CHUNK_MASK(size); dst[i] = (dst[i] & ~mask) | ((bmp1[i] | bmp2[i]) & mask); } /** * ice_xor_bitmap - bitwise XOR 2 bitmaps and store result in dst bitmap * @dst: Destination bitmap that receives the result of the operation * @bmp1: The first bitmap of the XOR operation * @bmp2: The second bitmap to XOR with the first * @size: Size of the bitmaps in bits * * This function performs a bitwise XOR on two "source" bitmaps of the same size * and stores the result to "dst" bitmap. The "dst" bitmap must be of the same * size as the "source" bitmaps to avoid buffer overflows. */ static inline void ice_xor_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, const ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t mask; u16 i; /* Handle all but last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) dst[i] = bmp1[i] ^ bmp2[i]; /* We want to only XOR bits within the size. Furthermore, we also do * not want to modify destination bits which are beyond the specified * size. Use a bitmask to ensure that we only modify the bits that are * within the specified size. */ mask = LAST_CHUNK_MASK(size); dst[i] = (dst[i] & ~mask) | ((bmp1[i] ^ bmp2[i]) & mask); } /** * ice_andnot_bitmap - bitwise ANDNOT 2 bitmaps and store result in dst bitmap * @dst: Destination bitmap that receives the result of the operation * @bmp1: The first bitmap of the ANDNOT operation * @bmp2: The second bitmap of the ANDNOT operation * @size: Size of the bitmaps in bits * * This function performs a bitwise ANDNOT on two "source" bitmaps of the same * size, and stores the result to "dst" bitmap. The "dst" bitmap must be of the * same size as the "source" bitmaps to avoid buffer overflows. */ static inline void ice_andnot_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1, const ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t mask; u16 i; /* Handle all but last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) dst[i] = bmp1[i] & ~bmp2[i]; /* We want to only clear bits within the size. Furthermore, we also do * not want to modify destination bits which are beyond the specified * size. Use a bitmask to ensure that we only modify the bits that are * within the specified size. */ mask = LAST_CHUNK_MASK(size); dst[i] = (dst[i] & ~mask) | ((bmp1[i] & ~bmp2[i]) & mask); } /** * ice_find_next_bit - Find the index of the next set bit of a bitmap * @bitmap: the bitmap to scan * @size: the size in bits of the bitmap * @offset: the offset to start at * * Scans the bitmap and returns the index of the first set bit which is equal * to or after the specified offset. Will return size if no bits are set. */ static inline u16 ice_find_next_bit(const ice_bitmap_t *bitmap, u16 size, u16 offset) { u16 i, j; if (offset >= size) return size; /* Since the starting position may not be directly on a chunk * boundary, we need to be careful to handle the first chunk specially */ i = BIT_CHUNK(offset); if (bitmap[i] != 0) { u16 off = i * BITS_PER_CHUNK; for (j = offset % BITS_PER_CHUNK; j < BITS_PER_CHUNK; j++) { if (ice_is_bit_set(bitmap, off + j)) return min(size, (u16)(off + j)); } } /* Now we handle the remaining chunks, if any */ for (i++; i < BITS_TO_CHUNKS(size); i++) { if (bitmap[i] != 0) { u16 off = i * BITS_PER_CHUNK; for (j = 0; j < BITS_PER_CHUNK; j++) { if (ice_is_bit_set(bitmap, off + j)) return min(size, (u16)(off + j)); } } } return size; } /** * ice_find_first_bit - Find the index of the first set bit of a bitmap * @bitmap: the bitmap to scan * @size: the size in bits of the bitmap * * Scans the bitmap and returns the index of the first set bit. 
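 * For example (illustrative): on a bitmap where only bits 3 and 17 are set, * ice_find_first_bit returns 3 and ice_find_next_bit(bitmap, size, 4) * returns 17; the ice_for_each_set_bit macro below visits exactly those two * indices.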
It returns * size if no bits are set. */ static inline u16 ice_find_first_bit(const ice_bitmap_t *bitmap, u16 size) { return ice_find_next_bit(bitmap, size, 0); } #define ice_for_each_set_bit(_bitpos, _addr, _maxlen) \ for ((_bitpos) = ice_find_first_bit((_addr), (_maxlen)); \ (_bitpos) < (_maxlen); \ (_bitpos) = ice_find_next_bit((_addr), (_maxlen), (_bitpos) + 1)) /** * ice_is_any_bit_set - Return true if any bit in the bitmap is set * @bitmap: the bitmap to check * @size: the size of the bitmap * * Equivalent to checking if ice_find_first_bit returns a value less than the * bitmap size. */ static inline bool ice_is_any_bit_set(ice_bitmap_t *bitmap, u16 size) { return ice_find_first_bit(bitmap, size) < size; } /** * ice_cp_bitmap - copy bitmaps. * @dst: bitmap destination * @src: bitmap to copy from * @size: Size of the bitmaps in bits * * This function copies the bitmap from src to dst. Note that this function * assumes it is operating on a bitmap declared using ice_declare_bitmap. It * will copy the entire last chunk even if this contains bits beyond the size. */ static inline void ice_cp_bitmap(ice_bitmap_t *dst, ice_bitmap_t *src, u16 size) { ice_memcpy(dst, src, BITS_TO_CHUNKS(size) * sizeof(ice_bitmap_t), ICE_NONDMA_TO_NONDMA); } /** * ice_bitmap_set - set a number of bits in bitmap from a starting position * @dst: bitmap destination * @pos: first bit position to set * @num_bits: number of bits to set * * This function sets bits in a bitmap from pos to (pos + num_bits) - 1. * Note that this function assumes it is operating on a bitmap declared using * ice_declare_bitmap. */ static inline void ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits) { u16 i; for (i = pos; i < pos + num_bits; i++) ice_set_bit(i, dst); } /** * ice_bitmap_hweight - Hamming weight of bitmap * @bm: bitmap pointer * @size: size of bitmap (in bits) * * This function determines the number of set bits in a bitmap. * Note that this function assumes it is operating on a bitmap declared using * ice_declare_bitmap. */ static inline int ice_bitmap_hweight(ice_bitmap_t *bm, u16 size) { int count = 0; u16 bit = 0; while (size > (bit = ice_find_next_bit(bm, size, bit))) { count++; bit++; } return count; } /** * ice_cmp_bitmap - compares two bitmaps. * @bmp1: the bitmap to compare * @bmp2: the bitmap to compare with bmp1 * @size: Size of the bitmaps in bits * * This function compares two bitmaps and returns true if they are equal, * false otherwise. */ static inline bool ice_cmp_bitmap(ice_bitmap_t *bmp1, ice_bitmap_t *bmp2, u16 size) { ice_bitmap_t mask; u16 i; /* Handle all but last chunk */ for (i = 0; i < BITS_TO_CHUNKS(size) - 1; i++) if (bmp1[i] != bmp2[i]) return false; /* We want to only compare bits within the size */ mask = LAST_CHUNK_MASK(size); if ((bmp1[i] & mask) != (bmp2[i] & mask)) return false; return true; } /** * ice_bitmap_from_array32 - copies u32 array source into bitmap destination * @dst: the destination bitmap * @src: the source u32 array * @size: size of the bitmap (in bits) * * This function copies the src bitmap stored in a u32 array into the dst * bitmap stored as an ice_bitmap_t. 
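 * * As a worked example (illustrative): with size = 40, bits 0-31 of dst are * taken from src[0] and bits 32-39 from the low byte of src[1]; dst is * zeroed first, so the remaining bits of its last chunk end up cleared.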
*/ static inline void ice_bitmap_from_array32(ice_bitmap_t *dst, u32 *src, u16 size) { u32 remaining_bits, i; #define BITS_PER_U32 (sizeof(u32) * BITS_PER_BYTE) /* clear bitmap so we only have to set when iterating */ ice_zero_bitmap(dst, size); for (i = 0; i < (u32)(size / BITS_PER_U32); i++) { u32 bit_offset = i * BITS_PER_U32; u32 entry = src[i]; u32 j; for (j = 0; j < BITS_PER_U32; j++) { if (entry & BIT(j)) ice_set_bit((u16)(j + bit_offset), dst); } } /* still need to check the leftover bits (i.e. if size isn't evenly * divisible by BITS_PER_U32) */ remaining_bits = size % BITS_PER_U32; if (remaining_bits) { u32 bit_offset = i * BITS_PER_U32; u32 entry = src[i]; u32 j; for (j = 0; j < remaining_bits; j++) { if (entry & BIT(j)) ice_set_bit((u16)(j + bit_offset), dst); } } } #undef BIT_CHUNK #undef BIT_IN_CHUNK #undef LAST_CHUNK_BITS #undef LAST_CHUNK_MASK #endif /* _ICE_BITOPS_H_ */ diff --git a/sys/dev/ice/ice_common.c b/sys/dev/ice/ice_common.c index 3ae266b72d1f..c2efddeb4f7c 100644 --- a/sys/dev/ice/ice_common.c +++ b/sys/dev/ice/ice_common.c @@ -1,6330 +1,6469 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include "ice_common.h" #include "ice_sched.h" #include "ice_adminq_cmd.h" #include "ice_flow.h" #include "ice_switch.h" #define ICE_PF_RESET_WAIT_COUNT 300 -/** - * dump_phy_type - helper function that prints PHY type strings - * @hw: pointer to the HW structure - * @phy: 64 bit PHY type to decipher - * @i: bit index within phy - * @phy_string: string corresponding to bit i in phy - * @prefix: prefix string to differentiate multiple dumps - */ -static void -dump_phy_type(struct ice_hw *hw, u64 phy, u8 i, const char *phy_string, - const char *prefix) -{ - if (phy & BIT_ULL(i)) - ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", prefix, i, - phy_string); -} +static const char * const ice_link_mode_str_low[] = { + [0] = "100BASE_TX", + [1] = "100M_SGMII", + [2] = "1000BASE_T", + [3] = "1000BASE_SX", + [4] = "1000BASE_LX", + [5] = "1000BASE_KX", + [6] = "1G_SGMII", + [7] = "2500BASE_T", + [8] = "2500BASE_X", + [9] = "2500BASE_KX", + [10] = "5GBASE_T", + [11] = "5GBASE_KR", + [12] = "10GBASE_T", + [13] = "10G_SFI_DA", + [14] = "10GBASE_SR", + [15] = "10GBASE_LR", + [16] = "10GBASE_KR_CR1", + [17] = "10G_SFI_AOC_ACC", + [18] = "10G_SFI_C2C", + [19] = "25GBASE_T", + [20] = "25GBASE_CR", + [21] = "25GBASE_CR_S", + [22] = "25GBASE_CR1", + [23] = "25GBASE_SR", + [24] = "25GBASE_LR", + [25] = "25GBASE_KR", + [26] = "25GBASE_KR_S", + [27] = "25GBASE_KR1", + [28] = "25G_AUI_AOC_ACC", + [29] = "25G_AUI_C2C", + [30] = "40GBASE_CR4", + [31] = "40GBASE_SR4", + [32] = "40GBASE_LR4", + [33] = "40GBASE_KR4", + [34] = "40G_XLAUI_AOC_ACC", + [35] = "40G_XLAUI", + [36] = "50GBASE_CR2", + [37] = "50GBASE_SR2", + [38] = "50GBASE_LR2", + [39] = "50GBASE_KR2", + [40] = "50G_LAUI2_AOC_ACC", + [41] = "50G_LAUI2", + [42] = "50G_AUI2_AOC_ACC", + [43] = "50G_AUI2", + [44] = "50GBASE_CP", + [45] = "50GBASE_SR", + [46] = "50GBASE_FR", + [47] = "50GBASE_LR", + [48] = "50GBASE_KR_PAM4", + [49] = "50G_AUI1_AOC_ACC", + [50] = "50G_AUI1", + [51] = "100GBASE_CR4", + [52] = "100GBASE_SR4", + [53] = "100GBASE_LR4", + [54] = "100GBASE_KR4", + [55] = "100G_CAUI4_AOC_ACC", + [56] = "100G_CAUI4", + [57] = "100G_AUI4_AOC_ACC", + [58] = "100G_AUI4", + [59] = "100GBASE_CR_PAM4", + [60] = "100GBASE_KR_PAM4", + [61] = "100GBASE_CP2", + [62] = "100GBASE_SR2", + [63] = "100GBASE_DR", +}; + +static const char * const ice_link_mode_str_high[] = { + [0] = "100GBASE_KR2_PAM4", + [1] = "100G_CAUI2_AOC_ACC", + [2] = "100G_CAUI2", + [3] = "100G_AUI2_AOC_ACC", + [4] = "100G_AUI2", +}; /** - * ice_dump_phy_type_low - helper function to dump phy_type_low + * ice_dump_phy_type - helper function to dump phy_type * @hw: pointer to the HW structure * @low: 64 bit value for phy_type_low + * @high: 64 bit value for phy_type_high * @prefix: prefix string to differentiate multiple dumps */ static void -ice_dump_phy_type_low(struct ice_hw *hw, u64 low, const char *prefix) +ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix) { + u32 i; + ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, (unsigned long long)low); - dump_phy_type(hw, low, 0, "100BASE_TX", prefix); - dump_phy_type(hw, low, 1, "100M_SGMII", prefix); - dump_phy_type(hw, low, 2, "1000BASE_T", prefix); - dump_phy_type(hw, low, 3, "1000BASE_SX", prefix); - dump_phy_type(hw, low, 4, "1000BASE_LX", prefix); - dump_phy_type(hw, low, 5, "1000BASE_KX", prefix); - dump_phy_type(hw, low, 6, "1G_SGMII", prefix); - dump_phy_type(hw, low, 7, "2500BASE_T", prefix); - dump_phy_type(hw, low, 8, "2500BASE_X", prefix); - dump_phy_type(hw, low, 9, 
"2500BASE_KX", prefix); - dump_phy_type(hw, low, 10, "5GBASE_T", prefix); - dump_phy_type(hw, low, 11, "5GBASE_KR", prefix); - dump_phy_type(hw, low, 12, "10GBASE_T", prefix); - dump_phy_type(hw, low, 13, "10G_SFI_DA", prefix); - dump_phy_type(hw, low, 14, "10GBASE_SR", prefix); - dump_phy_type(hw, low, 15, "10GBASE_LR", prefix); - dump_phy_type(hw, low, 16, "10GBASE_KR_CR1", prefix); - dump_phy_type(hw, low, 17, "10G_SFI_AOC_ACC", prefix); - dump_phy_type(hw, low, 18, "10G_SFI_C2C", prefix); - dump_phy_type(hw, low, 19, "25GBASE_T", prefix); - dump_phy_type(hw, low, 20, "25GBASE_CR", prefix); - dump_phy_type(hw, low, 21, "25GBASE_CR_S", prefix); - dump_phy_type(hw, low, 22, "25GBASE_CR1", prefix); - dump_phy_type(hw, low, 23, "25GBASE_SR", prefix); - dump_phy_type(hw, low, 24, "25GBASE_LR", prefix); - dump_phy_type(hw, low, 25, "25GBASE_KR", prefix); - dump_phy_type(hw, low, 26, "25GBASE_KR_S", prefix); - dump_phy_type(hw, low, 27, "25GBASE_KR1", prefix); - dump_phy_type(hw, low, 28, "25G_AUI_AOC_ACC", prefix); - dump_phy_type(hw, low, 29, "25G_AUI_C2C", prefix); - dump_phy_type(hw, low, 30, "40GBASE_CR4", prefix); - dump_phy_type(hw, low, 31, "40GBASE_SR4", prefix); - dump_phy_type(hw, low, 32, "40GBASE_LR4", prefix); - dump_phy_type(hw, low, 33, "40GBASE_KR4", prefix); - dump_phy_type(hw, low, 34, "40G_XLAUI_AOC_ACC", prefix); - dump_phy_type(hw, low, 35, "40G_XLAUI", prefix); - dump_phy_type(hw, low, 36, "50GBASE_CR2", prefix); - dump_phy_type(hw, low, 37, "50GBASE_SR2", prefix); - dump_phy_type(hw, low, 38, "50GBASE_LR2", prefix); - dump_phy_type(hw, low, 39, "50GBASE_KR2", prefix); - dump_phy_type(hw, low, 40, "50G_LAUI2_AOC_ACC", prefix); - dump_phy_type(hw, low, 41, "50G_LAUI2", prefix); - dump_phy_type(hw, low, 42, "50G_AUI2_AOC_ACC", prefix); - dump_phy_type(hw, low, 43, "50G_AUI2", prefix); - dump_phy_type(hw, low, 44, "50GBASE_CP", prefix); - dump_phy_type(hw, low, 45, "50GBASE_SR", prefix); - dump_phy_type(hw, low, 46, "50GBASE_FR", prefix); - dump_phy_type(hw, low, 47, "50GBASE_LR", prefix); - dump_phy_type(hw, low, 48, "50GBASE_KR_PAM4", prefix); - dump_phy_type(hw, low, 49, "50G_AUI1_AOC_ACC", prefix); - dump_phy_type(hw, low, 50, "50G_AUI1", prefix); - dump_phy_type(hw, low, 51, "100GBASE_CR4", prefix); - dump_phy_type(hw, low, 52, "100GBASE_SR4", prefix); - dump_phy_type(hw, low, 53, "100GBASE_LR4", prefix); - dump_phy_type(hw, low, 54, "100GBASE_KR4", prefix); - dump_phy_type(hw, low, 55, "100G_CAUI4_AOC_ACC", prefix); - dump_phy_type(hw, low, 56, "100G_CAUI4", prefix); - dump_phy_type(hw, low, 57, "100G_AUI4_AOC_ACC", prefix); - dump_phy_type(hw, low, 58, "100G_AUI4", prefix); - dump_phy_type(hw, low, 59, "100GBASE_CR_PAM4", prefix); - dump_phy_type(hw, low, 60, "100GBASE_KR_PAM4", prefix); - dump_phy_type(hw, low, 61, "100GBASE_CP2", prefix); - dump_phy_type(hw, low, 62, "100GBASE_SR2", prefix); - dump_phy_type(hw, low, 63, "100GBASE_DR", prefix); -} - -/** - * ice_dump_phy_type_high - helper function to dump phy_type_high - * @hw: pointer to the HW structure - * @high: 64 bit value for phy_type_high - * @prefix: prefix string to differentiate multiple dumps - */ -static void -ice_dump_phy_type_high(struct ice_hw *hw, u64 high, const char *prefix) -{ + for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) { + if (low & BIT_ULL(i)) + ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", + prefix, i, ice_link_mode_str_low[i]); + } + ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, (unsigned long long)high); - dump_phy_type(hw, high, 0, 
"100GBASE_KR2_PAM4", prefix); - dump_phy_type(hw, high, 1, "100G_CAUI2_AOC_ACC", prefix); - dump_phy_type(hw, high, 2, "100G_CAUI2", prefix); - dump_phy_type(hw, high, 3, "100G_AUI2_AOC_ACC", prefix); - dump_phy_type(hw, high, 4, "100G_AUI2", prefix); + for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) { + if (high & BIT_ULL(i)) + ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", + prefix, i, ice_link_mode_str_high[i]); + } } /** * ice_set_mac_type - Sets MAC type * @hw: pointer to the HW structure * * This function sets the MAC type of the adapter based on the * vendor ID and device ID stored in the HW structure. */ enum ice_status ice_set_mac_type(struct ice_hw *hw) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (hw->vendor_id != ICE_INTEL_VENDOR_ID) return ICE_ERR_DEVICE_NOT_SUPPORTED; switch (hw->device_id) { case ICE_DEV_ID_E810C_BACKPLANE: case ICE_DEV_ID_E810C_QSFP: case ICE_DEV_ID_E810C_SFP: case ICE_DEV_ID_E810_XXV_BACKPLANE: case ICE_DEV_ID_E810_XXV_QSFP: case ICE_DEV_ID_E810_XXV_SFP: hw->mac_type = ICE_MAC_E810; break; case ICE_DEV_ID_E822C_10G_BASE_T: case ICE_DEV_ID_E822C_BACKPLANE: case ICE_DEV_ID_E822C_QSFP: case ICE_DEV_ID_E822C_SFP: case ICE_DEV_ID_E822C_SGMII: case ICE_DEV_ID_E822L_10G_BASE_T: case ICE_DEV_ID_E822L_BACKPLANE: case ICE_DEV_ID_E822L_SFP: case ICE_DEV_ID_E822L_SGMII: case ICE_DEV_ID_E823L_10G_BASE_T: case ICE_DEV_ID_E823L_1GBE: case ICE_DEV_ID_E823L_BACKPLANE: case ICE_DEV_ID_E823L_QSFP: case ICE_DEV_ID_E823L_SFP: case ICE_DEV_ID_E823C_10G_BASE_T: case ICE_DEV_ID_E823C_BACKPLANE: case ICE_DEV_ID_E823C_QSFP: case ICE_DEV_ID_E823C_SFP: case ICE_DEV_ID_E823C_SGMII: hw->mac_type = ICE_MAC_GENERIC; break; default: hw->mac_type = ICE_MAC_UNKNOWN; break; } ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type); return ICE_SUCCESS; } /** * ice_is_e810 * @hw: pointer to the hardware structure * * returns true if the device is E810 based, false if not. */ bool ice_is_e810(struct ice_hw *hw) { return hw->mac_type == ICE_MAC_E810; } /** * ice_is_e810t * @hw: pointer to the hardware structure * * returns true if the device is E810T based, false if not. */ bool ice_is_e810t(struct ice_hw *hw) { switch (hw->device_id) { case ICE_DEV_ID_E810C_SFP: - if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T || - hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2) + switch (hw->subsystem_device_id) { + case ICE_SUBDEV_ID_E810T: + case ICE_SUBDEV_ID_E810T2: + case ICE_SUBDEV_ID_E810T3: + case ICE_SUBDEV_ID_E810T4: + case ICE_SUBDEV_ID_E810T5: + case ICE_SUBDEV_ID_E810T7: return true; + } break; case ICE_DEV_ID_E810C_QSFP: - if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2) + switch (hw->subsystem_device_id) { + case ICE_SUBDEV_ID_E810T2: + case ICE_SUBDEV_ID_E810T5: + case ICE_SUBDEV_ID_E810T6: return true; + } break; default: break; } return false; } +/** + * ice_is_e823 + * @hw: pointer to the hardware structure + * + * returns true if the device is E823-L or E823-C based, false if not. 
+ */ +bool ice_is_e823(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_SFP: + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_SGMII: + return true; + default: + return false; + } +} + /** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure * * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port * configuration, flow director filters, etc.). */ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /** * ice_aq_manage_mac_read - manage MAC address read command * @hw: pointer to the HW struct * @buf: a virtual buffer to hold the manage MAC read response * @buf_size: Size of the virtual buffer * @cd: pointer to command details structure or NULL * * This function is used to return per PF station MAC address (0x0107). * NOTE: Upon successful completion of this command, MAC address information * is returned in user specified buffer. Please interpret user specified * buffer as "manage_mac_read" response. * Response such as various MAC addresses are stored in HW struct (port.mac) * ice_discover_dev_caps is expected to be called before this function is * called. */ enum ice_status ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_manage_mac_read_resp *resp; struct ice_aqc_manage_mac_read *cmd; struct ice_aq_desc desc; enum ice_status status; u16 flags; u8 i; cmd = &desc.params.mac_read; if (buf_size < sizeof(*resp)) return ICE_ERR_BUF_TOO_SHORT; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (status) return status; resp = (struct ice_aqc_manage_mac_read_resp *)buf; flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M; if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); return ICE_ERR_CFG; } /* A single port can report up to two (LAN and WoL) addresses */ for (i = 0; i < cmd->num_addr; i++) if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { ice_memcpy(hw->port_info->mac.lan_addr, resp[i].mac_addr, ETH_ALEN, - ICE_DMA_TO_NONDMA); + ICE_NONDMA_TO_NONDMA); ice_memcpy(hw->port_info->mac.perm_addr, resp[i].mac_addr, - ETH_ALEN, ICE_DMA_TO_NONDMA); + ETH_ALEN, ICE_NONDMA_TO_NONDMA); break; } return ICE_SUCCESS; } /** * ice_aq_get_phy_caps - returns PHY capabilities * @pi: port information structure * @qual_mods: report qualified modules * @report_mode: report mode capabilities * @pcaps: structure for PHY capabilities to be filled * @cd: pointer to command details structure or NULL * * Returns the various PHY capabilities supported on the Port (0x0600) */ enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd) { struct ice_aqc_get_phy_caps *cmd; u16 pcaps_size = sizeof(*pcaps); struct ice_aq_desc desc; enum ice_status status; const char *prefix; struct ice_hw *hw; cmd = &desc.params.get_phy; if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) return ICE_ERR_PARAM; hw = pi->hw; if (report_mode == ICE_AQC_REPORT_DFLT_CFG && 
!ice_fw_supports_report_dflt_cfg(hw)) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); if (qual_mods) cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM); cmd->param0 |= CPU_TO_LE16(report_mode); + status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd); ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n"); - if (report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) + switch (report_mode) { + case ICE_AQC_REPORT_TOPO_CAP_MEDIA: prefix = "phy_caps_media"; - else if (report_mode == ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA) + break; + case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA: prefix = "phy_caps_no_media"; - else if (report_mode == ICE_AQC_REPORT_ACTIVE_CFG) + break; + case ICE_AQC_REPORT_ACTIVE_CFG: prefix = "phy_caps_active"; - else if (report_mode == ICE_AQC_REPORT_DFLT_CFG) + break; + case ICE_AQC_REPORT_DFLT_CFG: prefix = "phy_caps_default"; - else + break; + default: prefix = "phy_caps_invalid"; + } - ice_dump_phy_type_low(hw, LE64_TO_CPU(pcaps->phy_type_low), prefix); - ice_dump_phy_type_high(hw, LE64_TO_CPU(pcaps->phy_type_high), prefix); + ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low), + LE64_TO_CPU(pcaps->phy_type_high), prefix); ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n", prefix, report_mode); ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps); ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix, pcaps->low_power_ctrl_an); ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix, pcaps->eee_cap); ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix, pcaps->eeer_value); ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix, pcaps->link_fec_options); ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n", prefix, pcaps->module_compliance_enforcement); ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n", prefix, pcaps->extended_compliance_code); ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix, pcaps->module_type[0]); ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix, pcaps->module_type[1]); ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix, pcaps->module_type[2]); if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low); pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high); ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type, sizeof(pi->phy.link_info.module_type), ICE_NONDMA_TO_NONDMA); } return status; } /** * ice_aq_get_netlist_node * @hw: pointer to the hw struct * @cmd: get_link_topo AQ structure * @node_part_number: output node part number if node found * @node_handle: output node handle parameter if node found */ enum ice_status ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, u8 *node_part_number, u16 *node_handle) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); desc.params.get_link_topo = *cmd; if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) return ICE_ERR_NOT_SUPPORTED; if (node_handle) *node_handle = LE16_TO_CPU(desc.params.get_link_topo.addr.handle); if (node_part_number) *node_part_number = desc.params.get_link_topo.node_part_num; return ICE_SUCCESS; } #define MAX_NETLIST_SIZE 10 /** * ice_find_netlist_node * @hw: pointer to the hw struct * @node_type_ctx: type of netlist node to look for * @node_part_number: node part number to look for * @node_handle: output parameter if node found - optional * * Find and return the node handle for a 
given node type and part number in the * netlist. When found ICE_SUCCESS is returned, ICE_ERR_DOES_NOT_EXIST - * otherwise. If @node_handle provided, it would be set to found node handle. + * otherwise. If node_handle is provided, it is set to the found node handle. */ enum ice_status ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number, u16 *node_handle) { struct ice_aqc_get_link_topo cmd; u8 rec_node_part_number; - enum ice_status status; u16 rec_node_handle; u8 idx; for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) { + enum ice_status status; + memset(&cmd, 0, sizeof(cmd)); cmd.addr.topo_params.node_type_ctx = (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S); cmd.addr.topo_params.index = idx; status = ice_aq_get_netlist_node(hw, &cmd, &rec_node_part_number, &rec_node_handle); if (status) return status; if (rec_node_part_number == node_part_number) { if (node_handle) *node_handle = rec_node_handle; return ICE_SUCCESS; } } return ICE_ERR_DOES_NOT_EXIST; } /** * ice_is_media_cage_present * @pi: port information structure * * Returns true if media cage is present, else false. If no cage, then * media type is backplane or BASE-T. */ static bool ice_is_media_cage_present(struct ice_port_info *pi) { struct ice_aqc_get_link_topo *cmd; struct ice_aq_desc desc; cmd = &desc.params.get_link_topo; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); cmd->addr.topo_params.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S); /* set node type */ cmd->addr.topo_params.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE); /* Node type cage can be used to determine if cage is present. If the AQC * returns an error (ENOENT), then no cage is present. If no cage is present, * the connection type is backplane or BASE-T. */ return ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL); } /** * ice_get_media_type - Gets media type * @pi: port information structure */ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) { struct ice_link_status *hw_link_info; if (!pi) return ICE_MEDIA_UNKNOWN; hw_link_info = &pi->phy.link_info; if (hw_link_info->phy_type_low && hw_link_info->phy_type_high) /* If more than one media type is selected, report unknown */ return ICE_MEDIA_UNKNOWN; if (hw_link_info->phy_type_low) { /* 1G SGMII is a special case where some DA cable PHYs * may show this as an option when it really shouldn't * be since SGMII is meant to be between a MAC and a PHY * in a backplane. 
Try to detect this case and handle it */ if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII && (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE || hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE)) return ICE_MEDIA_DA; switch (hw_link_info->phy_type_low) { case ICE_PHY_TYPE_LOW_1000BASE_SX: case ICE_PHY_TYPE_LOW_1000BASE_LX: case ICE_PHY_TYPE_LOW_10GBASE_SR: case ICE_PHY_TYPE_LOW_10GBASE_LR: - case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: case ICE_PHY_TYPE_LOW_40GBASE_SR4: case ICE_PHY_TYPE_LOW_40GBASE_LR4: case ICE_PHY_TYPE_LOW_50GBASE_SR2: case ICE_PHY_TYPE_LOW_50GBASE_LR2: case ICE_PHY_TYPE_LOW_50GBASE_SR: case ICE_PHY_TYPE_LOW_50GBASE_FR: case ICE_PHY_TYPE_LOW_50GBASE_LR: case ICE_PHY_TYPE_LOW_100GBASE_SR4: case ICE_PHY_TYPE_LOW_100GBASE_LR4: case ICE_PHY_TYPE_LOW_100GBASE_SR2: case ICE_PHY_TYPE_LOW_100GBASE_DR: return ICE_MEDIA_FIBER; case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: return ICE_MEDIA_FIBER; case ICE_PHY_TYPE_LOW_100BASE_TX: case ICE_PHY_TYPE_LOW_1000BASE_T: case ICE_PHY_TYPE_LOW_2500BASE_T: case ICE_PHY_TYPE_LOW_5GBASE_T: case ICE_PHY_TYPE_LOW_10GBASE_T: case ICE_PHY_TYPE_LOW_25GBASE_T: return ICE_MEDIA_BASET; case ICE_PHY_TYPE_LOW_10G_SFI_DA: case ICE_PHY_TYPE_LOW_25GBASE_CR: case ICE_PHY_TYPE_LOW_25GBASE_CR_S: case ICE_PHY_TYPE_LOW_25GBASE_CR1: case ICE_PHY_TYPE_LOW_40GBASE_CR4: case ICE_PHY_TYPE_LOW_50GBASE_CR2: case ICE_PHY_TYPE_LOW_50GBASE_CP: case ICE_PHY_TYPE_LOW_100GBASE_CR4: case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: case ICE_PHY_TYPE_LOW_100GBASE_CP2: return ICE_MEDIA_DA; case ICE_PHY_TYPE_LOW_25G_AUI_C2C: case ICE_PHY_TYPE_LOW_40G_XLAUI: case ICE_PHY_TYPE_LOW_50G_LAUI2: case ICE_PHY_TYPE_LOW_50G_AUI2: case ICE_PHY_TYPE_LOW_50G_AUI1: case ICE_PHY_TYPE_LOW_100G_AUI4: case ICE_PHY_TYPE_LOW_100G_CAUI4: if (ice_is_media_cage_present(pi)) return ICE_MEDIA_AUI; /* fall-through */ case ICE_PHY_TYPE_LOW_1000BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_X: case ICE_PHY_TYPE_LOW_5GBASE_KR: case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_KR: case ICE_PHY_TYPE_LOW_25GBASE_KR1: case ICE_PHY_TYPE_LOW_25GBASE_KR_S: case ICE_PHY_TYPE_LOW_40GBASE_KR4: case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: case ICE_PHY_TYPE_LOW_50GBASE_KR2: case ICE_PHY_TYPE_LOW_100GBASE_KR4: case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: return ICE_MEDIA_BACKPLANE; } } else { switch (hw_link_info->phy_type_high) { case ICE_PHY_TYPE_HIGH_100G_AUI2: case ICE_PHY_TYPE_HIGH_100G_CAUI2: if (ice_is_media_cage_present(pi)) return ICE_MEDIA_AUI; /* fall-through */ case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: return ICE_MEDIA_BACKPLANE; case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: return ICE_MEDIA_FIBER; } } return ICE_MEDIA_UNKNOWN; } +#define ice_get_link_status_datalen(hw) ICE_GET_LINK_STATUS_DATALEN_V1 + /** * ice_aq_get_link_info * @pi: port information structure * @ena_lse: enable/disable LinkStatusEvent reporting * @link: pointer to link status structure - optional * @cd: pointer to command details structure or NULL * * Get Link Status (0x607). 
Returns the link status of the adapter. */ enum ice_status ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd) { struct ice_aqc_get_link_status_data link_data = { 0 }; struct ice_aqc_get_link_status *resp; struct ice_link_status *li_old, *li; enum ice_media_type *hw_media_type; struct ice_fc_info *hw_fc_info; bool tx_pause, rx_pause; struct ice_aq_desc desc; enum ice_status status; struct ice_hw *hw; u16 cmd_flags; if (!pi) return ICE_ERR_PARAM; hw = pi->hw; li_old = &pi->phy.link_info_old; hw_media_type = &pi->phy.media_type; li = &pi->phy.link_info; hw_fc_info = &pi->fc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS; resp = &desc.params.get_link_status; resp->cmd_flags = CPU_TO_LE16(cmd_flags); resp->lport_num = pi->lport; - status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd); - + status = ice_aq_send_cmd(hw, &desc, &link_data, + ice_get_link_status_datalen(hw), cd); if (status != ICE_SUCCESS) return status; /* save off old link status information */ *li_old = *li; /* update current link status information */ li->link_speed = LE16_TO_CPU(link_data.link_speed); li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low); li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high); *hw_media_type = ice_get_media_type(pi); li->link_info = link_data.link_info; li->link_cfg_err = link_data.link_cfg_err; li->an_info = link_data.an_info; li->ext_info = link_data.ext_info; li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size); li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; li->topo_media_conflict = link_data.topo_media_conflict; li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M | ICE_AQ_CFG_PACING_TYPE_M); /* update fc info */ tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX); if (tx_pause && rx_pause) hw_fc_info->current_mode = ICE_FC_FULL; else if (tx_pause) hw_fc_info->current_mode = ICE_FC_TX_PAUSE; else if (rx_pause) hw_fc_info->current_mode = ICE_FC_RX_PAUSE; else hw_fc_info->current_mode = ICE_FC_NONE; li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED)); ice_debug(hw, ICE_DBG_LINK, "get link info\n"); ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed); ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", (unsigned long long)li->phy_type_low); ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", (unsigned long long)li->phy_type_high); ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type); ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info); ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err); ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info); ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info); ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info); ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena); ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n", li->max_frame_size); ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing); /* save link status information */ if (link) *link = *li; /* flag cleared so calling functions don't call AQ again */ pi->phy.get_link_info = false; return ICE_SUCCESS; } /** * ice_fill_tx_timer_and_fc_thresh * @hw: pointer to the HW struct * @cmd: pointer to MAC cfg structure * * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command * descriptor */ static void ice_fill_tx_timer_and_fc_thresh(struct 
ice_hw *hw, struct ice_aqc_set_mac_cfg *cmd) { u16 fc_thres_val, tx_timer_val; u32 val; /* We read back the transmit timer and fc threshold value of * LFC. Thus, we will use index = * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. * * Also, because we are operating on transmit timer and fc * threshold of LFC, we don't turn on any bit in tx_tmr_priority */ #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX /* Retrieve the transmit timer */ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC)); tx_timer_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M; cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val); /* Retrieve the fc threshold */ val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC)); fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M; cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val); } /** * ice_aq_set_mac_cfg * @hw: pointer to the HW struct * @max_frame_size: Maximum Frame Size to be supported * @auto_drop: Tell HW to drop packets if TC queue is blocked * @cd: pointer to command details structure or NULL * * Set MAC configuration (0x0603) */ enum ice_status ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_cfg *cmd; struct ice_aq_desc desc; cmd = &desc.params.set_mac_cfg; if (max_frame_size == 0) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); cmd->max_frame_size = CPU_TO_LE16(max_frame_size); if (ice_is_fw_auto_drop_supported(hw) && auto_drop) cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS; ice_fill_tx_timer_and_fc_thresh(hw, cmd); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_init_fltr_mgmt_struct - initializes filter management list and locks * @hw: pointer to the HW struct */ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) { struct ice_switch_info *sw; enum ice_status status; hw->switch_info = (struct ice_switch_info *) ice_malloc(hw, sizeof(*hw->switch_info)); sw = hw->switch_info; if (!sw) return ICE_ERR_NO_MEMORY; INIT_LIST_HEAD(&sw->vsi_list_map_head); sw->prof_res_bm_init = 0; status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list); if (status) { ice_free(hw, hw->switch_info); return status; } return ICE_SUCCESS; } /** * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct * @hw: pointer to the HW struct * @sw: pointer to switch info struct for which function clears filters */ static void ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw) { struct ice_vsi_list_map_info *v_pos_map; struct ice_vsi_list_map_info *v_tmp_map; struct ice_sw_recipe *recps; u8 i; if (!sw) return; LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, ice_vsi_list_map_info, list_entry) { LIST_DEL(&v_pos_map->list_entry); ice_free(hw, v_pos_map); } recps = sw->recp_list; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { struct ice_recp_grp_entry *rg_entry, *tmprg_entry; recps[i].root_rid = i; LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry, &recps[i].rg_list, ice_recp_grp_entry, l_entry) { LIST_DEL(&rg_entry->l_entry); ice_free(hw, rg_entry); } if (recps[i].adv_rule) { struct ice_adv_fltr_mgmt_list_entry *tmp_entry; struct ice_adv_fltr_mgmt_list_entry *lst_itr; ice_destroy_lock(&recps[i].filt_rule_lock); LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, &recps[i].filt_rules, ice_adv_fltr_mgmt_list_entry, list_entry) { LIST_DEL(&lst_itr->list_entry); ice_free(hw, lst_itr->lkups); ice_free(hw, lst_itr); } } else { struct ice_fltr_mgmt_list_entry 
*lst_itr, *tmp_entry; ice_destroy_lock(&recps[i].filt_rule_lock); LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, &recps[i].filt_rules, ice_fltr_mgmt_list_entry, list_entry) { LIST_DEL(&lst_itr->list_entry); ice_free(hw, lst_itr); } } if (recps[i].root_buf) ice_free(hw, recps[i].root_buf); } ice_rm_sw_replay_rule_info(hw, sw); ice_free(hw, sw->recp_list); ice_free(hw, sw); } /** * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks * @hw: pointer to the HW struct */ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) { ice_cleanup_fltr_mgmt_single(hw, hw->switch_info); } /** * ice_get_itr_intrl_gran * @hw: pointer to the HW struct * * Determines the ITR/INTRL granularities based on the maximum aggregate * bandwidth according to the device's configuration during power-on. */ static void ice_get_itr_intrl_gran(struct ice_hw *hw) { u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> GL_PWR_MODE_CTL_CAR_MAX_BW_S; switch (max_agg_bw) { case ICE_MAX_AGG_BW_200G: case ICE_MAX_AGG_BW_100G: case ICE_MAX_AGG_BW_50G: hw->itr_gran = ICE_ITR_GRAN_ABOVE_25; hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25; break; case ICE_MAX_AGG_BW_25G: hw->itr_gran = ICE_ITR_GRAN_MAX_25; hw->intrl_gran = ICE_INTRL_GRAN_MAX_25; break; } } /** * ice_print_rollback_msg - print FW rollback message * @hw: pointer to the hardware structure */ void ice_print_rollback_msg(struct ice_hw *hw) { char nvm_str[ICE_NVM_VER_LEN] = { 0 }; struct ice_orom_info *orom; struct ice_nvm_info *nvm; orom = &hw->flash.orom; nvm = &hw->flash.nvm; SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor, nvm->eetrack, orom->major, orom->build, orom->patch); ice_warn(hw, "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n", nvm_str, hw->fw_maj_ver, hw->fw_min_ver); } /** * ice_set_umac_shared * @hw: pointer to the hw struct * * Set boolean flag to allow unicast MAC sharing */ void ice_set_umac_shared(struct ice_hw *hw) { hw->umac_shared = true; } /** * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ enum ice_status ice_init_hw(struct ice_hw *hw) { struct ice_aqc_get_phy_caps_data *pcaps; enum ice_status status; u16 mac_buf_len; void *mac_buf; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Set MAC type based on DeviceID */ status = ice_set_mac_type(hw); if (status) return status; hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & PF_FUNC_RID_FUNCTION_NUMBER_M) >> PF_FUNC_RID_FUNCTION_NUMBER_S; status = ice_reset(hw, ICE_RESET_PFR); if (status) return status; ice_get_itr_intrl_gran(hw); status = ice_create_all_ctrlq(hw); if (status) goto err_unroll_cqinit; ice_fwlog_set_support_ena(hw); status = ice_fwlog_set(hw, &hw->fwlog_cfg); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n", status); } else { if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) { status = ice_fwlog_register(hw); if (status) ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n", status); } else { status = ice_fwlog_unregister(hw); if (status) ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n", status); } } status = ice_init_nvm(hw); if (status) goto err_unroll_cqinit; if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK) ice_print_rollback_msg(hw); status = ice_clear_pf_cfg(hw); if (status) goto err_unroll_cqinit; ice_clear_pxe_mode(hw); status = ice_get_caps(hw); if (status) goto err_unroll_cqinit; hw->port_info = (struct ice_port_info *) ice_malloc(hw, sizeof(*hw->port_info)); if (!hw->port_info) { status = ICE_ERR_NO_MEMORY; goto err_unroll_cqinit; } /* set the back pointer to HW */ hw->port_info->hw = hw; /* Initialize port_info struct with switch configuration data */ status = ice_get_initial_sw_cfg(hw); if (status) goto err_unroll_alloc; hw->evb_veb = true; /* Query the allocated resources for Tx scheduler */ status = ice_sched_query_res_alloc(hw); if (status) { ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n"); goto err_unroll_alloc; } ice_sched_get_psm_clk_freq(hw); /* Initialize port_info struct with scheduler data */ status = ice_sched_init_port(hw->port_info); if (status) goto err_unroll_sched; pcaps = (struct ice_aqc_get_phy_caps_data *) ice_malloc(hw, sizeof(*pcaps)); if (!pcaps) { status = ICE_ERR_NO_MEMORY; goto err_unroll_sched; } /* Initialize port_info struct with PHY capabilities */ status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); ice_free(hw, pcaps); if (status) ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n", status); /* Initialize port_info struct with link information */ status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); if (status) goto err_unroll_sched; /* need a valid SW entry point to build a Tx tree */ if (!hw->sw_entry_point_layer) { ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); status = ICE_ERR_CFG; goto err_unroll_sched; } INIT_LIST_HEAD(&hw->agg_list); /* Initialize max burst size */ if (!hw->max_burst_size) ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE); status = ice_init_fltr_mgmt_struct(hw); if (status) goto 
err_unroll_sched; /* Get MAC information */ /* A single port can report up to two (LAN and WoL) addresses */ mac_buf = ice_calloc(hw, 2, sizeof(struct ice_aqc_manage_mac_read_resp)); mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { status = ICE_ERR_NO_MEMORY; goto err_unroll_fltr_mgmt_struct; } status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL); ice_free(hw, mac_buf); if (status) goto err_unroll_fltr_mgmt_struct; /* enable jumbo frame support at MAC level */ status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false, NULL); if (status) goto err_unroll_fltr_mgmt_struct; status = ice_init_hw_tbls(hw); if (status) goto err_unroll_fltr_mgmt_struct; ice_init_lock(&hw->tnl_lock); return ICE_SUCCESS; err_unroll_fltr_mgmt_struct: ice_cleanup_fltr_mgmt_struct(hw); err_unroll_sched: ice_sched_cleanup_all(hw); err_unroll_alloc: ice_free(hw, hw->port_info); hw->port_info = NULL; err_unroll_cqinit: ice_destroy_all_ctrlq(hw); return status; } /** * ice_deinit_hw - unroll initialization operations done by ice_init_hw * @hw: pointer to the hardware structure * * This should be called only during nominal operation, not as a result of * ice_init_hw() failing since ice_init_hw() will take care of unrolling * applicable initializations if it fails for any reason. */ void ice_deinit_hw(struct ice_hw *hw) { ice_cleanup_fltr_mgmt_struct(hw); ice_sched_cleanup_all(hw); ice_sched_clear_agg(hw); ice_free_seg(hw); ice_free_hw_tbls(hw); ice_destroy_lock(&hw->tnl_lock); if (hw->port_info) { ice_free(hw, hw->port_info); hw->port_info = NULL; } ice_destroy_all_ctrlq(hw); /* Clear VSI contexts if not already cleared */ ice_clear_all_vsi_ctx(hw); } /** * ice_check_reset - Check to see if a global reset is complete * @hw: pointer to the hardware structure */ enum ice_status ice_check_reset(struct ice_hw *hw) { u32 cnt, reg = 0, grst_timeout, uld_mask; /* Poll for Device Active state in case a recent CORER, GLOBR, * or EMPR has occurred. The grst delay value is in 100ms units. * Add 1sec for outstanding AQ commands that can take a long time. */ grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> GLGEN_RSTCTL_GRSTDEL_S) + 10; for (cnt = 0; cnt < grst_timeout; cnt++) { ice_msec_delay(100, true); reg = rd32(hw, GLGEN_RSTAT); if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) break; } if (cnt == grst_timeout) { ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; } #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ GLNVM_ULD_PCIER_DONE_1_M |\ GLNVM_ULD_CORER_DONE_M |\ GLNVM_ULD_GLOBR_DONE_M |\ GLNVM_ULD_POR_DONE_M |\ GLNVM_ULD_POR_DONE_1_M |\ GLNVM_ULD_PCIER_DONE_2_M) uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ? GLNVM_ULD_PE_DONE_M : 0); /* Device is Active; check Global Reset processes are done */ for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, GLNVM_ULD) & uld_mask; if (reg == uld_mask) { ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); break; } ice_msec_delay(10, true); } if (cnt == ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. 
GLNVM_ULD = 0x%x\n", reg); return ICE_ERR_RESET_FAILED; } return ICE_SUCCESS; } /** * ice_pf_reset - Reset the PF * @hw: pointer to the hardware structure * * If a global reset has been triggered, this function checks * for its completion and then issues the PF reset */ static enum ice_status ice_pf_reset(struct ice_hw *hw) { u32 cnt, reg; /* If at function entry a global reset was already in progress, i.e. * state is not 'device active' or any of the reset done bits are not * set in GLNVM_ULD, there is no need for a PF Reset; poll until the * global reset is done. */ if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) || (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { /* poll on global reset currently in progress until done */ if (ice_check_reset(hw)) return ICE_ERR_RESET_FAILED; return ICE_SUCCESS; } /* Reset the PF */ reg = rd32(hw, PFGEN_CTRL); wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M)); /* Wait for the PFR to complete. The wait time is the global config lock * timeout plus the PFR timeout which will account for a possible reset * that is occurring during a download package operation. */ for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT; cnt++) { reg = rd32(hw, PFGEN_CTRL); if (!(reg & PFGEN_CTRL_PFSWR_M)) break; ice_msec_delay(1, true); } if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) { ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; } return ICE_SUCCESS; } /** * ice_reset - Perform different types of reset * @hw: pointer to the hardware structure * @req: reset request * * This function triggers a reset as specified by the req parameter. * * Note: * If anything other than a PF reset is triggered, PXE mode is restored. * This has to be cleared using ice_clear_pxe_mode again, once the AQ * interface has been restored in the rebuild flow. 
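 *
 * A minimal caller sketch (illustrative only, not a fixed flow in this
 * file): request a CORER and rely on the completion poll before doing
 * any rebuild work:
 *
 *	if (ice_reset(hw, ICE_RESET_CORER) != ICE_SUCCESS)
 *		return ICE_ERR_RESET_FAILED;
 *	// rebuild flow runs here; call ice_clear_pxe_mode() once the AQ
 *	// interface is back up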
*/ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) { u32 val = 0; switch (req) { case ICE_RESET_PFR: return ice_pf_reset(hw); case ICE_RESET_CORER: ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n"); val = GLGEN_RTRIG_CORER_M; break; case ICE_RESET_GLOBR: ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); val = GLGEN_RTRIG_GLOBR_M; break; default: return ICE_ERR_PARAM; } val |= rd32(hw, GLGEN_RTRIG); wr32(hw, GLGEN_RTRIG, val); ice_flush(hw); /* wait for the FW to be ready */ return ice_check_reset(hw); } /** * ice_copy_rxq_ctx_to_hw * @hw: pointer to the hardware structure * @ice_rxq_ctx: pointer to the rxq context * @rxq_index: the index of the Rx queue * * Copies rxq context from dense structure to HW register space */ static enum ice_status ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) { u8 i; if (!ice_rxq_ctx) return ICE_ERR_BAD_PTR; if (rxq_index > QRX_CTRL_MAX_INDEX) return ICE_ERR_PARAM; /* Copy each dword separately to HW */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { wr32(hw, QRX_CONTEXT(i, rxq_index), *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); } return ICE_SUCCESS; } /* LAN Rx Queue Context */ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89), ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102), ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109), ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114), ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116), ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117), ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119), ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120), ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124), ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127), ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174), ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193), ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194), ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), { 0 } }; /** * ice_write_rxq_ctx * @hw: pointer to the hardware structure * @rlan_ctx: pointer to the rxq context * @rxq_index: the index of the Rx queue * * Converts rxq context from sparse to dense structure and then writes * it to HW register space and enables the hardware to prefetch descriptors * instead of only fetching them on demand */ enum ice_status ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; if (!rlan_ctx) return ICE_ERR_BAD_PTR; rlan_ctx->prefena = 1; ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); } /** * ice_clear_rxq_ctx * @hw: pointer to the hardware structure * @rxq_index: the index of the Rx queue to clear * * Clears rxq context in HW register space */ enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index) { u8 i; if (rxq_index > QRX_CTRL_MAX_INDEX) return ICE_ERR_PARAM; /* Clear each dword register separately */ for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) wr32(hw, QRX_CONTEXT(i, rxq_index), 0); return ICE_SUCCESS; } /* LAN Tx Queue Context */ const struct ice_ctx_ele ice_tlan_ctx_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), ICE_CTX_STORE(ice_tlan_ctx, 
port_num, 3, 57), ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60), ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65), ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68), ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78), ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80), ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90), ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91), ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92), ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93), ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101), ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102), ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103), ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104), ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105), ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114), ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128), ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129), ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135), ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148), ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152), ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153), ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164), ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), { 0 } }; /** * ice_copy_tx_cmpltnq_ctx_to_hw * @hw: pointer to the hardware structure * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context * @tx_cmpltnq_index: the index of the completion queue * * Copies Tx completion queue context from dense structure to HW register space */ static enum ice_status ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx, u32 tx_cmpltnq_index) { u8 i; if (!ice_tx_cmpltnq_ctx) return ICE_ERR_BAD_PTR; if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX) return ICE_ERR_PARAM; /* Copy each dword separately to HW */ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) { wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32))))); ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i, *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32))))); } return ICE_SUCCESS; } /* LAN Tx Completion Queue Context */ static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161), ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192), { 0 } }; /** * ice_write_tx_cmpltnq_ctx * @hw: pointer to the hardware structure * @tx_cmpltnq_ctx: pointer to the completion queue context * @tx_cmpltnq_index: the index of the completion queue * * Converts completion queue context from sparse to dense structure and then * writes it to HW register space */ enum ice_status ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx, u32 tx_cmpltnq_index) { u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 }; ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info); return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index); } /** * ice_clear_tx_cmpltnq_ctx * @hw: pointer to the hardware structure * @tx_cmpltnq_index: the 
index of the completion queue to clear * * Clears Tx completion queue context in HW register space */ enum ice_status ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index) { u8 i; if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX) return ICE_ERR_PARAM; /* Clear each dword register separately */ for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0); return ICE_SUCCESS; } /** * ice_copy_tx_drbell_q_ctx_to_hw * @hw: pointer to the hardware structure * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context * @tx_drbell_q_index: the index of the doorbell queue * * Copies doorbell queue context from dense structure to HW register space */ static enum ice_status ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx, u32 tx_drbell_q_index) { u8 i; if (!ice_tx_drbell_q_ctx) return ICE_ERR_BAD_PTR; if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX) return ICE_ERR_PARAM; /* Copy each dword separately to HW */ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) { wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32))))); ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i, *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32))))); } return ICE_SUCCESS; } /* LAN Tx Doorbell Queue Context info */ static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0), ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64), ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80), ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84), ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94), ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96), ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104), ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108), ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112), ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128), ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144), { 0 } }; /** * ice_write_tx_drbell_q_ctx * @hw: pointer to the hardware structure * @tx_drbell_q_ctx: pointer to the doorbell queue context * @tx_drbell_q_index: the index of the doorbell queue * * Converts doorbell queue context from sparse to dense structure and then * writes it to HW register space */ enum ice_status ice_write_tx_drbell_q_ctx(struct ice_hw *hw, struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx, u32 tx_drbell_q_index) { u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 }; ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info); return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index); } /** * ice_clear_tx_drbell_q_ctx * @hw: pointer to the hardware structure * @tx_drbell_q_index: the index of the doorbell queue to clear * * Clears doorbell queue context in HW register space */ enum ice_status ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index) { u8 i; if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX) return ICE_ERR_PARAM; /* Clear each dword register separately */ for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0); return ICE_SUCCESS; } /* FW Admin Queue command wrappers */ /** * ice_should_retry_sq_send_cmd * @opcode: AQ opcode * * Decide if we should retry the send command routine for the ATQ, depending * on the opcode. 
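 *
 * For example, ice_should_retry_sq_send_cmd(ice_aqc_opc_lldp_start)
 * returns true, while an opcode outside the list below (for instance
 * ice_aqc_opc_get_ver) does not trigger the retry path.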
*/ static bool ice_should_retry_sq_send_cmd(u16 opcode) { switch (opcode) { case ice_aqc_opc_dnl_get_status: case ice_aqc_opc_dnl_run: case ice_aqc_opc_dnl_call: case ice_aqc_opc_dnl_read_sto: case ice_aqc_opc_dnl_write_sto: case ice_aqc_opc_dnl_set_breakpoints: case ice_aqc_opc_dnl_read_log: case ice_aqc_opc_get_link_topo: case ice_aqc_opc_done_alt_write: case ice_aqc_opc_lldp_stop: case ice_aqc_opc_lldp_start: case ice_aqc_opc_lldp_filter_ctrl: return true; } return false; } /** * ice_sq_send_cmd_retry - send command to Control Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @desc: prefilled descriptor describing the command * @buf: buffer to use for indirect commands (or NULL for direct commands) * @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure * * Retry sending the FW Admin Queue command, multiple times, to the FW Admin * Queue if the EBUSY AQ error is returned. */ static enum ice_status ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aq_desc desc_cpy; enum ice_status status; bool is_cmd_for_retry; u8 *buf_cpy = NULL; u8 idx = 0; u16 opcode; opcode = LE16_TO_CPU(desc->opcode); is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM); if (is_cmd_for_retry) { if (buf) { buf_cpy = (u8 *)ice_malloc(hw, buf_size); if (!buf_cpy) return ICE_ERR_NO_MEMORY; } ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy), ICE_NONDMA_TO_NONDMA); } do { status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd); if (!is_cmd_for_retry || status == ICE_SUCCESS || hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY) break; if (buf_cpy) ice_memcpy(buf, buf_cpy, buf_size, ICE_NONDMA_TO_NONDMA); ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy), ICE_NONDMA_TO_NONDMA); ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false); } while (++idx < ICE_SQ_SEND_MAX_EXECUTE); if (buf_cpy) ice_free(hw, buf_cpy); return status; } /** * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue * @hw: pointer to the HW struct * @desc: descriptor describing the command * @buf: buffer to use for indirect commands (NULL for direct commands) * @buf_size: size of buffer for indirect commands (0 for direct commands) * @cd: pointer to command details structure * * Helper function to send FW Admin Queue commands to the FW Admin Queue. 
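 *
 * A hedged usage sketch for a direct (buffer-less) command, mirroring
 * ice_aq_get_fw_ver() below:
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);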
*/ enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); } /** * ice_aq_get_fw_ver * @hw: pointer to the HW struct * @cd: pointer to command details structure or NULL * * Get the firmware version (0x0001) from the admin queue commands */ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) { struct ice_aqc_get_ver *resp; struct ice_aq_desc desc; enum ice_status status; resp = &desc.params.get_ver; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (!status) { hw->fw_branch = resp->fw_branch; hw->fw_maj_ver = resp->fw_major; hw->fw_min_ver = resp->fw_minor; hw->fw_patch = resp->fw_patch; hw->fw_build = LE32_TO_CPU(resp->fw_build); hw->api_branch = resp->api_branch; hw->api_maj_ver = resp->api_major; hw->api_min_ver = resp->api_minor; hw->api_patch = resp->api_patch; } return status; } /** * ice_aq_send_driver_ver * @hw: pointer to the HW struct * @dv: driver's major, minor version * @cd: pointer to command details structure or NULL * * Send the driver version (0x0002) to the firmware */ enum ice_status ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd) { struct ice_aqc_driver_ver *cmd; struct ice_aq_desc desc; u16 len; cmd = &desc.params.driver_ver; if (!dv) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->major_ver = dv->major_ver; cmd->minor_ver = dv->minor_ver; cmd->build_ver = dv->build_ver; cmd->subbuild_ver = dv->subbuild_ver; len = 0; while (len < sizeof(dv->driver_string) && IS_ASCII(dv->driver_string[len]) && dv->driver_string[len]) len++; return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); } /** * ice_aq_q_shutdown * @hw: pointer to the HW struct * @unloading: is the driver unloading itself * * Tell the Firmware that we're shutting down the AdminQ and whether * or not the driver is unloading as well (0x0003). */ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) { struct ice_aqc_q_shutdown *cmd; struct ice_aq_desc desc; cmd = &desc.params.q_shutdown; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); if (unloading) cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /** * ice_aq_req_res * @hw: pointer to the HW struct * @res: resource ID * @access: access type * @sdp_number: resource number * @timeout: the maximum time in ms that the driver may hold the resource * @cd: pointer to command details structure or NULL * * Requests common resource using the admin queue commands (0x0008). * When attempting to acquire the Global Config Lock, the driver can * learn of three states: * 1) ICE_SUCCESS - acquired lock, and can perform download package * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has * successfully downloaded the package; the driver does * not have to download the package and can continue * loading * * Note that if the caller is in an acquire lock, perform action, release lock * phase of operation, it is possible that the FW may detect a timeout and issue * a CORER. In this case, the driver will receive a CORER interrupt and will * have to determine its cause. 
The calling thread that is handling this flow * will likely get an error propagated back to it indicating the Download * Package, Update Package or the Release Resource AQ commands timed out. */ static enum ice_status ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd_resp; struct ice_aq_desc desc; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd_resp = &desc.params.res_owner; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); cmd_resp->res_id = CPU_TO_LE16(res); cmd_resp->access_type = CPU_TO_LE16(access); cmd_resp->res_number = CPU_TO_LE32(sdp_number); cmd_resp->timeout = CPU_TO_LE32(*timeout); *timeout = 0; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. */ /* Global config lock response utilizes an additional status field. * * If the Global config lock resource is held by some other driver, the * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field * and the timeout field indicates the maximum time the current owner * of the resource has to free it. */ if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { *timeout = LE32_TO_CPU(cmd_resp->timeout); return ICE_SUCCESS; } else if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_IN_PROG) { *timeout = LE32_TO_CPU(cmd_resp->timeout); return ICE_ERR_AQ_ERROR; } else if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_DONE) { return ICE_ERR_AQ_NO_WORK; } /* invalid FW response, force a timeout immediately */ *timeout = 0; return ICE_ERR_AQ_ERROR; } /* If the resource is held by some other driver, the command completes * with a busy return value and the timeout field indicates the maximum * time the current owner of the resource has to free it. */ if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) *timeout = LE32_TO_CPU(cmd_resp->timeout); return status; } /** * ice_aq_release_res * @hw: pointer to the HW struct * @res: resource ID * @sdp_number: resource number * @cd: pointer to command details structure or NULL * * release common resource using the admin queue commands (0x0009) */ static enum ice_status ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, struct ice_sq_cd *cd) { struct ice_aqc_req_res *cmd; struct ice_aq_desc desc; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.res_owner; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); cmd->res_id = CPU_TO_LE16(res); cmd->res_number = CPU_TO_LE32(sdp_number); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_acquire_res * @hw: pointer to the HW structure * @res: resource ID * @access: access type (read or write) * @timeout: timeout in milliseconds * * This function will attempt to acquire the ownership of a resource. 
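 *
 * Typical acquire/use/release pattern (sketch; the 3000 ms timeout is an
 * arbitrary example value):
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
 *	if (status == ICE_SUCCESS) {
 *		// ... access the shared resource ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	} else if (status == ICE_ERR_AQ_NO_WORK) {
 *		// another driver already completed the work
 *	}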
*/ enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; u32 time_left = timeout; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has * previously acquired the resource and performed any necessary updates; * in this case the caller does not obtain the resource and has no * further work to do. */ if (status == ICE_ERR_AQ_NO_WORK) goto ice_acquire_res_exit; if (status) ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); /* If necessary, poll until the current lock owner times out */ timeout = time_left; while (status && timeout && time_left) { ice_msec_delay(delay, true); timeout = (timeout > delay) ? timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); if (status == ICE_ERR_AQ_NO_WORK) /* lock free, but no work to do */ break; if (!status) /* lock acquired */ break; } if (status && status != ICE_ERR_AQ_NO_WORK) ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); ice_acquire_res_exit: if (status == ICE_ERR_AQ_NO_WORK) { if (access == ICE_RES_WRITE) ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); else ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); } return status; } /** * ice_release_res * @hw: pointer to the HW structure * @res: resource ID * * This function will release a resource using the proper Admin Command. */ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) { enum ice_status status; u32 total_delay = 0; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_aq_release_res(hw, res, 0, NULL); /* there are some rare cases when trying to release the resource * results in an admin queue timeout, so handle them correctly */ while ((status == ICE_ERR_AQ_TIMEOUT) && (total_delay < hw->adminq.sq_cmd_timeout)) { ice_msec_delay(1, true); status = ice_aq_release_res(hw, res, 0, NULL); total_delay++; } } /** * ice_aq_alloc_free_res - command to allocate/free resources * @hw: pointer to the HW struct * @num_entries: number of resource entries in buffer * @buf: Indirect buffer to hold data parameters and response * @buf_size: size of buffer for indirect commands * @opc: pass in the command opcode * @cd: pointer to command details structure or NULL * * Helper function to allocate/free resources using the admin queue commands */ enum ice_status ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_alloc_free_res_cmd *cmd; struct ice_aq_desc desc; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.sw_res_ctrl; if (!buf) return ICE_ERR_PARAM; if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries)) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, opc); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->num_entries = CPU_TO_LE16(num_entries); return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); } /** * ice_alloc_hw_res - allocate resource * @hw: pointer to the HW struct * @type: type of resource * @num: number of resources to allocate * @btm: allocate from bottom * @res: pointer to array that will receive the resources */ enum ice_status ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) { struct 
ice_aqc_alloc_free_res_elem *buf; enum ice_status status; u16 buf_len; buf_len = ice_struct_size(buf, elem, num); buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!buf) return ICE_ERR_NO_MEMORY; /* Prepare buffer to allocate resource. */ buf->num_elems = CPU_TO_LE16(num); buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); if (btm) buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, ice_aqc_opc_alloc_res, NULL); if (status) goto ice_alloc_res_exit; ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num, ICE_NONDMA_TO_NONDMA); ice_alloc_res_exit: ice_free(hw, buf); return status; } /** * ice_free_hw_res - free allocated HW resource * @hw: pointer to the HW struct * @type: type of resource to free * @num: number of resources * @res: pointer to array that contains the resources to free */ enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; enum ice_status status; u16 buf_len; buf_len = ice_struct_size(buf, elem, num); buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); if (!buf) return ICE_ERR_NO_MEMORY; /* Prepare buffer to free resource. */ buf->num_elems = CPU_TO_LE16(num); buf->res_type = CPU_TO_LE16(type); ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num, ICE_NONDMA_TO_NONDMA); status = ice_aq_alloc_free_res(hw, num, buf, buf_len, ice_aqc_opc_free_res, NULL); if (status) ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); ice_free(hw, buf); return status; } /** * ice_get_num_per_func - determine number of resources per PF * @hw: pointer to the HW structure * @max: value to be evenly split between each PF * * Determine the number of valid functions by going through the bitmap returned * from parsing capabilities and use this to calculate the number of resources * per PF based on the max value passed in. 
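 *
 * Worked example: a valid_functions bitmap of 0xFF means 8 PFs, so a
 * device-wide maximum of 768 VSIs (the usual ICE_MAX_VSI value) yields
 * 768 / 8 = 96 guaranteed VSIs per PF.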
*/ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) { u8 funcs; #define ICE_CAPS_VALID_FUNCS_M 0xFF funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions & ICE_CAPS_VALID_FUNCS_M); if (!funcs) return 0; return max / funcs; } /** * ice_print_led_caps - print LED capabilities * @hw: pointer to the ice_hw instance * @caps: pointer to common caps instance * @prefix: string to prefix when printing * @dbg: set to indicate debug print */ static void ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, char const *prefix, bool dbg) { u8 i; if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix, caps->led_pin_num); else ice_info(hw, "%s: led_pin_num = %d\n", prefix, caps->led_pin_num); for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) { if (!caps->led[i]) continue; if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n", prefix, i, caps->led[i]); else ice_info(hw, "%s: led[%d] = %d\n", prefix, i, caps->led[i]); } } /** * ice_print_sdp_caps - print SDP capabilities * @hw: pointer to the ice_hw instance * @caps: pointer to common caps instance * @prefix: string to prefix when printing * @dbg: set to indicate debug print */ static void ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, char const *prefix, bool dbg) { u8 i; if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix, caps->sdp_pin_num); else ice_info(hw, "%s: sdp_pin_num = %d\n", prefix, caps->sdp_pin_num); for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) { if (!caps->sdp[i]) continue; if (dbg) ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n", prefix, i, caps->sdp[i]); else ice_info(hw, "%s: sdp[%d] = %d\n", prefix, i, caps->sdp[i]); } } /** * ice_parse_common_caps - parse common device/function capabilities * @hw: pointer to the HW struct * @caps: pointer to common capabilities structure * @elem: the capability element to parse * @prefix: message prefix for tracing capabilities * * Given a capability element, extract relevant details into the common * capability structure. * * Returns: true if the capability matches one of the common capability ids, * false otherwise. 
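 *
 * For example, an element with cap == ICE_AQC_CAPS_MSIX, number == 64 and
 * phys_id == 0 records 64 MSI-X vectors starting at vector index 0 in the
 * common capability structure.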
*/ static bool ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, struct ice_aqc_list_caps_elem *elem, const char *prefix) { u32 logical_id = LE32_TO_CPU(elem->logical_id); u32 phys_id = LE32_TO_CPU(elem->phys_id); u32 number = LE32_TO_CPU(elem->number); u16 cap = LE16_TO_CPU(elem->cap); bool found = true; switch (cap) { case ICE_AQC_CAPS_SWITCHING_MODE: caps->switching_mode = number; ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix, caps->switching_mode); break; case ICE_AQC_CAPS_MANAGEABILITY_MODE: caps->mgmt_mode = number; caps->mgmt_protocols_mctp = logical_id; ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix, caps->mgmt_mode); ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix, caps->mgmt_protocols_mctp); break; case ICE_AQC_CAPS_OS2BMC: caps->os2bmc = number; ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc); break; case ICE_AQC_CAPS_VALID_FUNCTIONS: caps->valid_functions = number; ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, caps->valid_functions); break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, caps->sr_iov_1_1); break; case ICE_AQC_CAPS_802_1QBG: caps->evb_802_1_qbg = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number); break; case ICE_AQC_CAPS_802_1BR: caps->evb_802_1_qbh = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number); break; case ICE_AQC_CAPS_DCB: caps->dcb = (number == 1); caps->active_tc_bitmap = logical_id; caps->maxtc = phys_id; ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, caps->active_tc_bitmap); ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); break; case ICE_AQC_CAPS_ISCSI: caps->iscsi = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi); break; case ICE_AQC_CAPS_RSS: caps->rss_table_size = number; caps->rss_table_entry_width = logical_id; ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, caps->rss_table_size); ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, caps->rss_table_entry_width); break; case ICE_AQC_CAPS_RXQS: caps->num_rxq = number; caps->rxq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, caps->num_rxq); ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, caps->rxq_first_id); break; case ICE_AQC_CAPS_TXQS: caps->num_txq = number; caps->txq_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, caps->num_txq); ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, caps->txq_first_id); break; case ICE_AQC_CAPS_MSIX: caps->num_msix_vectors = number; caps->msix_vector_first_id = phys_id; ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, caps->num_msix_vectors); ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; - case ICE_AQC_CAPS_NVM_VER: - break; case ICE_AQC_CAPS_NVM_MGMT: caps->sec_rev_disabled = (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ? true : false; ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix, caps->sec_rev_disabled); caps->update_disabled = (number & ICE_NVM_MGMT_UPDATE_DISABLED) ? 
true : false; ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix, caps->update_disabled); caps->nvm_unified_update = (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? true : false; ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, caps->nvm_unified_update); break; case ICE_AQC_CAPS_CEM: caps->mgmt_cem = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix, caps->mgmt_cem); break; case ICE_AQC_CAPS_IWARP: caps->iwarp = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp); break; + case ICE_AQC_CAPS_ROCEV2_LAG: + caps->roce_lag = (number == 1); + ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n", + prefix, caps->roce_lag); + break; case ICE_AQC_CAPS_LED: if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) { caps->led[phys_id] = true; caps->led_pin_num++; ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id); } break; case ICE_AQC_CAPS_SDP: if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) { caps->sdp[phys_id] = true; caps->sdp_pin_num++; ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id); } break; case ICE_AQC_CAPS_WR_CSR_PROT: caps->wr_csr_prot = number; caps->wr_csr_prot |= (u64)logical_id << 32; ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix, (unsigned long long)caps->wr_csr_prot); break; case ICE_AQC_CAPS_WOL_PROXY: caps->num_wol_proxy_fltr = number; caps->wol_proxy_vsi_seid = logical_id; caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M); caps->acpi_prog_mthd = !!(phys_id & ICE_ACPI_PROG_MTHD_M); caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M); ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix, caps->num_wol_proxy_fltr); ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix, caps->wol_proxy_vsi_seid); ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n", prefix, caps->apm_wol_support); break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", prefix, caps->max_mtu); break; case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: caps->pcie_reset_avoidance = (number > 0); ice_debug(hw, ICE_DBG_INIT, "%s: pcie_reset_avoidance = %d\n", prefix, caps->pcie_reset_avoidance); break; case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: caps->reset_restrict_support = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: reset_restrict_support = %d\n", prefix, caps->reset_restrict_support); break; case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0: case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1: case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2: case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3: { - u8 index = cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0; + u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0); caps->ext_topo_dev_img_ver_high[index] = number; caps->ext_topo_dev_img_ver_low[index] = logical_id; caps->ext_topo_dev_img_part_num[index] = (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >> ICE_EXT_TOPO_DEV_IMG_PART_NUM_S; caps->ext_topo_dev_img_load_en[index] = (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0; caps->ext_topo_dev_img_prog_en[index] = (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0; ice_debug(hw, ICE_DBG_INIT, "%s: ext_topo_dev_img_ver_high[%d] = %d\n", prefix, index, caps->ext_topo_dev_img_ver_high[index]); ice_debug(hw, ICE_DBG_INIT, "%s: ext_topo_dev_img_ver_low[%d] = %d\n", prefix, index, caps->ext_topo_dev_img_ver_low[index]); ice_debug(hw, ICE_DBG_INIT, "%s: ext_topo_dev_img_part_num[%d] = %d\n", prefix, index, caps->ext_topo_dev_img_part_num[index]); ice_debug(hw, ICE_DBG_INIT, "%s: ext_topo_dev_img_load_en[%d] = %d\n", prefix, index, 
caps->ext_topo_dev_img_load_en[index]); ice_debug(hw, ICE_DBG_INIT, "%s: ext_topo_dev_img_prog_en[%d] = %d\n", prefix, index, caps->ext_topo_dev_img_prog_en[index]); break; } + case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: + caps->tx_sched_topo_comp_mode_en = (number == 1); + break; + case ICE_AQC_CAPS_DYN_FLATTENING: + caps->dyn_flattening_en = (number == 1); + ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n", + prefix, caps->dyn_flattening_en); + break; default: /* Not one of the recognized common capabilities */ found = false; } return found; } /** * ice_recalc_port_limited_caps - Recalculate port limited capabilities * @hw: pointer to the HW structure * @caps: pointer to capabilities structure to fix * * Re-calculate the capabilities that are dependent on the number of physical * ports; i.e. some features are not supported or function differently on * devices with more than 4 ports. */ static void ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) { /* This assumes device capabilities are always scanned before function * capabilities during the initialization flow. */ if (hw->dev_caps.num_funcs > 4) { /* Max 4 TCs per port */ caps->maxtc = 4; ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", caps->maxtc); if (caps->iwarp) { ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); caps->iwarp = 0; } /* print message only when processing device capabilities * during initialization. */ if (caps == &hw->dev_caps.common_cap) ice_info(hw, "RDMA functionality is not available with the current device configuration.\n"); } } /** * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps * @hw: pointer to the HW struct * @func_p: pointer to function capabilities structure * @cap: pointer to the capability element to parse * * Extract function capabilities for ICE_AQC_CAPS_VF. */ static void ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, struct ice_aqc_list_caps_elem *cap) { u32 number = LE32_TO_CPU(cap->number); u32 logical_id = LE32_TO_CPU(cap->logical_id); func_p->num_allocd_vfs = number; func_p->vf_base_id = logical_id; ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", func_p->num_allocd_vfs); ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", func_p->vf_base_id); } /** * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps * @hw: pointer to the HW struct * @func_p: pointer to function capabilities structure * @cap: pointer to the capability element to parse * * Extract function capabilities for ICE_AQC_CAPS_VSI. */ static void ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, struct ice_aqc_list_caps_elem *cap) { func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", LE32_TO_CPU(cap->number)); ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", func_p->guar_num_vsi); } /** * ice_parse_func_caps - Parse function capabilities * @hw: pointer to the HW struct * @func_p: pointer to function capabilities structure * @buf: buffer containing the function capability records * @cap_count: the number of capabilities * * Helper function to parse function (0x000A) capabilities list. For * capabilities shared between device and function, this relies on * ice_parse_common_caps. * * Loop through the list of provided capabilities and extract the relevant * data into the function capabilities structure. 
*/ static void ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, void *buf, u32 cap_count) { struct ice_aqc_list_caps_elem *cap_resp; u32 i; cap_resp = (struct ice_aqc_list_caps_elem *)buf; ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM); for (i = 0; i < cap_count; i++) { u16 cap = LE16_TO_CPU(cap_resp[i].cap); bool found; found = ice_parse_common_caps(hw, &func_p->common_cap, &cap_resp[i], "func caps"); switch (cap) { case ICE_AQC_CAPS_VF: ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); break; case ICE_AQC_CAPS_VSI: ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); break; default: /* Don't list common capabilities as unknown */ if (!found) ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", i, cap); break; } } ice_print_led_caps(hw, &func_p->common_cap, "func caps", true); ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true); ice_recalc_port_limited_caps(hw, &func_p->common_cap); } /** * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure * @cap: capability element to parse * * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. */ static void ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, struct ice_aqc_list_caps_elem *cap) { u32 number = LE32_TO_CPU(cap->number); dev_p->num_funcs = ice_hweight32(number); ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", dev_p->num_funcs); } /** * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure * @cap: capability element to parse * * Parse ICE_AQC_CAPS_VF for device capabilities. */ static void ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, struct ice_aqc_list_caps_elem *cap) { u32 number = LE32_TO_CPU(cap->number); dev_p->num_vfs_exposed = number; ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", dev_p->num_vfs_exposed); } /** * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure * @cap: capability element to parse * * Parse ICE_AQC_CAPS_VSI for device capabilities. */ static void ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, struct ice_aqc_list_caps_elem *cap) { u32 number = LE32_TO_CPU(cap->number); dev_p->num_vsi_allocd_to_host = number; ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", dev_p->num_vsi_allocd_to_host); } +/** + * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities. 
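+ *
+ * Example: a mode word with both ICE_NAC_TOPO_PRIMARY_M and
+ * ICE_NAC_TOPO_DUAL_M set describes the primary NAC of a dual-NAC
+ * device; the low bits of phys_id (masked by ICE_NAC_TOPO_ID_M) carry
+ * that NAC's numeric id.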
+ */ +static void +ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + dev_p->nac_topo.mode = LE32_TO_CPU(cap->number); + dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M; + + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", + dev_p->nac_topo.id); +} + /** * ice_parse_dev_caps - Parse device capabilities * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure * @buf: buffer containing the device capability records * @cap_count: the number of capabilities * * Helper function to parse device (0x000B) capabilities list. For * capabilities shared between device and function, this relies on * ice_parse_common_caps. * * Loop through the list of provided capabilities and extract the relevant * data into the device capabilities structure. */ static void ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, void *buf, u32 cap_count) { struct ice_aqc_list_caps_elem *cap_resp; u32 i; cap_resp = (struct ice_aqc_list_caps_elem *)buf; ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM); for (i = 0; i < cap_count; i++) { u16 cap = LE16_TO_CPU(cap_resp[i].cap); bool found; found = ice_parse_common_caps(hw, &dev_p->common_cap, &cap_resp[i], "dev caps"); switch (cap) { case ICE_AQC_CAPS_VALID_FUNCTIONS: ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); break; case ICE_AQC_CAPS_VF: ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); break; case ICE_AQC_CAPS_VSI: ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); break; + case ICE_AQC_CAPS_NAC_TOPOLOGY: + ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); + break; default: /* Don't list common capabilities as unknown */ if (!found) ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", i, cap); break; } } ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true); ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true); ice_recalc_port_limited_caps(hw, &dev_p->common_cap); } /** * ice_aq_list_caps - query function/device capabilities * @hw: pointer to the HW struct * @buf: a buffer to hold the capabilities * @buf_size: size of the buffer * @cap_count: if not NULL, set to the number of capabilities reported * @opc: capabilities type to discover, device or function * @cd: pointer to command details structure or NULL * * Get the function (0x000A) or device (0x000B) capabilities description from * firmware and store it in the buffer. * * If the cap_count pointer is not NULL, then it is set to the number of * capabilities firmware will report. Note that if the buffer size is too * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The * cap_count will still be updated in this case. It is recommended that the * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that * firmware could return) to avoid this. 
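 *
 * Hedged call sketch, mirroring the discover helpers below:
 *
 *	buf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
 *	status = ice_aq_list_caps(hw, buf, ICE_AQ_MAX_BUF_LEN, &cap_count,
 *				  ice_aqc_opc_list_dev_caps, NULL);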
*/ static enum ice_status ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_list_caps *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.get_cap; if (opc != ice_aqc_opc_list_func_caps && opc != ice_aqc_opc_list_dev_caps) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, opc); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (cap_count) *cap_count = LE32_TO_CPU(cmd->count); return status; } /** * ice_discover_dev_caps - Read and extract device capabilities * @hw: pointer to the hardware structure * @dev_caps: pointer to device capabilities structure * * Read the device capabilities and extract them into the dev_caps structure * for later use. */ static enum ice_status ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) { enum ice_status status; u32 cap_count = 0; void *cbuf; cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); if (!cbuf) return ICE_ERR_NO_MEMORY; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum * possible size that firmware can return. */ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, ice_aqc_opc_list_dev_caps, NULL); if (!status) ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); ice_free(hw, cbuf); return status; } /** * ice_discover_func_caps - Read and extract function capabilities * @hw: pointer to the hardware structure * @func_caps: pointer to function capabilities structure * * Read the function capabilities and extract them into the func_caps structure * for later use. */ static enum ice_status ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) { enum ice_status status; u32 cap_count = 0; void *cbuf; cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); if (!cbuf) return ICE_ERR_NO_MEMORY; /* Although the driver doesn't know the number of capabilities the * device will return, we can simply send a 4KB buffer, the maximum * possible size that firmware can return. 
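 *
 * Assuming the usual 32-byte ice_aqc_list_caps_elem layout, this 4 KB
 * buffer bounds cap_count at 4096 / 32 = 128 entries.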
*/ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, ice_aqc_opc_list_func_caps, NULL); if (!status) ice_parse_func_caps(hw, func_caps, cbuf, cap_count); ice_free(hw, cbuf); return status; } /** * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode * @hw: pointer to the hardware structure */ void ice_set_safe_mode_caps(struct ice_hw *hw) { struct ice_hw_func_caps *func_caps = &hw->func_caps; struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; struct ice_hw_common_caps cached_caps; u32 num_funcs; /* cache some func_caps values that should be restored after memset */ cached_caps = func_caps->common_cap; /* unset func capabilities */ memset(func_caps, 0, sizeof(*func_caps)); #define ICE_RESTORE_FUNC_CAP(name) \ func_caps->common_cap.name = cached_caps.name /* restore cached values */ ICE_RESTORE_FUNC_CAP(valid_functions); ICE_RESTORE_FUNC_CAP(txq_first_id); ICE_RESTORE_FUNC_CAP(rxq_first_id); ICE_RESTORE_FUNC_CAP(msix_vector_first_id); ICE_RESTORE_FUNC_CAP(max_mtu); ICE_RESTORE_FUNC_CAP(nvm_unified_update); /* one Tx and one Rx queue in safe mode */ func_caps->common_cap.num_rxq = 1; func_caps->common_cap.num_txq = 1; /* two MSIX vectors, one for traffic and one for misc causes */ func_caps->common_cap.num_msix_vectors = 2; func_caps->guar_num_vsi = 1; /* cache some dev_caps values that should be restored after memset */ cached_caps = dev_caps->common_cap; num_funcs = dev_caps->num_funcs; /* unset dev capabilities */ memset(dev_caps, 0, sizeof(*dev_caps)); #define ICE_RESTORE_DEV_CAP(name) \ dev_caps->common_cap.name = cached_caps.name /* restore cached values */ ICE_RESTORE_DEV_CAP(valid_functions); ICE_RESTORE_DEV_CAP(txq_first_id); ICE_RESTORE_DEV_CAP(rxq_first_id); ICE_RESTORE_DEV_CAP(msix_vector_first_id); ICE_RESTORE_DEV_CAP(max_mtu); ICE_RESTORE_DEV_CAP(nvm_unified_update); dev_caps->num_funcs = num_funcs; /* one Tx and one Rx queue per function in safe mode */ dev_caps->common_cap.num_rxq = num_funcs; dev_caps->common_cap.num_txq = num_funcs; /* two MSIX vectors per function */ dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; } /** * ice_get_caps - get info about the HW * @hw: pointer to the hardware structure */ enum ice_status ice_get_caps(struct ice_hw *hw) { enum ice_status status; status = ice_discover_dev_caps(hw, &hw->dev_caps); if (status) return status; return ice_discover_func_caps(hw, &hw->func_caps); } /** * ice_aq_manage_mac_write - manage MAC address write command * @hw: pointer to the HW struct * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address * @flags: flags to control write behavior * @cd: pointer to command details structure or NULL * * This function is used to write MAC address to the NVM (0x0108). */ enum ice_status ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd) { struct ice_aqc_manage_mac_write *cmd; struct ice_aq_desc desc; cmd = &desc.params.mac_write; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); cmd->flags = flags; ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_clear_pxe_mode * @hw: pointer to the HW struct * * Tell the firmware that the driver is taking over from PXE (0x0110). 
*/ static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /** * ice_clear_pxe_mode - clear pxe operations mode * @hw: pointer to the HW struct * * Make sure all PXE mode settings are cleared, including things * like descriptor fetch/write-back mode. */ void ice_clear_pxe_mode(struct ice_hw *hw) { if (ice_check_sq_alive(hw, &hw->adminq)) ice_aq_clear_pxe_mode(hw); } /** * ice_aq_set_port_params - set physical port parameters. * @pi: pointer to the port info struct * @bad_frame_vsi: defines the VSI to which bad frames are forwarded * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded * @double_vlan: if set double VLAN is enabled * @cd: pointer to command details structure or NULL * * Set Physical port parameters (0x0203) */ enum ice_status ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct ice_sq_cd *cd) { struct ice_aqc_set_port_params *cmd; struct ice_hw *hw = pi->hw; struct ice_aq_desc desc; u16 cmd_flags = 0; cmd = &desc.params.set_port_params; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi); if (save_bad_pac) cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS; if (pad_short_pac) cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS; if (double_vlan) cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; cmd->cmd_flags = CPU_TO_LE16(cmd_flags); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_is_100m_speed_supported * @hw: pointer to the HW struct * * returns true if 100M speeds are supported by the device, * false otherwise. */ bool ice_is_100m_speed_supported(struct ice_hw *hw) { switch (hw->device_id) { - case ICE_DEV_ID_E822C_10G_BASE_T: case ICE_DEV_ID_E822C_SGMII: - case ICE_DEV_ID_E822L_10G_BASE_T: case ICE_DEV_ID_E822L_SGMII: - case ICE_DEV_ID_E823L_10G_BASE_T: case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823C_SGMII: return true; default: return false; } } /** * ice_get_link_speed_based_on_phy_type - returns link speed * @phy_type_low: lower part of phy_type * @phy_type_high: higher part of phy_type * * This helper function will convert an entry in PHY type structure * [phy_type_low, phy_type_high] to its corresponding link speed. * Note: In the structure of [phy_type_low, phy_type_high], there should * be one bit set, as this function will convert one PHY type to its * speed. 
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned */ static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) { u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; switch (phy_type_low) { case ICE_PHY_TYPE_LOW_100BASE_TX: case ICE_PHY_TYPE_LOW_100M_SGMII: speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; break; case ICE_PHY_TYPE_LOW_1000BASE_T: case ICE_PHY_TYPE_LOW_1000BASE_SX: case ICE_PHY_TYPE_LOW_1000BASE_LX: case ICE_PHY_TYPE_LOW_1000BASE_KX: case ICE_PHY_TYPE_LOW_1G_SGMII: speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; break; case ICE_PHY_TYPE_LOW_2500BASE_T: case ICE_PHY_TYPE_LOW_2500BASE_X: case ICE_PHY_TYPE_LOW_2500BASE_KX: speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; break; case ICE_PHY_TYPE_LOW_5GBASE_T: case ICE_PHY_TYPE_LOW_5GBASE_KR: speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; break; case ICE_PHY_TYPE_LOW_10GBASE_T: case ICE_PHY_TYPE_LOW_10G_SFI_DA: case ICE_PHY_TYPE_LOW_10GBASE_SR: case ICE_PHY_TYPE_LOW_10GBASE_LR: case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: case ICE_PHY_TYPE_LOW_10G_SFI_C2C: speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; break; case ICE_PHY_TYPE_LOW_25GBASE_T: case ICE_PHY_TYPE_LOW_25GBASE_CR: case ICE_PHY_TYPE_LOW_25GBASE_CR_S: case ICE_PHY_TYPE_LOW_25GBASE_CR1: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: case ICE_PHY_TYPE_LOW_25GBASE_KR: case ICE_PHY_TYPE_LOW_25GBASE_KR_S: case ICE_PHY_TYPE_LOW_25GBASE_KR1: case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: case ICE_PHY_TYPE_LOW_25G_AUI_C2C: speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; break; case ICE_PHY_TYPE_LOW_40GBASE_CR4: case ICE_PHY_TYPE_LOW_40GBASE_SR4: case ICE_PHY_TYPE_LOW_40GBASE_LR4: case ICE_PHY_TYPE_LOW_40GBASE_KR4: case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: case ICE_PHY_TYPE_LOW_40G_XLAUI: speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; break; case ICE_PHY_TYPE_LOW_50GBASE_CR2: case ICE_PHY_TYPE_LOW_50GBASE_SR2: case ICE_PHY_TYPE_LOW_50GBASE_LR2: case ICE_PHY_TYPE_LOW_50GBASE_KR2: case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_LAUI2: case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI2: case ICE_PHY_TYPE_LOW_50GBASE_CP: case ICE_PHY_TYPE_LOW_50GBASE_SR: case ICE_PHY_TYPE_LOW_50GBASE_FR: case ICE_PHY_TYPE_LOW_50GBASE_LR: case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: case ICE_PHY_TYPE_LOW_50G_AUI1: speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; break; case ICE_PHY_TYPE_LOW_100GBASE_CR4: case ICE_PHY_TYPE_LOW_100GBASE_SR4: case ICE_PHY_TYPE_LOW_100GBASE_LR4: case ICE_PHY_TYPE_LOW_100GBASE_KR4: case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_CAUI4: case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: case ICE_PHY_TYPE_LOW_100G_AUI4: case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: case ICE_PHY_TYPE_LOW_100GBASE_CP2: case ICE_PHY_TYPE_LOW_100GBASE_SR2: case ICE_PHY_TYPE_LOW_100GBASE_DR: speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; break; default: speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; break; } switch (phy_type_high) { case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: case ICE_PHY_TYPE_HIGH_100G_CAUI2: case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: case ICE_PHY_TYPE_HIGH_100G_AUI2: speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; break; default: speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; break; } if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN 
	    && speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the format of link_speeds_bitmap, see
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in the [phy_type_low, phy_type_high] structure represents a
 * certain link speed. This helper function turns on the bits in
 * [phy_type_low, phy_type_high] that correspond to the speeds set in the
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on.
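	 * Bits outside ICE_AQ_PHY_ENA_VALID_MASK are cleared below, with a
	 * debug message, rather than failing the request outright.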
	 */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);

	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = ICE_SUCCESS;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status
ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	enum ice_status status;

	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = (struct ice_aqc_get_phy_caps_data *)
			ice_malloc(hw, sizeof(*pcaps));
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false,
					     ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		if (status == ICE_SUCCESS)
			ice_memcpy(li->module_type, &pcaps->module_type,
				   sizeof(li->module_type),
				   ICE_NONDMA_TO_NONDMA);

		ice_free(hw, pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
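 * Only the cache_data member selected by cache_mode is consumed; the other
 * members are ignored.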
*/ static void ice_cache_phy_user_req(struct ice_port_info *pi, struct ice_phy_cache_mode_data cache_data, enum ice_phy_cache_mode cache_mode) { if (!pi) return; switch (cache_mode) { case ICE_FC_MODE: pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; break; case ICE_SPEED_MODE: pi->phy.curr_user_speed_req = cache_data.data.curr_user_speed_req; break; case ICE_FEC_MODE: pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; break; default: break; } } /** * ice_caps_to_fc_mode * @caps: PHY capabilities * * Convert PHY FC capabilities to ice FC mode */ enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) { if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) return ICE_FC_FULL; if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) return ICE_FC_TX_PAUSE; if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) return ICE_FC_RX_PAUSE; return ICE_FC_NONE; } /** * ice_caps_to_fec_mode * @caps: PHY capabilities * @fec_options: Link FEC options * * Convert PHY FEC capabilities to ice FEC mode */ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) { - if (caps & ICE_AQC_PHY_EN_AUTO_FEC) - return ICE_FEC_AUTO; + if (caps & ICE_AQC_PHY_EN_AUTO_FEC) { + if (fec_options & ICE_AQC_PHY_FEC_DIS) + return ICE_FEC_DIS_AUTO; + else + return ICE_FEC_AUTO; + } if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | ICE_AQC_PHY_FEC_25G_KR_REQ)) return ICE_FEC_BASER; if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | ICE_AQC_PHY_FEC_25G_RS_544_REQ | ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) return ICE_FEC_RS; return ICE_FEC_NONE; } /** * ice_cfg_phy_fc - Configure PHY FC data based on FC mode * @pi: port information structure * @cfg: PHY configuration data to set FC mode * @req_mode: FC mode to configure */ static enum ice_status ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) { struct ice_phy_cache_mode_data cache_data; u8 pause_mask = 0x0; if (!pi || !cfg) return ICE_ERR_BAD_PTR; switch (req_mode) { case ICE_FC_AUTO: { struct ice_aqc_get_phy_caps_data *pcaps; enum ice_status status; pcaps = (struct ice_aqc_get_phy_caps_data *) ice_malloc(pi->hw, sizeof(*pcaps)); if (!pcaps) return ICE_ERR_NO_MEMORY; /* Query the value of FC that both the NIC and attached media * can do. */ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); if (status) { ice_free(pi->hw, pcaps); return status; } pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE; pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE; ice_free(pi->hw, pcaps); break; } case ICE_FC_FULL: pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; break; case ICE_FC_RX_PAUSE: pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; break; case ICE_FC_TX_PAUSE: pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; break; default: break; } /* clear the old pause settings */ cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | ICE_AQC_PHY_EN_RX_LINK_PAUSE); /* set the new capabilities */ cfg->caps |= pause_mask; /* Cache user FC request */ cache_data.data.curr_user_fc_req = req_mode; ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); return ICE_SUCCESS; } /** * ice_set_fc * @pi: port information structure * @aq_failures: pointer to status code, specific to ice_set_fc routine * @ena_auto_link_update: enable automatic link update * * Set the requested flow control mode. 
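 *
 * Illustrative caller sequence (a sketch, not from this file); it assumes
 * pi and pi->fc.req_mode have already been set up by the driver:
 *
 *	u8 aq_fail;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_fail, true))
 *		ice_debug(pi->hw, ICE_DBG_LINK,
 *			  "set fc failed, aq_failures=%d\n", aq_fail);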
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_BAD_PTR;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status) {
		if (status != ICE_ERR_BAD_PTR)
			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info.
		 * It sometimes takes a really long time for the link to come
		 * back from the atomic reset. Thus, we wait a little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (status == ICE_SUCCESS)
				break;

			ice_msec_delay(100, true);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	ice_free(hw, pcaps);
	return status;
}

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match the PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
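	 * The masks below drop ICE_AQC_PHY_AN_MODE and ICE_AQC_PHY_EN_MOD_QUAL
	 * from the capabilities, and ICE_AQ_PHY_ENA_AUTO_LINK_UPDT from the
	 * configuration, before the comparison.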
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
enum ice_status
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA),
				     pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
				     ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
+	case ICE_FEC_DIS_AUTO:
+		/* Set No FEC and auto FEC */
+		if (!ice_fw_supports_fec_dis_auto(hw)) {
+			status = ICE_ERR_NOT_SUPPORTED;
+			goto out;
+		}
+		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
+		/* fall-through */
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits.
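		 * This re-enables every FEC option reported in pcaps so that
		 * the firmware can negotiate the FEC mode itself.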
*/ cfg->caps &= ICE_AQC_PHY_CAPS_MASK; cfg->link_fec_opt |= pcaps->link_fec_options; break; default: status = ICE_ERR_PARAM; break; } if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) && !ice_fw_supports_report_dflt_cfg(pi->hw)) { struct ice_link_default_override_tlv tlv; if (ice_get_link_default_override(&tlv, pi)) goto out; if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && (tlv.options & ICE_LINK_OVERRIDE_EN)) cfg->link_fec_opt = tlv.fec_options; } out: ice_free(hw, pcaps); return status; } /** * ice_get_link_status - get status of the HW network link * @pi: port information structure * @link_up: pointer to bool (true/false = linkup/linkdown) * * Variable link_up is true if link is up, false if link is down. * The variable link_up is invalid if status is non zero. As a * result of this call, link status reporting becomes enabled */ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) { struct ice_phy_info *phy_info; enum ice_status status = ICE_SUCCESS; if (!pi || !link_up) return ICE_ERR_PARAM; phy_info = &pi->phy; if (phy_info->get_link_info) { status = ice_update_link_info(pi); if (status) ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", status); } *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; return status; } /** * ice_aq_set_link_restart_an * @pi: pointer to the port information structure * @ena_link: if true: enable link, if false: disable link * @cd: pointer to command details structure or NULL * * Sets up the link and restarts the Auto-Negotiation over the link. */ enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd) { struct ice_aqc_restart_an *cmd; struct ice_aq_desc desc; cmd = &desc.params.restart_an; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; cmd->lport_num = pi->lport; if (ena_link) cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; else cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); } /** * ice_aq_set_event_mask * @hw: pointer to the HW struct * @port_num: port number of the physical function * @mask: event mask to be set * @cd: pointer to command details structure or NULL * * Set event mask (0x0613) */ enum ice_status ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd) { struct ice_aqc_set_event_mask *cmd; struct ice_aq_desc desc; cmd = &desc.params.set_event_mask; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); cmd->lport_num = port_num; cmd->event_mask = CPU_TO_LE16(mask); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_set_mac_loopback * @hw: pointer to the HW struct * @ena_lpbk: Enable or Disable loopback * @cd: pointer to command details structure or NULL * * Enable/disable loopback on a given port */ enum ice_status ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) { struct ice_aqc_set_mac_lb *cmd; struct ice_aq_desc desc; cmd = &desc.params.set_mac_lb; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); if (ena_lpbk) cmd->lb_mode = ICE_AQ_MAC_LB_EN; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_set_port_id_led * @pi: pointer to the port information * @is_orig_mode: is this LED set to original mode (by the net-list) * @cd: pointer to command details structure or NULL * * Set LED value for the given port (0x06e9) */ enum ice_status ice_aq_set_port_id_led(struct ice_port_info *pi, bool 
is_orig_mode, struct ice_sq_cd *cd) { struct ice_aqc_set_port_id_led *cmd; struct ice_hw *hw = pi->hw; struct ice_aq_desc desc; cmd = &desc.params.set_port_id_led; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); if (is_orig_mode) cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; else cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_sff_eeprom * @hw: pointer to the HW struct * @lport: bits [7:0] = logical port, bit [8] = logical port valid * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. * @page: QSFP page * @set_page: set or ignore the page * @data: pointer to data buffer to be read/written to the I2C device. * @length: 1-16 for read, 1 for write. * @write: 0 read, 1 for write. * @cd: pointer to command details structure or NULL * * Read/Write SFF EEPROM (0x06EE) */ enum ice_status ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd) { struct ice_aqc_sff_eeprom *cmd; struct ice_aq_desc desc; enum ice_status status; if (!data || (mem_addr & 0xff00)) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); cmd = &desc.params.read_write_sff_param; desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->lport_num = (u8)(lport & 0xff); cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) & ICE_AQC_SFF_I2CBUS_7BIT_M) | ((set_page << ICE_AQC_SFF_SET_EEPROM_PAGE_S) & ICE_AQC_SFF_SET_EEPROM_PAGE_M)); cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff); cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); if (write) cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE); status = ice_aq_send_cmd(hw, &desc, data, length, cd); return status; } /** * ice_aq_prog_topo_dev_nvm * @hw: pointer to the hardware structure * @topo_params: pointer to structure storing topology parameters for a device * @cd: pointer to command details structure or NULL * * Program Topology Device NVM (0x06F2) * */ enum ice_status ice_aq_prog_topo_dev_nvm(struct ice_hw *hw, struct ice_aqc_link_topo_params *topo_params, struct ice_sq_cd *cd) { struct ice_aqc_prog_topo_dev_nvm *cmd; struct ice_aq_desc desc; cmd = &desc.params.prog_topo_dev_nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm); ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params), ICE_NONDMA_TO_NONDMA); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_read_topo_dev_nvm * @hw: pointer to the hardware structure * @topo_params: pointer to structure storing topology parameters for a device * @start_address: byte offset in the topology device NVM * @data: pointer to data buffer * @data_size: number of bytes to be read from the topology device NVM * @cd: pointer to command details structure or NULL * Read Topology Device NVM (0x06F3) * */ enum ice_status ice_aq_read_topo_dev_nvm(struct ice_hw *hw, struct ice_aqc_link_topo_params *topo_params, u32 start_address, u8 *data, u8 data_size, struct ice_sq_cd *cd) { struct ice_aqc_read_topo_dev_nvm *cmd; struct ice_aq_desc desc; enum ice_status status; if (!data || data_size == 0 || data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE) return ICE_ERR_PARAM; cmd = &desc.params.read_topo_dev_nvm; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm); - desc.datalen = data_size; + desc.datalen = CPU_TO_LE16(data_size); 
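	/* The descriptor's datalen field is consumed by firmware in
	 * little-endian byte order, so converting it with CPU_TO_LE16()
	 * keeps the buffer length correct on big-endian hosts as well.
	 */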
ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params), ICE_NONDMA_TO_NONDMA); cmd->start_address = CPU_TO_LE32(start_address); status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) return status; ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA); return ICE_SUCCESS; } /** * __ice_aq_get_set_rss_lut * @hw: pointer to the hardware structure * @params: RSS LUT parameters * @set: set true to set the table, false to get the table * * Internal function to get (0x0B05) or set (0x0B03) RSS look up table */ static enum ice_status __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) { u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; struct ice_aqc_get_set_rss_lut *cmd_resp; struct ice_aq_desc desc; enum ice_status status; u8 *lut; if (!params) return ICE_ERR_PARAM; vsi_handle = params->vsi_handle; lut = params->lut; if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) return ICE_ERR_PARAM; lut_size = params->lut_size; lut_type = params->lut_type; glob_lut_idx = params->global_lut_id; vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); cmd_resp = &desc.params.get_set_rss_lut; if (set) { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); } else { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); } cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | ICE_AQC_GSET_RSS_LUT_VSI_VALID); switch (lut_type) { case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); break; default: status = ICE_ERR_PARAM; goto ice_aq_get_set_rss_lut_exit; } if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); if (!set) goto ice_aq_get_set_rss_lut_send; } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { if (!set) goto ice_aq_get_set_rss_lut_send; } else { goto ice_aq_get_set_rss_lut_send; } /* LUT size is only valid for Global and PF table types */ switch (lut_size) { case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; break; case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; break; case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; break; } /* fall-through */ default: status = ICE_ERR_PARAM; goto ice_aq_get_set_rss_lut_exit; } ice_aq_get_set_rss_lut_send: cmd_resp->flags = CPU_TO_LE16(flags); status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); ice_aq_get_set_rss_lut_exit: return status; } /** * ice_aq_get_rss_lut * @hw: pointer to the hardware structure * @get_params: RSS LUT parameters used to specify which RSS LUT to get * * get the RSS lookup table, PF or VSI type */ enum ice_status ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) { return __ice_aq_get_set_rss_lut(hw, get_params, false); } /** * ice_aq_set_rss_lut * @hw: pointer to the hardware structure * @set_params: RSS LUT parameters used to 
specify how to set the RSS LUT * * set the RSS lookup table, PF or VSI type */ enum ice_status ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) { return __ice_aq_get_set_rss_lut(hw, set_params, true); } /** * __ice_aq_get_set_rss_key * @hw: pointer to the HW struct * @vsi_id: VSI FW index * @key: pointer to key info struct * @set: set true to set the key, false to get the key * * get (0x0B04) or set (0x0B02) the RSS key per VSI */ static enum ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, struct ice_aqc_get_set_rss_keys *key, bool set) { struct ice_aqc_get_set_rss_key *cmd_resp; u16 key_size = sizeof(*key); struct ice_aq_desc desc; cmd_resp = &desc.params.get_set_rss_key; if (set) { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); } else { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); } cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id << ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | ICE_AQC_GSET_RSS_KEY_VSI_VALID); return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); } /** * ice_aq_get_rss_key * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * @key: pointer to key info struct * * get the RSS key per VSI */ enum ice_status ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *key) { if (!ice_is_vsi_valid(hw, vsi_handle) || !key) return ICE_ERR_PARAM; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), key, false); } /** * ice_aq_set_rss_key * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * @keys: pointer to key info struct * * set the RSS key per VSI */ enum ice_status ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys) { if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) return ICE_ERR_PARAM; return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), keys, true); } /** * ice_aq_add_lan_txq * @hw: pointer to the hardware structure * @num_qgrps: Number of added queue groups * @qg_list: list of queue groups to be added * @buf_size: size of buffer for indirect command * @cd: pointer to command details structure or NULL * * Add Tx LAN queue (0x0C30) * * NOTE: * Prior to calling add Tx LAN queue: * Initialize the following as part of the Tx queue context: * Completion queue ID if the queue uses Completion queue, Quanta profile, * Cache profile and Packet shaper profile. * * After add Tx LAN queue AQ command is completed: * Interrupts should be associated with specific queues, * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue * flow. 
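 *
 * buf_size must equal the sum of ice_struct_size() over every queue group in
 * qg_list, or the function returns ICE_ERR_PARAM before sending the command.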
*/ enum ice_status ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_add_tx_qgrp *list; struct ice_aqc_add_txqs *cmd; struct ice_aq_desc desc; u16 i, sum_size = 0; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.add_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); if (!qg_list) return ICE_ERR_PARAM; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) return ICE_ERR_PARAM; for (i = 0, list = qg_list; i < num_qgrps; i++) { sum_size += ice_struct_size(list, txqs, list->num_txqs); list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + list->num_txqs); } if (buf_size != sum_size) return ICE_ERR_PARAM; desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->num_qgrps = num_qgrps; return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); } /** * ice_aq_dis_lan_txq * @hw: pointer to the hardware structure * @num_qgrps: number of groups in the list * @qg_list: the list of groups to disable * @buf_size: the total size of the qg_list buffer in bytes * @rst_src: if called due to reset, specifies the reset source * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * Disable LAN Tx queue (0x0C31) */ static enum ice_status ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { struct ice_aqc_dis_txq_item *item; struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; enum ice_status status; u16 i, sz = 0; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.dis_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); /* qg_list can be NULL only in VM/VF reset flow */ if (!qg_list && !rst_src) return ICE_ERR_PARAM; if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) return ICE_ERR_PARAM; cmd->num_entries = num_qgrps; cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & ICE_AQC_Q_DIS_TIMEOUT_M); switch (rst_src) { case ICE_VM_RESET: cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; cmd->vmvf_and_timeout |= CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); break; case ICE_VF_RESET: cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; /* In this case, FW expects vmvf_num to be absolute VF ID */ cmd->vmvf_and_timeout |= CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) & ICE_AQC_Q_DIS_VMVF_NUM_M); break; case ICE_NO_RESET: default: break; } /* flush pipe on time out */ cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; /* If no queue group info, we are in a reset flow. 
Issue the AQ */ if (!qg_list) goto do_aq; /* set RD bit to indicate that command buffer is provided by the driver * and it needs to be read by the firmware */ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); for (i = 0, item = qg_list; i < num_qgrps; i++) { u16 item_size = ice_struct_size(item, q_id, item->num_qs); /* If the num of queues is even, add 2 bytes of padding */ if ((item->num_qs % 2) == 0) item_size += 2; sz += item_size; item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); } if (buf_size != sz) return ICE_ERR_PARAM; do_aq: status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); if (status) { if (!qg_list) ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", vmvf_num, hw->adminq.sq_last_status); else ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", LE16_TO_CPU(qg_list[0].q_id[0]), hw->adminq.sq_last_status); } return status; } /** * ice_aq_move_recfg_lan_txq * @hw: pointer to the hardware structure * @num_qs: number of queues to move/reconfigure * @is_move: true if this operation involves node movement * @is_tc_change: true if this operation involves a TC change * @subseq_call: true if this operation is a subsequent call * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN * @timeout: timeout in units of 100 usec (valid values 0-50) * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN * @buf: struct containing src/dest TEID and per-queue info * @buf_size: size of buffer for indirect command * @txqs_moved: out param, number of queues successfully moved * @cd: pointer to command details structure or NULL * * Move / Reconfigure Tx LAN queues (0x0C32) */ enum ice_status ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, bool is_tc_change, bool subseq_call, bool flush_pipe, u8 timeout, u32 *blocked_cgds, struct ice_aqc_move_txqs_data *buf, u16 buf_size, u8 *txqs_moved, struct ice_sq_cd *cd) { struct ice_aqc_move_txqs *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.move_txqs; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs); #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX) return ICE_ERR_PARAM; if (is_tc_change && !flush_pipe && !blocked_cgds) return ICE_ERR_PARAM; if (!is_move && !is_tc_change) return ICE_ERR_PARAM; desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); if (is_move) cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE; if (is_tc_change) cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE; if (subseq_call) cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL; if (flush_pipe) cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE; cmd->num_qs = num_qs; cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) & ICE_AQC_Q_CMD_TIMEOUT_M); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status && txqs_moved) *txqs_moved = cmd->num_qs; if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN && is_tc_change && !flush_pipe) *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds); return status; } /** * ice_aq_add_rdma_qsets * @hw: pointer to the hardware structure * @num_qset_grps: Number of RDMA Qset groups * @qset_list: list of qset groups to be added * @buf_size: size of buffer for indirect command * @cd: pointer to command details structure or NULL * * Add Tx RDMA Qsets (0x0C33) */ enum ice_status ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, struct ice_aqc_add_rdma_qset_data *qset_list, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_add_rdma_qset_data *list; struct ice_aqc_add_rdma_qset *cmd; struct ice_aq_desc desc; u16 i, sum_size = 0; 
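	/* As with ice_aq_add_lan_txq(), buf_size is validated against the
	 * variable-length qset list before the command is sent.
	 */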
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); cmd = &desc.params.add_rdma_qset; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); if (!qset_list) return ICE_ERR_PARAM; if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) return ICE_ERR_PARAM; for (i = 0, list = qset_list; i < num_qset_grps; i++) { u16 num_qsets = LE16_TO_CPU(list->num_qsets); sum_size += ice_struct_size(list, rdma_qsets, num_qsets); list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + num_qsets); } if (buf_size != sum_size) return ICE_ERR_PARAM; desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd->num_qset_grps = num_qset_grps; return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); } /* End of FW Admin Queue command wrappers */ /** * ice_write_byte - write a byte to a packed context structure * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u8 src_byte, dest_byte, mask; u8 *from, *dest; u16 shift_width; /* copy from the next struct field */ from = src_ctx + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = (u8)(BIT(ce_info->width) - 1); src_byte = *from; src_byte &= mask; /* shift to correct alignment */ mask <<= shift_width; src_byte <<= shift_width; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA); dest_byte &= ~mask; /* get the bits not changing */ dest_byte |= src_byte; /* add in the new bits */ /* put it all back */ ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); } /** * ice_write_word - write a word to a packed context structure * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u16 src_word, mask; __le16 dest_word; u8 *from, *dest; u16 shift_width; /* copy from the next struct field */ from = src_ctx + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = BIT(ce_info->width) - 1; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_word = *(u16 *)from; src_word &= mask; /* shift to correct alignment */ mask <<= shift_width; src_word <<= shift_width; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA); dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */ dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */ /* put it all back */ ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); } /** * ice_write_dword - write a dword to a packed context structure * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u32 src_dword, mask; __le32 dest_dword; u8 *from, *dest; u16 shift_width; /* copy from the next struct field */ from = src_ctx + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 32 on an x86 machine, then the shift * operation will not work because the SHL instructions 
count is masked * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) mask = BIT(ce_info->width) - 1; else mask = (u32)~0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_dword = *(u32 *)from; src_dword &= mask; /* shift to correct alignment */ mask <<= shift_width; src_dword <<= shift_width; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA); dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */ dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */ /* put it all back */ ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA); } /** * ice_write_qword - write a qword to a packed context structure * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { u64 src_qword, mask; __le64 dest_qword; u8 *from, *dest; u16 shift_width; /* copy from the next struct field */ from = src_ctx + ce_info->offset; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 64 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) mask = BIT_ULL(ce_info->width) - 1; else mask = (u64)~0; /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_qword = *(u64 *)from; src_qword &= mask; /* shift to correct alignment */ mask <<= shift_width; src_qword <<= shift_width; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA); dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */ dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */ /* put it all back */ ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); } /** * ice_set_ctx - set context bits in packed structure * @hw: pointer to the hardware structure * @src_ctx: pointer to a generic non-packed context structure * @dest_ctx: pointer to memory for the packed structure * @ce_info: a description of the structure to be transformed */ enum ice_status ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) { int f; for (f = 0; ce_info[f].width; f++) { /* We have to deal with each element of the FW response * using the correct size so that we are correct regardless * of the endianness of the machine. */ if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", f, ce_info[f].width, ce_info[f].size_of); continue; } switch (ce_info[f].size_of) { case sizeof(u8): ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); break; case sizeof(u16): ice_write_word(src_ctx, dest_ctx, &ce_info[f]); break; case sizeof(u32): ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); break; case sizeof(u64): ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); break; default: return ICE_ERR_INVAL_SIZE; } } return ICE_SUCCESS; } /** * ice_aq_get_internal_data * @hw: pointer to the hardware structure * @cluster_id: specific cluster to dump * @table_id: table ID within cluster * @start: index of line in the block to read * @buf: dump buffer * @buf_size: dump buffer size * @ret_buf_size: return buffer size (returned by FW) * @ret_next_table: next block to read (returned by FW) * @ret_next_index: next index to read (returned by FW) * @cd: pointer to command details structure * * Get internal FW/HW data (0xFF08) for debug purposes. */ enum ice_status ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id, u32 start, void *buf, u16 buf_size, u16 *ret_buf_size, u16 *ret_next_table, u32 *ret_next_index, struct ice_sq_cd *cd) { struct ice_aqc_debug_dump_internals *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.debug_dump; if (buf_size == 0 || !buf) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals); cmd->cluster_id = cluster_id; cmd->table_id = CPU_TO_LE16(table_id); cmd->idx = CPU_TO_LE32(start); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status) { if (ret_buf_size) *ret_buf_size = LE16_TO_CPU(desc.datalen); if (ret_next_table) *ret_next_table = LE16_TO_CPU(cmd->table_id); if (ret_next_index) *ret_next_index = LE32_TO_CPU(cmd->idx); } return status; } /** * ice_read_byte - read context byte into struct * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { u8 dest_byte, mask; u8 *src, *target; u16 shift_width; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = (u8)(BIT(ce_info->width) - 1); /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = src_ctx + (ce_info->lsb / 8); ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA); dest_byte &= ~(mask); dest_byte >>= shift_width; /* get the address from the struct field */ target = dest_ctx + ce_info->offset; /* put it back in the struct */ ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); } /** * ice_read_word - read context word into struct * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { u16 dest_word, mask; u8 *src, *target; __le16 src_word; u16 shift_width; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; mask = BIT(ce_info->width) - 1; /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = src_ctx + (ce_info->lsb / 8); ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA); /* the data in the memory is stored as little endian so mask it * correctly */ src_word &= ~(CPU_TO_LE16(mask)); /* get the data back into host order before shifting */ dest_word = LE16_TO_CPU(src_word); dest_word 
>>= shift_width; /* get the address from the struct field */ target = dest_ctx + ce_info->offset; /* put it back in the struct */ ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); } /** * ice_read_dword - read context dword into struct * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { u32 dest_dword, mask; __le32 src_dword; u8 *src, *target; u16 shift_width; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 32 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 5 bits so the shift will do nothing */ if (ce_info->width < 32) mask = BIT(ce_info->width) - 1; else mask = (u32)~0; /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = src_ctx + (ce_info->lsb / 8); ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA); /* the data in the memory is stored as little endian so mask it * correctly */ src_dword &= ~(CPU_TO_LE32(mask)); /* get the data back into host order before shifting */ dest_dword = LE32_TO_CPU(src_dword); dest_dword >>= shift_width; /* get the address from the struct field */ target = dest_ctx + ce_info->offset; /* put it back in the struct */ ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA); } /** * ice_read_qword - read context qword into struct * @src_ctx: the context structure to read from * @dest_ctx: the context to be written to * @ce_info: a description of the struct to be filled */ static void ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { u64 dest_qword, mask; __le64 src_qword; u8 *src, *target; u16 shift_width; /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; /* if the field width is exactly 64 on an x86 machine, then the shift * operation will not work because the SHL instructions count is masked * to 6 bits so the shift will do nothing */ if (ce_info->width < 64) mask = BIT_ULL(ce_info->width) - 1; else mask = (u64)~0; /* shift to correct alignment */ mask <<= shift_width; /* get the current bits from the src bit string */ src = src_ctx + (ce_info->lsb / 8); ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA); /* the data in the memory is stored as little endian so mask it * correctly */ src_qword &= ~(CPU_TO_LE64(mask)); /* get the data back into host order before shifting */ dest_qword = LE64_TO_CPU(src_qword); dest_qword >>= shift_width; /* get the address from the struct field */ target = dest_ctx + ce_info->offset; /* put it back in the struct */ ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); } /** * ice_get_ctx - extract context bits from a packed structure * @src_ctx: pointer to a generic packed context structure * @dest_ctx: pointer to a generic non-packed context structure * @ce_info: a description of the structure to be read from */ enum ice_status ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info) { int f; for (f = 0; ce_info[f].width; f++) { switch (ce_info[f].size_of) { case 1: ice_read_byte(src_ctx, dest_ctx, &ce_info[f]); break; case 2: ice_read_word(src_ctx, dest_ctx, &ce_info[f]); break; case 4: ice_read_dword(src_ctx, dest_ctx, &ce_info[f]); break; case 8: ice_read_qword(src_ctx, dest_ctx, &ce_info[f]); break; default: /* nothing to do, just keep going */ break; } } 
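	/* ce_info is terminated by a zero-width entry, so reaching this
	 * point means every described field has been decoded
	 */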
return ICE_SUCCESS; } /** * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC * @hw: pointer to the HW struct * @vsi_handle: software VSI handle * @tc: TC number * @q_handle: software queue handle */ struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) { struct ice_vsi_ctx *vsi; struct ice_q_ctx *q_ctx; vsi = ice_get_vsi_ctx(hw, vsi_handle); if (!vsi) return NULL; if (q_handle >= vsi->num_lan_q_entries[tc]) return NULL; if (!vsi->lan_q_ctx[tc]) return NULL; q_ctx = vsi->lan_q_ctx[tc]; return &q_ctx[q_handle]; } /** * ice_ena_vsi_txq * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number * @q_handle: software queue handle * @num_qgrps: Number of added queue groups * @buf: list of queue groups to be added * @buf_size: size of buffer for indirect command * @cd: pointer to command details structure or NULL * * This function adds one LAN queue */ enum ice_status ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_sched_node *parent; struct ice_q_ctx *q_ctx; enum ice_status status; struct ice_hw *hw; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; if (num_qgrps > 1 || buf->num_txqs > 1) return ICE_ERR_MAX_LIMIT; hw = pi->hw; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); if (!q_ctx) { ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", q_handle); status = ICE_ERR_PARAM; goto ena_txq_exit; } /* find a parent node */ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_LAN); if (!parent) { status = ICE_ERR_PARAM; goto ena_txq_exit; } buf->parent_teid = parent->info.node_teid; node.parent_teid = parent->info.node_teid; /* Mark that the values in the "generic" section as valid. The default * value in the "generic" section is zero. This means that : * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. * - 0 priority among siblings, indicated by Bit 1-3. * - WFQ, indicated by Bit 4. * - 0 Adjustment value is used in PSM credit update flow, indicated by * Bit 5-6. * - Bit 7 is reserved. * Without setting the generic section as valid in valid_sections, the * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 
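	 * For example, leaving the generic field at zero therefore selects
	 * BPS scheduling, sibling priority 0, WFQ, and a zero PSM credit
	 * adjustment.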
*/ buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR; buf->txqs[0].info.generic = 0; buf->txqs[0].info.cir_bw.bw_profile_idx = CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); buf->txqs[0].info.cir_bw.bw_alloc = CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); buf->txqs[0].info.eir_bw.bw_profile_idx = CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); buf->txqs[0].info.eir_bw.bw_alloc = CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); /* add the LAN queue */ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", LE16_TO_CPU(buf->txqs[0].txq_id), hw->adminq.sq_last_status); goto ena_txq_exit; } node.node_teid = buf->txqs[0].q_teid; node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; q_ctx->q_handle = q_handle; q_ctx->q_teid = LE32_TO_CPU(node.node_teid); /* add a leaf node into scheduler tree queue layer */ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); if (!status) status = ice_sched_replay_q_bw(pi, q_ctx); ena_txq_exit: ice_release_lock(&pi->sched_lock); return status; } /** * ice_dis_vsi_txq * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number * @num_queues: number of queues * @q_handles: pointer to software queue handle array * @q_ids: pointer to the q_id array * @q_teids: pointer to queue node teids * @rst_src: if called due to reset, specifies the reset source * @vmvf_num: the relative VM or VF number that is undergoing the reset * @cd: pointer to command details structure or NULL * * This function removes queues and their corresponding nodes in SW DB */ enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handles, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_aqc_dis_txq_item *qg_list; struct ice_q_ctx *q_ctx; struct ice_hw *hw; u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; hw = pi->hw; if (!num_queues) { /* if queue is disabled already yet the disable queue command * has to be sent to complete the VF reset, then call * ice_aq_dis_lan_txq without any queue information */ if (rst_src) return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, vmvf_num, NULL); return ICE_ERR_CFG; } buf_size = ice_struct_size(qg_list, q_id, 1); qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size); if (!qg_list) return ICE_ERR_NO_MEMORY; ice_acquire_lock(&pi->sched_lock); for (i = 0; i < num_queues; i++) { struct ice_sched_node *node; node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); if (!node) continue; q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); if (!q_ctx) { ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", q_handles[i]); continue; } if (q_ctx->q_handle != q_handles[i]) { ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", q_ctx->q_handle, q_handles[i]); continue; } qg_list->parent_teid = node->info.parent_teid; qg_list->num_qs = 1; qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]); status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, vmvf_num, cd); if (status != ICE_SUCCESS) break; ice_free_sched_node(pi, node); q_ctx->q_handle = ICE_INVAL_Q_HANDLE; } ice_release_lock(&pi->sched_lock); ice_free(hw, qg_list); return status; } /** * ice_cfg_vsi_qs - configure the new/existing VSI queues * @pi: port information structure * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @maxqs: max queues array 
per TC * @owner: LAN or RDMA * * This function adds/updates the VSI queues per TC. */ static enum ice_status ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *maxqs, u8 owner) { enum ice_status status = ICE_SUCCESS; u8 i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; if (!ice_is_vsi_valid(pi->hw, vsi_handle)) return ICE_ERR_PARAM; ice_acquire_lock(&pi->sched_lock); ice_for_each_traffic_class(i) { /* configuration is possible only if TC node is present */ if (!ice_sched_get_tc_node(pi, i)) continue; status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, ice_is_tc_ena(tc_bitmap, i)); if (status) break; } ice_release_lock(&pi->sched_lock); return status; } /** * ice_cfg_vsi_lan - configure VSI LAN queues * @pi: port information structure * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @max_lanqs: max LAN queues array per TC * * This function adds/updates the VSI LAN queues per TC. */ enum ice_status ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_lanqs) { return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, ICE_SCHED_NODE_OWNER_LAN); } /** * ice_cfg_vsi_rdma - configure the VSI RDMA queues * @pi: port information structure * @vsi_handle: software VSI handle * @tc_bitmap: TC bitmap * @max_rdmaqs: max RDMA queues array per TC * * This function adds/updates the VSI RDMA queues per TC. */ enum ice_status ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_rdmaqs) { return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, ICE_SCHED_NODE_OWNER_RDMA); } /** * ice_ena_vsi_rdma_qset * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number * @rdma_qset: pointer to RDMA qset * @num_qsets: number of RDMA qsets * @qset_teid: pointer to qset node teids * * This function adds RDMA qset */ enum ice_status ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) { struct ice_aqc_txsched_elem_data node = { 0 }; struct ice_aqc_add_rdma_qset_data *buf; struct ice_sched_node *parent; enum ice_status status; struct ice_hw *hw; u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; hw = pi->hw; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; buf_size = ice_struct_size(buf, rdma_qsets, num_qsets); buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size); if (!buf) return ICE_ERR_NO_MEMORY; ice_acquire_lock(&pi->sched_lock); parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, ICE_SCHED_NODE_OWNER_RDMA); if (!parent) { status = ICE_ERR_PARAM; goto rdma_error_exit; } buf->parent_teid = parent->info.node_teid; node.parent_teid = parent->info.node_teid; buf->num_qsets = CPU_TO_LE16(num_qsets); for (i = 0; i < num_qsets; i++) { buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]); buf->rdma_qsets[i].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR; buf->rdma_qsets[i].info.generic = 0; buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); buf->rdma_qsets[i].info.cir_bw.bw_alloc = CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID); buf->rdma_qsets[i].info.eir_bw.bw_alloc = CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT); } status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); if (status != ICE_SUCCESS) { ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); goto 
rdma_error_exit; } node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; for (i = 0; i < num_qsets; i++) { node.node_teid = buf->rdma_qsets[i].qset_teid; status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); if (status) break; qset_teid[i] = LE32_TO_CPU(node.node_teid); } rdma_error_exit: ice_release_lock(&pi->sched_lock); ice_free(hw, buf); return status; } /** * ice_dis_vsi_rdma_qset - free RDMA resources * @pi: port_info struct * @count: number of RDMA qsets to free * @qset_teid: TEID of qset node * @q_id: list of queue IDs being disabled */ enum ice_status ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id) { struct ice_aqc_dis_txq_item *qg_list; enum ice_status status = ICE_SUCCESS; struct ice_hw *hw; u16 qg_size; int i; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; hw = pi->hw; qg_size = ice_struct_size(qg_list, q_id, 1); qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size); if (!qg_list) return ICE_ERR_NO_MEMORY; ice_acquire_lock(&pi->sched_lock); for (i = 0; i < count; i++) { struct ice_sched_node *node; node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]); if (!node) continue; qg_list->parent_teid = node->info.parent_teid; qg_list->num_qs = 1; qg_list->q_id[0] = CPU_TO_LE16(q_id[i] | ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, ICE_NO_RESET, 0, NULL); if (status) break; ice_free_sched_node(pi, node); } ice_release_lock(&pi->sched_lock); ice_free(hw, qg_list); return status; } /** * ice_is_main_vsi - checks whether the VSI is main VSI * @hw: pointer to the HW struct * @vsi_handle: VSI handle * * Checks whether the VSI is the main VSI (the first PF VSI created on * given PF). */ static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle) { return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle]; } /** * ice_replay_pre_init - replay pre initialization * @hw: pointer to the HW struct * @sw: pointer to switch info struct for which function initializes filters * * Initializes required config data for VSI, FD, ACL, and RSS before replay. */ enum ice_status ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw) { enum ice_status status; u8 i; /* Delete old entries from replay filter list head if there is any */ ice_rm_sw_replay_rule_info(hw, sw); /* In start of replay, move entries into replay_rules list, it * will allow adding rules entries back to filt_rules list, * which is operational list. */ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules, &sw->recp_list[i].filt_replay_rules); ice_sched_replay_agg_vsi_preinit(hw); status = ice_sched_replay_root_node_bw(hw->port_info); if (status) return status; return ice_sched_replay_tc_node_bw(hw->port_info); } /** * ice_replay_vsi - replay VSI configuration * @hw: pointer to the HW struct * @vsi_handle: driver VSI handle * * Restore all VSI configuration after reset. It is required to call this * function with main VSI first. 
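 * (ice_replay_pre_init() runs only for the main VSI, so replaying any other
 * VSI first would skip that required setup.)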
*/ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) { struct ice_switch_info *sw = hw->switch_info; struct ice_port_info *pi = hw->port_info; enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; /* Replay pre-initialization if there is any */ if (ice_is_main_vsi(hw, vsi_handle)) { status = ice_replay_pre_init(hw, sw); if (status) return status; } /* Replay per VSI all RSS configurations */ status = ice_replay_rss_cfg(hw, vsi_handle); if (status) return status; /* Replay per VSI all filters */ status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle); if (!status) status = ice_replay_vsi_agg(hw, vsi_handle); return status; } /** * ice_replay_post - post replay configuration cleanup * @hw: pointer to the HW struct * * Post replay cleanup. */ void ice_replay_post(struct ice_hw *hw) { /* Delete old entries from the replay filter list head */ ice_rm_all_sw_replay_rule_info(hw); ice_sched_replay_agg(hw); } /** * ice_stat_update40 - read 40 bit stat from the chip and update stat values * @hw: ptr to the hardware info * @reg: offset of 64 bit HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) { u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); /* device stats are not reset at PFR; they likely will not be zeroed * when the driver starts. Thus, save the value from the first read * without adding to the statistic value so that we report stats which * count up from zero. */ if (!prev_stat_loaded) { *prev_stat = new_data; return; } /* Calculate the difference between the new and old values, and then * add it to the software stat value. */ if (new_data >= *prev_stat) *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; /* Update the previously stored value to prepare for next read */ *prev_stat = new_data; }
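The roll-over branch above is easiest to see with concrete numbers. The following standalone sketch (illustrative only; plain C stands in for the driver's BIT_ULL/rd64 environment) reproduces the arithmetic for a 40-bit counter that wraps between two reads:

/* Illustrative only: the 40-bit rollover math used by ice_stat_update40(). */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t prev_stat = (1ULL << 40) - 10;	/* counter just before the wrap */
	uint64_t new_data = 5;			/* counter after wrapping */
	uint64_t cur_stat = 0;

	if (new_data >= prev_stat)
		cur_stat += new_data - prev_stat;
	else	/* manage the potential roll-over */
		cur_stat += (new_data + (1ULL << 40)) - prev_stat;

	assert(cur_stat == 15);	/* 10 counts before the wrap + 5 after */
	return 0;
}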
/** * ice_stat_update32 - read 32 bit stat from the chip and update stat values * @hw: ptr to the hardware info * @reg: offset of HW register to read from * @prev_stat_loaded: bool to specify if previous stats are loaded * @prev_stat: ptr to previous loaded stat value * @cur_stat: ptr to current stat value */ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat) { u32 new_data; new_data = rd32(hw, reg); /* device stats are not reset at PFR; they likely will not be zeroed * when the driver starts. Thus, save the value from the first read * without adding to the statistic value so that we report stats which * count up from zero. */ if (!prev_stat_loaded) { *prev_stat = new_data; return; } /* Calculate the difference between the new and old values, and then * add it to the software stat value. */ if (new_data >= *prev_stat) *cur_stat += new_data - *prev_stat; else /* to manage the potential roll-over */ *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; /* Update the previously stored value to prepare for next read */ *prev_stat = new_data; } /** * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values * @hw: ptr to the hardware info * @vsi_handle: VSI handle * @prev_stat_loaded: bool to specify if the previous stat values are loaded * @cur_stats: ptr to current stats structure * * The GLV_REPC statistic register actually tracks two 16-bit statistics, and * thus cannot be read using the normal ice_stat_update32 function. * * Read the GLV_REPC register associated with the given VSI, and update the * rx_no_desc and rx_error values in the ice_eth_stats structure. * * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be * cleared each time it's read. * * Note that the GLV_RDPC register also counts the causes that would trigger * GLV_REPC. However, it does not give the finer grained detail about why the * packets are being dropped. The GLV_REPC values can be used to distinguish * whether Rx packets are dropped due to errors or due to no available * descriptors. */ void ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded, struct ice_eth_stats *cur_stats) { u16 vsi_num, no_desc, error_cnt; u32 repc; if (!ice_is_vsi_valid(hw, vsi_handle)) return; vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); /* If we haven't loaded stats yet, just clear the current value */ if (!prev_stat_loaded) { wr32(hw, GLV_REPC(vsi_num), 0); return; } repc = rd32(hw, GLV_REPC(vsi_num)); no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S; error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S; /* Clear the count by writing to the stats register */ wr32(hw, GLV_REPC(vsi_num), 0); cur_stats->rx_no_desc += no_desc; cur_stats->rx_errors += error_cnt; } /** * ice_aq_alternate_write * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be written * @reg_val0: value to be written under 'reg_addr0' * @reg_addr1: address of second dword to be written * @reg_val1: value to be written under 'reg_addr1' * * Write one or two dwords to the alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. */ enum ice_status ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1) { struct ice_aqc_read_write_alt_direct *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.read_write_alt_direct; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct); cmd->dword0_addr = CPU_TO_LE32(reg_addr0); cmd->dword1_addr = CPU_TO_LE32(reg_addr1); cmd->dword0_value = CPU_TO_LE32(reg_val0); cmd->dword1_value = CPU_TO_LE32(reg_val1); status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); return status; } /** * ice_aq_alternate_read * @hw: pointer to the hardware structure * @reg_addr0: address of first dword to be read * @reg_val0: pointer for data read from 'reg_addr0' * @reg_addr1: address of second dword to be read * @reg_val1: pointer for data read from 'reg_addr1' * * Read one or two dwords from the alternate structure. Fields are indicated * by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1' pointer * is not passed, then only the register at 'reg_addr0' is read.
*/ enum ice_status ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1) { struct ice_aqc_read_write_alt_direct *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.read_write_alt_direct; if (!reg_val0) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct); cmd->dword0_addr = CPU_TO_LE32(reg_addr0); cmd->dword1_addr = CPU_TO_LE32(reg_addr1); status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); if (status == ICE_SUCCESS) { *reg_val0 = LE32_TO_CPU(cmd->dword0_value); if (reg_val1) *reg_val1 = LE32_TO_CPU(cmd->dword1_value); } return status; } /** * ice_aq_alternate_write_done * @hw: pointer to the HW structure. * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS * @reset_needed: indicates the SW should trigger GLOBAL reset * * Indicates to the FW that alternate structures have been changed. */ enum ice_status ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed) { struct ice_aqc_done_alt_write *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.done_alt_write; if (!reset_needed) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write); cmd->flags = bios_mode; status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); if (!status) *reset_needed = (LE16_TO_CPU(cmd->flags) & ICE_AQC_RESP_RESET_NEEDED) != 0; return status; } /** * ice_aq_alternate_clear * @hw: pointer to the HW structure. * * Clear the alternate structures of the port from which the function * is called. */ enum ice_status ice_aq_alternate_clear(struct ice_hw *hw) { struct ice_aq_desc desc; enum ice_status status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write); status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); return status; } /** * ice_sched_query_elem - query element information from HW * @hw: pointer to the HW struct * @node_teid: node TEID to be queried * @buf: buffer for element information * * This function queries HW element information */ enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf) { u16 buf_size, num_elem_ret = 0; enum ice_status status; buf_size = sizeof(*buf); ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM); buf->node_teid = CPU_TO_LE32(node_teid); status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, NULL); if (status != ICE_SUCCESS || num_elem_ret != 1) ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); return status; } /** * ice_get_fw_mode - returns FW mode * @hw: pointer to the HW struct */ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw) { #define ICE_FW_MODE_DBG_M BIT(0) #define ICE_FW_MODE_REC_M BIT(1) #define ICE_FW_MODE_ROLLBACK_M BIT(2) u32 fw_mode; /* check the current FW mode */ fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M; if (fw_mode & ICE_FW_MODE_DBG_M) return ICE_FW_MODE_DBG; else if (fw_mode & ICE_FW_MODE_REC_M) return ICE_FW_MODE_REC; else if (fw_mode & ICE_FW_MODE_ROLLBACK_M) return ICE_FW_MODE_ROLLBACK; else return ICE_FW_MODE_NORMAL; } /** * ice_get_cur_lldp_persist_status * @hw: pointer to the HW struct * @lldp_status: return value of LLDP persistent status * * Get the current status of LLDP persistence */ enum ice_status ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status) { struct ice_port_info *pi = hw->port_info; enum ice_status ret; __le32 raw_data; u32 data, mask; if (!lldp_status) return ICE_ERR_BAD_PTR; ret = ice_acquire_nvm(hw, ICE_RES_READ); if
(ret) return ret; ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID, ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET, ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false, true, NULL); if (!ret) { data = LE32_TO_CPU(raw_data); mask = ICE_AQC_NVM_LLDP_STATUS_M << (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport); data = data & mask; *lldp_status = data >> (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport); } ice_release_nvm(hw); return ret; } /** * ice_get_dflt_lldp_persist_status * @hw: pointer to the HW struct * @lldp_status: return value of LLDP persistent status * * Get the default status of LLDP persistence */ enum ice_status ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status) { struct ice_port_info *pi = hw->port_info; u32 data, mask, loc_data, loc_data_tmp; enum ice_status ret; __le16 loc_raw_data; __le32 raw_data; if (!lldp_status) return ICE_ERR_BAD_PTR; ret = ice_acquire_nvm(hw, ICE_RES_READ); if (ret) return ret; /* Read the offset of EMP_SR_PTR */ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, ICE_AQC_NVM_EMP_SR_PTR_OFFSET, ICE_AQC_NVM_EMP_SR_PTR_RD_LEN, &loc_raw_data, false, true, NULL); if (ret) goto exit; loc_data = LE16_TO_CPU(loc_raw_data); if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) { loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M; loc_data *= ICE_AQC_NVM_SECTOR_UNIT; } else { loc_data *= ICE_AQC_NVM_WORD_UNIT; } /* Read the offset of LLDP configuration pointer */ loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET; ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data, ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data, false, true, NULL); if (ret) goto exit; loc_data_tmp = LE16_TO_CPU(loc_raw_data); loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT; loc_data += loc_data_tmp; /* We need to skip LLDP configuration section length (2 bytes) */ loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN; /* Read the default LLDP configuration */ ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data, ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false, true, NULL); if (!ret) { data = LE32_TO_CPU(raw_data); mask = ICE_AQC_NVM_LLDP_STATUS_M << (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport); data = data & mask; *lldp_status = data >> (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport); } exit: ice_release_nvm(hw); return ret; } /** * ice_aq_read_i2c * @hw: pointer to the hw struct * @topo_addr: topology address for a device to communicate with * @bus_addr: 7-bit I2C bus address * @addr: I2C memory address (I2C offset) with up to 16 bits * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size, * bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes) * @data: pointer to data (0 to 16 bytes) to be read from the I2C device * @cd: pointer to command details structure or NULL * * Read I2C (0x06E2) */ enum ice_status ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, struct ice_sq_cd *cd) { struct ice_aq_desc desc = { 0 }; struct ice_aqc_i2c *cmd; enum ice_status status; u8 data_size; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c); cmd = &desc.params.read_write_i2c; if (!data) return ICE_ERR_PARAM; data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S; cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr); cmd->topo_addr = topo_addr; cmd->i2c_params = params; cmd->i2c_addr = addr; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (!status) { struct ice_aqc_read_i2c_resp *resp; u8 i; resp = &desc.params.read_i2c_resp; for (i = 0; i < data_size; i++) { *data = resp->i2c_data[i]; data++; } } return
status; } /** * ice_aq_write_i2c * @hw: pointer to the hw struct * @topo_addr: topology address for a device to communicate with * @bus_addr: 7-bit I2C bus address * @addr: I2C memory address (I2C offset) with up to 16 bits * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes) * @data: pointer to data (0 to 4 bytes) to be written to the I2C device * @cd: pointer to command details structure or NULL * * Write I2C (0x06E3) */ enum ice_status ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, struct ice_sq_cd *cd) { struct ice_aq_desc desc = { 0 }; struct ice_aqc_i2c *cmd; u8 i, data_size; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c); cmd = &desc.params.read_write_i2c; data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S; /* data_size limited to 4 */ if (data_size > 4) return ICE_ERR_PARAM; cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr); cmd->topo_addr = topo_addr; cmd->i2c_params = params; cmd->i2c_addr = addr; for (i = 0; i < data_size; i++) { cmd->i2c_data[i] = *data; data++; } return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_set_gpio * @hw: pointer to the hw struct * @gpio_ctrl_handle: GPIO controller node handle * @pin_idx: IO Number of the GPIO that needs to be set * @value: SW provided IO value to set in the LSB * @cd: pointer to command details structure or NULL * * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology */ enum ice_status ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, struct ice_sq_cd *cd) { struct ice_aqc_gpio *cmd; struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); cmd = &desc.params.read_write_gpio; - cmd->gpio_ctrl_handle = gpio_ctrl_handle; + cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle); cmd->gpio_num = pin_idx; cmd->gpio_val = value ?
1 : 0; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_get_gpio * @hw: pointer to the hw struct * @gpio_ctrl_handle: GPIO controller node handle * @pin_idx: IO Number of the GPIO that needs to be set * @value: IO value read * @cd: pointer to command details structure or NULL * * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of * the topology */ enum ice_status ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd) { struct ice_aqc_gpio *cmd; struct ice_aq_desc desc; enum ice_status status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); cmd = &desc.params.read_write_gpio; - cmd->gpio_ctrl_handle = gpio_ctrl_handle; + cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle); cmd->gpio_num = pin_idx; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) return status; *value = !!cmd->gpio_val; return ICE_SUCCESS; } /** - * ice_fw_supports_link_override + * ice_is_fw_api_min_ver * @hw: pointer to the hardware structure + * @maj: major version + * @min: minor version + * @patch: patch version * - * Checks if the firmware supports link override + * Checks if the firmware is minimum version */ -bool ice_fw_supports_link_override(struct ice_hw *hw) +static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch) { - if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { - if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) + if (hw->api_maj_ver == maj) { + if (hw->api_min_ver > min) return true; - if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && - hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH) + if (hw->api_min_ver == min && hw->api_patch >= patch) return true; - } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { + } else if (hw->api_maj_ver > maj) { return true; } return false; } +/** + * ice_is_fw_min_ver + * @hw: pointer to the hardware structure + * @branch: branch version + * @maj: major version + * @min: minor version + * @patch: patch version + * + * Checks if the firmware is minimum version + */ +static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min, + u8 patch) +{ + if (hw->fw_branch == branch) { + if (hw->fw_maj_ver > maj) + return true; + if (hw->fw_maj_ver == maj) { + if (hw->fw_min_ver > min) + return true; + if (hw->fw_min_ver == min && hw->fw_patch >= patch) + return true; + } + } else if (hw->fw_branch > branch) { + return true; + } + + return false; +} + +/** + * ice_fw_supports_link_override + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports link override + */ +bool ice_fw_supports_link_override(struct ice_hw *hw) +{ + return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ, + ICE_FW_API_LINK_OVERRIDE_MIN, + ICE_FW_API_LINK_OVERRIDE_PATCH); +} + /** * ice_get_link_default_override * @ldo: pointer to the link default override struct * @pi: pointer to the port info struct * * Gets the link default override for a port */ enum ice_status ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi) { u16 i, tlv, tlv_len, tlv_start, buf, offset; struct ice_hw *hw = pi->hw; enum ice_status status; status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); return status; } /* Each port has its own config; calculate for our port */ tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + ICE_SR_PFA_LINK_OVERRIDE_OFFSET; /* link options first 
*/ status = ice_read_sr_word(hw, tlv_start, &buf); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> ICE_LINK_OVERRIDE_PHY_CFG_S; /* link PHY config */ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; status = ice_read_sr_word(hw, offset, &buf); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); return status; } ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; /* PHY types low */ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { status = ice_read_sr_word(hw, (offset + i), &buf); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } /* shift 16 bits at a time to fill 64 bits */ ldo->phy_type_low |= ((u64)buf << (i * 16)); } /* PHY types high */ offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { status = ice_read_sr_word(hw, (offset + i), &buf); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); return status; } /* shift 16 bits at a time to fill 64 bits */ ldo->phy_type_high |= ((u64)buf << (i * 16)); } return status; } /** * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled * @caps: get PHY capability data */ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) { if (caps->caps & ICE_AQC_PHY_AN_MODE || caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 | ICE_AQC_PHY_AN_EN_CLAUSE73 | ICE_AQC_PHY_AN_EN_CLAUSE37)) return true; return false; } /** * ice_is_fw_health_report_supported * @hw: pointer to the hardware structure * * Return true if firmware supports health status reports, * false otherwise */ bool ice_is_fw_health_report_supported(struct ice_hw *hw) { if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ) return true; if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) { if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN) return true; if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN && hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH) return true; } return false; } /** * ice_aq_set_health_status_config - Configure FW health events * @hw: pointer to the HW struct * @event_source: type of diagnostic events to enable * @cd: pointer to command details structure or NULL * * Configure the health status event types that the firmware will send to this * PF. 
The supported event types are: PF-specific, all PFs, and global */ enum ice_status ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, struct ice_sq_cd *cd) { struct ice_aqc_set_health_status_config *cmd; struct ice_aq_desc desc; cmd = &desc.params.set_health_status_config; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_config); cmd->event_source = event_source; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_get_port_options * @hw: pointer to the hw struct * @options: buffer for the resultant port options * @option_count: input - size of the buffer in port options structures, * output - number of returned port options * @lport: logical port to call the command with (optional) * @lport_valid: when false, FW uses port owned by the PF instead of lport, * when PF owns more than 1 port it must be true * @active_option_idx: index of active port option in returned buffer * @active_option_valid: active option in returned buffer is valid * * Calls Get Port Options AQC (0x06ea) and verifies result. */ enum ice_status ice_aq_get_port_options(struct ice_hw *hw, struct ice_aqc_get_port_options_elem *options, u8 *option_count, u8 lport, bool lport_valid, u8 *active_option_idx, bool *active_option_valid) { struct ice_aqc_get_port_options *cmd; struct ice_aq_desc desc; enum ice_status status; u8 pmd_count; u8 max_speed; u8 i; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* options buffer shall be able to hold max returned options */ if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) return ICE_ERR_PARAM; cmd = &desc.params.get_port_options; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); if (lport_valid) cmd->lport_num = lport; cmd->lport_num_valid = lport_valid; status = ice_aq_send_cmd(hw, &desc, options, *option_count * sizeof(*options), NULL); if (status != ICE_SUCCESS) return status; /* verify direct FW response & set output parameters */ *option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M; ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); *active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID; if (*active_option_valid) { *active_option_idx = cmd->port_options & ICE_AQC_PORT_OPT_ACTIVE_M; if (*active_option_idx > (*option_count - 1)) return ICE_ERR_OUT_OF_RANGE; ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", *active_option_idx); } /* verify indirect FW response & mask output options fields */ for (i = 0; i < *option_count; i++) { options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M; options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M; pmd_count = options[i].pmd; max_speed = options[i].max_lane_speed; ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", pmd_count, max_speed); /* check only entries containing valid max pmd speed values, * other reserved values may be returned, when logical port * used is unrelated to specific option */ if (max_speed <= ICE_AQC_PORT_OPT_MAX_LANE_100G) { if (pmd_count > ICE_MAX_PORT_PER_PCI_DEV) return ICE_ERR_OUT_OF_RANGE; if (pmd_count > 2 && max_speed > ICE_AQC_PORT_OPT_MAX_LANE_25G) return ICE_ERR_CFG; if (pmd_count > 7 && max_speed > ICE_AQC_PORT_OPT_MAX_LANE_10G) return ICE_ERR_CFG; } } return ICE_SUCCESS; } /** * ice_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the HW struct * @mib_type: Local, Remote or both Local and Remote MIBs * @buf: pointer to the caller-supplied buffer to store the MIB block * @buf_size: size of the buffer (in bytes) * @cd: pointer to command details structure or NULL * * Set the LLDP MIB. 
(0x0A08) */ enum ice_status ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_lldp_set_local_mib *cmd; struct ice_aq_desc desc; cmd = &desc.params.lldp_set_mib; if (buf_size == 0 || !buf) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD); desc.datalen = CPU_TO_LE16(buf_size); cmd->type = mib_type; cmd->length = CPU_TO_LE16(buf_size); return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); } /** * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl * @hw: pointer to HW struct */ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) { - if (hw->mac_type != ICE_MAC_E810) + if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC) return false; - if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) { - if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN) - return true; - if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN && - hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH) - return true; - } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) { - return true; - } - return false; + return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ, + ICE_FW_API_LLDP_FLTR_MIN, + ICE_FW_API_LLDP_FLTR_PATCH); } /** * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter * @hw: pointer to HW struct * @vsi_num: absolute HW index for VSI * @add: boolean for if adding or removing a filter */ enum ice_status ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) { struct ice_aqc_lldp_filter_ctrl *cmd; struct ice_aq_desc desc; cmd = &desc.params.lldp_filter_ctrl; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); if (add) cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD; else cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; cmd->vsi_num = CPU_TO_LE16(vsi_num); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } +/** + * ice_lldp_execute_pending_mib - execute LLDP pending MIB request + * @hw: pointer to HW struct + */ +enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + /** * ice_fw_supports_report_dflt_cfg * @hw: pointer to the hardware structure * * Checks if the firmware supports report default configuration */ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) { - if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) { - if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN) - return true; - if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN && - hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH) - return true; - } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) { - return true; - } - return false; + return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ, + ICE_FW_API_REPORT_DFLT_CFG_MIN, + ICE_FW_API_REPORT_DFLT_CFG_PATCH); } +/** + * ice_fw_supports_fec_dis_auto + * @hw: pointer to the hardware structure + * + * Checks if the firmware supports FEC disable in Auto FEC mode + */ +bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw) +{ + return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH, + ICE_FW_FEC_DIS_AUTO_MAJ, + ICE_FW_FEC_DIS_AUTO_MIN, + ICE_FW_FEC_DIS_AUTO_PATCH); +} /** * ice_is_fw_auto_drop_supported * @hw: pointer to the hardware structure * * Checks if the firmware supports auto drop feature */ bool ice_is_fw_auto_drop_supported(struct ice_hw *hw) { if (hw->api_maj_ver >= 
ICE_FW_API_AUTO_DROP_MAJ && hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN) return true; return false; } + diff --git a/sys/dev/ice/ice_common.h b/sys/dev/ice/ice_common.h index b113082b2394..73e051fdda67 100644 --- a/sys/dev/ice/ice_common.h +++ b/sys/dev/ice/ice_common.h @@ -1,350 +1,353 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _ICE_COMMON_H_ #define _ICE_COMMON_H_ #include "ice_type.h" #include "ice_nvm.h" #include "ice_flex_pipe.h" #include "virtchnl.h" #include "ice_switch.h" #define ICE_SQ_SEND_DELAY_TIME_MS 10 #define ICE_SQ_SEND_MAX_EXECUTE 3 enum ice_fw_modes { ICE_FW_MODE_NORMAL, ICE_FW_MODE_DBG, ICE_FW_MODE_REC, ICE_FW_MODE_ROLLBACK }; void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq); bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq); void ice_set_umac_shared(struct ice_hw *hw); enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req); enum ice_status ice_create_all_ctrlq(struct ice_hw *hw); enum ice_status ice_init_all_ctrlq(struct ice_hw *hw); -void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading); void ice_destroy_all_ctrlq(struct ice_hw *hw); enum ice_status ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending); enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up); enum ice_status ice_update_link_info(struct ice_port_info *pi); enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); enum ice_status ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res); enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res); enum ice_status ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, enum ice_adminq_opc opc, struct ice_sq_cd *cd); enum ice_status ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); void ice_clear_pxe_mode(struct ice_hw *hw); enum ice_status ice_get_caps(struct ice_hw *hw); void ice_set_safe_mode_caps(struct ice_hw *hw); enum ice_status ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id, u32 start, void *buf, u16 buf_size, u16 *ret_buf_size, u16 *ret_next_table, u32 *ret_next_index, struct ice_sq_cd *cd); enum ice_status ice_set_mac_type(struct ice_hw *hw); /* Define a macro that will align a pointer to point to the next memory address * that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For * example, given the variable pointer = 0x1006, then after the following call: * * pointer = ICE_ALIGN(pointer, 4) * * ... the value of pointer would equal 0x1008, since 0x1008 is the next * address after 0x1006 which is divisible by 4. 
*/ #define ICE_ALIGN(ptr, align) (((ptr) + ((align) - 1)) & ~((align) - 1)) enum ice_status ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index); enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index); enum ice_status ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index); enum ice_status ice_write_tx_cmpltnq_ctx(struct ice_hw *hw, struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx, u32 tx_cmpltnq_index); enum ice_status ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index); enum ice_status ice_write_tx_drbell_q_ctx(struct ice_hw *hw, struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx, u32 tx_drbell_q_index); enum ice_status ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params); enum ice_status ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params); enum ice_status ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); enum ice_status ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, struct ice_aqc_get_set_rss_keys *keys); enum ice_status ice_aq_add_lan_txq(struct ice_hw *hw, u8 count, struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, bool is_tc_change, bool subseq_call, bool flush_pipe, u8 timeout, u32 *blocked_cgds, struct ice_aqc_move_txqs_data *buf, u16 buf_size, u8 *txqs_moved, struct ice_sq_cd *cd); enum ice_status ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, struct ice_aqc_add_rdma_qset_data *qset_list, u16 buf_size, struct ice_sq_cd *cd); bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; enum ice_status ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info); enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); enum ice_status ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, struct ice_sq_cd *cd); enum ice_status ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, bool save_bad_pac, bool pad_short_pac, bool double_vlan, struct ice_sq_cd *cd); enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); enum ice_status ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, u8 *node_part_number, u16 *node_handle); enum ice_status ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number, u16 *node_handle); void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap); enum ice_status ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); enum ice_status ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); bool ice_fw_supports_link_override(struct ice_hw *hw); +bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw); enum ice_status 
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); enum ice_status ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); bool ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); void ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); enum ice_status ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec); enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); enum ice_status ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop, struct ice_sq_cd *cd); enum ice_status ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, struct ice_link_status *link, struct ice_sq_cd *cd); enum ice_status ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, struct ice_sq_cd *cd); enum ice_status ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd); enum ice_status ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, struct ice_sq_cd *cd); enum ice_status ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, bool write, struct ice_sq_cd *cd); enum ice_status ice_aq_prog_topo_dev_nvm(struct ice_hw *hw, struct ice_aqc_link_topo_params *topo_params, struct ice_sq_cd *cd); enum ice_status ice_aq_read_topo_dev_nvm(struct ice_hw *hw, struct ice_aqc_link_topo_params *topo_params, u32 start_address, u8 *buf, u8 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_port_options(struct ice_hw *hw, struct ice_aqc_get_port_options_elem *options, u8 *option_count, u8 lport, bool lport_valid, u8 *active_option_idx, bool *active_option_valid); enum ice_status ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info); enum ice_status __ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data); enum ice_status __ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data); enum ice_status ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_rdmaqs); enum ice_status ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 *rdma_qset, u16 num_qsets, u32 *qset_teid); enum ice_status ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, u16 *q_id); enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, u16 *q_handle, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd); enum ice_status ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, u16 *max_lanqs); enum ice_status ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw); enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); void ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 
*cur_stat); void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); void ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded, struct ice_eth_stats *cur_stats); enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw); void ice_print_rollback_msg(struct ice_hw *hw); bool ice_is_e810(struct ice_hw *hw); bool ice_is_e810t(struct ice_hw *hw); +bool ice_is_e823(struct ice_hw *hw); enum ice_status ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0, u32 reg_addr1, u32 reg_val1); enum ice_status ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1); enum ice_status ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed); enum ice_status ice_aq_alternate_clear(struct ice_hw *hw); enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); enum ice_status ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status); enum ice_status ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status); enum ice_status ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, struct ice_sq_cd *cd); enum ice_status ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd); bool ice_is_100m_speed_supported(struct ice_hw *hw); enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist); enum ice_status ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); enum ice_status ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw); enum ice_status ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, struct ice_sq_cd *cd); enum ice_status ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, u8 *data, struct ice_sq_cd *cd); enum ice_status ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source, struct ice_sq_cd *cd); bool ice_is_fw_health_report_supported(struct ice_hw *hw); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); /* AQ API version for FW auto drop reports */ bool ice_is_fw_auto_drop_supported(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/sys/dev/ice/ice_common_sysctls.h b/sys/dev/ice/ice_common_sysctls.h index 0d149a5bc25c..11cfc50848f5 100644 --- a/sys/dev/ice/ice_common_sysctls.h +++ b/sys/dev/ice/ice_common_sysctls.h @@ -1,140 +1,155 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. 
Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_common_sysctls.h * @brief driver wide sysctls not related to the iflib stack * * Contains static sysctl values which are driver wide and configure all * devices of the driver at once. * * Device specific sysctls are set up by functions in ice_lib.c */ #ifndef _ICE_COMMON_SYSCTLS_H_ #define _ICE_COMMON_SYSCTLS_H_ #include <sys/sysctl.h> /** * @var ice_enable_irdma * @brief boolean indicating if the iRDMA client interface is enabled * * Global sysctl variable indicating whether the RDMA client interface feature * is enabled. */ bool ice_enable_irdma = true; /** * @var ice_enable_tx_fc_filter * @brief boolean indicating if the Tx Flow Control filter should be enabled * * Global sysctl variable indicating whether the Tx Flow Control filters * should be enabled. If true, Ethertype 0x8808 packets will be dropped if * they come from non-HW sources. If false, packets coming from software will * not be dropped. Leave this on unless you must send flow control frames * (or other control frames) from software. * * @remark each PF has a separate sysctl which can override this value. */ bool ice_enable_tx_fc_filter = true; /** * @var ice_enable_tx_lldp_filter * @brief boolean indicating if the Tx LLDP filter should be enabled * * Global sysctl variable indicating whether the Tx LLDP filters * should be enabled. If true, Ethertype 0x88cc packets will be dropped if * they come from non-HW sources. If false, packets coming from software will * not be dropped. Leave this on unless you must send LLDP frames from * software. * * @remark each PF has a separate sysctl which can override this value. */ bool ice_enable_tx_lldp_filter = true; /** * @var ice_enable_health_events * @brief boolean indicating if health status events from the FW should be reported * * Global sysctl variable indicating whether the Health Status events from the * FW should be enabled. If true, when an event occurs, the driver will print out * a message with a description of the event and possible actions to take. * * @remark each PF has a separate sysctl which can override this value. */ bool ice_enable_health_events = true; +/** + * @var ice_tx_balance_en + * @brief boolean permitting the 5-layer scheduler topology enablement + * + * Global sysctl variable indicating whether the driver will allow the + * 5-layer scheduler topology feature to be enabled. It's _not_ + * specifically enabling the feature, just allowing it depending on what + * the DDP package allows. + */ +bool ice_tx_balance_en = true; +
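Since the new variable is registered further below with CTLFLAG_RWTUN (see the SYSCTL_BOOL call near the end of this file), it can be seeded as a loader tunable or changed at runtime. An illustrative usage note follows (standard FreeBSD mechanisms; not part of this patch):

/*
 * Illustrative usage, not part of this patch:
 *
 *   # /boot/loader.conf -- seed the value before the driver attaches
 *   hw.ice.debug.tx_balance_en="0"
 *
 *   # at runtime, via sysctl(8)
 *   sysctl hw.ice.debug.tx_balance_en=0
 */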
/** * @var ice_rdma_max_msix * @brief maximum number of MSI-X vectors to reserve for RDMA interface * * Global sysctl variable indicating the maximum number of MSI-X vectors to * reserve for a single RDMA interface. */ static uint16_t ice_rdma_max_msix = ICE_RDMA_MAX_MSIX; /* Sysctls marked as tunable (i.e. with CTLFLAG_TUN set) will * automatically load tunable values, without the need to manually create the * TUNABLE definition. * * This works since at least FreeBSD 11, and was backported into FreeBSD 10 * before the FreeBSD 10.1-RELEASE. * * If the tunable needs a custom loader, mark the SYSCTL as CTLFLAG_NOFETCH, * and create the tunable manually. */ static SYSCTL_NODE(_hw, OID_AUTO, ice, CTLFLAG_RD, 0, "ICE driver parameters"); static SYSCTL_NODE(_hw_ice, OID_AUTO, debug, ICE_CTLFLAG_DEBUG | CTLFLAG_RD, 0, "ICE driver debug parameters"); SYSCTL_BOOL(_hw_ice, OID_AUTO, enable_health_events, CTLFLAG_RDTUN, &ice_enable_health_events, 0, "Enable FW health event reporting globally"); SYSCTL_BOOL(_hw_ice, OID_AUTO, irdma, CTLFLAG_RDTUN, &ice_enable_irdma, 0, "Enable iRDMA client interface"); SYSCTL_U16(_hw_ice, OID_AUTO, rdma_max_msix, CTLFLAG_RDTUN, &ice_rdma_max_msix, 0, "Maximum number of MSI-X vectors to reserve per RDMA interface"); SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, &ice_enable_tx_fc_filter, 0, "Drop Ethertype 0x8808 control frames originating from non-HW sources"); SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, enable_tx_lldp_filter, CTLFLAG_RDTUN, &ice_enable_tx_lldp_filter, 0, "Drop Ethertype 0x88cc LLDP frames originating from non-HW sources"); +SYSCTL_BOOL(_hw_ice_debug, OID_AUTO, tx_balance_en, CTLFLAG_RWTUN, + &ice_tx_balance_en, 0, + "Enable 5-layer scheduler topology"); + #endif /* _ICE_COMMON_SYSCTLS_H_ */ diff --git a/sys/dev/ice/ice_common_txrx.h b/sys/dev/ice/ice_common_txrx.h index d5e6182c2212..dd2b3c5bff0d 100644 --- a/sys/dev/ice/ice_common_txrx.h +++ b/sys/dev/ice/ice_common_txrx.h @@ -1,424 +1,424 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_common_txrx.h * @brief common Tx/Rx utility functions * * Contains common utility functions for the Tx/Rx hot path. * * The functions do depend on the if_pkt_info_t structure. A suitable * implementation of this structure must be provided if these functions are to * be used without the iflib networking stack. */ #ifndef _ICE_COMMON_TXRX_H_ #define _ICE_COMMON_TXRX_H_ #include <netinet/udp.h> #include <netinet/sctp.h> /** * ice_tso_detect_sparse - detect TSO packets with too many segments * @pi: packet information * * Hardware only transmits packets with a maximum of 8 descriptors. For TSO * packets, hardware needs to be able to build the split packets using 8 or * fewer descriptors. Additionally, the header must be contained within at * most 3 descriptors. * * To verify this, we walk the headers to find out how many descriptors the * headers require (usually 1). Then we ensure that, for each TSO segment, its * data plus the headers are contained within 8 or fewer descriptors. */ static inline int ice_tso_detect_sparse(if_pkt_info_t pi) { int count, curseg, i, hlen, segsz, seglen, tsolen, hdrs, maxsegs; bus_dma_segment_t *segs = pi->ipi_segs; int nsegs = pi->ipi_nsegs; curseg = hdrs = 0; hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen; tsolen = pi->ipi_len - hlen; /* First, count the number of descriptors for the header. * Additionally, make sure it does not span more than 3 segments. */ i = 0; curseg = segs[0].ds_len; while (hlen > 0) { hdrs++; if (hdrs > ICE_MAX_TSO_HDR_SEGS) return (1); if (curseg == 0) { i++; if (__predict_false(i == nsegs)) return (1); curseg = segs[i].ds_len; } seglen = min(curseg, hlen); curseg -= seglen; hlen -= seglen; } maxsegs = ICE_MAX_TX_SEGS - hdrs; /* We must count the headers, in order to verify that they take up * 3 or fewer descriptors. However, we don't need to check the data * if the total segments is small. */ if (nsegs <= maxsegs) return (0); count = 0; /* Now check the data to make sure that each TSO segment is made up of * no more than maxsegs descriptors. This ensures that hardware will * be capable of performing TSO offload. */ while (tsolen > 0) { segsz = pi->ipi_tso_segsz; while (segsz > 0 && tsolen != 0) { count++; if (count > maxsegs) { return (1); } if (curseg == 0) { i++; if (__predict_false(i == nsegs)) { return (1); } curseg = segs[i].ds_len; } seglen = min(curseg, segsz); segsz -= seglen; curseg -= seglen; tsolen -= seglen; } count = 0; } return (0); }
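A concrete walk-through of the check above may help; the numbers are illustrative only:

/*
 * Illustrative walk-through, not part of this patch: consider a TSO
 * packet with a 66-byte Ethernet/IP/TCP header that fits entirely in the
 * first DMA segment, and an MSS of 1448 bytes.  The header walk yields
 * hdrs = 1 descriptor, so maxsegs = ICE_MAX_TX_SEGS - 1 = 7 descriptors
 * remain for each segment's payload (assuming ICE_MAX_TX_SEGS is 8, per
 * the 8-descriptor limit described above).  The payload loop then checks
 * that no 1448-byte window of the packet spans more than 7 DMA segments;
 * if one does, the function returns 1 and the caller must defragment the
 * mbuf chain before transmit.
 */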
/** * ice_tso_setup - Setup a context descriptor to prepare for a TSO packet * @txq: the Tx queue to use * @pi: the packet info to prepare for * * Setup a context descriptor in preparation for sending a Tx packet that * requires the TSO offload. Returns the index of the descriptor to use when * encapsulating the Tx packet data into descriptors. */ static inline int ice_tso_setup(struct ice_tx_queue *txq, if_pkt_info_t pi) { struct ice_tx_ctx_desc *txd; u32 cmd, mss, type, tsolen; int idx; u64 type_cmd_tso_mss; idx = pi->ipi_pidx; txd = (struct ice_tx_ctx_desc *)&txq->tx_base[idx]; tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen); type = ICE_TX_DESC_DTYPE_CTX; cmd = ICE_TX_CTX_DESC_TSO; /* TSO MSS must not be less than 64 */ if (pi->ipi_tso_segsz < ICE_MIN_TSO_MSS) { txq->stats.mss_too_small++; pi->ipi_tso_segsz = ICE_MIN_TSO_MSS; } mss = pi->ipi_tso_segsz; type_cmd_tso_mss = ((u64)type << ICE_TXD_CTX_QW1_DTYPE_S) | ((u64)cmd << ICE_TXD_CTX_QW1_CMD_S) | ((u64)tsolen << ICE_TXD_CTX_QW1_TSO_LEN_S) | ((u64)mss << ICE_TXD_CTX_QW1_MSS_S); txd->qw1 = htole64(type_cmd_tso_mss); txd->tunneling_params = htole32(0); txq->tso++; return ((idx + 1) & (txq->desc_count-1)); } /** * ice_tx_setup_offload - Setup register values for performing a Tx offload * @txq: The Tx queue, used to track checksum offload stats * @pi: the packet info to program for * @cmd: the cmd register value to update * @off: the off register value to update * * Based on the packet info provided, update the cmd and off values for * enabling Tx offloads. This depends on the packet type and which offloads * have been requested. * * We also track the total number of times that we've requested that hardware * offload a particular type of checksum, for debugging purposes. */ static inline void ice_tx_setup_offload(struct ice_tx_queue *txq, if_pkt_info_t pi, u32 *cmd, u32 *off) { u32 remaining_csum_flags = pi->ipi_csum_flags; switch (pi->ipi_etype) { #ifdef INET case ETHERTYPE_IP: if (pi->ipi_csum_flags & ICE_CSUM_IP) { *cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM; txq->stats.cso[ICE_CSO_STAT_TX_IP4]++; remaining_csum_flags &= ~CSUM_IP; } else *cmd |= ICE_TX_DESC_CMD_IIPT_IPV4; break; #endif #ifdef INET6 case ETHERTYPE_IPV6: *cmd |= ICE_TX_DESC_CMD_IIPT_IPV6; /* * This indicates that the IIPT flag was set to the IPV6 value; * there's no checksum for IPv6 packets. */ txq->stats.cso[ICE_CSO_STAT_TX_IP6]++; break; #endif default: txq->stats.cso[ICE_CSO_STAT_TX_L3_ERR]++; break; } *off |= (pi->ipi_ehdrlen >> 1) << ICE_TX_DESC_LEN_MACLEN_S; *off |= (pi->ipi_ip_hlen >> 2) << ICE_TX_DESC_LEN_IPLEN_S; if (!(remaining_csum_flags & ~ICE_RX_CSUM_FLAGS)) return; switch (pi->ipi_ipproto) { case IPPROTO_TCP: if (pi->ipi_csum_flags & ICE_CSUM_TCP) { *cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP; *off |= (pi->ipi_tcp_hlen >> 2) << ICE_TX_DESC_LEN_L4_LEN_S; txq->stats.cso[ICE_CSO_STAT_TX_TCP]++; } break; case IPPROTO_UDP: if (pi->ipi_csum_flags & ICE_CSUM_UDP) { *cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP; *off |= (sizeof(struct udphdr) >> 2) << ICE_TX_DESC_LEN_L4_LEN_S; txq->stats.cso[ICE_CSO_STAT_TX_UDP]++; } break; case IPPROTO_SCTP: if (pi->ipi_csum_flags & ICE_CSUM_SCTP) { *cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; *off |= (sizeof(struct sctphdr) >> 2) << ICE_TX_DESC_LEN_L4_LEN_S; txq->stats.cso[ICE_CSO_STAT_TX_SCTP]++; } break; default: txq->stats.cso[ICE_CSO_STAT_TX_L4_ERR]++; break; } } /** * ice_rx_checksum - verify whether the hardware checksum is valid * @rxq: the Rx queue structure * @flags: checksum flags to update * @data: checksum data to update * @status0: descriptor status data * @ptype: packet type * * Determine whether the hardware indicated that the Rx checksum is valid. If * so, update the checksum flags and data, informing the stack of the status * of the checksum so that it does not spend time verifying it manually.
*/ static void ice_rx_checksum(struct ice_rx_queue *rxq, uint32_t *flags, uint32_t *data, u16 status0, u16 ptype) { const u16 l3_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)); const u16 l4_error = (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) | BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)); const u16 xsum_errors = (l3_error | l4_error | BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)); struct ice_rx_ptype_decoded decoded; bool is_ipv4, is_ipv6; /* No L3 or L4 checksum was calculated */ if (!(status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) { return; } decoded = ice_decode_rx_desc_ptype(ptype); *flags = 0; if (!(decoded.known && decoded.outer_ip)) return; is_ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4); is_ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); /* No checksum errors were reported */ if (!(status0 & xsum_errors)) { if (is_ipv4) *flags |= CSUM_L3_CALC | CSUM_L3_VALID; switch (decoded.inner_prot) { case ICE_RX_PTYPE_INNER_PROT_TCP: case ICE_RX_PTYPE_INNER_PROT_UDP: case ICE_RX_PTYPE_INNER_PROT_SCTP: *flags |= CSUM_L4_CALC | CSUM_L4_VALID; *data |= htons(0xffff); break; default: break; } return; } /* * Certain IPv6 extension headers impact the validity of L4 checksums. * If one of these headers exist, hardware will set the IPV6EXADD bit * in the descriptor. If the bit is set then pretend like hardware * didn't checksum this packet. */ if (is_ipv6 && (status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))) { rxq->stats.cso[ICE_CSO_STAT_RX_IP6_ERR]++; return; } /* * At this point, status0 must have at least one of the l3_error or * l4_error bits set. */ if (status0 & l3_error) { if (is_ipv4) { rxq->stats.cso[ICE_CSO_STAT_RX_IP4_ERR]++; *flags |= CSUM_L3_CALC; } else { /* Hardware indicated L3 error but this isn't IPv4? */ rxq->stats.cso[ICE_CSO_STAT_RX_L3_ERR]++; } /* don't bother reporting L4 errors if we got an L3 error */ return; } else if (is_ipv4) { *flags |= CSUM_L3_CALC | CSUM_L3_VALID; } if (status0 & l4_error) { switch (decoded.inner_prot) { case ICE_RX_PTYPE_INNER_PROT_TCP: rxq->stats.cso[ICE_CSO_STAT_RX_TCP_ERR]++; *flags |= CSUM_L4_CALC; break; case ICE_RX_PTYPE_INNER_PROT_UDP: rxq->stats.cso[ICE_CSO_STAT_RX_UDP_ERR]++; *flags |= CSUM_L4_CALC; break; case ICE_RX_PTYPE_INNER_PROT_SCTP: rxq->stats.cso[ICE_CSO_STAT_RX_SCTP_ERR]++; *flags |= CSUM_L4_CALC; break; default: /* * Hardware indicated L4 error, but this isn't one of * the expected protocols. */ rxq->stats.cso[ICE_CSO_STAT_RX_L4_ERR]++; } } } /** * ice_ptype_to_hash - Convert packet type to a hash value * @ptype: the packet type to convert * * Given the packet type, convert to a suitable hashtype to report to the * upper stack via the iri_rsstype value of the if_rxd_info_t structure. * * If the hash type is unknown we'll report M_HASHTYPE_OPAQUE. 
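 *
 * For example, a ptype that decodes to an outer IPv4 header carrying TCP
 * maps to M_HASHTYPE_RSS_TCP_IPV4, the same inner protocol over IPv6 maps
 * to M_HASHTYPE_RSS_TCP_IPV6, and a pure L2 ptype falls back to
 * M_HASHTYPE_OPAQUE.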
*/ static inline int ice_ptype_to_hash(u16 ptype) { struct ice_rx_ptype_decoded decoded; if (ptype >= ARRAY_SIZE(ice_ptype_lkup)) return M_HASHTYPE_OPAQUE; decoded = ice_decode_rx_desc_ptype(ptype); if (!decoded.known) return M_HASHTYPE_OPAQUE; if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2) return M_HASHTYPE_OPAQUE; /* Note: anything that gets to this point is IP */ if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6) { switch (decoded.inner_prot) { case ICE_RX_PTYPE_INNER_PROT_TCP: return M_HASHTYPE_RSS_TCP_IPV6; case ICE_RX_PTYPE_INNER_PROT_UDP: return M_HASHTYPE_RSS_UDP_IPV6; default: return M_HASHTYPE_RSS_IPV6; } } if (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4) { switch (decoded.inner_prot) { case ICE_RX_PTYPE_INNER_PROT_TCP: return M_HASHTYPE_RSS_TCP_IPV4; case ICE_RX_PTYPE_INNER_PROT_UDP: return M_HASHTYPE_RSS_UDP_IPV4; default: return M_HASHTYPE_RSS_IPV4; } } /* We should never get here!! */ return M_HASHTYPE_OPAQUE; } #endif diff --git a/sys/dev/ice/ice_controlq.c b/sys/dev/ice/ice_controlq.c index 636c2da3e4a1..76de98dcfafc 100644 --- a/sys/dev/ice/ice_controlq.c +++ b/sys/dev/ice/ice_controlq.c @@ -1,1239 +1,1248 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include "ice_common.h" #define ICE_CQ_INIT_REGS(qinfo, prefix) \ do { \ (qinfo)->sq.head = prefix##_ATQH; \ (qinfo)->sq.tail = prefix##_ATQT; \ (qinfo)->sq.len = prefix##_ATQLEN; \ (qinfo)->sq.bah = prefix##_ATQBAH; \ (qinfo)->sq.bal = prefix##_ATQBAL; \ (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \ (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \ (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M; \ (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \ (qinfo)->rq.head = prefix##_ARQH; \ (qinfo)->rq.tail = prefix##_ARQT; \ (qinfo)->rq.len = prefix##_ARQLEN; \ (qinfo)->rq.bah = prefix##_ARQBAH; \ (qinfo)->rq.bal = prefix##_ARQBAL; \ (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \ (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \ (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M; \ (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ } while (0) /** * ice_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure * * This assumes the alloc_sq and alloc_rq functions have already been called */ static void ice_adminq_init_regs(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->adminq; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ICE_CQ_INIT_REGS(cq, PF_FW); } /** * ice_mailbox_init_regs - Initialize Mailbox registers * @hw: pointer to the hardware structure * * This assumes the alloc_sq and alloc_rq functions have already been called */ static void ice_mailbox_init_regs(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->mailboxq; ICE_CQ_INIT_REGS(cq, PF_MBX); } /** * ice_check_sq_alive * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * * Returns true if Queue is enabled else false. */ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq) { /* check both queue-length and queue-enable fields */ if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | cq->sq.len_ena_mask)) == (cq->num_sq_entries | cq->sq.len_ena_mask); return false; } /** * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ static enum ice_status ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc); cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size); if (!cq->sq.desc_buf.va) return ICE_ERR_NO_MEMORY; cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries, sizeof(struct ice_sq_cd)); if (!cq->sq.cmd_buf) { ice_free_dma_mem(hw, &cq->sq.desc_buf); return ICE_ERR_NO_MEMORY; } return ICE_SUCCESS; } /** * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ static enum ice_status ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) { size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc); cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size); if (!cq->rq.desc_buf.va) return ICE_ERR_NO_MEMORY; return ICE_SUCCESS; } /** * ice_free_cq_ring - Free control queue ring * @hw: pointer to the hardware structure * @ring: pointer to the specific control queue ring * * This assumes the posted buffers have already been cleaned * and de-allocated */ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) { ice_free_dma_mem(hw, &ring->desc_buf); } /** * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ * @hw: 
pointer to the hardware structure * @cq: pointer to the specific Control queue */ static enum ice_status ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; /* We'll be allocating the buffer info memory first, then we can * allocate the mapped buffers for the event processing */ cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries, sizeof(cq->rq.desc_buf)); if (!cq->rq.dma_head) return ICE_ERR_NO_MEMORY; cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head; /* allocate the mapped buffers */ for (i = 0; i < cq->num_rq_entries; i++) { struct ice_aq_desc *desc; struct ice_dma_mem *bi; bi = &cq->rq.r.rq_bi[i]; bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size); if (!bi->va) goto unwind_alloc_rq_bufs; /* now configure the descriptors for use */ desc = ICE_CTL_Q_DESC(cq->rq, i); desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF); if (cq->rq_buf_size > ICE_AQ_LG_BUF) desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); desc->opcode = 0; /* This is in accordance with Admin queue design, there is no * register for buffer size configuration */ desc->datalen = CPU_TO_LE16(bi->size); desc->retval = 0; desc->cookie_high = 0; desc->cookie_low = 0; desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa)); desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa)); desc->params.generic.param0 = 0; desc->params.generic.param1 = 0; } return ICE_SUCCESS; unwind_alloc_rq_bufs: /* don't try to free the one that failed... */ i--; for (; i >= 0; i--) ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]); cq->rq.r.rq_bi = NULL; ice_free(hw, cq->rq.dma_head); cq->rq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } /** * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ static enum ice_status ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { int i; /* No mapped memory needed yet, just the buffer info structures */ cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries, sizeof(cq->sq.desc_buf)); if (!cq->sq.dma_head) return ICE_ERR_NO_MEMORY; cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head; /* allocate the mapped buffers */ for (i = 0; i < cq->num_sq_entries; i++) { struct ice_dma_mem *bi; bi = &cq->sq.r.sq_bi[i]; bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size); if (!bi->va) goto unwind_alloc_sq_bufs; } return ICE_SUCCESS; unwind_alloc_sq_bufs: /* don't try to free the one that failed... 
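	 * (the failing index holds no DMA memory, so the i-- below skips it
	 * and the loop releases only the earlier, successfully mapped entries)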
*/ i--; for (; i >= 0; i--) ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]); cq->sq.r.sq_bi = NULL; ice_free(hw, cq->sq.dma_head); cq->sq.dma_head = NULL; return ICE_ERR_NO_MEMORY; } static enum ice_status ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) { /* Clear Head and Tail */ wr32(hw, ring->head, 0); wr32(hw, ring->tail, 0); /* set starting point */ wr32(hw, ring->len, (num_entries | ring->len_ena_mask)); wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa)); wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa)); /* Check one register to verify that config was applied */ if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa)) return ICE_ERR_AQ_ERROR; return ICE_SUCCESS; } /** * ice_cfg_sq_regs - configure Control ATQ registers * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * Configure base address and length registers for the transmit queue */ static enum ice_status ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); } /** * ice_cfg_rq_regs - configure Control ARQ register * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * Configure base address and length registers for the receive (event queue) */ static enum ice_status ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status status; status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); if (status) return status; /* Update tail in the HW to post pre-allocated buffers */ wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1)); return ICE_SUCCESS; } #define ICE_FREE_CQ_BUFS(hw, qi, ring) \ do { \ /* free descriptors */ \ if ((qi)->ring.r.ring##_bi) { \ int i; \ \ for (i = 0; i < (qi)->num_##ring##_entries; i++) \ if ((qi)->ring.r.ring##_bi[i].pa) \ ice_free_dma_mem((hw), \ &(qi)->ring.r.ring##_bi[i]); \ } \ /* free the buffer info list */ \ if ((qi)->ring.cmd_buf) \ ice_free(hw, (qi)->ring.cmd_buf); \ /* free DMA head */ \ ice_free(hw, (qi)->ring.dma_head); \ } while (0) /** * ice_init_sq - main initialization routine for Control ATQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * This is the main initialization routine for the Control Send Queue * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->sq_buf_size * * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe */ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status ret_code; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (cq->sq.count > 0) { /* queue already initialized */ ret_code = ICE_ERR_NOT_READY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_sq_entries || !cq->sq_buf_size) { ret_code = ICE_ERR_CFG; goto init_ctrlq_exit; } cq->sq.next_to_use = 0; cq->sq.next_to_clean = 0; /* allocate the ring memory */ ret_code = ice_alloc_ctrlq_sq_ring(hw, cq); if (ret_code) goto init_ctrlq_exit; /* allocate buffers in the rings */ ret_code = ice_alloc_sq_bufs(hw, cq); if (ret_code) goto init_ctrlq_free_rings; /* initialize base registers */ ret_code = ice_cfg_sq_regs(hw, cq); if (ret_code) goto init_ctrlq_free_rings; /* success! 
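	 * (recording the final count below is what marks the queue as
	 * initialized; ice_init_sq() and ice_shutdown_sq() both key off a
	 * non-zero sq.count)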
*/ cq->sq.count = cq->num_sq_entries; goto init_ctrlq_exit; init_ctrlq_free_rings: ICE_FREE_CQ_BUFS(hw, cq, sq); ice_free_cq_ring(hw, &cq->sq); init_ctrlq_exit: return ret_code; } /** * ice_init_rq - initialize ARQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * The main initialization routine for the Admin Receive (Event) Queue. * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_rq_entries * - cq->rq_buf_size * * Do *NOT* hold the lock when calling this as the memory allocation routines * called are not going to be atomic context safe */ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status ret_code; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); if (cq->rq.count > 0) { /* queue already initialized */ ret_code = ICE_ERR_NOT_READY; goto init_ctrlq_exit; } /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->rq_buf_size) { ret_code = ICE_ERR_CFG; goto init_ctrlq_exit; } cq->rq.next_to_use = 0; cq->rq.next_to_clean = 0; /* allocate the ring memory */ ret_code = ice_alloc_ctrlq_rq_ring(hw, cq); if (ret_code) goto init_ctrlq_exit; /* allocate buffers in the rings */ ret_code = ice_alloc_rq_bufs(hw, cq); if (ret_code) goto init_ctrlq_free_rings; /* initialize base registers */ ret_code = ice_cfg_rq_regs(hw, cq); if (ret_code) goto init_ctrlq_free_rings; /* success! */ cq->rq.count = cq->num_rq_entries; goto init_ctrlq_exit; init_ctrlq_free_rings: ICE_FREE_CQ_BUFS(hw, cq, rq); ice_free_cq_ring(hw, &cq->rq); init_ctrlq_exit: return ret_code; } /** * ice_shutdown_sq - shutdown the Control ATQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * The main shutdown routine for the Control Transmit Queue */ static enum ice_status ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status ret_code = ICE_SUCCESS; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_acquire_lock(&cq->sq_lock); if (!cq->sq.count) { ret_code = ICE_ERR_NOT_READY; goto shutdown_sq_out; } /* Stop firmware AdminQ processing */ wr32(hw, cq->sq.head, 0); wr32(hw, cq->sq.tail, 0); wr32(hw, cq->sq.len, 0); wr32(hw, cq->sq.bal, 0); wr32(hw, cq->sq.bah, 0); cq->sq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers and the ring itself */ ICE_FREE_CQ_BUFS(hw, cq, sq); ice_free_cq_ring(hw, &cq->sq); shutdown_sq_out: ice_release_lock(&cq->sq_lock); return ret_code; } /** * ice_aq_ver_check - Check the reported AQ API version. * @hw: pointer to the hardware structure * * Checks if the driver should load on a given AQ API version. * * Return: 'true' iff the driver should attempt to load. 'false' otherwise. */ static bool ice_aq_ver_check(struct ice_hw *hw) { if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { /* Major API version is newer than expected, don't load */ ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); return false; } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) - ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); + ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). 
Please install the most recent version of the network driver.\n", + hw->api_maj_ver, hw->api_min_ver, + EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR); else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) - ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR); } else { /* Major API version is older than expected, log a warning */ - ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR); } return true; } /** * ice_shutdown_rq - shutdown Control ARQ * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * The main shutdown routine for the Control Receive Queue */ static enum ice_status ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { enum ice_status ret_code = ICE_SUCCESS; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); ice_acquire_lock(&cq->rq_lock); if (!cq->rq.count) { ret_code = ICE_ERR_NOT_READY; goto shutdown_rq_out; } /* Stop Control Queue processing */ wr32(hw, cq->rq.head, 0); wr32(hw, cq->rq.tail, 0); wr32(hw, cq->rq.len, 0); wr32(hw, cq->rq.bal, 0); wr32(hw, cq->rq.bah, 0); /* set rq.count to 0 to indicate uninitialized queue */ cq->rq.count = 0; /* free ring buffers and the ring itself */ ICE_FREE_CQ_BUFS(hw, cq, rq); ice_free_cq_ring(hw, &cq->rq); shutdown_rq_out: ice_release_lock(&cq->rq_lock); return ret_code; } /** * ice_idle_aq - stop ARQ/ATQ processing momentarily * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue */ void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { wr32(hw, cq->sq.len, 0); wr32(hw, cq->rq.len, 0); ice_msec_delay(2, false); } /** * ice_init_check_adminq - Check version for Admin Queue to know if its alive * @hw: pointer to the hardware structure */ static enum ice_status ice_init_check_adminq(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->adminq; enum ice_status status; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); status = ice_aq_get_fw_ver(hw, NULL); if (status) goto init_ctrlq_free_rq; if (!ice_aq_ver_check(hw)) { status = ICE_ERR_FW_API_VER; goto init_ctrlq_free_rq; } return ICE_SUCCESS; init_ctrlq_free_rq: ice_shutdown_rq(hw, cq); ice_shutdown_sq(hw, cq); return status; } /** * ice_init_ctrlq - main initialization routine for any control Queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type * * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size * * NOTE: this function does not initialize the controlq locks */ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { struct ice_ctl_q_info *cq; enum ice_status ret_code; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); switch (q_type) { case ICE_CTL_Q_ADMIN: ice_adminq_init_regs(hw); cq = &hw->adminq; break; case ICE_CTL_Q_MAILBOX: ice_mailbox_init_regs(hw); cq = &hw->mailboxq; break; default: return 
ICE_ERR_PARAM; } cq->qtype = q_type; /* verify input for valid configuration */ if (!cq->num_rq_entries || !cq->num_sq_entries || !cq->rq_buf_size || !cq->sq_buf_size) { return ICE_ERR_CFG; } /* setup SQ command write back timeout */ cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; /* allocate the ATQ */ ret_code = ice_init_sq(hw, cq); if (ret_code) return ret_code; /* allocate the ARQ */ ret_code = ice_init_rq(hw, cq); if (ret_code) goto init_ctrlq_free_sq; /* success! */ return ICE_SUCCESS; init_ctrlq_free_sq: ice_shutdown_sq(hw, cq); return ret_code; } /** * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. */ -static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type, + bool unloading) { struct ice_ctl_q_info *cq; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); switch (q_type) { case ICE_CTL_Q_ADMIN: cq = &hw->adminq; if (ice_check_sq_alive(hw, cq)) - ice_aq_q_shutdown(hw, true); + ice_aq_q_shutdown(hw, unloading); break; case ICE_CTL_Q_MAILBOX: cq = &hw->mailboxq; break; default: return; } ice_shutdown_sq(hw, cq); ice_shutdown_rq(hw, cq); } /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. The driver * may call this at runtime to shutdown and later restart control queues, such * as in response to a reset event. */ -void ice_shutdown_all_ctrlq(struct ice_hw *hw) +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading) { ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Shutdown FW admin queue */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading); /* Shutdown PF-VF Mailbox */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading); } /** * ice_init_all_ctrlq - main initialization routine for all control queues * @hw: pointer to the hardware structure * * Prior to calling this function, the driver MUST* set the following fields * in the cq->structure for all control queues: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size * * NOTE: this function does not initialize the controlq locks. */ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw) { enum ice_status status; u32 retry = 0; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); /* Init FW admin queue */ do { status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN); if (status) return status; status = ice_init_check_adminq(hw); if (status != ICE_ERR_AQ_FW_CRITICAL) break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true); ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true); } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); if (status) return status; /* Init Mailbox queue */ return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX); } /** * ice_init_ctrlq_locks - Initialize locks for a control queue * @cq: pointer to the control queue * * Initializes the send and receive queue locks for a given control queue. 
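 *
 * A typical lifecycle sketch: ice_create_all_ctrlq() once at driver load
 * (creating these locks and then initializing the queues), a matching
 * ice_destroy_all_ctrlq() at unload, and ice_shutdown_all_ctrlq() /
 * ice_init_all_ctrlq() around runtime events such as resets, which reuse
 * the already-created locks.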
*/ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq) { ice_init_lock(&cq->sq_lock); ice_init_lock(&cq->rq_lock); } /** * ice_create_all_ctrlq - main initialization routine for all control queues * @hw: pointer to the hardware structure * * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure for all control queues: * - cq->num_sq_entries * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size * * This function creates all the control queue locks and then calls * ice_init_all_ctrlq. It should be called once during driver load. If the * driver needs to re-initialize control queues at run time it should call * ice_init_all_ctrlq instead. */ enum ice_status ice_create_all_ctrlq(struct ice_hw *hw) { ice_init_ctrlq_locks(&hw->adminq); ice_init_ctrlq_locks(&hw->mailboxq); return ice_init_all_ctrlq(hw); } /** * ice_destroy_ctrlq_locks - Destroy locks for a control queue * @cq: pointer to the control queue * * Destroys the send and receive queue locks for a given control queue. */ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) { ice_destroy_lock(&cq->sq_lock); ice_destroy_lock(&cq->rq_lock); } /** * ice_destroy_all_ctrlq - exit routine for all control queues * @hw: pointer to the hardware structure * * This function shuts down all the control queues and then destroys the * control queue locks. It should be called once during driver unload. The * driver should call ice_shutdown_all_ctrlq if it needs to shut down and * reinitialize control queues, such as in response to a reset event. */ void ice_destroy_all_ctrlq(struct ice_hw *hw) { /* shut down all the control queues first */ - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); ice_destroy_ctrlq_locks(&hw->adminq); ice_destroy_ctrlq_locks(&hw->mailboxq); } /** * ice_clean_sq - cleans Admin send queue (ATQ) * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * * returns the number of free desc */ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { struct ice_ctl_q_ring *sq = &cq->sq; u16 ntc = sq->next_to_clean; struct ice_sq_cd *details; struct ice_aq_desc *desc; desc = ICE_CTL_Q_DESC(*sq, ntc); details = ICE_CTL_Q_DETAILS(*sq, ntc); while (rd32(hw, cq->sq.head) != ntc) { ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); ntc++; if (ntc == sq->count) ntc = 0; desc = ICE_CTL_Q_DESC(*sq, ntc); details = ICE_CTL_Q_DETAILS(*sq, ntc); } sq->next_to_clean = ntc; return ICE_CTL_Q_DESC_UNUSED(sq); } /** * ice_debug_cq * @hw: pointer to the hardware structure * @desc: pointer to control queue descriptor * @buf: pointer to command buffer * @buf_len: max length of buf * * Dumps debug log about control command with descriptor contents. 
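 *
 * The indirect buffer is dumped only when @buf is non-NULL, the descriptor
 * reports a non-zero datalen, and the flags mark it as either a completed
 * response (DD or CMP set) or a command that supplied data (RD set).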
*/ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) { struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc; u16 datalen, flags; if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask)) return; if (!desc) return; datalen = LE16_TO_CPU(cq_desc->datalen); flags = LE16_TO_CPU(cq_desc->flags); ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", LE16_TO_CPU(cq_desc->opcode), flags, datalen, LE16_TO_CPU(cq_desc->retval)); ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(cq_desc->cookie_high), LE32_TO_CPU(cq_desc->cookie_low)); ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n", LE32_TO_CPU(cq_desc->params.generic.param0), LE32_TO_CPU(cq_desc->params.generic.param1)); ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n", LE32_TO_CPU(cq_desc->params.generic.addr_high), LE32_TO_CPU(cq_desc->params.generic.addr_low)); /* Dump buffer iff 1) one exists and 2) is either a response indicated * by the DD and/or CMP flag set or a command with the RD flag set. */ if (buf && cq_desc->datalen != 0 && (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) || flags & ICE_AQ_FLAG_RD)) { ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n"); ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, MIN_T(u16, buf_len, datalen)); } } /** * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * * Returns true if the firmware has processed all descriptors on the * admin send queue. Returns false if there are still requests pending. */ bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) { /* AQ designers suggest use of head for better * timing reliability than DD bit */ return rd32(hw, cq->sq.head) == cq->sq.next_to_use; } /** * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @desc: prefilled descriptor describing the command (non DMA mem) * @buf: buffer to use for indirect commands (or NULL for direct commands) * @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure * * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. 
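 *
 * A minimal usage sketch for a direct (bufferless) command; 'opcode' is a
 * stand-in value, and real callers go through ice_sq_send_cmd(), which
 * takes the queue lock around this routine:
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);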
*/ static enum ice_status ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_dma_mem *dma_buf = NULL; struct ice_aq_desc *desc_on_ring; bool cmd_completed = false; enum ice_status status = ICE_SUCCESS; struct ice_sq_cd *details; u32 total_delay = 0; u16 retval = 0; u32 val = 0; /* if reset is in progress return a soft error */ if (hw->reset_ongoing) return ICE_ERR_RESET_ONGOING; cq->sq_last_status = ICE_AQ_RC_OK; if (!cq->sq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n"); status = ICE_ERR_AQ_EMPTY; goto sq_send_command_error; } if ((buf && !buf_size) || (!buf && buf_size)) { status = ICE_ERR_PARAM; goto sq_send_command_error; } if (buf) { if (buf_size > cq->sq_buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n", buf_size); status = ICE_ERR_INVAL_SIZE; goto sq_send_command_error; } desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF); if (buf_size > ICE_AQ_LG_BUF) desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); } val = rd32(hw, cq->sq.head); if (val >= cq->num_sq_entries) { ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n", val); status = ICE_ERR_AQ_EMPTY; goto sq_send_command_error; } details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); if (cd) *details = *cd; else ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM); /* Call clean and check queue available function to reclaim the * descriptors that were processed by FW/MBX; the function returns the * number of desc available. The clean function called here could be * called in a separate thread in case of asynchronous completions. */ if (ice_clean_sq(hw, cq) == 0) { ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n"); status = ICE_ERR_AQ_FULL; goto sq_send_command_error; } /* initialize the temp desc pointer with the right desc */ desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use); /* if the desc is available copy the temp desc to the right place */ ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring), ICE_NONDMA_TO_DMA); /* if buf is not NULL assume indirect command */ if (buf) { dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use]; /* copy the user buf into the respective DMA buf */ ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA); desc_on_ring->datalen = CPU_TO_LE16(buf_size); /* Update the address values in the desc with the pa value * for respective buffer */ desc_on_ring->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa)); desc_on_ring->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa)); } /* Debug desc and buffer */ ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n"); ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); (cq->sq.next_to_use)++; if (cq->sq.next_to_use == cq->sq.count) cq->sq.next_to_use = 0; wr32(hw, cq->sq.tail, cq->sq.next_to_use); do { if (ice_sq_done(hw, cq)) break; ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false); total_delay++; } while (total_delay < cq->sq_cmd_timeout); /* if ready, copy the desc back to temp */ if (ice_sq_done(hw, cq)) { ice_memcpy(desc, desc_on_ring, sizeof(*desc), ICE_DMA_TO_NONDMA); if (buf) { /* get returned length to copy */ u16 copy_size = LE16_TO_CPU(desc->datalen); if (copy_size > buf_size) { ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n", copy_size, buf_size); status = ICE_ERR_AQ_ERROR; } else { ice_memcpy(buf, dma_buf->va, copy_size, ICE_DMA_TO_NONDMA); } } retval = 
LE16_TO_CPU(desc->retval); if (retval) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n", LE16_TO_CPU(desc->opcode), retval); /* strip off FW internal code */ retval &= 0xff; } cmd_completed = true; if (!status && retval != ICE_AQ_RC_OK) status = ICE_ERR_AQ_ERROR; cq->sq_last_status = (enum ice_aq_err)retval; } ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n"); ice_debug_cq(hw, (void *)desc, buf, buf_size); /* save writeback AQ if requested */ if (details->wb_desc) ice_memcpy(details->wb_desc, desc_on_ring, sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA); /* update the error if time out occurred */ if (!cmd_completed) { if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask || rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) { ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n"); status = ICE_ERR_AQ_FW_CRITICAL; } else { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n"); status = ICE_ERR_AQ_TIMEOUT; } } sq_send_command_error: return status; } /** * ice_sq_send_cmd - send command to Control Queue (ATQ) * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @desc: prefilled descriptor describing the command * @buf: buffer to use for indirect commands (or NULL for direct commands) * @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure * * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. */ enum ice_status ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd) { enum ice_status status = ICE_SUCCESS; /* if reset is in progress return a soft error */ if (hw->reset_ongoing) return ICE_ERR_RESET_ONGOING; ice_acquire_lock(&cq->sq_lock); status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd); ice_release_lock(&cq->sq_lock); return status; } /** * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function * @desc: pointer to the temp descriptor (non DMA mem) * @opcode: the opcode can be used to decide which flags to turn off or on * * Fill the desc with default values */ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) { /* zero out the desc */ ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM); desc->opcode = CPU_TO_LE16(opcode); desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI); } /** * ice_clean_rq_elem * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @e: event info from the receive descriptor, includes any buffers * @pending: number of events that could be left to process * * This function cleans one Admin Receive Queue element and returns * the contents through e. It can also return how many events are * left to process through 'pending'. 
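 *
 * The returned 'pending' count accounts for ring wrap.  For example, with a
 * 32-entry ring, a next_to_clean of 30 and a re-read head of 2, pending is
 * (32 + (2 - 30)) == 4 events still waiting to be processed.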
*/ enum ice_status ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_rq_event_info *e, u16 *pending) { u16 ntc = cq->rq.next_to_clean; enum ice_aq_err rq_last_status; enum ice_status ret_code = ICE_SUCCESS; struct ice_aq_desc *desc; struct ice_dma_mem *bi; u16 desc_idx; u16 datalen; u16 flags; u16 ntu; /* pre-clean the event info */ ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM); /* take the lock before we start messing with the ring */ ice_acquire_lock(&cq->rq_lock); if (!cq->rq.count) { ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n"); ret_code = ICE_ERR_AQ_EMPTY; goto clean_rq_elem_err; } /* set next_to_use to head */ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = ICE_ERR_AQ_NO_WORK; goto clean_rq_elem_out; } /* now clean the next descriptor */ desc = ICE_CTL_Q_DESC(cq->rq, ntc); desc_idx = ntc; rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval); flags = LE16_TO_CPU(desc->flags); if (flags & ICE_AQ_FLAG_ERR) { ret_code = ICE_ERR_AQ_ERROR; ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n", LE16_TO_CPU(desc->opcode), rq_last_status); } ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA); datalen = LE16_TO_CPU(desc->datalen); e->msg_len = MIN_T(u16, datalen, e->buf_len); if (e->msg_buf && e->msg_len) ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len, ICE_DMA_TO_NONDMA); ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n"); ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message size */ bi = &cq->rq.r.rq_bi[ntc]; ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM); desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF); if (cq->rq_buf_size > ICE_AQ_LG_BUF) desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB); desc->datalen = CPU_TO_LE16(bi->size); desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa)); desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa)); /* set tail = the last cleaned desc index. */ wr32(hw, cq->rq.tail, ntc); /* ntc is updated to tail + 1 */ ntc++; if (ntc == cq->num_rq_entries) ntc = 0; cq->rq.next_to_clean = ntc; cq->rq.next_to_use = ntu; clean_rq_elem_out: /* Set pending if needed, unlock and return */ if (pending) { /* re-read HW head to calculate actual pending messages */ ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask); *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc)); } clean_rq_elem_err: ice_release_lock(&cq->rq_lock); return ret_code; } diff --git a/sys/dev/ice/ice_controlq.h b/sys/dev/ice/ice_controlq.h index 4ed0809f2bad..16d47ae77f8f 100644 --- a/sys/dev/ice/ice_controlq.h +++ b/sys/dev/ice/ice_controlq.h @@ -1,127 +1,127 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. 
Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_CONTROLQ_H_ #define _ICE_CONTROLQ_H_ #include "ice_adminq_cmd.h" /* Maximum buffer lengths for all control queue types */ #define ICE_AQ_MAX_BUF_LEN 4096 #define ICE_MBXQ_MAX_BUF_LEN 4096 #define ICE_CTL_Q_DESC(R, i) \ (&(((struct ice_aq_desc *)((R).desc_buf.va))[i])) #define ICE_CTL_Q_DESC_UNUSED(R) \ ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1)) /* Defines that help manage the driver vs FW API checks. * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. */ #define EXP_FW_API_VER_BRANCH 0x00 #define EXP_FW_API_VER_MAJOR 0x01 #define EXP_FW_API_VER_MINOR 0x05 /* Different control queue types: These are mainly for SW consumption. */ enum ice_ctl_q { ICE_CTL_Q_UNKNOWN = 0, ICE_CTL_Q_ADMIN, ICE_CTL_Q_MAILBOX, }; /* Control Queue timeout settings - max delay 1s */ #define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */ #define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ struct ice_ctl_q_ring { void *dma_head; /* Virtual address to DMA head */ struct ice_dma_mem desc_buf; /* descriptor ring memory */ void *cmd_buf; /* command buffer memory */ union { struct ice_dma_mem *sq_bi; struct ice_dma_mem *rq_bi; } r; u16 count; /* Number of descriptors */ /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; /* used for queue tracking */ u32 head; u32 tail; u32 len; u32 bah; u32 bal; u32 len_mask; u32 len_ena_mask; u32 len_crit_mask; u32 head_mask; }; /* sq transaction details */ struct ice_sq_cd { struct ice_aq_desc *wb_desc; }; #define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i])) /* rq event information */ struct ice_rq_event_info { struct ice_aq_desc desc; u16 msg_len; u16 buf_len; u8 *msg_buf; }; /* Control Queue information */ struct ice_ctl_q_info { enum ice_ctl_q qtype; struct ice_ctl_q_ring rq; /* receive queue */ struct ice_ctl_q_ring sq; /* send queue */ u32 sq_cmd_timeout; /* send queue cmd write back timeout */ u16 num_rq_entries; /* receive queue depth */ u16 num_sq_entries; /* send queue depth */ u16 rq_buf_size; /* receive queue buffer size */ u16 sq_buf_size; /* send queue buffer size */ enum ice_aq_err sq_last_status; /* last status on send queue */ struct ice_lock sq_lock; /* Send queue lock */ struct ice_lock rq_lock; /* Receive queue lock */ }; #endif /* _ICE_CONTROLQ_H_ */ diff --git a/sys/dev/ice/ice_dcb.c b/sys/dev/ice/ice_dcb.c index 
137ffad92935..19ab0d349cf7 100644 --- a/sys/dev/ice/ice_dcb.c +++ b/sys/dev/ice/ice_dcb.c @@ -1,1841 +1,1881 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "ice_common.h" #include "ice_sched.h" #include "ice_dcb.h" /** * ice_aq_get_lldp_mib * @hw: pointer to the HW struct * @bridge_type: type of bridge requested * @mib_type: Local, Remote or both Local and Remote MIBs * @buf: pointer to the caller-supplied buffer to store the MIB block * @buf_size: size of the buffer (in bytes) * @local_len: length of the returned Local LLDP MIB * @remote_len: length of the returned Remote LLDP MIB * @cd: pointer to command details structure or NULL * * Requests the complete LLDP MIB (entire packet). 
(0x0A00) */ enum ice_status ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, u16 buf_size, u16 *local_len, u16 *remote_len, struct ice_sq_cd *cd) { struct ice_aqc_lldp_get_mib *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.lldp_get_mib; if (buf_size == 0 || !buf) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib); cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M; cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) & ICE_AQ_LLDP_BRID_TYPE_M; desc.datalen = CPU_TO_LE16(buf_size); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status) { if (local_len) *local_len = LE16_TO_CPU(cmd->local_len); if (remote_len) *remote_len = LE16_TO_CPU(cmd->remote_len); } return status; } /** * ice_aq_cfg_lldp_mib_change * @hw: pointer to the HW struct * @ena_update: Enable or Disable event posting * @cd: pointer to command details structure or NULL * * Enable or Disable posting of an event on ARQ when LLDP MIB * associated with the interface changes (0x0A01) */ enum ice_status ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, struct ice_sq_cd *cd) { struct ice_aqc_lldp_set_mib_change *cmd; struct ice_aq_desc desc; cmd = &desc.params.lldp_set_event; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change); if (!ena_update) cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS; + else + cmd->command |= ICE_AQ_LLDP_MIB_PENDING_ENABLE << + ICE_AQ_LLDP_MIB_PENDING_S; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_add_delete_lldp_tlv * @hw: pointer to the HW struct * @bridge_type: type of bridge * @add_lldp_tlv: add (true) or delete (false) TLV * @buf: buffer with TLV to add or delete * @buf_size: length of the buffer * @tlv_len: length of the TLV to be added/deleted * @mib_len: length of the LLDP MIB returned in response * @cd: pointer to command details structure or NULL * * (Add tlv) * Add the specified TLV to LLDP Local MIB for the given bridge type, * it is responsibility of the caller to make sure that the TLV is not * already present in the LLDPDU. * In return firmware will write the complete LLDP MIB with the newly * added TLV in the response buffer. (0x0A02) * * (Delete tlv) * Delete the specified TLV from LLDP Local MIB for the given bridge type. * The firmware places the entire LLDP MIB in the response buffer. 
(0x0A04) */ enum ice_status ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv, void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len, struct ice_sq_cd *cd) { struct ice_aqc_lldp_add_delete_tlv *cmd; struct ice_aq_desc desc; enum ice_status status; if (tlv_len == 0) return ICE_ERR_PARAM; cmd = &desc.params.lldp_add_delete_tlv; if (add_lldp_tlv) ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_add_tlv); else ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_delete_tlv); desc.flags |= CPU_TO_LE16((u16)(ICE_AQ_FLAG_RD)); cmd->type = ((bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) & ICE_AQ_LLDP_BRID_TYPE_M); cmd->len = CPU_TO_LE16(tlv_len); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status && mib_len) *mib_len = LE16_TO_CPU(desc.datalen); return status; } /** * ice_aq_update_lldp_tlv * @hw: pointer to the HW struct * @bridge_type: type of bridge * @buf: buffer with TLV to update * @buf_size: size of the buffer holding original and updated TLVs * @old_len: Length of the Original TLV * @new_len: Length of the Updated TLV * @offset: offset of the updated TLV in the buff * @mib_len: length of the returned LLDP MIB * @cd: pointer to command details structure or NULL * * Update the specified TLV to the LLDP Local MIB for the given bridge type. * Firmware will place the complete LLDP MIB in response buffer with the * updated TLV. (0x0A03) */ enum ice_status ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf, u16 buf_size, u16 old_len, u16 new_len, u16 offset, u16 *mib_len, struct ice_sq_cd *cd) { struct ice_aqc_lldp_update_tlv *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.lldp_update_tlv; if (offset == 0 || old_len == 0 || new_len == 0) return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_update_tlv); desc.flags |= CPU_TO_LE16((u16)(ICE_AQ_FLAG_RD)); cmd->type = ((bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) & ICE_AQ_LLDP_BRID_TYPE_M); cmd->old_len = CPU_TO_LE16(old_len); cmd->new_offset = CPU_TO_LE16(offset); cmd->new_len = CPU_TO_LE16(new_len); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); if (!status && mib_len) *mib_len = LE16_TO_CPU(desc.datalen); return status; } /** * ice_aq_stop_lldp * @hw: pointer to the HW struct * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown * False if LLDP Agent needs to be Stopped * @persist: True if Stop/Shutdown of LLDP Agent needs to be persistent across * reboots * @cd: pointer to command details structure or NULL * * Stop or Shutdown the embedded LLDP Agent (0x0A05) */ enum ice_status ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd) { struct ice_aqc_lldp_stop *cmd; struct ice_aq_desc desc; cmd = &desc.params.lldp_stop; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop); if (shutdown_lldp_agent) cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN; if (persist) cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_DIS; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_aq_start_lldp * @hw: pointer to the HW struct * @persist: True if Start of LLDP Agent needs to be persistent across reboots * @cd: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. 
(0x0A06) */ enum ice_status ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) { struct ice_aqc_lldp_start *cmd; struct ice_aq_desc desc; cmd = &desc.params.lldp_start; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start); cmd->command = ICE_AQ_LLDP_AGENT_START; if (persist) cmd->command |= ICE_AQ_LLDP_AGENT_PERSIST_ENA; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_get_dcbx_status * @hw: pointer to the HW struct * * Get the DCBX status from the Firmware */ u8 ice_get_dcbx_status(struct ice_hw *hw) { u32 reg; reg = rd32(hw, PRTDCB_GENS); return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >> PRTDCB_GENS_DCBX_STATUS_S); } /** * ice_parse_ieee_ets_common_tlv * @buf: Data buffer to be parsed for ETS CFG/REC data * @ets_cfg: Container to store parsed data * * Parses the common data of IEEE 802.1Qaz ETS CFG/REC TLV */ static void ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg) { u8 offset = 0; int i; /* Priority Assignment Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { ets_cfg->prio_table[i * 2] = ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >> ICE_IEEE_ETS_PRIO_1_S); ets_cfg->prio_table[i * 2 + 1] = ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >> ICE_IEEE_ETS_PRIO_0_S); offset++; } /* TC Bandwidth Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- * * TSA Assignment Table (8 octets) * Octets:| 9 | 10| 11| 12| 13| 14| 15| 16| * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ ice_for_each_traffic_class(i) { ets_cfg->tcbwtable[i] = buf[offset]; ets_cfg->tsatable[i] = buf[ICE_MAX_TRAFFIC_CLASS + offset++]; } } /** * ice_parse_ieee_etscfg_tlv * @tlv: IEEE 802.1Qaz ETS CFG TLV * @dcbcfg: Local store to update ETS CFG data * * Parses IEEE 802.1Qaz ETS CFG TLV */ static void ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { struct ice_dcb_ets_cfg *etscfg; u8 *buf = tlv->tlvinfo; /* First Octet post subtype * -------------------------- * |will-|CBS | Re- | Max | * |ing | |served| TCs | * -------------------------- * |1bit | 1bit|3 bits|3bits| */ etscfg = &dcbcfg->etscfg; etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >> ICE_IEEE_ETS_WILLING_S); etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S); etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >> ICE_IEEE_ETS_MAXTC_S); /* Begin parsing at Priority Assignment Table (offset 1 in buf) */ ice_parse_ieee_ets_common_tlv(&buf[1], etscfg); } /** * ice_parse_ieee_etsrec_tlv * @tlv: IEEE 802.1Qaz ETS REC TLV * @dcbcfg: Local store to update ETS REC data * * Parses IEEE 802.1Qaz ETS REC TLV */ static void ice_parse_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u8 *buf = tlv->tlvinfo; /* Begin parsing at Priority Assignment Table (offset 1 in buf) */ ice_parse_ieee_ets_common_tlv(&buf[1], &dcbcfg->etsrec); } /** * ice_parse_ieee_pfccfg_tlv * @tlv: IEEE 802.1Qaz PFC CFG TLV * @dcbcfg: Local store to update PFC CFG data * * Parses IEEE 802.1Qaz PFC CFG TLV */ static void ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u8 *buf = tlv->tlvinfo; /* 
---------------------------------------- * |will-|MBC | Re- | PFC | PFC Enable | * |ing | |served| cap | | * ----------------------------------------- * |1bit | 1bit|2 bits|4bits| 1 octet | */ dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >> ICE_IEEE_PFC_WILLING_S); dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S); dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >> ICE_IEEE_PFC_CAP_S); dcbcfg->pfc.pfcena = buf[1]; } /** * ice_parse_ieee_app_tlv * @tlv: IEEE 802.1Qaz APP TLV * @dcbcfg: Local store to update APP PRIO data * * Parses IEEE 802.1Qaz APP PRIO TLV */ static void ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u16 offset = 0; u16 typelen; int i = 0; u16 len; u8 *buf; typelen = NTOHS(tlv->typelen); len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); buf = tlv->tlvinfo; /* Removing sizeof(ouisubtype) and reserved byte from len. * Remaining len div 3 is number of APP TLVs. */ len -= (sizeof(tlv->ouisubtype) + 1); /* Move offset to App Priority Table */ offset++; /* Application Priority Table (3 octets) * Octets:| 1 | 2 | 3 | * ----------------------------------------- * |Priority|Rsrvd| Sel | Protocol ID | * ----------------------------------------- * Bits:|23 21|20 19|18 16|15 0| * ----------------------------------------- */ while (offset < len) { dcbcfg->app[i].priority = ((buf[offset] & ICE_IEEE_APP_PRIO_M) >> ICE_IEEE_APP_PRIO_S); dcbcfg->app[i].selector = ((buf[offset] & ICE_IEEE_APP_SEL_M) >> ICE_IEEE_APP_SEL_S); dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) | buf[offset + 2]; /* Move to next app */ offset += 3; i++; if (i >= ICE_DCBX_MAX_APPS) break; } dcbcfg->numapps = i; } /** * ice_parse_ieee_tlv * @tlv: IEEE 802.1Qaz TLV * @dcbcfg: Local store to update ETS REC data * * Get the TLV subtype and send it to parsing function * based on the subtype value */ static void ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u32 ouisubtype; u8 subtype; ouisubtype = NTOHL(tlv->ouisubtype); subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> ICE_LLDP_TLV_SUBTYPE_S); switch (subtype) { case ICE_IEEE_SUBTYPE_ETS_CFG: ice_parse_ieee_etscfg_tlv(tlv, dcbcfg); break; case ICE_IEEE_SUBTYPE_ETS_REC: ice_parse_ieee_etsrec_tlv(tlv, dcbcfg); break; case ICE_IEEE_SUBTYPE_PFC_CFG: ice_parse_ieee_pfccfg_tlv(tlv, dcbcfg); break; case ICE_IEEE_SUBTYPE_APP_PRI: ice_parse_ieee_app_tlv(tlv, dcbcfg); break; default: break; } } /** * ice_parse_cee_pgcfg_tlv * @tlv: CEE DCBX PG CFG TLV * @dcbcfg: Local store to update ETS CFG data * * Parses CEE DCBX PG CFG TLV */ static void ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { struct ice_dcb_ets_cfg *etscfg; u8 *buf = tlv->tlvinfo; u16 offset = 0; int i; etscfg = &dcbcfg->etscfg; if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M) etscfg->willing = 1; etscfg->cbs = 0; /* Priority Group Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < 4; i++) { etscfg->prio_table[i * 2] = ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >> ICE_CEE_PGID_PRIO_1_S); etscfg->prio_table[i * 2 + 1] = ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >> ICE_CEE_PGID_PRIO_0_S); offset++; } /* PG Percentage Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * 
|pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7| * --------------------------------- */ ice_for_each_traffic_class(i) { etscfg->tcbwtable[i] = buf[offset++]; if (etscfg->prio_table[i] == ICE_CEE_PGID_STRICT) dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT; else dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; } /* Number of TCs supported (1 octet) */ etscfg->maxtcs = buf[offset]; } /** * ice_parse_cee_pfccfg_tlv * @tlv: CEE DCBX PFC CFG TLV * @dcbcfg: Local store to update PFC CFG data * * Parses CEE DCBX PFC CFG TLV */ static void ice_parse_cee_pfccfg_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u8 *buf = tlv->tlvinfo; if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M) dcbcfg->pfc.willing = 1; /* ------------------------ * | PFC Enable | PFC TCs | * ------------------------ * | 1 octet | 1 octet | */ dcbcfg->pfc.pfcena = buf[0]; dcbcfg->pfc.pfccap = buf[1]; } /** * ice_parse_cee_app_tlv * @tlv: CEE DCBX APP TLV * @dcbcfg: Local store to update APP PRIO data * * Parses CEE DCBX APP PRIO TLV */ static void ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u16 len, typelen, offset = 0; struct ice_cee_app_prio *app; u8 i; typelen = NTOHS(tlv->hdr.typelen); len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); dcbcfg->numapps = len / sizeof(*app); if (!dcbcfg->numapps) return; if (dcbcfg->numapps > ICE_DCBX_MAX_APPS) dcbcfg->numapps = ICE_DCBX_MAX_APPS; for (i = 0; i < dcbcfg->numapps; i++) { u8 up, selector; app = (struct ice_cee_app_prio *)(tlv->tlvinfo + offset); for (up = 0; up < ICE_MAX_USER_PRIORITY; up++) if (app->prio_map & BIT(up)) break; dcbcfg->app[i].priority = up; /* Get Selector from lower 2 bits, and convert to IEEE */ selector = (app->upper_oui_sel & ICE_CEE_APP_SELECTOR_M); switch (selector) { case ICE_CEE_APP_SEL_ETHTYPE: dcbcfg->app[i].selector = ICE_APP_SEL_ETHTYPE; break; case ICE_CEE_APP_SEL_TCPIP: dcbcfg->app[i].selector = ICE_APP_SEL_TCPIP; break; default: /* Keep selector as it is for unknown types */ dcbcfg->app[i].selector = selector; } dcbcfg->app[i].prot_id = NTOHS(app->protocol); /* Move to next app */ offset += sizeof(*app); } } /** * ice_parse_cee_tlv * @tlv: CEE DCBX TLV * @dcbcfg: Local store to update DCBX config data * * Get the TLV subtype and send it to parsing function * based on the subtype value */ static void ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { struct ice_cee_feat_tlv *sub_tlv; u8 subtype, feat_tlv_count = 0; u16 len, tlvlen, typelen; u32 ouisubtype; ouisubtype = NTOHL(tlv->ouisubtype); subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >> ICE_LLDP_TLV_SUBTYPE_S); /* Return if not CEE DCBX */ if (subtype != ICE_CEE_DCBX_TYPE) return; typelen = NTOHS(tlv->typelen); tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); len = sizeof(tlv->typelen) + sizeof(ouisubtype) + sizeof(struct ice_cee_ctrl_tlv); /* Return if no CEE DCBX Feature TLVs */ if (tlvlen <= len) return; sub_tlv = (struct ice_cee_feat_tlv *)((char *)tlv + len); while (feat_tlv_count < ICE_CEE_MAX_FEAT_TYPE) { u16 sublen; typelen = NTOHS(sub_tlv->hdr.typelen); sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S); switch (subtype) { case ICE_CEE_SUBTYPE_PG_CFG: ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg); break; case ICE_CEE_SUBTYPE_PFC_CFG: ice_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg); break; case ICE_CEE_SUBTYPE_APP_PRI: ice_parse_cee_app_tlv(sub_tlv, dcbcfg); break; default: return; /* Invalid Sub-type return */ } 
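/* Advance to the next feature sub-TLV: the LLDP length field counts
 * every byte after the 2-byte typelen word, so stepping
 * sizeof(typelen) + sublen from the current header lands exactly on
 * the next sub-TLV header.
 */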
feat_tlv_count++; /* Move to next sub TLV */ sub_tlv = (struct ice_cee_feat_tlv *) ((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) + sublen); } } /** * ice_parse_org_tlv * @tlv: Organization specific TLV * @dcbcfg: Local store to update DCBX configuration data * * Currently IEEE 802.1Qaz and CEE DCBX TLVs are supported; all other * organizationally specific TLVs are ignored */ static void ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u32 ouisubtype; u32 oui; ouisubtype = NTOHL(tlv->ouisubtype); oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S); switch (oui) { case ICE_IEEE_8021QAZ_OUI: ice_parse_ieee_tlv(tlv, dcbcfg); break; case ICE_CEE_DCBX_OUI: ice_parse_cee_tlv(tlv, dcbcfg); break; default: break; } } /** * ice_lldp_to_dcb_cfg * @lldpmib: LLDPDU to be parsed * @dcbcfg: store for LLDPDU data * * Parse DCB configuration from the LLDPDU */ enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) { struct ice_lldp_org_tlv *tlv; enum ice_status ret = ICE_SUCCESS; u16 offset = 0; u16 typelen; u16 type; u16 len; if (!lldpmib || !dcbcfg) return ICE_ERR_PARAM; /* set to the start of LLDPDU */ lldpmib += ETH_HEADER_LEN; tlv = (struct ice_lldp_org_tlv *)lldpmib; while (1) { typelen = NTOHS(tlv->typelen); type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S); len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S); offset += sizeof(typelen) + len; /* END TLV or beyond LLDPDU size */ if (type == ICE_TLV_TYPE_END || offset > ICE_LLDPDU_SIZE) break; switch (type) { case ICE_TLV_TYPE_ORG: ice_parse_org_tlv(tlv, dcbcfg); break; default: break; } /* Move to next TLV */ tlv = (struct ice_lldp_org_tlv *) ((char *)tlv + sizeof(tlv->typelen) + len); } return ret; } /** * ice_aq_get_dcb_cfg * @hw: pointer to the HW struct * @mib_type: MIB type for the query * @bridgetype: bridge type for the query (remote) * @dcbcfg: store for LLDPDU data * * Query DCB configuration from the firmware */ enum ice_status ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg) { enum ice_status ret; u8 *lldpmib; /* Allocate the LLDPDU */ lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE); if (!lldpmib) return ICE_ERR_NO_MEMORY; ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib, ICE_LLDPDU_SIZE, NULL, NULL, NULL); if (ret == ICE_SUCCESS) /* Parse LLDP MIB to get DCB configuration */ ret = ice_lldp_to_dcb_cfg(lldpmib, dcbcfg); ice_free(hw, lldpmib); return ret; } /** * ice_aq_dcb_ignore_pfc - Ignore PFC for given TCs * @hw: pointer to the HW struct * @tcmap: TC map for request/release any ignore PFC condition * @request: request (true) or release (false) ignore PFC condition * @tcmap_ret: return TCs for which PFC is currently ignored * @cd: pointer to command details structure or NULL * * This sends out request/release to ignore PFC condition for a TC. * It will return the TCs for which PFC is currently ignored.
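 *
 * A minimal call sketch (hypothetical caller, error handling elided):
 *
 *	u8 ignored_map;
 *
 *	status = ice_aq_dcb_ignore_pfc(hw, BIT(0) | BIT(1), true,
 *				       &ignored_map, NULL);
 *
 * requests that PFC be ignored for TC 0 and TC 1 and, on success,
 * leaves the bitmap of currently ignored TCs in ignored_map.
 *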
(0x0301) */ enum ice_status ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct ice_sq_cd *cd) { struct ice_aqc_pfc_ignore *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.pfc_ignore; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_pfc_ignore); if (request) cmd->cmd_flags = ICE_AQC_PFC_IGNORE_SET; cmd->tc_bitmap = tcmap; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (!status && tcmap_ret) *tcmap_ret = cmd->tc_bitmap; return status; } /** * ice_aq_start_stop_dcbx - Start/Stop DCBX service in FW * @hw: pointer to the HW struct * @start_dcbx_agent: True if DCBX Agent needs to be started * False if DCBX Agent needs to be stopped * @dcbx_agent_status: FW indicates back the DCBX agent status * True if DCBX Agent is active * False if DCBX Agent is stopped * @cd: pointer to command details structure or NULL * * Start/Stop the embedded dcbx Agent. In case that this wrapper function * returns ICE_SUCCESS, caller will need to check if FW returns back the same * value as stated in dcbx_agent_status, and react accordingly. (0x0A09) */ enum ice_status ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd) { struct ice_aqc_lldp_stop_start_specific_agent *cmd; - enum ice_status status; + enum ice_adminq_opc opcode; struct ice_aq_desc desc; - u16 opcode; + enum ice_status status; cmd = &desc.params.lldp_agent_ctrl; opcode = ice_aqc_opc_lldp_stop_start_specific_agent; ice_fill_dflt_direct_cmd_desc(&desc, opcode); if (start_dcbx_agent) cmd->command = ICE_AQC_START_STOP_AGENT_START_DCBX; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); *dcbx_agent_status = false; if (status == ICE_SUCCESS && cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX) *dcbx_agent_status = true; return status; } /** * ice_aq_get_cee_dcb_cfg * @hw: pointer to the HW struct * @buff: response buffer that stores CEE operational configuration * @cd: pointer to command details structure or NULL * * Get CEE DCBX mode operational configuration from firmware (0x0A07) */ enum ice_status ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, struct ice_aqc_get_cee_dcb_cfg_resp *buff, struct ice_sq_cd *cd) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg); return ice_aq_send_cmd(hw, &desc, (void *)buff, sizeof(*buff), cd); } /** * ice_aq_query_pfc_mode - Query PFC mode * @hw: pointer to the HW struct * @pfcmode_ret: Return PFC mode * @cd: pointer to command details structure or NULL * * This will return an indication if DSCP-based PFC or VLAN-based PFC * is enabled. 
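 *
 * For example, a caller can detect DSCP-based PFC like this (sketch;
 * the only symbol assumed is ICE_AQC_PFC_DSCP_BASED_PFC, which
 * ice_aq_set_pfc_mode() below also uses):
 *
 *	u8 mode;
 *
 *	if (!ice_aq_query_pfc_mode(hw, &mode, NULL) &&
 *	    mode == ICE_AQC_PFC_DSCP_BASED_PFC)
 *		handle_dscp_pfc();	// hypothetical helper
 *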
(0x0302) */ enum ice_status ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd) { struct ice_aqc_set_query_pfc_mode *cmd; struct ice_aq_desc desc; enum ice_status status; cmd = &desc.params.set_query_pfc_mode; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_pfc_mode); status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (!status) *pfcmode_ret = cmd->pfc_mode; return status; } /** * ice_aq_set_pfc_mode - Set PFC mode * @hw: pointer to the HW struct * @pfc_mode: value of PFC mode to set * @cd: pointer to command details structure or NULL * * This AQ call configures the PFC mode to DSCP-based PFC mode or * VLAN-based PFC (0x0303) */ enum ice_status ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) { struct ice_aqc_set_query_pfc_mode *cmd; struct ice_aq_desc desc; enum ice_status status; if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC) return ICE_ERR_PARAM; cmd = &desc.params.set_query_pfc_mode; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode); cmd->pfc_mode = pfc_mode; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); if (status) return status; /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has * been executed, check if cmd->pfc_mode is what was requested. If not, * return an error. */ if (cmd->pfc_mode != pfc_mode) return ICE_ERR_NOT_SUPPORTED; return ICE_SUCCESS; } /** * ice_aq_set_dcb_parameters - Set DCB parameters * @hw: pointer to the HW struct * @dcb_enable: True if DCB configuration needs to be applied * @cd: pointer to command details structure or NULL * * This AQ command tells FW whether or not to apply the default DCB * configuration when the link comes up (0x0306). */ enum ice_status ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable, struct ice_sq_cd *cd) { struct ice_aqc_set_dcb_params *cmd; struct ice_aq_desc desc; cmd = &desc.params.set_dcb_params; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_dcb_params); cmd->valid_flags = ICE_AQC_LINK_UP_DCB_CFG_VALID; if (dcb_enable) cmd->cmd_flags = ICE_AQC_LINK_UP_DCB_CFG; return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } /** * ice_cee_to_dcb_cfg * @cee_cfg: pointer to CEE configuration struct * @pi: port information structure * * Convert CEE configuration from firmware to DCB configuration */ static void ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg, struct ice_port_info *pi) { u32 status, tlv_status = LE32_TO_CPU(cee_cfg->tlv_status); u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift; u8 i, j, err, sync, oper, app_index, ice_app_sel_type; u16 app_prio = LE16_TO_CPU(cee_cfg->oper_app_prio); u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift; struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg; u16 ice_app_prot_id_type; dcbcfg = &pi->qos_cfg.local_dcbx_cfg; dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE; dcbcfg->tlv_status = tlv_status; /* CEE PG data */ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc; /* Note that the FW creates the oper_prio_tc nibbles reversed * from those in the CEE Priority Group sub-TLV.
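 * For example, an oper_prio_tc octet of 0x21 assigns priority 0 to TC 1
 * (low nibble, ICE_CEE_PGID_PRIO_0_M) and priority 1 to TC 2 (high
 * nibble, ICE_CEE_PGID_PRIO_1_M); this is the opposite nibble order
 * from the on-wire sub-TLV handled by ice_parse_cee_pgcfg_tlv() above.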
*/ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) { dcbcfg->etscfg.prio_table[i * 2] = ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >> ICE_CEE_PGID_PRIO_0_S); dcbcfg->etscfg.prio_table[i * 2 + 1] = ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >> ICE_CEE_PGID_PRIO_1_S); } ice_for_each_traffic_class(i) { dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i]; if (dcbcfg->etscfg.prio_table[i] == ICE_CEE_PGID_STRICT) { /* Map it to next empty TC */ dcbcfg->etscfg.prio_table[i] = cee_cfg->oper_num_tc - 1; dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT; } else { dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; } } /* CEE PFC data */ dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en; dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS; /* CEE APP TLV data */ if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg; else cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg; app_index = 0; for (i = 0; i < 3; i++) { if (i == 0) { /* FCoE APP */ ice_aqc_cee_status_mask = ICE_AQC_CEE_FCOE_STATUS_M; ice_aqc_cee_status_shift = ICE_AQC_CEE_FCOE_STATUS_S; ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M; ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S; ice_app_sel_type = ICE_APP_SEL_ETHTYPE; ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE; } else if (i == 1) { /* iSCSI APP */ ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M; ice_aqc_cee_status_shift = ICE_AQC_CEE_ISCSI_STATUS_S; ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M; ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S; ice_app_sel_type = ICE_APP_SEL_TCPIP; ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI; for (j = 0; j < cmp_dcbcfg->numapps; j++) { u16 prot_id = cmp_dcbcfg->app[j].prot_id; u8 sel = cmp_dcbcfg->app[j].selector; if (sel == ICE_APP_SEL_TCPIP && (prot_id == ICE_APP_PROT_ID_ISCSI || prot_id == ICE_APP_PROT_ID_ISCSI_860)) { ice_app_prot_id_type = prot_id; break; } } } else { /* FIP APP */ ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M; ice_aqc_cee_status_shift = ICE_AQC_CEE_FIP_STATUS_S; ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M; ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S; ice_app_sel_type = ICE_APP_SEL_ETHTYPE; ice_app_prot_id_type = ICE_APP_PROT_ID_FIP; } status = (tlv_status & ice_aqc_cee_status_mask) >> ice_aqc_cee_status_shift; err = (status & ICE_TLV_STATUS_ERR) ? 1 : 0; sync = (status & ICE_TLV_STATUS_SYNC) ? 1 : 0; oper = (status & ICE_TLV_STATUS_OPER) ? 
1 : 0; /* Add FCoE/iSCSI/FIP APP if Error is False and * Oper/Sync is True */ if (!err && sync && oper) { dcbcfg->app[app_index].priority = - (app_prio & ice_aqc_cee_app_mask) >> - ice_aqc_cee_app_shift; + (u8)((app_prio & ice_aqc_cee_app_mask) >> + ice_aqc_cee_app_shift); dcbcfg->app[app_index].selector = ice_app_sel_type; dcbcfg->app[app_index].prot_id = ice_app_prot_id_type; app_index++; } } dcbcfg->numapps = app_index; } /** * ice_get_ieee_or_cee_dcb_cfg * @pi: port information structure * @dcbx_mode: mode of DCBX (IEEE or CEE) * * Get IEEE or CEE mode DCB configuration from the Firmware */ STATIC enum ice_status ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode) { struct ice_dcbx_cfg *dcbx_cfg = NULL; enum ice_status ret; if (!pi) return ICE_ERR_PARAM; if (dcbx_mode == ICE_DCBX_MODE_IEEE) dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; else if (dcbx_mode == ICE_DCBX_MODE_CEE) dcbx_cfg = &pi->qos_cfg.desired_dcbx_cfg; /* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE */ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_LOCAL, ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg); if (ret) goto out; /* Get Remote DCB Config */ dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE, ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg); /* Don't treat ENOENT as an error for Remote MIBs */ if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) ret = ICE_SUCCESS; out: return ret; } /** * ice_get_dcb_cfg * @pi: port information structure * * Get DCB configuration from the Firmware */ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi) { struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg; struct ice_dcbx_cfg *dcbx_cfg; enum ice_status ret; if (!pi) return ICE_ERR_PARAM; ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL); if (ret == ICE_SUCCESS) { /* CEE mode */ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE); ice_cee_to_dcb_cfg(&cee_cfg, pi); } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) { /* CEE mode not enabled try querying IEEE data */ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE); } return ret; } +/** + * ice_get_dcb_cfg_from_mib_change + * @pi: port information structure + * @event: pointer to the admin queue receive event + * + * Set DCB configuration from received MIB Change event + */ +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event) +{ + struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg; + struct ice_aqc_lldp_get_mib *mib; + u8 change_type, dcbx_mode; + + mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw; + + change_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M; + if (change_type == ICE_AQ_LLDP_MIB_REMOTE) + dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg; + + dcbx_mode = ((mib->type & ICE_AQ_LLDP_DCBX_M) >> + ICE_AQ_LLDP_DCBX_S); + + switch (dcbx_mode) { + case ICE_AQ_LLDP_DCBX_IEEE: + dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE; + ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg); + break; + + case ICE_AQ_LLDP_DCBX_CEE: + pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg; + ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *) + event->msg_buf, pi); + break; + } +} + /** * ice_init_dcb * @hw: pointer to the HW struct * @enable_mib_change: enable MIB change event * * Update DCB configuration from the Firmware */ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change) { struct ice_qos_cfg 
*qos_cfg = &hw->port_info->qos_cfg; enum ice_status ret = ICE_SUCCESS; if (!hw->func_caps.common_cap.dcb) return ICE_ERR_NOT_SUPPORTED; qos_cfg->is_sw_lldp = true; /* Get DCBX status */ qos_cfg->dcbx_status = ice_get_dcbx_status(hw); if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DONE || qos_cfg->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS || qos_cfg->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { /* Get current DCBX configuration */ ret = ice_get_dcb_cfg(hw->port_info); if (ret) return ret; qos_cfg->is_sw_lldp = false; } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) { return ICE_ERR_NOT_READY; } /* Configure the LLDP MIB change event */ if (enable_mib_change) { ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL); if (ret) qos_cfg->is_sw_lldp = true; } return ret; } /** * ice_cfg_lldp_mib_change * @hw: pointer to the HW struct * @ena_mib: enable/disable MIB change event * * Configure (disable/enable) MIB */ enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib) { struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg; enum ice_status ret; if (!hw->func_caps.common_cap.dcb) return ICE_ERR_NOT_SUPPORTED; /* Get DCBX status */ qos_cfg->dcbx_status = ice_get_dcbx_status(hw); if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) return ICE_ERR_NOT_READY; ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL); if (!ret) qos_cfg->is_sw_lldp = !ena_mib; return ret; } /** * ice_add_ieee_ets_common_tlv * @buf: Data buffer to be populated with ice_dcb_ets_cfg data * @ets_cfg: Container for ice_dcb_ets_cfg data * * Populate the TLV buffer with ice_dcb_ets_cfg data */ static void ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg) { u8 priority0, priority1; u8 offset = 0; int i; /* Priority Assignment Table (4 octets) * Octets:| 1 | 2 | 3 | 4 | * ----------------------------------------- * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| * ----------------------------------------- * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| * ----------------------------------------- */ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) { priority0 = ets_cfg->prio_table[i * 2] & 0xF; priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF; buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1; offset++; } /* TC Bandwidth Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- * * TSA Assignment Table (8 octets) * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | * --------------------------------- * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| * --------------------------------- */ ice_for_each_traffic_class(i) { buf[offset] = ets_cfg->tcbwtable[i]; buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i]; offset++; } } /** * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format * @tlv: Fill the ETS config data in IEEE format * @dcbcfg: Local store which holds the DCB Config * * Prepare IEEE 802.1Qaz ETS CFG TLV */ static void ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { struct ice_dcb_ets_cfg *etscfg; u8 *buf = tlv->tlvinfo; u8 maxtcwilling = 0; u32 ouisubtype; u16 typelen; typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | ICE_IEEE_ETS_TLV_LEN); tlv->typelen = HTONS(typelen); ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | ICE_IEEE_SUBTYPE_ETS_CFG); tlv->ouisubtype = HTONL(ouisubtype); /* First Octet post subtype * -------------------------- * |will-|CBS | Re- | Max | * |ing | |served| TCs | * -------------------------- * |1bit | 1bit|3 bits|3bits| */ 
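/* For instance, a willing ETS agent advertising 4 TCs is encoded as
 * BIT(ICE_IEEE_ETS_WILLING_S) | (4 & ICE_IEEE_ETS_MAXTC_M), i.e.
 * 0x80 | 0x04 = 0x84; the code below builds exactly this value in
 * maxtcwilling before storing it to buf[0].
 */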
etscfg = &dcbcfg->etscfg; if (etscfg->willing) maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S); maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M; buf[0] = maxtcwilling; /* Begin adding at Priority Assignment Table (offset 1 in buf) */ ice_add_ieee_ets_common_tlv(&buf[1], etscfg); } /** * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format * @tlv: Fill ETS Recommended TLV in IEEE format * @dcbcfg: Local store which holds the DCB Config * * Prepare IEEE 802.1Qaz ETS REC TLV */ static void ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { struct ice_dcb_ets_cfg *etsrec; u8 *buf = tlv->tlvinfo; u32 ouisubtype; u16 typelen; typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | ICE_IEEE_ETS_TLV_LEN); tlv->typelen = HTONS(typelen); ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | ICE_IEEE_SUBTYPE_ETS_REC); tlv->ouisubtype = HTONL(ouisubtype); etsrec = &dcbcfg->etsrec; /* First Octet is reserved */ /* Begin adding at Priority Assignment Table (offset 1 in buf) */ ice_add_ieee_ets_common_tlv(&buf[1], etsrec); } /** * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format * @tlv: Fill PFC TLV in IEEE format * @dcbcfg: Local store which holds the PFC CFG data * * Prepare IEEE 802.1Qaz PFC CFG TLV */ static void ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u8 *buf = tlv->tlvinfo; u32 ouisubtype; u16 typelen; typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | ICE_IEEE_PFC_TLV_LEN); tlv->typelen = HTONS(typelen); ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | ICE_IEEE_SUBTYPE_PFC_CFG); tlv->ouisubtype = HTONL(ouisubtype); /* ---------------------------------------- * |will-|MBC | Re- | PFC | PFC Enable | * |ing | |served| cap | | * ----------------------------------------- * |1bit | 1bit|2 bits|4bits| 1 octet | */ if (dcbcfg->pfc.willing) buf[0] = BIT(ICE_IEEE_PFC_WILLING_S); if (dcbcfg->pfc.mbc) buf[0] |= BIT(ICE_IEEE_PFC_MBC_S); buf[0] |= dcbcfg->pfc.pfccap & 0xF; buf[1] = dcbcfg->pfc.pfcena; } /** * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format * @tlv: Fill APP TLV in IEEE format * @dcbcfg: Local store which holds the APP CFG data * * Prepare IEEE 802.1Qaz APP CFG TLV */ static void ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u16 typelen, len, offset = 0; u8 priority, selector, i = 0; u8 *buf = tlv->tlvinfo; u32 ouisubtype; /* No APP TLVs then just return */ if (dcbcfg->numapps == 0) return; ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | ICE_IEEE_SUBTYPE_APP_PRI); tlv->ouisubtype = HTONL(ouisubtype); /* Move offset to App Priority Table */ offset++; /* Application Priority Table (3 octets) * Octets:| 1 | 2 | 3 | * ----------------------------------------- * |Priority|Rsrvd| Sel | Protocol ID | * ----------------------------------------- * Bits:|23 21|20 19|18 16|15 0| * ----------------------------------------- */ while (i < dcbcfg->numapps) { priority = dcbcfg->app[i].priority & 0x7; selector = dcbcfg->app[i].selector & 0x7; buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector; buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF; buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF; /* Move to next app */ offset += 3; i++; if (i >= ICE_DCBX_MAX_APPS) break; } /* len includes size of ouisubtype + 1 reserved + 3*numapps */ len = sizeof(tlv->ouisubtype) + 1 + (i * 3); typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF)); tlv->typelen = HTONS(typelen); } /** * ice_add_dscp_up_tlv - Prepare DSCP to 
UP TLV * @tlv: location to build the TLV data * @dcbcfg: location of data to convert to TLV */ static void ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u8 *buf = tlv->tlvinfo; u32 ouisubtype; u16 typelen; int i; typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | ICE_DSCP_UP_TLV_LEN); tlv->typelen = HTONS(typelen); ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | ICE_DSCP_SUBTYPE_DSCP2UP); tlv->ouisubtype = HTONL(ouisubtype); /* bytes 0 - 63 - IPv4 DSCP2UP LUT */ for (i = 0; i < ICE_DSCP_NUM_VAL; i++) { /* IPv4 mapping */ buf[i] = dcbcfg->dscp_map[i]; /* IPv6 mapping */ buf[i + ICE_DSCP_IPV6_OFFSET] = dcbcfg->dscp_map[i]; } /* byte 64 - IPv4 untagged traffic */ buf[i] = 0; /* byte 144 - IPv6 untagged traffic */ buf[i + ICE_DSCP_IPV6_OFFSET] = 0; } #define ICE_BYTES_PER_TC 8 /** * ice_add_dscp_enf_tlv - Prepare DSCP Enforcement TLV * @tlv: location to build the TLV data */ static void ice_add_dscp_enf_tlv(struct ice_lldp_org_tlv *tlv) { u8 *buf = tlv->tlvinfo; u32 ouisubtype; u16 typelen; typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | ICE_DSCP_ENF_TLV_LEN); tlv->typelen = HTONS(typelen); ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | ICE_DSCP_SUBTYPE_ENFORCE); tlv->ouisubtype = HTONL(ouisubtype); /* Allow all DSCP values to be valid for all TCs (IPv4 and IPv6) */ memset(buf, 0, 2 * (ICE_MAX_TRAFFIC_CLASS * ICE_BYTES_PER_TC)); } /** * ice_add_dscp_tc_bw_tlv - Prepare DSCP BW for TC TLV * @tlv: location to build the TLV data * @dcbcfg: location of the data to convert to TLV */ static void ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { struct ice_dcb_ets_cfg *etscfg; u8 *buf = tlv->tlvinfo; u32 ouisubtype; u8 offset = 0; u16 typelen; int i; typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | ICE_DSCP_TC_BW_TLV_LEN); tlv->typelen = HTONS(typelen); ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | ICE_DSCP_SUBTYPE_TCBW); tlv->ouisubtype = HTONL(ouisubtype); /* First Octet after subtype * ---------------------------- * | RSV | CBS | RSV | Max TCs | * | 1b | 1b | 3b | 3b | * ---------------------------- */ etscfg = &dcbcfg->etscfg; buf[0] = etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M; /* bytes 1 - 4 reserved */ offset = 5; /* TC BW table * bytes 0 - 7 for TC 0 - 7 * * TSA Assignment table * bytes 8 - 15 for TC 0 - 7 */ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { buf[offset] = etscfg->tcbwtable[i]; buf[offset + ICE_MAX_TRAFFIC_CLASS] = etscfg->tsatable[i]; offset++; } } /** * ice_add_dscp_pfc_tlv - Prepare DSCP PFC TLV * @tlv: Fill PFC TLV in IEEE format * @dcbcfg: Local store which holds the PFC CFG data */ static void ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) { u8 *buf = tlv->tlvinfo; u32 ouisubtype; u16 typelen; typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | ICE_DSCP_PFC_TLV_LEN); tlv->typelen = HTONS(typelen); ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | ICE_DSCP_SUBTYPE_PFC); tlv->ouisubtype = HTONL(ouisubtype); buf[0] = dcbcfg->pfc.pfccap & 0xF; - buf[1] = dcbcfg->pfc.pfcena & 0xF; + buf[1] = dcbcfg->pfc.pfcena; } /** * ice_add_dcb_tlv - Add all IEEE or DSCP TLVs * @tlv: Fill TLV data in IEEE format * @dcbcfg: Local store which holds the DCB Config * @tlvid: Type of IEEE TLV * * Add TLV information */ static void ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg, u16 tlvid) { if (dcbcfg->pfc_mode == ICE_QOS_MODE_VLAN) { switch (tlvid) { case ICE_IEEE_TLV_ID_ETS_CFG: ice_add_ieee_ets_tlv(tlv, dcbcfg);
break; case ICE_IEEE_TLV_ID_ETS_REC: ice_add_ieee_etsrec_tlv(tlv, dcbcfg); break; case ICE_IEEE_TLV_ID_PFC_CFG: ice_add_ieee_pfc_tlv(tlv, dcbcfg); break; case ICE_IEEE_TLV_ID_APP_PRI: ice_add_ieee_app_pri_tlv(tlv, dcbcfg); break; default: break; } } else { /* pfc_mode == ICE_QOS_MODE_DSCP */ switch (tlvid) { case ICE_TLV_ID_DSCP_UP: ice_add_dscp_up_tlv(tlv, dcbcfg); break; case ICE_TLV_ID_DSCP_ENF: ice_add_dscp_enf_tlv(tlv); break; case ICE_TLV_ID_DSCP_TC_BW: ice_add_dscp_tc_bw_tlv(tlv, dcbcfg); break; case ICE_TLV_ID_DSCP_TO_PFC: ice_add_dscp_pfc_tlv(tlv, dcbcfg); break; default: break; } } } /** * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format * @lldpmib: pointer to the LLDP MIB buffer to be filled * @miblen: location to store the length of the LLDP MIB * @dcbcfg: Local store which holds the DCB Config * * Convert the DCB configuration to MIB format */ void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) { u16 len, offset = 0, tlvid = ICE_TLV_ID_START; struct ice_lldp_org_tlv *tlv; u16 typelen; tlv = (struct ice_lldp_org_tlv *)lldpmib; while (1) { ice_add_dcb_tlv(tlv, dcbcfg, tlvid++); typelen = NTOHS(tlv->typelen); len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; if (len) offset += len + 2; /* END TLV or beyond LLDPDU size */ if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU || offset > ICE_LLDPDU_SIZE) break; /* Move to next TLV */ if (len) tlv = (struct ice_lldp_org_tlv *) ((char *)tlv + sizeof(tlv->typelen) + len); } *miblen = offset; } /** * ice_set_dcb_cfg - Set the local LLDP MIB to FW * @pi: port information structure * * Set DCB configuration to the Firmware */ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) { u8 mib_type, *lldpmib = NULL; struct ice_dcbx_cfg *dcbcfg; enum ice_status ret; struct ice_hw *hw; u16 miblen; if (!pi) return ICE_ERR_PARAM; hw = pi->hw; /* update the HW local config */ dcbcfg = &pi->qos_cfg.local_dcbx_cfg; /* Allocate the LLDPDU */ lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE); if (!lldpmib) return ICE_ERR_NO_MEMORY; mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING; ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg); ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL); ice_free(hw, lldpmib); return ret; } /** * ice_aq_query_port_ets - query port ETS configuration * @pi: port information structure * @buf: pointer to buffer * @buf_size: buffer size in bytes * @cd: pointer to command details structure or NULL * * query current port ETS configuration */ enum ice_status ice_aq_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { struct ice_aqc_query_port_ets *cmd; struct ice_aq_desc desc; enum ice_status status; if (!pi || !pi->root) return ICE_ERR_PARAM; cmd = &desc.params.port_ets; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets); cmd->port_teid = pi->root->info.node_teid; status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd); return status; } /** * ice_update_port_tc_tree_cfg - update TC tree configuration * @pi: port information structure * @buf: pointer to buffer * * update the SW DB with the new TC changes */ enum ice_status ice_update_port_tc_tree_cfg(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf) { struct ice_sched_node *node, *tc_node; struct ice_aqc_txsched_elem_data elem; enum ice_status status = ICE_SUCCESS; u32 teid1, teid2; u8 i, j; if (!pi) return ICE_ERR_PARAM; /* suspend the missing TC nodes */ for (i = 0; i < pi->root->num_children;
i++) { teid1 = LE32_TO_CPU(pi->root->children[i]->info.node_teid); ice_for_each_traffic_class(j) { teid2 = LE32_TO_CPU(buf->tc_node_teid[j]); if (teid1 == teid2) break; } if (j < ICE_MAX_TRAFFIC_CLASS) continue; /* TC is missing */ pi->root->children[i]->in_use = false; } /* add the new TC nodes */ ice_for_each_traffic_class(j) { teid2 = LE32_TO_CPU(buf->tc_node_teid[j]); if (teid2 == ICE_INVAL_TEID) continue; /* Is it already present in the tree ? */ for (i = 0; i < pi->root->num_children; i++) { tc_node = pi->root->children[i]; if (!tc_node) continue; teid1 = LE32_TO_CPU(tc_node->info.node_teid); if (teid1 == teid2) { tc_node->tc_num = j; tc_node->in_use = true; break; } } if (i < pi->root->num_children) continue; /* new TC */ status = ice_sched_query_elem(pi->hw, teid2, &elem); if (!status) status = ice_sched_add_node(pi, 1, &elem); if (status) break; /* update the TC number */ node = ice_sched_find_node_by_teid(pi->root, teid2); if (node) node->tc_num = j; } return status; } /** * ice_query_port_ets - query port ETS configuration * @pi: port information structure * @buf: pointer to buffer * @buf_size: buffer size in bytes * @cd: pointer to command details structure or NULL * * query current port ETS configuration and update the * SW DB with the TC changes */ enum ice_status ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd) { enum ice_status status; ice_acquire_lock(&pi->sched_lock); status = ice_aq_query_port_ets(pi, buf, buf_size, cd); if (!status) status = ice_update_port_tc_tree_cfg(pi, buf); ice_release_lock(&pi->sched_lock); return status; } diff --git a/sys/dev/ice/ice_dcb.h b/sys/dev/ice/ice_dcb.h index 6d624268bb74..504a356221c5 100644 --- a/sys/dev/ice/ice_dcb.h +++ b/sys/dev/ice/ice_dcb.h @@ -1,277 +1,279 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _ICE_DCB_H_ #define _ICE_DCB_H_ #include "ice_type.h" #include "ice_common.h" #define ICE_DCBX_OFFLOAD_DIS 0 #define ICE_DCBX_OFFLOAD_ENABLED 1 #define ICE_DCBX_STATUS_NOT_STARTED 0 #define ICE_DCBX_STATUS_IN_PROGRESS 1 #define ICE_DCBX_STATUS_DONE 2 #define ICE_DCBX_STATUS_MULTIPLE_PEERS 3 #define ICE_DCBX_STATUS_DIS 7 #define ICE_TLV_TYPE_END 0 #define ICE_TLV_TYPE_ORG 127 #define ICE_IEEE_8021QAZ_OUI 0x0080C2 #define ICE_IEEE_SUBTYPE_ETS_CFG 9 #define ICE_IEEE_SUBTYPE_ETS_REC 10 #define ICE_IEEE_SUBTYPE_PFC_CFG 11 #define ICE_IEEE_SUBTYPE_APP_PRI 12 #define ICE_CEE_DCBX_OUI 0x001B21 #define ICE_CEE_DCBX_TYPE 2 #define ICE_DSCP_OUI 0xFFFFFF #define ICE_DSCP_SUBTYPE_DSCP2UP 0x41 #define ICE_DSCP_SUBTYPE_ENFORCE 0x42 #define ICE_DSCP_SUBTYPE_TCBW 0x43 #define ICE_DSCP_SUBTYPE_PFC 0x44 #define ICE_DSCP_IPV6_OFFSET 80 #define ICE_CEE_SUBTYPE_CTRL 1 #define ICE_CEE_SUBTYPE_PG_CFG 2 #define ICE_CEE_SUBTYPE_PFC_CFG 3 #define ICE_CEE_SUBTYPE_APP_PRI 4 #define ICE_CEE_MAX_FEAT_TYPE 3 #define ICE_LLDP_ADMINSTATUS_DIS 0 #define ICE_LLDP_ADMINSTATUS_ENA_RX 1 #define ICE_LLDP_ADMINSTATUS_ENA_TX 2 #define ICE_LLDP_ADMINSTATUS_ENA_RXTX 3 /* Defines for LLDP TLV header */ #define ICE_LLDP_TLV_LEN_S 0 #define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S) #define ICE_LLDP_TLV_TYPE_S 9 #define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S) #define ICE_LLDP_TLV_SUBTYPE_S 0 #define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S) #define ICE_LLDP_TLV_OUI_S 8 #define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S) /* Defines for IEEE ETS TLV */ #define ICE_IEEE_ETS_MAXTC_S 0 #define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S) #define ICE_IEEE_ETS_CBS_S 6 #define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S) #define ICE_IEEE_ETS_WILLING_S 7 #define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S) #define ICE_IEEE_ETS_PRIO_0_S 0 #define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S) #define ICE_IEEE_ETS_PRIO_1_S 4 #define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S) #define ICE_CEE_PGID_PRIO_0_S 0 #define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S) #define ICE_CEE_PGID_PRIO_1_S 4 #define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S) #define ICE_CEE_PGID_STRICT 15 /* Defines for IEEE TSA types */ #define ICE_IEEE_TSA_STRICT 0 #define ICE_IEEE_TSA_CBS 1 #define ICE_IEEE_TSA_ETS 2 #define ICE_IEEE_TSA_VENDOR 255 /* Defines for IEEE PFC TLV */ #define ICE_IEEE_PFC_CAP_S 0 #define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S) #define ICE_IEEE_PFC_MBC_S 6 #define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S) #define ICE_IEEE_PFC_WILLING_S 7 #define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S) /* Defines for IEEE APP TLV */ #define ICE_IEEE_APP_SEL_S 0 #define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S) #define ICE_IEEE_APP_PRIO_S 5 #define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S) /* TLV definitions for preparing MIB */ #define ICE_TLV_ID_CHASSIS_ID 0 #define ICE_TLV_ID_PORT_ID 1 #define ICE_TLV_ID_TIME_TO_LIVE 2 #define ICE_IEEE_TLV_ID_ETS_CFG 3 #define ICE_IEEE_TLV_ID_ETS_REC 4 #define ICE_IEEE_TLV_ID_PFC_CFG 5 #define ICE_IEEE_TLV_ID_APP_PRI 6 #define ICE_TLV_ID_END_OF_LLDPPDU 7 #define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG #define ICE_TLV_ID_DSCP_UP 3 #define ICE_TLV_ID_DSCP_ENF 4 #define ICE_TLV_ID_DSCP_TC_BW 5 #define ICE_TLV_ID_DSCP_TO_PFC 6 #define ICE_IEEE_ETS_TLV_LEN 25 #define ICE_IEEE_PFC_TLV_LEN 6 #define ICE_IEEE_APP_TLV_LEN 11 #define ICE_DSCP_UP_TLV_LEN 148 #define ICE_DSCP_ENF_TLV_LEN 132 #define 
ICE_DSCP_TC_BW_TLV_LEN 25 #define ICE_DSCP_PFC_TLV_LEN 6 #pragma pack(1) /* IEEE 802.1AB LLDP Organization specific TLV */ struct ice_lldp_org_tlv { __be16 typelen; __be32 ouisubtype; u8 tlvinfo[STRUCT_HACK_VAR_LEN]; }; #pragma pack() struct ice_cee_tlv_hdr { __be16 typelen; u8 operver; u8 maxver; }; struct ice_cee_ctrl_tlv { struct ice_cee_tlv_hdr hdr; __be32 seqno; __be32 ackno; }; struct ice_cee_feat_tlv { struct ice_cee_tlv_hdr hdr; u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ #define ICE_CEE_FEAT_TLV_ENA_M 0x80 #define ICE_CEE_FEAT_TLV_WILLING_M 0x40 #define ICE_CEE_FEAT_TLV_ERR_M 0x20 u8 subtype; u8 tlvinfo[STRUCT_HACK_VAR_LEN]; }; #pragma pack(1) struct ice_cee_app_prio { __be16 protocol; u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */ #define ICE_CEE_APP_SELECTOR_M 0x03 __be16 lower_oui; u8 prio_map; }; #pragma pack() /* TODO: The below structures related LLDP/DCBX variables * and statistics are defined but need to find how to get * the required information from the Firmware to use them */ /* IEEE 802.1AB LLDP Agent Statistics */ struct ice_lldp_stats { u64 remtablelastchangetime; u64 remtableinserts; u64 remtabledeletes; u64 remtabledrops; u64 remtableageouts; u64 txframestotal; u64 rxframesdiscarded; u64 rxportframeerrors; u64 rxportframestotal; u64 rxporttlvsdiscardedtotal; u64 rxporttlvsunrecognizedtotal; u64 remtoomanyneighbors; }; /* IEEE 802.1Qaz DCBX variables */ struct ice_dcbx_variables { u32 defmaxtrafficclasses; u32 defprioritytcmapping; u32 deftcbandwidth; u32 deftsaassignment; }; enum ice_status ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf, u16 buf_size, u16 *local_len, u16 *remote_len, struct ice_sq_cd *cd); enum ice_status ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv, void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len, struct ice_sq_cd *cd); enum ice_status ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf, u16 buf_size, u16 old_len, u16 new_len, u16 offset, u16 *mib_len, struct ice_sq_cd *cd); enum ice_status ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct ice_sq_cd *cd); enum ice_status ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, struct ice_aqc_get_cee_dcb_cfg_resp *buff, struct ice_sq_cd *cd); enum ice_status ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd); enum ice_status ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable, struct ice_sq_cd *cd); enum ice_status ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd); enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg); u8 ice_get_dcbx_status(struct ice_hw *hw); enum ice_status ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi); enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); +void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi, + struct ice_rq_event_info *event); enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change); void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg); enum ice_status ice_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cmd_details); enum ice_status ice_aq_query_port_ets(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_update_port_tc_tree_cfg(struct ice_port_info *pi, struct 
ice_aqc_port_ets_elem *buf); enum ice_status ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist, struct ice_sq_cd *cd); enum ice_status ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd); enum ice_status ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent, bool *dcbx_agent_status, struct ice_sq_cd *cd); enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib); enum ice_status ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update, struct ice_sq_cd *cd); #endif /* _ICE_DCB_H_ */ diff --git a/sys/dev/ice/ice_ddp_common.c b/sys/dev/ice/ice_ddp_common.c new file mode 100644 index 000000000000..730b78b0f81e --- /dev/null +++ b/sys/dev/ice/ice_ddp_common.c @@ -0,0 +1,2532 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (c) 2022, Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ +/*$FreeBSD$*/ + +#include "ice_ddp_common.h" +#include "ice_type.h" +#include "ice_common.h" +#include "ice_sched.h" + +/** + * ice_aq_download_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer to transfer + * @buf_size: the size of the package buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Download Package (0x0C40) + */ +static enum ice_status +ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, bool last_buf, u32 *error_offset, + u32 *error_info, struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == ICE_ERR_AQ_ERROR) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = LE32_TO_CPU(resp->error_offset); + if (error_info) + *error_info = LE32_TO_CPU(resp->error_info); + } + + return status; +} + +/** + * ice_aq_upload_section + * @hw: pointer to the hardware structure + * @pkg_buf: the package buffer which will receive the section + * @buf_size: the size of the package buffer + * @cd: pointer to command details structure or NULL + * + * Upload Section (0x0C41) + */ +enum ice_status +ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); +} + +/** + * ice_aq_update_pkg + * @hw: pointer to the hardware structure + * @pkg_buf: the package cmd buffer + * @buf_size: the size of the package cmd buffer + * @last_buf: last buffer indicator + * @error_offset: returns error offset + * @error_info: returns error information + * @cd: pointer to command details structure or NULL + * + * Update Package (0x0C42) + */ +static enum ice_status +ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, + bool last_buf, u32 *error_offset, u32 *error_info, + struct ice_sq_cd *cd) +{ + struct ice_aqc_download_pkg *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (error_offset) + *error_offset = 0; + if (error_info) + *error_info = 0; + + cmd = &desc.params.download_pkg; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + + if (last_buf) + cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; + + status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); + if (status == ICE_ERR_AQ_ERROR) { + /* Read error from buffer only when the FW returned an error */ + struct ice_aqc_download_pkg_resp *resp; + + resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; + if (error_offset) + *error_offset = LE32_TO_CPU(resp->error_offset); + if (error_info) + *error_info = LE32_TO_CPU(resp->error_info); + } + + return status; +} + +/** + * ice_find_seg_in_pkg + * @hw: pointer to the hardware structure + * 
@seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) + * @pkg_hdr: pointer to the package header to be searched + * + * This function searches a package file for a particular segment type. On + * success it returns a pointer to the segment header, otherwise it will + * return NULL. + */ +struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr) +{ + u32 i; + + ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", + pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, + pkg_hdr->pkg_format_ver.update, + pkg_hdr->pkg_format_ver.draft); + + /* Search all package segments for the requested segment type */ + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { + struct ice_generic_seg_hdr *seg; + + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i])); + + if (LE32_TO_CPU(seg->seg_type) == seg_type) + return seg; + } + + return NULL; +} + +/** + * ice_get_pkg_seg_by_idx + * @pkg_hdr: pointer to the package header to be searched + * @idx: index of segment + */ +static struct ice_generic_seg_hdr * +ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + struct ice_generic_seg_hdr *seg = NULL; + + if (idx < LE32_TO_CPU(pkg_hdr->seg_count)) + seg = (struct ice_generic_seg_hdr *) + ((u8 *)pkg_hdr + + LE32_TO_CPU(pkg_hdr->seg_offset[idx])); + + return seg; +} + +/** + * ice_is_signing_seg_at_idx - determine if segment is a signing segment + * @pkg_hdr: pointer to package header + * @idx: segment index + */ +static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx) +{ + struct ice_generic_seg_hdr *seg; + bool retval = false; + + seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (seg) + retval = LE32_TO_CPU(seg->seg_type) == SEGMENT_TYPE_SIGNING; + + return retval; +} + +/** + * ice_is_signing_seg_type_at_idx + * @pkg_hdr: pointer to package header + * @idx: segment index + * @seg_id: segment id that is expected + * @sign_type: signing type + * + * Determine if a segment is a signing segment of the correct type + */ +static bool +ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx, + u32 seg_id, u32 sign_type) +{ + bool result = false; + + if (ice_is_signing_seg_at_idx(pkg_hdr, idx)) { + struct ice_sign_seg *seg; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, + idx); + if (seg && LE32_TO_CPU(seg->seg_id) == seg_id && + LE32_TO_CPU(seg->sign_type) == sign_type) + result = true; + } + + return result; +} + +/** + * ice_update_pkg_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + */ +enum ice_status +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status = ICE_SUCCESS; + u32 i; + + for (i = 0; i < count; i++) { + struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); + bool last = ((i + 1) == count); + u32 offset, info; + + status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), + last, &offset, &info, NULL); + + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", + status, offset, info); + break; + } + } + + return status; +} + +/** + * ice_update_pkg + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains change lock and updates package. 
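+ *
+ * Typical usage (a sketch; bufs/count come from the caller's parsed
+ * DDP image, error handling elided):
+ *
+ *	status = ice_update_pkg(hw, bufs, count);
+ *	if (status)
+ *		ice_debug(hw, ICE_DBG_PKG, "pkg update failed\n");
+ *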
+ */ +enum ice_status +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_status status; + + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; + + status = ice_update_pkg_no_lock(hw, bufs, count); + + ice_release_change_lock(hw); + + return status; +} + +static enum ice_ddp_state +ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err) +{ + switch (aq_err) { + case ICE_AQ_RC_ENOSEC: + return ICE_DDP_PKG_NO_SEC_MANIFEST; + case ICE_AQ_RC_EBADSIG: + return ICE_DDP_PKG_FILE_SIGNATURE_INVALID; + case ICE_AQ_RC_ESVN: + return ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW; + case ICE_AQ_RC_EBADMAN: + return ICE_DDP_PKG_MANIFEST_INVALID; + case ICE_AQ_RC_EBADBUF: + return ICE_DDP_PKG_BUFFER_INVALID; + default: + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_is_buffer_metadata - determine if package buffer is a metadata buffer + * @buf: pointer to buffer header + */ +static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf) +{ + bool metadata = false; + + if (LE32_TO_CPU(buf->section_entry[0].type) & ICE_METADATA_BUF) + metadata = true; + + return metadata; +} + +/** + * ice_is_last_download_buffer + * @buf: pointer to current buffer header + * @idx: index of the buffer in the current sequence + * @count: the buffer count in the current sequence + * + * Note: this routine should only be called if the buffer is not the last buffer + */ +static bool +ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count) +{ + bool last = ((idx + 1) == count); + + /* A set metadata flag in the next buffer will signal that the current + * buffer will be the last buffer downloaded + */ + if (!last) { + struct ice_buf *next_buf = ((struct ice_buf *)buf) + 1; + + last = ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf); + } + + return last; +} + +/** + * ice_dwnld_cfg_bufs_no_lock + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @start: buffer index of first buffer to download + * @count: the number of buffers to download + * @indicate_last: if true, then set last buffer flag on last buffer download + * + * Downloads package configuration buffers to the firmware. Metadata buffers + * are skipped, and the first metadata buffer found indicates that the rest + * of the buffers are all metadata buffers. + */ +static enum ice_ddp_state +ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, + u32 count, bool indicate_last) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_buf_hdr *bh; + enum ice_aq_err err; + u32 offset, info, i; + + if (!bufs || !count) + return ICE_DDP_PKG_ERR; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. 
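+ * (The test below checks ICE_METADATA_BUF in the first section
+ * entry's type word, the same condition ice_is_buffer_metadata()
+ * evaluates.)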
+ */ + bh = (struct ice_buf_hdr *)(bufs + start); + if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + for (i = 0; i < count; i++) { + enum ice_status status; + bool last = false; + + bh = (struct ice_buf_hdr *)(bufs + start + i); + + if (indicate_last) + last = ice_is_last_download_buffer(bh, i, count); + + status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, + &offset, &info, NULL); + + /* Save AQ status from download package */ + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", + status, offset, info); + err = hw->adminq.sq_last_status; + state = ice_map_aq_err_to_ddp_state(err); + break; + } + + if (last) + break; + } + + return state; +} + +/** + * ice_aq_get_pkg_info_list + * @hw: pointer to the hardware structure + * @pkg_info: the buffer which will receive the information list + * @buf_size: the size of the pkg_info information buffer + * @cd: pointer to command details structure or NULL + * + * Get Package Info List (0x0C43) + */ +static enum ice_status +ice_aq_get_pkg_info_list(struct ice_hw *hw, + struct ice_aqc_get_pkg_info_resp *pkg_info, + u16 buf_size, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); + + return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); +} + +/** + * ice_has_signing_seg - determine if package has a signing segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + */ +static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr); + + return seg_hdr ? 
true : false; +} + +/** + * ice_get_pkg_segment_id - get correct package segment id, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type) +{ + u32 seg_id; + + switch (mac_type) { + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K: + default: + seg_id = SEGMENT_TYPE_ICE_E810; + break; + } + + return seg_id; +} + +/** + * ice_get_pkg_sign_type - get package segment sign type, based on device + * @mac_type: MAC type of the device + */ +static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type) +{ + u32 sign_type; + + switch (mac_type) { + case ICE_MAC_GENERIC_3K: + sign_type = SEGMENT_SIGN_TYPE_RSA3K; + break; + case ICE_MAC_GENERIC: + default: + sign_type = SEGMENT_SIGN_TYPE_RSA2K; + break; + } + + return sign_type; +} + +/** + * ice_get_signing_req - get correct package requirements, based on device + * @hw: pointer to the hardware structure + */ +static void ice_get_signing_req(struct ice_hw *hw) +{ + hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type); + hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type); +} + +/** + * ice_download_pkg_sig_seg - download a signature segment + * @hw: pointer to the hardware structure + * @seg: pointer to signature segment + */ +static enum ice_ddp_state +ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg) +{ + enum ice_ddp_state state; + + state = ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0, + LE32_TO_CPU(seg->buf_tbl.buf_count), + false); + + return state; +} + +/** + * ice_download_pkg_config_seg - download a config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index + * @start: starting buffer + * @count: buffer count + * + * Note: idx must reference a ICE segment + */ +static enum ice_ddp_state +ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx, u32 start, u32 count) +{ + struct ice_buf_table *bufs; + enum ice_ddp_state state; + struct ice_seg *seg; + u32 buf_count; + + seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) + return ICE_DDP_PKG_ERR; + + bufs = ice_find_buf_table(seg); + buf_count = LE32_TO_CPU(bufs->buf_count); + + if (start >= buf_count || start + count > buf_count) + return ICE_DDP_PKG_ERR; + + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count, + true); + + return state; +} + +/** + * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @idx: segment index (must be a signature segment) + * + * Note: idx must reference a signature segment + */ +static enum ice_ddp_state +ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + u32 idx) +{ + enum ice_ddp_state state; + struct ice_sign_seg *seg; + u32 conf_idx; + u32 start; + u32 count; + + seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); + if (!seg) { + state = ICE_DDP_PKG_ERR; + goto exit; + } + + conf_idx = LE32_TO_CPU(seg->signed_seg_idx); + start = LE32_TO_CPU(seg->signed_buf_start); + count = LE32_TO_CPU(seg->signed_buf_count); + + state = ice_download_pkg_sig_seg(hw, seg); + if (state) + goto exit; + + state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, + count); + +exit: + return state; +} + +/** + * ice_match_signing_seg - determine if a matching signing segment exists + * @pkg_hdr: pointer to package header + * @seg_id: segment id that is expected + * @sign_type: signing type 
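+ *
+ * Returns true when any of the pkg_hdr->seg_count segments is a signing
+ * segment whose segment ID and signing type both match, as checked by
+ * ice_is_signing_seg_type_at_idx().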
+ */ +static bool +ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type) +{ + bool match = false; + u32 i; + + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { + if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id, + sign_type)) { + match = true; + break; + } + } + + return match; +} + +/** + * ice_post_dwnld_pkg_actions - perform post download package actions + * @hw: pointer to the hardware structure + */ +static enum ice_ddp_state +ice_post_dwnld_pkg_actions(struct ice_hw *hw) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + enum ice_status status; + + status = ice_set_vlan_mode(hw); + if (status) { + ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", + status); + state = ICE_DDP_PKG_ERR; + } + + return state; +} + +/** + * ice_download_pkg_with_sig_seg - download package using signature segments + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + */ +static enum ice_ddp_state +ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + enum ice_aq_err aq_err = hw->adminq.sq_last_status; + enum ice_ddp_state state = ICE_DDP_PKG_ERR; + enum ice_status status; + u32 i; + + ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id); + ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type); + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == ICE_ERR_AQ_NO_WORK) + state = ICE_DDP_PKG_ALREADY_LOADED; + else + state = ice_map_aq_err_to_ddp_state(aq_err); + return state; + } + + for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { + if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id, + hw->pkg_sign_type)) + continue; + + state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i); + if (state) + break; + } + + if (!state) + state = ice_post_dwnld_pkg_actions(hw); + + ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_dwnld_cfg_bufs + * @hw: pointer to the hardware structure + * @bufs: pointer to an array of buffers + * @count: the number of buffers in the array + * + * Obtains global config lock and downloads the package configuration buffers + * to the firmware. + */ +static enum ice_ddp_state +ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + enum ice_status status; + struct ice_buf_hdr *bh; + + if (!bufs || !count) + return ICE_DDP_PKG_ERR; + + /* If the first buffer's first section has its metadata bit set + * then there are no buffers to be downloaded, and the operation is + * considered a success. + */ + bh = (struct ice_buf_hdr *)bufs; + if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) + return ICE_DDP_PKG_SUCCESS; + + status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); + if (status) { + if (status == ICE_ERR_AQ_NO_WORK) + return ICE_DDP_PKG_ALREADY_LOADED; + return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); + } + + state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true); + if (!state) + state = ice_post_dwnld_pkg_actions(hw); + + ice_release_global_cfg_lock(hw); + + return state; +} + +/** + * ice_download_pkg_without_sig_seg + * @hw: pointer to the hardware structure + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package without signature segment. 
+ */ +static enum ice_ddp_state +ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_buf_table *ice_buf_tbl; + enum ice_ddp_state state; + + ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", + ice_seg->hdr.seg_format_ver.major, + ice_seg->hdr.seg_format_ver.minor, + ice_seg->hdr.seg_format_ver.update, + ice_seg->hdr.seg_format_ver.draft); + + ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", + LE32_TO_CPU(ice_seg->hdr.seg_type), + LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); + + ice_buf_tbl = ice_find_buf_table(ice_seg); + + ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", + LE32_TO_CPU(ice_buf_tbl->buf_count)); + + state = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, + LE32_TO_CPU(ice_buf_tbl->buf_count)); + + return state; +} + +/** + * ice_download_pkg + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to package header + * @ice_seg: pointer to the segment of the package to be downloaded + * + * Handles the download of a complete package. + */ +static enum ice_ddp_state +ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, + struct ice_seg *ice_seg) +{ + enum ice_ddp_state state; + + if (hw->pkg_has_signing_seg) + state = ice_download_pkg_with_sig_seg(hw, pkg_hdr); + else + state = ice_download_pkg_without_sig_seg(hw, ice_seg); + + ice_post_pkg_dwnld_vlan_mode_cfg(hw); + + return state; +} + +/** + * ice_init_pkg_info + * @hw: pointer to the hardware structure + * @pkg_hdr: pointer to the driver's package hdr + * + * Saves off the package details into the HW structure. + */ +static enum ice_ddp_state +ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) +{ + struct ice_generic_seg_hdr *seg_hdr; + + if (!pkg_hdr) + return ICE_DDP_PKG_ERR; + + hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr); + ice_get_signing_req(hw); + + ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n", + hw->pkg_seg_id); + + seg_hdr = (struct ice_generic_seg_hdr *) + ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); + if (seg_hdr) { + struct ice_meta_sect *meta; + struct ice_pkg_enum state; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + /* Get package information from the Metadata Section */ + meta = (struct ice_meta_sect *) + ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, + ICE_SID_METADATA); + if (!meta) { + ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + hw->pkg_ver = meta->ver; + ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name), + ICE_NONDMA_TO_NONDMA); + + ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", + meta->ver.major, meta->ver.minor, meta->ver.update, + meta->ver.draft, meta->name); + + hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; + ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id, + sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA); + + ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", + seg_hdr->seg_format_ver.major, + seg_hdr->seg_format_ver.minor, + seg_hdr->seg_format_ver.update, + seg_hdr->seg_format_ver.draft, + seg_hdr->seg_id); + } else { + ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_get_pkg_info + * @hw: pointer to the hardware structure + * + * Store details of the package currently loaded in HW into the HW structure. 
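+ * + * For debug logging, each package entry is tagged with a short flag string: + * 'A' = active, 'B' = active at boot, 'M' = modified, 'N' = present in NVM.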
+ */ +enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw) +{ + enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; + struct ice_aqc_get_pkg_info_resp *pkg_info; + u16 size; + u32 i; + + size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT); + pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); + if (!pkg_info) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) { + state = ICE_DDP_PKG_ERR; + goto init_pkg_free_alloc; + } + + for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) { +#define ICE_PKG_FLAG_COUNT 4 + char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; + u8 place = 0; + + if (pkg_info->pkg_info[i].is_active) { + flags[place++] = 'A'; + hw->active_pkg_ver = pkg_info->pkg_info[i].ver; + hw->active_track_id = + LE32_TO_CPU(pkg_info->pkg_info[i].track_id); + ice_memcpy(hw->active_pkg_name, + pkg_info->pkg_info[i].name, + sizeof(pkg_info->pkg_info[i].name), + ICE_NONDMA_TO_NONDMA); + hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; + } + if (pkg_info->pkg_info[i].is_active_at_boot) + flags[place++] = 'B'; + if (pkg_info->pkg_info[i].is_modified) + flags[place++] = 'M'; + if (pkg_info->pkg_info[i].is_in_nvm) + flags[place++] = 'N'; + + ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", + i, pkg_info->pkg_info[i].ver.major, + pkg_info->pkg_info[i].ver.minor, + pkg_info->pkg_info[i].ver.update, + pkg_info->pkg_info[i].ver.draft, + pkg_info->pkg_info[i].name, flags); + } + +init_pkg_free_alloc: + ice_free(hw, pkg_info); + + return state; +} + +/** + * ice_label_enum_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the label entry to be returned + * @offset: pointer to receive absolute offset, always zero for label sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual label entries. + */ +static void * +ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index, + u32 *offset) +{ + struct ice_label_section *labels; + + if (!section) + return NULL; + + if (index > ICE_MAX_LABELS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + labels = (struct ice_label_section *)section; + if (index >= LE16_TO_CPU(labels->count)) + return NULL; + + return labels->label + index; +} + +/** + * ice_enum_labels + * @ice_seg: pointer to the ice segment (NULL on subsequent calls) + * @type: the section type that will contain the label (0 on subsequent calls) + * @state: ice_pkg_enum structure that will hold the state of the enumeration + * @value: pointer to a value that will return the label's value if found + * + * Enumerates a list of labels in the package. The caller will call + * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call + * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL + * the end of the list has been reached. 
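+ *
+ * A minimal calling sketch (hypothetical caller; the label walk in
+ * ice_init_pkg_hints() follows this exact shape):
+ *
+ *	name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM,
+ *			       &state, &val);
+ *	while (name) {
+ *		... use name and val ...
+ *		name = ice_enum_labels(NULL, 0, &state, &val);
+ *	}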
+ */ +static char * +ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, + u16 *value) +{ + struct ice_label *label; + + /* Check for valid label section on first call */ + if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) + return NULL; + + label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type, + NULL, + ice_label_enum_handler); + if (!label) + return NULL; + + *value = LE16_TO_CPU(label->value); + return label->name; +} + +/** + * ice_find_label_value + * @ice_seg: pointer to the ice segment (non-NULL) + * @name: name of the label to search for + * @type: the section type that will contain the label + * @value: pointer to a value that will return the label's value if found + * + * Finds a label's value given the label name and the section type to search. + * The ice_seg parameter must not be NULL since the first call to + * ice_enum_labels requires a pointer to an actual ice_seg structure. + */ +enum ice_status +ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, + u16 *value) +{ + struct ice_pkg_enum state; + char *label_name; + u16 val; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ice_seg) + return ICE_ERR_PARAM; + + do { + label_name = ice_enum_labels(ice_seg, type, &state, &val); + if (label_name && !strcmp(label_name, name)) { + *value = val; + return ICE_SUCCESS; + } + + ice_seg = NULL; + } while (label_name); + + return ICE_ERR_CFG; +} + +/** + * ice_verify_pkg - verify package + * @pkg: pointer to the package buffer + * @len: size of the package buffer + * + * Verifies various attributes of the package file, including length, format + * version, and the requirement of at least one segment. + */ +enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +{ + u32 seg_count; + u32 i; + + if (len < ice_struct_size(pkg, seg_offset, 1)) + return ICE_DDP_PKG_INVALID_FILE; + + if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || + pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || + pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || + pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) + return ICE_DDP_PKG_INVALID_FILE; + + /* pkg must have at least one segment */ + seg_count = LE32_TO_CPU(pkg->seg_count); + if (seg_count < 1) + return ICE_DDP_PKG_INVALID_FILE; + + /* make sure segment array fits in package length */ + if (len < ice_struct_size(pkg, seg_offset, seg_count)) + return ICE_DDP_PKG_INVALID_FILE; + + /* all segments must fit within length */ + for (i = 0; i < seg_count; i++) { + u32 off = LE32_TO_CPU(pkg->seg_offset[i]); + struct ice_generic_seg_hdr *seg; + + /* segment header must fit */ + if (len < off + sizeof(*seg)) + return ICE_DDP_PKG_INVALID_FILE; + + seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + + /* segment body must fit */ + if (len < off + LE32_TO_CPU(seg->seg_size)) + return ICE_DDP_PKG_INVALID_FILE; + } + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_free_seg - free package segment pointer + * @hw: pointer to the hardware structure + * + * Frees the package segment pointer in the proper manner, depending on if the + * segment was allocated or just the passed in pointer was stored. 
+ */ +void ice_free_seg(struct ice_hw *hw) +{ + if (hw->pkg_copy) { + ice_free(hw, hw->pkg_copy); + hw->pkg_copy = NULL; + hw->pkg_size = 0; + } + hw->seg = NULL; +} + +/** + * ice_chk_pkg_version - check package version for compatibility with driver + * @pkg_ver: pointer to a version structure to check + * + * Check to make sure that the package about to be downloaded is compatible with + * the driver. To be compatible, the major and minor components of the package + * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR + * definitions. + */ +static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) +{ + if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor > ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH; + else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ || + (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ && + pkg_ver->minor < ICE_PKG_SUPP_VER_MNR)) + return ICE_DDP_PKG_FILE_VERSION_TOO_LOW; + + return ICE_DDP_PKG_SUCCESS; +} + +/** + * ice_chk_pkg_compat + * @hw: pointer to the hardware structure + * @ospkg: pointer to the package hdr + * @seg: pointer to the package segment hdr + * + * This function checks the package version compatibility with driver and NVM + */ +static enum ice_ddp_state +ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, + struct ice_seg **seg) +{ + struct ice_aqc_get_pkg_info_resp *pkg; + enum ice_ddp_state state; + u16 size; + u32 i; + + /* Check package version compatibility */ + state = ice_chk_pkg_version(&hw->pkg_ver); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); + return state; + } + + /* find ICE segment in given package */ + *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, + ospkg); + if (!*seg) { + ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); + return ICE_DDP_PKG_INVALID_FILE; + } + + /* Check if FW is compatible with the OS package */ + size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT); + pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); + if (!pkg) + return ICE_DDP_PKG_ERR; + + if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) { + state = ICE_DDP_PKG_ERR; + goto fw_ddp_compat_free_alloc; + } + + for (i = 0; i < LE32_TO_CPU(pkg->count); i++) { + /* loop till we find the NVM package */ + if (!pkg->pkg_info[i].is_in_nvm) + continue; + if ((*seg)->hdr.seg_format_ver.major != + pkg->pkg_info[i].ver.major || + (*seg)->hdr.seg_format_ver.minor > + pkg->pkg_info[i].ver.minor) { + state = ICE_DDP_PKG_FW_MISMATCH; + ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); + } + /* done processing NVM package so break */ + break; + } +fw_ddp_compat_free_alloc: + ice_free(hw, pkg); + return state; +} + +/** + * ice_sw_fv_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the field vector entry to be returned + * @offset: ptr to variable that receives the offset in the field vector table + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * This function treats the given section as of type ice_sw_fv_section and + * enumerates offset field. "offset" is an index into the field vector table. 
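+ * For example, with a base_offset of 10 and an index of 2, *offset is set + * to 12.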
+ */ +static void * +ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_sw_fv_section *fv_section = + (struct ice_sw_fv_section *)section; + + if (!section || sect_type != ICE_SID_FLD_VEC_SW) + return NULL; + if (index >= LE16_TO_CPU(fv_section->count)) + return NULL; + if (offset) + /* "index" passed in to this function is relative to a given + * 4k block. To get the true index into the field vector + * table, we need to add the relative index to the base_offset + * field of this section. + */ + *offset = LE16_TO_CPU(fv_section->base_offset) + index; + return fv_section->fv + index; +} + +/** + * ice_get_prof_index_max - get the max index of the used profiles + * @hw: pointer to the HW struct + * + * Calling this function will get the max index of the used profiles and + * store it in struct ice_switch_info *switch_info in hw for later use. + */ +static int ice_get_prof_index_max(struct ice_hw *hw) +{ + u16 prof_index = 0, j, max_prof_index = 0; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + bool flag = false; + struct ice_fv *fv; + u32 offset; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + + do { + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* in profiles that are not used, the prot_id is set to 0xff + * and the off is set to 0x1ff for all field vectors. + */ + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id != ICE_PROT_INVALID || + fv->ew[j].off != ICE_FV_OFFSET_INVAL) + flag = true; + if (flag && prof_index > max_prof_index) + max_prof_index = prof_index; + + prof_index++; + flag = false; + } while (fv); + + hw->switch_info->max_used_prof_index = max_prof_index; + + return ICE_SUCCESS; +} + +/** + * ice_get_ddp_pkg_state - get DDP pkg state after download + * @hw: pointer to the HW struct + * @already_loaded: indicates if pkg was already loaded onto the device + */ +static enum ice_ddp_state +ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded) +{ + if (hw->pkg_ver.major == hw->active_pkg_ver.major && + hw->pkg_ver.minor == hw->active_pkg_ver.minor && + hw->pkg_ver.update == hw->active_pkg_ver.update && + hw->pkg_ver.draft == hw->active_pkg_ver.draft && + !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) { + if (already_loaded) + return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED; + else + return ICE_DDP_PKG_SUCCESS; + } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ || + hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED; + } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ && + hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) { + return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED; + } else { + return ICE_DDP_PKG_ERR; + } +} + +/** + * ice_init_pkg_regs - initialize additional package registers + * @hw: pointer to the hardware structure + */ +static void ice_init_pkg_regs(struct ice_hw *hw) +{ +#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF +#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF +#define ICE_SW_BLK_IDX 0 + + /* setup Switch block input mask, which is 48 bits in two parts */ + wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); + wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); +} + +/** + * ice_init_pkg - initialize/download package + * @hw: pointer to the hardware
structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function initializes a package. The package contains HW tables + * required to do packet processing. First, the function extracts package + * information such as version. Then it finds the ice configuration segment + * within the package; this function then saves a copy of the segment pointer + * within the supplied package buffer. Next, the function will cache any hints + * from the package, followed by downloading the package itself. Note that if + * a previous PF driver has already downloaded the package successfully, then + * the current driver will not have to download the package again. + * + * The local package contents will be used to query default behavior and to + * update specific sections of the HW's version of the package (e.g. to update + * the parse graph to understand new protocols). + * + * This function stores a pointer to the package buffer memory, and it is + * expected that the supplied buffer will not be freed immediately. If the + * package buffer needs to be freed, such as when read from a file, use + * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this + * case. + */ +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) +{ + bool already_loaded = false; + enum ice_ddp_state state; + struct ice_pkg_hdr *pkg; + struct ice_seg *seg; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + pkg = (struct ice_pkg_hdr *)buf; + state = ice_verify_pkg(pkg, len); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + state); + return state; + } + + /* initialize package info */ + state = ice_init_pkg_info(hw, pkg); + if (state) + return state; + + /* For packages with signing segments, a matching signing segment must + * exist. + */ + if (hw->pkg_has_signing_seg) + if (!ice_match_signing_seg(pkg, hw->pkg_seg_id, + hw->pkg_sign_type)) + return ICE_DDP_PKG_ERR; + + /* before downloading the package, check package version for + * compatibility with driver + */ + state = ice_chk_pkg_compat(hw, pkg, &seg); + if (state) + return state; + + /* initialize package hints and then download package */ + ice_init_pkg_hints(hw, seg); + state = ice_download_pkg(hw, pkg, seg); + + if (state == ICE_DDP_PKG_ALREADY_LOADED) { + ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); + already_loaded = true; + } + + /* Get information on the package currently loaded in HW, then make sure + * the driver is compatible with this version. + */ + if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) { + state = ice_get_pkg_info(hw); + if (!state) + state = ice_get_ddp_pkg_state(hw, already_loaded); + } + + if (ice_is_init_pkg_successful(state)) { + hw->seg = seg; + /* on successful package download, update other required + * registers to support the package and fill HW tables + * with package content. + */ + ice_init_pkg_regs(hw); + ice_fill_blk_tbls(hw); + ice_get_prof_index_max(hw); + } else { + ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", + state); + } + + return state; +} + +/** + * ice_copy_and_init_pkg - initialize/download a copy of the package + * @hw: pointer to the hardware structure + * @buf: pointer to the package buffer + * @len: size of the package buffer + * + * This function copies the package buffer, and then calls ice_init_pkg() to + * initialize the copied package contents.
+ * + * The copying is necessary if the package buffer supplied is constant, or if + * the memory may disappear shortly after calling this function. + * + * If the package buffer resides in the data segment and can be modified, the + * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). + * + * However, if the package buffer needs to be copied first, such as when being + * read from a file, the caller should use ice_copy_and_init_pkg(). + * + * This function will first copy the package buffer, before calling + * ice_init_pkg(). The caller is free to immediately destroy the original + * package buffer, as the new copy will be managed by this function and + * related routines. + */ +enum ice_ddp_state +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) +{ + enum ice_ddp_state state; + u8 *buf_copy; + + if (!buf || !len) + return ICE_DDP_PKG_ERR; + + buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA); + + state = ice_init_pkg(hw, buf_copy, len); + if (!ice_is_init_pkg_successful(state)) { + /* Free the copy, since we failed to initialize the package */ + ice_free(hw, buf_copy); + } else { + /* Track the copied pkg so we can free it later */ + hw->pkg_copy = buf_copy; + hw->pkg_size = len; + } + + return state; +} + +/** + * ice_is_init_pkg_successful - check if DDP init was successful + * @state: state of the DDP pkg after download + */ +bool ice_is_init_pkg_successful(enum ice_ddp_state state) +{ + switch (state) { + case ICE_DDP_PKG_SUCCESS: + case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED: + case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED: + return true; + default: + return false; + } +} + +/** + * ice_pkg_buf_alloc + * @hw: pointer to the HW structure + * + * Allocates a package buffer and returns a pointer to the buffer header. + * Note: all package contents must be in Little Endian form. 
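+ *
+ * A typical build sequence (a sketch; error handling elided, and the update
+ * command that consumes the result is caller-specific):
+ *
+ *	bld = ice_pkg_buf_alloc(hw);
+ *	ice_pkg_buf_reserve_section(bld, 1);
+ *	sect = ice_pkg_buf_alloc_section(bld, sect_type, sect_size);
+ *	... fill in *sect, then hand ice_pkg_buf(bld) to an update command ...
+ *	ice_pkg_buf_free(hw, bld);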
+ */ +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) +{ + struct ice_buf_build *bld; + struct ice_buf_hdr *buf; + + bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld)); + if (!bld) + return NULL; + + buf = (struct ice_buf_hdr *)bld; + buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr, + section_entry)); + return bld; +} + +static bool ice_is_gtp_u_profile(u16 prof_idx) +{ + return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID && + prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP) || + prof_idx == ICE_PROFID_IPV4_GTPU_TEID; +} + +static bool ice_is_gtp_c_profile(u16 prof_idx) +{ + switch (prof_idx) { + case ICE_PROFID_IPV4_GTPC_TEID: + case ICE_PROFID_IPV4_GTPC_NO_TEID: + case ICE_PROFID_IPV6_GTPC_TEID: + case ICE_PROFID_IPV6_GTPC_NO_TEID: + return true; + default: + return false; + } +} + +/** + * ice_get_sw_prof_type - determine switch profile type + * @hw: pointer to the HW structure + * @fv: pointer to the switch field vector + * @prof_idx: profile index to check + */ +static enum ice_prof_type +ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx) +{ + bool valid_prof = false; + u16 i; + + if (ice_is_gtp_c_profile(prof_idx)) + return ICE_PROF_TUN_GTPC; + + if (ice_is_gtp_u_profile(prof_idx)) + return ICE_PROF_TUN_GTPU; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { + if (fv->ew[i].off != ICE_NAN_OFFSET) + valid_prof = true; + + /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && + fv->ew[i].off == ICE_VNI_OFFSET) + return ICE_PROF_TUN_UDP; + + /* GRE tunnel will have GRE protocol */ + if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) + return ICE_PROF_TUN_GRE; + } + + return valid_prof ? ICE_PROF_NON_TUN : ICE_PROF_INVALID; +} + +/** + * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type + * @hw: pointer to hardware structure + * @req_profs: type of profiles requested + * @bm: pointer to memory for returning the bitmap of field vectors + */ +void +ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, + ice_bitmap_t *bm) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); + ice_seg = hw->seg; + do { + enum ice_prof_type prof_type; + u32 offset; + + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + ice_seg = NULL; + + if (fv) { + /* Determine field vector type */ + prof_type = ice_get_sw_prof_type(hw, fv, offset); + + if (req_profs & prof_type) + ice_set_bit((u16)offset, bm); + } + } while (fv); +} + +/** + * ice_get_sw_fv_list + * @hw: pointer to the HW structure + * @lkups: lookup elements or match criteria for the advanced recipe, one + * structure per protocol header + * @bm: bitmap of field vectors to consider + * @fv_list: Head of a list + * + * Finds all the field vector entries from switch block that contain + * a given protocol ID and offset and returns a list of structures of type + * "ice_sw_fv_list_entry". Every structure in the list has a field vector + * definition and profile ID information + * NOTE: The caller of the function is responsible for freeing the memory + * allocated for every list entry. 
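+ *
+ * A caller-side freeing sketch (the same pattern as the error path below):
+ *
+ *	LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
+ *				 list_entry) {
+ *		LIST_DEL(&fvl->list_entry);
+ *		ice_free(hw, fvl);
+ *	}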
+ */ +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) +{ + struct ice_sw_fv_list_entry *fvl; + struct ice_sw_fv_list_entry *tmp; + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + u32 offset; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!lkups->n_val_words || !hw->seg) + return ICE_ERR_PARAM; + + ice_seg = hw->seg; + do { + u16 i; + + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &offset, ice_sw_fv_handler); + if (!fv) + break; + ice_seg = NULL; + + /* If field vector is not in the bitmap list, then skip this + * profile. + */ + if (!ice_is_bit_set(bm, (u16)offset)) + continue; + + for (i = 0; i < lkups->n_val_words; i++) { + int j; + + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) + if (fv->ew[j].prot_id == + lkups->fv_words[i].prot_id && + fv->ew[j].off == lkups->fv_words[i].off) + break; + if (j >= hw->blk[ICE_BLK_SW].es.fvw) + break; + if (i + 1 == lkups->n_val_words) { + fvl = (struct ice_sw_fv_list_entry *) + ice_malloc(hw, sizeof(*fvl)); + if (!fvl) + goto err; + fvl->fv_ptr = fv; + fvl->profile_id = offset; + LIST_ADD(&fvl->list_entry, fv_list); + break; + } + } + } while (fv); + if (LIST_EMPTY(fv_list)) { + ice_warn(hw, "Required profiles not found in currently loaded DDP package"); + return ICE_ERR_CFG; + } + return ICE_SUCCESS; + +err: + LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry, + list_entry) { + LIST_DEL(&fvl->list_entry); + ice_free(hw, fvl); + } + + return ICE_ERR_NO_MEMORY; +} + +/** + * ice_init_prof_result_bm - Initialize the profile result index bitmap + * @hw: pointer to hardware structure + */ +void ice_init_prof_result_bm(struct ice_hw *hw) +{ + struct ice_pkg_enum state; + struct ice_seg *ice_seg; + struct ice_fv *fv; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!hw->seg) + return; + + ice_seg = hw->seg; + do { + u32 off; + u16 i; + + fv = (struct ice_fv *) + ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, + &off, ice_sw_fv_handler); + ice_seg = NULL; + if (!fv) + break; + + ice_zero_bitmap(hw->switch_info->prof_res_bm[off], + ICE_MAX_FV_WORDS); + + /* Determine empty field vector indices; these can be + * used for recipe results. Skip index 0, since it is + * always used for Switch ID. + */ + for (i = 1; i < ICE_MAX_FV_WORDS; i++) + if (fv->ew[i].prot_id == ICE_PROT_INVALID && + fv->ew[i].off == ICE_FV_OFFSET_INVAL) + ice_set_bit(i, + hw->switch_info->prof_res_bm[off]); + } while (fv); +} + +/** + * ice_pkg_buf_free + * @hw: pointer to the HW structure + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Frees a package buffer + */ +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) +{ + ice_free(hw, bld); +} + +/** + * ice_pkg_buf_reserve_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @count: the number of sections to reserve + * + * Reserves one or more section table entries in a package buffer. This routine + * can be called multiple times as long as all calls are made before calling + * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been + * called, the number of sections that can be allocated can no longer be + * increased; not using all reserved sections is fine, but this will + * result in some wasted space in the buffer. + * Note: all package contents must be in Little Endian form.
+ */ +enum ice_status +ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) +{ + struct ice_buf_hdr *buf; + u16 section_count; + u16 data_end; + + if (!bld) + return ICE_ERR_PARAM; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* already an active section, can't increase table size */ + section_count = LE16_TO_CPU(buf->section_count); + if (section_count > 0) + return ICE_ERR_CFG; + + if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) + return ICE_ERR_CFG; + bld->reserved_section_table_entries += count; + + data_end = LE16_TO_CPU(buf->data_end) + + FLEX_ARRAY_SIZE(buf, section_entry, count); + buf->data_end = CPU_TO_LE16(data_end); + + return ICE_SUCCESS; +} + +/** + * ice_pkg_buf_alloc_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * + * Reserves memory in the buffer for a section's content and updates the + * buffers' status accordingly. This routine returns a pointer to the first + * byte of the section start within the buffer, which is used to fill in the + * section contents. + * Note: all package contents must be in Little Endian form. + */ +void * +ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) +{ + struct ice_buf_hdr *buf; + u16 sect_count; + u16 data_end; + + if (!bld || !type || !size) + return NULL; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* check for enough space left in buffer */ + data_end = LE16_TO_CPU(buf->data_end); + + /* section start must align on 4 byte boundary */ + data_end = ICE_ALIGN(data_end, 4); + + if ((data_end + size) > ICE_MAX_S_DATA_END) + return NULL; + + /* check for more available section table entries */ + sect_count = LE16_TO_CPU(buf->section_count); + if (sect_count < bld->reserved_section_table_entries) { + void *section_ptr = ((u8 *)buf) + data_end; + + buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end); + buf->section_entry[sect_count].size = CPU_TO_LE16(size); + buf->section_entry[sect_count].type = CPU_TO_LE32(type); + + data_end += size; + buf->data_end = CPU_TO_LE16(data_end); + + buf->section_count = CPU_TO_LE16(sect_count + 1); + return section_ptr; + } + + /* no free section table entries */ + return NULL; +} + +/** + * ice_pkg_buf_alloc_single_section + * @hw: pointer to the HW structure + * @type: the section type value + * @size: the size of the section to reserve (in bytes) + * @section: returns pointer to the section + * + * Allocates a package buffer with a single section. + * Note: all package contents must be in Little Endian form. + */ +struct ice_buf_build * +ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, + void **section) +{ + struct ice_buf_build *buf; + + if (!section) + return NULL; + + buf = ice_pkg_buf_alloc(hw); + if (!buf) + return NULL; + + if (ice_pkg_buf_reserve_section(buf, 1)) + goto ice_pkg_buf_alloc_single_section_err; + + *section = ice_pkg_buf_alloc_section(buf, type, size); + if (!*section) + goto ice_pkg_buf_alloc_single_section_err; + + return buf; + +ice_pkg_buf_alloc_single_section_err: + ice_pkg_buf_free(hw, buf); + return NULL; +} + +/** + * ice_pkg_buf_unreserve_section + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * @count: the number of sections to unreserve + * + * Unreserves one or more section table entries in a package buffer, releasing + * space that can be used for section data. 
This routine can be called + * multiple times as long as all calls are made before calling + * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been + * called, the number of reserved sections can no longer be changed; not + * using all reserved sections is fine, but this will result in some wasted + * space in the buffer. + * Note: all package contents must be in Little Endian form. + */ +enum ice_status +ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) +{ + struct ice_buf_hdr *buf; + u16 section_count; + u16 data_end; + + if (!bld) + return ICE_ERR_PARAM; + + buf = (struct ice_buf_hdr *)&bld->buf; + + /* already an active section, can't decrease table size */ + section_count = LE16_TO_CPU(buf->section_count); + if (section_count > 0) + return ICE_ERR_CFG; + + if (count > bld->reserved_section_table_entries) + return ICE_ERR_CFG; + bld->reserved_section_table_entries -= count; + + data_end = LE16_TO_CPU(buf->data_end) - + FLEX_ARRAY_SIZE(buf, section_entry, count); + buf->data_end = CPU_TO_LE16(data_end); + + return ICE_SUCCESS; +} + +/** + * ice_pkg_buf_get_free_space + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of free bytes remaining in the buffer. + * Note: all package contents must be in Little Endian form. + */ +u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld) +{ + struct ice_buf_hdr *buf; + + if (!bld) + return 0; + + buf = (struct ice_buf_hdr *)&bld->buf; + return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end); +} + +/** + * ice_pkg_buf_get_active_sections + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Returns the number of active sections. Before using the package buffer + * in an update package command, the caller should make sure that there is at + * least one active section - otherwise, the buffer is not legal and should + * not be used. + * Note: all package contents must be in Little Endian form. + */ +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) +{ + struct ice_buf_hdr *buf; + + if (!bld) + return 0; + + buf = (struct ice_buf_hdr *)&bld->buf; + return LE16_TO_CPU(buf->section_count); +} + +/** + * ice_pkg_buf + * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) + * + * Return a pointer to the buffer's header + */ +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) +{ + if (bld) + return &bld->buf; + + return NULL; +} + +/** + * ice_find_buf_table + * @ice_seg: pointer to the ice segment + * + * Returns the address of the buffer table within the ice segment. + */ +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) +{ + struct ice_nvm_table *nvms; + + nvms = (struct ice_nvm_table *) + (ice_seg->device_table + + LE32_TO_CPU(ice_seg->device_table_count)); + + return (_FORCE_ struct ice_buf_table *) + (nvms->vers + LE32_TO_CPU(nvms->table_count)); +} + +/** + * ice_pkg_val_buf + * @buf: pointer to the ice buffer + * + * This helper function validates a buffer's header.
+ */ +static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +{ + struct ice_buf_hdr *hdr; + u16 section_count; + u16 data_end; + + hdr = (struct ice_buf_hdr *)buf->buf; + /* verify data */ + section_count = LE16_TO_CPU(hdr->section_count); + if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) + return NULL; + + data_end = LE16_TO_CPU(hdr->data_end); + if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) + return NULL; + + return hdr; +} + +/** + * ice_pkg_enum_buf + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This function will enumerate all the buffers in the ice segment. The first + * call is made with the ice_seg parameter non-NULL; on subsequent calls, + * ice_seg is set to NULL which continues the enumeration. When the function + * returns a NULL pointer, then the end of the buffers has been reached, or an + * unexpected value has been detected (for example an invalid section count or + * an invalid buffer end value). + */ +struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (ice_seg) { + state->buf_table = ice_find_buf_table(ice_seg); + if (!state->buf_table) + return NULL; + + state->buf_idx = 0; + return ice_pkg_val_buf(state->buf_table->buf_array); + } + + if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count)) + return ice_pkg_val_buf(state->buf_table->buf_array + + state->buf_idx); + else + return NULL; +} + +/** + * ice_pkg_advance_sect + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * + * This helper function will advance the section within the ice segment, + * also advancing the buffer if needed. + */ +bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) +{ + if (!ice_seg && !state->buf) + return false; + + if (!ice_seg && state->buf) + if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count)) + return true; + + state->buf = ice_pkg_enum_buf(ice_seg, state); + if (!state->buf) + return false; + + /* start of new buffer, reset section index */ + state->sect_idx = 0; + return true; +} + +/** + * ice_pkg_enum_section + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * + * This function will enumerate all the sections of a particular type in the + * ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the matching + * sections has been reached. 
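+ *
+ * Calling sketch (hypothetical caller):
+ *
+ *	sect = ice_pkg_enum_section(ice_seg, &state, ICE_SID_METADATA);
+ *	while (sect) {
+ *		... process the section ...
+ *		sect = ice_pkg_enum_section(NULL, &state, 0);
+ *	}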
+ */ +void * +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type) +{ + u16 offset, size; + + if (ice_seg) + state->type = sect_type; + + if (!ice_pkg_advance_sect(ice_seg, state)) + return NULL; + + /* scan for next matching section */ + while (state->buf->section_entry[state->sect_idx].type != + CPU_TO_LE32(state->type)) + if (!ice_pkg_advance_sect(NULL, state)) + return NULL; + + /* validate section */ + offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); + if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) + return NULL; + + size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) + return NULL; + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) + return NULL; + + state->sect_type = + LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type); + + /* calc pointer to this section */ + state->sect = ((u8 *)state->buf) + + LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); + + return state->sect; +} + +/** + * ice_pkg_enum_entry + * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) + * @state: pointer to the enum state + * @sect_type: section type to enumerate + * @offset: pointer to variable that receives the offset in the table (optional) + * @handler: function that handles access to the entries in the section type + * + * This function will enumerate all the entries in a particular section type in + * the ice segment. The first call is made with the ice_seg parameter non-NULL; + * on subsequent calls, ice_seg is set to NULL which continues the enumeration. + * When the function returns a NULL pointer, then the end of the entries has + * been reached. + * + * Since each section may have a different header and entry size, the handler + * function is needed to determine the number and location of entries in each + * section. + * + * The offset parameter is optional, but should be used for sections that + * contain an offset for each section table. For such cases, the section handler + * function must return the appropriate offset + index to give the absolute + * offset for each entry. For example, if a section's header indicates a base + * offset of 10, and the index for the entry is 2, then the section handler + * function should set the offset to 10 + 2 = 12.
+ */ +void * +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type, u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)) +{ + void *entry; + + if (ice_seg) { + if (!handler) + return NULL; + + if (!ice_pkg_enum_section(ice_seg, state, sect_type)) + return NULL; + + state->entry_idx = 0; + state->handler = handler; + } else { + state->entry_idx++; + } + + if (!state->handler) + return NULL; + + /* get entry */ + entry = state->handler(state->sect_type, state->sect, state->entry_idx, + offset); + if (!entry) { + /* end of a section, look for another section of this type */ + if (!ice_pkg_enum_section(NULL, state, 0)) + return NULL; + + state->entry_idx = 0; + entry = state->handler(state->sect_type, state->sect, + state->entry_idx, offset); + } + + return entry; +} + +/** + * ice_boost_tcam_handler + * @sect_type: section type + * @section: pointer to section + * @index: index of the boost TCAM entry to be returned + * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections + * + * This is a callback function that can be passed to ice_pkg_enum_entry. + * Handles enumeration of individual boost TCAM entries. + */ +static void * +ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) +{ + struct ice_boost_tcam_section *boost; + + if (!section) + return NULL; + + if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) + return NULL; + + if (index > ICE_MAX_BST_TCAMS_IN_BUF) + return NULL; + + if (offset) + *offset = 0; + + boost = (struct ice_boost_tcam_section *)section; + if (index >= LE16_TO_CPU(boost->count)) + return NULL; + + return boost->tcam + index; +} + +/** + * ice_find_boost_entry + * @ice_seg: pointer to the ice segment (non-NULL) + * @addr: Boost TCAM address of entry to search for + * @entry: returns pointer to the entry + * + * Finds a particular Boost TCAM entry and returns a pointer to that entry + * if it is found. The ice_seg parameter must not be NULL since the first call + * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. + */ +static enum ice_status +ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, + struct ice_boost_tcam_entry **entry) +{ + struct ice_boost_tcam_entry *tcam; + struct ice_pkg_enum state; + + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ice_seg) + return ICE_ERR_PARAM; + + do { + tcam = (struct ice_boost_tcam_entry *) + ice_pkg_enum_entry(ice_seg, &state, + ICE_SID_RXPARSER_BOOST_TCAM, NULL, + ice_boost_tcam_handler); + if (tcam && LE16_TO_CPU(tcam->addr) == addr) { + *entry = tcam; + return ICE_SUCCESS; + } + + ice_seg = NULL; + } while (tcam); + + *entry = NULL; + return ICE_ERR_CFG; +} + +/** + * ice_init_pkg_hints + * @hw: pointer to the HW structure + * @ice_seg: pointer to the segment of the package scan (non-NULL) + * + * This function will scan the package and save off relevant information + * (hints or metadata) for driver use. The ice_seg parameter must not be NULL + * since the first call to ice_enum_labels requires a pointer to an actual + * ice_seg structure. 
+ */ +void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) +{ + struct ice_pkg_enum state; + char *label_name; + u16 val; + int i; + + ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM); + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + if (!ice_seg) + return; + + label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, + &val); + + while (label_name) { +/* TODO: Replace !strncmp() with wrappers like match_some_pre() */ + if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) + /* check for a tunnel entry */ + ice_add_tunnel_hint(hw, label_name, val); + + label_name = ice_enum_labels(NULL, 0, &state, &val); + } + + /* Cache the appropriate boost TCAM entry pointers for tunnels */ + for (i = 0; i < hw->tnl.count; i++) { + ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, + &hw->tnl.tbl[i].boost_entry); + if (hw->tnl.tbl[i].boost_entry) + hw->tnl.tbl[i].valid = true; + } +} + +/** + * ice_acquire_global_cfg_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the global config lock for reading + * or writing of the package. When attempting to obtain write access, the + * caller must check for the following two return values: + * + * ICE_SUCCESS - Means the caller has acquired the global config lock + * and can perform writing of the package. + * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the + * package or has found that no update was necessary; in + * this case, the caller can just skip performing any + * update of the package. + */ +enum ice_status +ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access) +{ + enum ice_status status; + + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + + if (status == ICE_ERR_AQ_NO_WORK) + ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); + + return status; +} + +/** + * ice_release_global_cfg_lock + * @hw: pointer to the HW structure + * + * This function will release the global config lock. + */ +void ice_release_global_cfg_lock(struct ice_hw *hw) +{ + ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); +} + +/** + * ice_acquire_change_lock + * @hw: pointer to the HW structure + * @access: access type (read or write) + * + * This function will request ownership of the change lock. + */ +enum ice_status +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) +{ + return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, + ICE_CHANGE_LOCK_TIMEOUT); +} + +/** + * ice_release_change_lock + * @hw: pointer to the HW structure + * + * This function will release the change lock using the proper Admin Command.
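+ *
+ * Sketch of the expected acquire/release pairing (hypothetical caller):
+ *
+ *	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ *	if (status)
+ *		return status;
+ *	... perform the package update ...
+ *	ice_release_change_lock(hw);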
+ */ +void ice_release_change_lock(struct ice_hw *hw) +{ + ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); +} + +/** + * ice_get_set_tx_topo - get or set Tx topology + * @hw: pointer to the HW struct + * @buf: pointer to Tx topology buffer + * @buf_size: buffer size + * @cd: pointer to command details structure or NULL + * @flags: pointer to descriptor flags + * @set: 0-get, 1-set topology + * + * The function will get or set the Tx topology. + */ +static enum ice_status +ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, + struct ice_sq_cd *cd, u8 *flags, bool set) +{ + struct ice_aqc_get_set_tx_topo *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.get_set_tx_topo; + if (set) { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo); + cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED; + /* requested to update a new topology, not a default topology */ + if (buf) + cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | + ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; + } else { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); + cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; + } + desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (status) + return status; + /* read the return flag values (first byte) for get operation */ + if (!set && flags) + *flags = desc.params.get_set_tx_topo.set_flags; + + return ICE_SUCCESS; +} + +/** + * ice_cfg_tx_topo - Initialize new Tx topology if available + * @hw: pointer to the HW struct + * @buf: pointer to Tx topology buffer + * @len: buffer size + * + * The function will apply the new Tx topology from the package buffer + * if available. + */ +enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len) +{ + u8 *current_topo, *new_topo = NULL; + struct ice_run_time_cfg_seg *seg; + struct ice_buf_hdr *section; + struct ice_pkg_hdr *pkg_hdr; + enum ice_ddp_state state; + u16 i, size = 0, offset; + enum ice_status status; + u32 reg = 0; + u8 flags; + + if (!buf || !len) + return ICE_ERR_PARAM; + + /* Does FW support new Tx topology mode? */ + if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { + ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n"); + return ICE_ERR_NOT_SUPPORTED; + } + + current_topo = (u8 *)ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); + if (!current_topo) + return ICE_ERR_NO_MEMORY; + + /* get the current Tx topology */ + status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL, + &flags, false); + ice_free(hw, current_topo); + + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to get the current topology\n"); + return status; + } + + /* Is the default topology already applied? */ + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == 9) { + ice_debug(hw, ICE_DBG_INIT, "Loaded default topology\n"); + /* The default topology is already loaded */ + return ICE_ERR_ALREADY_EXISTS; + } + + /* Is the new topology already applied? */ + if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == 5) { + ice_debug(hw, ICE_DBG_INIT, "Loaded new topology\n"); + /* The new topology is already loaded */ + return ICE_ERR_ALREADY_EXISTS; + } + + /* Was a set topology already issued?
*/ + if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) { + ice_debug(hw, ICE_DBG_INIT, "Tx topology update was already done by another PF\n"); + /* add a small delay before exiting */ + for (i = 0; i < 20; i++) + ice_msec_delay(100, true); + return ICE_ERR_ALREADY_EXISTS; + } + + /* Change the topology from new to default (5 to 9) */ + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == 5) { + ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n"); + goto update_topo; + } + + pkg_hdr = (struct ice_pkg_hdr *)buf; + state = ice_verify_pkg(pkg_hdr, len); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", + state); + return ICE_ERR_CFG; + } + + /* find run time configuration segment */ + seg = (struct ice_run_time_cfg_seg *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr); + if (!seg) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n"); + return ICE_ERR_CFG; + } + + if (LE32_TO_CPU(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count (%d) is wrong\n", + seg->buf_table.buf_count); + return ICE_ERR_CFG; + } + + section = ice_pkg_val_buf(seg->buf_table.buf_array); + + if (!section || LE32_TO_CPU(section->section_entry[0].type) != + ICE_SID_TX_5_LAYER_TOPO) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n"); + return ICE_ERR_CFG; + } + + size = LE16_TO_CPU(section->section_entry[0].size); + offset = LE16_TO_CPU(section->section_entry[0].offset); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n"); + return ICE_ERR_CFG; + } + + /* make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n"); + return ICE_ERR_CFG; + } + + /* Get the new topology buffer */ + new_topo = ((u8 *)section) + offset; + +update_topo: + /* acquire global lock to make sure that set topology is issued + * by only one PF + */ + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n"); + return status; + } + + /* check whether a reset was already triggered */ + reg = rd32(hw, GLGEN_RSTAT); + if (reg & GLGEN_RSTAT_DEVSTATE_M) { + /* Reset is in progress, re-init the hw again */ + ice_debug(hw, ICE_DBG_INIT, "Reset is in progress; layer topology might already be applied\n"); + ice_check_reset(hw); + return ICE_SUCCESS; + } + + /* set new topology */ + status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology\n"); + return status; + } + + /* new topology is updated; delay 1 second before issuing the CORER */ + for (i = 0; i < 10; i++) + ice_msec_delay(100, true); + ice_reset(hw, ICE_RESET_CORER); + /* CORER will clear the global lock, so no explicit call + * required for release + */ + return ICE_SUCCESS; +} diff --git a/sys/dev/ice/ice_ddp_common.h b/sys/dev/ice/ice_ddp_common.h new file mode 100644 index 000000000000..621729b03446 --- /dev/null +++ b/sys/dev/ice/ice_ddp_common.h @@ -0,0 +1,478 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* Copyright (c) 2022, Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1.
Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +/*$FreeBSD$*/ + +#ifndef _ICE_DDP_H_ +#define _ICE_DDP_H_ + +#include "ice_osdep.h" +#include "ice_adminq_cmd.h" +#include "ice_controlq.h" +#include "ice_status.h" +#include "ice_flex_type.h" +#include "ice_protocol_type.h" + +/* Package minimal version supported */ +#define ICE_PKG_SUPP_VER_MAJ 1 +#define ICE_PKG_SUPP_VER_MNR 3 + +/* Package format version */ +#define ICE_PKG_FMT_VER_MAJ 1 +#define ICE_PKG_FMT_VER_MNR 0 +#define ICE_PKG_FMT_VER_UPD 0 +#define ICE_PKG_FMT_VER_DFT 0 + +#define ICE_PKG_CNT 4 + +enum ice_ddp_state { + /* Indicates that this call to ice_init_pkg + * successfully loaded the requested DDP package + */ + ICE_DDP_PKG_SUCCESS = 0, + + /* Generic error for already loaded errors, it is mapped later to + * the more specific one (one of the next 3) + */ + ICE_DDP_PKG_ALREADY_LOADED = -1, + + /* Indicates that a DDP package of the same version has already been + * loaded onto the device by a previous call or by another PF + */ + ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2, + + /* The device has a DDP package that is not supported by the driver */ + ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3, + + /* The device has a compatible package + * (but different from the request) already loaded + */ + ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4, + + /* The firmware loaded on the device is not compatible with + * the DDP package loaded + */ + ICE_DDP_PKG_FW_MISMATCH = -5, + + /* The DDP package file is invalid */ + ICE_DDP_PKG_INVALID_FILE = -6, + + /* The version of the DDP package provided is higher than + * the driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7, + + /* The version of the DDP package provided is lower than the + * driver supports + */ + ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8, + + /* Missing security manifest in DDP pkg */ + ICE_DDP_PKG_NO_SEC_MANIFEST = -9, + + /* The RSA signature of the DDP package file provided is invalid */ + ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -10, + + /* The DDP package file security revision is too low and not + * supported by firmware + */ + ICE_DDP_PKG_SECURE_VERSION_NBR_TOO_LOW = -11, + + /* Manifest hash mismatch */ + ICE_DDP_PKG_MANIFEST_INVALID = -12, + + /* Buffer hash 
mismatches manifest */ + ICE_DDP_PKG_BUFFER_INVALID = -13, + + /* Other errors */ + ICE_DDP_PKG_ERR = -14, +}; + +/* Package and segment headers and tables */ +struct ice_pkg_hdr { + struct ice_pkg_ver pkg_format_ver; + __le32 seg_count; + __le32 seg_offset[STRUCT_HACK_VAR_LEN]; +}; + +/* Package signing algorithm types */ +#define SEGMENT_SIGN_TYPE_INVALID 0x00000000 +#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001 +#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002 +#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */ + +/* generic segment */ +struct ice_generic_seg_hdr { +#define SEGMENT_TYPE_INVALID 0x00000000 +#define SEGMENT_TYPE_METADATA 0x00000001 +#define SEGMENT_TYPE_ICE_E810 0x00000010 +#define SEGMENT_TYPE_SIGNING 0x00001001 +#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020 + __le32 seg_type; + struct ice_pkg_ver seg_format_ver; + __le32 seg_size; + char seg_id[ICE_PKG_NAME_SIZE]; +}; + +/* ice specific segment */ + +union ice_device_id { + struct { + __le16 device_id; + __le16 vendor_id; + } dev_vend_id; + __le32 id; +}; + +struct ice_device_id_entry { + union ice_device_id device; + union ice_device_id sub_device; +}; + +struct ice_seg { + struct ice_generic_seg_hdr hdr; + __le32 device_table_count; + struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN]; +}; + +struct ice_nvm_table { + __le32 table_count; + __le32 vers[STRUCT_HACK_VAR_LEN]; +}; + +struct ice_buf { +#define ICE_PKG_BUF_SIZE 4096 + u8 buf[ICE_PKG_BUF_SIZE]; +}; + +struct ice_buf_table { + __le32 buf_count; + struct ice_buf buf_array[STRUCT_HACK_VAR_LEN]; +}; + +struct ice_run_time_cfg_seg { + struct ice_generic_seg_hdr hdr; + u8 rsvd[8]; + struct ice_buf_table buf_table; +}; + +/* global metadata specific segment */ +struct ice_global_metadata_seg { + struct ice_generic_seg_hdr hdr; + struct ice_pkg_ver pkg_ver; + __le32 rsvd; + char pkg_name[ICE_PKG_NAME_SIZE]; +}; + +#define ICE_MIN_S_OFF 12 +#define ICE_MAX_S_OFF 4095 +#define ICE_MIN_S_SZ 1 +#define ICE_MAX_S_SZ 4084 + +struct ice_sign_seg { + struct ice_generic_seg_hdr hdr; + __le32 seg_id; + __le32 sign_type; + __le32 signed_seg_idx; + __le32 signed_buf_start; + __le32 signed_buf_count; +#define ICE_SIGN_SEG_RESERVED_COUNT 44 + u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT]; + struct ice_buf_table buf_tbl; +}; + +/* section information */ +struct ice_section_entry { + __le32 type; + __le16 offset; + __le16 size; +}; + +#define ICE_MIN_S_COUNT 1 +#define ICE_MAX_S_COUNT 511 +#define ICE_MIN_S_DATA_END 12 +#define ICE_MAX_S_DATA_END 4096 + +#define ICE_METADATA_BUF 0x80000000 + +struct ice_buf_hdr { + __le16 section_count; + __le16 data_end; + struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN]; +}; + +#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ + ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ + (ent_sz)) + +/* ice package section IDs */ +#define ICE_SID_METADATA 1 +#define ICE_SID_XLT0_SW 10 +#define ICE_SID_XLT_KEY_BUILDER_SW 11 +#define ICE_SID_XLT1_SW 12 +#define ICE_SID_XLT2_SW 13 +#define ICE_SID_PROFID_TCAM_SW 14 +#define ICE_SID_PROFID_REDIR_SW 15 +#define ICE_SID_FLD_VEC_SW 16 +#define ICE_SID_CDID_KEY_BUILDER_SW 17 +#define ICE_SID_CDID_REDIR_SW 18 + +#define ICE_SID_XLT0_ACL 20 +#define ICE_SID_XLT_KEY_BUILDER_ACL 21 +#define ICE_SID_XLT1_ACL 22 +#define ICE_SID_XLT2_ACL 23 +#define ICE_SID_PROFID_TCAM_ACL 24 +#define ICE_SID_PROFID_REDIR_ACL 25 +#define ICE_SID_FLD_VEC_ACL 26 +#define ICE_SID_CDID_KEY_BUILDER_ACL 27 +#define ICE_SID_CDID_REDIR_ACL 28 + +#define ICE_SID_XLT0_FD 30 
+#define ICE_SID_XLT_KEY_BUILDER_FD 31 +#define ICE_SID_XLT1_FD 32 +#define ICE_SID_XLT2_FD 33 +#define ICE_SID_PROFID_TCAM_FD 34 +#define ICE_SID_PROFID_REDIR_FD 35 +#define ICE_SID_FLD_VEC_FD 36 +#define ICE_SID_CDID_KEY_BUILDER_FD 37 +#define ICE_SID_CDID_REDIR_FD 38 + +#define ICE_SID_XLT0_RSS 40 +#define ICE_SID_XLT_KEY_BUILDER_RSS 41 +#define ICE_SID_XLT1_RSS 42 +#define ICE_SID_XLT2_RSS 43 +#define ICE_SID_PROFID_TCAM_RSS 44 +#define ICE_SID_PROFID_REDIR_RSS 45 +#define ICE_SID_FLD_VEC_RSS 46 +#define ICE_SID_CDID_KEY_BUILDER_RSS 47 +#define ICE_SID_CDID_REDIR_RSS 48 + +#define ICE_SID_RXPARSER_CAM 50 +#define ICE_SID_RXPARSER_NOMATCH_CAM 51 +#define ICE_SID_RXPARSER_IMEM 52 +#define ICE_SID_RXPARSER_XLT0_BUILDER 53 +#define ICE_SID_RXPARSER_NODE_PTYPE 54 +#define ICE_SID_RXPARSER_MARKER_PTYPE 55 +#define ICE_SID_RXPARSER_BOOST_TCAM 56 +#define ICE_SID_RXPARSER_PROTO_GRP 57 +#define ICE_SID_RXPARSER_METADATA_INIT 58 +#define ICE_SID_RXPARSER_XLT0 59 + +#define ICE_SID_TXPARSER_CAM 60 +#define ICE_SID_TXPARSER_NOMATCH_CAM 61 +#define ICE_SID_TXPARSER_IMEM 62 +#define ICE_SID_TXPARSER_XLT0_BUILDER 63 +#define ICE_SID_TXPARSER_NODE_PTYPE 64 +#define ICE_SID_TXPARSER_MARKER_PTYPE 65 +#define ICE_SID_TXPARSER_BOOST_TCAM 66 +#define ICE_SID_TXPARSER_PROTO_GRP 67 +#define ICE_SID_TXPARSER_METADATA_INIT 68 +#define ICE_SID_TXPARSER_XLT0 69 + +#define ICE_SID_RXPARSER_INIT_REDIR 70 +#define ICE_SID_TXPARSER_INIT_REDIR 71 +#define ICE_SID_RXPARSER_MARKER_GRP 72 +#define ICE_SID_TXPARSER_MARKER_GRP 73 +#define ICE_SID_RXPARSER_LAST_PROTO 74 +#define ICE_SID_TXPARSER_LAST_PROTO 75 +#define ICE_SID_RXPARSER_PG_SPILL 76 +#define ICE_SID_TXPARSER_PG_SPILL 77 +#define ICE_SID_RXPARSER_NOMATCH_SPILL 78 +#define ICE_SID_TXPARSER_NOMATCH_SPILL 79 + +#define ICE_SID_XLT0_PE 80 +#define ICE_SID_XLT_KEY_BUILDER_PE 81 +#define ICE_SID_XLT1_PE 82 +#define ICE_SID_XLT2_PE 83 +#define ICE_SID_PROFID_TCAM_PE 84 +#define ICE_SID_PROFID_REDIR_PE 85 +#define ICE_SID_FLD_VEC_PE 86 +#define ICE_SID_CDID_KEY_BUILDER_PE 87 +#define ICE_SID_CDID_REDIR_PE 88 + +#define ICE_SID_RXPARSER_FLAG_REDIR 97 + +/* Label Metadata section IDs */ +#define ICE_SID_LBL_FIRST 0x80000010 +#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010 +#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011 +#define ICE_SID_LBL_RESERVED_12 0x80000012 +#define ICE_SID_LBL_RESERVED_13 0x80000013 +#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014 +#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015 +#define ICE_SID_LBL_PTYPE 0x80000016 +#define ICE_SID_LBL_PROTOCOL_ID 0x80000017 +#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 +#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019 +#define ICE_SID_LBL_RXPARSER_PG 0x8000001A +#define ICE_SID_LBL_TXPARSER_PG 0x8000001B +#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C +#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D +#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E +#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F +#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020 +#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021 +#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022 +#define ICE_SID_LBL_FLAG 0x80000023 +#define ICE_SID_LBL_REG 0x80000024 +#define ICE_SID_LBL_SW_PTG 0x80000025 +#define ICE_SID_LBL_ACL_PTG 0x80000026 +#define ICE_SID_LBL_PE_PTG 0x80000027 +#define ICE_SID_LBL_RSS_PTG 0x80000028 +#define ICE_SID_LBL_FD_PTG 0x80000029 +#define ICE_SID_LBL_SW_VSIG 0x8000002A +#define ICE_SID_LBL_ACL_VSIG 0x8000002B +#define ICE_SID_LBL_PE_VSIG 0x8000002C +#define ICE_SID_LBL_RSS_VSIG 0x8000002D +#define ICE_SID_LBL_FD_VSIG 0x8000002E +#define 
ICE_SID_LBL_PTYPE_META 0x8000002F +#define ICE_SID_LBL_SW_PROFID 0x80000030 +#define ICE_SID_LBL_ACL_PROFID 0x80000031 +#define ICE_SID_LBL_PE_PROFID 0x80000032 +#define ICE_SID_LBL_RSS_PROFID 0x80000033 +#define ICE_SID_LBL_FD_PROFID 0x80000034 +#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035 +#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036 +#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037 +#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038 +/* The following define MUST be updated to reflect the last label section ID */ +#define ICE_SID_LBL_LAST 0x80000038 + +/* Label ICE runtime configuration section IDs */ +#define ICE_SID_TX_5_LAYER_TOPO 0x10 + +enum ice_block { + ICE_BLK_SW = 0, + ICE_BLK_ACL, + ICE_BLK_FD, + ICE_BLK_RSS, + ICE_BLK_PE, + ICE_BLK_COUNT +}; + +enum ice_sect { + ICE_XLT0 = 0, + ICE_XLT_KB, + ICE_XLT1, + ICE_XLT2, + ICE_PROF_TCAM, + ICE_PROF_REDIR, + ICE_VEC_TBL, + ICE_CDID_KB, + ICE_CDID_REDIR, + ICE_SECT_COUNT +}; + +/* package buffer building */ + +struct ice_buf_build { + struct ice_buf buf; + u16 reserved_section_table_entries; +}; + +struct ice_pkg_enum { + struct ice_buf_table *buf_table; + u32 buf_idx; + + u32 type; + struct ice_buf_hdr *buf; + u32 sect_idx; + void *sect; + u32 sect_type; + + u32 entry_idx; + void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); +}; + +struct ice_hw; + +enum ice_status +ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); +void ice_release_change_lock(struct ice_hw *hw); + +struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw); +void * +ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size); +enum ice_status +ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); +enum ice_status +ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, + ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list); +enum ice_status +ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); +u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); +u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); + +enum ice_status +ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +enum ice_status +ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count); +void ice_release_global_cfg_lock(struct ice_hw *hw); +struct ice_generic_seg_hdr * +ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, + struct ice_pkg_hdr *pkg_hdr); +enum ice_ddp_state +ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len); +enum ice_ddp_state +ice_get_pkg_info(struct ice_hw *hw); +void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg); +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg); +enum ice_status +ice_acquire_global_cfg_lock(struct ice_hw *hw, + enum ice_aq_res_access_type access); + +struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg); +struct ice_buf_hdr * +ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state); +bool +ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state); +void * +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type, u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)); +void * +ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type); +enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); +enum ice_ddp_state +ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); +bool ice_is_init_pkg_successful(enum ice_ddp_state 
state); +void ice_free_seg(struct ice_hw *hw); + +struct ice_buf_build * +ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, + void **section); +struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld); +void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld); + +enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len); + +#endif /* _ICE_DDP_H_ */ diff --git a/sys/dev/ice/ice_opts.h b/sys/dev/ice/ice_defs.h similarity index 64% copy from sys/dev/ice/ice_opts.h copy to sys/dev/ice/ice_defs.h index ef163db402de..8a1dda2c492c 100644 --- a/sys/dev/ice/ice_opts.h +++ b/sys/dev/ice/ice_defs.h @@ -1,48 +1,71 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ -/** - * @file ice_opts.h - * @brief header including kernel option files - * - * Contains the various opt_*.h header files which set various macros - * indicating features and functionality which depend on kernel configuration. 
- */ +#ifndef _ICE_DEFS_H_ +#define _ICE_DEFS_H_ + +#define ETH_ALEN 6 + +#define ETH_HEADER_LEN 14 + +#define BIT(a) (1UL << (a)) +#ifndef BIT_ULL +#define BIT_ULL(a) (1ULL << (a)) +#endif /* BIT_ULL */ + +#define BITS_PER_BYTE 8 -#ifndef _ICE_OPTS_H_ -#define _ICE_OPTS_H_ +#define _FORCE_ -#include "opt_inet.h" -#include "opt_inet6.h" -#include "opt_rss.h" +#define ICE_BYTES_PER_WORD 2 +#define ICE_BYTES_PER_DWORD 4 +#define ICE_MAX_TRAFFIC_CLASS 8 +#ifndef MIN_T +#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b)) #endif + +#define IS_ASCII(_ch) ((_ch) < 0x80) + +#define STRUCT_HACK_VAR_LEN +/** + * ice_struct_size - size of struct with C99 flexible array member + * @ptr: pointer to structure + * @field: flexible array member (last member of the structure) + * @num: number of elements of that flexible array member + */ +#define ice_struct_size(ptr, field, num) \ + (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num)) + +#define FLEX_ARRAY_SIZE(_ptr, _mem, cnt) ((cnt) * sizeof(_ptr->_mem[0])) + +#endif /* _ICE_DEFS_H_ */ diff --git a/sys/dev/ice/ice_devids.h b/sys/dev/ice/ice_devids.h index 2d092c6037c4..dc8970952b49 100644 --- a/sys/dev/ice/ice_devids.h +++ b/sys/dev/ice/ice_devids.h @@ -1,90 +1,95 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _ICE_DEVIDS_H_ #define _ICE_DEVIDS_H_ /* Device IDs */ +#define ICE_DEV_ID_E822_SI_DFLT 0x1888 /* Intel(R) Ethernet Connection E823-L for backplane */ #define ICE_DEV_ID_E823L_BACKPLANE 0x124C /* Intel(R) Ethernet Connection E823-L for SFP */ #define ICE_DEV_ID_E823L_SFP 0x124D /* Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T */ #define ICE_DEV_ID_E823L_10G_BASE_T 0x124E /* Intel(R) Ethernet Connection E823-L 1GbE */ #define ICE_DEV_ID_E823L_1GBE 0x124F /* Intel(R) Ethernet Connection E823-L for QSFP */ #define ICE_DEV_ID_E823L_QSFP 0x151D /* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_E810C_BACKPLANE 0x1591 /* Intel(R) Ethernet Controller E810-C for QSFP */ #define ICE_DEV_ID_E810C_QSFP 0x1592 /* Intel(R) Ethernet Controller E810-C for SFP */ #define ICE_DEV_ID_E810C_SFP 0x1593 #define ICE_SUBDEV_ID_E810T 0x000E #define ICE_SUBDEV_ID_E810T2 0x000F +#define ICE_SUBDEV_ID_E810T3 0x02E9 +#define ICE_SUBDEV_ID_E810T4 0x02EA +#define ICE_SUBDEV_ID_E810T5 0x0010 +#define ICE_SUBDEV_ID_E810T6 0x0012 +#define ICE_SUBDEV_ID_E810T7 0x0011 /* Intel(R) Ethernet Controller E810-XXV for backplane */ #define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599 /* Intel(R) Ethernet Controller E810-XXV for QSFP */ #define ICE_DEV_ID_E810_XXV_QSFP 0x159A /* Intel(R) Ethernet Controller E810-XXV for SFP */ #define ICE_DEV_ID_E810_XXV_SFP 0x159B /* Intel(R) Ethernet Connection E823-C for backplane */ #define ICE_DEV_ID_E823C_BACKPLANE 0x188A /* Intel(R) Ethernet Connection E823-C for QSFP */ #define ICE_DEV_ID_E823C_QSFP 0x188B /* Intel(R) Ethernet Connection E823-C for SFP */ #define ICE_DEV_ID_E823C_SFP 0x188C /* Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T */ #define ICE_DEV_ID_E823C_10G_BASE_T 0x188D /* Intel(R) Ethernet Connection E823-C 1GbE */ #define ICE_DEV_ID_E823C_SGMII 0x188E /* Intel(R) Ethernet Connection E822-C for backplane */ #define ICE_DEV_ID_E822C_BACKPLANE 0x1890 /* Intel(R) Ethernet Connection E822-C for QSFP */ #define ICE_DEV_ID_E822C_QSFP 0x1891 /* Intel(R) Ethernet Connection E822-C for SFP */ #define ICE_DEV_ID_E822C_SFP 0x1892 /* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */ #define ICE_DEV_ID_E822C_10G_BASE_T 0x1893 /* Intel(R) Ethernet Connection E822-C 1GbE */ #define ICE_DEV_ID_E822C_SGMII 0x1894 /* Intel(R) Ethernet Connection E822-L for backplane */ #define ICE_DEV_ID_E822L_BACKPLANE 0x1897 /* Intel(R) Ethernet Connection E822-L for SFP */ #define ICE_DEV_ID_E822L_SFP 0x1898 /* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */ #define ICE_DEV_ID_E822L_10G_BASE_T 0x1899 /* Intel(R) Ethernet Connection E822-L 1GbE */ #define ICE_DEV_ID_E822L_SGMII 0x189A - #endif /* _ICE_DEVIDS_H_ */ diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h index 19d633554463..f47adb1572c6 100644 --- a/sys/dev/ice/ice_drv_info.h +++ b/sys/dev/ice/ice_drv_info.h @@ -1,199 +1,196 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_drv_info.h * @brief device IDs and driver version * * Contains the device IDs tables and the driver version string. * * This file contains static or constant definitions intended to be included * exactly once in the main driver interface file. It implicitly depends on * the main driver header file. * * These definitions could be placed directly in the interface file, but are * kept separate for organizational purposes. */ /** * @var ice_driver_version * @brief driver version string * * Driver version information, used for display as part of an informational * sysctl, and as part of the driver information sent to the firmware at load. * * @var ice_major_version * @brief driver major version number * * @var ice_minor_version * @brief driver minor version number * * @var ice_patch_version * @brief driver patch version number * * @var ice_rc_version * @brief driver release candidate version number */ -const char ice_driver_version[] = "1.34.2-k"; +const char ice_driver_version[] = "1.37.7-k"; const uint8_t ice_major_version = 1; -const uint8_t ice_minor_version = 34; -const uint8_t ice_patch_version = 2; +const uint8_t ice_minor_version = 37; +const uint8_t ice_patch_version = 7; const uint8_t ice_rc_version = 0; #define PVIDV(vendor, devid, name) \ - PVID(vendor, devid, name " - 1.34.2-k") + PVID(vendor, devid, name " - 1.37.7-k") #define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \ - PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.34.2-k") + PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.37.7-k") /** * @var ice_vendor_info_array * @brief array of PCI devices supported by this driver * * Array of PCI devices which are supported by this driver. Used to determine * whether a given device should be loaded by this driver. This information is * also exported as part of the module information for other tools to analyze. * * @remark Each type of device ID needs to be listed from most-specific entry * to most-generic entry; e.g. PVIDV_OEM()s for a device ID must come before * the PVIDV() for it. 
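* For example, in the table below the PVIDV_OEM() entries for ICE_DEV_ID_E810C_QSFP with specific subdevice IDs (0x0001 through 0x000E) are listed before the generic PVIDV() entry for ICE_DEV_ID_E810C_QSFP itself.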
*/ static pci_vendor_info_t ice_vendor_info_array[] = { PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE, "Intel(R) Ethernet Controller E810-C for backplane"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0001, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0002, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0003, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0004, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0005, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0006, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0007, 0, "Intel(R) Ethernet Network Adapter E810-C-Q1 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x0008, 0, "Intel(R) Ethernet Network Adapter E810-C-Q2 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x000D, 0, "Intel(R) Ethernet Network Adapter E810-L-Q2 for OCP3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, ICE_INTEL_VENDOR_ID, 0x000E, 0, "Intel(R) Ethernet Network Adapter E810-2C-Q2"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP, "Intel(R) Ethernet Controller E810-C for QSFP"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0005, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0006, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x0007, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4"), - PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, - ICE_INTEL_VENDOR_ID, 0x0008, 0, - "Intel(R) Ethernet Network Adapter E810-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, ICE_INTEL_VENDOR_ID, 0x000C, 0, "Intel(R) Ethernet Network Adapter E810-XXV-4 for OCP 3.0"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP, "Intel(R) Ethernet Controller E810-C for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_BACKPLANE, "Intel(R) Ethernet Connection E822-C for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_QSFP, "Intel(R) Ethernet Connection E822-C for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP, "Intel(R) Ethernet Connection E822-C for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T, "Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII, "Intel(R) Ethernet Connection E822-C 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE, "Intel(R) Ethernet Connection E822-L for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP, "Intel(R) Ethernet Connection E822-L for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T, "Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII, "Intel(R) Ethernet Connection E822-L 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE, "Intel(R) Ethernet Connection E823-L for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP, "Intel(R) Ethernet 
Connection E823-L for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP, "Intel(R) Ethernet Connection E823-L for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T, "Intel(R) Ethernet Connection E823-L/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE, "Intel(R) Ethernet Connection E823-L 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_BACKPLANE, "Intel(R) Ethernet Connection E823-C for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_QSFP, "Intel(R) Ethernet Connection E823-C for QSFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SFP, "Intel(R) Ethernet Connection E823-C for SFP"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_10G_BASE_T, "Intel(R) Ethernet Connection E823-C/X557-AT 10GBASE-T"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823C_SGMII, "Intel(R) Ethernet Connection E823-C 1GbE"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_BACKPLANE, "Intel(R) Ethernet Controller E810-XXV for backplane"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_QSFP, "Intel(R) Ethernet Controller E810-XXV for QSFP"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0003, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0004, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0005, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"), PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, ICE_INTEL_VENDOR_ID, 0x0006, 0, "Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"), PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP, "Intel(R) Ethernet Controller E810-XXV for SFP"), PVID_END }; diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h index 51c60b0dfeea..efbeb12c3ab1 100644 --- a/sys/dev/ice/ice_features.h +++ b/sys/dev/ice/ice_features.h @@ -1,95 +1,97 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_features.h * @brief device feature controls * * Contains a list of various device features which could be enabled or * disabled. */ #ifndef _ICE_FEATURES_H_ #define _ICE_FEATURES_H_ /** * @enum feat_list * @brief driver feature enumeration * * Enumeration of possible device driver features that can be enabled or * disabled. Each possible value represents a different feature which can be * enabled or disabled. * * The driver stores a bitmap of the features that the device and OS are * capable of, as well as another bitmap indicating which features are * currently enabled for that device. */ enum feat_list { ICE_FEATURE_SRIOV, ICE_FEATURE_RSS, ICE_FEATURE_NETMAP, ICE_FEATURE_FDIR, ICE_FEATURE_MSI, ICE_FEATURE_MSIX, ICE_FEATURE_RDMA, ICE_FEATURE_SAFE_MODE, ICE_FEATURE_LENIENT_LINK_MODE, ICE_FEATURE_LINK_MGMT_VER_1, ICE_FEATURE_LINK_MGMT_VER_2, ICE_FEATURE_HEALTH_STATUS, ICE_FEATURE_FW_LOGGING, ICE_FEATURE_HAS_PBA, + ICE_FEATURE_DCB, + ICE_FEATURE_TX_BALANCE, /* Must be last entry */ ICE_FEATURE_COUNT }; /** * ice_disable_unsupported_features - Disable features not enabled by OS * @bitmap: the feature bitmap * * Check for OS support of various driver features. Clear the feature bit for * any feature which is not enabled by the OS. This should be called early * during driver attach after setting up the feature bitmap. * * @remark the bitmap parameter is marked as unused in order to avoid an * unused parameter warning in case none of the features need to be disabled. */ static inline void ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap) { ice_clear_bit(ICE_FEATURE_SRIOV, bitmap); #ifndef DEV_NETMAP ice_clear_bit(ICE_FEATURE_NETMAP, bitmap); #endif } #endif diff --git a/sys/dev/ice/ice_flex_pipe.c b/sys/dev/ice/ice_flex_pipe.c index 0c956f720a92..36c420478131 100644 --- a/sys/dev/ice/ice_flex_pipe.c +++ b/sys/dev/ice/ice_flex_pipe.c @@ -1,5702 +1,3882 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "ice_common.h" +#include "ice_ddp_common.h" #include "ice_flex_pipe.h" #include "ice_protocol_type.h" #include "ice_flow.h" -/* To support tunneling entries by PF, the package will append the PF number to - * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. - */ -#define ICE_TNL_PRE "TNL_" static const struct ice_tunnel_type_scan tnls[] = { { TNL_VXLAN, "TNL_VXLAN_PF" }, { TNL_GENEVE, "TNL_GENEVE_PF" }, { TNL_LAST, "" } }; static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = { /* SWITCH */ { ICE_SID_XLT0_SW, ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT1_SW, ICE_SID_XLT2_SW, ICE_SID_PROFID_TCAM_SW, ICE_SID_PROFID_REDIR_SW, ICE_SID_FLD_VEC_SW, ICE_SID_CDID_KEY_BUILDER_SW, ICE_SID_CDID_REDIR_SW }, /* ACL */ { ICE_SID_XLT0_ACL, ICE_SID_XLT_KEY_BUILDER_ACL, ICE_SID_XLT1_ACL, ICE_SID_XLT2_ACL, ICE_SID_PROFID_TCAM_ACL, ICE_SID_PROFID_REDIR_ACL, ICE_SID_FLD_VEC_ACL, ICE_SID_CDID_KEY_BUILDER_ACL, ICE_SID_CDID_REDIR_ACL }, /* FD */ { ICE_SID_XLT0_FD, ICE_SID_XLT_KEY_BUILDER_FD, ICE_SID_XLT1_FD, ICE_SID_XLT2_FD, ICE_SID_PROFID_TCAM_FD, ICE_SID_PROFID_REDIR_FD, ICE_SID_FLD_VEC_FD, ICE_SID_CDID_KEY_BUILDER_FD, ICE_SID_CDID_REDIR_FD }, /* RSS */ { ICE_SID_XLT0_RSS, ICE_SID_XLT_KEY_BUILDER_RSS, ICE_SID_XLT1_RSS, ICE_SID_XLT2_RSS, ICE_SID_PROFID_TCAM_RSS, ICE_SID_PROFID_REDIR_RSS, ICE_SID_FLD_VEC_RSS, ICE_SID_CDID_KEY_BUILDER_RSS, ICE_SID_CDID_REDIR_RSS }, /* PE */ { ICE_SID_XLT0_PE, ICE_SID_XLT_KEY_BUILDER_PE, ICE_SID_XLT1_PE, ICE_SID_XLT2_PE, ICE_SID_PROFID_TCAM_PE, ICE_SID_PROFID_REDIR_PE, ICE_SID_FLD_VEC_PE, ICE_SID_CDID_KEY_BUILDER_PE, ICE_SID_CDID_REDIR_PE } }; /** * ice_sect_id - returns section ID * @blk: block type * @sect: section type * * This helper function returns the proper section ID given a block type and a * section type. */ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect) { return ice_sect_lkup[blk][sect]; } -/** - * ice_pkg_val_buf - * @buf: pointer to the ice buffer - * - * This helper function validates a buffer's header. - */ -static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) -{ - struct ice_buf_hdr *hdr; - u16 section_count; - u16 data_end; - - hdr = (struct ice_buf_hdr *)buf->buf; - /* verify data */ - section_count = LE16_TO_CPU(hdr->section_count); - if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) - return NULL; - - data_end = LE16_TO_CPU(hdr->data_end); - if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END) - return NULL; - - return hdr; -} - -/** - * ice_find_buf_table - * @ice_seg: pointer to the ice segment - * - * Returns the address of the buffer table within the ice segment. 
- */ -static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) -{ - struct ice_nvm_table *nvms; - - nvms = (struct ice_nvm_table *) - (ice_seg->device_table + - LE32_TO_CPU(ice_seg->device_table_count)); - - return (_FORCE_ struct ice_buf_table *) - (nvms->vers + LE32_TO_CPU(nvms->table_count)); -} - -/** - * ice_pkg_enum_buf - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This function will enumerate all the buffers in the ice segment. The first - * call is made with the ice_seg parameter non-NULL; on subsequent calls, - * ice_seg is set to NULL which continues the enumeration. When the function - * returns a NULL pointer, then the end of the buffers has been reached, or an - * unexpected value has been detected (for example an invalid section count or - * an invalid buffer end value). - */ -static struct ice_buf_hdr * -ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (ice_seg) { - state->buf_table = ice_find_buf_table(ice_seg); - if (!state->buf_table) - return NULL; - - state->buf_idx = 0; - return ice_pkg_val_buf(state->buf_table->buf_array); - } - - if (++state->buf_idx < LE32_TO_CPU(state->buf_table->buf_count)) - return ice_pkg_val_buf(state->buf_table->buf_array + - state->buf_idx); - else - return NULL; -} - -/** - * ice_pkg_advance_sect - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * - * This helper function will advance the section within the ice segment, - * also advancing the buffer if needed. - */ -static bool -ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state) -{ - if (!ice_seg && !state->buf) - return false; - - if (!ice_seg && state->buf) - if (++state->sect_idx < LE16_TO_CPU(state->buf->section_count)) - return true; - - state->buf = ice_pkg_enum_buf(ice_seg, state); - if (!state->buf) - return false; - - /* start of new buffer, reset section index */ - state->sect_idx = 0; - return true; -} - -/** - * ice_pkg_enum_section - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * - * This function will enumerate all the sections of a particular type in the - * ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the matching - * sections has been reached. 
- */ -static void * -ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type) -{ - u16 offset, size; - - if (ice_seg) - state->type = sect_type; - - if (!ice_pkg_advance_sect(ice_seg, state)) - return NULL; - - /* scan for next matching section */ - while (state->buf->section_entry[state->sect_idx].type != - CPU_TO_LE32(state->type)) - if (!ice_pkg_advance_sect(NULL, state)) - return NULL; - - /* validate section */ - offset = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); - if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF) - return NULL; - - size = LE16_TO_CPU(state->buf->section_entry[state->sect_idx].size); - if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) - return NULL; - - /* make sure the section fits in the buffer */ - if (offset + size > ICE_PKG_BUF_SIZE) - return NULL; - - state->sect_type = - LE32_TO_CPU(state->buf->section_entry[state->sect_idx].type); - - /* calc pointer to this section */ - state->sect = ((u8 *)state->buf) + - LE16_TO_CPU(state->buf->section_entry[state->sect_idx].offset); - - return state->sect; -} - -/** - * ice_pkg_enum_entry - * @ice_seg: pointer to the ice segment (or NULL on subsequent calls) - * @state: pointer to the enum state - * @sect_type: section type to enumerate - * @offset: pointer to variable that receives the offset in the table (optional) - * @handler: function that handles access to the entries in the section - * - * This function will enumerate all the entries of a particular section type in - * the ice segment. The first call is made with the ice_seg parameter non-NULL; - * on subsequent calls, ice_seg is set to NULL which continues the enumeration. - * When the function returns a NULL pointer, then the end of the entries has - * been reached. - * - * Since each section may have a different header and entry size, the handler - * function is needed to determine the number and location of entries in each - * section. - * - * The offset parameter is optional, but should be used for sections that - * contain an offset for each section table. For such cases, the section handler - * function must return the appropriate offset + index to give the absolute - * offset for each entry. For example, if the base for a section's header - * indicates a base offset of 10, and the index for the entry is 2, then the - * section handler function should set the offset to 10 + 2 = 12.
- */ -static void * -ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, - u32 sect_type, u32 *offset, - void *(*handler)(u32 sect_type, void *section, - u32 index, u32 *offset)) -{ - void *entry; - - if (ice_seg) { - if (!handler) - return NULL; - - if (!ice_pkg_enum_section(ice_seg, state, sect_type)) - return NULL; - - state->entry_idx = 0; - state->handler = handler; - } else { - state->entry_idx++; - } - - if (!state->handler) - return NULL; - - /* get entry */ - entry = state->handler(state->sect_type, state->sect, state->entry_idx, - offset); - if (!entry) { - /* end of a section, look for another section of this type */ - if (!ice_pkg_enum_section(NULL, state, 0)) - return NULL; - - state->entry_idx = 0; - entry = state->handler(state->sect_type, state->sect, - state->entry_idx, offset); - } - - return entry; -} - -/** - * ice_boost_tcam_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the boost TCAM entry to be returned - * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual boost TCAM entries. - */ -static void * -ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_boost_tcam_section *boost; - - if (!section) - return NULL; - - if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM) - return NULL; - - if (index > ICE_MAX_BST_TCAMS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - boost = (struct ice_boost_tcam_section *)section; - if (index >= LE16_TO_CPU(boost->count)) - return NULL; - - return boost->tcam + index; -} - -/** - * ice_find_boost_entry - * @ice_seg: pointer to the ice segment (non-NULL) - * @addr: Boost TCAM address of entry to search for - * @entry: returns pointer to the entry - * - * Finds a particular Boost TCAM entry and returns a pointer to that entry - * if it is found. The ice_seg parameter must not be NULL since the first call - * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure. - */ -static enum ice_status -ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr, - struct ice_boost_tcam_entry **entry) -{ - struct ice_boost_tcam_entry *tcam; - struct ice_pkg_enum state; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!ice_seg) - return ICE_ERR_PARAM; - - do { - tcam = (struct ice_boost_tcam_entry *) - ice_pkg_enum_entry(ice_seg, &state, - ICE_SID_RXPARSER_BOOST_TCAM, NULL, - ice_boost_tcam_handler); - if (tcam && LE16_TO_CPU(tcam->addr) == addr) { - *entry = tcam; - return ICE_SUCCESS; - } - - ice_seg = NULL; - } while (tcam); - - *entry = NULL; - return ICE_ERR_CFG; -} - -/** - * ice_label_enum_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the label entry to be returned - * @offset: pointer to receive absolute offset, always zero for label sections - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * Handles enumeration of individual label entries. 
- */ -static void * -ice_label_enum_handler(u32 __ALWAYS_UNUSED sect_type, void *section, u32 index, - u32 *offset) -{ - struct ice_label_section *labels; - - if (!section) - return NULL; - - if (index > ICE_MAX_LABELS_IN_BUF) - return NULL; - - if (offset) - *offset = 0; - - labels = (struct ice_label_section *)section; - if (index >= LE16_TO_CPU(labels->count)) - return NULL; - - return labels->label + index; -} - -/** - * ice_enum_labels - * @ice_seg: pointer to the ice segment (NULL on subsequent calls) - * @type: the section type that will contain the label (0 on subsequent calls) - * @state: ice_pkg_enum structure that will hold the state of the enumeration - * @value: pointer to a value that will return the label's value if found - * - * Enumerates a list of labels in the package. The caller will call - * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call - * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL - * the end of the list has been reached. - */ -static char * -ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state, - u16 *value) -{ - struct ice_label *label; - - /* Check for valid label section on first call */ - if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST)) - return NULL; - - label = (struct ice_label *)ice_pkg_enum_entry(ice_seg, state, type, - NULL, - ice_label_enum_handler); - if (!label) - return NULL; - - *value = LE16_TO_CPU(label->value); - return label->name; -} - /** * ice_add_tunnel_hint * @hw: pointer to the HW structure * @label_name: label text * @val: value of the tunnel port boost entry */ -static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) +void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val) { if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) { u16 i; for (i = 0; tnls[i].type != TNL_LAST; i++) { size_t len = strlen(tnls[i].label_prefix); /* Look for matching label start, before continuing */ if (strncmp(label_name, tnls[i].label_prefix, len)) continue; /* Make sure this label matches our PF. Note that the PF * character ('0' - '7') will be located where our * prefix string's null terminator is located. */ if ((label_name[len] - '0') == hw->pf_id) { hw->tnl.tbl[hw->tnl.count].type = tnls[i].type; hw->tnl.tbl[hw->tnl.count].valid = false; hw->tnl.tbl[hw->tnl.count].in_use = false; hw->tnl.tbl[hw->tnl.count].marked = false; hw->tnl.tbl[hw->tnl.count].boost_addr = val; hw->tnl.tbl[hw->tnl.count].port = 0; hw->tnl.count++; break; } } } } -/** - * ice_init_pkg_hints - * @hw: pointer to the HW structure - * @ice_seg: pointer to the segment of the package scan (non-NULL) - * - * This function will scan the package and save off relevant information - * (hints or metadata) for driver use. The ice_seg parameter must not be NULL - * since the first call to ice_enum_labels requires a pointer to an actual - * ice_seg structure. 
- */ -static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_pkg_enum state; - char *label_name; - u16 val; - int i; - - ice_memset(&hw->tnl, 0, sizeof(hw->tnl), ICE_NONDMA_MEM); - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!ice_seg) - return; - - label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state, - &val); - - while (label_name) { - if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE))) - /* check for a tunnel entry */ - ice_add_tunnel_hint(hw, label_name, val); - - label_name = ice_enum_labels(NULL, 0, &state, &val); - } - - /* Cache the appropriate boost TCAM entry pointers for tunnels */ - for (i = 0; i < hw->tnl.count; i++) { - ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr, - &hw->tnl.tbl[i].boost_entry); - if (hw->tnl.tbl[i].boost_entry) - hw->tnl.tbl[i].valid = true; - } -} - /* Key creation */ #define ICE_DC_KEY 0x1 /* don't care */ #define ICE_DC_KEYINV 0x1 #define ICE_NM_KEY 0x0 /* never match */ #define ICE_NM_KEYINV 0x0 #define ICE_0_KEY 0x1 /* match 0 */ #define ICE_0_KEYINV 0x0 #define ICE_1_KEY 0x0 /* match 1 */ #define ICE_1_KEYINV 0x1 /** * ice_gen_key_word - generate 16 bits of a key/mask word * @val: the value * @valid: valid bits mask (change only the valid bits) * @dont_care: don't care mask * @nvr_mtch: never match mask * @key: pointer to the array where the resulting key portion is stored * @key_inv: pointer to the array where the resulting key invert portion is stored * * This function generates 16 bits from an 8-bit value, an 8-bit don't care mask * and an 8-bit never match mask. The 16 bits of output are divided into 8 bits * of key and 8 bits of key invert. * * '0' = b01, always match a 0 bit * '1' = b10, always match a 1 bit * '?' = b11, don't care bit (always matches) * '~' = b00, never match bit * * Input: * val: b0 1 0 1 0 1 * dont_care: b0 0 1 1 0 0 * never_mtch: b0 0 0 0 1 1 * ------------------------------ * Result: key: b01 10 11 11 00 00 */ static enum ice_status ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key, u8 *key_inv) { u8 in_key = *key, in_key_inv = *key_inv; u8 i; /* 'dont_care' and 'nvr_mtch' masks cannot overlap */ if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch)) return ICE_ERR_CFG; *key = 0; *key_inv = 0; /* encode the 8 bits into 8-bit key and 8-bit key invert */ for (i = 0; i < 8; i++) { *key >>= 1; *key_inv >>= 1; if (!(valid & 0x1)) { /* change only valid bits */ *key |= (in_key & 0x1) << 7; *key_inv |= (in_key_inv & 0x1) << 7; } else if (dont_care & 0x1) { /* don't care bit */ *key |= ICE_DC_KEY << 7; *key_inv |= ICE_DC_KEYINV << 7; } else if (nvr_mtch & 0x1) { /* never match bit */ *key |= ICE_NM_KEY << 7; *key_inv |= ICE_NM_KEYINV << 7; } else if (val & 0x01) { /* exact 1 match */ *key |= ICE_1_KEY << 7; *key_inv |= ICE_1_KEYINV << 7; } else { /* exact 0 match */ *key |= ICE_0_KEY << 7; *key_inv |= ICE_0_KEYINV << 7; } dont_care >>= 1; nvr_mtch >>= 1; valid >>= 1; val >>= 1; in_key >>= 1; in_key_inv >>= 1; } return ICE_SUCCESS; } /** * ice_bits_max_set - determine if the number of bits set is within a maximum * @mask: pointer to the byte array which is the mask * @size: the number of bytes in the mask * @max: the max number of set bits * * This function determines whether at most 'max' bits are set in an * array. Returns true if the number of bits set is <= max, * and false otherwise.
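* For example, with mask = { 0x01, 0x06 } and size = 2, ice_hweight8() counts three set bits in total, so the function returns false when max is 2 and true when max is 3.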
*/ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) { u16 count = 0; u16 i; /* check each byte */ for (i = 0; i < size; i++) { /* if 0, go to next byte */ if (!mask[i]) continue; /* We know there is at least one set bit in this byte because of * the above check; if we already have found 'max' number of * bits set, then we can return failure now. */ if (count == max) return false; /* count the bits in this byte, checking threshold */ count += ice_hweight8(mask[i]); - if (count > max) - return false; - } - - return true; -} - -/** - * ice_set_key - generate a variable sized key with multiples of 16-bits - * @key: pointer to where the key will be stored - * @size: the size of the complete key in bytes (must be even) - * @val: array of 8-bit values that makes up the value portion of the key - * @upd: array of 8-bit masks that determine what key portion to update - * @dc: array of 8-bit masks that make up the don't care mask - * @nm: array of 8-bit masks that make up the never match mask - * @off: the offset of the first byte in the key to update - * @len: the number of bytes in the key update - * - * This function generates a key from a value, a don't care mask and a never - * match mask. - * upd, dc, and nm are optional parameters, and can be NULL: - * upd == NULL --> upd mask is all 1's (update all bits) - * dc == NULL --> dc mask is all 0's (no don't care bits) - * nm == NULL --> nm mask is all 0's (no never match bits) - */ -static enum ice_status -ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, - u16 len) -{ - u16 half_size; - u16 i; - - /* size must be a multiple of 2 bytes. */ - if (size % 2) - return ICE_ERR_CFG; - half_size = size / 2; - - if (off + len > half_size) - return ICE_ERR_CFG; - - /* Make sure at most one bit is set in the never match mask. Having more - * than one never match mask bit set will cause HW to consume excessive - * power otherwise; this is a power management efficiency check. - */ -#define ICE_NVR_MTCH_BITS_MAX 1 - if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) - return ICE_ERR_CFG; - - for (i = 0; i < len; i++) - if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, - dc ? dc[i] : 0, nm ? nm[i] : 0, - key + off + i, key + half_size + off + i)) - return ICE_ERR_CFG; - - return ICE_SUCCESS; -} - -/** - * ice_acquire_global_cfg_lock - * @hw: pointer to the HW structure - * @access: access type (read or write) - * - * This function will request ownership of the global config lock for reading - * or writing of the package. When attempting to obtain write access, the - * caller must check for the following two return values: - * - * ICE_SUCCESS - Means the caller has acquired the global config lock - * and can perform writing of the package. - * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the - * package or has found that no update was necessary; in - * this case, the caller can just skip performing any - * update of the package. - */ -static enum ice_status -ice_acquire_global_cfg_lock(struct ice_hw *hw, - enum ice_aq_res_access_type access) -{ - enum ice_status status; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access, - ICE_GLOBAL_CFG_LOCK_TIMEOUT); - - if (status == ICE_ERR_AQ_NO_WORK) - ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n"); - - return status; -} - -/** - * ice_release_global_cfg_lock - * @hw: pointer to the HW structure - * - * This function will release the global config lock. 
- */ -static void ice_release_global_cfg_lock(struct ice_hw *hw) -{ - ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID); -} - -/** - * ice_acquire_change_lock - * @hw: pointer to the HW structure - * @access: access type (read or write) - * - * This function will request ownership of the change lock. - */ -static enum ice_status -ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access) -{ - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access, - ICE_CHANGE_LOCK_TIMEOUT); -} - -/** - * ice_release_change_lock - * @hw: pointer to the HW structure - * - * This function will release the change lock using the proper Admin Command. - */ -static void ice_release_change_lock(struct ice_hw *hw) -{ - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID); -} - -/** - * ice_aq_download_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer to transfer - * @buf_size: the size of the package buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Download Package (0x0C40) - */ -static enum ice_status -ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, bool last_buf, u32 *error_offset, - u32 *error_info, struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg *cmd; - struct ice_aq_desc desc; - enum ice_status status; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg); - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = LE32_TO_CPU(resp->error_offset); - if (error_info) - *error_info = LE32_TO_CPU(resp->error_info); - } - - return status; -} - -/** - * ice_aq_upload_section - * @hw: pointer to the hardware structure - * @pkg_buf: the package buffer which will receive the section - * @buf_size: the size of the package buffer - * @cd: pointer to command details structure or NULL - * - * Upload Section (0x0C41) - */ -enum ice_status -ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, - u16 buf_size, struct ice_sq_cd *cd) -{ - struct ice_aq_desc desc; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section); - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); - - return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); -} - -/** - * ice_aq_update_pkg - * @hw: pointer to the hardware structure - * @pkg_buf: the package cmd buffer - * @buf_size: the size of the package cmd buffer - * @last_buf: last buffer indicator - * @error_offset: returns error offset - * @error_info: returns error information - * @cd: pointer to command details structure or NULL - * - * Update Package (0x0C42) - */ -static enum ice_status -ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, - bool last_buf, u32 *error_offset, u32 *error_info, - struct ice_sq_cd *cd) -{ - struct ice_aqc_download_pkg 
*cmd; - struct ice_aq_desc desc; - enum ice_status status; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - if (error_offset) - *error_offset = 0; - if (error_info) - *error_info = 0; - - cmd = &desc.params.download_pkg; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg); - desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); - - if (last_buf) - cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF; - - status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd); - if (status == ICE_ERR_AQ_ERROR) { - /* Read error from buffer only when the FW returned an error */ - struct ice_aqc_download_pkg_resp *resp; - - resp = (struct ice_aqc_download_pkg_resp *)pkg_buf; - if (error_offset) - *error_offset = LE32_TO_CPU(resp->error_offset); - if (error_info) - *error_info = LE32_TO_CPU(resp->error_info); - } - - return status; -} - -/** - * ice_find_seg_in_pkg - * @hw: pointer to the hardware structure - * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. - */ -static struct ice_generic_seg_hdr * -ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, - struct ice_pkg_hdr *pkg_hdr) -{ - u32 i; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n", - pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor, - pkg_hdr->pkg_format_ver.update, - pkg_hdr->pkg_format_ver.draft); - - /* Search all package segments for the requested segment type */ - for (i = 0; i < LE32_TO_CPU(pkg_hdr->seg_count); i++) { - struct ice_generic_seg_hdr *seg; - - seg = (struct ice_generic_seg_hdr *) - ((u8 *)pkg_hdr + LE32_TO_CPU(pkg_hdr->seg_offset[i])); - - if (LE32_TO_CPU(seg->seg_type) == seg_type) - return seg; - } - - return NULL; -} - -/** - * ice_update_pkg_no_lock - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - */ -static enum ice_status -ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_status status = ICE_SUCCESS; - u32 i; - - for (i = 0; i < count; i++) { - struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i); - bool last = ((i + 1) == count); - u32 offset, info; - - status = ice_aq_update_pkg(hw, bh, LE16_TO_CPU(bh->data_end), - last, &offset, &info, NULL); - - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n", - status, offset, info); - break; - } - } - - return status; -} - -/** - * ice_update_pkg - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains change lock and updates package. - */ -enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_status status; - - status = ice_acquire_change_lock(hw, ICE_RES_WRITE); - if (status) - return status; - - status = ice_update_pkg_no_lock(hw, bufs, count); - - ice_release_change_lock(hw); - - return status; -} - -/** - * ice_dwnld_cfg_bufs - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @count: the number of buffers in the array - * - * Obtains global config lock and downloads the package configuration buffers - * to the firmware. 
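/*
 * Illustrative sketch, not part of the ice driver sources:
 * ice_find_seg_in_pkg() above resolves each segment by adding a byte
 * offset from the package header's offset array to the start of the
 * package.  A self-contained version of that offset-table walk; the
 * struct layouts here are invented for the example, and the real code
 * converts offsets with LE32_TO_CPU() first (assumed already done here):
 */
#include <stddef.h>
#include <stdint.h>

struct seg_hdr {
	uint32_t seg_type;
	uint32_t seg_size;
};

struct pkg_hdr {
	uint32_t seg_count;
	uint32_t seg_offset[];	/* byte offsets from start of package */
};

static struct seg_hdr *
find_seg(struct pkg_hdr *pkg, uint32_t want_type)
{
	uint32_t i;

	for (i = 0; i < pkg->seg_count; i++) {
		struct seg_hdr *seg =
		    (struct seg_hdr *)((uint8_t *)pkg + pkg->seg_offset[i]);

		if (seg->seg_type == want_type)
			return seg;
	}
	return NULL;	/* requested segment type not present */
}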
Metadata buffers are skipped, and the first metadata buffer - * found indicates that the rest of the buffers are all metadata buffers. - */ -static enum ice_status -ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) -{ - enum ice_status status; - struct ice_buf_hdr *bh; - u32 offset, info, i; - - if (!bufs || !count) - return ICE_ERR_PARAM; - - /* If the first buffer's first section has its metadata bit set - * then there are no buffers to be downloaded, and the operation is - * considered a success. - */ - bh = (struct ice_buf_hdr *)bufs; - if (LE32_TO_CPU(bh->section_entry[0].type) & ICE_METADATA_BUF) - return ICE_SUCCESS; - - /* reset pkg_dwnld_status in case this function is called in the - * reset/rebuild flow - */ - hw->pkg_dwnld_status = ICE_AQ_RC_OK; - - status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); - if (status) { - if (status == ICE_ERR_AQ_NO_WORK) - hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST; - else - hw->pkg_dwnld_status = hw->adminq.sq_last_status; - return status; - } - - for (i = 0; i < count; i++) { - bool last = ((i + 1) == count); - - if (!last) { - /* check next buffer for metadata flag */ - bh = (struct ice_buf_hdr *)(bufs + i + 1); - - /* A set metadata flag in the next buffer will signal - * that the current buffer will be the last buffer - * downloaded - */ - if (LE16_TO_CPU(bh->section_count)) - if (LE32_TO_CPU(bh->section_entry[0].type) & - ICE_METADATA_BUF) - last = true; - } - - bh = (struct ice_buf_hdr *)(bufs + i); - - status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, - &offset, &info, NULL); - - /* Save AQ status from download package */ - hw->pkg_dwnld_status = hw->adminq.sq_last_status; - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", - status, offset, info); - - break; - } - - if (last) - break; - } - - if (!status) { - status = ice_set_vlan_mode(hw); - if (status) - ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n", - status); - } - - ice_release_global_cfg_lock(hw); - - return status; -} - -/** - * ice_aq_get_pkg_info_list - * @hw: pointer to the hardware structure - * @pkg_info: the buffer which will receive the information list - * @buf_size: the size of the pkg_info information buffer - * @cd: pointer to command details structure or NULL - * - * Get Package Info List (0x0C43) - */ -static enum ice_status -ice_aq_get_pkg_info_list(struct ice_hw *hw, - struct ice_aqc_get_pkg_info_resp *pkg_info, - u16 buf_size, struct ice_sq_cd *cd) -{ - struct ice_aq_desc desc; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list); - - return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd); -} - -/** - * ice_download_pkg - * @hw: pointer to the hardware structure - * @ice_seg: pointer to the segment of the package to be downloaded - * - * Handles the download of a complete package. 
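/*
 * Illustrative sketch, not part of the ice driver sources: the download
 * loop in ice_dwnld_cfg_bufs() above peeks one buffer ahead so the
 * 'last buffer' flag can be set either on the final array element or on
 * the buffer just before the first metadata buffer.  A standalone sketch
 * of that lookahead shape, with a hypothetical is_metadata() predicate
 * standing in for the driver's ICE_METADATA_BUF test:
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_metadata(int buf)
{
	return buf < 0;	/* stand-in rule: negative values are "metadata" */
}

static void download(const int *bufs, unsigned count)
{
	unsigned i;

	for (i = 0; i < count; i++) {
		bool last = (i + 1 == count);

		/* peek at the next buffer: metadata there means the
		 * current buffer is the last real download */
		if (!last && is_metadata(bufs[i + 1]))
			last = true;

		printf("send buf %d%s\n", bufs[i], last ? " (last)" : "");
		if (last)
			break;
	}
}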
- */ -static enum ice_status -ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg) -{ - struct ice_buf_table *ice_buf_tbl; - enum ice_status status; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n", - ice_seg->hdr.seg_format_ver.major, - ice_seg->hdr.seg_format_ver.minor, - ice_seg->hdr.seg_format_ver.update, - ice_seg->hdr.seg_format_ver.draft); - - ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n", - LE32_TO_CPU(ice_seg->hdr.seg_type), - LE32_TO_CPU(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id); - - ice_buf_tbl = ice_find_buf_table(ice_seg); - - ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n", - LE32_TO_CPU(ice_buf_tbl->buf_count)); - - status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array, - LE32_TO_CPU(ice_buf_tbl->buf_count)); - - ice_post_pkg_dwnld_vlan_mode_cfg(hw); - - return status; -} - -/** - * ice_init_pkg_info - * @hw: pointer to the hardware structure - * @pkg_hdr: pointer to the driver's package hdr - * - * Saves off the package details into the HW structure. - */ -static enum ice_status -ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) -{ - struct ice_generic_seg_hdr *seg_hdr; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - if (!pkg_hdr) - return ICE_ERR_PARAM; - - hw->pkg_seg_id = SEGMENT_TYPE_ICE_E810; - - ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n", - hw->pkg_seg_id); - - seg_hdr = (struct ice_generic_seg_hdr *) - ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr); - if (seg_hdr) { - struct ice_meta_sect *meta; - struct ice_pkg_enum state; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - /* Get package information from the Metadata Section */ - meta = (struct ice_meta_sect *) - ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state, - ICE_SID_METADATA); - if (!meta) { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n"); - return ICE_ERR_CFG; - } - - hw->pkg_ver = meta->ver; - ice_memcpy(hw->pkg_name, meta->name, sizeof(meta->name), - ICE_NONDMA_TO_NONDMA); - - ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n", - meta->ver.major, meta->ver.minor, meta->ver.update, - meta->ver.draft, meta->name); - - hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver; - ice_memcpy(hw->ice_seg_id, seg_hdr->seg_id, - sizeof(hw->ice_seg_id), ICE_NONDMA_TO_NONDMA); - - ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n", - seg_hdr->seg_format_ver.major, - seg_hdr->seg_format_ver.minor, - seg_hdr->seg_format_ver.update, - seg_hdr->seg_format_ver.draft, - seg_hdr->seg_id); - } else { - ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n"); - return ICE_ERR_CFG; - } - - return ICE_SUCCESS; -} - -/** - * ice_get_pkg_info - * @hw: pointer to the hardware structure - * - * Store details of the package currently loaded in HW into the HW structure. 
- */ -static enum ice_status ice_get_pkg_info(struct ice_hw *hw) -{ - struct ice_aqc_get_pkg_info_resp *pkg_info; - enum ice_status status; - u16 size; - u32 i; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - size = ice_struct_size(pkg_info, pkg_info, ICE_PKG_CNT); - pkg_info = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); - if (!pkg_info) - return ICE_ERR_NO_MEMORY; - - status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL); - if (status) - goto init_pkg_free_alloc; - - for (i = 0; i < LE32_TO_CPU(pkg_info->count); i++) { -#define ICE_PKG_FLAG_COUNT 4 - char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 }; - u8 place = 0; - - if (pkg_info->pkg_info[i].is_active) { - flags[place++] = 'A'; - hw->active_pkg_ver = pkg_info->pkg_info[i].ver; - hw->active_track_id = - LE32_TO_CPU(pkg_info->pkg_info[i].track_id); - ice_memcpy(hw->active_pkg_name, - pkg_info->pkg_info[i].name, - sizeof(pkg_info->pkg_info[i].name), - ICE_NONDMA_TO_NONDMA); - hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm; - } - if (pkg_info->pkg_info[i].is_active_at_boot) - flags[place++] = 'B'; - if (pkg_info->pkg_info[i].is_modified) - flags[place++] = 'M'; - if (pkg_info->pkg_info[i].is_in_nvm) - flags[place++] = 'N'; - - ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", - i, pkg_info->pkg_info[i].ver.major, - pkg_info->pkg_info[i].ver.minor, - pkg_info->pkg_info[i].ver.update, - pkg_info->pkg_info[i].ver.draft, - pkg_info->pkg_info[i].name, flags); - } - -init_pkg_free_alloc: - ice_free(hw, pkg_info); - - return status; -} - -/** - * ice_find_label_value - * @ice_seg: pointer to the ice segment (non-NULL) - * @name: name of the label to search for - * @type: the section type that will contain the label - * @value: pointer to a value that will return the label's value if found - * - * Finds a label's value given the label name and the section type to search. - * The ice_seg parameter must not be NULL since the first call to - * ice_enum_labels requires a pointer to an actual ice_seg structure. - */ -enum ice_status -ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, - u16 *value) -{ - struct ice_pkg_enum state; - char *label_name; - u16 val; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!ice_seg) - return ICE_ERR_PARAM; - - do { - label_name = ice_enum_labels(ice_seg, type, &state, &val); - if (label_name && !strcmp(label_name, name)) { - *value = val; - return ICE_SUCCESS; - } - - ice_seg = NULL; - } while (label_name); - - return ICE_ERR_CFG; -} - -/** - * ice_verify_pkg - verify package - * @pkg: pointer to the package buffer - * @len: size of the package buffer - * - * Verifies various attributes of the package file, including length, format - * version, and the requirement of at least one segment. 
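/*
 * Illustrative sketch, not part of the ice driver sources:
 * ice_get_pkg_info() above condenses the per-package booleans into a
 * short 'A'/'B'/'M'/'N' flag string for the debug log.  The same
 * pattern in miniature; the field names here are invented:
 */
#include <stdbool.h>
#include <stdio.h>

struct pkg_flags {
	bool active, at_boot, modified, in_nvm;
};

static void log_pkg(const struct pkg_flags *p)
{
	char flags[5] = { 0 };	/* up to 4 flag chars + NUL */
	int n = 0;

	if (p->active)
		flags[n++] = 'A';
	if (p->at_boot)
		flags[n++] = 'B';
	if (p->modified)
		flags[n++] = 'M';
	if (p->in_nvm)
		flags[n++] = 'N';
	printf("pkg flags: %s\n", flags);
}

int main(void)
{
	struct pkg_flags p = { true, false, true, true };

	log_pkg(&p);	/* prints "pkg flags: AMN" */
	return 0;
}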
- */ -static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) -{ - u32 seg_count; - u32 i; - - if (len < ice_struct_size(pkg, seg_offset, 1)) - return ICE_ERR_BUF_TOO_SHORT; - - if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || - pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR || - pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD || - pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT) - return ICE_ERR_CFG; - - /* pkg must have at least one segment */ - seg_count = LE32_TO_CPU(pkg->seg_count); - if (seg_count < 1) - return ICE_ERR_CFG; - - /* make sure segment array fits in package length */ - if (len < ice_struct_size(pkg, seg_offset, seg_count)) - return ICE_ERR_BUF_TOO_SHORT; - - /* all segments must fit within length */ - for (i = 0; i < seg_count; i++) { - u32 off = LE32_TO_CPU(pkg->seg_offset[i]); - struct ice_generic_seg_hdr *seg; - - /* segment header must fit */ - if (len < off + sizeof(*seg)) - return ICE_ERR_BUF_TOO_SHORT; - - seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); - - /* segment body must fit */ - if (len < off + LE32_TO_CPU(seg->seg_size)) - return ICE_ERR_BUF_TOO_SHORT; - } - - return ICE_SUCCESS; -} - -/** - * ice_free_seg - free package segment pointer - * @hw: pointer to the hardware structure - * - * Frees the package segment pointer in the proper manner, depending on if the - * segment was allocated or just the passed in pointer was stored. - */ -void ice_free_seg(struct ice_hw *hw) -{ - if (hw->pkg_copy) { - ice_free(hw, hw->pkg_copy); - hw->pkg_copy = NULL; - hw->pkg_size = 0; - } - hw->seg = NULL; -} - -/** - * ice_init_pkg_regs - initialize additional package registers - * @hw: pointer to the hardware structure - */ -static void ice_init_pkg_regs(struct ice_hw *hw) -{ -#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF -#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF -#define ICE_SW_BLK_IDX 0 - - /* setup Switch block input mask, which is 48-bits in two parts */ - wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L); - wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H); -} - -/** - * ice_chk_pkg_version - check package version for compatibility with driver - * @pkg_ver: pointer to a version structure to check - * - * Check to make sure that the package about to be downloaded is compatible with - * the driver. To be compatible, the major and minor components of the package - * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR - * definitions. 
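/*
 * Illustrative sketch, not part of the ice driver sources:
 * ice_verify_pkg() above checks that every (offset, size) pair stays
 * inside the package buffer before anything dereferences it.  A hedged
 * standalone form of that check; note the subtraction style used here
 * sidesteps the integer overflow that a naive 'off + size > len'
 * comparison could hit on adversarial inputs:
 */
#include <stdbool.h>
#include <stdint.h>

static bool
range_fits(uint32_t buf_len, uint32_t off, uint32_t size)
{
	if (off > buf_len)
		return false;
	return size <= buf_len - off;	/* no overflow possible */
}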
- */ -static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) -{ - if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ || - pkg_ver->minor != ICE_PKG_SUPP_VER_MNR) - return ICE_ERR_NOT_SUPPORTED; - - return ICE_SUCCESS; -} - -/** - * ice_chk_pkg_compat - * @hw: pointer to the hardware structure - * @ospkg: pointer to the package hdr - * @seg: pointer to the package segment hdr - * - * This function checks the package version compatibility with driver and NVM - */ -static enum ice_status -ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, - struct ice_seg **seg) -{ - struct ice_aqc_get_pkg_info_resp *pkg; - enum ice_status status; - u16 size; - u32 i; - - ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); - - /* Check package version compatibility */ - status = ice_chk_pkg_version(&hw->pkg_ver); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n"); - return status; - } - - /* find ICE segment in given package */ - *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id, - ospkg); - if (!*seg) { - ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n"); - return ICE_ERR_CFG; - } - - /* Check if FW is compatible with the OS package */ - size = ice_struct_size(pkg, pkg_info, ICE_PKG_CNT); - pkg = (struct ice_aqc_get_pkg_info_resp *)ice_malloc(hw, size); - if (!pkg) - return ICE_ERR_NO_MEMORY; - - status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL); - if (status) - goto fw_ddp_compat_free_alloc; - - for (i = 0; i < LE32_TO_CPU(pkg->count); i++) { - /* loop till we find the NVM package */ - if (!pkg->pkg_info[i].is_in_nvm) - continue; - if ((*seg)->hdr.seg_format_ver.major != - pkg->pkg_info[i].ver.major || - (*seg)->hdr.seg_format_ver.minor > - pkg->pkg_info[i].ver.minor) { - status = ICE_ERR_FW_DDP_MISMATCH; - ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n"); - } - /* done processing NVM package so break */ - break; - } -fw_ddp_compat_free_alloc: - ice_free(hw, pkg); - return status; -} - -/** - * ice_sw_fv_handler - * @sect_type: section type - * @section: pointer to section - * @index: index of the field vector entry to be returned - * @offset: ptr to variable that receives the offset in the field vector table - * - * This is a callback function that can be passed to ice_pkg_enum_entry. - * This function treats the given section as of type ice_sw_fv_section and - * enumerates offset field. "offset" is an index into the field vector table. - */ -static void * -ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset) -{ - struct ice_sw_fv_section *fv_section = - (struct ice_sw_fv_section *)section; - - if (!section || sect_type != ICE_SID_FLD_VEC_SW) - return NULL; - if (index >= LE16_TO_CPU(fv_section->count)) - return NULL; - if (offset) - /* "index" passed in to this function is relative to a given - * 4k block. To get to the true index into the field vector - * table need to add the relative index to the base_offset - * field of this section - */ - *offset = LE16_TO_CPU(fv_section->base_offset) + index; - return fv_section->fv + index; -} - -/** - * ice_get_prof_index_max - get the max profile index for used profile - * @hw: pointer to the HW struct - * - * Calling this function will get the max profile index for used profile - * and store the index number in struct ice_switch_info *switch_info - * in hw for following use. 
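/*
 * Illustrative sketch, not part of the ice driver sources: the two
 * compatibility checks above follow a common semantic-version pattern --
 * the major number must match exactly, while the minor number may only
 * equal or trail the reference.  Sketch under those assumptions:
 */
#include <stdbool.h>
#include <stdint.h>

struct ver {
	uint8_t major, minor;
};

/* driver vs. package check: both fields must match the supported pair */
static bool ver_supported(struct ver v, struct ver supp)
{
	return v.major == supp.major && v.minor == supp.minor;
}

/* OS package vs. NVM package: same major, and the OS segment's minor
 * must not be newer than what the NVM carries */
static bool seg_compatible(struct ver seg, struct ver nvm)
{
	return seg.major == nvm.major && seg.minor <= nvm.minor;
}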
- */ -static int ice_get_prof_index_max(struct ice_hw *hw) -{ - u16 prof_index = 0, j, max_prof_index = 0; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - bool flag = false; - struct ice_fv *fv; - u32 offset; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!hw->seg) - return ICE_ERR_PARAM; - - ice_seg = hw->seg; - - do { - fv = (struct ice_fv *) - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* in the profile that not be used, the prot_id is set to 0xff - * and the off is set to 0x1ff for all the field vectors. - */ - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id != ICE_PROT_INVALID || - fv->ew[j].off != ICE_FV_OFFSET_INVAL) - flag = true; - if (flag && prof_index > max_prof_index) - max_prof_index = prof_index; - - prof_index++; - flag = false; - } while (fv); - - hw->switch_info->max_used_prof_index = max_prof_index; - - return ICE_SUCCESS; -} - -/** - * ice_init_pkg - initialize/download package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function initializes a package. The package contains HW tables - * required to do packet processing. First, the function extracts package - * information such as version. Then it finds the ice configuration segment - * within the package; this function then saves a copy of the segment pointer - * within the supplied package buffer. Next, the function will cache any hints - * from the package, followed by downloading the package itself. Note, that if - * a previous PF driver has already downloaded the package successfully, then - * the current driver will not have to download the package again. - * - * The local package contents will be used to query default behavior and to - * update specific sections of the HW's version of the package (e.g. to update - * the parse graph to understand new protocols). - * - * This function stores a pointer to the package buffer memory, and it is - * expected that the supplied buffer will not be freed immediately. If the - * package buffer needs to be freed, such as when read from a file, use - * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this - * case. - */ -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len) -{ - struct ice_pkg_hdr *pkg; - enum ice_status status; - struct ice_seg *seg; - - if (!buf || !len) - return ICE_ERR_PARAM; - - pkg = (struct ice_pkg_hdr *)buf; - status = ice_verify_pkg(pkg, len); - if (status) { - ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n", - status); - return status; - } - - /* initialize package info */ - status = ice_init_pkg_info(hw, pkg); - if (status) - return status; - - /* before downloading the package, check package version for - * compatibility with driver - */ - status = ice_chk_pkg_compat(hw, pkg, &seg); - if (status) - return status; - - /* initialize package hints and then download package */ - ice_init_pkg_hints(hw, seg); - status = ice_download_pkg(hw, seg); - if (status == ICE_ERR_AQ_NO_WORK) { - ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n"); - status = ICE_SUCCESS; - } - - /* Get information on the package currently loaded in HW, then make sure - * the driver is compatible with this version. 
- */ - if (!status) { - status = ice_get_pkg_info(hw); - if (!status) - status = ice_chk_pkg_version(&hw->active_pkg_ver); - } - - if (!status) { - hw->seg = seg; - /* on successful package download update other required - * registers to support the package and fill HW tables - * with package content. - */ - ice_init_pkg_regs(hw); - ice_fill_blk_tbls(hw); - ice_get_prof_index_max(hw); - } else { - ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", - status); - } - - return status; -} - -/** - * ice_copy_and_init_pkg - initialize/download a copy of the package - * @hw: pointer to the hardware structure - * @buf: pointer to the package buffer - * @len: size of the package buffer - * - * This function copies the package buffer, and then calls ice_init_pkg() to - * initialize the copied package contents. - * - * The copying is necessary if the package buffer supplied is constant, or if - * the memory may disappear shortly after calling this function. - * - * If the package buffer resides in the data segment and can be modified, the - * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg(). - * - * However, if the package buffer needs to be copied first, such as when being - * read from a file, the caller should use ice_copy_and_init_pkg(). - * - * This function will first copy the package buffer, before calling - * ice_init_pkg(). The caller is free to immediately destroy the original - * package buffer, as the new copy will be managed by this function and - * related routines. - */ -enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len) -{ - enum ice_status status; - u8 *buf_copy; - - if (!buf || !len) - return ICE_ERR_PARAM; - - buf_copy = (u8 *)ice_memdup(hw, buf, len, ICE_NONDMA_TO_NONDMA); - - status = ice_init_pkg(hw, buf_copy, len); - if (status) { - /* Free the copy, since we failed to initialize the package */ - ice_free(hw, buf_copy); - } else { - /* Track the copied pkg so we can free it later */ - hw->pkg_copy = buf_copy; - hw->pkg_size = len; - } - - return status; -} - -/** - * ice_pkg_buf_alloc - * @hw: pointer to the HW structure - * - * Allocates a package buffer and returns a pointer to the buffer header. - * Note: all package contents must be in Little Endian form. 
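/*
 * Illustrative sketch, not part of the ice driver sources:
 * ice_copy_and_init_pkg() above is an ownership-transfer pattern --
 * duplicate the caller's buffer, then either free the copy on failure or
 * record it for later teardown on success.  A generic version using
 * plain malloc/memcpy in place of the driver's ice_memdup()/ice_free():
 */
#include <stdlib.h>
#include <string.h>

struct ctx {
	void *copy;	/* owned duplicate, freed at teardown */
	size_t size;
};

static int init_buf(struct ctx *c, const void *buf, size_t len,
    int (*init)(void *, size_t))
{
	void *dup = malloc(len);
	int err;

	if (dup == NULL)
		return -1;
	memcpy(dup, buf, len);

	err = init(dup, len);
	if (err != 0) {
		free(dup);	/* failed: nothing to track */
		return err;
	}
	c->copy = dup;		/* success: context now owns the copy */
	c->size = len;
	return 0;
}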
- */ -static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw) -{ - struct ice_buf_build *bld; - struct ice_buf_hdr *buf; - - bld = (struct ice_buf_build *)ice_malloc(hw, sizeof(*bld)); - if (!bld) - return NULL; - - buf = (struct ice_buf_hdr *)bld; - buf->data_end = CPU_TO_LE16(offsetof(struct ice_buf_hdr, - section_entry)); - return bld; -} - -/** - * ice_get_sw_prof_type - determine switch profile type - * @hw: pointer to the HW structure - * @fv: pointer to the switch field vector - */ -static enum ice_prof_type -ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv) -{ - u16 i; - - for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { - /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && - fv->ew[i].off == ICE_VNI_OFFSET) - return ICE_PROF_TUN_UDP; - - /* GRE tunnel will have GRE protocol */ - if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF) - return ICE_PROF_TUN_GRE; - } - - return ICE_PROF_NON_TUN; -} - -/** - * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type - * @hw: pointer to hardware structure - * @req_profs: type of profiles requested - * @bm: pointer to memory for returning the bitmap of field vectors - */ -void -ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs, - ice_bitmap_t *bm) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - if (req_profs == ICE_PROF_ALL) { - ice_bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES); - return; - } - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - ice_zero_bitmap(bm, ICE_MAX_NUM_PROFILES); - ice_seg = hw->seg; - do { - enum ice_prof_type prof_type; - u32 offset; - - fv = (struct ice_fv *) - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - ice_seg = NULL; - - if (fv) { - /* Determine field vector type */ - prof_type = ice_get_sw_prof_type(hw, fv); - - if (req_profs & prof_type) - ice_set_bit((u16)offset, bm); - } - } while (fv); -} - -/** - * ice_get_sw_fv_list - * @hw: pointer to the HW structure - * @prot_ids: field vector to search for with a given protocol ID - * @ids_cnt: lookup/protocol count - * @bm: bitmap of field vectors to consider - * @fv_list: Head of a list - * - * Finds all the field vector entries from switch block that contain - * a given protocol ID and returns a list of structures of type - * "ice_sw_fv_list_entry". Every structure in the list has a field vector - * definition and profile ID information - * NOTE: The caller of the function is responsible for freeing the memory - * allocated for every list entry. - */ -enum ice_status -ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, - ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list) -{ - struct ice_sw_fv_list_entry *fvl; - struct ice_sw_fv_list_entry *tmp; - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - u32 offset; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!ids_cnt || !hw->seg) - return ICE_ERR_PARAM; - - ice_seg = hw->seg; - do { - u16 i; - - fv = (struct ice_fv *) - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &offset, ice_sw_fv_handler); - if (!fv) - break; - ice_seg = NULL; - - /* If field vector is not in the bitmap list, then skip this - * profile. 
- */ - if (!ice_is_bit_set(bm, (u16)offset)) - continue; - - for (i = 0; i < ids_cnt; i++) { - int j; - - /* This code assumes that if a switch field vector line - * has a matching protocol, then this line will contain - * the entries necessary to represent every field in - * that protocol header. - */ - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv->ew[j].prot_id == prot_ids[i]) - break; - if (j >= hw->blk[ICE_BLK_SW].es.fvw) - break; - if (i + 1 == ids_cnt) { - fvl = (struct ice_sw_fv_list_entry *) - ice_malloc(hw, sizeof(*fvl)); - if (!fvl) - goto err; - fvl->fv_ptr = fv; - fvl->profile_id = offset; - LIST_ADD(&fvl->list_entry, fv_list); - break; - } - } - } while (fv); - if (LIST_EMPTY(fv_list)) - return ICE_ERR_CFG; - return ICE_SUCCESS; - -err: - LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry, - list_entry) { - LIST_DEL(&fvl->list_entry); - ice_free(hw, fvl); - } - - return ICE_ERR_NO_MEMORY; -} - -/** - * ice_init_prof_result_bm - Initialize the profile result index bitmap - * @hw: pointer to hardware structure - */ -void ice_init_prof_result_bm(struct ice_hw *hw) -{ - struct ice_pkg_enum state; - struct ice_seg *ice_seg; - struct ice_fv *fv; - - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); - - if (!hw->seg) - return; - - ice_seg = hw->seg; - do { - u32 off; - u16 i; - - fv = (struct ice_fv *) - ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW, - &off, ice_sw_fv_handler); - ice_seg = NULL; - if (!fv) - break; - - ice_zero_bitmap(hw->switch_info->prof_res_bm[off], - ICE_MAX_FV_WORDS); - - /* Determine empty field vector indices, these can be - * used for recipe results. Skip index 0, since it is - * always used for Switch ID. - */ - for (i = 1; i < ICE_MAX_FV_WORDS; i++) - if (fv->ew[i].prot_id == ICE_PROT_INVALID && - fv->ew[i].off == ICE_FV_OFFSET_INVAL) - ice_set_bit(i, - hw->switch_info->prof_res_bm[off]); - } while (fv); -} - -/** - * ice_pkg_buf_free - * @hw: pointer to the HW structure - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Frees a package buffer - */ -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld) -{ - ice_free(hw, bld); -} - -/** - * ice_pkg_buf_reserve_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @count: the number of sections to reserve - * - * Reserves one or more section table entries in a package buffer. This routine - * can be called multiple times as long as they are made before calling - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() - * is called once, the number of sections that can be allocated will not be able - * to be increased; not using all reserved sections is fine, but this will - * result in some wasted space in the buffer. - * Note: all package contents must be in Little Endian form. 
- */ -static enum ice_status -ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count) -{ - struct ice_buf_hdr *buf; - u16 section_count; - u16 data_end; - - if (!bld) - return ICE_ERR_PARAM; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* already an active section, can't increase table size */ - section_count = LE16_TO_CPU(buf->section_count); - if (section_count > 0) - return ICE_ERR_CFG; - - if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT) - return ICE_ERR_CFG; - bld->reserved_section_table_entries += count; - - data_end = LE16_TO_CPU(buf->data_end) + - FLEX_ARRAY_SIZE(buf, section_entry, count); - buf->data_end = CPU_TO_LE16(data_end); - - return ICE_SUCCESS; -} - -/** - * ice_pkg_buf_alloc_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * - * Reserves memory in the buffer for a section's content and updates the - * buffers' status accordingly. This routine returns a pointer to the first - * byte of the section start within the buffer, which is used to fill in the - * section contents. - * Note: all package contents must be in Little Endian form. - */ -static void * -ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size) -{ - struct ice_buf_hdr *buf; - u16 sect_count; - u16 data_end; - - if (!bld || !type || !size) - return NULL; - - buf = (struct ice_buf_hdr *)&bld->buf; - - /* check for enough space left in buffer */ - data_end = LE16_TO_CPU(buf->data_end); - - /* section start must align on 4 byte boundary */ - data_end = ICE_ALIGN(data_end, 4); - - if ((data_end + size) > ICE_MAX_S_DATA_END) - return NULL; - - /* check for more available section table entries */ - sect_count = LE16_TO_CPU(buf->section_count); - if (sect_count < bld->reserved_section_table_entries) { - void *section_ptr = ((u8 *)buf) + data_end; - - buf->section_entry[sect_count].offset = CPU_TO_LE16(data_end); - buf->section_entry[sect_count].size = CPU_TO_LE16(size); - buf->section_entry[sect_count].type = CPU_TO_LE32(type); - - data_end += size; - buf->data_end = CPU_TO_LE16(data_end); - - buf->section_count = CPU_TO_LE16(sect_count + 1); - return section_ptr; - } - - /* no free section table entries */ - return NULL; -} - -/** - * ice_pkg_buf_alloc_single_section - * @hw: pointer to the HW structure - * @type: the section type value - * @size: the size of the section to reserve (in bytes) - * @section: returns pointer to the section - * - * Allocates a package buffer with a single section. - * Note: all package contents must be in Little Endian form. 
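/*
 * Illustrative sketch, not part of the ice driver sources: the package
 * buffer builder above works in two phases -- reserve section-table
 * entries while the buffer is still empty, then carve 4-byte-aligned
 * section bodies out of the remaining space.  A compact standalone shape
 * of that idea with invented limits (the real builder also grows
 * data_end for each reserved table entry; that detail is omitted here):
 */
#include <stddef.h>
#include <stdint.h>

#define BUF_MAX	4096

struct builder {
	uint8_t buf[BUF_MAX];
	size_t data_end;	/* next free byte */
	unsigned reserved;	/* reserved table entries */
	unsigned used;		/* sections handed out so far */
};

static int reserve(struct builder *b, unsigned count)
{
	if (b->used > 0)
		return -1;	/* table is frozen once a section exists */
	b->reserved += count;
	return 0;
}

static void *alloc_section(struct builder *b, size_t size)
{
	size_t start = (b->data_end + 3) & ~(size_t)3;	/* 4-byte align */

	if (b->used >= b->reserved || start + size > BUF_MAX)
		return NULL;	/* no table entry or no space left */
	b->used++;
	b->data_end = start + size;
	return &b->buf[start];
}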
- */ -struct ice_buf_build * -ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, - void **section) -{ - struct ice_buf_build *buf; - - if (!section) - return NULL; - - buf = ice_pkg_buf_alloc(hw); - if (!buf) - return NULL; - - if (ice_pkg_buf_reserve_section(buf, 1)) - goto ice_pkg_buf_alloc_single_section_err; - - *section = ice_pkg_buf_alloc_section(buf, type, size); - if (!*section) - goto ice_pkg_buf_alloc_single_section_err; - - return buf; + if (count > max) + return false; + } -ice_pkg_buf_alloc_single_section_err: - ice_pkg_buf_free(hw, buf); - return NULL; + return true; } /** - * ice_pkg_buf_unreserve_section - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * @count: the number of sections to unreserve + * ice_set_key - generate a variable sized key with multiples of 16-bits + * @key: pointer to where the key will be stored + * @size: the size of the complete key in bytes (must be even) + * @val: array of 8-bit values that makes up the value portion of the key + * @upd: array of 8-bit masks that determine what key portion to update + * @dc: array of 8-bit masks that make up the don't care mask + * @nm: array of 8-bit masks that make up the never match mask + * @off: the offset of the first byte in the key to update + * @len: the number of bytes in the key update * - * Unreserves one or more section table entries in a package buffer, releasing - * space that can be used for section data. This routine can be called - * multiple times as long as they are made before calling - * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() - * is called once, the number of sections that can be allocated will not be able - * to be increased; not using all reserved sections is fine, but this will - * result in some wasted space in the buffer. - * Note: all package contents must be in Little Endian form. + * This function generates a key from a value, a don't care mask and a never + * match mask. + * upd, dc, and nm are optional parameters, and can be NULL: + * upd == NULL --> upd mask is all 1's (update all bits) + * dc == NULL --> dc mask is all 0's (no don't care bits) + * nm == NULL --> nm mask is all 0's (no never match bits) */ -enum ice_status -ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count) +static enum ice_status +ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off, + u16 len) { - struct ice_buf_hdr *buf; - u16 section_count; - u16 data_end; - - if (!bld) - return ICE_ERR_PARAM; + u16 half_size; + u16 i; - buf = (struct ice_buf_hdr *)&bld->buf; + /* size must be a multiple of 2 bytes. */ + if (size % 2) + return ICE_ERR_CFG; + half_size = size / 2; - /* already an active section, can't decrease table size */ - section_count = LE16_TO_CPU(buf->section_count); - if (section_count > 0) + if (off + len > half_size) return ICE_ERR_CFG; - if (count > bld->reserved_section_table_entries) + /* Make sure at most one bit is set in the never match mask. Having more + * than one never match mask bit set will cause HW to consume excessive + * power otherwise; this is a power management efficiency check. + */ +#define ICE_NVR_MTCH_BITS_MAX 1 + if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX)) return ICE_ERR_CFG; - bld->reserved_section_table_entries -= count; - data_end = LE16_TO_CPU(buf->data_end) - - FLEX_ARRAY_SIZE(buf, section_entry, count); - buf->data_end = CPU_TO_LE16(data_end); + for (i = 0; i < len; i++) + if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff, + dc ? dc[i] : 0, nm ? 
nm[i] : 0, + key + off + i, key + half_size + off + i)) + return ICE_ERR_CFG; return ICE_SUCCESS; } -/** - * ice_pkg_buf_get_free_space - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Returns the number of free bytes remaining in the buffer. - * Note: all package contents must be in Little Endian form. - */ -u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld) -{ - struct ice_buf_hdr *buf; - - if (!bld) - return 0; - - buf = (struct ice_buf_hdr *)&bld->buf; - return ICE_MAX_S_DATA_END - LE16_TO_CPU(buf->data_end); -} - -/** - * ice_pkg_buf_get_active_sections - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Returns the number of active sections. Before using the package buffer - * in an update package command, the caller should make sure that there is at - * least one active section - otherwise, the buffer is not legal and should - * not be used. - * Note: all package contents must be in Little Endian form. - */ -static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld) -{ - struct ice_buf_hdr *buf; - - if (!bld) - return 0; - - buf = (struct ice_buf_hdr *)&bld->buf; - return LE16_TO_CPU(buf->section_count); -} - -/** - * ice_pkg_buf - * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc()) - * - * Return a pointer to the buffer's header - */ -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld) -{ - if (!bld) - return NULL; - - return &bld->buf; -} - /** * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage * @hw: pointer to the HW structure * @port: port to search for * @index: optionally returns index * * Returns whether a port is already in use as a tunnel, and optionally its * index */ static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index) { u16 i; for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { if (index) *index = i; return true; } return false; } /** * ice_tunnel_port_in_use * @hw: pointer to the HW structure * @port: port to search for * @index: optionally returns index * * Returns whether a port is already in use as a tunnel, and optionally its * index */ bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index) { bool res; ice_acquire_lock(&hw->tnl_lock); res = ice_tunnel_port_in_use_hlpr(hw, port, index); ice_release_lock(&hw->tnl_lock); return res; } /** * ice_tunnel_get_type * @hw: pointer to the HW structure * @port: port to search for * @type: returns tunnel index * * For a given port number, will return the type of tunnel. 
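/*
 * Illustrative sketch, not part of the ice driver sources: the tunnel
 * table accessors above keep an unlocked _hlpr() variant for callers
 * that already hold hw->tnl_lock, plus a thin locked wrapper for
 * everyone else.  The same split in portable C, with pthreads standing
 * in for the driver's ice_acquire_lock()/ice_release_lock():
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static int tbl[16];
static unsigned tbl_count;

/* caller must hold tbl_lock */
static bool port_in_use_locked(int port, unsigned *index)
{
	unsigned i;

	for (i = 0; i < tbl_count; i++)
		if (tbl[i] == port) {
			if (index)
				*index = i;
			return true;
		}
	return false;
}

static bool port_in_use(int port, unsigned *index)
{
	bool res;

	pthread_mutex_lock(&tbl_lock);
	res = port_in_use_locked(port, index);
	pthread_mutex_unlock(&tbl_lock);
	return res;
}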
*/ bool ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type) { bool res = false; u16 i; ice_acquire_lock(&hw->tnl_lock); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) { *type = hw->tnl.tbl[i].type; res = true; break; } ice_release_lock(&hw->tnl_lock); return res; } /** * ice_find_free_tunnel_entry * @hw: pointer to the HW structure * @type: tunnel type * @index: optionally returns index * * Returns whether there is a free tunnel entry, and optionally its index */ static bool ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type, u16 *index) { u16 i; for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].type == type) { if (index) *index = i; return true; } return false; } /** * ice_get_open_tunnel_port - retrieve an open tunnel port * @hw: pointer to the HW structure * @type: tunnel type (TNL_ALL will return any open port) * @port: returns open port */ bool ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, u16 *port) { bool res = false; u16 i; ice_acquire_lock(&hw->tnl_lock); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && (type == TNL_ALL || hw->tnl.tbl[i].type == type)) { *port = hw->tnl.tbl[i].port; res = true; break; } ice_release_lock(&hw->tnl_lock); return res; } /** * ice_create_tunnel * @hw: pointer to the HW structure * @type: type of tunnel * @port: port of tunnel to create * * Create a tunnel by updating the parse graph in the parser. We do that by * creating a package buffer with the tunnel info and issuing an update package * command. */ enum ice_status ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) { struct ice_boost_tcam_section *sect_rx, *sect_tx; enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; u16 index; ice_acquire_lock(&hw->tnl_lock); if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) { hw->tnl.tbl[index].ref++; status = ICE_SUCCESS; goto ice_create_tunnel_end; } if (!ice_find_free_tunnel_entry(hw, type, &index)) { status = ICE_ERR_OUT_OF_RANGE; goto ice_create_tunnel_end; } bld = ice_pkg_buf_alloc(hw); if (!bld) { status = ICE_ERR_NO_MEMORY; goto ice_create_tunnel_end; } /* allocate 2 sections, one for Rx parser, one for Tx parser */ if (ice_pkg_buf_reserve_section(bld, 2)) goto ice_create_tunnel_err; sect_rx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, ice_struct_size(sect_rx, tcam, 1)); if (!sect_rx) goto ice_create_tunnel_err; sect_rx->count = CPU_TO_LE16(1); sect_tx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, ice_struct_size(sect_tx, tcam, 1)); if (!sect_tx) goto ice_create_tunnel_err; sect_tx->count = CPU_TO_LE16(1); /* copy original boost entry to update package buffer */ ice_memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry, sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA); /* over-write the never-match dest port key bits with the encoded port * bits */ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key), (u8 *)&port, NULL, NULL, NULL, (u16)offsetof(struct ice_boost_key_value, hv_dst_port_key), sizeof(sect_rx->tcam[0].key.key.hv_dst_port_key)); /* exact copy of entry to Tx section entry */ ice_memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam), ICE_NONDMA_TO_NONDMA); status = ice_update_pkg(hw,
ice_pkg_buf(bld), 1); if (!status) { hw->tnl.tbl[index].port = port; hw->tnl.tbl[index].in_use = true; hw->tnl.tbl[index].ref = 1; } ice_create_tunnel_err: ice_pkg_buf_free(hw, bld); ice_create_tunnel_end: ice_release_lock(&hw->tnl_lock); return status; } /** * ice_destroy_tunnel * @hw: pointer to the HW structure * @port: port of tunnel to destroy (ignored if the all parameter is true) * @all: flag that states to destroy all tunnels * * Destroys a tunnel or all tunnels by creating an update package buffer * targeting the specific updates requested and then performing an update * package. */ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) { struct ice_boost_tcam_section *sect_rx, *sect_tx; enum ice_status status = ICE_ERR_MAX_LIMIT; struct ice_buf_build *bld; u16 count = 0; u16 index; u16 size; u16 i, j; ice_acquire_lock(&hw->tnl_lock); if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index)) if (hw->tnl.tbl[index].ref > 1) { hw->tnl.tbl[index].ref--; status = ICE_SUCCESS; goto ice_destroy_tunnel_end; } /* determine count */ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && (all || hw->tnl.tbl[i].port == port)) count++; if (!count) { status = ICE_ERR_PARAM; goto ice_destroy_tunnel_end; } /* size of section - there is at least one entry */ size = ice_struct_size(sect_rx, tcam, count); bld = ice_pkg_buf_alloc(hw); if (!bld) { status = ICE_ERR_NO_MEMORY; goto ice_destroy_tunnel_end; } /* allocate 2 sections, one for Rx parser, one for Tx parser */ if (ice_pkg_buf_reserve_section(bld, 2)) goto ice_destroy_tunnel_err; sect_rx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, size); if (!sect_rx) goto ice_destroy_tunnel_err; sect_rx->count = CPU_TO_LE16(count); sect_tx = (struct ice_boost_tcam_section *) ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM, size); if (!sect_tx) goto ice_destroy_tunnel_err; sect_tx->count = CPU_TO_LE16(count); /* copy original boost entry to update package buffer, one copy to Rx * section, another copy to the Tx section */ for (i = 0, j = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use && (all || hw->tnl.tbl[i].port == port)) { ice_memcpy(sect_rx->tcam + j, hw->tnl.tbl[i].boost_entry, sizeof(*sect_rx->tcam), ICE_NONDMA_TO_NONDMA); ice_memcpy(sect_tx->tcam + j, hw->tnl.tbl[i].boost_entry, sizeof(*sect_tx->tcam), ICE_NONDMA_TO_NONDMA); hw->tnl.tbl[i].marked = true; j++; } status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); if (!status) for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) if (hw->tnl.tbl[i].marked) { hw->tnl.tbl[i].ref = 0; hw->tnl.tbl[i].port = 0; hw->tnl.tbl[i].in_use = false; hw->tnl.tbl[i].marked = false; } ice_destroy_tunnel_err: ice_pkg_buf_free(hw, bld); ice_destroy_tunnel_end: ice_release_lock(&hw->tnl_lock); return status; } /** * ice_replay_tunnels * @hw: pointer to the HW structure * * Replays all tunnels */ enum ice_status ice_replay_tunnels(struct ice_hw *hw) { enum ice_status status = ICE_SUCCESS; u16 i; ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++) { enum ice_tunnel_type type = hw->tnl.tbl[i].type; u16 refs = hw->tnl.tbl[i].ref; u16 port = hw->tnl.tbl[i].port; if (!hw->tnl.tbl[i].in_use) continue; /* Replay tunnels one at a time by destroying them, then * recreating them */ hw->tnl.tbl[i].ref = 1; /* make sure to destroy in one call */ status = 
ice_destroy_tunnel(hw, port, false); if (status) { ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - destroy tunnel port 0x%x\n", status, port); break; } status = ice_create_tunnel(hw, type, port); if (status) { ice_debug(hw, ICE_DBG_PKG, "ERR: 0x%x - create tunnel port 0x%x\n", status, port); break; } /* reset to original ref count */ hw->tnl.tbl[i].ref = refs; } return status; } /** * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index * @hw: pointer to the hardware structure * @blk: hardware block * @prof: profile ID * @fv_idx: field vector word index * @prot: variable to receive the protocol ID * @off: variable to receive the protocol offset */ enum ice_status ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off) { struct ice_fv_word *fv_ext; if (prof >= hw->blk[blk].es.count) return ICE_ERR_PARAM; if (fv_idx >= hw->blk[blk].es.fvw) return ICE_ERR_PARAM; fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw); *prot = fv_ext[fv_idx].prot_id; *off = fv_ext[fv_idx].off; return ICE_SUCCESS; } /* PTG Management */ /** * ice_ptg_update_xlt1 - Updates packet type groups in HW via XLT1 table * @hw: pointer to the hardware structure * @blk: HW block * * This function will update the XLT1 hardware table to reflect the new * packet type group configuration. */ enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk) { struct ice_xlt1_section *sect; struct ice_buf_build *bld; enum ice_status status; u16 index; bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1), ice_struct_size(sect, value, ICE_XLT1_CNT), (void **)&sect); if (!bld) return ICE_ERR_NO_MEMORY; sect->count = CPU_TO_LE16(ICE_XLT1_CNT); sect->offset = CPU_TO_LE16(0); for (index = 0; index < ICE_XLT1_CNT; index++) sect->value[index] = hw->blk[blk].xlt1.ptypes[index].ptg; status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); ice_pkg_buf_free(hw, bld); return status; } /** * ice_ptg_find_ptype - Search for packet type group using packet type (ptype) * @hw: pointer to the hardware structure * @blk: HW block * @ptype: the ptype to search for * @ptg: pointer to variable that receives the PTG * * This function will search the PTGs for a particular ptype, returning the * PTG ID that contains it through the PTG parameter, with the value of * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG. */ static enum ice_status ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg) { if (ptype >= ICE_XLT1_CNT || !ptg) return ICE_ERR_PARAM; *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg; return ICE_SUCCESS; } /** * ice_ptg_alloc_val - Allocates a new packet type group ID by value * @hw: pointer to the hardware structure * @blk: HW block * @ptg: the PTG to allocate * * This function allocates a given packet type group ID specified by the PTG * parameter. */ static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg) { hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true; } /** * ice_ptg_free - Frees a packet type group * @hw: pointer to the hardware structure * @blk: HW block * @ptg: the PTG ID to free * * This function frees a packet type group, and returns all the current ptypes * within it to the default PTG.
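/*
 * Illustrative sketch, not part of the ice driver sources:
 * ice_find_prot_off() above indexes a flattened 2D table -- 'es.t'
 * stores 'count' rows of 'fvw' words each, so the lookup is
 * t[prof * fvw + idx].  A minimal version of that addressing with a toy
 * element type:
 */
#include <stddef.h>

struct word {
	unsigned char prot_id;
	unsigned short off;
};

struct es_tbl {
	struct word *t;	/* count * fvw contiguous entries */
	size_t count;	/* number of profiles (rows) */
	size_t fvw;	/* words per profile (columns) */
};

static const struct word *
es_entry(const struct es_tbl *es, size_t prof, size_t idx)
{
	if (prof >= es->count || idx >= es->fvw)
		return NULL;	/* bounds-check both axes first */
	return &es->t[prof * es->fvw + idx];
}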
*/ void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg) { struct ice_ptg_ptype *p, *temp; hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = false; p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; while (p) { p->ptg = ICE_DEFAULT_PTG; temp = p->next_ptype; p->next_ptype = NULL; p = temp; } hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = NULL; } /** * ice_ptg_remove_ptype - Removes ptype from a particular packet type group * @hw: pointer to the hardware structure * @blk: HW block * @ptype: the ptype to remove * @ptg: the PTG to remove the ptype from * * This function will remove the ptype from the specific PTG, and move it to * the default PTG (ICE_DEFAULT_PTG). */ static enum ice_status ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { struct ice_ptg_ptype **ch; struct ice_ptg_ptype *p; if (ptype > ICE_XLT1_CNT - 1) return ICE_ERR_PARAM; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use) return ICE_ERR_DOES_NOT_EXIST; /* Should not happen if .in_use is set, bad config */ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype) return ICE_ERR_CFG; /* find the ptype within this PTG, and bypass the link over it */ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; while (p) { if (ptype == (p - hw->blk[blk].xlt1.ptypes)) { *ch = p->next_ptype; break; } ch = &p->next_ptype; p = p->next_ptype; } hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG; hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL; return ICE_SUCCESS; } /** * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group * @hw: pointer to the hardware structure * @blk: HW block * @ptype: the ptype to add or move * @ptg: the PTG to add or move the ptype to * * This function will either add or move a ptype to a particular PTG depending * on whether the ptype is already part of another group. Note that using a * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the * default PTG. */ static enum ice_status ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg) { enum ice_status status; u8 original_ptg; if (ptype > ICE_XLT1_CNT - 1) return ICE_ERR_PARAM; if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG) return ICE_ERR_DOES_NOT_EXIST; status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg); if (status) return status; /* Is ptype already in the correct PTG? */ if (original_ptg == ptg) return ICE_SUCCESS; /* Remove from original PTG and move back to the default PTG */ if (original_ptg != ICE_DEFAULT_PTG) ice_ptg_remove_ptype(hw, blk, ptype, original_ptg); /* Moving to default PTG?
Then we're done with this request */ if (ptg == ICE_DEFAULT_PTG) return ICE_SUCCESS; /* Add ptype to PTG at beginning of list */ hw->blk[blk].xlt1.ptypes[ptype].next_ptype = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype; hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype = &hw->blk[blk].xlt1.ptypes[ptype]; hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg; hw->blk[blk].xlt1.t[ptype] = ptg; return ICE_SUCCESS; } /* Block / table size info */ struct ice_blk_size_details { u16 xlt1; /* # XLT1 entries */ u16 xlt2; /* # XLT2 entries */ u16 prof_tcam; /* # profile ID TCAM entries */ u16 prof_id; /* # profile IDs */ u8 prof_cdid_bits; /* # CDID one-hot bits used in key */ u16 prof_redir; /* # profile redirection entries */ u16 es; /* # extraction sequence entries */ u16 fvw; /* # field vector words */ u8 overwrite; /* overwrite existing entries allowed */ u8 reverse; /* reverse FV order */ }; static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = { /** * Table Definitions * XLT1 - Number of entries in XLT1 table * XLT2 - Number of entries in XLT2 table * TCAM - Number of entries Profile ID TCAM table * CDID - Control Domain ID of the hardware block * PRED - Number of entries in the Profile Redirection Table * FV - Number of entries in the Field Vector * FVW - Width (in WORDs) of the Field Vector * OVR - Overwrite existing table entries * REV - Reverse FV */ /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */ /* Overwrite , Reverse FV */ /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48, false, false }, /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32, false, false }, /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, false, true }, /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24, true, true }, /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24, false, false }, }; enum ice_sid_all { ICE_SID_XLT1_OFF = 0, ICE_SID_XLT2_OFF, ICE_SID_PR_OFF, ICE_SID_PR_REDIR_OFF, ICE_SID_ES_OFF, ICE_SID_OFF_COUNT, }; /* Characteristic handling */ /** * ice_match_prop_lst - determine if properties of two lists match * @list1: first properties list * @list2: second properties list * * Count, cookies and the order must match in order to be considered equivalent. */ static bool ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2) { struct ice_vsig_prof *tmp1; struct ice_vsig_prof *tmp2; u16 chk_count = 0; u16 count = 0; /* compare counts */ LIST_FOR_EACH_ENTRY(tmp1, list1, ice_vsig_prof, list) count++; LIST_FOR_EACH_ENTRY(tmp2, list2, ice_vsig_prof, list) chk_count++; if (!count || count != chk_count) return false; tmp1 = LIST_FIRST_ENTRY(list1, struct ice_vsig_prof, list); tmp2 = LIST_FIRST_ENTRY(list2, struct ice_vsig_prof, list); /* profile cookies must compare, and in the exact same order to take * into account priority */ while (count--) { if (tmp2->profile_cookie != tmp1->profile_cookie) return false; tmp1 = LIST_NEXT_ENTRY(tmp1, struct ice_vsig_prof, list); tmp2 = LIST_NEXT_ENTRY(tmp2, struct ice_vsig_prof, list); } return true; } /* VSIG Management */ /** * ice_vsig_update_xlt2_sect - update one section of XLT2 table * @hw: pointer to the hardware structure * @blk: HW block * @vsi: HW VSI number to program * @vsig: VSIG for the VSI * * This function will update the XLT2 hardware table with the input VSI * group configuration. 
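/*
 * Illustrative sketch, not part of the ice driver sources:
 * ice_match_prop_lst() above treats two characteristic lists as equal
 * only when both the element count and the element order agree, because
 * order encodes priority.  The same rule over plain arrays of cookies:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
props_match(const uint64_t *a, size_t a_n, const uint64_t *b, size_t b_n)
{
	size_t i;

	if (a_n == 0 || a_n != b_n)	/* counts must agree and be nonzero */
		return false;
	for (i = 0; i < a_n; i++)
		if (a[i] != b[i])	/* same cookie, same position */
			return false;
	return true;
}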
*/ static enum ice_status ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_xlt2_section *sect; struct ice_buf_build *bld; enum ice_status status; bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2), ice_struct_size(sect, value, 1), (void **)&sect); if (!bld) return ICE_ERR_NO_MEMORY; sect->count = CPU_TO_LE16(1); sect->offset = CPU_TO_LE16(vsi); sect->value[0] = CPU_TO_LE16(vsig); status = ice_update_pkg(hw, ice_pkg_buf(bld), 1); ice_pkg_buf_free(hw, bld); return status; } /** * ice_vsig_update_xlt2 - update XLT2 table with VSIG configuration * @hw: pointer to the hardware structure * @blk: HW block * * This function will update the XLT2 hardware table with the input VSI * group configuration of used VSIs. */ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk) { u16 vsi; for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) { /* update only VSIs that have been changed */ if (hw->blk[blk].xlt2.vsis[vsi].changed) { enum ice_status status; u16 vsig; vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; status = ice_vsig_update_xlt2_sect(hw, blk, vsi, vsig); if (status) return status; hw->blk[blk].xlt2.vsis[vsi].changed = 0; } } return ICE_SUCCESS; } /** * ice_vsig_find_vsi - find a VSIG that contains a specified VSI * @hw: pointer to the hardware structure * @blk: HW block * @vsi: VSI of interest * @vsig: pointer to receive the VSI group * * This function will look up the VSI entry in the XLT2 list and return * the VSI group it is associated with. */ enum ice_status ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig) { if (!vsig || vsi >= ICE_MAX_VSI) return ICE_ERR_PARAM; /* As long as there's a default or valid VSIG associated with the input * VSI, the function returns success. Any handling of VSIG will be * done by the following add, update or remove functions. */ *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig; return ICE_SUCCESS; } /** * ice_vsig_alloc_val - allocate a new VSIG by value * @hw: pointer to the hardware structure * @blk: HW block * @vsig: the VSIG to allocate * * This function will allocate a given VSIG specified by the VSIG parameter. */ static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig) { u16 idx = vsig & ICE_VSIG_IDX_M; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) { INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true; } return ICE_VSIG_VALUE(idx, hw->pf_id); } /** * ice_vsig_alloc - Finds a free entry and allocates a new VSIG * @hw: pointer to the hardware structure * @blk: HW block * * This function will iterate through the VSIG list and mark the first * unused entry for the new VSIG entry as used and return that value. */ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk) { u16 i; for (i = 1; i < ICE_MAX_VSIGS; i++) if (!hw->blk[blk].xlt2.vsig_tbl[i].in_use) return ice_vsig_alloc_val(hw, blk, i); return ICE_DEFAULT_VSIG; } /** * ice_find_dup_props_vsig - find VSI group with a specified set of properties * @hw: pointer to the hardware structure * @blk: HW block * @chs: characteristic list * @vsig: returns the VSIG with the matching profiles, if found * * Each VSIG is associated with a characteristic set; i.e. all VSIs under * a group have the same characteristic set. To check whether there exists a * VSIG that has the same characteristics as the input characteristics, this * function will iterate through the XLT2 list and return the VSIG that has a * matching configuration.
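/*
 * Illustrative sketch, not part of the ice driver sources:
 * ice_vsig_alloc() above scans for the first unused table slot (index 0
 * is reserved for the default group) and returns a handle that packs the
 * slot index together with the PF id.  The handle layout below is an
 * assumed one for the example, not the driver's ICE_VSIG_VALUE():
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_GROUPS	768	/* invented table size */
#define IDX_MASK	0x3FF

static bool in_use[MAX_GROUPS];

static uint16_t alloc_group(uint8_t pf_id)
{
	uint16_t i;

	for (i = 1; i < MAX_GROUPS; i++)	/* slot 0 = default group */
		if (!in_use[i]) {
			in_use[i] = true;
			/* assumed handle layout: PF id above the index */
			return (uint16_t)((pf_id << 10) | (i & IDX_MASK));
		}
	return 0;	/* nothing free: fall back to the default group */
}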
In order to make sure that priorities are accounted * for, the list must match exactly, including the order in which the * characteristics are listed. */ static enum ice_status ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk, struct LIST_HEAD_TYPE *chs, u16 *vsig) { struct ice_xlt2 *xlt2 = &hw->blk[blk].xlt2; u16 i; for (i = 0; i < xlt2->count; i++) if (xlt2->vsig_tbl[i].in_use && ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) { *vsig = ICE_VSIG_VALUE(i, hw->pf_id); return ICE_SUCCESS; } return ICE_ERR_DOES_NOT_EXIST; } /** * ice_vsig_free - free VSI group * @hw: pointer to the hardware structure * @blk: HW block * @vsig: VSIG to remove * * The function will remove all VSIs associated with the input VSIG and move * them to the DEFAULT_VSIG and mark the VSIG available. */ static enum ice_status ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig) { struct ice_vsig_prof *dtmp, *del; struct ice_vsig_vsi *vsi_cur; u16 idx; idx = vsig & ICE_VSIG_IDX_M; if (idx >= ICE_MAX_VSIGS) return ICE_ERR_PARAM; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) return ICE_ERR_DOES_NOT_EXIST; hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false; vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; /* If the VSIG has at least 1 VSI then iterate through the * list and remove the VSIs before deleting the group. */ if (vsi_cur) { /* remove all vsis associated with this VSIG XLT2 entry */ do { struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; vsi_cur->vsig = ICE_DEFAULT_VSIG; vsi_cur->changed = 1; vsi_cur->next_vsi = NULL; vsi_cur = tmp; } while (vsi_cur); /* NULL terminate head of VSI list */ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi = NULL; } /* free characteristic list */ LIST_FOR_EACH_ENTRY_SAFE(del, dtmp, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) { LIST_DEL(&del->list); ice_free(hw, del); } /* if VSIG characteristic list was cleared for reset * re-initialize the list head */ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst); return ICE_SUCCESS; } /** * ice_vsig_remove_vsi - remove VSI from VSIG * @hw: pointer to the hardware structure * @blk: HW block * @vsi: VSI to remove * @vsig: VSI group to remove from * * The function will remove the input VSI from its VSI group and move it * to the DEFAULT_VSIG. */ static enum ice_status ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig) { struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt; u16 idx; idx = vsig & ICE_VSIG_IDX_M; if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS) return ICE_ERR_PARAM; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) return ICE_ERR_DOES_NOT_EXIST; /* entry already in default VSIG, don't have to remove */ if (idx == ICE_DEFAULT_VSIG) return ICE_SUCCESS; vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; if (!(*vsi_head)) return ICE_ERR_CFG; vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi]; vsi_cur = (*vsi_head); /* iterate the VSI list, skip over the entry to be removed */ while (vsi_cur) { if (vsi_tgt == vsi_cur) { (*vsi_head) = vsi_cur->next_vsi; break; } vsi_head = &vsi_cur->next_vsi; vsi_cur = vsi_cur->next_vsi; } /* verify if VSI was removed from group list */ if (!vsi_cur) return ICE_ERR_DOES_NOT_EXIST; vsi_cur->vsig = ICE_DEFAULT_VSIG; vsi_cur->changed = 1; vsi_cur->next_vsi = NULL; return ICE_SUCCESS; } /** * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group * @hw: pointer to the hardware structure * @blk: HW block * @vsi: VSI to move * @vsig: destination VSI group * * This function will move or add the input VSI to the target VSIG. 
 * The function will find the original VSIG the VSI belongs to, move the
 * entry to the DEFAULT_VSIG, update the original VSIG, and then move the
 * entry to the new VSIG.
 */
static enum ice_status
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_vsig_vsi *tmp;
	enum ice_status status;
	u16 orig_vsig, idx;

	idx = vsig & ICE_VSIG_IDX_M;

	if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
		return ICE_ERR_PARAM;

	/* if the VSIG is not in use and is not the default VSIG, then this
	 * VSIG doesn't exist.
	 */
	if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
	    vsig != ICE_DEFAULT_VSIG)
		return ICE_ERR_DOES_NOT_EXIST;

	status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
	if (status)
		return status;

	/* no update required if VSIGs match */
	if (orig_vsig == vsig)
		return ICE_SUCCESS;

	if (orig_vsig != ICE_DEFAULT_VSIG) {
		/* remove entry from orig_vsig and add to default VSIG */
		status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
		if (status)
			return status;
	}

	if (idx == ICE_DEFAULT_VSIG)
		return ICE_SUCCESS;

	/* Create VSI entry and add VSIG and prop_mask values */
	hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
	hw->blk[blk].xlt2.vsis[vsi].changed = 1;

	/* Add new entry to the head of the VSIG list */
	tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
	hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
		&hw->blk[blk].xlt2.vsis[vsi];
	hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
	hw->blk[blk].xlt2.t[vsi] = vsig;

	return ICE_SUCCESS;
}

/**
 * ice_find_prof_id - find profile ID for a given field vector
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @fv: field vector to search for
 * @prof_id: receives the profile ID
 */
static enum ice_status
ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
		 struct ice_fv_word *fv, u8 *prof_id)
{
	struct ice_es *es = &hw->blk[blk].es;
	u16 off;
	u8 i;

	for (i = 0; i < (u8)es->count; i++) {
		off = i * es->fvw;

		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
			continue;

		*prof_id = i;
		return ICE_SUCCESS;
	}

	return ICE_ERR_DOES_NOT_EXIST;
}

/**
 * ice_prof_id_rsrc_type - get profile ID resource type for a block type
 * @blk: the block type
 * @rsrc_type: pointer to variable to receive the resource type
 */
static bool ice_prof_id_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
	switch (blk) {
	case ICE_BLK_RSS:
		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID;
		break;
	case ICE_BLK_PE:
		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_PROFID;
		break;
	default:
		return false;
	}
	return true;
}

/**
 * ice_tcam_ent_rsrc_type - get TCAM entry resource type for a block type
 * @blk: the block type
 * @rsrc_type: pointer to variable to receive the resource type
 */
static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
{
	switch (blk) {
	case ICE_BLK_RSS:
		*rsrc_type = ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM;
		break;
	case ICE_BLK_PE:
		*rsrc_type = ICE_AQC_RES_TYPE_QHASH_PROF_BLDR_TCAM;
		break;
	default:
		return false;
	}
	return true;
}

/**
 * ice_alloc_tcam_ent - allocate hardware TCAM entry
 * @hw: pointer to the HW struct
 * @blk: the block to allocate the TCAM for
 * @btm: true to allocate from bottom of table, false to allocate from top
 * @tcam_idx: pointer to variable to receive the TCAM entry
 *
 * This function allocates a new entry in a Profile ID TCAM for a specific
 * block.
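 *
 * Call sketch (mirroring the callers later in this file, e.g.
 * ice_add_prof_id_vsig()): entries are typically taken from the bottom of
 * the table:
 *
 *	u16 tcam_idx;
 *
 *	status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx);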
*/ static enum ice_status ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm, u16 *tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx); } /** * ice_free_tcam_ent - free hardware TCAM entry * @hw: pointer to the HW struct * @blk: the block from which to free the TCAM entry * @tcam_idx: the TCAM entry to free * * This function frees an entry in a Profile ID TCAM for a specific block. */ static enum ice_status ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx) { u16 res_type; if (!ice_tcam_ent_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; return ice_free_hw_res(hw, res_type, 1, &tcam_idx); } /** * ice_alloc_prof_id - allocate profile ID * @hw: pointer to the HW struct * @blk: the block to allocate the profile ID for * @prof_id: pointer to variable to receive the profile ID * * This function allocates a new profile ID, which also corresponds to a Field * Vector (Extraction Sequence) entry. */ static enum ice_status ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id) { enum ice_status status; u16 res_type; u16 get_prof; if (!ice_prof_id_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof); if (!status) *prof_id = (u8)get_prof; return status; } /** * ice_free_prof_id - free profile ID * @hw: pointer to the HW struct * @blk: the block from which to free the profile ID * @prof_id: the profile ID to free * * This function frees a profile ID, which also corresponds to a Field Vector. */ static enum ice_status ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { u16 tmp_prof_id = (u16)prof_id; u16 res_type; if (!ice_prof_id_rsrc_type(blk, &res_type)) return ICE_ERR_PARAM; return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id); } /** * ice_prof_inc_ref - increment reference count for profile * @hw: pointer to the HW struct * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to increment the reference count */ static enum ice_status ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) return ICE_ERR_PARAM; hw->blk[blk].es.ref_count[prof_id]++; return ICE_SUCCESS; } /** * ice_write_es - write an extraction sequence to hardware * @hw: pointer to the HW struct * @blk: the block in which to write the extraction sequence * @prof_id: the profile ID to write * @fv: pointer to the extraction sequence to write - NULL to clear extraction */ static void ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id, struct ice_fv_word *fv) { u16 off; off = prof_id * hw->blk[blk].es.fvw; if (!fv) { ice_memset(&hw->blk[blk].es.t[off], 0, hw->blk[blk].es.fvw * sizeof(*fv), ICE_NONDMA_MEM); hw->blk[blk].es.written[prof_id] = false; } else { ice_memcpy(&hw->blk[blk].es.t[off], fv, hw->blk[blk].es.fvw * sizeof(*fv), ICE_NONDMA_TO_NONDMA); } } /** * ice_prof_dec_ref - decrement reference count for profile * @hw: pointer to the HW struct * @blk: the block from which to free the profile ID * @prof_id: the profile ID for which to decrement the reference count */ static enum ice_status ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id) { if (prof_id > hw->blk[blk].es.count) return ICE_ERR_PARAM; if (hw->blk[blk].es.ref_count[prof_id] > 0) { if (!--hw->blk[blk].es.ref_count[prof_id]) { ice_write_es(hw, blk, prof_id, NULL); return ice_free_prof_id(hw, blk, prof_id); } } return ICE_SUCCESS; } /* 
Block / table section IDs */
static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
	/* SWITCH */
	{	ICE_SID_XLT1_SW,
		ICE_SID_XLT2_SW,
		ICE_SID_PROFID_TCAM_SW,
		ICE_SID_PROFID_REDIR_SW,
		ICE_SID_FLD_VEC_SW
	},

	/* ACL */
	{	ICE_SID_XLT1_ACL,
		ICE_SID_XLT2_ACL,
		ICE_SID_PROFID_TCAM_ACL,
		ICE_SID_PROFID_REDIR_ACL,
		ICE_SID_FLD_VEC_ACL
	},

	/* FD */
	{	ICE_SID_XLT1_FD,
		ICE_SID_XLT2_FD,
		ICE_SID_PROFID_TCAM_FD,
		ICE_SID_PROFID_REDIR_FD,
		ICE_SID_FLD_VEC_FD
	},

	/* RSS */
	{	ICE_SID_XLT1_RSS,
		ICE_SID_XLT2_RSS,
		ICE_SID_PROFID_TCAM_RSS,
		ICE_SID_PROFID_REDIR_RSS,
		ICE_SID_FLD_VEC_RSS
	},

	/* PE */
	{	ICE_SID_XLT1_PE,
		ICE_SID_XLT2_PE,
		ICE_SID_PROFID_TCAM_PE,
		ICE_SID_PROFID_REDIR_PE,
		ICE_SID_FLD_VEC_PE
	}
};

/**
 * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
 * @hw: pointer to the hardware structure
 * @blk: the HW block to initialize
 */
static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
{
	u16 pt;

	for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
		u8 ptg;

		ptg = hw->blk[blk].xlt1.t[pt];
		if (ptg != ICE_DEFAULT_PTG) {
			ice_ptg_alloc_val(hw, blk, ptg);
			ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
		}
	}
}

/**
 * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
 * @hw: pointer to the hardware structure
 * @blk: the HW block to initialize
 */
static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
{
	u16 vsi;

	for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
		u16 vsig;

		vsig = hw->blk[blk].xlt2.t[vsi];
		if (vsig) {
			ice_vsig_alloc_val(hw, blk, vsig);
			ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
			/* no changes at this time, since this has been
			 * initialized from the original package
			 */
			hw->blk[blk].xlt2.vsis[vsi].changed = 0;
		}
	}
}

/**
 * ice_init_sw_db - init software database from HW tables
 * @hw: pointer to the hardware structure
 */
static void ice_init_sw_db(struct ice_hw *hw)
{
	u16 i;

	for (i = 0; i < ICE_BLK_COUNT; i++) {
		ice_init_sw_xlt1_db(hw, (enum ice_block)i);
		ice_init_sw_xlt2_db(hw, (enum ice_block)i);
	}
}

/**
 * ice_fill_tbl - Reads content of a single table type into database
 * @hw: pointer to the hardware structure
 * @block_id: Block ID of the table to copy
 * @sid: Section ID of the table to copy
 *
 * Will attempt to read the entire content of a given table of a single block
 * into the driver database. We assume that the buffer will always
 * be as large or larger than the data contained in the package. If
 * this condition is not met, there is most likely an error in the package
 * contents.
 */
static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
{
	u32 dst_len, sect_len, offset = 0;
	struct ice_prof_redir_section *pr;
	struct ice_prof_id_section *pid;
	struct ice_xlt1_section *xlt1;
	struct ice_xlt2_section *xlt2;
	struct ice_sw_fv_section *es;
	struct ice_pkg_enum state;
	u8 *src, *dst;
	void *sect;

	/* if the HW segment pointer is null then the first iteration of
	 * ice_pkg_enum_section() will fail. In this case the HW tables will
	 * not be filled and the function will return without error.
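	 *
	 * The enumeration pattern used below is, in sketch form:
	 *
	 *	sect = ice_pkg_enum_section(hw->seg, &state, sid);
	 *	while (sect) {
	 *		(copy this section into the block's table)
	 *		sect = ice_pkg_enum_section(NULL, &state, sid);
	 *	}
	 *
	 * i.e., the first call starts the walk from hw->seg and subsequent
	 * calls pass NULL to continue it.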
*/ if (!hw->seg) { ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n"); return; } - ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + ice_memset(&state, 0, sizeof(state), ICE_NONDMA_MEM); + + sect = ice_pkg_enum_section(hw->seg, &state, sid); + + while (sect) { + switch (sid) { + case ICE_SID_XLT1_SW: + case ICE_SID_XLT1_FD: + case ICE_SID_XLT1_RSS: + case ICE_SID_XLT1_ACL: + case ICE_SID_XLT1_PE: + xlt1 = (struct ice_xlt1_section *)sect; + src = xlt1->value; + sect_len = LE16_TO_CPU(xlt1->count) * + sizeof(*hw->blk[block_id].xlt1.t); + dst = hw->blk[block_id].xlt1.t; + dst_len = hw->blk[block_id].xlt1.count * + sizeof(*hw->blk[block_id].xlt1.t); + break; + case ICE_SID_XLT2_SW: + case ICE_SID_XLT2_FD: + case ICE_SID_XLT2_RSS: + case ICE_SID_XLT2_ACL: + case ICE_SID_XLT2_PE: + xlt2 = (struct ice_xlt2_section *)sect; + src = (_FORCE_ u8 *)xlt2->value; + sect_len = LE16_TO_CPU(xlt2->count) * + sizeof(*hw->blk[block_id].xlt2.t); + dst = (u8 *)hw->blk[block_id].xlt2.t; + dst_len = hw->blk[block_id].xlt2.count * + sizeof(*hw->blk[block_id].xlt2.t); + break; + case ICE_SID_PROFID_TCAM_SW: + case ICE_SID_PROFID_TCAM_FD: + case ICE_SID_PROFID_TCAM_RSS: + case ICE_SID_PROFID_TCAM_ACL: + case ICE_SID_PROFID_TCAM_PE: + pid = (struct ice_prof_id_section *)sect; + src = (u8 *)pid->entry; + sect_len = LE16_TO_CPU(pid->count) * + sizeof(*hw->blk[block_id].prof.t); + dst = (u8 *)hw->blk[block_id].prof.t; + dst_len = hw->blk[block_id].prof.count * + sizeof(*hw->blk[block_id].prof.t); + break; + case ICE_SID_PROFID_REDIR_SW: + case ICE_SID_PROFID_REDIR_FD: + case ICE_SID_PROFID_REDIR_RSS: + case ICE_SID_PROFID_REDIR_ACL: + case ICE_SID_PROFID_REDIR_PE: + pr = (struct ice_prof_redir_section *)sect; + src = pr->redir_value; + sect_len = LE16_TO_CPU(pr->count) * + sizeof(*hw->blk[block_id].prof_redir.t); + dst = hw->blk[block_id].prof_redir.t; + dst_len = hw->blk[block_id].prof_redir.count * + sizeof(*hw->blk[block_id].prof_redir.t); + break; + case ICE_SID_FLD_VEC_SW: + case ICE_SID_FLD_VEC_FD: + case ICE_SID_FLD_VEC_RSS: + case ICE_SID_FLD_VEC_ACL: + case ICE_SID_FLD_VEC_PE: + es = (struct ice_sw_fv_section *)sect; + src = (u8 *)es->fv; + sect_len = (u32)(LE16_TO_CPU(es->count) * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + dst = (u8 *)hw->blk[block_id].es.t; + dst_len = (u32)(hw->blk[block_id].es.count * + hw->blk[block_id].es.fvw) * + sizeof(*hw->blk[block_id].es.t); + break; + default: + return; + } + + /* if the section offset exceeds destination length, terminate + * table fill. + */ + if (offset > dst_len) + return; + + /* if the sum of section size and offset exceed destination size + * then we are out of bounds of the HW table size for that PF. + * Changing section length to fill the remaining table space + * of that PF. 
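+		 *
+		 * For example (illustrative numbers only): with
+		 * dst_len = 1024, offset = 1000 and sect_len = 64, the copy
+		 * below is clamped to the 24 bytes that remain in the PF's
+		 * table.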
+ */ + if ((offset + sect_len) > dst_len) + sect_len = dst_len - offset; + + ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA); + offset += sect_len; + sect = ice_pkg_enum_section(NULL, &state, sid); + } +} + +/** + * ice_init_flow_profs - init flow profile locks and list heads + * @hw: pointer to the hardware structure + * @blk_idx: HW block index + */ +static +void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) +{ + ice_init_lock(&hw->fl_profs_locks[blk_idx]); + INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); +} + +/** + * ice_init_hw_tbls - init hardware table memory + * @hw: pointer to the hardware structure + */ +enum ice_status ice_init_hw_tbls(struct ice_hw *hw) +{ + u8 i; + + ice_init_lock(&hw->rss_locks); + INIT_LIST_HEAD(&hw->rss_list_head); + for (i = 0; i < ICE_BLK_COUNT; i++) { + struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; + struct ice_prof_tcam *prof = &hw->blk[i].prof; + struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; + struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; + struct ice_es *es = &hw->blk[i].es; + u16 j; + + if (hw->blk[i].is_list_init) + continue; + + ice_init_flow_profs(hw, i); + ice_init_lock(&es->prof_map_lock); + INIT_LIST_HEAD(&es->prof_map); + hw->blk[i].is_list_init = true; + + hw->blk[i].overwrite = blk_sizes[i].overwrite; + es->reverse = blk_sizes[i].reverse; + + xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; + xlt1->count = blk_sizes[i].xlt1; + + xlt1->ptypes = (struct ice_ptg_ptype *) + ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes)); + + if (!xlt1->ptypes) + goto err; + + xlt1->ptg_tbl = (struct ice_ptg_entry *) + ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl)); + + if (!xlt1->ptg_tbl) + goto err; + + xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t)); + if (!xlt1->t) + goto err; + + xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; + xlt2->count = blk_sizes[i].xlt2; + + xlt2->vsis = (struct ice_vsig_vsi *) + ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis)); + + if (!xlt2->vsis) + goto err; + + xlt2->vsig_tbl = (struct ice_vsig_entry *) + ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl)); + if (!xlt2->vsig_tbl) + goto err; + + for (j = 0; j < xlt2->count; j++) + INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); + + xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t)); + if (!xlt2->t) + goto err; + + prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; + prof->count = blk_sizes[i].prof_tcam; + prof->max_prof_id = blk_sizes[i].prof_id; + prof->cdid_bits = blk_sizes[i].prof_cdid_bits; + prof->t = (struct ice_prof_tcam_entry *) + ice_calloc(hw, prof->count, sizeof(*prof->t)); + + if (!prof->t) + goto err; + + prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; + prof_redir->count = blk_sizes[i].prof_redir; + prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count, + sizeof(*prof_redir->t)); - sect = ice_pkg_enum_section(hw->seg, &state, sid); + if (!prof_redir->t) + goto err; - while (sect) { - switch (sid) { - case ICE_SID_XLT1_SW: - case ICE_SID_XLT1_FD: - case ICE_SID_XLT1_RSS: - case ICE_SID_XLT1_ACL: - case ICE_SID_XLT1_PE: - xlt1 = (struct ice_xlt1_section *)sect; - src = xlt1->value; - sect_len = LE16_TO_CPU(xlt1->count) * - sizeof(*hw->blk[block_id].xlt1.t); - dst = hw->blk[block_id].xlt1.t; - dst_len = hw->blk[block_id].xlt1.count * - sizeof(*hw->blk[block_id].xlt1.t); - break; - case ICE_SID_XLT2_SW: - case ICE_SID_XLT2_FD: - case ICE_SID_XLT2_RSS: - case ICE_SID_XLT2_ACL: - case ICE_SID_XLT2_PE: - xlt2 = (struct ice_xlt2_section *)sect; - src = (_FORCE_ u8 *)xlt2->value; - sect_len = LE16_TO_CPU(xlt2->count) * - 
sizeof(*hw->blk[block_id].xlt2.t); - dst = (u8 *)hw->blk[block_id].xlt2.t; - dst_len = hw->blk[block_id].xlt2.count * - sizeof(*hw->blk[block_id].xlt2.t); - break; - case ICE_SID_PROFID_TCAM_SW: - case ICE_SID_PROFID_TCAM_FD: - case ICE_SID_PROFID_TCAM_RSS: - case ICE_SID_PROFID_TCAM_ACL: - case ICE_SID_PROFID_TCAM_PE: - pid = (struct ice_prof_id_section *)sect; - src = (u8 *)pid->entry; - sect_len = LE16_TO_CPU(pid->count) * - sizeof(*hw->blk[block_id].prof.t); - dst = (u8 *)hw->blk[block_id].prof.t; - dst_len = hw->blk[block_id].prof.count * - sizeof(*hw->blk[block_id].prof.t); - break; - case ICE_SID_PROFID_REDIR_SW: - case ICE_SID_PROFID_REDIR_FD: - case ICE_SID_PROFID_REDIR_RSS: - case ICE_SID_PROFID_REDIR_ACL: - case ICE_SID_PROFID_REDIR_PE: - pr = (struct ice_prof_redir_section *)sect; - src = pr->redir_value; - sect_len = LE16_TO_CPU(pr->count) * - sizeof(*hw->blk[block_id].prof_redir.t); - dst = hw->blk[block_id].prof_redir.t; - dst_len = hw->blk[block_id].prof_redir.count * - sizeof(*hw->blk[block_id].prof_redir.t); - break; - case ICE_SID_FLD_VEC_SW: - case ICE_SID_FLD_VEC_FD: - case ICE_SID_FLD_VEC_RSS: - case ICE_SID_FLD_VEC_ACL: - case ICE_SID_FLD_VEC_PE: - es = (struct ice_sw_fv_section *)sect; - src = (u8 *)es->fv; - sect_len = (u32)(LE16_TO_CPU(es->count) * - hw->blk[block_id].es.fvw) * - sizeof(*hw->blk[block_id].es.t); - dst = (u8 *)hw->blk[block_id].es.t; - dst_len = (u32)(hw->blk[block_id].es.count * - hw->blk[block_id].es.fvw) * - sizeof(*hw->blk[block_id].es.t); - break; - default: - return; - } + es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; + es->count = blk_sizes[i].es; + es->fvw = blk_sizes[i].fvw; + es->t = (struct ice_fv_word *) + ice_calloc(hw, (u32)(es->count * es->fvw), + sizeof(*es->t)); + if (!es->t) + goto err; - /* if the section offset exceeds destination length, terminate - * table fill. - */ - if (offset > dst_len) - return; + es->ref_count = (u16 *) + ice_calloc(hw, es->count, sizeof(*es->ref_count)); - /* if the sum of section size and offset exceed destination size - * then we are out of bounds of the HW table size for that PF. - * Changing section length to fill the remaining table space - * of that PF. - */ - if ((offset + sect_len) > dst_len) - sect_len = dst_len - offset; + if (!es->ref_count) + goto err; + + es->written = (u8 *) + ice_calloc(hw, es->count, sizeof(*es->written)); + + if (!es->written) + goto err; - ice_memcpy(dst + offset, src, sect_len, ICE_NONDMA_TO_NONDMA); - offset += sect_len; - sect = ice_pkg_enum_section(NULL, &state, sid); } + return ICE_SUCCESS; + +err: + ice_free_hw_tbls(hw); + return ICE_ERR_NO_MEMORY; } /** * ice_fill_blk_tbls - Read package context for tables * @hw: pointer to the hardware structure * * Reads the current package contents and populates the driver * database with the data iteratively for all advanced feature * blocks. Assume that the HW tables have been allocated. 
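 *
 * Call-order sketch (illustrative; assumes ICE_SUCCESS == 0, as the status
 * checks elsewhere in this file do): allocate the tables first, then
 * populate them from the package:
 *
 *	if (!ice_init_hw_tbls(hw))
 *		ice_fill_blk_tbls(hw);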
*/ void ice_fill_blk_tbls(struct ice_hw *hw) { u8 i; for (i = 0; i < ICE_BLK_COUNT; i++) { enum ice_block blk_id = (enum ice_block)i; ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid); ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid); ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid); ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid); ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid); } ice_init_sw_db(hw); } /** * ice_free_prof_map - free profile map * @hw: pointer to the hardware structure * @blk_idx: HW block index */ static void ice_free_prof_map(struct ice_hw *hw, u8 blk_idx) { struct ice_es *es = &hw->blk[blk_idx].es; struct ice_prof_map *del, *tmp; ice_acquire_lock(&es->prof_map_lock); LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &es->prof_map, ice_prof_map, list) { LIST_DEL(&del->list); ice_free(hw, del); } INIT_LIST_HEAD(&es->prof_map); ice_release_lock(&es->prof_map_lock); } /** * ice_free_flow_profs - free flow profile entries * @hw: pointer to the hardware structure * @blk_idx: HW block index */ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) { struct ice_flow_prof *p, *tmp; ice_acquire_lock(&hw->fl_profs_locks[blk_idx]); LIST_FOR_EACH_ENTRY_SAFE(p, tmp, &hw->fl_profs[blk_idx], ice_flow_prof, l_entry) { LIST_DEL(&p->l_entry); ice_free(hw, p); } ice_release_lock(&hw->fl_profs_locks[blk_idx]); /* if driver is in reset and tables are being cleared * re-initialize the flow profile list heads */ INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); } /** * ice_free_vsig_tbl - free complete VSIG table entries * @hw: pointer to the hardware structure * @blk: the HW block on which to free the VSIG table entries */ static void ice_free_vsig_tbl(struct ice_hw *hw, enum ice_block blk) { u16 i; if (!hw->blk[blk].xlt2.vsig_tbl) return; for (i = 1; i < ICE_MAX_VSIGS; i++) if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) ice_vsig_free(hw, blk, i); } /** * ice_free_hw_tbls - free hardware table memory * @hw: pointer to the hardware structure */ void ice_free_hw_tbls(struct ice_hw *hw) { struct ice_rss_cfg *r, *rt; u8 i; for (i = 0; i < ICE_BLK_COUNT; i++) { if (hw->blk[i].is_list_init) { struct ice_es *es = &hw->blk[i].es; ice_free_prof_map(hw, i); ice_destroy_lock(&es->prof_map_lock); ice_free_flow_profs(hw, i); ice_destroy_lock(&hw->fl_profs_locks[i]); hw->blk[i].is_list_init = false; } ice_free_vsig_tbl(hw, (enum ice_block)i); ice_free(hw, hw->blk[i].xlt1.ptypes); ice_free(hw, hw->blk[i].xlt1.ptg_tbl); ice_free(hw, hw->blk[i].xlt1.t); ice_free(hw, hw->blk[i].xlt2.t); ice_free(hw, hw->blk[i].xlt2.vsig_tbl); ice_free(hw, hw->blk[i].xlt2.vsis); ice_free(hw, hw->blk[i].prof.t); ice_free(hw, hw->blk[i].prof_redir.t); ice_free(hw, hw->blk[i].es.t); ice_free(hw, hw->blk[i].es.ref_count); ice_free(hw, hw->blk[i].es.written); } LIST_FOR_EACH_ENTRY_SAFE(r, rt, &hw->rss_list_head, ice_rss_cfg, l_entry) { LIST_DEL(&r->l_entry); ice_free(hw, r); } ice_destroy_lock(&hw->rss_locks); ice_memset(hw->blk, 0, sizeof(hw->blk), ICE_NONDMA_MEM); } -/** - * ice_init_flow_profs - init flow profile locks and list heads - * @hw: pointer to the hardware structure - * @blk_idx: HW block index - */ -static void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx) -{ - ice_init_lock(&hw->fl_profs_locks[blk_idx]); - INIT_LIST_HEAD(&hw->fl_profs[blk_idx]); -} - /** * ice_clear_hw_tbls - clear HW tables and flow profiles * @hw: pointer to the hardware structure */ void ice_clear_hw_tbls(struct ice_hw *hw) { u8 i; for (i = 0; i < ICE_BLK_COUNT; i++) { struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; struct ice_prof_tcam 
*prof = &hw->blk[i].prof; struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; struct ice_es *es = &hw->blk[i].es; if (hw->blk[i].is_list_init) { ice_free_prof_map(hw, i); ice_free_flow_profs(hw, i); } ice_free_vsig_tbl(hw, (enum ice_block)i); - ice_memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes), - ICE_NONDMA_MEM); - ice_memset(xlt1->ptg_tbl, 0, - ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl), - ICE_NONDMA_MEM); - ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t), - ICE_NONDMA_MEM); - - ice_memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis), - ICE_NONDMA_MEM); - ice_memset(xlt2->vsig_tbl, 0, - xlt2->count * sizeof(*xlt2->vsig_tbl), - ICE_NONDMA_MEM); - ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t), - ICE_NONDMA_MEM); - - ice_memset(prof->t, 0, prof->count * sizeof(*prof->t), - ICE_NONDMA_MEM); - ice_memset(prof_redir->t, 0, - prof_redir->count * sizeof(*prof_redir->t), - ICE_NONDMA_MEM); - - ice_memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw, - ICE_NONDMA_MEM); - ice_memset(es->ref_count, 0, es->count * sizeof(*es->ref_count), - ICE_NONDMA_MEM); - ice_memset(es->written, 0, es->count * sizeof(*es->written), - ICE_NONDMA_MEM); - } -} - -/** - * ice_init_hw_tbls - init hardware table memory - * @hw: pointer to the hardware structure - */ -enum ice_status ice_init_hw_tbls(struct ice_hw *hw) -{ - u8 i; - - ice_init_lock(&hw->rss_locks); - INIT_LIST_HEAD(&hw->rss_list_head); - for (i = 0; i < ICE_BLK_COUNT; i++) { - struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir; - struct ice_prof_tcam *prof = &hw->blk[i].prof; - struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1; - struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2; - struct ice_es *es = &hw->blk[i].es; - u16 j; - - if (hw->blk[i].is_list_init) - continue; - - ice_init_flow_profs(hw, i); - ice_init_lock(&es->prof_map_lock); - INIT_LIST_HEAD(&es->prof_map); - hw->blk[i].is_list_init = true; - - hw->blk[i].overwrite = blk_sizes[i].overwrite; - es->reverse = blk_sizes[i].reverse; - - xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF]; - xlt1->count = blk_sizes[i].xlt1; - - xlt1->ptypes = (struct ice_ptg_ptype *) - ice_calloc(hw, xlt1->count, sizeof(*xlt1->ptypes)); - - if (!xlt1->ptypes) - goto err; - - xlt1->ptg_tbl = (struct ice_ptg_entry *) - ice_calloc(hw, ICE_MAX_PTGS, sizeof(*xlt1->ptg_tbl)); - - if (!xlt1->ptg_tbl) - goto err; - - xlt1->t = (u8 *)ice_calloc(hw, xlt1->count, sizeof(*xlt1->t)); - if (!xlt1->t) - goto err; - - xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF]; - xlt2->count = blk_sizes[i].xlt2; - - xlt2->vsis = (struct ice_vsig_vsi *) - ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsis)); - - if (!xlt2->vsis) - goto err; - - xlt2->vsig_tbl = (struct ice_vsig_entry *) - ice_calloc(hw, xlt2->count, sizeof(*xlt2->vsig_tbl)); - if (!xlt2->vsig_tbl) - goto err; + if (xlt1->ptypes) + ice_memset(xlt1->ptypes, 0, + xlt1->count * sizeof(*xlt1->ptypes), + ICE_NONDMA_MEM); - for (j = 0; j < xlt2->count; j++) - INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst); - - xlt2->t = (u16 *)ice_calloc(hw, xlt2->count, sizeof(*xlt2->t)); - if (!xlt2->t) - goto err; + if (xlt1->ptg_tbl) + ice_memset(xlt1->ptg_tbl, 0, + ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl), + ICE_NONDMA_MEM); - prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF]; - prof->count = blk_sizes[i].prof_tcam; - prof->max_prof_id = blk_sizes[i].prof_id; - prof->cdid_bits = blk_sizes[i].prof_cdid_bits; - prof->t = (struct ice_prof_tcam_entry *) - ice_calloc(hw, prof->count, sizeof(*prof->t)); + if (xlt1->t) + ice_memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t), + 
ICE_NONDMA_MEM); - if (!prof->t) - goto err; + if (xlt2->vsis) + ice_memset(xlt2->vsis, 0, + xlt2->count * sizeof(*xlt2->vsis), + ICE_NONDMA_MEM); - prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF]; - prof_redir->count = blk_sizes[i].prof_redir; - prof_redir->t = (u8 *)ice_calloc(hw, prof_redir->count, - sizeof(*prof_redir->t)); + if (xlt2->vsig_tbl) + ice_memset(xlt2->vsig_tbl, 0, + xlt2->count * sizeof(*xlt2->vsig_tbl), + ICE_NONDMA_MEM); - if (!prof_redir->t) - goto err; + if (xlt2->t) + ice_memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t), + ICE_NONDMA_MEM); - es->sid = ice_blk_sids[i][ICE_SID_ES_OFF]; - es->count = blk_sizes[i].es; - es->fvw = blk_sizes[i].fvw; - es->t = (struct ice_fv_word *) - ice_calloc(hw, (u32)(es->count * es->fvw), - sizeof(*es->t)); - if (!es->t) - goto err; + if (prof->t) + ice_memset(prof->t, 0, prof->count * sizeof(*prof->t), + ICE_NONDMA_MEM); - es->ref_count = (u16 *) - ice_calloc(hw, es->count, sizeof(*es->ref_count)); + if (prof_redir->t) + ice_memset(prof_redir->t, 0, + prof_redir->count * sizeof(*prof_redir->t), + ICE_NONDMA_MEM); - if (!es->ref_count) - goto err; + if (es->t) + ice_memset(es->t, 0, + es->count * sizeof(*es->t) * es->fvw, + ICE_NONDMA_MEM); - es->written = (u8 *) - ice_calloc(hw, es->count, sizeof(*es->written)); + if (es->ref_count) + ice_memset(es->ref_count, 0, + es->count * sizeof(*es->ref_count), + ICE_NONDMA_MEM); - if (!es->written) - goto err; + if (es->written) + ice_memset(es->written, 0, + es->count * sizeof(*es->written), + ICE_NONDMA_MEM); } - return ICE_SUCCESS; - -err: - ice_free_hw_tbls(hw); - return ICE_ERR_NO_MEMORY; } /** * ice_prof_gen_key - generate profile ID key * @hw: pointer to the HW struct * @blk: the block in which to write profile ID to * @ptg: packet type group (PTG) portion of key * @vsig: VSIG portion of key * @cdid: CDID portion of key * @flags: flag portion of key * @vl_msk: valid mask * @dc_msk: don't care mask * @nm_msk: never match mask * @key: output of profile ID key */ static enum ice_status ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ], u8 key[ICE_TCAM_KEY_SZ]) { struct ice_prof_id_key inkey; inkey.xlt1 = ptg; inkey.xlt2_cdid = CPU_TO_LE16(vsig); inkey.flags = CPU_TO_LE16(flags); switch (hw->blk[blk].prof.cdid_bits) { case 0: break; case 2: #define ICE_CD_2_M 0xC000U #define ICE_CD_2_S 14 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_2_M); inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_2_S); break; case 4: #define ICE_CD_4_M 0xF000U #define ICE_CD_4_S 12 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_4_M); inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_4_S); break; case 8: #define ICE_CD_8_M 0xFF00U #define ICE_CD_8_S 16 inkey.xlt2_cdid &= ~CPU_TO_LE16(ICE_CD_8_M); inkey.xlt2_cdid |= CPU_TO_LE16(BIT(cdid) << ICE_CD_8_S); break; default: ice_debug(hw, ICE_DBG_PKG, "Error in profile config\n"); break; } return ice_set_key(key, ICE_TCAM_KEY_SZ, (u8 *)&inkey, vl_msk, dc_msk, nm_msk, 0, ICE_TCAM_KEY_SZ / 2); } /** * ice_tcam_write_entry - write TCAM entry * @hw: pointer to the HW struct * @blk: the block in which to write profile ID to * @idx: the entry index to write to * @prof_id: profile ID * @ptg: packet type group (PTG) portion of key * @vsig: VSIG portion of key * @cdid: CDID portion of key * @flags: flag portion of key * @vl_msk: valid mask * @dc_msk: don't care mask * @nm_msk: never match mask */ static enum ice_status ice_tcam_write_entry(struct 
ice_hw *hw, enum ice_block blk, u16 idx, u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ], u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ]) { struct ice_prof_tcam_entry; enum ice_status status; status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk, dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key); if (!status) { hw->blk[blk].prof.t[idx].addr = CPU_TO_LE16(idx); hw->blk[blk].prof.t[idx].prof_id = prof_id; } return status; } /** * ice_vsig_get_ref - returns number of VSIs belong to a VSIG * @hw: pointer to the hardware structure * @blk: HW block * @vsig: VSIG to query * @refs: pointer to variable to receive the reference count */ static enum ice_status ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *ptr; *refs = 0; if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) return ICE_ERR_DOES_NOT_EXIST; ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; while (ptr) { (*refs)++; ptr = ptr->next_vsi; } return ICE_SUCCESS; } /** * ice_has_prof_vsig - check to see if VSIG has a specific profile * @hw: pointer to the hardware structure * @blk: HW block * @vsig: VSIG to check against * @hdl: profile handle */ static bool ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *ent; LIST_FOR_EACH_ENTRY(ent, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) if (ent->profile_cookie == hdl) return true; ice_debug(hw, ICE_DBG_INIT, "Characteristic list for VSI group %d not found.\n", vsig); return false; } /** * ice_prof_bld_es - build profile ID extraction sequence changes * @hw: pointer to the HW struct * @blk: hardware block * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ static enum ice_status ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) { u16 vec_size = hw->blk[blk].es.fvw * sizeof(struct ice_fv_word); struct ice_chs_chg *tmp; LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) if (tmp->type == ICE_PTG_ES_ADD && tmp->add_prof) { u16 off = tmp->prof_id * hw->blk[blk].es.fvw; struct ice_pkg_es *p; u32 id; id = ice_sect_id(blk, ICE_VEC_TBL); p = (struct ice_pkg_es *) ice_pkg_buf_alloc_section(bld, id, ice_struct_size(p, es, 1) + vec_size - sizeof(p->es[0])); if (!p) return ICE_ERR_MAX_LIMIT; p->count = CPU_TO_LE16(1); p->offset = CPU_TO_LE16(tmp->prof_id); ice_memcpy(p->es, &hw->blk[blk].es.t[off], vec_size, ICE_NONDMA_TO_NONDMA); } return ICE_SUCCESS; } /** * ice_prof_bld_tcam - build profile ID TCAM changes * @hw: pointer to the HW struct * @blk: hardware block * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ static enum ice_status ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) { struct ice_chs_chg *tmp; LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) if (tmp->type == ICE_TCAM_ADD && tmp->add_tcam_idx) { struct ice_prof_id_section *p; u32 id; id = ice_sect_id(blk, ICE_PROF_TCAM); p = (struct ice_prof_id_section *) ice_pkg_buf_alloc_section(bld, id, ice_struct_size(p, entry, 1)); if (!p) return ICE_ERR_MAX_LIMIT; p->count = CPU_TO_LE16(1); p->entry[0].addr = CPU_TO_LE16(tmp->tcam_idx); p->entry[0].prof_id = tmp->prof_id; ice_memcpy(p->entry[0].key, &hw->blk[blk].prof.t[tmp->tcam_idx].key, sizeof(hw->blk[blk].prof.t->key), 
ICE_NONDMA_TO_NONDMA); } return ICE_SUCCESS; } /** * ice_prof_bld_xlt1 - build XLT1 changes * @blk: hardware block * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ static enum ice_status ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) { struct ice_chs_chg *tmp; LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) if (tmp->type == ICE_PTG_ES_ADD && tmp->add_ptg) { struct ice_xlt1_section *p; u32 id; id = ice_sect_id(blk, ICE_XLT1); p = (struct ice_xlt1_section *) ice_pkg_buf_alloc_section(bld, id, ice_struct_size(p, value, 1)); if (!p) return ICE_ERR_MAX_LIMIT; p->count = CPU_TO_LE16(1); p->offset = CPU_TO_LE16(tmp->ptype); p->value[0] = tmp->ptg; } return ICE_SUCCESS; } /** * ice_prof_bld_xlt2 - build XLT2 changes * @blk: hardware block * @bld: the update package buffer build to add to * @chgs: the list of changes to make in hardware */ static enum ice_status ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs) { struct ice_chs_chg *tmp; LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { struct ice_xlt2_section *p; u32 id; switch (tmp->type) { case ICE_VSIG_ADD: case ICE_VSI_MOVE: case ICE_VSIG_REM: id = ice_sect_id(blk, ICE_XLT2); p = (struct ice_xlt2_section *) ice_pkg_buf_alloc_section(bld, id, ice_struct_size(p, value, 1)); if (!p) return ICE_ERR_MAX_LIMIT; p->count = CPU_TO_LE16(1); p->offset = CPU_TO_LE16(tmp->vsi); p->value[0] = CPU_TO_LE16(tmp->vsig); break; default: break; } } return ICE_SUCCESS; } /** * ice_upd_prof_hw - update hardware using the change list * @hw: pointer to the HW struct * @blk: hardware block * @chgs: the list of changes to make in hardware */ static enum ice_status ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk, struct LIST_HEAD_TYPE *chgs) { struct ice_buf_build *b; struct ice_chs_chg *tmp; enum ice_status status; u16 pkg_sects; u16 xlt1 = 0; u16 xlt2 = 0; u16 tcam = 0; u16 es = 0; u16 sects; /* count number of sections we need */ LIST_FOR_EACH_ENTRY(tmp, chgs, ice_chs_chg, list_entry) { switch (tmp->type) { case ICE_PTG_ES_ADD: if (tmp->add_ptg) xlt1++; if (tmp->add_prof) es++; break; case ICE_TCAM_ADD: tcam++; break; case ICE_VSIG_ADD: case ICE_VSI_MOVE: case ICE_VSIG_REM: xlt2++; break; default: break; } } sects = xlt1 + xlt2 + tcam + es; if (!sects) return ICE_SUCCESS; /* Build update package buffer */ b = ice_pkg_buf_alloc(hw); if (!b) return ICE_ERR_NO_MEMORY; status = ice_pkg_buf_reserve_section(b, sects); if (status) goto error_tmp; /* Preserve order of table update: ES, TCAM, PTG, VSIG */ if (es) { status = ice_prof_bld_es(hw, blk, b, chgs); if (status) goto error_tmp; } if (tcam) { status = ice_prof_bld_tcam(hw, blk, b, chgs); if (status) goto error_tmp; } if (xlt1) { status = ice_prof_bld_xlt1(blk, b, chgs); if (status) goto error_tmp; } if (xlt2) { status = ice_prof_bld_xlt2(blk, b, chgs); if (status) goto error_tmp; } /* After package buffer build check if the section count in buffer is * non-zero and matches the number of sections detected for package * update. 
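	 *
	 * For instance (illustrative counts): a change list that produced
	 * es = 1, tcam = 2, xlt1 = 1 and xlt2 = 0 gives sects = 4, so the
	 * built buffer must report exactly four active sections; any
	 * mismatch is treated as a build error below.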
*/ pkg_sects = ice_pkg_buf_get_active_sections(b); if (!pkg_sects || pkg_sects != sects) { status = ICE_ERR_INVAL_SIZE; goto error_tmp; } /* update package */ status = ice_update_pkg(hw, ice_pkg_buf(b), 1); if (status == ICE_ERR_AQ_ERROR) ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n"); error_tmp: ice_pkg_buf_free(hw, b); return status; } /** * ice_add_prof - add profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID - * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits) + * @ptypes: bitmap indicating ptypes (ICE_FLOW_PTYPE_MAX bits) * @es: extraction sequence (length of array is determined by the block) * * This function registers a profile, which matches a set of PTGs with a * particular extraction sequence. While the hardware profile is allocated * it will not be written until the first call to ice_add_flow that specifies * the ID value used here. */ enum ice_status -ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - struct ice_fv_word *es) +ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, + ice_bitmap_t *ptypes, struct ice_fv_word *es) { - u32 bytes = DIVIDE_AND_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); struct ice_prof_map *prof; enum ice_status status; - u8 byte = 0; u8 prof_id; + u16 ptype; ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); /* search for existing profile */ status = ice_find_prof_id(hw, blk, es, &prof_id); if (status) { /* allocate profile ID */ status = ice_alloc_prof_id(hw, blk, &prof_id); if (status) goto err_ice_add_prof; /* and write new es */ ice_write_es(hw, blk, prof_id, es); } ice_prof_inc_ref(hw, blk, prof_id); /* add profile info */ prof = (struct ice_prof_map *)ice_malloc(hw, sizeof(*prof)); if (!prof) goto err_ice_add_prof; prof->profile_cookie = id; prof->prof_id = prof_id; prof->ptg_cnt = 0; prof->context = 0; /* build list of ptgs */ - while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) { - u8 bit; + ice_for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) { + u8 ptg; - if (!ptypes[byte]) { - bytes--; - byte++; + /* The package should place all ptypes in a non-zero + * PTG, so the following call should never fail. + */ + if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) continue; - } - - /* Examine 8 bits per byte */ - ice_for_each_set_bit(bit, (ice_bitmap_t *)&ptypes[byte], - BITS_PER_BYTE) { - u16 ptype; - u8 ptg; - - ptype = byte * BITS_PER_BYTE + bit; - - /* The package should place all ptypes in a non-zero - * PTG, so the following call should never fail. - */ - if (ice_ptg_find_ptype(hw, blk, ptype, &ptg)) - continue; - - /* If PTG is already added, skip and continue */ - if (ice_is_bit_set(ptgs_used, ptg)) - continue; - ice_set_bit(ptg, ptgs_used); - prof->ptg[prof->ptg_cnt] = ptg; + /* If PTG is already added, skip and continue */ + if (ice_is_bit_set(ptgs_used, ptg)) + continue; - if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) - break; - } + ice_set_bit(ptg, ptgs_used); + prof->ptg[prof->ptg_cnt] = ptg; - bytes--; - byte++; + if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE) + break; } LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map); status = ICE_SUCCESS; err_ice_add_prof: ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_search_prof_id - Search for a profile tracking ID * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * * This will search for a profile tracking ID which was previously added. 
* The profile map lock should be held before calling this function. */ struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *entry = NULL; struct ice_prof_map *map; LIST_FOR_EACH_ENTRY(map, &hw->blk[blk].es.prof_map, ice_prof_map, list) if (map->profile_cookie == id) { entry = map; break; } return entry; } /** * ice_set_prof_context - Set context for a given profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * @cntxt: context */ enum ice_status ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_prof_map *entry; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); entry = ice_search_prof_id(hw, blk, id); if (entry) { entry->context = cntxt; status = ICE_SUCCESS; } ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_get_prof_context - Get context for a given profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * @cntxt: pointer to variable to receive the context */ enum ice_status ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_prof_map *entry; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); entry = ice_search_prof_id(hw, blk, id); if (entry) { *cntxt = entry->context; status = ICE_SUCCESS; } ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_vsig_prof_id_count - count profiles in a VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsig: VSIG to remove the profile from */ static u16 ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig) { u16 idx = vsig & ICE_VSIG_IDX_M, count = 0; struct ice_vsig_prof *p; LIST_FOR_EACH_ENTRY(p, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) count++; return count; } /** * ice_rel_tcam_idx - release a TCAM index * @hw: pointer to the HW struct * @blk: hardware block * @idx: the index to release */ static enum ice_status ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx) { /* Masks to invoke a never match entry */ u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 }; enum ice_status status; /* write the TCAM entry */ status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk, dc_msk, nm_msk); if (status) return status; /* release the TCAM entry */ status = ice_free_tcam_ent(hw, blk, idx); return status; } /** * ice_rem_prof_id - remove one profile from a VSIG * @hw: pointer to the HW struct * @blk: hardware block * @prof: pointer to profile structure to remove */ static enum ice_status ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk, struct ice_vsig_prof *prof) { enum ice_status status; u16 i; for (i = 0; i < prof->tcam_count; i++) if (prof->tcam[i].in_use) { prof->tcam[i].in_use = false; status = ice_rel_tcam_idx(hw, blk, prof->tcam[i].tcam_idx); if (status) return ICE_ERR_HW_TABLE; } return ICE_SUCCESS; } /** * ice_rem_vsig - remove VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsig: the VSIG to remove * @chg: the change list */ static enum ice_status ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct LIST_HEAD_TYPE *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_vsi *vsi_cur; struct ice_vsig_prof *d, *t; - enum ice_status status; /* 
remove TCAM entries */ LIST_FOR_EACH_ENTRY_SAFE(d, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) { + enum ice_status status; + status = ice_rem_prof_id(hw, blk, d); if (status) return status; LIST_DEL(&d->list); ice_free(hw, d); } /* Move all VSIS associated with this VSIG to the default VSIG */ vsi_cur = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi; /* If the VSIG has at least 1 VSI then iterate through the list * and remove the VSIs before deleting the group. */ if (vsi_cur) do { struct ice_vsig_vsi *tmp = vsi_cur->next_vsi; struct ice_chs_chg *p; p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) return ICE_ERR_NO_MEMORY; p->type = ICE_VSIG_REM; p->orig_vsig = vsig; p->vsig = ICE_DEFAULT_VSIG; - p->vsi = vsi_cur - hw->blk[blk].xlt2.vsis; + p->vsi = (u16)(vsi_cur - hw->blk[blk].xlt2.vsis); LIST_ADD(&p->list_entry, chg); vsi_cur = tmp; } while (vsi_cur); return ice_vsig_free(hw, blk, vsig); } /** * ice_rem_prof_id_vsig - remove a specific profile from a VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsig: VSIG to remove the profile from * @hdl: profile handle indicating which profile to remove * @chg: list to receive a record of changes */ static enum ice_status ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, struct LIST_HEAD_TYPE *chg) { u16 idx = vsig & ICE_VSIG_IDX_M; struct ice_vsig_prof *p, *t; - enum ice_status status; LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) if (p->profile_cookie == hdl) { + enum ice_status status; + if (ice_vsig_prof_id_count(hw, blk, vsig) == 1) /* this is the last profile, remove the VSIG */ return ice_rem_vsig(hw, blk, vsig, chg); status = ice_rem_prof_id(hw, blk, p); if (!status) { LIST_DEL(&p->list); ice_free(hw, p); } return status; } return ICE_ERR_DOES_NOT_EXIST; } /** * ice_rem_flow_all - remove all flows with a particular profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID */ static enum ice_status ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_chs_chg *del, *tmp; enum ice_status status; struct LIST_HEAD_TYPE chg; u16 i; INIT_LIST_HEAD(&chg); for (i = 1; i < ICE_MAX_VSIGS; i++) if (hw->blk[blk].xlt2.vsig_tbl[i].in_use) { if (ice_has_prof_vsig(hw, blk, i, id)) { status = ice_rem_prof_id_vsig(hw, blk, i, id, &chg); if (status) goto err_ice_rem_flow_all; } } status = ice_upd_prof_hw(hw, blk, &chg); err_ice_rem_flow_all: LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) { LIST_DEL(&del->list_entry); ice_free(hw, del); } return status; } /** * ice_rem_prof - remove profile * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * * This will remove the profile specified by the ID parameter, which was * previously created through ice_add_prof. If any existing entries * are associated with this profile, they will be removed as well. 
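 *
 * Usage sketch (illustrative; the ID is whatever tracking cookie the caller
 * originally passed to ice_add_prof()):
 *
 *	status = ice_rem_prof(hw, ICE_BLK_RSS, id);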
*/ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *pmap; enum ice_status status; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_rem_prof; } /* remove all flows with this profile */ status = ice_rem_flow_all(hw, blk, pmap->profile_cookie); if (status) goto err_ice_rem_prof; /* dereference profile, and possibly remove */ ice_prof_dec_ref(hw, blk, pmap->prof_id); LIST_DEL(&pmap->list); ice_free(hw, pmap); err_ice_rem_prof: ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_get_prof - get profile * @hw: pointer to the HW struct * @blk: hardware block * @hdl: profile handle * @chg: change list */ static enum ice_status ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct LIST_HEAD_TYPE *chg) { enum ice_status status = ICE_SUCCESS; struct ice_prof_map *map; struct ice_chs_chg *p; u16 i; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); if (!map) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_get_prof; } for (i = 0; i < map->ptg_cnt; i++) if (!hw->blk[blk].es.written[map->prof_id]) { /* add ES to change list */ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) { status = ICE_ERR_NO_MEMORY; goto err_ice_get_prof; } p->type = ICE_PTG_ES_ADD; p->ptype = 0; p->ptg = map->ptg[i]; p->add_ptg = 0; p->add_prof = 1; p->prof_id = map->prof_id; hw->blk[blk].es.written[map->prof_id] = true; LIST_ADD(&p->list_entry, chg); } err_ice_get_prof: ice_release_lock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ return status; } /** * ice_get_profs_vsig - get a copy of the list of profiles from a VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsig: VSIG from which to copy the list * @lst: output list * * This routine makes a copy of the list of profiles in the specified VSIG. 
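 *
 * The caller owns the copied entries and must free them when done; a minimal
 * cleanup sketch (mirroring the error path below):
 *
 *	LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
 *		LIST_DEL(&ent1->list);
 *		ice_free(hw, ent1);
 *	}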
*/ static enum ice_status ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct LIST_HEAD_TYPE *lst) { struct ice_vsig_prof *ent1, *ent2; u16 idx = vsig & ICE_VSIG_IDX_M; LIST_FOR_EACH_ENTRY(ent1, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) { struct ice_vsig_prof *p; /* copy to the input list */ p = (struct ice_vsig_prof *)ice_memdup(hw, ent1, sizeof(*p), ICE_NONDMA_TO_NONDMA); if (!p) goto err_ice_get_profs_vsig; LIST_ADD_TAIL(&p->list, lst); } return ICE_SUCCESS; err_ice_get_profs_vsig: LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) { LIST_DEL(&ent1->list); ice_free(hw, ent1); } return ICE_ERR_NO_MEMORY; } /** * ice_add_prof_to_lst - add profile entry to a list * @hw: pointer to the HW struct * @blk: hardware block * @lst: the list to be added to * @hdl: profile handle of entry to add */ static enum ice_status ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct LIST_HEAD_TYPE *lst, u64 hdl) { enum ice_status status = ICE_SUCCESS; struct ice_prof_map *map; struct ice_vsig_prof *p; u16 i; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); if (!map) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_add_prof_to_lst; } p = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*p)); if (!p) { status = ICE_ERR_NO_MEMORY; goto err_ice_add_prof_to_lst; } p->profile_cookie = map->profile_cookie; p->prof_id = map->prof_id; p->tcam_count = map->ptg_cnt; for (i = 0; i < map->ptg_cnt; i++) { p->tcam[i].prof_id = map->prof_id; p->tcam[i].tcam_idx = ICE_INVALID_TCAM; p->tcam[i].ptg = map->ptg[i]; } LIST_ADD(&p->list, lst); err_ice_add_prof_to_lst: ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_move_vsi - move VSI to another VSIG * @hw: pointer to the HW struct * @blk: hardware block * @vsi: the VSI to move * @vsig: the VSIG to move the VSI to * @chg: the change list */ static enum ice_status ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig, struct LIST_HEAD_TYPE *chg) { enum ice_status status; struct ice_chs_chg *p; u16 orig_vsig; p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) return ICE_ERR_NO_MEMORY; status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig); if (!status) status = ice_vsig_add_mv_vsi(hw, blk, vsi, vsig); if (status) { ice_free(hw, p); return status; } p->type = ICE_VSI_MOVE; p->vsi = vsi; p->orig_vsig = orig_vsig; p->vsig = vsig; LIST_ADD(&p->list_entry, chg); return ICE_SUCCESS; } /** * ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list * @hw: pointer to the HW struct * @idx: the index of the TCAM entry to remove * @chg: the list of change structures to search */ static void ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg) { struct ice_chs_chg *pos, *tmp; LIST_FOR_EACH_ENTRY_SAFE(tmp, pos, chg, ice_chs_chg, list_entry) if (tmp->type == ICE_TCAM_ADD && tmp->tcam_idx == idx) { LIST_DEL(&tmp->list_entry); ice_free(hw, tmp); } } /** * ice_prof_tcam_ena_dis - add enable or disable TCAM change * @hw: pointer to the HW struct * @blk: hardware block * @enable: true to enable, false to disable * @vsig: the VSIG of the TCAM entry * @tcam: pointer the TCAM info structure of the TCAM to disable * @chg: the change list * * This function appends an enable or disable TCAM entry in the change log */ static enum ice_status ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable, u16 vsig, struct ice_tcam_inf *tcam, struct LIST_HEAD_TYPE *chg) { enum ice_status status; struct 
ice_chs_chg *p; u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; /* if disabling, free the TCAM */ if (!enable) { status = ice_rel_tcam_idx(hw, blk, tcam->tcam_idx); /* if we have already created a change for this TCAM entry, then * we need to remove that entry, in order to prevent writing to * a TCAM entry we no longer will have ownership of. */ ice_rem_chg_tcam_ent(hw, tcam->tcam_idx, chg); tcam->tcam_idx = 0; tcam->in_use = 0; return status; } /* for re-enabling, reallocate a TCAM */ status = ice_alloc_tcam_ent(hw, blk, true, &tcam->tcam_idx); if (status) return status; /* add TCAM to change list */ p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p)); if (!p) return ICE_ERR_NO_MEMORY; status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id, tcam->ptg, vsig, 0, 0, vl_msk, dc_msk, nm_msk); if (status) goto err_ice_prof_tcam_ena_dis; tcam->in_use = 1; p->type = ICE_TCAM_ADD; p->add_tcam_idx = true; p->prof_id = tcam->prof_id; p->ptg = tcam->ptg; p->vsig = 0; p->tcam_idx = tcam->tcam_idx; /* log change */ LIST_ADD(&p->list_entry, chg); return ICE_SUCCESS; err_ice_prof_tcam_ena_dis: ice_free(hw, p); return status; } /** * ice_adj_prof_priorities - adjust profile based on priorities * @hw: pointer to the HW struct * @blk: hardware block * @vsig: the VSIG for which to adjust profile priorities * @chg: the change list */ static enum ice_status ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig, struct LIST_HEAD_TYPE *chg) { ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT); enum ice_status status = ICE_SUCCESS; struct ice_vsig_prof *t; u16 idx; ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT); idx = vsig & ICE_VSIG_IDX_M; /* Priority is based on the order in which the profiles are added. The * newest added profile has highest priority and the oldest added * profile has the lowest priority. Since the profile property list for * a VSIG is sorted from newest to oldest, this code traverses the list * in order and enables the first of each PTG that it finds (that is not * already enabled); it also disables any duplicate PTGs that it finds * in the older profiles (that are currently enabled). */ LIST_FOR_EACH_ENTRY(t, &hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst, ice_vsig_prof, list) { u16 i; for (i = 0; i < t->tcam_count; i++) { bool used; /* Scan the priorities from newest to oldest. * Make sure that the newest profiles take priority. 
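			 *
			 * Example (illustrative): if a newer profile already
			 * enabled PTG 5, then an older profile's PTG-5 entry
			 * is found here with used == true and in_use == true,
			 * and is therefore released and marked never-match.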
/**
 * ice_add_prof_id_vsig - add profile to VSIG
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsig: the VSIG to which this profile is to be added
 * @hdl: the profile handle indicating the profile to add
 * @rev: true to add entries to the end of the list
 * @chg: the change list
 */
static enum ice_status
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
		     bool rev, struct LIST_HEAD_TYPE *chg)
{
	/* Masks that ignore flags */
	u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
	u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
	enum ice_status status = ICE_SUCCESS;
	struct ice_prof_map *map;
	struct ice_vsig_prof *t;
	struct ice_chs_chg *p;
	u16 vsig_idx, i;

	/* Error, if this VSIG already has this profile */
	if (ice_has_prof_vsig(hw, blk, vsig, hdl))
		return ICE_ERR_ALREADY_EXISTS;

	/* new VSIG profile structure */
	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
	if (!t)
		return ICE_ERR_NO_MEMORY;

	ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
	/* Get the details on the profile specified by the handle ID */
	map = ice_search_prof_id(hw, blk, hdl);
	if (!map) {
		status = ICE_ERR_DOES_NOT_EXIST;
		goto err_ice_add_prof_id_vsig;
	}

	t->profile_cookie = map->profile_cookie;
	t->prof_id = map->prof_id;
	t->tcam_count = map->ptg_cnt;

	/* create TCAM entries */
	for (i = 0; i < map->ptg_cnt; i++) {
		u16 tcam_idx;

		/* add TCAM to change list */
		p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
		if (!p) {
			status = ICE_ERR_NO_MEMORY;
			goto err_ice_add_prof_id_vsig;
		}

		/* allocate the TCAM entry index */
		status = ice_alloc_tcam_ent(hw, blk, true, &tcam_idx);
		if (status) {
			ice_free(hw, p);
			goto err_ice_add_prof_id_vsig;
		}

		t->tcam[i].ptg = map->ptg[i];
		t->tcam[i].prof_id = map->prof_id;
		t->tcam[i].tcam_idx = tcam_idx;
		t->tcam[i].in_use = true;

		p->type = ICE_TCAM_ADD;
		p->add_tcam_idx = true;
		p->prof_id = t->tcam[i].prof_id;
		p->ptg = t->tcam[i].ptg;
		p->vsig = vsig;
		p->tcam_idx = t->tcam[i].tcam_idx;

		/* write the TCAM entry */
		status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
					      t->tcam[i].prof_id,
					      t->tcam[i].ptg, vsig, 0, 0,
					      vl_msk, dc_msk, nm_msk);
		if (status) {
			ice_free(hw, p);
			goto err_ice_add_prof_id_vsig;
		}

		/* log change */
		LIST_ADD(&p->list_entry, chg);
	}

	/* add profile to VSIG */
	vsig_idx = vsig & ICE_VSIG_IDX_M;
	if (rev)
		LIST_ADD_TAIL(&t->list,
			      &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);
	else
		LIST_ADD(&t->list,
			 &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst);

	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	return status;

err_ice_add_prof_id_vsig:
	ice_release_lock(&hw->blk[blk].es.prof_map_lock);
	/* let caller clean up the change list */
	ice_free(hw, t);
	return status;
}
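/*
 * Note on the rev flag above: prop_lst is kept newest-first, so a genuinely
 * new profile is inserted at the head with LIST_ADD. When an existing
 * property list is replayed into a fresh VSIG (see ice_create_vsig_from_lst
 * below), the entries already arrive newest-first, so each one is appended
 * with LIST_ADD_TAIL to preserve its relative priority.
 */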
/**
 * ice_create_prof_id_vsig - add a new VSIG with a single profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @hdl: the profile handle of the profile that will be added to the VSIG
 * @chg: the change list
 */
static enum ice_status
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
			struct LIST_HEAD_TYPE *chg)
{
	enum ice_status status;
	struct ice_chs_chg *p;
	u16 new_vsig;

	p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
	if (!p)
		return ICE_ERR_NO_MEMORY;

	new_vsig = ice_vsig_alloc(hw, blk);
	if (!new_vsig) {
		status = ICE_ERR_HW_TABLE;
		goto err_ice_create_prof_id_vsig;
	}

	status = ice_move_vsi(hw, blk, vsi, new_vsig, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	status = ice_add_prof_id_vsig(hw, blk, new_vsig, hdl, false, chg);
	if (status)
		goto err_ice_create_prof_id_vsig;

	p->type = ICE_VSIG_ADD;
	p->vsi = vsi;
	p->orig_vsig = ICE_DEFAULT_VSIG;
	p->vsig = new_vsig;

	LIST_ADD(&p->list_entry, chg);

	return ICE_SUCCESS;

err_ice_create_prof_id_vsig:
	/* let caller clean up the change list */
	ice_free(hw, p);
	return status;
}

/**
 * ice_create_vsig_from_lst - create a new VSIG with a list of profiles
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the initial VSI that will be in VSIG
 * @lst: the list of profiles that will be added to the VSIG
 * @new_vsig: return of new VSIG
 * @chg: the change list
 */
static enum ice_status
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
			 struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
			 struct LIST_HEAD_TYPE *chg)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	u16 vsig;

	vsig = ice_vsig_alloc(hw, blk);
	if (!vsig)
		return ICE_ERR_HW_TABLE;

	status = ice_move_vsi(hw, blk, vsi, vsig, chg);
	if (status)
		return status;

	LIST_FOR_EACH_ENTRY(t, lst, ice_vsig_prof, list) {
		/* Reverse the order here since we are copying the list */
		status = ice_add_prof_id_vsig(hw, blk, vsig, t->profile_cookie,
					      true, chg);
		if (status)
			return status;
	}

	*new_vsig = vsig;

	return ICE_SUCCESS;
}

/**
 * ice_find_prof_vsig - find a VSIG with a specific profile handle
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @hdl: the profile handle of the profile to search for
 * @vsig: returns the VSIG with the matching profile
 */
static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
	struct ice_vsig_prof *t;
	enum ice_status status;
	struct LIST_HEAD_TYPE lst;

	INIT_LIST_HEAD(&lst);

	t = (struct ice_vsig_prof *)ice_malloc(hw, sizeof(*t));
	if (!t)
		return false;

	t->profile_cookie = hdl;
	LIST_ADD(&t->list, &lst);

	status = ice_find_dup_props_vsig(hw, blk, &lst, vsig);

	LIST_DEL(&t->list);
	ice_free(hw, t);

	return status == ICE_SUCCESS;
}
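/*
 * ice_find_prof_vsig() reuses ice_find_dup_props_vsig() by wrapping the
 * single handle in a throwaway one-entry list: finding a VSIG whose property
 * list is exactly {hdl} is the same query as finding a VSIG that carries
 * only this profile.
 */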
/**
 * ice_add_vsi_flow - add VSI flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: input VSI
 * @vsig: target VSIG to include the input VSI
 *
 * Calling this function will add the VSI to a given VSIG and
 * update the HW tables accordingly. This call can be used to
 * add multiple VSIs to a VSIG if we know beforehand that those
 * VSIs have the same characteristics as the VSIG. This saves the
 * time of generating a new VSIG and TCAMs, and the subsequent
 * rollback, when a matching VSIG already exists.
 */
enum ice_status
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
	struct ice_chs_chg *tmp, *del;
	struct LIST_HEAD_TYPE chg;
	enum ice_status status;

	/* if target VSIG is default the move is invalid */
	if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
		return ICE_ERR_PARAM;

	INIT_LIST_HEAD(&chg);

	/* move VSI to the VSIG that matches */
	status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
	/* update hardware on success */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	return status;
}
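/*
 * Usage sketch (illustrative only; first_vsi and peer_vsi are hypothetical):
 * once one VSI has established a matching VSIG via ice_add_prof_id_flow(),
 * further VSIs with identical characteristics can be attached directly:
 *
 *	u16 vsig;
 *
 *	if (!ice_vsig_find_vsi(hw, ICE_BLK_RSS, first_vsi, &vsig) && vsig)
 *		status = ice_add_vsi_flow(hw, ICE_BLK_RSS, peer_vsi, vsig);
 */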
/**
 * ice_add_prof_id_flow - add profile flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI to enable with the profile specified by ID
 * @hdl: profile handle
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSI specified by the vsi
 * parameter. Once successfully called, the flow will be enabled.
 */
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct LIST_HEAD_TYPE union_lst;
	enum ice_status status;
	struct LIST_HEAD_TYPE chg;
	u16 vsig;

	INIT_LIST_HEAD(&union_lst);
	INIT_LIST_HEAD(&chg);

	/* Get profile */
	status = ice_get_prof(hw, blk, hdl, &chg);
	if (status)
		return status;

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool only_vsi;
		u16 or_vsig;
		u16 ref;

		/* found in VSIG */
		or_vsig = vsig;

		/* make sure that there is no overlap/conflict between the new
		 * characteristics and the existing ones; we don't support that
		 * scenario
		 */
		if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto err_ice_add_prof_id_flow;
		}

		/* last VSI in the VSIG? */
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_add_prof_id_flow;
		only_vsi = (ref == 1);

		/* create a union of the current profiles and the one being
		 * added
		 */
		status = ice_get_profs_vsig(hw, blk, vsig, &union_lst);
		if (status)
			goto err_ice_add_prof_id_flow;

		status = ice_add_prof_to_lst(hw, blk, &union_lst, hdl);
		if (status)
			goto err_ice_add_prof_id_flow;

		/* search for an existing VSIG with an exact characteristic
		 * match
		 */
		status = ice_find_dup_props_vsig(hw, blk, &union_lst, &vsig);
		if (!status) {
			/* move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* VSI has been moved out of or_vsig. If the or_vsig had
			 * only that VSI it is now empty and can be removed.
			 */
			if (only_vsi) {
				status = ice_rem_vsig(hw, blk, or_vsig, &chg);
				if (status)
					goto err_ice_add_prof_id_flow;
			}
		} else if (only_vsi) {
			/* If the original VSIG only contains one VSI, then it
			 * will be the requesting VSI. In this case the VSI is
			 * not sharing entries and we can simply add the new
			 * profile to the VSIG.
			 */
			status = ice_add_prof_id_vsig(hw, blk, vsig, hdl, false,
						      &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* No match, so we need a new VSIG */
			status = ice_create_vsig_from_lst(hw, blk, vsi,
							  &union_lst, &vsig,
							  &chg);
			if (status)
				goto err_ice_add_prof_id_flow;

			/* Adjust priorities */
			status = ice_adj_prof_priorities(hw, blk, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	} else {
		/* need to find or add a VSIG */
		/* search for an existing VSIG with an exact characteristic
		 * match
		 */
		if (ice_find_prof_vsig(hw, blk, hdl, &vsig)) {
			/* found an exact match */
			/* add or move VSI to the VSIG that matches */
			status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		} else {
			/* we did not find an exact match */
			/* we need to add a VSIG */
			status = ice_create_prof_id_vsig(hw, blk, vsi, hdl,
							 &chg);
			if (status)
				goto err_ice_add_prof_id_flow;
		}
	}

	/* update hardware */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_add_prof_id_flow:
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &union_lst, ice_vsig_prof, list) {
		LIST_DEL(&del1->list);
		ice_free(hw, del1);
	}

	return status;
}

/**
 * ice_add_flow - add flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: array of VSIs to enable with the profile specified by ID
 * @count: number of elements in the VSI array
 * @id: profile tracking ID
 *
 * Calling this function will update the hardware tables to enable the
 * profile indicated by the ID parameter for the VSIs specified in the VSI
 * array. Once successfully called, the flow will be enabled.
 */
enum ice_status
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
	     u64 id)
{
-	enum ice_status status;
	u16 i;

	for (i = 0; i < count; i++) {
+		enum ice_status status;
+
		status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
		if (status)
			return status;
	}

	return ICE_SUCCESS;
}

/**
 * ice_rem_prof_from_list - remove a profile from list
 * @hw: pointer to the HW struct
 * @lst: list to remove the profile from
 * @hdl: the profile handle indicating the profile to remove
 */
static enum ice_status
ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
{
	struct ice_vsig_prof *ent, *tmp;

	LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, lst, ice_vsig_prof, list)
		if (ent->profile_cookie == hdl) {
			LIST_DEL(&ent->list);
			ice_free(hw, ent);
			return ICE_SUCCESS;
		}

	return ICE_ERR_DOES_NOT_EXIST;
}
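/*
 * Pairing sketch (illustrative only; the vsis array and the cookie value are
 * hypothetical): flows enabled across a VSI array with ice_add_flow() are
 * torn down with ice_rem_flow() using the same tracking ID:
 *
 *	u16 vsis[2] = { 5, 9 };
 *
 *	status = ice_add_flow(hw, ICE_BLK_RSS, vsis, 2, cookie);
 *	(use the flow)
 *	status = ice_rem_flow(hw, ICE_BLK_RSS, vsis, 2, cookie);
 */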
/**
 * ice_rem_prof_id_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: the VSI from which to remove the profile specified by ID
 * @hdl: profile tracking handle
 *
 * Calling this function will update the hardware tables to remove the
 * profile indicated by the ID parameter for the VSI specified by the vsi
 * parameter. Once successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
	struct ice_vsig_prof *tmp1, *del1;
	struct ice_chs_chg *tmp, *del;
	struct LIST_HEAD_TYPE chg, copy;
	enum ice_status status;
	u16 vsig;

	INIT_LIST_HEAD(&copy);
	INIT_LIST_HEAD(&chg);

	/* determine if VSI is already part of a VSIG */
	status = ice_vsig_find_vsi(hw, blk, vsi, &vsig);
	if (!status && vsig) {
		bool last_profile;
		bool only_vsi;
		u16 ref;

		/* found in VSIG */
		last_profile = ice_vsig_prof_id_count(hw, blk, vsig) == 1;
		status = ice_vsig_get_ref(hw, blk, vsig, &ref);
		if (status)
			goto err_ice_rem_prof_id_flow;
		only_vsi = (ref == 1);

		if (only_vsi) {
			/* If the original VSIG only contains one reference,
			 * which will be the requesting VSI, then the VSI is not
			 * sharing entries and we can simply remove the specific
			 * characteristics from the VSIG.
			 */
			if (last_profile) {
				/* If there are no profiles left for this VSIG,
				 * then simply remove the VSIG.
				 */
				status = ice_rem_vsig(hw, blk, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				status = ice_rem_prof_id_vsig(hw, blk, vsig,
							      hdl, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		} else {
			/* Make a copy of the VSIG's list of Profiles */
			status = ice_get_profs_vsig(hw, blk, vsig, &copy);
			if (status)
				goto err_ice_rem_prof_id_flow;

			/* Remove specified profile entry from the list */
			status = ice_rem_prof_from_list(hw, &copy, hdl);
			if (status)
				goto err_ice_rem_prof_id_flow;

			if (LIST_EMPTY(&copy)) {
				status = ice_move_vsi(hw, blk, vsi,
						      ICE_DEFAULT_VSIG, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

			} else if (!ice_find_dup_props_vsig(hw, blk, &copy,
							    &vsig)) {
				/* Found a VSIG with a matching profile list;
				 * move the VSI to that VSIG.
				 */
				status = ice_move_vsi(hw, blk, vsi, vsig, &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			} else {
				/* since no existing VSIG supports this
				 * characteristic pattern, we need to create a
				 * new VSIG and TCAM entries
				 */
				status = ice_create_vsig_from_lst(hw, blk, vsi,
								  &copy, &vsig,
								  &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;

				/* Adjust priorities */
				status = ice_adj_prof_priorities(hw, blk, vsig,
								 &chg);
				if (status)
					goto err_ice_rem_prof_id_flow;
			}
		}
	} else {
		status = ICE_ERR_DOES_NOT_EXIST;
	}

	/* update hardware tables */
	if (!status)
		status = ice_upd_prof_hw(hw, blk, &chg);

err_ice_rem_prof_id_flow:
	LIST_FOR_EACH_ENTRY_SAFE(del, tmp, &chg, ice_chs_chg, list_entry) {
		LIST_DEL(&del->list_entry);
		ice_free(hw, del);
	}

	LIST_FOR_EACH_ENTRY_SAFE(del1, tmp1, &copy, ice_vsig_prof, list) {
		LIST_DEL(&del1->list);
		ice_free(hw, del1);
	}

	return status;
}
/**
 * ice_rem_flow - remove flow
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @vsi: array of VSIs from which to remove the profile specified by ID
 * @count: number of elements in the VSI array
 * @id: profile tracking ID
 *
 * The function will remove flows from the specified VSIs that were enabled
 * using ice_add_flow. The ID value indicates which profile will be removed.
 * Once successfully called, the flow will be disabled.
 */
enum ice_status
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
	     u64 id)
{
-	enum ice_status status;
	u16 i;

	for (i = 0; i < count; i++) {
+		enum ice_status status;
+
		status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
		if (status)
			return status;
	}

	return ICE_SUCCESS;
}
diff --git a/sys/dev/ice/ice_flex_pipe.h b/sys/dev/ice/ice_flex_pipe.h
index 5c1dd7157537..ada71b2d446a 100644
--- a/sys/dev/ice/ice_flex_pipe.h
+++ b/sys/dev/ice/ice_flex_pipe.h
@@ -1,128 +1,108 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2022, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
*/ /*$FreeBSD$*/ #ifndef _ICE_FLEX_PIPE_H_ #define _ICE_FLEX_PIPE_H_ #include "ice_type.h" -/* Package minimal version supported */ -#define ICE_PKG_SUPP_VER_MAJ 1 -#define ICE_PKG_SUPP_VER_MNR 3 - -/* Package format version */ -#define ICE_PKG_FMT_VER_MAJ 1 -#define ICE_PKG_FMT_VER_MNR 0 -#define ICE_PKG_FMT_VER_UPD 0 -#define ICE_PKG_FMT_VER_DFT 0 - -#define ICE_PKG_CNT 4 - -enum ice_status -ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); enum ice_status ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx, u8 *prot, u16 *off); enum ice_status ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type, u16 *value); void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type, ice_bitmap_t *bm); void ice_init_prof_result_bm(struct ice_hw *hw); enum ice_status -ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt, - ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list); -enum ice_status -ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); -u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); -enum ice_status ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type, u16 *port); enum ice_status ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port); enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all); bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index); bool ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type); enum ice_status ice_replay_tunnels(struct ice_hw *hw); /* XLT1/PType group functions */ enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk); void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg); /* XLT2/VSI group functions */ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk); enum ice_status ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig); enum ice_status -ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], - struct ice_fv_word *es); +ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, + ice_bitmap_t *ptypes, struct ice_fv_word *es); struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); enum ice_status ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig); enum ice_status ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); enum ice_status ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); enum ice_status ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt); enum ice_status ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt); -enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); -enum ice_status -ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); enum ice_status ice_init_hw_tbls(struct ice_hw *hw); -void ice_free_seg(struct ice_hw *hw); void ice_fill_blk_tbls(struct ice_hw *hw); void ice_clear_hw_tbls(struct ice_hw *hw); void ice_free_hw_tbls(struct ice_hw *hw); enum ice_status ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id); enum ice_status ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count, u64 id); enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id); -struct ice_buf_build * 
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size, - void **section); -struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld); -void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld); + +void ice_fill_blk_tbls(struct ice_hw *hw); + +/* To support tunneling entries by PF, the package will append the PF number to + * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc. + */ +#define ICE_TNL_PRE "TNL_" + +void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val); #endif /* _ICE_FLEX_PIPE_H_ */ diff --git a/sys/dev/ice/ice_flex_type.h b/sys/dev/ice/ice_flex_type.h index bd3b6ddeaf7b..145797f34b7a 100644 --- a/sys/dev/ice/ice_flex_type.h +++ b/sys/dev/ice/ice_flex_type.h @@ -1,761 +1,510 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _ICE_FLEX_TYPE_H_ #define _ICE_FLEX_TYPE_H_ #define ICE_FV_OFFSET_INVAL 0x1FF #pragma pack(1) /* Extraction Sequence (Field Vector) Table */ struct ice_fv_word { u8 prot_id; u16 off; /* Offset within the protocol header */ u8 resvrd; }; + #pragma pack() #define ICE_MAX_NUM_PROFILES 256 #define ICE_MAX_FV_WORDS 48 struct ice_fv { struct ice_fv_word ew[ICE_MAX_FV_WORDS]; }; -/* Package and segment headers and tables */ -struct ice_pkg_hdr { - struct ice_pkg_ver pkg_format_ver; - __le32 seg_count; - __le32 seg_offset[STRUCT_HACK_VAR_LEN]; -}; - -/* generic segment */ -struct ice_generic_seg_hdr { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_ICE_E810 0x00000010 - __le32 seg_type; - struct ice_pkg_ver seg_format_ver; - __le32 seg_size; - char seg_id[ICE_PKG_NAME_SIZE]; -}; - -/* ice specific segment */ - -union ice_device_id { - struct { - __le16 device_id; - __le16 vendor_id; - } dev_vend_id; - __le32 id; -}; - -struct ice_device_id_entry { - union ice_device_id device; - union ice_device_id sub_device; -}; - -struct ice_seg { - struct ice_generic_seg_hdr hdr; - __le32 device_table_count; - struct ice_device_id_entry device_table[STRUCT_HACK_VAR_LEN]; -}; - -struct ice_nvm_table { - __le32 table_count; - __le32 vers[STRUCT_HACK_VAR_LEN]; -}; - -struct ice_buf { -#define ICE_PKG_BUF_SIZE 4096 - u8 buf[ICE_PKG_BUF_SIZE]; -}; - -struct ice_buf_table { - __le32 buf_count; - struct ice_buf buf_array[STRUCT_HACK_VAR_LEN]; -}; - -/* global metadata specific segment */ -struct ice_global_metadata_seg { - struct ice_generic_seg_hdr hdr; - struct ice_pkg_ver pkg_ver; - __le32 rsvd; - char pkg_name[ICE_PKG_NAME_SIZE]; -}; - -#define ICE_MIN_S_OFF 12 -#define ICE_MAX_S_OFF 4095 -#define ICE_MIN_S_SZ 1 -#define ICE_MAX_S_SZ 4084 - -/* section information */ -struct ice_section_entry { - __le32 type; - __le16 offset; - __le16 size; -}; - -#define ICE_MIN_S_COUNT 1 -#define ICE_MAX_S_COUNT 511 -#define ICE_MIN_S_DATA_END 12 -#define ICE_MAX_S_DATA_END 4096 - -#define ICE_METADATA_BUF 0x80000000 - -struct ice_buf_hdr { - __le16 section_count; - __le16 data_end; - struct ice_section_entry section_entry[STRUCT_HACK_VAR_LEN]; -}; - -#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ - ice_struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ - (ent_sz)) - -/* ice package section IDs */ -#define ICE_SID_METADATA 1 -#define ICE_SID_XLT0_SW 10 -#define ICE_SID_XLT_KEY_BUILDER_SW 11 -#define ICE_SID_XLT1_SW 12 -#define ICE_SID_XLT2_SW 13 -#define ICE_SID_PROFID_TCAM_SW 14 -#define ICE_SID_PROFID_REDIR_SW 15 -#define ICE_SID_FLD_VEC_SW 16 -#define ICE_SID_CDID_KEY_BUILDER_SW 17 -#define ICE_SID_CDID_REDIR_SW 18 - -#define ICE_SID_XLT0_ACL 20 -#define ICE_SID_XLT_KEY_BUILDER_ACL 21 -#define ICE_SID_XLT1_ACL 22 -#define ICE_SID_XLT2_ACL 23 -#define ICE_SID_PROFID_TCAM_ACL 24 -#define ICE_SID_PROFID_REDIR_ACL 25 -#define ICE_SID_FLD_VEC_ACL 26 -#define ICE_SID_CDID_KEY_BUILDER_ACL 27 -#define ICE_SID_CDID_REDIR_ACL 28 - -#define ICE_SID_XLT0_FD 30 -#define ICE_SID_XLT_KEY_BUILDER_FD 31 -#define ICE_SID_XLT1_FD 32 -#define ICE_SID_XLT2_FD 33 -#define ICE_SID_PROFID_TCAM_FD 34 -#define ICE_SID_PROFID_REDIR_FD 35 -#define ICE_SID_FLD_VEC_FD 36 -#define ICE_SID_CDID_KEY_BUILDER_FD 37 -#define ICE_SID_CDID_REDIR_FD 38 - -#define ICE_SID_XLT0_RSS 40 -#define ICE_SID_XLT_KEY_BUILDER_RSS 41 -#define ICE_SID_XLT1_RSS 42 -#define ICE_SID_XLT2_RSS 43 -#define ICE_SID_PROFID_TCAM_RSS 44 -#define ICE_SID_PROFID_REDIR_RSS 45 -#define ICE_SID_FLD_VEC_RSS 46 
-#define ICE_SID_CDID_KEY_BUILDER_RSS 47 -#define ICE_SID_CDID_REDIR_RSS 48 - -#define ICE_SID_RXPARSER_CAM 50 -#define ICE_SID_RXPARSER_NOMATCH_CAM 51 -#define ICE_SID_RXPARSER_IMEM 52 -#define ICE_SID_RXPARSER_XLT0_BUILDER 53 -#define ICE_SID_RXPARSER_NODE_PTYPE 54 -#define ICE_SID_RXPARSER_MARKER_PTYPE 55 -#define ICE_SID_RXPARSER_BOOST_TCAM 56 -#define ICE_SID_RXPARSER_PROTO_GRP 57 -#define ICE_SID_RXPARSER_METADATA_INIT 58 -#define ICE_SID_RXPARSER_XLT0 59 - -#define ICE_SID_TXPARSER_CAM 60 -#define ICE_SID_TXPARSER_NOMATCH_CAM 61 -#define ICE_SID_TXPARSER_IMEM 62 -#define ICE_SID_TXPARSER_XLT0_BUILDER 63 -#define ICE_SID_TXPARSER_NODE_PTYPE 64 -#define ICE_SID_TXPARSER_MARKER_PTYPE 65 -#define ICE_SID_TXPARSER_BOOST_TCAM 66 -#define ICE_SID_TXPARSER_PROTO_GRP 67 -#define ICE_SID_TXPARSER_METADATA_INIT 68 -#define ICE_SID_TXPARSER_XLT0 69 - -#define ICE_SID_RXPARSER_INIT_REDIR 70 -#define ICE_SID_TXPARSER_INIT_REDIR 71 -#define ICE_SID_RXPARSER_MARKER_GRP 72 -#define ICE_SID_TXPARSER_MARKER_GRP 73 -#define ICE_SID_RXPARSER_LAST_PROTO 74 -#define ICE_SID_TXPARSER_LAST_PROTO 75 -#define ICE_SID_RXPARSER_PG_SPILL 76 -#define ICE_SID_TXPARSER_PG_SPILL 77 -#define ICE_SID_RXPARSER_NOMATCH_SPILL 78 -#define ICE_SID_TXPARSER_NOMATCH_SPILL 79 - -#define ICE_SID_XLT0_PE 80 -#define ICE_SID_XLT_KEY_BUILDER_PE 81 -#define ICE_SID_XLT1_PE 82 -#define ICE_SID_XLT2_PE 83 -#define ICE_SID_PROFID_TCAM_PE 84 -#define ICE_SID_PROFID_REDIR_PE 85 -#define ICE_SID_FLD_VEC_PE 86 -#define ICE_SID_CDID_KEY_BUILDER_PE 87 -#define ICE_SID_CDID_REDIR_PE 88 - -#define ICE_SID_RXPARSER_FLAG_REDIR 97 - -/* Label Metadata section IDs */ -#define ICE_SID_LBL_FIRST 0x80000010 -#define ICE_SID_LBL_RXPARSER_IMEM 0x80000010 -#define ICE_SID_LBL_TXPARSER_IMEM 0x80000011 -#define ICE_SID_LBL_RESERVED_12 0x80000012 -#define ICE_SID_LBL_RESERVED_13 0x80000013 -#define ICE_SID_LBL_RXPARSER_MARKER 0x80000014 -#define ICE_SID_LBL_TXPARSER_MARKER 0x80000015 -#define ICE_SID_LBL_PTYPE 0x80000016 -#define ICE_SID_LBL_PROTOCOL_ID 0x80000017 -#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 -#define ICE_SID_LBL_TXPARSER_TMEM 0x80000019 -#define ICE_SID_LBL_RXPARSER_PG 0x8000001A -#define ICE_SID_LBL_TXPARSER_PG 0x8000001B -#define ICE_SID_LBL_RXPARSER_M_TCAM 0x8000001C -#define ICE_SID_LBL_TXPARSER_M_TCAM 0x8000001D -#define ICE_SID_LBL_SW_PROFID_TCAM 0x8000001E -#define ICE_SID_LBL_ACL_PROFID_TCAM 0x8000001F -#define ICE_SID_LBL_PE_PROFID_TCAM 0x80000020 -#define ICE_SID_LBL_RSS_PROFID_TCAM 0x80000021 -#define ICE_SID_LBL_FD_PROFID_TCAM 0x80000022 -#define ICE_SID_LBL_FLAG 0x80000023 -#define ICE_SID_LBL_REG 0x80000024 -#define ICE_SID_LBL_SW_PTG 0x80000025 -#define ICE_SID_LBL_ACL_PTG 0x80000026 -#define ICE_SID_LBL_PE_PTG 0x80000027 -#define ICE_SID_LBL_RSS_PTG 0x80000028 -#define ICE_SID_LBL_FD_PTG 0x80000029 -#define ICE_SID_LBL_SW_VSIG 0x8000002A -#define ICE_SID_LBL_ACL_VSIG 0x8000002B -#define ICE_SID_LBL_PE_VSIG 0x8000002C -#define ICE_SID_LBL_RSS_VSIG 0x8000002D -#define ICE_SID_LBL_FD_VSIG 0x8000002E -#define ICE_SID_LBL_PTYPE_META 0x8000002F -#define ICE_SID_LBL_SW_PROFID 0x80000030 -#define ICE_SID_LBL_ACL_PROFID 0x80000031 -#define ICE_SID_LBL_PE_PROFID 0x80000032 -#define ICE_SID_LBL_RSS_PROFID 0x80000033 -#define ICE_SID_LBL_FD_PROFID 0x80000034 -#define ICE_SID_LBL_RXPARSER_MARKER_GRP 0x80000035 -#define ICE_SID_LBL_TXPARSER_MARKER_GRP 0x80000036 -#define ICE_SID_LBL_RXPARSER_PROTO 0x80000037 -#define ICE_SID_LBL_TXPARSER_PROTO 0x80000038 -/* The following define MUST be updated to reflect the last label section ID */ 
-#define ICE_SID_LBL_LAST 0x80000038 - -enum ice_block { - ICE_BLK_SW = 0, - ICE_BLK_ACL, - ICE_BLK_FD, - ICE_BLK_RSS, - ICE_BLK_PE, - ICE_BLK_COUNT -}; - -enum ice_sect { - ICE_XLT0 = 0, - ICE_XLT_KB, - ICE_XLT1, - ICE_XLT2, - ICE_PROF_TCAM, - ICE_PROF_REDIR, - ICE_VEC_TBL, - ICE_CDID_KB, - ICE_CDID_REDIR, - ICE_SECT_COUNT -}; - /* Packet Type (PTYPE) values */ #define ICE_PTYPE_MAC_PAY 1 #define ICE_PTYPE_IPV4FRAG_PAY 22 #define ICE_PTYPE_IPV4_PAY 23 #define ICE_PTYPE_IPV4_UDP_PAY 24 #define ICE_PTYPE_IPV4_TCP_PAY 26 #define ICE_PTYPE_IPV4_SCTP_PAY 27 #define ICE_PTYPE_IPV4_ICMP_PAY 28 #define ICE_PTYPE_IPV6FRAG_PAY 88 #define ICE_PTYPE_IPV6_PAY 89 #define ICE_PTYPE_IPV6_UDP_PAY 90 #define ICE_PTYPE_IPV6_TCP_PAY 92 #define ICE_PTYPE_IPV6_SCTP_PAY 93 #define ICE_PTYPE_IPV6_ICMP_PAY 94 struct ice_meta_sect { struct ice_pkg_ver ver; #define ICE_META_SECT_NAME_SIZE 28 char name[ICE_META_SECT_NAME_SIZE]; __le32 track_id; }; /* Packet Type Groups (PTG) - Inner Most fields (IM) */ #define ICE_PTG_IM_IPV4_TCP 16 #define ICE_PTG_IM_IPV4_UDP 17 #define ICE_PTG_IM_IPV4_SCTP 18 #define ICE_PTG_IM_IPV4_PAY 20 #define ICE_PTG_IM_IPV4_OTHER 21 #define ICE_PTG_IM_IPV6_TCP 32 #define ICE_PTG_IM_IPV6_UDP 33 #define ICE_PTG_IM_IPV6_SCTP 34 #define ICE_PTG_IM_IPV6_OTHER 37 #define ICE_PTG_IM_L2_OTHER 67 struct ice_flex_fields { union { struct { u8 src_ip; u8 dst_ip; u8 flow_label; /* valid for IPv6 only */ } ip_fields; struct { u8 src_prt; u8 dst_prt; } tcp_udp_fields; struct { u8 src_ip; u8 dst_ip; u8 src_prt; u8 dst_prt; } ip_tcp_udp_fields; struct { u8 src_prt; u8 dst_prt; u8 flow_label; /* valid for IPv6 only */ u8 spi; } ip_esp_fields; struct { u32 offset; u32 length; } off_len; } fields; }; #define ICE_XLT1_DFLT_GRP 0 #define ICE_XLT1_TABLE_SIZE 1024 /* package labels */ struct ice_label { __le16 value; #define ICE_PKG_LABEL_SIZE 64 char name[ICE_PKG_LABEL_SIZE]; }; struct ice_label_section { __le16 count; struct ice_label label[STRUCT_HACK_VAR_LEN]; }; #define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ ice_struct_size((struct ice_label_section *)0, label, 1) - \ sizeof(struct ice_label), sizeof(struct ice_label)) struct ice_sw_fv_section { __le16 count; __le16 base_offset; struct ice_fv fv[STRUCT_HACK_VAR_LEN]; }; struct ice_sw_fv_list_entry { struct LIST_ENTRY_TYPE list_entry; u32 profile_id; struct ice_fv *fv_ptr; }; #pragma pack(1) /* The BOOST TCAM stores the match packet header in reverse order, meaning * the fields are reversed; in addition, this means that the normally big endian * fields of the packet are now little endian. */ struct ice_boost_key_value { -#define ICE_BOOST_REMAINING_HV_KEY 15 +#define ICE_BOOST_REMAINING_HV_KEY 15 u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY]; - __le16 hv_dst_port_key; - __le16 hv_src_port_key; + union { + struct { + __le16 hv_dst_port_key; + __le16 hv_src_port_key; + } /* udp_tunnel */; + struct { + __le16 hv_vlan_id_key; + __le16 hv_etype_key; + } vlan; + }; u8 tcam_search_key; }; #pragma pack() struct ice_boost_key { struct ice_boost_key_value key; struct ice_boost_key_value key2; }; /* package Boost TCAM entry */ struct ice_boost_tcam_entry { __le16 addr; __le16 reserved; /* break up the 40 bytes of key into different fields */ struct ice_boost_key key; u8 boost_hit_index_group; /* The following contains bitfields which are not on byte boundaries. * These fields are currently unused by driver software. 
*/ #define ICE_BOOST_BIT_FIELDS 43 u8 bit_fields[ICE_BOOST_BIT_FIELDS]; }; struct ice_boost_tcam_section { __le16 count; __le16 reserved; struct ice_boost_tcam_entry tcam[STRUCT_HACK_VAR_LEN]; }; #define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ ice_struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \ sizeof(struct ice_boost_tcam_entry), \ sizeof(struct ice_boost_tcam_entry)) struct ice_xlt1_section { __le16 count; __le16 offset; u8 value[STRUCT_HACK_VAR_LEN]; }; struct ice_xlt2_section { __le16 count; __le16 offset; __le16 value[STRUCT_HACK_VAR_LEN]; }; struct ice_prof_redir_section { __le16 count; __le16 offset; u8 redir_value[STRUCT_HACK_VAR_LEN]; }; -/* package buffer building */ - -struct ice_buf_build { - struct ice_buf buf; - u16 reserved_section_table_entries; -}; - -struct ice_pkg_enum { - struct ice_buf_table *buf_table; - u32 buf_idx; - - u32 type; - struct ice_buf_hdr *buf; - u32 sect_idx; - void *sect; - u32 sect_type; - - u32 entry_idx; - void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset); -}; - /* Tunnel enabling */ enum ice_tunnel_type { TNL_VXLAN = 0, TNL_GENEVE, + TNL_GRETAP, TNL_GTP, + TNL_GTPC, + TNL_GTPU, TNL_LAST = 0xFF, TNL_ALL = 0xFF, }; struct ice_tunnel_type_scan { enum ice_tunnel_type type; const char *label_prefix; }; struct ice_tunnel_entry { enum ice_tunnel_type type; u16 boost_addr; u16 port; u16 ref; struct ice_boost_tcam_entry *boost_entry; u8 valid; u8 in_use; u8 marked; }; #define ICE_TUNNEL_MAX_ENTRIES 16 struct ice_tunnel_table { struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES]; u16 count; }; struct ice_pkg_es { __le16 count; __le16 offset; struct ice_fv_word es[STRUCT_HACK_VAR_LEN]; }; struct ice_es { u32 sid; u16 count; u16 fvw; u16 *ref_count; struct LIST_HEAD_TYPE prof_map; struct ice_fv_word *t; struct ice_lock prof_map_lock; /* protect access to profiles list */ u8 *written; u8 reverse; /* set to true to reverse FV order */ }; /* PTYPE Group management */ /* Note: XLT1 table takes 13-bit as input, and results in an 8-bit packet type * group (PTG) ID as output. * * Note: PTG 0 is the default packet type group and it is assumed that all PTYPE * are a part of this group until moved to a new PTG. 
*/ #define ICE_DEFAULT_PTG 0 struct ice_ptg_entry { struct ice_ptg_ptype *first_ptype; u8 in_use; }; struct ice_ptg_ptype { struct ice_ptg_ptype *next_ptype; u8 ptg; }; #define ICE_MAX_TCAM_PER_PROFILE 32 #define ICE_MAX_PTG_PER_PROFILE 32 struct ice_prof_map { struct LIST_ENTRY_TYPE list; u64 profile_cookie; u64 context; u8 prof_id; u8 ptg_cnt; u8 ptg[ICE_MAX_PTG_PER_PROFILE]; }; #define ICE_INVALID_TCAM 0xFFFF struct ice_tcam_inf { u16 tcam_idx; u8 ptg; u8 prof_id; u8 in_use; }; struct ice_vsig_prof { struct LIST_ENTRY_TYPE list; u64 profile_cookie; u8 prof_id; u8 tcam_count; struct ice_tcam_inf tcam[ICE_MAX_TCAM_PER_PROFILE]; }; struct ice_vsig_entry { struct LIST_HEAD_TYPE prop_lst; struct ice_vsig_vsi *first_vsi; u8 in_use; }; struct ice_vsig_vsi { struct ice_vsig_vsi *next_vsi; u32 prop_mask; u16 changed; u16 vsig; }; #define ICE_XLT1_CNT 1024 #define ICE_MAX_PTGS 256 /* XLT1 Table */ struct ice_xlt1 { struct ice_ptg_entry *ptg_tbl; struct ice_ptg_ptype *ptypes; u8 *t; u32 sid; u16 count; }; #define ICE_XLT2_CNT 768 #define ICE_MAX_VSIGS 768 /* VSIG bit layout: * [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS * [13:15]: PF number of device */ #define ICE_VSIG_IDX_M (0x1FFF) #define ICE_PF_NUM_S 13 #define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S) #define ICE_VSIG_VALUE(vsig, pf_id) \ ((u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \ (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))) #define ICE_DEFAULT_VSIG 0 /* XLT2 Table */ struct ice_xlt2 { struct ice_vsig_entry *vsig_tbl; struct ice_vsig_vsi *vsis; u16 *t; u32 sid; u16 count; }; /* Extraction sequence - list of match fields: * protocol ID, offset, profile length */ union ice_match_fld { struct { u8 prot_id; u8 offset; u8 length; u8 reserved; /* must be zero */ } fld; u32 val; }; #define ICE_MATCH_LIST_SZ 20 #pragma pack(1) struct ice_match { u8 count; union ice_match_fld list[ICE_MATCH_LIST_SZ]; }; /* Profile ID Management */ struct ice_prof_id_key { __le16 flags; u8 xlt1; __le16 xlt2_cdid; }; /* Keys are made up of two values, each one-half the size of the key. * For TCAM, the entire key is 80 bits wide (or 2, 40-bit wide values) */ #define ICE_TCAM_KEY_VAL_SZ 5 #define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ) struct ice_prof_tcam_entry { __le16 addr; u8 key[ICE_TCAM_KEY_SZ]; u8 prof_id; }; #pragma pack() struct ice_prof_id_section { __le16 count; struct ice_prof_tcam_entry entry[STRUCT_HACK_VAR_LEN]; }; struct ice_prof_tcam { u32 sid; u16 count; u16 max_prof_id; struct ice_prof_tcam_entry *t; u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */ }; struct ice_prof_redir { u8 *t; u32 sid; u16 count; }; /* Tables per block */ struct ice_blk_info { struct ice_xlt1 xlt1; struct ice_xlt2 xlt2; struct ice_prof_tcam prof; struct ice_prof_redir prof_redir; struct ice_es es; u8 overwrite; /* set to true to allow overwrite of table entries */ u8 is_list_init; }; enum ice_chg_type { ICE_TCAM_NONE = 0, ICE_PTG_ES_ADD, ICE_TCAM_ADD, ICE_VSIG_ADD, ICE_VSIG_REM, ICE_VSI_MOVE, }; struct ice_chs_chg { struct LIST_ENTRY_TYPE list_entry; enum ice_chg_type type; u8 add_ptg; u8 add_vsig; u8 add_tcam_idx; u8 add_prof; u16 ptype; u8 ptg; u8 prof_id; u16 vsi; u16 vsig; u16 orig_vsig; u16 tcam_idx; }; #define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT enum ice_prof_type { + ICE_PROF_INVALID = 0x0, ICE_PROF_NON_TUN = 0x1, ICE_PROF_TUN_UDP = 0x2, ICE_PROF_TUN_GRE = 0x4, - ICE_PROF_TUN_ALL = 0x6, + ICE_PROF_TUN_GTPU = 0x8, + ICE_PROF_TUN_GTPC = 0x10, + ICE_PROF_TUN_ALL = 0x1E, ICE_PROF_ALL = 0xFF, }; /* Number of bits/bytes contained in meta init entry. 
Note, this should be a * multiple of 32 bits. */ #define ICE_META_INIT_BITS 192 #define ICE_META_INIT_DW_CNT (ICE_META_INIT_BITS / (sizeof(__le32) * \ BITS_PER_BYTE)) /* The meta init Flag field starts at this bit */ #define ICE_META_FLAGS_ST 123 /* The entry and bit to check for Double VLAN Mode (DVM) support */ #define ICE_META_VLAN_MODE_ENTRY 0 #define ICE_META_FLAG_VLAN_MODE 60 #define ICE_META_VLAN_MODE_BIT (ICE_META_FLAGS_ST + \ ICE_META_FLAG_VLAN_MODE) struct ice_meta_init_entry { __le32 bm[ICE_META_INIT_DW_CNT]; }; struct ice_meta_init_section { __le16 count; __le16 offset; struct ice_meta_init_entry entry[1]; }; #endif /* _ICE_FLEX_TYPE_H_ */ diff --git a/sys/dev/ice/ice_flow.c b/sys/dev/ice/ice_flow.c index 387882bfe903..73abf03d43b8 100644 --- a/sys/dev/ice/ice_flow.c +++ b/sys/dev/ice/ice_flow.c @@ -1,1950 +1,1948 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 */
/*$FreeBSD$*/

#include "ice_common.h"
#include "ice_flow.h"

/* Size of known protocol header fields */
#define ICE_FLOW_FLD_SZ_ETH_TYPE	2
#define ICE_FLOW_FLD_SZ_VLAN		2
#define ICE_FLOW_FLD_SZ_IPV4_ADDR	4
#define ICE_FLOW_FLD_SZ_IPV6_ADDR	16
#define ICE_FLOW_FLD_SZ_IP_DSCP		1
#define ICE_FLOW_FLD_SZ_IP_TTL		1
#define ICE_FLOW_FLD_SZ_IP_PROT		1
#define ICE_FLOW_FLD_SZ_PORT		2
#define ICE_FLOW_FLD_SZ_TCP_FLAGS	1
#define ICE_FLOW_FLD_SZ_ICMP_TYPE	1
#define ICE_FLOW_FLD_SZ_ICMP_CODE	1
#define ICE_FLOW_FLD_SZ_ARP_OPER	2
#define ICE_FLOW_FLD_SZ_GRE_KEYID	4

/* Describe properties of a protocol header field */
struct ice_flow_field_info {
	enum ice_flow_seg_hdr hdr;
	s16 off;	/* Offset from start of a protocol header, in bits */
	u16 size;	/* Size of fields in bits */
};

#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
	.hdr = _hdr, \
	.off = (_offset_bytes) * BITS_PER_BYTE, \
	.size = (_size_bytes) * BITS_PER_BYTE, \
}

/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
	/* Ether */
	/* ICE_FLOW_FIELD_IDX_ETH_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ETH_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_S_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_C_VLAN */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, ICE_FLOW_FLD_SZ_VLAN),
	/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ICE_FLOW_FLD_SZ_ETH_TYPE),
	/* IPv4 / IPv6 */
	/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 1, ICE_FLOW_FLD_SZ_IP_DSCP),
	/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 0, ICE_FLOW_FLD_SZ_IP_DSCP),
	/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 8, ICE_FLOW_FLD_SZ_IP_TTL),
	/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 9, ICE_FLOW_FLD_SZ_IP_PROT),
	/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 7, ICE_FLOW_FLD_SZ_IP_TTL),
	/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NONE, 6, ICE_FLOW_FLD_SZ_IP_PROT),
	/* ICE_FLOW_FIELD_IDX_IPV4_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV4_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 16, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_SA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* ICE_FLOW_FIELD_IDX_IPV6_DA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
	/* Transport */
	/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_UDP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, ICE_FLOW_FLD_SZ_PORT),
	/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
	/* ARP */
	/* ICE_FLOW_FIELD_IDX_ARP_SIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_DIP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, ICE_FLOW_FLD_SZ_IPV4_ADDR),
	/* ICE_FLOW_FIELD_IDX_ARP_SHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_DHA */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
	/* ICE_FLOW_FIELD_IDX_ARP_OP */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, ICE_FLOW_FLD_SZ_ARP_OPER),
	/* ICMP */
	/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, ICE_FLOW_FLD_SZ_ICMP_TYPE),
	/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, ICE_FLOW_FLD_SZ_ICMP_CODE),
	/* GRE */
	/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
	ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12, ICE_FLOW_FLD_SZ_GRE_KEYID),
};

/* Bitmaps indicating relevant packet types for a particular protocol header
 *
 * Packet types for packets with an Outer/First/Single MAC header
 */
static const u32 ice_ptypes_mac_ofos[] = {
	0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
	0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
	0x0000077E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * does NOT include IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos[] = {
	0x1D800000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single non-frag IPv4 header,
 * includes IPV4 other PTYPEs
 */
static const u32 ice_ptypes_ipv4_ofos_all[] = {
	0x1D800000, 0x04000800, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Innermost/Last IPv4 header */
static const u32 ice_ptypes_ipv4_il[] = {
	0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
	0x0000000E, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
};

/* Packet types for packets with an Outer/First/Single non-frag IPv6 header,
 * does NOT include IPV6 other PTYPEs
 */
static const u32 ice_ptypes_ipv6_ofos[] = {
	0x00000000, 0x00000000,
0x76000000, 0x10002000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outer/First/Single non-frag IPv6 header, * includes IPV6 other PTYPEs */ static const u32 ice_ptypes_ipv6_ofos_all[] = { 0x00000000, 0x00000000, 0x76000000, 0x10002000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last IPv6 header */ static const u32 ice_ptypes_ipv6_il[] = { 0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000, 0x00000770, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outer/First/Single * non-frag IPv4 header - no L4 */ static const u32 ice_ptypes_ipv4_ofos_no_l4[] = { 0x10800000, 0x04000800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last IPv4 header - no L4 */ static const u32 ice_ptypes_ipv4_il_no_l4[] = { 0x60000000, 0x18043008, 0x80000002, 0x6010c021, 0x00000008, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outer/First/Single * non-frag IPv6 header - no L4 */ static const u32 ice_ptypes_ipv6_ofos_no_l4[] = { 0x00000000, 0x00000000, 0x42000000, 0x10002000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last IPv6 header - no L4 */ static const u32 ice_ptypes_ipv6_il_no_l4[] = { 0x00000000, 0x02180430, 0x0000010c, 0x086010c0, 0x00000430, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outermost/First ARP header */ static const u32 
ice_ptypes_arp_of[] = { 0x00000800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* UDP Packet types for non-tunneled packets or tunneled * packets with inner UDP. */ static const u32 ice_ptypes_udp_il[] = { 0x81000000, 0x20204040, 0x04000010, 0x80810102, 0x00000040, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last TCP header */ static const u32 ice_ptypes_tcp_il[] = { 0x04000000, 0x80810102, 0x10000040, 0x02040408, 0x00000102, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last SCTP header */ static const u32 ice_ptypes_sctp_il[] = { 0x08000000, 0x01020204, 0x20000081, 0x04080810, 0x00000204, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outermost/First ICMP header */ static const u32 ice_ptypes_icmp_of[] = { 0x10000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last ICMP header */ static const u32 ice_ptypes_icmp_il[] = { 0x00000000, 0x02040408, 0x40000102, 0x08101020, 0x00000408, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Outermost/First GRE header */ static const u32 ice_ptypes_gre_of[] = { 0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000, 0x0000017E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Packet types for packets with an Innermost/Last MAC header */ static const u32 ice_ptypes_mac_il[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; /* Manage parameters and info. used during the creation of a flow profile */ struct ice_flow_prof_params { enum ice_block blk; u16 entry_length; /* # of bytes formatted entry will require */ u8 es_cnt; struct ice_flow_prof *prof; /* For ACL, the es[0] will have the data of ICE_RX_MDID_PKT_FLAGS_15_0 * This will give us the direction flags. */ struct ice_fv_word es[ICE_MAX_FV_WORDS]; + ice_declare_bitmap(ptypes, ICE_FLOW_PTYPE_MAX); }; #define ICE_FLOW_SEG_HDRS_L3_MASK \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | \ ICE_FLOW_SEG_HDR_ARP) #define ICE_FLOW_SEG_HDRS_L4_MASK \ (ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \ ICE_FLOW_SEG_HDR_SCTP) /* mask for L4 protocols that are NOT part of IPV4/6 OTHER PTYPE groups */ #define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) /** * ice_flow_val_hdrs - validates packet segments for valid protocol headers * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided */ static enum ice_status ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt) { u8 i; for (i = 0; i < segs_cnt; i++) { /* Multiple L3 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK && !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK)) return ICE_ERR_PARAM; /* Multiple L4 headers */ if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK && !ice_is_pow2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) return ICE_ERR_PARAM; } return ICE_SUCCESS; } /** * ice_flow_proc_seg_hdrs - process protocol headers present in pkt segments * @params: information about the flow to be processed * * This function identifies the packet types associated with the protocol * headers being present in packet segments of the specified flow profile. */ static enum ice_status ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params) { struct ice_flow_prof *prof; u8 i; ice_memset(params->ptypes, 0xff, sizeof(params->ptypes), ICE_NONDMA_MEM); prof = params->prof; for (i = 0; i < params->prof->segs_cnt; i++) { const ice_bitmap_t *src; u32 hdrs; hdrs = prof->segs[i].hdrs; if (hdrs & ICE_FLOW_SEG_HDR_ETH) { src = !i ? (const ice_bitmap_t *)ice_ptypes_mac_ofos : (const ice_bitmap_t *)ice_ptypes_mac_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) { src = (const ice_bitmap_t *)ice_ptypes_macvlan_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) { ice_and_bitmap(params->ptypes, params->ptypes, (const ice_bitmap_t *)ice_ptypes_arp_of, ICE_FLOW_PTYPE_MAX); } if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) && (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) { src = i ? (const ice_bitmap_t *)ice_ptypes_ipv4_il : (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_all; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) && (hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) { src = i ? 
(const ice_bitmap_t *)ice_ptypes_ipv6_il : (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_all; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) && !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) { src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos_no_l4 : (const ice_bitmap_t *)ice_ptypes_ipv4_il_no_l4; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) { src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv4_ofos : (const ice_bitmap_t *)ice_ptypes_ipv4_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) && !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) { src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos_no_l4 : (const ice_bitmap_t *)ice_ptypes_ipv6_il_no_l4; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) { src = !i ? (const ice_bitmap_t *)ice_ptypes_ipv6_ofos : (const ice_bitmap_t *)ice_ptypes_ipv6_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } if (hdrs & ICE_FLOW_SEG_HDR_UDP) { src = (const ice_bitmap_t *)ice_ptypes_udp_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_TCP) { ice_and_bitmap(params->ptypes, params->ptypes, (const ice_bitmap_t *)ice_ptypes_tcp_il, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_SCTP) { src = (const ice_bitmap_t *)ice_ptypes_sctp_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } if (hdrs & ICE_FLOW_SEG_HDR_ICMP) { src = !i ? (const ice_bitmap_t *)ice_ptypes_icmp_of : (const ice_bitmap_t *)ice_ptypes_icmp_il; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } else if (hdrs & ICE_FLOW_SEG_HDR_GRE) { if (!i) { src = (const ice_bitmap_t *)ice_ptypes_gre_of; ice_and_bitmap(params->ptypes, params->ptypes, src, ICE_FLOW_PTYPE_MAX); } } } return ICE_SUCCESS; } /** * ice_flow_xtract_fld - Create an extraction sequence entry for the given field * @hw: pointer to the HW struct * @params: information about the flow to be processed * @seg: packet segment index of the field to be extracted * @fld: ID of field to be extracted * * This function determines the protocol ID, offset, and size of the given * field. It then allocates one or more extraction sequence entries for the * given field, and fills the entries with protocol ID and offset information. */ static enum ice_status ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params, u8 seg, enum ice_flow_field fld) { enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX; + u8 fv_words = (u8)hw->blk[params->blk].es.fvw; enum ice_prot_id prot_id = ICE_PROT_ID_INVAL; - u8 fv_words = hw->blk[params->blk].es.fvw; struct ice_flow_fld_info *flds; u16 cnt, ese_bits, i; u16 off; flds = params->prof->segs[seg].fields; switch (fld) { case ICE_FLOW_FIELD_IDX_ETH_DA: case ICE_FLOW_FIELD_IDX_ETH_SA: case ICE_FLOW_FIELD_IDX_S_VLAN: case ICE_FLOW_FIELD_IDX_C_VLAN: prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL; break; case ICE_FLOW_FIELD_IDX_ETH_TYPE: prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL; break; case ICE_FLOW_FIELD_IDX_IPV4_DSCP: prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; break; case ICE_FLOW_FIELD_IDX_IPV6_DSCP: prot_id = seg == 0 ?
ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; break; case ICE_FLOW_FIELD_IDX_IPV4_TTL: case ICE_FLOW_FIELD_IDX_IPV4_PROT: prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; - /* TTL and PROT share the same extraction seq. entry. * Each is considered a sibling to the other in terms of sharing * the same extraction sequence entry. */ if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL) sib = ICE_FLOW_FIELD_IDX_IPV4_PROT; else sib = ICE_FLOW_FIELD_IDX_IPV4_TTL; break; case ICE_FLOW_FIELD_IDX_IPV6_TTL: case ICE_FLOW_FIELD_IDX_IPV6_PROT: prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; - /* TTL and PROT share the same extraction seq. entry. * Each is considered a sibling to the other in terms of sharing * the same extraction sequence entry. */ if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL) sib = ICE_FLOW_FIELD_IDX_IPV6_PROT; else sib = ICE_FLOW_FIELD_IDX_IPV6_TTL; break; case ICE_FLOW_FIELD_IDX_IPV4_SA: case ICE_FLOW_FIELD_IDX_IPV4_DA: prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL; break; case ICE_FLOW_FIELD_IDX_IPV6_SA: case ICE_FLOW_FIELD_IDX_IPV6_DA: prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL; break; case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT: case ICE_FLOW_FIELD_IDX_TCP_DST_PORT: case ICE_FLOW_FIELD_IDX_TCP_FLAGS: prot_id = ICE_PROT_TCP_IL; break; case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT: case ICE_FLOW_FIELD_IDX_UDP_DST_PORT: prot_id = ICE_PROT_UDP_IL_OR_S; break; case ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT: case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT: prot_id = ICE_PROT_SCTP_IL; break; case ICE_FLOW_FIELD_IDX_ARP_SIP: case ICE_FLOW_FIELD_IDX_ARP_DIP: case ICE_FLOW_FIELD_IDX_ARP_SHA: case ICE_FLOW_FIELD_IDX_ARP_DHA: case ICE_FLOW_FIELD_IDX_ARP_OP: prot_id = ICE_PROT_ARP_OF; break; case ICE_FLOW_FIELD_IDX_ICMP_TYPE: case ICE_FLOW_FIELD_IDX_ICMP_CODE: /* ICMP type and code share the same extraction seq. entry */ prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ? ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL; sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ? ICE_FLOW_FIELD_IDX_ICMP_CODE : ICE_FLOW_FIELD_IDX_ICMP_TYPE; break; case ICE_FLOW_FIELD_IDX_GRE_KEYID: prot_id = ICE_PROT_GRE_OF; break; default: return ICE_ERR_NOT_IMPL; } /* Each extraction sequence entry is a word in size, and extracts a * word-aligned offset from a protocol header. */ ese_bits = ICE_FLOW_FV_EXTRACT_SZ * BITS_PER_BYTE; - flds[fld].xtrct.prot_id = prot_id; + flds[fld].xtrct.prot_id = (u8)prot_id; flds[fld].xtrct.off = (ice_flds_info[fld].off / ese_bits) * ICE_FLOW_FV_EXTRACT_SZ; flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits); flds[fld].xtrct.idx = params->es_cnt; /* Adjust the next field-entry index after accommodating the number of * entries this field consumes */ cnt = DIVIDE_AND_ROUND_UP(flds[fld].xtrct.disp + ice_flds_info[fld].size, ese_bits); /* Fill in the extraction sequence entries needed for this field */ off = flds[fld].xtrct.off; for (i = 0; i < cnt; i++) { /* Only consume an extraction sequence entry if there is no * sibling field associated with this field or the sibling entry * already extracts the word shared with this field. 
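 *
 * A rough illustration (byte offsets here come from the IPv4
 * header layout and are assumed only for the example): IPv4 TTL
 * at offset 8 and IPv4 PROT at offset 9 share one 2-byte
 * extraction word, so whichever sibling is processed first
 * allocates the entry and the other simply reuses its index
 * rather than consuming a second one.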
*/ if (sib == ICE_FLOW_FIELD_IDX_MAX || flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL || flds[sib].xtrct.off != off) { u8 idx; /* Make sure the number of extraction sequence required * does not exceed the block's capability */ if (params->es_cnt >= fv_words) return ICE_ERR_MAX_LIMIT; /* some blocks require a reversed field vector layout */ if (hw->blk[params->blk].es.reverse) idx = fv_words - params->es_cnt - 1; else idx = params->es_cnt; - params->es[idx].prot_id = prot_id; + params->es[idx].prot_id = (u8)prot_id; params->es[idx].off = off; params->es_cnt++; } off += ICE_FLOW_FV_EXTRACT_SZ; } return ICE_SUCCESS; } /** * ice_flow_create_xtrct_seq - Create an extraction sequence for given segments * @hw: pointer to the HW struct * @params: information about the flow to be processed * * This function iterates through all matched fields in the given segments, and * creates an extraction sequence for the fields. */ static enum ice_status ice_flow_create_xtrct_seq(struct ice_hw *hw, struct ice_flow_prof_params *params) { enum ice_status status = ICE_SUCCESS; u8 i; for (i = 0; i < params->prof->segs_cnt; i++) { u64 match = params->prof->segs[i].match; enum ice_flow_field j; ice_for_each_set_bit(j, (ice_bitmap_t *)&match, ICE_FLOW_FIELD_IDX_MAX) { status = ice_flow_xtract_fld(hw, params, i, j); if (status) return status; ice_clear_bit(j, (ice_bitmap_t *)&match); } } return status; } /** * ice_flow_proc_segs - process all packet segments associated with a profile * @hw: pointer to the HW struct * @params: information about the flow to be processed */ static enum ice_status ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params) { enum ice_status status; status = ice_flow_proc_seg_hdrs(params); if (status) return status; status = ice_flow_create_xtrct_seq(hw, params); if (status) return status; switch (params->blk) { case ICE_BLK_RSS: status = ICE_SUCCESS; break; default: return ICE_ERR_NOT_IMPL; } return status; } #define ICE_FLOW_FIND_PROF_CHK_FLDS 0x00000001 #define ICE_FLOW_FIND_PROF_CHK_VSI 0x00000002 #define ICE_FLOW_FIND_PROF_NOT_CHK_DIR 0x00000004 /** * ice_flow_find_prof_conds - Find a profile matching headers and conditions * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided * @vsi_handle: software VSI handle to check VSI (ICE_FLOW_FIND_PROF_CHK_VSI) * @conds: additional conditions to be checked (ICE_FLOW_FIND_PROF_CHK_*) */ static struct ice_flow_prof * ice_flow_find_prof_conds(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt, u16 vsi_handle, u32 conds) { struct ice_flow_prof *p, *prof = NULL; ice_acquire_lock(&hw->fl_profs_locks[blk]); LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) if ((p->dir == dir || conds & ICE_FLOW_FIND_PROF_NOT_CHK_DIR) && segs_cnt && segs_cnt == p->segs_cnt) { u8 i; /* Check for profile-VSI association if specified */ if ((conds & ICE_FLOW_FIND_PROF_CHK_VSI) && ice_is_vsi_valid(hw, vsi_handle) && !ice_is_bit_set(p->vsis, vsi_handle)) continue; /* Protocol headers must be checked. Matched fields are * checked if specified. 
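 *
 * A minimal sketch of a strict lookup that checks matched fields
 * and VSI association as well (the call itself is hypothetical;
 * the flags are the ICE_FLOW_FIND_PROF_CHK_* values defined
 * above):
 *
 *	p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt,
 *				     vsi_handle,
 *				     ICE_FLOW_FIND_PROF_CHK_FLDS |
 *				     ICE_FLOW_FIND_PROF_CHK_VSI);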
*/ for (i = 0; i < segs_cnt; i++) if (segs[i].hdrs != p->segs[i].hdrs || ((conds & ICE_FLOW_FIND_PROF_CHK_FLDS) && segs[i].match != p->segs[i].match)) break; /* A match is found if all segments are matched */ if (i == segs_cnt) { prof = p; break; } } ice_release_lock(&hw->fl_profs_locks[blk]); return prof; } /** * ice_flow_find_prof - Look up a profile matching headers and matched fields * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided */ u64 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt) { struct ice_flow_prof *p; p = ice_flow_find_prof_conds(hw, blk, dir, segs, segs_cnt, ICE_MAX_VSI, ICE_FLOW_FIND_PROF_CHK_FLDS); return p ? p->id : ICE_FLOW_PROF_ID_INVAL; } /** * ice_flow_find_prof_id - Look up a profile with given profile ID * @hw: pointer to the HW struct * @blk: classification stage * @prof_id: unique ID to identify this flow profile */ static struct ice_flow_prof * ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id) { struct ice_flow_prof *p; LIST_FOR_EACH_ENTRY(p, &hw->fl_profs[blk], ice_flow_prof, l_entry) if (p->id == prof_id) return p; return NULL; } /** * ice_flow_get_hw_prof - return the HW profile for a specific profile ID handle * @hw: pointer to the HW struct * @blk: classification stage * @prof_id: the profile ID handle * @hw_prof_id: pointer to variable to receive the HW profile ID */ enum ice_status ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u8 *hw_prof_id) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; struct ice_prof_map *map; ice_acquire_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, prof_id); if (map) { *hw_prof_id = map->prof_id; status = ICE_SUCCESS; } ice_release_lock(&hw->blk[blk].es.prof_map_lock); return status; } /** * ice_flow_add_prof_sync - Add a flow profile for packet segments and fields * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided * @acts: array of default actions * @acts_cnt: number of default actions * @prof: stores the returned flow profile added * * Assumption: the caller has acquired the lock to the profile list */ static enum ice_status ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_action *acts, u8 acts_cnt, struct ice_flow_prof **prof) { struct ice_flow_prof_params *params; enum ice_status status; u8 i; if (!prof || (acts_cnt && !acts)) return ICE_ERR_BAD_PTR; params = (struct ice_flow_prof_params *)ice_malloc(hw, sizeof(*params)); if (!params) return ICE_ERR_NO_MEMORY; params->prof = (struct ice_flow_prof *) ice_malloc(hw, sizeof(*params->prof)); if (!params->prof) { status = ICE_ERR_NO_MEMORY; goto free_params; } /* initialize extraction sequence to all invalid (0xff) */ for (i = 0; i < ICE_MAX_FV_WORDS; i++) { params->es[i].prot_id = ICE_PROT_INVALID; params->es[i].off = ICE_FV_OFFSET_INVAL; } params->blk = blk; params->prof->id = prof_id; params->prof->dir = dir; params->prof->segs_cnt = segs_cnt; /* Make a copy of the segments that need to be persistent in the flow * profile instance */ for (i = 0; i < segs_cnt; i++) 
ice_memcpy(&params->prof->segs[i], &segs[i], sizeof(*segs), ICE_NONDMA_TO_NONDMA); status = ice_flow_proc_segs(hw, params); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error processing a flow's packet segments\n"); goto out; } /* Add a HW profile for this flow profile */ - status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, - params->es); + status = ice_add_prof(hw, blk, prof_id, params->ptypes, params->es); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; } *prof = params->prof; out: if (status) { ice_free(hw, params->prof); } free_params: ice_free(hw, params); return status; } /** * ice_flow_rem_prof_sync - remove a flow profile * @hw: pointer to the hardware structure * @blk: classification stage * @prof: pointer to flow profile to remove * * Assumption: the caller has acquired the lock to the profile list */ static enum ice_status ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof) { enum ice_status status; /* Remove all hardware profiles associated with this flow profile */ status = ice_rem_prof(hw, blk, prof->id); if (!status) { LIST_DEL(&prof->l_entry); ice_free(hw, prof); } return status; } /** * ice_flow_assoc_vsig_vsi - associate a VSI with VSIG * @hw: pointer to the hardware structure * @blk: classification stage * @vsi_handle: software VSI handle * @vsig: target VSI group * * Assumption: the caller has already verified that the VSI to * be added has the same characteristics as the VSIG and will * thereby have access to all resources added to that VSIG. */ enum ice_status ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, u16 vsig) { enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT) return ICE_ERR_PARAM; ice_acquire_lock(&hw->fl_profs_locks[blk]); status = ice_add_vsi_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle), vsig); ice_release_lock(&hw->fl_profs_locks[blk]); return status; } /** * ice_flow_assoc_prof - associate a VSI with a flow profile * @hw: pointer to the hardware structure * @blk: classification stage * @prof: pointer to flow profile * @vsi_handle: software VSI handle * * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ static enum ice_status ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { enum ice_status status = ICE_SUCCESS; if (!ice_is_bit_set(prof->vsis, vsi_handle)) { status = ice_add_prof_id_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle), prof->id); if (!status) ice_set_bit(vsi_handle, prof->vsis); else ice_debug(hw, ICE_DBG_FLOW, "HW profile add failed, %d\n", status); } return status; } /** * ice_flow_disassoc_prof - disassociate a VSI from a flow profile * @hw: pointer to the hardware structure * @blk: classification stage * @prof: pointer to flow profile * @vsi_handle: software VSI handle * * Assumption: the caller has acquired the lock to the profile list * and the software VSI handle has been validated */ static enum ice_status ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, struct ice_flow_prof *prof, u16 vsi_handle) { enum ice_status status = ICE_SUCCESS; if (ice_is_bit_set(prof->vsis, vsi_handle)) { status = ice_rem_prof_id_flow(hw, blk, ice_get_hw_vsi_num(hw, vsi_handle), prof->id); if (!status) ice_clear_bit(vsi_handle, prof->vsis); else ice_debug(hw, ICE_DBG_FLOW, "HW profile remove failed, %d\n", status); } return status; } /** * ice_flow_add_prof - Add a flow
profile for packet segments and matched fields * @hw: pointer to the HW struct * @blk: classification stage * @dir: flow direction * @prof_id: unique ID to identify this flow profile * @segs: array of one or more packet segments that describe the flow * @segs_cnt: number of packet segments provided * @acts: array of default actions * @acts_cnt: number of default actions * @prof: stores the returned flow profile added */ static enum ice_status ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt, struct ice_flow_action *acts, u8 acts_cnt, struct ice_flow_prof **prof) { enum ice_status status; if (segs_cnt > ICE_FLOW_SEG_MAX) return ICE_ERR_MAX_LIMIT; if (!segs_cnt) return ICE_ERR_PARAM; if (!segs) return ICE_ERR_BAD_PTR; status = ice_flow_val_hdrs(segs, segs_cnt); if (status) return status; ice_acquire_lock(&hw->fl_profs_locks[blk]); status = ice_flow_add_prof_sync(hw, blk, dir, prof_id, segs, segs_cnt, acts, acts_cnt, prof); if (!status) LIST_ADD(&(*prof)->l_entry, &hw->fl_profs[blk]); ice_release_lock(&hw->fl_profs_locks[blk]); return status; } /** * ice_flow_rem_prof - Remove a flow profile and all entries associated with it * @hw: pointer to the HW struct * @blk: the block for which the flow profile is to be removed * @prof_id: unique ID of the flow profile to be removed */ static enum ice_status ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id) { struct ice_flow_prof *prof; enum ice_status status; ice_acquire_lock(&hw->fl_profs_locks[blk]); prof = ice_flow_find_prof_id(hw, blk, prof_id); if (!prof) { status = ICE_ERR_DOES_NOT_EXIST; goto out; } /* prof becomes invalid after the call */ status = ice_flow_rem_prof_sync(hw, blk, prof); out: ice_release_lock(&hw->fl_profs_locks[blk]); return status; } /** * ice_flow_set_fld_ext - specifies locations of field from entry's input buffer * @seg: packet segment the field being set belongs to * @fld: field to be set * @field_type: type of the field * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from * entry's input buffer * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's * input buffer * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from * entry's input buffer * * This helper function stores information of a field being matched, including * the type of the field and the locations of the value to match, the mask, and * the upper-bound value in the start of the input buffer for a flow entry. * This function should only be used for fixed-size data structures. * * This function also opportunistically determines the protocol headers to be * present based on the fields being set. Some fields cannot be used alone to * determine the protocol headers present. Sometimes, fields for particular * protocol headers are not matched. In those cases, the protocol headers * must be explicitly set. 
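 *
 * For example, selecting ICE_FLOW_FIELD_IDX_TCP_SRC_PORT below
 * implicitly ORs ICE_FLOW_SEG_HDR_TCP into the segment through
 * ICE_FLOW_SET_HDRS(), while a field such as a raw IPv4 address
 * says nothing about L4, leaving the caller to set the L4 header
 * flag explicitly if one is required.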
*/ static void ice_flow_set_fld_ext(struct ice_flow_seg_info *seg, enum ice_flow_field fld, enum ice_flow_fld_match_type field_type, u16 val_loc, u16 mask_loc, u16 last_loc) { u64 bit = BIT_ULL(fld); seg->match |= bit; if (field_type == ICE_FLOW_FLD_TYPE_RANGE) seg->range |= bit; seg->fields[fld].type = field_type; seg->fields[fld].src.val = val_loc; seg->fields[fld].src.mask = mask_loc; seg->fields[fld].src.last = last_loc; ICE_FLOW_SET_HDRS(seg, ice_flds_info[fld].hdr); } /** * ice_flow_set_fld - specifies locations of field from entry's input buffer * @seg: packet segment the field being set belongs to * @fld: field to be set * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from * entry's input buffer * @mask_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of mask value from entry's * input buffer * @last_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of last/upper value from * entry's input buffer * @range: indicate if field being matched is to be in a range * * This function specifies the locations, in the form of byte offsets from the * start of the input buffer for a flow entry, from where the value to match, * the mask value, and upper value can be extracted. These locations are then * stored in the flow profile. When adding a flow entry associated with the * flow profile, these locations will be used to quickly extract the values and * create the content of a match entry. This function should only be used for * fixed-size data structures. */ static void ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, u16 val_loc, u16 mask_loc, u16 last_loc, bool range) { enum ice_flow_fld_match_type t = range ? ICE_FLOW_FLD_TYPE_RANGE : ICE_FLOW_FLD_TYPE_REG; ice_flow_set_fld_ext(seg, fld, t, val_loc, mask_loc, last_loc); } /** * ice_flow_set_fld_prefix - sets locations of prefix field from entry's buf * @seg: packet segment the field being set belongs to * @fld: field to be set * @val_loc: if not ICE_FLOW_FLD_OFF_INVAL, location of the value to match from * entry's input buffer * @pref_loc: location of prefix value from entry's input buffer * @pref_sz: size of the location holding the prefix value * * This function specifies the locations, in the form of byte offsets from the * start of the input buffer for a flow entry, from where the value to match * and the IPv4 prefix value can be extracted. These locations are then stored * in the flow profile. When adding flow entries to the associated flow profile, * these locations can be used to quickly extract the values to create the * content of a match entry. This function should only be used for fixed-size * data structures. */ void ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld, u16 val_loc, u16 pref_loc, u8 pref_sz) { /* For this type of field, the "mask" location is for the prefix value's * location and the "last" location is for the size of the location of * the prefix value. 
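 *
 * For instance, to match an IPv4 source address against
 * 192.168.0.0/16, the entry's input buffer would hold the address
 * at @val_loc and the prefix length 16 at @pref_loc (this buffer
 * layout is assumed for illustration); the resulting mask follows
 * ICE_IPV4_MAKE_PREFIX_MASK(16), i.e. 0xffff0000.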
*/ ice_flow_set_fld_ext(seg, fld, ICE_FLOW_FLD_TYPE_PREFIX, val_loc, pref_loc, (u16)pref_sz); } #define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \ (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6) #define ICE_FLOW_RSS_SEG_HDR_L4_MASKS \ (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP) #define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \ (ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \ ICE_FLOW_RSS_SEG_HDR_L4_MASKS) /** * ice_flow_set_rss_seg_info - setup packet segments for RSS * @segs: pointer to the flow field segment(s) * @seg_cnt: segment count * @cfg: configure parameters * * Helper function to extract fields from hash bitmap and use flow * header value to set flow field segment for further use in flow * profile entry or removal. */ static enum ice_status ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt, const struct ice_rss_hash_cfg *cfg) { struct ice_flow_seg_info *seg; u64 val; - u8 i; + u16 i; /* set inner most segment */ seg = &segs[seg_cnt - 1]; ice_for_each_set_bit(i, (const ice_bitmap_t *)&cfg->hash_flds, - ICE_FLOW_FIELD_IDX_MAX) + (u16)ICE_FLOW_FIELD_IDX_MAX) ice_flow_set_fld(seg, (enum ice_flow_field)i, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, false); ICE_FLOW_SET_HDRS(seg, cfg->addl_hdrs); /* set outer most header */ if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4) segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_FRAG | ICE_FLOW_SEG_HDR_IPV_OTHER; else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6) segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_FRAG | ICE_FLOW_SEG_HDR_IPV_OTHER; else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE) segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_GRE | ICE_FLOW_SEG_HDR_IPV_OTHER; else if (cfg->hdr_type == ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE) segs[ICE_RSS_OUTER_HEADERS].hdrs |= ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_GRE | ICE_FLOW_SEG_HDR_IPV_OTHER; if (seg->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS) return ICE_ERR_PARAM; val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS); if (val && !ice_is_pow2(val)) return ICE_ERR_CFG; val = (u64)(seg->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS); if (val && !ice_is_pow2(val)) return ICE_ERR_CFG; return ICE_SUCCESS; } /** * ice_rem_vsi_rss_list - remove VSI from RSS list * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * * Remove the VSI from all RSS configurations in the list. */ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle) { struct ice_rss_cfg *r, *tmp; if (LIST_EMPTY(&hw->rss_list_head)) return; ice_acquire_lock(&hw->rss_locks); LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head, ice_rss_cfg, l_entry) if (ice_test_and_clear_bit(vsi_handle, r->vsis)) if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) { LIST_DEL(&r->l_entry); ice_free(hw, r); } ice_release_lock(&hw->rss_locks); } /** * ice_rem_vsi_rss_cfg - remove RSS configurations associated with VSI * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * * This function will iterate through all flow profiles and disassociate * the VSI from that profile. If the flow profile has no VSIs it will * be removed. 
*/ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *p, *t; enum ice_status status = ICE_SUCCESS; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; if (LIST_EMPTY(&hw->fl_profs[blk])) return ICE_SUCCESS; ice_acquire_lock(&hw->rss_locks); LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof, l_entry) if (ice_is_bit_set(p->vsis, vsi_handle)) { status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle); if (status) break; if (!ice_is_any_bit_set(p->vsis, ICE_MAX_VSI)) { status = ice_flow_rem_prof(hw, blk, p->id); if (status) break; } } ice_release_lock(&hw->rss_locks); return status; } /** * ice_get_rss_hdr_type - get a RSS profile's header type * @prof: RSS flow profile */ static enum ice_rss_cfg_hdr_type ice_get_rss_hdr_type(struct ice_flow_prof *prof) { enum ice_rss_cfg_hdr_type hdr_type = ICE_RSS_ANY_HEADERS; if (prof->segs_cnt == ICE_FLOW_SEG_SINGLE) { hdr_type = ICE_RSS_OUTER_HEADERS; } else if (prof->segs_cnt == ICE_FLOW_SEG_MAX) { if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs == ICE_FLOW_SEG_HDR_NONE) hdr_type = ICE_RSS_INNER_HEADERS; if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV4) hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4; if (prof->segs[ICE_RSS_OUTER_HEADERS].hdrs & ICE_FLOW_SEG_HDR_IPV6) hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6; } return hdr_type; } /** * ice_rem_rss_list - remove RSS configuration from list * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @prof: pointer to flow profile * * Assumption: lock has already been acquired for RSS list */ static void ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *tmp; /* Search for RSS hash fields associated to the VSI that match the * hash configurations associated to the flow profile. If found * remove from the RSS entry list of the VSI context and delete entry. 
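 *
 * The comparison below keys on the innermost segment,
 * prof->segs[prof->segs_cnt - 1], plus the header type derived by
 * ice_get_rss_hdr_type(), mirroring how ice_add_rss_list() stores
 * its entries.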
*/ hdr_type = ice_get_rss_hdr_type(prof); LIST_FOR_EACH_ENTRY_SAFE(r, tmp, &hw->rss_list_head, ice_rss_cfg, l_entry) if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match && r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs && r->hash.hdr_type == hdr_type) { ice_clear_bit(vsi_handle, r->vsis); if (!ice_is_any_bit_set(r->vsis, ICE_MAX_VSI)) { LIST_DEL(&r->l_entry); ice_free(hw, r); } return; } } /** * ice_add_rss_list - add RSS configuration to list * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @prof: pointer to flow profile * * Assumption: lock has already been acquired for RSS list */ static enum ice_status ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof) { enum ice_rss_cfg_hdr_type hdr_type; struct ice_rss_cfg *r, *rss_cfg; hdr_type = ice_get_rss_hdr_type(prof); LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, ice_rss_cfg, l_entry) if (r->hash.hash_flds == prof->segs[prof->segs_cnt - 1].match && r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs && r->hash.hdr_type == hdr_type) { ice_set_bit(vsi_handle, r->vsis); return ICE_SUCCESS; } rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg)); if (!rss_cfg) return ICE_ERR_NO_MEMORY; rss_cfg->hash.hash_flds = prof->segs[prof->segs_cnt - 1].match; rss_cfg->hash.addl_hdrs = prof->segs[prof->segs_cnt - 1].hdrs; rss_cfg->hash.hdr_type = hdr_type; rss_cfg->hash.symm = prof->cfg.symm; ice_set_bit(vsi_handle, rss_cfg->vsis); LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head); return ICE_SUCCESS; } #define ICE_FLOW_PROF_HASH_S 0 #define ICE_FLOW_PROF_HASH_M (0xFFFFFFFFULL << ICE_FLOW_PROF_HASH_S) #define ICE_FLOW_PROF_HDR_S 32 #define ICE_FLOW_PROF_HDR_M (0x3FFFFFFFULL << ICE_FLOW_PROF_HDR_S) #define ICE_FLOW_PROF_ENCAP_S 62 #define ICE_FLOW_PROF_ENCAP_M (0x3ULL << ICE_FLOW_PROF_ENCAP_S) /* Flow profile ID format: * [0:31] - Packet match fields * [32:61] - Protocol header * [62:63] - Encapsulation flag: * 0 if non-tunneled * 1 if tunneled * 2 for tunneled with outer ipv4 * 3 for tunneled with outer ipv6 */ #define ICE_FLOW_GEN_PROFID(hash, hdr, encap) \ ((u64)(((u64)(hash) & ICE_FLOW_PROF_HASH_M) | \ (((u64)(hdr) << ICE_FLOW_PROF_HDR_S) & ICE_FLOW_PROF_HDR_M) | \ (((u64)(encap) << ICE_FLOW_PROF_ENCAP_S) & ICE_FLOW_PROF_ENCAP_M))) /** * ice_add_rss_cfg_sync - add an RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static enum ice_status ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_prof *prof = NULL; struct ice_flow_seg_info *segs; enum ice_status status; u8 segs_cnt; if (cfg->symm) return ICE_ERR_PARAM; segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt, sizeof(*segs)); if (!segs) return ICE_ERR_NO_MEMORY; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto exit; /* Search for a flow profile that has matching headers, hash fields * and has the input VSI associated to it. If found, no further * operations required and exit. 
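 *
 * In sketch form, the search order implemented below is:
 * 1) a profile with matching headers and hash fields already has
 *    this VSI associated: nothing to do;
 * 2) a profile with matching headers but different hash fields has
 *    this VSI: detach the VSI, and drop the profile if it becomes
 *    empty;
 * 3) a profile with matching headers and hash fields exists:
 *    associate the VSI with it;
 * 4) otherwise, create a new profile via ICE_FLOW_GEN_PROFID().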
*/ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS | ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) goto exit; /* Check if a flow profile exists with the same protocol headers and * associated with the input VSI. If so disassociate the VSI from * this profile. The VSI will be added to a new profile created with * the protocol header and new hash field configuration. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, vsi_handle, ICE_FLOW_FIND_PROF_CHK_VSI); if (prof) { status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); if (!status) ice_rem_rss_list(hw, vsi_handle, prof); else goto exit; /* Remove profile if it has no VSIs associated */ if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) { status = ice_flow_rem_prof(hw, blk, prof->id); if (status) goto exit; } } /* Search for a profile that has same match fields only. If this * exists then associate the VSI to this profile. */ prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (prof) { status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); if (!status) status = ice_add_rss_list(hw, vsi_handle, prof); goto exit; } /* Create a new flow profile with generated profile and packet * segment information. */ status = ice_flow_add_prof(hw, blk, ICE_FLOW_RX, ICE_FLOW_GEN_PROFID(cfg->hash_flds, segs[segs_cnt - 1].hdrs, cfg->hdr_type), segs, segs_cnt, NULL, 0, &prof); if (status) goto exit; status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle); /* If association to a new flow profile failed then this profile can * be removed. */ if (status) { ice_flow_rem_prof(hw, blk, prof->id); goto exit; } status = ice_add_rss_list(hw, vsi_handle, prof); prof->cfg.symm = cfg->symm; exit: ice_free(hw, segs); return status; } /** * ice_add_rss_cfg - add an RSS configuration with specified hashed fields * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @cfg: configure parameters * * This function will generate a flow profile based on fields associated with * the input fields to hash on, the flow type and use the VSI number to add * a flow entry to the profile. 
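 *
 * A minimal, hypothetical caller enabling TCP over IPv4 hashing
 * might fill the configuration like this (names are the ones
 * declared in ice_flow.h):
 *
 *	struct ice_rss_hash_cfg hcfg = {
 *		.addl_hdrs = ICE_FLOW_SEG_HDR_NONE,
 *		.hash_flds = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
 *		.hdr_type = ICE_RSS_ANY_HEADERS,
 *		.symm = false,
 *	};
 *	status = ice_add_rss_cfg(hw, vsi_handle, &hcfg);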
*/ enum ice_status ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { struct ice_rss_hash_cfg local_cfg; enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || cfg->hash_flds == ICE_HASH_INVALID) return ICE_ERR_PARAM; local_cfg = *cfg; if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { ice_acquire_lock(&hw->rss_locks); status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); ice_release_lock(&hw->rss_locks); } else { ice_acquire_lock(&hw->rss_locks); local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); if (!status) { local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; status = ice_add_rss_cfg_sync(hw, vsi_handle, &local_cfg); } ice_release_lock(&hw->rss_locks); } return status; } /** * ice_rem_rss_cfg_sync - remove an existing RSS configuration * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @cfg: configure parameters * * Assumption: lock has already been acquired for RSS list */ static enum ice_status ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { const enum ice_block blk = ICE_BLK_RSS; struct ice_flow_seg_info *segs; struct ice_flow_prof *prof; enum ice_status status; u8 segs_cnt; segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ? ICE_FLOW_SEG_SINGLE : ICE_FLOW_SEG_MAX; segs = (struct ice_flow_seg_info *)ice_calloc(hw, segs_cnt, sizeof(*segs)); if (!segs) return ICE_ERR_NO_MEMORY; /* Construct the packet segment info from the hashed fields */ status = ice_flow_set_rss_seg_info(segs, segs_cnt, cfg); if (status) goto out; prof = ice_flow_find_prof_conds(hw, blk, ICE_FLOW_RX, segs, segs_cnt, vsi_handle, ICE_FLOW_FIND_PROF_CHK_FLDS); if (!prof) { status = ICE_ERR_DOES_NOT_EXIST; goto out; } status = ice_flow_disassoc_prof(hw, blk, prof, vsi_handle); if (status) goto out; /* Remove RSS configuration from VSI context before deleting * the flow profile. */ ice_rem_rss_list(hw, vsi_handle, prof); if (!ice_is_any_bit_set(prof->vsis, ICE_MAX_VSI)) status = ice_flow_rem_prof(hw, blk, prof->id); out: ice_free(hw, segs); return status; } /** * ice_rem_rss_cfg - remove an existing RSS config with matching hashed fields * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @cfg: configure parameters * * This function will lookup the flow profile based on the input * hash field bitmap, iterate through the profile entry list of * that profile and find entry associated with input VSI to be * removed. Calls are made to underlying flow apis which will in * turn build or update buffers for RSS XLT1 section. */ enum ice_status ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg) { struct ice_rss_hash_cfg local_cfg; enum ice_status status; if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg || cfg->hdr_type > ICE_RSS_ANY_HEADERS || cfg->hash_flds == ICE_HASH_INVALID) return ICE_ERR_PARAM; ice_acquire_lock(&hw->rss_locks); local_cfg = *cfg; if (cfg->hdr_type < ICE_RSS_ANY_HEADERS) { status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); } else { local_cfg.hdr_type = ICE_RSS_OUTER_HEADERS; status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); if (!status) { local_cfg.hdr_type = ICE_RSS_INNER_HEADERS; status = ice_rem_rss_cfg_sync(hw, vsi_handle, &local_cfg); } } ice_release_lock(&hw->rss_locks); return status; } /* Mapping of AVF hash bit fields to an L3-L4 hash combination. 
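 * For example, ICE_AVF_FLOW_FIELD_IPV4_TCP is translated to
 * ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT by
 * ice_add_avf_rss_cfg() below.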
* As the ice_flow_avf_hdr_field represent individual bit shifts in a hash, * convert its values to their appropriate flow L3, L4 values. */ #define ICE_FLOW_AVF_RSS_IPV4_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4)) #define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP)) #define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP)) #define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \ (ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) #define ICE_FLOW_AVF_RSS_IPV6_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6)) #define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP)) #define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \ (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP)) #define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \ (ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) /** * ice_add_avf_rss_cfg - add an RSS configuration for AVF driver * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure * * This function will take the hash bitmap provided by the AVF driver via a * message, convert it to ICE-compatible values, and configure RSS flow * profiles. */ enum ice_status ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash) { enum ice_status status = ICE_SUCCESS; struct ice_rss_hash_cfg hcfg; u64 hash_flds; if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID || !ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; /* Make sure no unsupported bits are specified */ if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS | ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS)) return ICE_ERR_CFG; hash_flds = avf_hash; /* Always create an L3 RSS configuration for any L4 RSS configuration */ if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) hash_flds |= ICE_FLOW_AVF_RSS_IPV4_MASKS; if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) hash_flds |= ICE_FLOW_AVF_RSS_IPV6_MASKS; /* Create the corresponding RSS configuration for each valid hash bit */ while (hash_flds) { u64 rss_hash = ICE_HASH_INVALID; if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS) { if (hash_flds & ICE_FLOW_AVF_RSS_IPV4_MASKS) { rss_hash = ICE_FLOW_HASH_IPV4; hash_flds &= ~ICE_FLOW_AVF_RSS_IPV4_MASKS; } else if (hash_flds & ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS) { rss_hash = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT; hash_flds &= ~ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS; } else if (hash_flds & ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS) { rss_hash = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT; hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS; } else if (hash_flds & BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) { rss_hash = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT; hash_flds &= ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP); } } else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) { if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) { rss_hash = ICE_FLOW_HASH_IPV6; hash_flds &= ~ICE_FLOW_AVF_RSS_IPV6_MASKS; } else if (hash_flds & ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS) { rss_hash = ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT; hash_flds &= 
~ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS; } else if (hash_flds & ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS) { rss_hash = ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT; hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS; } else if (hash_flds & BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) { rss_hash = ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT; hash_flds &= ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP); } } if (rss_hash == ICE_HASH_INVALID) return ICE_ERR_OUT_OF_RANGE; hcfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE; hcfg.hash_flds = rss_hash; hcfg.symm = false; hcfg.hdr_type = ICE_RSS_ANY_HEADERS; status = ice_add_rss_cfg(hw, vsi_handle, &hcfg); if (status) break; } return status; } /** * ice_replay_rss_cfg - replay RSS configurations associated with VSI * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle */ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) { enum ice_status status = ICE_SUCCESS; struct ice_rss_cfg *r; if (!ice_is_vsi_valid(hw, vsi_handle)) return ICE_ERR_PARAM; ice_acquire_lock(&hw->rss_locks); LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, ice_rss_cfg, l_entry) { if (ice_is_bit_set(r->vsis, vsi_handle)) { status = ice_add_rss_cfg_sync(hw, vsi_handle, &r->hash); if (status) break; } } ice_release_lock(&hw->rss_locks); return status; } /** * ice_get_rss_cfg - returns hashed fields for the given header types * @hw: pointer to the hardware structure * @vsi_handle: software VSI handle * @hdrs: protocol header type * * This function will return the match fields of the first instance of flow * profile having the given header types and containing input VSI */ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) { u64 rss_hash = ICE_HASH_INVALID; struct ice_rss_cfg *r; /* verify if the protocol header is non zero and VSI is valid */ if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle)) return ICE_HASH_INVALID; ice_acquire_lock(&hw->rss_locks); LIST_FOR_EACH_ENTRY(r, &hw->rss_list_head, ice_rss_cfg, l_entry) if (ice_is_bit_set(r->vsis, vsi_handle) && r->hash.addl_hdrs == hdrs) { rss_hash = r->hash.hash_flds; break; } ice_release_lock(&hw->rss_locks); return rss_hash; } diff --git a/sys/dev/ice/ice_flow.h b/sys/dev/ice/ice_flow.h index a66c773b8d77..07e16e3bc4d1 100644 --- a/sys/dev/ice/ice_flow.h +++ b/sys/dev/ice/ice_flow.h @@ -1,353 +1,353 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #ifndef _ICE_FLOW_H_ #define _ICE_FLOW_H_ #include "ice_flex_type.h" #define ICE_IPV4_MAKE_PREFIX_MASK(prefix) ((u32)(~0) << (32 - (prefix))) #define ICE_FLOW_PROF_ID_INVAL 0xfffffffffffffffful #define ICE_FLOW_PROF_ID_BYPASS 0 #define ICE_FLOW_PROF_ID_DEFAULT 1 #define ICE_FLOW_ENTRY_HANDLE_INVAL 0 #define ICE_FLOW_VSI_INVAL 0xffff #define ICE_FLOW_FLD_OFF_INVAL 0xffff /* Generate flow hash field from flow field type(s) */ #define ICE_FLOW_HASH_IPV4 \ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)) #define ICE_FLOW_HASH_IPV6 \ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)) #define ICE_FLOW_HASH_TCP_PORT \ (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)) #define ICE_FLOW_HASH_UDP_PORT \ (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)) #define ICE_FLOW_HASH_SCTP_PORT \ (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)) #define ICE_HASH_INVALID 0 #define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT) #define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT) #define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT) #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT) #define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT) #define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT) /* Protocol header fields within a packet segment. A segment consists of one or * more protocol headers that make up a logical group of protocol headers. Each * logical group of protocol headers encapsulates or is encapsulated using/by * tunneling or encapsulation protocols for network virtualization such as GRE, * VxLAN, etc. */ enum ice_flow_seg_hdr { ICE_FLOW_SEG_HDR_NONE = 0x00000000, ICE_FLOW_SEG_HDR_ETH = 0x00000001, ICE_FLOW_SEG_HDR_VLAN = 0x00000002, ICE_FLOW_SEG_HDR_IPV4 = 0x00000004, ICE_FLOW_SEG_HDR_IPV6 = 0x00000008, ICE_FLOW_SEG_HDR_ARP = 0x00000010, ICE_FLOW_SEG_HDR_ICMP = 0x00000020, ICE_FLOW_SEG_HDR_TCP = 0x00000040, ICE_FLOW_SEG_HDR_UDP = 0x00000080, ICE_FLOW_SEG_HDR_SCTP = 0x00000100, ICE_FLOW_SEG_HDR_GRE = 0x00000200, /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and * ICE_FLOW_SEG_HDR_IPV6. 
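 * That is, they are meaningful only in combination with an L3 bit,
 * e.g. ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER as built
 * by ice_flow_set_rss_seg_info() for outer-header configurations.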
*/ ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000, ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000, }; enum ice_flow_field { /* L2 */ ICE_FLOW_FIELD_IDX_ETH_DA, ICE_FLOW_FIELD_IDX_ETH_SA, ICE_FLOW_FIELD_IDX_S_VLAN, ICE_FLOW_FIELD_IDX_C_VLAN, ICE_FLOW_FIELD_IDX_ETH_TYPE, /* L3 */ ICE_FLOW_FIELD_IDX_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV6_DSCP, ICE_FLOW_FIELD_IDX_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV6_TTL, ICE_FLOW_FIELD_IDX_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV4_SA, ICE_FLOW_FIELD_IDX_IPV4_DA, ICE_FLOW_FIELD_IDX_IPV6_SA, ICE_FLOW_FIELD_IDX_IPV6_DA, /* L4 */ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_FLAGS, /* ARP */ ICE_FLOW_FIELD_IDX_ARP_SIP, ICE_FLOW_FIELD_IDX_ARP_DIP, ICE_FLOW_FIELD_IDX_ARP_SHA, ICE_FLOW_FIELD_IDX_ARP_DHA, ICE_FLOW_FIELD_IDX_ARP_OP, /* ICMP */ ICE_FLOW_FIELD_IDX_ICMP_TYPE, ICE_FLOW_FIELD_IDX_ICMP_CODE, /* GRE */ ICE_FLOW_FIELD_IDX_GRE_KEYID, /* The total number of enums must not exceed 64 */ ICE_FLOW_FIELD_IDX_MAX }; /* Flow headers and fields for AVF support */ enum ice_flow_avf_hdr_field { /* Values 0 - 28 are reserved for future use */ ICE_AVF_FLOW_FIELD_INVALID = 0, ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29, ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP, ICE_AVF_FLOW_FIELD_IPV4_UDP, ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK, ICE_AVF_FLOW_FIELD_IPV4_TCP, ICE_AVF_FLOW_FIELD_IPV4_SCTP, ICE_AVF_FLOW_FIELD_IPV4_OTHER, ICE_AVF_FLOW_FIELD_FRAG_IPV4, /* Values 37-38 are reserved */ ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39, ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP, ICE_AVF_FLOW_FIELD_IPV6_UDP, ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK, ICE_AVF_FLOW_FIELD_IPV6_TCP, ICE_AVF_FLOW_FIELD_IPV6_SCTP, ICE_AVF_FLOW_FIELD_IPV6_OTHER, ICE_AVF_FLOW_FIELD_FRAG_IPV6, ICE_AVF_FLOW_FIELD_RSVD47, ICE_AVF_FLOW_FIELD_FCOE_OX, ICE_AVF_FLOW_FIELD_FCOE_RX, ICE_AVF_FLOW_FIELD_FCOE_OTHER, /* Values 51-62 are reserved */ ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63, ICE_AVF_FLOW_FIELD_MAX }; /* Supported RSS offloads This macro is defined to support * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware * capabilities to the caller of this ops. */ #define ICE_DEFAULT_RSS_HENA ( \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP)) enum ice_rss_cfg_hdr_type { ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */ ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */ /* take inner headers as inputset for packet with outer ipv4. */ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4, /* take inner headers as inputset for packet with outer ipv6. */ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6, /* take outer headers first then inner headers as inputset */ /* take inner as inputset for GTPoGRE with outer ipv4 + gre. 
*/ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE, /* take inner as inputset for GTPoGRE with outer ipv6 + gre. */ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE, ICE_RSS_ANY_HEADERS }; struct ice_rss_hash_cfg { u32 addl_hdrs; /* protocol header fields */ u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */ enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */ bool symm; /* symmetric or asymmetric hash */ }; enum ice_flow_dir { ICE_FLOW_DIR_UNDEFINED = 0, ICE_FLOW_TX = 0x01, ICE_FLOW_RX = 0x02, ICE_FLOW_TX_RX = ICE_FLOW_RX | ICE_FLOW_TX }; enum ice_flow_priority { ICE_FLOW_PRIO_LOW, ICE_FLOW_PRIO_NORMAL, ICE_FLOW_PRIO_HIGH }; #define ICE_FLOW_SEG_SINGLE 1 #define ICE_FLOW_SEG_MAX 2 #define ICE_FLOW_PROFILE_MAX 1024 #define ICE_FLOW_ACL_FIELD_VECTOR_MAX 32 #define ICE_FLOW_FV_EXTRACT_SZ 2 #define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val)) struct ice_flow_seg_xtrct { u8 prot_id; /* Protocol ID of extracted header field */ u16 off; /* Starting offset of the field in header in bytes */ u8 idx; /* Index of FV entry used */ u8 disp; /* Displacement of field in bits fr. FV entry's start */ }; enum ice_flow_fld_match_type { ICE_FLOW_FLD_TYPE_REG, /* Value, mask */ ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */ ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */ ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */ }; struct ice_flow_fld_loc { /* Describe offsets of field information relative to the beginning of * input buffer provided when adding flow entries. */ u16 val; /* Offset where the value is located */ u16 mask; /* Offset where the mask/prefix value is located */ u16 last; /* Length or offset where the upper value is located */ }; struct ice_flow_fld_info { enum ice_flow_fld_match_type type; /* Location where to retrieve data from an input buffer */ struct ice_flow_fld_loc src; /* Location where to put the data into the final entry buffer */ struct ice_flow_fld_loc entry; struct ice_flow_seg_xtrct xtrct; }; struct ice_flow_seg_info { u32 hdrs; /* Bitmask indicating protocol headers present */ u64 match; /* Bitmask indicating header fields to be matched */ u64 range; /* Bitmask indicating header fields matched as ranges */ struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX]; }; #define ICE_FLOW_ENTRY_HNDL(e) ((u64)e) struct ice_flow_prof { struct LIST_ENTRY_TYPE l_entry; u64 id; enum ice_flow_dir dir; u8 segs_cnt; struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX]; /* software VSI handles referenced by this flow profile */ ice_declare_bitmap(vsis, ICE_MAX_VSI); union { /* struct sw_recipe */ bool symm; /* Symmetric Hash for RSS */ } cfg; }; struct ice_rss_cfg { struct LIST_ENTRY_TYPE l_entry; /* bitmap of VSIs added to the RSS entry */ ice_declare_bitmap(vsis, ICE_MAX_VSI); struct ice_rss_hash_cfg hash; }; enum ice_flow_action_type { ICE_FLOW_ACT_NOP, ICE_FLOW_ACT_ALLOW, ICE_FLOW_ACT_DROP, ICE_FLOW_ACT_CNTR_PKT, ICE_FLOW_ACT_FWD_VSI, ICE_FLOW_ACT_FWD_VSI_LIST, /* Should be abstracted away */ ICE_FLOW_ACT_FWD_QUEUE, /* Can Queues be abstracted away? */ ICE_FLOW_ACT_FWD_QUEUE_GROUP, /* Can Queues be abstracted away? 
*/ ICE_FLOW_ACT_PUSH, ICE_FLOW_ACT_POP, ICE_FLOW_ACT_MODIFY, ICE_FLOW_ACT_CNTR_BYTES, ICE_FLOW_ACT_CNTR_PKT_BYTES, ICE_FLOW_ACT_GENERIC_0, ICE_FLOW_ACT_GENERIC_1, ICE_FLOW_ACT_GENERIC_2, ICE_FLOW_ACT_GENERIC_3, ICE_FLOW_ACT_GENERIC_4, ICE_FLOW_ACT_RPT_FLOW_ID, ICE_FLOW_ACT_BUILD_PROF_IDX, }; struct ice_flow_action { enum ice_flow_action_type type; union { u32 dummy; } data; }; u64 ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt); enum ice_status ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle, u16 vsig); enum ice_status ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u8 *hw_prof); void ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld, u16 val_loc, u16 prefix_loc, u8 prefix_sz); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds); enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); enum ice_status ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg); enum ice_status ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg); u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs); #endif /* _ICE_FLOW_H_ */ diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c index fb97df5f5797..e49a82c88982 100644 --- a/sys/dev/ice/ice_fw_logging.c +++ b/sys/dev/ice/ice_fw_logging.c @@ -1,425 +1,425 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /** * @file ice_fw_logging.c * @brief firmware logging sysctls * * Contains sysctls to enable and configure firmware logging debug support. 
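 *
 * As a usage sketch (the exact sysctl path depends on the device
 * unit and the parent node, and is assumed here for illustration),
 * an administrator could register logging and raise one module's
 * verbosity with:
 *
 *	sysctl dev.ice.0.fw_log.register=1
 *	sysctl dev.ice.0.fw_log.severity.<module>=normal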
*/ #include "ice_lib.h" #include "ice_iflib.h" #include <sys/queue.h> #include <sys/sdt.h> /* * SDT provider for DTrace probes related to firmware logging events */ SDT_PROVIDER_DEFINE(ice_fwlog); /* * SDT DTrace probe fired when a firmware log message is received over the * AdminQ. It passes the buffer of the firmware log message along with its * length in bytes to the DTrace framework. */ SDT_PROBE_DEFINE2(ice_fwlog, , , message, "uint8_t *", "int"); /* * Helper function prototypes */ static int ice_reconfig_fw_log(struct ice_softc *sc, struct ice_fwlog_cfg *cfg); /* * dynamic sysctl handlers */ static int ice_sysctl_fwlog_set_cfg_options(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fwlog_log_resolution(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fwlog_register(SYSCTL_HANDLER_ARGS); static int ice_sysctl_fwlog_module_log_severity(SYSCTL_HANDLER_ARGS); /** * ice_reconfig_fw_log - Re-program firmware logging configuration * @sc: private softc structure * @cfg: firmware log configuration to latch * * If the adminq is currently active, ask firmware to update the logging * configuration. If the adminq is currently down, then do nothing. In this * case, ice_init_hw() will re-configure firmware logging as soon as it brings * up the adminq. */ static int ice_reconfig_fw_log(struct ice_softc *sc, struct ice_fwlog_cfg *cfg) { enum ice_status status; ice_fwlog_init(&sc->hw, cfg); if (!ice_check_sq_alive(&sc->hw, &sc->hw.adminq)) return (0); if (!ice_fwlog_supported(&sc->hw)) return (0); status = ice_fwlog_set(&sc->hw, cfg); if (status) { device_printf(sc->dev, "Failed to reconfigure firmware logging, err %s aq_err %s\n", ice_status_str(status), ice_aq_str(sc->hw.adminq.sq_last_status)); return (ENODEV); } return (0); } #define ICE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION \ "\nControl firmware message limit to send per ARQ event" \ "\t\nMin: 1" \ "\t\nMax: 128" #define ICE_SYSCTL_HELP_FWLOG_ARQ_ENA \ "\nControl whether to enable/disable reporting to admin Rx queue" \ "\n0 - Disable firmware reporting via ARQ" \ "\n1 - Enable firmware reporting via ARQ" #define ICE_SYSCTL_HELP_FWLOG_UART_ENA \ "\nControl whether to enable/disable reporting to UART" \ "\n0 - Disable firmware reporting via UART" \ "\n1 - Enable firmware reporting via UART" #define ICE_SYSCTL_HELP_FWLOG_ENABLE_ON_LOAD \ "\nControl whether to enable logging during the attach phase" \ "\n0 - Disable firmware logging during attach phase" \ "\n1 - Enable firmware logging during attach phase" #define ICE_SYSCTL_HELP_FWLOG_REGISTER \ "\nControl whether to enable/disable firmware logging" \ "\n0 - Disable firmware logging" \ "\n1 - Enable firmware logging" #define ICE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY \ "\nControl the level of log output messages for this module" \ "\n\tverbose <4> - Verbose messages + (Error|Warning|Normal)" \ "\n\tnormal <3> - Normal messages + (Error|Warning)" \ "\n\twarning <2> - Warning messages + (Error)" \ "\n\terror <1> - Error messages" \ "\n\tnone <0> - Disables all logging for this module" /** * ice_sysctl_fwlog_set_cfg_options - Sysctl for setting fwlog cfg options * @oidp: sysctl oid structure * @arg1: private softc structure * @arg2: option to adjust * @req: sysctl request pointer * * On read: displays whether the config option selected by @arg2 is enabled * On write: enables/disables that config option and reapplies the * firmware logging configuration * * This has no effect on the legacy (V1) version of firmware logging.
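 *
 * Writing 1 sets the option bit passed in @arg2 (for example
 * ICE_FWLOG_OPTION_ARQ_ENA for the "arq_en" node) and writing 0
 * clears it; either way the updated configuration is pushed to
 * firmware through ice_reconfig_fw_log().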
*/ static int ice_sysctl_fwlog_set_cfg_options(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg; int error; u16 option = (u16)arg2; bool enabled; enabled = !!(cfg->options & option); error = sysctl_handle_bool(oidp, &enabled, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (enabled) cfg->options |= option; else cfg->options &= ~option; return ice_reconfig_fw_log(sc, cfg); } /** * ice_sysctl_fwlog_log_resolution - Sysctl for setting log message resolution * @oidp: sysctl oid structure * @arg1: private softc structure * @arg2: __unused__ * @req: sysctl request pointer * * On read: displays message queue limit before posting * On write: sets message queue limit before posting * * This has no effect on the legacy (V1) version of firmware logging. */ static int ice_sysctl_fwlog_log_resolution(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg; int error; u8 resolution; UNREFERENCED_PARAMETER(arg2); resolution = cfg->log_resolution; error = sysctl_handle_8(oidp, &resolution, 0, req); if ((error) || (req->newptr == NULL)) return (error); if ((resolution < ICE_AQC_FW_LOG_MIN_RESOLUTION) || (resolution > ICE_AQC_FW_LOG_MAX_RESOLUTION)) { device_printf(sc->dev, "Log resolution out-of-bounds\n"); return (EINVAL); } cfg->log_resolution = resolution; return ice_reconfig_fw_log(sc, cfg); } /** * ice_sysctl_fwlog_register - Sysctl for (de)registering firmware logs * @oidp: sysctl oid structure * @arg1: private softc structure * @arg2: __unused__ * @req: sysctl request pointer * * On read: displays whether firmware logging is registered * On write: (de)registers firmware logging. */ static int ice_sysctl_fwlog_register(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg; enum ice_status status; int error; u8 enabled; UNREFERENCED_PARAMETER(arg2); if (ice_test_state(&sc->state, ICE_STATE_ATTACHING)) { device_printf(sc->dev, "Registering FW Logging via kenv is supported with the on_load option\n"); return (EIO); } if (cfg->options & ICE_FWLOG_OPTION_IS_REGISTERED) enabled = true; else enabled = false; error = sysctl_handle_bool(oidp, &enabled, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (!ice_check_sq_alive(&sc->hw, &sc->hw.adminq)) return (0); if (enabled) { status = ice_fwlog_register(&sc->hw); if (!status) ice_set_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en); } else { status = ice_fwlog_unregister(&sc->hw); if (!status) ice_clear_bit(ICE_FEATURE_FW_LOGGING, sc->feat_en); } if (status) return (EIO); return (0); } /** * ice_sysctl_fwlog_module_log_severity - Add tunables for a FW logging module * @oidp: sysctl oid structure * @arg1: private softc structure * @arg2: index to logging module * @req: sysctl request pointer */ static int ice_sysctl_fwlog_module_log_severity(SYSCTL_HANDLER_ARGS) { struct ice_softc *sc = (struct ice_softc *)arg1; struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg; struct sbuf *sbuf; char *sev_str_end; enum ice_aqc_fw_logging_mod module = (enum ice_aqc_fw_logging_mod)arg2; int error, ll_num; u8 log_level; char sev_str[16]; bool sev_set = false; log_level = cfg->module_entries[module].log_level; sbuf = sbuf_new(NULL, sev_str, sizeof(sev_str), SBUF_FIXEDLEN); sbuf_printf(sbuf, "%d<%s>", log_level, ice_log_sev_str(log_level)); sbuf_finish(sbuf); sbuf_delete(sbuf); error = sysctl_handle_string(oidp, sev_str, sizeof(sev_str), req); if ((error) || 
(req->newptr == NULL)) return (error); if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_VERBOSE), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_VERBOSE; sev_set = true; } else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_NORMAL), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_NORMAL; sev_set = true; } else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_WARNING), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_WARNING; sev_set = true; } else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_ERROR), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_ERROR; sev_set = true; } else if (strcasecmp(ice_log_sev_str(ICE_FWLOG_LEVEL_NONE), sev_str) == 0) { log_level = ICE_FWLOG_LEVEL_NONE; sev_set = true; } if (!sev_set) { ll_num = strtol(sev_str, &sev_str_end, 0); if (sev_str_end == sev_str) ll_num = -1; if ((ll_num >= ICE_FWLOG_LEVEL_NONE) && (ll_num < ICE_FWLOG_LEVEL_INVALID)) log_level = ll_num; else { device_printf(sc->dev, "%s: \"%s\" is not a valid log level\n", __func__, sev_str); return (EINVAL); } } cfg->module_entries[module].log_level = log_level; return ice_reconfig_fw_log(sc, cfg); } /** * ice_add_fw_logging_tunables - Add tunables to configure FW logging events * @sc: private softc structure * @parent: parent node to add the tunables under * * Add tunables for configuring the firmware logging support. This includes * a control to enable the logging, and controls for each module to configure * which events to receive. */ void ice_add_fw_logging_tunables(struct ice_softc *sc, struct sysctl_oid *parent) { struct sysctl_oid_list *parent_list, *fwlog_list, *module_list; struct sysctl_oid *fwlog_node, *module_node; struct sysctl_ctx_list *ctx; struct ice_hw *hw = &sc->hw; struct ice_fwlog_cfg *cfg; device_t dev = sc->dev; enum ice_aqc_fw_logging_mod module; u16 i; cfg = &hw->fwlog_cfg; ctx = device_get_sysctl_ctx(dev); parent_list = SYSCTL_CHILDREN(parent); fwlog_node = SYSCTL_ADD_NODE(ctx, parent_list, OID_AUTO, "fw_log", ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL, "Firmware Logging"); fwlog_list = SYSCTL_CHILDREN(fwlog_node); cfg->log_resolution = 10; SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "log_resolution", ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, 0, ice_sysctl_fwlog_log_resolution, "CU", ICE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION); cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA; SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "arq_en", ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, ICE_FWLOG_OPTION_ARQ_ENA, ice_sysctl_fwlog_set_cfg_options, "CU", ICE_SYSCTL_HELP_FWLOG_ARQ_ENA); SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "uart_en", ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, ICE_FWLOG_OPTION_UART_ENA, ice_sysctl_fwlog_set_cfg_options, "CU", ICE_SYSCTL_HELP_FWLOG_UART_ENA); SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "on_load", ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, ICE_FWLOG_OPTION_REGISTER_ON_INIT, ice_sysctl_fwlog_set_cfg_options, "CU", ICE_SYSCTL_HELP_FWLOG_ENABLE_ON_LOAD); SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "register", ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc, 0, ice_sysctl_fwlog_register, "CU", ICE_SYSCTL_HELP_FWLOG_REGISTER); module_node = SYSCTL_ADD_NODE(ctx, fwlog_list, OID_AUTO, "severity", ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL, "Level of log output"); module_list = SYSCTL_CHILDREN(module_node); for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { /* Setup some defaults */ cfg->module_entries[i].module_id = i; cfg->module_entries[i].log_level = ICE_FWLOG_LEVEL_NONE; module = (enum ice_aqc_fw_logging_mod)i; SYSCTL_ADD_PROC(ctx, module_list, OID_AUTO, 
ice_fw_module_str(module), ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RWTUN, sc, module, ice_sysctl_fwlog_module_log_severity, "A", ICE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY); } } /** * ice_handle_fw_log_event - Handle a firmware logging event from the AdminQ * @sc: pointer to private softc structure * @desc: the AdminQ descriptor for this firmware event * @buf: pointer to the buffer accompanying the AQ message */ void ice_handle_fw_log_event(struct ice_softc *sc, struct ice_aq_desc *desc, void *buf) { /* Trigger a DTrace probe event for this firmware message */ SDT_PROBE2(ice_fwlog, , , message, (const u8 *)buf, desc->datalen); /* Possibly dump the firmware message to the console, if enabled */ ice_fwlog_event_dump(&sc->hw, desc, buf); } diff --git a/sys/dev/ice/ice_fwlog.c b/sys/dev/ice/ice_fwlog.c index 375b80647e1a..9c04b4ca0411 100644 --- a/sys/dev/ice/ice_fwlog.c +++ b/sys/dev/ice/ice_fwlog.c @@ -1,505 +1,505 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include "ice_common.h" #include "ice_fwlog.h" /** * cache_cfg - Cache FW logging config * @hw: pointer to the HW structure * @cfg: config to cache */ static void cache_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { hw->fwlog_cfg = *cfg; } /** * valid_module_entries - validate all the module entry IDs and log levels * @hw: pointer to the HW structure * @entries: entries to validate * @num_entries: number of entries to validate */ static bool valid_module_entries(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, u16 num_entries) { u16 i; if (!entries) { ice_debug(hw, ICE_DBG_FW_LOG, "Null ice_fwlog_module_entry array\n"); return false; } if (!num_entries) { ice_debug(hw, ICE_DBG_FW_LOG, "num_entries must be non-zero\n"); return false; } for (i = 0; i < num_entries; i++) { struct ice_fwlog_module_entry *entry = &entries[i]; if (entry->module_id >= ICE_AQC_FW_LOG_ID_MAX) { ice_debug(hw, ICE_DBG_FW_LOG, "Invalid module_id %u, max valid module_id is %u\n", entry->module_id, ICE_AQC_FW_LOG_ID_MAX - 1); return false; } if (entry->log_level >= ICE_FWLOG_LEVEL_INVALID) { ice_debug(hw, ICE_DBG_FW_LOG, "Invalid log_level %u, max valid log_level is %u\n", entry->log_level, ICE_AQC_FW_LOG_ID_MAX - 1); return false; } } return true; } /** * valid_cfg - validate entire configuration * @hw: pointer to the HW structure * @cfg: config to validate */ static bool valid_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { if (!cfg) { ice_debug(hw, ICE_DBG_FW_LOG, "Null ice_fwlog_cfg\n"); return false; } if (cfg->log_resolution < ICE_AQC_FW_LOG_MIN_RESOLUTION || cfg->log_resolution > ICE_AQC_FW_LOG_MAX_RESOLUTION) { ice_debug(hw, ICE_DBG_FW_LOG, "Unsupported log_resolution %u, must be between %u and %u\n", cfg->log_resolution, ICE_AQC_FW_LOG_MIN_RESOLUTION, ICE_AQC_FW_LOG_MAX_RESOLUTION); return false; } if (!valid_module_entries(hw, cfg->module_entries, ICE_AQC_FW_LOG_ID_MAX)) return false; return true; } /** * ice_fwlog_init - Initialize cached structures for tracking FW logging * @hw: pointer to the HW structure * @cfg: config used to initialize the cached structures * * This function should be called on driver initialization and before calling * ice_init_hw(). Firmware logging will be configured based on these settings * and also the PF will be registered on init. 
*/ enum ice_status ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { if (!valid_cfg(hw, cfg)) return ICE_ERR_PARAM; cache_cfg(hw, cfg); return ICE_SUCCESS; } /** * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30) * @hw: pointer to the HW structure * @entries: entries to configure * @num_entries: number of @entries * @options: options from the ice_fwlog_cfg->options field * @log_resolution: logging resolution */ static enum ice_status ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, u16 num_entries, u16 options, u16 log_resolution) { struct ice_aqc_fw_log_cfg_resp *fw_modules; struct ice_aqc_fw_log *cmd; struct ice_aq_desc desc; enum ice_status status; u16 i; fw_modules = (struct ice_aqc_fw_log_cfg_resp *) ice_calloc(hw, num_entries, sizeof(*fw_modules)); if (!fw_modules) return ICE_ERR_NO_MEMORY; for (i = 0; i < num_entries; i++) { fw_modules[i].module_identifier = CPU_TO_LE16(entries[i].module_id); fw_modules[i].log_level = entries[i].log_level; } ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config); desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); cmd = &desc.params.fw_log; cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID; cmd->ops.cfg.log_resolution = CPU_TO_LE16(log_resolution); cmd->ops.cfg.mdl_cnt = CPU_TO_LE16(num_entries); if (options & ICE_FWLOG_OPTION_ARQ_ENA) cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN; if (options & ICE_FWLOG_OPTION_UART_ENA) cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN; status = ice_aq_send_cmd(hw, &desc, fw_modules, sizeof(*fw_modules) * num_entries, NULL); ice_free(hw, fw_modules); return status; } /** * ice_fwlog_supported - Report the cached state of FW logging support * @hw: pointer to the HW structure * * This always returns false before ice_init_hw() has completed, so it must * only be called after ice_init_hw(). */ bool ice_fwlog_supported(struct ice_hw *hw) { return hw->fwlog_support_ena; } /** * ice_fwlog_set - Set the firmware logging settings * @hw: pointer to the HW structure * @cfg: config used to set firmware logging * * This function should be called whenever the driver needs to set the firmware * logging configuration. It can be called on initialization, reset, or during * runtime. * * If the PF wishes to receive FW logging then it must register via * ice_fwlog_register. Note that ice_fwlog_register does not need to be called * for init.
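 *
 * Runtime sketch (illustrative): start from the cached configuration in
 * hw->fwlog_cfg, adjust it, and push the result:
 *
 *	struct ice_fwlog_cfg cfg = hw->fwlog_cfg;
 *
 *	cfg.options |= ICE_FWLOG_OPTION_UART_ENA;
 *	status = ice_fwlog_set(hw, &cfg);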
*/ enum ice_status ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { enum ice_status status; if (!ice_fwlog_supported(hw)) return ICE_ERR_NOT_SUPPORTED; if (!valid_cfg(hw, cfg)) return ICE_ERR_PARAM; status = ice_aq_fwlog_set(hw, cfg->module_entries, ICE_AQC_FW_LOG_ID_MAX, cfg->options, cfg->log_resolution); if (!status) cache_cfg(hw, cfg); return status; } /** * update_cached_entries - Update module entries in cached FW logging config * @hw: pointer to the HW structure * @entries: entries to cache * @num_entries: number of @entries */ static void update_cached_entries(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, u16 num_entries) { u16 i; for (i = 0; i < num_entries; i++) { struct ice_fwlog_module_entry *updated = &entries[i]; u16 j; for (j = 0; j < ICE_AQC_FW_LOG_ID_MAX; j++) { struct ice_fwlog_module_entry *cached = &hw->fwlog_cfg.module_entries[j]; if (cached->module_id == updated->module_id) { cached->log_level = updated->log_level; break; } } } } /** * ice_fwlog_update_modules - Update the log level of 1 or more FW logging modules * @hw: pointer to the HW structure * @entries: array of ice_fwlog_module_entry(s) * @num_entries: number of entries * * This function should be called to update the log level of 1 or more FW * logging modules via module ID. * * Only the entries passed in will be affected. All other firmware logging * settings will be unaffected. */ enum ice_status ice_fwlog_update_modules(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, u16 num_entries) { struct ice_fwlog_cfg *cfg; enum ice_status status; if (!ice_fwlog_supported(hw)) return ICE_ERR_NOT_SUPPORTED; if (!valid_module_entries(hw, entries, num_entries)) return ICE_ERR_PARAM; cfg = (struct ice_fwlog_cfg *)ice_calloc(hw, 1, sizeof(*cfg)); if (!cfg) return ICE_ERR_NO_MEMORY; status = ice_fwlog_get(hw, cfg); if (status) goto status_out; status = ice_aq_fwlog_set(hw, entries, num_entries, cfg->options, cfg->log_resolution); if (!status) update_cached_entries(hw, entries, num_entries); status_out: ice_free(hw, cfg); return status; } /** * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31) * @hw: pointer to the HW structure * @reg: true to register and false to unregister */ static enum ice_status ice_aq_fwlog_register(struct ice_hw *hw, bool reg) { struct ice_aq_desc desc; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register); if (reg) desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER; return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } /** * ice_fwlog_register - Register the PF for firmware logging * @hw: pointer to the HW structure * * After this call the PF will start to receive firmware logging based on the * configuration set in ice_fwlog_set.
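 *
 * After a successful call, firmware log messages arrive on the Admin Receive
 * Queue as ice_aqc_opc_fw_logs_event descriptors; in this driver they are
 * passed to ice_handle_fw_log_event(), which fires the ice_fwlog DTrace
 * probe and calls ice_fwlog_event_dump().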
*/ enum ice_status ice_fwlog_register(struct ice_hw *hw) { enum ice_status status; if (!ice_fwlog_supported(hw)) return ICE_ERR_NOT_SUPPORTED; status = ice_aq_fwlog_register(hw, true); if (status) ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n"); else hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED; return status; } /** * ice_fwlog_unregister - Unregister the PF from firmware logging * @hw: pointer to the HW structure */ enum ice_status ice_fwlog_unregister(struct ice_hw *hw) { enum ice_status status; if (!ice_fwlog_supported(hw)) return ICE_ERR_NOT_SUPPORTED; status = ice_aq_fwlog_register(hw, false); if (status) ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n"); else hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED; return status; } /** * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32) * @hw: pointer to the HW structure * @cfg: firmware logging configuration to populate */ static enum ice_status ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { struct ice_aqc_fw_log_cfg_resp *fw_modules; struct ice_aqc_fw_log *cmd; struct ice_aq_desc desc; enum ice_status status; u16 i, module_id_cnt; void *buf; ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM); buf = ice_calloc(hw, 1, ICE_AQ_MAX_BUF_LEN); if (!buf) return ICE_ERR_NO_MEMORY; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query); cmd = &desc.params.fw_log; cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY; status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL); if (status) { ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n"); goto status_out; } module_id_cnt = LE16_TO_CPU(cmd->ops.cfg.mdl_cnt); if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) { ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n"); } else { if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n", ICE_AQC_FW_LOG_ID_MAX); module_id_cnt = ICE_AQC_FW_LOG_ID_MAX; } cfg->log_resolution = LE16_TO_CPU(cmd->ops.cfg.log_resolution); if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN) cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA; if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN) cfg->options |= ICE_FWLOG_OPTION_UART_ENA; if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED) cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED; fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf; for (i = 0; i < module_id_cnt; i++) { struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i]; cfg->module_entries[i].module_id = LE16_TO_CPU(fw_module->module_identifier); cfg->module_entries[i].log_level = fw_module->log_level; } status_out: ice_free(hw, buf); return status; } /** * ice_fwlog_set_support_ena - Set if FW logging is supported by FW * @hw: pointer to the HW struct * * If FW returns success to the ice_aq_fwlog_get call then it supports FW * logging, else it doesn't. Set the fwlog_support_ena flag accordingly. * * This function is only meant to be called during driver init to determine if * the FW supports FW logging.
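 *
 * Init-ordering sketch (illustrative; the exact call sites are driver
 * specific): the cached configuration from ice_fwlog_init() is deliberately
 * left intact here, so a typical sequence is
 *
 *	ice_fwlog_init(hw, &cfg);
 *	(bring up the AdminQ)
 *	ice_fwlog_set_support_ena(hw);
 *	if (ice_fwlog_supported(hw))
 *		status = ice_fwlog_set(hw, &cfg);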
*/ void ice_fwlog_set_support_ena(struct ice_hw *hw) { struct ice_fwlog_cfg *cfg; enum ice_status status; hw->fwlog_support_ena = false; cfg = (struct ice_fwlog_cfg *)ice_calloc(hw, 1, sizeof(*cfg)); if (!cfg) return; /* don't call ice_fwlog_get() because that would overwrite the cached * configuration from the call to ice_fwlog_init(), which is expected to * be called prior to this function */ status = ice_aq_fwlog_get(hw, cfg); if (status) ice_debug(hw, ICE_DBG_FW_LOG, "ice_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n", status); else hw->fwlog_support_ena = true; ice_free(hw, cfg); } /** * ice_fwlog_get - Get the firmware logging settings * @hw: pointer to the HW structure * @cfg: config to populate based on current firmware logging settings */ enum ice_status ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg) { enum ice_status status; if (!ice_fwlog_supported(hw)) return ICE_ERR_NOT_SUPPORTED; if (!cfg) return ICE_ERR_PARAM; status = ice_aq_fwlog_get(hw, cfg); if (status) return status; cache_cfg(hw, cfg); return ICE_SUCCESS; } /** * ice_fwlog_event_dump - Dump the event received over the Admin Receive Queue * @hw: pointer to the HW structure * @desc: Admin Receive Queue descriptor * @buf: buffer that contains the FW log event data * * If the driver receives the ice_aqc_opc_fw_logs_event on the Admin Receive * Queue, then it should call this function to dump the FW log data. */ void ice_fwlog_event_dump(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) { if (!ice_fwlog_supported(hw)) return; ice_info_fwlog(hw, 32, 1, (u8 *)buf, LE16_TO_CPU(desc->datalen)); } diff --git a/sys/dev/ice/ice_fwlog.h b/sys/dev/ice/ice_fwlog.h index c8906d56a75a..4b8cc0938db5 100644 --- a/sys/dev/ice/ice_fwlog.h +++ b/sys/dev/ice/ice_fwlog.h @@ -1,91 +1,91 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #ifndef _ICE_FWLOG_H_ #define _ICE_FWLOG_H_ #include "ice_adminq_cmd.h" struct ice_hw; /* Only a single log level should be set and all log levels under the set value * are enabled, e.g. if log level is set to ICE_FWLOG_LEVEL_VERBOSE, then all * other log levels are included (except ICE_FWLOG_LEVEL_NONE) */ enum ice_fwlog_level { ICE_FWLOG_LEVEL_NONE = 0, ICE_FWLOG_LEVEL_ERROR = 1, ICE_FWLOG_LEVEL_WARNING = 2, ICE_FWLOG_LEVEL_NORMAL = 3, ICE_FWLOG_LEVEL_VERBOSE = 4, ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */ }; struct ice_fwlog_module_entry { /* module ID for the corresponding firmware logging event */ u16 module_id; /* verbosity level for the module_id */ u8 log_level; }; struct ice_fwlog_cfg { /* list of modules for configuring log level */ struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX]; #define ICE_FWLOG_OPTION_ARQ_ENA BIT(0) #define ICE_FWLOG_OPTION_UART_ENA BIT(1) /* set before calling ice_fwlog_init() so the PF registers for firmware * logging on initialization */ #define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2) /* set in the ice_fwlog_get() response if the PF is registered for FW * logging events over ARQ */ #define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3) /* options used to configure firmware logging */ u16 options; /* minimum number of log events sent per Admin Receive Queue event */ - u8 log_resolution; + u16 log_resolution; }; void ice_fwlog_set_support_ena(struct ice_hw *hw); bool ice_fwlog_supported(struct ice_hw *hw); enum ice_status ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); enum ice_status ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); enum ice_status ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg); enum ice_status ice_fwlog_update_modules(struct ice_hw *hw, struct ice_fwlog_module_entry *entries, u16 num_entries); enum ice_status ice_fwlog_register(struct ice_hw *hw); enum ice_status ice_fwlog_unregister(struct ice_hw *hw); void ice_fwlog_event_dump(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); #endif /* _ICE_FWLOG_H_ */ diff --git a/sys/dev/ice/ice_hw_autogen.h b/sys/dev/ice/ice_hw_autogen.h index a1e9024ba34e..691c36d81078 100644 --- a/sys/dev/ice/ice_hw_autogen.h +++ b/sys/dev/ice/ice_hw_autogen.h @@ -1,9490 +1,9490 @@ /* SPDX-License-Identifier: BSD-3-Clause */ -/* Copyright (c) 2021, Intel Corporation +/* Copyright (c) 2022, Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ /* Machine generated file. Do not edit. */ #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ #define GL_HIDA(_i) (0x00082000 + ((_i) * 4)) #define GL_HIBA(_i) (0x00081000 + ((_i) * 4)) #define GL_HICR 0x00082040 #define GL_HICR_EN 0x00082044 #define GLGEN_CSR_DEBUG_C 0x00075750 #define GLNVM_GENS 0x000B6100 #define GLNVM_FLA 0x000B6108 #define GL_HIDA_MAX_INDEX 15 #define GL_HIBA_MAX_INDEX 1023 #define GL_RDPU_CNTRL 0x00052054 /* Reset Source: CORER */ #define GL_RDPU_CNTRL_RX_PAD_EN_S 0 #define GL_RDPU_CNTRL_RX_PAD_EN_M BIT(0) #define GL_RDPU_CNTRL_UDP_ZERO_EN_S 1 #define GL_RDPU_CNTRL_UDP_ZERO_EN_M BIT(1) #define GL_RDPU_CNTRL_BLNC_EN_S 2 #define GL_RDPU_CNTRL_BLNC_EN_M BIT(2) #define GL_RDPU_CNTRL_RECIPE_BYPASS_S 3 #define GL_RDPU_CNTRL_RECIPE_BYPASS_M BIT(3) #define GL_RDPU_CNTRL_RLAN_ACK_REQ_PM_TH_S 4 #define GL_RDPU_CNTRL_RLAN_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 4) #define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_S 10 #define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 10) #define GL_RDPU_CNTRL_REQ_WB_PM_TH_S 16 #define GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x1F, 16) #define GL_RDPU_CNTRL_ECO_S 21 #define GL_RDPU_CNTRL_ECO_M MAKEMASK(0x7FF, 21) #define MSIX_PBA(_i) (0x00008000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: FLR */ #define MSIX_PBA_MAX_INDEX 2 #define MSIX_PBA_PENBIT_S 0 #define MSIX_PBA_PENBIT_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ #define MSIX_TADD_MAX_INDEX 64 #define MSIX_TADD_MSIXTADD10_S 0 #define MSIX_TADD_MSIXTADD10_M MAKEMASK(0x3, 0) #define MSIX_TADD_MSIXTADD_S 2 #define MSIX_TADD_MSIXTADD_M MAKEMASK(0x3FFFFFFF, 2) #define MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ #define MSIX_TUADD_MAX_INDEX 64 #define MSIX_TUADD_MSIXTUADD_S 0 #define MSIX_TUADD_MSIXTUADD_M MAKEMASK(0xFFFFFFFF, 0) #define MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...64 */ /* Reset Source: FLR */ #define MSIX_TVCTRL_MAX_INDEX 64 #define MSIX_TVCTRL_MASK_S 0 #define MSIX_TVCTRL_MASK_M BIT(0) #define PF0_FW_HLP_ARQBAH_PAGE 0x02D00180 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_FW_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_HLP_ARQBAL_PAGE 0x02D00080 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_FW_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_HLP_ARQH_PAGE 0x02D00380 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQH_PAGE_ARQH_S 0 #define PF0_FW_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ARQLEN_PAGE 0x02D00280 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_FW_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_FW_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_FW_HLP_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_FW_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define 
PF0_FW_HLP_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_FW_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_FW_HLP_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_FW_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_FW_HLP_ARQT_PAGE 0x02D00480 /* Reset Source: EMPR */ #define PF0_FW_HLP_ARQT_PAGE_ARQT_S 0 #define PF0_FW_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ATQBAH_PAGE 0x02D00100 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_FW_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_HLP_ATQBAL_PAGE 0x02D00000 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_LSB_S 0 #define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_FW_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_HLP_ATQH_PAGE 0x02D00300 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQH_PAGE_ATQH_S 0 #define PF0_FW_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ATQLEN_PAGE 0x02D00200 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_FW_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_HLP_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_FW_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_FW_HLP_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_FW_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_FW_HLP_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_FW_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_FW_HLP_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_FW_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_FW_HLP_ATQT_PAGE 0x02D00400 /* Reset Source: EMPR */ #define PF0_FW_HLP_ATQT_PAGE_ATQT_S 0 #define PF0_FW_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ARQBAH_PAGE 0x02D40180 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_FW_PSM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_PSM_ARQBAL_PAGE 0x02D40080 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_FW_PSM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_PSM_ARQH_PAGE 0x02D40380 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQH_PAGE_ARQH_S 0 #define PF0_FW_PSM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ARQLEN_PAGE 0x02D40280 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_FW_PSM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_FW_PSM_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_FW_PSM_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_FW_PSM_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_FW_PSM_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_FW_PSM_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_FW_PSM_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_FW_PSM_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_FW_PSM_ARQT_PAGE 0x02D40480 /* Reset Source: EMPR */ #define PF0_FW_PSM_ARQT_PAGE_ARQT_S 0 #define PF0_FW_PSM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ATQBAH_PAGE 0x02D40100 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_FW_PSM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_FW_PSM_ATQBAL_PAGE 0x02D40000 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_LSB_S 0 #define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_FW_PSM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_FW_PSM_ATQH_PAGE 0x02D40300 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQH_PAGE_ATQH_S 0 #define PF0_FW_PSM_ATQH_PAGE_ATQH_M 
MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ATQLEN_PAGE 0x02D40200 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_FW_PSM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_FW_PSM_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_FW_PSM_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_FW_PSM_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_FW_PSM_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_FW_PSM_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_FW_PSM_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_FW_PSM_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_FW_PSM_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_FW_PSM_ATQT_PAGE 0x02D40400 /* Reset Source: EMPR */ #define PF0_FW_PSM_ATQT_PAGE_ATQT_S 0 #define PF0_FW_PSM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ARQBAH_PAGE 0x02D80190 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_MBX_CPM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_CPM_ARQBAL_PAGE 0x02D80090 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_MBX_CPM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_CPM_ARQH_PAGE 0x02D80390 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQH_PAGE_ARQH_S 0 #define PF0_MBX_CPM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ARQLEN_PAGE 0x02D80290 /* Reset Source: PFR */ #define PF0_MBX_CPM_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_MBX_CPM_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_MBX_CPM_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_MBX_CPM_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_MBX_CPM_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_MBX_CPM_ARQT_PAGE 0x02D80490 /* Reset Source: CORER */ #define PF0_MBX_CPM_ARQT_PAGE_ARQT_S 0 #define PF0_MBX_CPM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ATQBAH_PAGE 0x02D80110 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_MBX_CPM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_CPM_ATQBAL_PAGE 0x02D80010 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_MBX_CPM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_CPM_ATQH_PAGE 0x02D80310 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQH_PAGE_ATQH_S 0 #define PF0_MBX_CPM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ATQLEN_PAGE 0x02D80210 /* Reset Source: PFR */ #define PF0_MBX_CPM_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_CPM_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_MBX_CPM_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_MBX_CPM_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_MBX_CPM_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_MBX_CPM_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_MBX_CPM_ATQT_PAGE 0x02D80410 /* Reset Source: CORER */ #define PF0_MBX_CPM_ATQT_PAGE_ATQT_S 0 #define PF0_MBX_CPM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ARQBAH_PAGE 0x02D00190 /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_MBX_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_HLP_ARQBAL_PAGE 0x02D00090 /* Reset Source: CORER */ #define 
PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_MBX_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_HLP_ARQH_PAGE 0x02D00390 /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQH_PAGE_ARQH_S 0 #define PF0_MBX_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ARQLEN_PAGE 0x02D00290 /* Reset Source: PFR */ #define PF0_MBX_HLP_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_MBX_HLP_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_MBX_HLP_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_MBX_HLP_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_MBX_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_MBX_HLP_ARQT_PAGE 0x02D00490 /* Reset Source: CORER */ #define PF0_MBX_HLP_ARQT_PAGE_ARQT_S 0 #define PF0_MBX_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ATQBAH_PAGE 0x02D00110 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_MBX_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_HLP_ATQBAL_PAGE 0x02D00010 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_MBX_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_HLP_ATQH_PAGE 0x02D00310 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQH_PAGE_ATQH_S 0 #define PF0_MBX_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ATQLEN_PAGE 0x02D00210 /* Reset Source: PFR */ #define PF0_MBX_HLP_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_HLP_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_MBX_HLP_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_MBX_HLP_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_MBX_HLP_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_MBX_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_MBX_HLP_ATQT_PAGE 0x02D00410 /* Reset Source: CORER */ #define PF0_MBX_HLP_ATQT_PAGE_ATQT_S 0 #define PF0_MBX_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ARQBAH_PAGE 0x02D40190 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_MBX_PSM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_PSM_ARQBAL_PAGE 0x02D40090 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_MBX_PSM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_PSM_ARQH_PAGE 0x02D40390 /* Reset Source: CORER */ #define PF0_MBX_PSM_ARQH_PAGE_ARQH_S 0 #define PF0_MBX_PSM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ARQLEN_PAGE 0x02D40290 /* Reset Source: PFR */ #define PF0_MBX_PSM_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_MBX_PSM_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_MBX_PSM_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_MBX_PSM_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_MBX_PSM_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_MBX_PSM_ARQT_PAGE 0x02D40490 /* Reset Source: CORER 
*/ #define PF0_MBX_PSM_ARQT_PAGE_ARQT_S 0 #define PF0_MBX_PSM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ATQBAH_PAGE 0x02D40110 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_MBX_PSM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_MBX_PSM_ATQBAL_PAGE 0x02D40010 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_MBX_PSM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_MBX_PSM_ATQH_PAGE 0x02D40310 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQH_PAGE_ATQH_S 0 #define PF0_MBX_PSM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ATQLEN_PAGE 0x02D40210 /* Reset Source: PFR */ #define PF0_MBX_PSM_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_MBX_PSM_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_MBX_PSM_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_MBX_PSM_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_MBX_PSM_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_MBX_PSM_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_MBX_PSM_ATQT_PAGE 0x02D40410 /* Reset Source: CORER */ #define PF0_MBX_PSM_ATQT_PAGE_ATQT_S 0 #define PF0_MBX_PSM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ARQBAH_PAGE 0x02D801A0 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_SB_CPM_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_CPM_ARQBAL_PAGE 0x02D800A0 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_SB_CPM_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_CPM_ARQH_PAGE 0x02D803A0 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQH_PAGE_ARQH_S 0 #define PF0_SB_CPM_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ARQLEN_PAGE 0x02D802A0 /* Reset Source: PFR */ #define PF0_SB_CPM_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_SB_CPM_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_SB_CPM_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_SB_CPM_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_SB_CPM_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_SB_CPM_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_SB_CPM_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_SB_CPM_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_SB_CPM_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_SB_CPM_ARQT_PAGE 0x02D804A0 /* Reset Source: CORER */ #define PF0_SB_CPM_ARQT_PAGE_ARQT_S 0 #define PF0_SB_CPM_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ATQBAH_PAGE 0x02D80120 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_SB_CPM_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_CPM_ATQBAL_PAGE 0x02D80020 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_SB_CPM_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_CPM_ATQH_PAGE 0x02D80320 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQH_PAGE_ATQH_S 0 #define PF0_SB_CPM_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ATQLEN_PAGE 0x02D80220 /* Reset Source: PFR */ #define PF0_SB_CPM_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_SB_CPM_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_CPM_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_SB_CPM_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_SB_CPM_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_SB_CPM_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_SB_CPM_ATQLEN_PAGE_ATQCRIT_S 30 #define 
PF0_SB_CPM_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_SB_CPM_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_SB_CPM_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_SB_CPM_ATQT_PAGE 0x02D80420 /* Reset Source: CORER */ #define PF0_SB_CPM_ATQT_PAGE_ATQT_S 0 #define PF0_SB_CPM_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ARQBAH_PAGE 0x02D001A0 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQBAH_PAGE_ARQBAH_S 0 #define PF0_SB_HLP_ARQBAH_PAGE_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_HLP_ARQBAL_PAGE 0x02D000A0 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_LSB_S 0 #define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_S 6 #define PF0_SB_HLP_ARQBAL_PAGE_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_HLP_ARQH_PAGE 0x02D003A0 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQH_PAGE_ARQH_S 0 #define PF0_SB_HLP_ARQH_PAGE_ARQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ARQLEN_PAGE 0x02D002A0 /* Reset Source: PFR */ #define PF0_SB_HLP_ARQLEN_PAGE_ARQLEN_S 0 #define PF0_SB_HLP_ARQLEN_PAGE_ARQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ARQLEN_PAGE_ARQVFE_S 28 #define PF0_SB_HLP_ARQLEN_PAGE_ARQVFE_M BIT(28) #define PF0_SB_HLP_ARQLEN_PAGE_ARQOVFL_S 29 #define PF0_SB_HLP_ARQLEN_PAGE_ARQOVFL_M BIT(29) #define PF0_SB_HLP_ARQLEN_PAGE_ARQCRIT_S 30 #define PF0_SB_HLP_ARQLEN_PAGE_ARQCRIT_M BIT(30) #define PF0_SB_HLP_ARQLEN_PAGE_ARQENABLE_S 31 #define PF0_SB_HLP_ARQLEN_PAGE_ARQENABLE_M BIT(31) #define PF0_SB_HLP_ARQT_PAGE 0x02D004A0 /* Reset Source: CORER */ #define PF0_SB_HLP_ARQT_PAGE_ARQT_S 0 #define PF0_SB_HLP_ARQT_PAGE_ARQT_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ATQBAH_PAGE 0x02D00120 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQBAH_PAGE_ATQBAH_S 0 #define PF0_SB_HLP_ATQBAH_PAGE_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define PF0_SB_HLP_ATQBAL_PAGE 0x02D00020 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQBAL_PAGE_ATQBAL_S 6 #define PF0_SB_HLP_ATQBAL_PAGE_ATQBAL_M MAKEMASK(0x3FFFFFF, 6) #define PF0_SB_HLP_ATQH_PAGE 0x02D00320 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQH_PAGE_ATQH_S 0 #define PF0_SB_HLP_ATQH_PAGE_ATQH_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ATQLEN_PAGE 0x02D00220 /* Reset Source: PFR */ #define PF0_SB_HLP_ATQLEN_PAGE_ATQLEN_S 0 #define PF0_SB_HLP_ATQLEN_PAGE_ATQLEN_M MAKEMASK(0x3FF, 0) #define PF0_SB_HLP_ATQLEN_PAGE_ATQVFE_S 28 #define PF0_SB_HLP_ATQLEN_PAGE_ATQVFE_M BIT(28) #define PF0_SB_HLP_ATQLEN_PAGE_ATQOVFL_S 29 #define PF0_SB_HLP_ATQLEN_PAGE_ATQOVFL_M BIT(29) #define PF0_SB_HLP_ATQLEN_PAGE_ATQCRIT_S 30 #define PF0_SB_HLP_ATQLEN_PAGE_ATQCRIT_M BIT(30) #define PF0_SB_HLP_ATQLEN_PAGE_ATQENABLE_S 31 #define PF0_SB_HLP_ATQLEN_PAGE_ATQENABLE_M BIT(31) #define PF0_SB_HLP_ATQT_PAGE 0x02D00420 /* Reset Source: CORER */ #define PF0_SB_HLP_ATQT_PAGE_ATQT_S 0 #define PF0_SB_HLP_ATQT_PAGE_ATQT_M MAKEMASK(0x3FF, 0) #define PF0INT_DYN_CTL(_i) (0x03000000 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define PF0INT_DYN_CTL_MAX_INDEX 2047 #define PF0INT_DYN_CTL_INTENA_S 0 #define PF0INT_DYN_CTL_INTENA_M BIT(0) #define PF0INT_DYN_CTL_CLEARPBA_S 1 #define PF0INT_DYN_CTL_CLEARPBA_M BIT(1) #define PF0INT_DYN_CTL_SWINT_TRIG_S 2 #define PF0INT_DYN_CTL_SWINT_TRIG_M BIT(2) #define PF0INT_DYN_CTL_ITR_INDX_S 3 #define PF0INT_DYN_CTL_ITR_INDX_M MAKEMASK(0x3, 3) #define PF0INT_DYN_CTL_INTERVAL_S 5 #define PF0INT_DYN_CTL_INTERVAL_M MAKEMASK(0xFFF, 5) #define PF0INT_DYN_CTL_SW_ITR_INDX_ENA_S 24 #define PF0INT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24) #define PF0INT_DYN_CTL_SW_ITR_INDX_S 25 #define PF0INT_DYN_CTL_SW_ITR_INDX_M MAKEMASK(0x3, 25) 
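/*
 * Usage sketch (illustrative, values hypothetical): each register field above
 * is described by a shift (_S) and mask (_M) pair, so a field is typically
 * encoded as ((val << FIELD_S) & FIELD_M) and decoded as
 * ((reg & FIELD_M) >> FIELD_S). For example, a PF0INT_DYN_CTL value that
 * enables the interrupt and selects ITR index 1 could be built as:
 *
 *	u32 v = PF0INT_DYN_CTL_INTENA_M |
 *	    ((1 << PF0INT_DYN_CTL_ITR_INDX_S) & PF0INT_DYN_CTL_ITR_INDX_M);
 */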
#define PF0INT_DYN_CTL_WB_ON_ITR_S 30 #define PF0INT_DYN_CTL_WB_ON_ITR_M BIT(30) #define PF0INT_DYN_CTL_INTENA_MSK_S 31 #define PF0INT_DYN_CTL_INTENA_MSK_M BIT(31) #define PF0INT_ITR_0(_i) (0x03000004 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define PF0INT_ITR_0_MAX_INDEX 2047 #define PF0INT_ITR_0_INTERVAL_S 0 #define PF0INT_ITR_0_INTERVAL_M MAKEMASK(0xFFF, 0) #define PF0INT_ITR_1(_i) (0x03000008 + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define PF0INT_ITR_1_MAX_INDEX 2047 #define PF0INT_ITR_1_INTERVAL_S 0 #define PF0INT_ITR_1_INTERVAL_M MAKEMASK(0xFFF, 0) #define PF0INT_ITR_2(_i) (0x0300000C + ((_i) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define PF0INT_ITR_2_MAX_INDEX 2047 #define PF0INT_ITR_2_INTERVAL_S 0 #define PF0INT_ITR_2_INTERVAL_M MAKEMASK(0xFFF, 0) #define PF0INT_OICR_CPM_PAGE 0x02D03000 /* Reset Source: CORER */ #define PF0INT_OICR_CPM_PAGE_INTEVENT_S 0 #define PF0INT_OICR_CPM_PAGE_INTEVENT_M BIT(0) #define PF0INT_OICR_CPM_PAGE_QUEUE_S 1 #define PF0INT_OICR_CPM_PAGE_QUEUE_M BIT(1) #define PF0INT_OICR_CPM_PAGE_RSV1_S 2 #define PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0xFF, 2) #define PF0INT_OICR_CPM_PAGE_HH_COMP_S 10 #define PF0INT_OICR_CPM_PAGE_HH_COMP_M BIT(10) #define PF0INT_OICR_CPM_PAGE_TSYN_TX_S 11 #define PF0INT_OICR_CPM_PAGE_TSYN_TX_M BIT(11) #define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_S 12 #define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_M BIT(12) #define PF0INT_OICR_CPM_PAGE_TSYN_TGT_S 13 #define PF0INT_OICR_CPM_PAGE_TSYN_TGT_M BIT(13) #define PF0INT_OICR_CPM_PAGE_HLP_RDY_S 14 #define PF0INT_OICR_CPM_PAGE_HLP_RDY_M BIT(14) #define PF0INT_OICR_CPM_PAGE_CPM_RDY_S 15 #define PF0INT_OICR_CPM_PAGE_CPM_RDY_M BIT(15) #define PF0INT_OICR_CPM_PAGE_ECC_ERR_S 16 #define PF0INT_OICR_CPM_PAGE_ECC_ERR_M BIT(16) #define PF0INT_OICR_CPM_PAGE_RSV2_S 17 #define PF0INT_OICR_CPM_PAGE_RSV2_M MAKEMASK(0x3, 17) #define PF0INT_OICR_CPM_PAGE_MAL_DETECT_S 19 #define PF0INT_OICR_CPM_PAGE_MAL_DETECT_M BIT(19) #define PF0INT_OICR_CPM_PAGE_GRST_S 20 #define PF0INT_OICR_CPM_PAGE_GRST_M BIT(20) #define PF0INT_OICR_CPM_PAGE_PCI_EXCEPTION_S 21 #define PF0INT_OICR_CPM_PAGE_PCI_EXCEPTION_M BIT(21) #define PF0INT_OICR_CPM_PAGE_GPIO_S 22 #define PF0INT_OICR_CPM_PAGE_GPIO_M BIT(22) #define PF0INT_OICR_CPM_PAGE_RSV3_S 23 #define PF0INT_OICR_CPM_PAGE_RSV3_M BIT(23) #define PF0INT_OICR_CPM_PAGE_STORM_DETECT_S 24 #define PF0INT_OICR_CPM_PAGE_STORM_DETECT_M BIT(24) #define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_S 25 #define PF0INT_OICR_CPM_PAGE_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_CPM_PAGE_HMC_ERR_S 26 #define PF0INT_OICR_CPM_PAGE_HMC_ERR_M BIT(26) #define PF0INT_OICR_CPM_PAGE_PE_PUSH_S 27 #define PF0INT_OICR_CPM_PAGE_PE_PUSH_M BIT(27) #define PF0INT_OICR_CPM_PAGE_PE_CRITERR_S 28 #define PF0INT_OICR_CPM_PAGE_PE_CRITERR_M BIT(28) #define PF0INT_OICR_CPM_PAGE_VFLR_S 29 #define PF0INT_OICR_CPM_PAGE_VFLR_M BIT(29) #define PF0INT_OICR_CPM_PAGE_XLR_HW_DONE_S 30 #define PF0INT_OICR_CPM_PAGE_XLR_HW_DONE_M BIT(30) #define PF0INT_OICR_CPM_PAGE_SWINT_S 31 #define PF0INT_OICR_CPM_PAGE_SWINT_M BIT(31) #define PF0INT_OICR_ENA_CPM_PAGE 0x02D03100 /* Reset Source: CORER */ #define PF0INT_OICR_ENA_CPM_PAGE_RSV0_S 0 #define PF0INT_OICR_ENA_CPM_PAGE_RSV0_M BIT(0) #define PF0INT_OICR_ENA_CPM_PAGE_INT_ENA_S 1 #define PF0INT_OICR_ENA_CPM_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PF0INT_OICR_ENA_HLP_PAGE 0x02D01100 /* Reset Source: CORER */ #define PF0INT_OICR_ENA_HLP_PAGE_RSV0_S 0 #define PF0INT_OICR_ENA_HLP_PAGE_RSV0_M BIT(0) #define PF0INT_OICR_ENA_HLP_PAGE_INT_ENA_S 1 #define 
PF0INT_OICR_ENA_HLP_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PF0INT_OICR_ENA_PSM_PAGE 0x02D02100 /* Reset Source: CORER */ #define PF0INT_OICR_ENA_PSM_PAGE_RSV0_S 0 #define PF0INT_OICR_ENA_PSM_PAGE_RSV0_M BIT(0) #define PF0INT_OICR_ENA_PSM_PAGE_INT_ENA_S 1 #define PF0INT_OICR_ENA_PSM_PAGE_INT_ENA_M MAKEMASK(0x7FFFFFFF, 1) #define PF0INT_OICR_HLP_PAGE 0x02D01000 /* Reset Source: CORER */ #define PF0INT_OICR_HLP_PAGE_INTEVENT_S 0 #define PF0INT_OICR_HLP_PAGE_INTEVENT_M BIT(0) #define PF0INT_OICR_HLP_PAGE_QUEUE_S 1 #define PF0INT_OICR_HLP_PAGE_QUEUE_M BIT(1) #define PF0INT_OICR_HLP_PAGE_RSV1_S 2 #define PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0xFF, 2) #define PF0INT_OICR_HLP_PAGE_HH_COMP_S 10 #define PF0INT_OICR_HLP_PAGE_HH_COMP_M BIT(10) #define PF0INT_OICR_HLP_PAGE_TSYN_TX_S 11 #define PF0INT_OICR_HLP_PAGE_TSYN_TX_M BIT(11) #define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_S 12 #define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_M BIT(12) #define PF0INT_OICR_HLP_PAGE_TSYN_TGT_S 13 #define PF0INT_OICR_HLP_PAGE_TSYN_TGT_M BIT(13) #define PF0INT_OICR_HLP_PAGE_HLP_RDY_S 14 #define PF0INT_OICR_HLP_PAGE_HLP_RDY_M BIT(14) #define PF0INT_OICR_HLP_PAGE_CPM_RDY_S 15 #define PF0INT_OICR_HLP_PAGE_CPM_RDY_M BIT(15) #define PF0INT_OICR_HLP_PAGE_ECC_ERR_S 16 #define PF0INT_OICR_HLP_PAGE_ECC_ERR_M BIT(16) #define PF0INT_OICR_HLP_PAGE_RSV2_S 17 #define PF0INT_OICR_HLP_PAGE_RSV2_M MAKEMASK(0x3, 17) #define PF0INT_OICR_HLP_PAGE_MAL_DETECT_S 19 #define PF0INT_OICR_HLP_PAGE_MAL_DETECT_M BIT(19) #define PF0INT_OICR_HLP_PAGE_GRST_S 20 #define PF0INT_OICR_HLP_PAGE_GRST_M BIT(20) #define PF0INT_OICR_HLP_PAGE_PCI_EXCEPTION_S 21 #define PF0INT_OICR_HLP_PAGE_PCI_EXCEPTION_M BIT(21) #define PF0INT_OICR_HLP_PAGE_GPIO_S 22 #define PF0INT_OICR_HLP_PAGE_GPIO_M BIT(22) #define PF0INT_OICR_HLP_PAGE_RSV3_S 23 #define PF0INT_OICR_HLP_PAGE_RSV3_M BIT(23) #define PF0INT_OICR_HLP_PAGE_STORM_DETECT_S 24 #define PF0INT_OICR_HLP_PAGE_STORM_DETECT_M BIT(24) #define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_S 25 #define PF0INT_OICR_HLP_PAGE_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_HLP_PAGE_HMC_ERR_S 26 #define PF0INT_OICR_HLP_PAGE_HMC_ERR_M BIT(26) #define PF0INT_OICR_HLP_PAGE_PE_PUSH_S 27 #define PF0INT_OICR_HLP_PAGE_PE_PUSH_M BIT(27) #define PF0INT_OICR_HLP_PAGE_PE_CRITERR_S 28 #define PF0INT_OICR_HLP_PAGE_PE_CRITERR_M BIT(28) #define PF0INT_OICR_HLP_PAGE_VFLR_S 29 #define PF0INT_OICR_HLP_PAGE_VFLR_M BIT(29) #define PF0INT_OICR_HLP_PAGE_XLR_HW_DONE_S 30 #define PF0INT_OICR_HLP_PAGE_XLR_HW_DONE_M BIT(30) #define PF0INT_OICR_HLP_PAGE_SWINT_S 31 #define PF0INT_OICR_HLP_PAGE_SWINT_M BIT(31) #define PF0INT_OICR_PSM_PAGE 0x02D02000 /* Reset Source: CORER */ #define PF0INT_OICR_PSM_PAGE_INTEVENT_S 0 #define PF0INT_OICR_PSM_PAGE_INTEVENT_M BIT(0) #define PF0INT_OICR_PSM_PAGE_QUEUE_S 1 #define PF0INT_OICR_PSM_PAGE_QUEUE_M BIT(1) #define PF0INT_OICR_PSM_PAGE_RSV1_S 2 #define PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0xFF, 2) #define PF0INT_OICR_PSM_PAGE_HH_COMP_S 10 #define PF0INT_OICR_PSM_PAGE_HH_COMP_M BIT(10) #define PF0INT_OICR_PSM_PAGE_TSYN_TX_S 11 #define PF0INT_OICR_PSM_PAGE_TSYN_TX_M BIT(11) #define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_S 12 #define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_M BIT(12) #define PF0INT_OICR_PSM_PAGE_TSYN_TGT_S 13 #define PF0INT_OICR_PSM_PAGE_TSYN_TGT_M BIT(13) #define PF0INT_OICR_PSM_PAGE_HLP_RDY_S 14 #define PF0INT_OICR_PSM_PAGE_HLP_RDY_M BIT(14) #define PF0INT_OICR_PSM_PAGE_CPM_RDY_S 15 #define PF0INT_OICR_PSM_PAGE_CPM_RDY_M BIT(15) #define PF0INT_OICR_PSM_PAGE_ECC_ERR_S 16 #define PF0INT_OICR_PSM_PAGE_ECC_ERR_M BIT(16) #define 
PF0INT_OICR_PSM_PAGE_RSV2_S 17 #define PF0INT_OICR_PSM_PAGE_RSV2_M MAKEMASK(0x3, 17) #define PF0INT_OICR_PSM_PAGE_MAL_DETECT_S 19 #define PF0INT_OICR_PSM_PAGE_MAL_DETECT_M BIT(19) #define PF0INT_OICR_PSM_PAGE_GRST_S 20 #define PF0INT_OICR_PSM_PAGE_GRST_M BIT(20) #define PF0INT_OICR_PSM_PAGE_PCI_EXCEPTION_S 21 #define PF0INT_OICR_PSM_PAGE_PCI_EXCEPTION_M BIT(21) #define PF0INT_OICR_PSM_PAGE_GPIO_S 22 #define PF0INT_OICR_PSM_PAGE_GPIO_M BIT(22) #define PF0INT_OICR_PSM_PAGE_RSV3_S 23 #define PF0INT_OICR_PSM_PAGE_RSV3_M BIT(23) #define PF0INT_OICR_PSM_PAGE_STORM_DETECT_S 24 #define PF0INT_OICR_PSM_PAGE_STORM_DETECT_M BIT(24) #define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_S 25 #define PF0INT_OICR_PSM_PAGE_LINK_STAT_CHANGE_M BIT(25) #define PF0INT_OICR_PSM_PAGE_HMC_ERR_S 26 #define PF0INT_OICR_PSM_PAGE_HMC_ERR_M BIT(26) #define PF0INT_OICR_PSM_PAGE_PE_PUSH_S 27 #define PF0INT_OICR_PSM_PAGE_PE_PUSH_M BIT(27) #define PF0INT_OICR_PSM_PAGE_PE_CRITERR_S 28 #define PF0INT_OICR_PSM_PAGE_PE_CRITERR_M BIT(28) #define PF0INT_OICR_PSM_PAGE_VFLR_S 29 #define PF0INT_OICR_PSM_PAGE_VFLR_M BIT(29) #define PF0INT_OICR_PSM_PAGE_XLR_HW_DONE_S 30 #define PF0INT_OICR_PSM_PAGE_XLR_HW_DONE_M BIT(30) #define PF0INT_OICR_PSM_PAGE_SWINT_S 31 #define PF0INT_OICR_PSM_PAGE_SWINT_M BIT(31) #define QRX_TAIL_PAGE(_QRX) (0x03800000 + ((_QRX) * 4096)) /* _i=0...2047 */ /* Reset Source: CORER */ #define QRX_TAIL_PAGE_MAX_INDEX 2047 #define QRX_TAIL_PAGE_TAIL_S 0 #define QRX_TAIL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0) #define QTX_COMM_DBELL_PAGE(_DBQM) (0x04000000 + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */ #define QTX_COMM_DBELL_PAGE_MAX_INDEX 16383 #define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_S 0 #define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0) #define QTX_COMM_DBLQ_DBELL_PAGE(_DBLQ) (0x02F00000 + ((_DBLQ) * 4096)) /* _i=0...255 */ /* Reset Source: CORER */ #define QTX_COMM_DBLQ_DBELL_PAGE_MAX_INDEX 255 #define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_S 0 #define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0) #define VSI_MBX_ARQBAH(_VSI) (0x02000018 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ARQBAH_MAX_INDEX 767 #define VSI_MBX_ARQBAH_ARQBAH_S 0 #define VSI_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0) #define VSI_MBX_ARQBAL(_VSI) (0x02000014 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ARQBAL_MAX_INDEX 767 #define VSI_MBX_ARQBAL_ARQBAL_LSB_S 0 #define VSI_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0) #define VSI_MBX_ARQBAL_ARQBAL_S 6 #define VSI_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6) #define VSI_MBX_ARQH(_VSI) (0x02000020 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ARQH_MAX_INDEX 767 #define VSI_MBX_ARQH_ARQH_S 0 #define VSI_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0) #define VSI_MBX_ARQLEN(_VSI) (0x0200001C + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: PFR */ #define VSI_MBX_ARQLEN_MAX_INDEX 767 #define VSI_MBX_ARQLEN_ARQLEN_S 0 #define VSI_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0) #define VSI_MBX_ARQLEN_ARQVFE_S 28 #define VSI_MBX_ARQLEN_ARQVFE_M BIT(28) #define VSI_MBX_ARQLEN_ARQOVFL_S 29 #define VSI_MBX_ARQLEN_ARQOVFL_M BIT(29) #define VSI_MBX_ARQLEN_ARQCRIT_S 30 #define VSI_MBX_ARQLEN_ARQCRIT_M BIT(30) #define VSI_MBX_ARQLEN_ARQENABLE_S 31 #define VSI_MBX_ARQLEN_ARQENABLE_M BIT(31) #define VSI_MBX_ARQT(_VSI) (0x02000024 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */ #define VSI_MBX_ARQT_MAX_INDEX 767 #define VSI_MBX_ARQT_ARQT_S 0 #define VSI_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0) #define 
#define VSI_MBX_ATQBAH(_VSI) (0x02000004 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_MBX_ATQBAH_MAX_INDEX 767
#define VSI_MBX_ATQBAH_ATQBAH_S 0
#define VSI_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define VSI_MBX_ATQBAL(_VSI) (0x02000000 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_MBX_ATQBAL_MAX_INDEX 767
#define VSI_MBX_ATQBAL_ATQBAL_S 6
#define VSI_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define VSI_MBX_ATQH(_VSI) (0x0200000C + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_MBX_ATQH_MAX_INDEX 767
#define VSI_MBX_ATQH_ATQH_S 0
#define VSI_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define VSI_MBX_ATQLEN(_VSI) (0x02000008 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: PFR */
#define VSI_MBX_ATQLEN_MAX_INDEX 767
#define VSI_MBX_ATQLEN_ATQLEN_S 0
#define VSI_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define VSI_MBX_ATQLEN_ATQVFE_S 28
#define VSI_MBX_ATQLEN_ATQVFE_M BIT(28)
#define VSI_MBX_ATQLEN_ATQOVFL_S 29
#define VSI_MBX_ATQLEN_ATQOVFL_M BIT(29)
#define VSI_MBX_ATQLEN_ATQCRIT_S 30
#define VSI_MBX_ATQLEN_ATQCRIT_M BIT(30)
#define VSI_MBX_ATQLEN_ATQENABLE_S 31
#define VSI_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define VSI_MBX_ATQT(_VSI) (0x02000010 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_MBX_ATQT_MAX_INDEX 767
#define VSI_MBX_ATQT_ATQT_S 0
#define VSI_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define GL_ACL_ACCESS_CMD 0x00391000 /* Reset Source: CORER */
#define GL_ACL_ACCESS_CMD_TABLE_ID_S 0
#define GL_ACL_ACCESS_CMD_TABLE_ID_M MAKEMASK(0xFF, 0)
#define GL_ACL_ACCESS_CMD_ENTRY_INDEX_S 8
#define GL_ACL_ACCESS_CMD_ENTRY_INDEX_M MAKEMASK(0xFFF, 8)
#define GL_ACL_ACCESS_CMD_OPERATION_S 20
#define GL_ACL_ACCESS_CMD_OPERATION_M BIT(20)
#define GL_ACL_ACCESS_CMD_OBJ_TYPE_S 24
#define GL_ACL_ACCESS_CMD_OBJ_TYPE_M MAKEMASK(0xF, 24)
#define GL_ACL_ACCESS_CMD_EXECUTE_S 31
#define GL_ACL_ACCESS_CMD_EXECUTE_M BIT(31)
#define GL_ACL_ACCESS_STATUS 0x00391004 /* Reset Source: CORER */
#define GL_ACL_ACCESS_STATUS_BUSY_S 0
#define GL_ACL_ACCESS_STATUS_BUSY_M BIT(0)
#define GL_ACL_ACCESS_STATUS_DONE_S 1
#define GL_ACL_ACCESS_STATUS_DONE_M BIT(1)
#define GL_ACL_ACCESS_STATUS_ERROR_S 2
#define GL_ACL_ACCESS_STATUS_ERROR_M BIT(2)
#define GL_ACL_ACCESS_STATUS_OPERATION_S 3
#define GL_ACL_ACCESS_STATUS_OPERATION_M BIT(3)
#define GL_ACL_ACCESS_STATUS_ERROR_CODE_S 4
#define GL_ACL_ACCESS_STATUS_ERROR_CODE_M MAKEMASK(0xF, 4)
#define GL_ACL_ACCESS_STATUS_TABLE_ID_S 8
#define GL_ACL_ACCESS_STATUS_TABLE_ID_M MAKEMASK(0xFF, 8)
#define GL_ACL_ACCESS_STATUS_ENTRY_INDEX_S 16
#define GL_ACL_ACCESS_STATUS_ENTRY_INDEX_M MAKEMASK(0xFFF, 16)
#define GL_ACL_ACCESS_STATUS_OBJ_TYPE_S 28
#define GL_ACL_ACCESS_STATUS_OBJ_TYPE_M MAKEMASK(0xF, 28)
#define GL_ACL_ACTMEM_ACT(_i) (0x00393824 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
#define GL_ACL_ACTMEM_ACT_MAX_INDEX 1
#define GL_ACL_ACTMEM_ACT_VALUE_S 0
#define GL_ACL_ACTMEM_ACT_VALUE_M MAKEMASK(0xFFFF, 0)
#define GL_ACL_ACTMEM_ACT_MDID_S 20
#define GL_ACL_ACTMEM_ACT_MDID_M MAKEMASK(0x3F, 20)
#define GL_ACL_ACTMEM_ACT_PRIORITY_S 28
#define GL_ACL_ACTMEM_ACT_PRIORITY_M MAKEMASK(0x7, 28)
#define GL_ACL_CHICKEN_REGISTER 0x00393810 /* Reset Source: CORER */
#define GL_ACL_CHICKEN_REGISTER_TCAM_DATA_POL_CH_S 0
#define GL_ACL_CHICKEN_REGISTER_TCAM_DATA_POL_CH_M BIT(0)
#define GL_ACL_CHICKEN_REGISTER_TCAM_ADDR_POL_CH_S 1
#define GL_ACL_CHICKEN_REGISTER_TCAM_ADDR_POL_CH_M BIT(1)
#define GL_ACL_DEFAULT_ACT(_i) (0x00391168 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
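/*
 * Illustrative sketch of how the GL_ACL_ACCESS_CMD/GL_ACL_ACCESS_STATUS
 * pair above would be driven: compose the command word from the field
 * macros, set EXECUTE, then poll BUSY.  rd32()/wr32() stand in for the
 * driver's register accessors; the command semantics beyond the bit
 * layout are assumptions here.
 *
 *	uint32_t cmd =
 *	    ((table_id << GL_ACL_ACCESS_CMD_TABLE_ID_S) &
 *	     GL_ACL_ACCESS_CMD_TABLE_ID_M) |
 *	    ((entry << GL_ACL_ACCESS_CMD_ENTRY_INDEX_S) &
 *	     GL_ACL_ACCESS_CMD_ENTRY_INDEX_M) |
 *	    GL_ACL_ACCESS_CMD_EXECUTE_M;
 *	wr32(hw, GL_ACL_ACCESS_CMD, cmd);
 *	while (rd32(hw, GL_ACL_ACCESS_STATUS) & GL_ACL_ACCESS_STATUS_BUSY_M)
 *		continue;	 * then check DONE/ERROR/ERROR_CODE *
 */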
#define GL_ACL_DEFAULT_ACT_MAX_INDEX 15
#define GL_ACL_DEFAULT_ACT_VALUE_S 0
#define GL_ACL_DEFAULT_ACT_VALUE_M MAKEMASK(0xFFFF, 0)
#define GL_ACL_DEFAULT_ACT_MDID_S 20
#define GL_ACL_DEFAULT_ACT_MDID_M MAKEMASK(0x3F, 20)
#define GL_ACL_DEFAULT_ACT_PRIORITY_S 28
#define GL_ACL_DEFAULT_ACT_PRIORITY_M MAKEMASK(0x7, 28)
#define GL_ACL_PROFILE_BWSB_SEL(_i) (0x00391008 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GL_ACL_PROFILE_BWSB_SEL_MAX_INDEX 31
#define GL_ACL_PROFILE_BWSB_SEL_BSB_SRC_OFF_S 0
#define GL_ACL_PROFILE_BWSB_SEL_BSB_SRC_OFF_M MAKEMASK(0x3F, 0)
#define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_S 8
#define GL_ACL_PROFILE_BWSB_SEL_WSB_SRC_OFF_M MAKEMASK(0x1F, 8)
#define GL_ACL_PROFILE_DWSB_SEL(_i) (0x00391088 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GL_ACL_PROFILE_DWSB_SEL_MAX_INDEX 15
#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_S 0
#define GL_ACL_PROFILE_DWSB_SEL_DWORD_SEL_OFF_M MAKEMASK(0xF, 0)
#define GL_ACL_PROFILE_PF_CFG(_i) (0x003910C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_ACL_PROFILE_PF_CFG_MAX_INDEX 7
#define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_S 0
#define GL_ACL_PROFILE_PF_CFG_SCEN_SEL_M MAKEMASK(0x3F, 0)
#define GL_ACL_PROFILE_RC_CFG(_i) (0x003910E8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_ACL_PROFILE_RC_CFG_MAX_INDEX 7
#define GL_ACL_PROFILE_RC_CFG_LOW_BOUND_S 0
#define GL_ACL_PROFILE_RC_CFG_LOW_BOUND_M MAKEMASK(0xFFFF, 0)
#define GL_ACL_PROFILE_RC_CFG_HIGH_BOUND_S 16
#define GL_ACL_PROFILE_RC_CFG_HIGH_BOUND_M MAKEMASK(0xFFFF, 16)
#define GL_ACL_PROFILE_RCF_MASK(_i) (0x00391108 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_ACL_PROFILE_RCF_MASK_MAX_INDEX 7
#define GL_ACL_PROFILE_RCF_MASK_MASK_S 0
#define GL_ACL_PROFILE_RCF_MASK_MASK_M MAKEMASK(0xFFFF, 0)
#define GL_ACL_SCENARIO_ACT_CFG(_i) (0x003938AC + ((_i) * 4)) /* _i=0...19 */ /* Reset Source: CORER */
#define GL_ACL_SCENARIO_ACT_CFG_MAX_INDEX 19
#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_SEL_S 0
#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_SEL_M MAKEMASK(0xF, 0)
#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_EN_S 8
#define GL_ACL_SCENARIO_ACT_CFG_ACTMEM_EN_M BIT(8)
#define GL_ACL_SCENARIO_CFG_H(_i) (0x0039386C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GL_ACL_SCENARIO_CFG_H_MAX_INDEX 15
#define GL_ACL_SCENARIO_CFG_H_SELECT4_S 0
#define GL_ACL_SCENARIO_CFG_H_SELECT4_M MAKEMASK(0x1F, 0)
#define GL_ACL_SCENARIO_CFG_H_CHUNKMASK_S 8
#define GL_ACL_SCENARIO_CFG_H_CHUNKMASK_M MAKEMASK(0xFF, 8)
#define GL_ACL_SCENARIO_CFG_H_START_COMPARE_S 24
#define GL_ACL_SCENARIO_CFG_H_START_COMPARE_M BIT(24)
#define GL_ACL_SCENARIO_CFG_H_START_SET_S 28
#define GL_ACL_SCENARIO_CFG_H_START_SET_M BIT(28)
#define GL_ACL_SCENARIO_CFG_L(_i) (0x0039382C + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GL_ACL_SCENARIO_CFG_L_MAX_INDEX 15
#define GL_ACL_SCENARIO_CFG_L_SELECT0_S 0
#define GL_ACL_SCENARIO_CFG_L_SELECT0_M MAKEMASK(0x7F, 0)
#define GL_ACL_SCENARIO_CFG_L_SELECT1_S 8
#define GL_ACL_SCENARIO_CFG_L_SELECT1_M MAKEMASK(0x7F, 8)
#define GL_ACL_SCENARIO_CFG_L_SELECT2_S 16
#define GL_ACL_SCENARIO_CFG_L_SELECT2_M MAKEMASK(0x7F, 16)
#define GL_ACL_SCENARIO_CFG_L_SELECT3_S 24
#define GL_ACL_SCENARIO_CFG_L_SELECT3_M MAKEMASK(0x7F, 24)
#define GL_ACL_TCAM_KEY_H 0x00393818 /* Reset Source: CORER */
#define GL_ACL_TCAM_KEY_H_GL_ACL_FFU_TCAM_KEY_H_S 0
#define GL_ACL_TCAM_KEY_H_GL_ACL_FFU_TCAM_KEY_H_M MAKEMASK(0xFF, 0)
#define GL_ACL_TCAM_KEY_INV_H 0x00393820 /* Reset Source: CORER */
#define GL_ACL_TCAM_KEY_INV_H_GL_ACL_FFU_TCAM_KEY_INV_H_S 0
#define GL_ACL_TCAM_KEY_INV_H_GL_ACL_FFU_TCAM_KEY_INV_H_M MAKEMASK(0xFF, 0)
#define GL_ACL_TCAM_KEY_INV_L 0x0039381C /* Reset Source: CORER */
#define GL_ACL_TCAM_KEY_INV_L_GL_ACL_FFU_TCAM_KEY_INV_L_S 0
#define GL_ACL_TCAM_KEY_INV_L_GL_ACL_FFU_TCAM_KEY_INV_L_M MAKEMASK(0xFFFFFFFF, 0)
#define GL_ACL_TCAM_KEY_L 0x00393814 /* Reset Source: CORER */
#define GL_ACL_TCAM_KEY_L_GL_ACL_FFU_TCAM_KEY_L_S 0
#define GL_ACL_TCAM_KEY_L_GL_ACL_FFU_TCAM_KEY_L_M MAKEMASK(0xFFFFFFFF, 0)
#define VSI_ACL_DEF_SEL(_VSI) (0x00391800 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_ACL_DEF_SEL_MAX_INDEX 767
#define VSI_ACL_DEF_SEL_RX_PROFILE_MISS_SEL_S 0
#define VSI_ACL_DEF_SEL_RX_PROFILE_MISS_SEL_M MAKEMASK(0x3, 0)
#define VSI_ACL_DEF_SEL_RX_TABLES_MISS_SEL_S 4
#define VSI_ACL_DEF_SEL_RX_TABLES_MISS_SEL_M MAKEMASK(0x3, 4)
#define VSI_ACL_DEF_SEL_TX_PROFILE_MISS_SEL_S 8
#define VSI_ACL_DEF_SEL_TX_PROFILE_MISS_SEL_M MAKEMASK(0x3, 8)
#define VSI_ACL_DEF_SEL_TX_TABLES_MISS_SEL_S 12
#define VSI_ACL_DEF_SEL_TX_TABLES_MISS_SEL_M MAKEMASK(0x3, 12)
#define GL_SWT_L2TAG0(_i) (0x000492A8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_SWT_L2TAG0_MAX_INDEX 7
#define GL_SWT_L2TAG0_DATA_S 0
#define GL_SWT_L2TAG0_DATA_M MAKEMASK(0xFFFFFFFF, 0)
#define GL_SWT_L2TAG1(_i) (0x000492C8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_SWT_L2TAG1_MAX_INDEX 7
#define GL_SWT_L2TAG1_DATA_S 0
#define GL_SWT_L2TAG1_DATA_M MAKEMASK(0xFFFFFFFF, 0)
#define GL_SWT_L2TAGCTRL(_i) (0x001D2660 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_SWT_L2TAGCTRL_MAX_INDEX 7
#define GL_SWT_L2TAGCTRL_LENGTH_S 0
#define GL_SWT_L2TAGCTRL_LENGTH_M MAKEMASK(0x7F, 0)
#define GL_SWT_L2TAGCTRL_HAS_UP_S 7
#define GL_SWT_L2TAGCTRL_HAS_UP_M BIT(7)
#define GL_SWT_L2TAGCTRL_ISVLAN_S 9
#define GL_SWT_L2TAGCTRL_ISVLAN_M BIT(9)
#define GL_SWT_L2TAGCTRL_INNERUP_S 10
#define GL_SWT_L2TAGCTRL_INNERUP_M BIT(10)
#define GL_SWT_L2TAGCTRL_OUTERUP_S 11
#define GL_SWT_L2TAGCTRL_OUTERUP_M BIT(11)
#define GL_SWT_L2TAGCTRL_LONG_S 12
#define GL_SWT_L2TAGCTRL_LONG_M BIT(12)
#define GL_SWT_L2TAGCTRL_ISMPLS_S 13
#define GL_SWT_L2TAGCTRL_ISMPLS_M BIT(13)
#define GL_SWT_L2TAGCTRL_ISNSH_S 14
#define GL_SWT_L2TAGCTRL_ISNSH_M BIT(14)
#define GL_SWT_L2TAGCTRL_ETHERTYPE_S 16
#define GL_SWT_L2TAGCTRL_ETHERTYPE_M MAKEMASK(0xFFFF, 16)
#define GL_SWT_L2TAGRXEB(_i) (0x00052000 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_SWT_L2TAGRXEB_MAX_INDEX 7
#define GL_SWT_L2TAGRXEB_OFFSET_S 0
#define GL_SWT_L2TAGRXEB_OFFSET_M MAKEMASK(0xFF, 0)
#define GL_SWT_L2TAGRXEB_LENGTH_S 8
#define GL_SWT_L2TAGRXEB_LENGTH_M MAKEMASK(0x3, 8)
#define GL_SWT_L2TAGTXIB(_i) (0x000492E8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GL_SWT_L2TAGTXIB_MAX_INDEX 7
#define GL_SWT_L2TAGTXIB_OFFSET_S 0
#define GL_SWT_L2TAGTXIB_OFFSET_M MAKEMASK(0xFF, 0)
#define GL_SWT_L2TAGTXIB_LENGTH_S 8
#define GL_SWT_L2TAGTXIB_LENGTH_M MAKEMASK(0x3, 8)
#define GLCM_PE_CACHESIZE 0x005046B4 /* Reset Source: CORER */
#define GLCM_PE_CACHESIZE_WORD_SIZE_S 0
#define GLCM_PE_CACHESIZE_WORD_SIZE_M MAKEMASK(0xFFF, 0)
#define GLCM_PE_CACHESIZE_SETS_S 12
#define GLCM_PE_CACHESIZE_SETS_M MAKEMASK(0xF, 12)
#define GLCM_PE_CACHESIZE_WAYS_S 16
#define GLCM_PE_CACHESIZE_WAYS_M MAKEMASK(0x1FF, 16)
#define GLCOMM_CQ_CTL(_CQ) (0x000F0000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLCOMM_CQ_CTL_MAX_INDEX 511
#define GLCOMM_CQ_CTL_COMP_TYPE_S 0
#define GLCOMM_CQ_CTL_COMP_TYPE_M MAKEMASK(0x7, 0)
#define GLCOMM_CQ_CTL_CMD_S 4
#define GLCOMM_CQ_CTL_CMD_M MAKEMASK(0x7, 4)
#define GLCOMM_CQ_CTL_ID_S 16
#define GLCOMM_CQ_CTL_ID_M MAKEMASK(0x3FFF, 16)
#define GLCOMM_MIN_MAX_PKT 0x000FC064 /* Reset Source: CORER */
#define GLCOMM_MIN_MAX_PKT_MAHDL_S 0
#define GLCOMM_MIN_MAX_PKT_MAHDL_M MAKEMASK(0x3FFF, 0)
#define GLCOMM_MIN_MAX_PKT_MIHDL_S 16
#define GLCOMM_MIN_MAX_PKT_MIHDL_M MAKEMASK(0x3F, 16)
#define GLCOMM_MIN_MAX_PKT_LSO_COMS_MIHDL_S 22
#define GLCOMM_MIN_MAX_PKT_LSO_COMS_MIHDL_M MAKEMASK(0x3FF, 22)
#define GLCOMM_PKT_SHAPER_PROF(_i) (0x002D2DA8 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLCOMM_PKT_SHAPER_PROF_MAX_INDEX 7
#define GLCOMM_PKT_SHAPER_PROF_PKTCNT_S 0
#define GLCOMM_PKT_SHAPER_PROF_PKTCNT_M MAKEMASK(0x3F, 0)
#define GLCOMM_QTX_CNTX_CTL 0x002D2DC8 /* Reset Source: CORER */
#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_S 0
#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M MAKEMASK(0x3FFF, 0)
#define GLCOMM_QTX_CNTX_CTL_CMD_S 16
#define GLCOMM_QTX_CNTX_CTL_CMD_M MAKEMASK(0x7, 16)
#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_S 19
#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
#define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4)) /* _i=0...9 */ /* Reset Source: CORER */
#define GLCOMM_QTX_CNTX_DATA_MAX_INDEX 9
#define GLCOMM_QTX_CNTX_DATA_DATA_S 0
#define GLCOMM_QTX_CNTX_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
#define GLCOMM_QTX_CNTX_STAT 0x002D2DCC /* Reset Source: CORER */
#define GLCOMM_QTX_CNTX_STAT_CMD_IN_PROG_S 0
#define GLCOMM_QTX_CNTX_STAT_CMD_IN_PROG_M BIT(0)
#define GLCOMM_QUANTA_PROF(_i) (0x002D2D68 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GLCOMM_QUANTA_PROF_MAX_INDEX 15
#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_S 0
#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_M MAKEMASK(0x3FFF, 0)
#define GLCOMM_QUANTA_PROF_MAX_CMD_S 16
#define GLCOMM_QUANTA_PROF_MAX_CMD_M MAKEMASK(0xFF, 16)
#define GLCOMM_QUANTA_PROF_MAX_DESC_S 24
#define GLCOMM_QUANTA_PROF_MAX_DESC_M MAKEMASK(0x3F, 24)
#define GLLAN_TCLAN_CACHE_CTL 0x000FC0B8 /* Reset Source: CORER */
#define GLLAN_TCLAN_CACHE_CTL_MIN_FETCH_THRESH_S 0
#define GLLAN_TCLAN_CACHE_CTL_MIN_FETCH_THRESH_M MAKEMASK(0x3F, 0)
#define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_S 6
#define GLLAN_TCLAN_CACHE_CTL_FETCH_CL_ALIGN_M BIT(6)
#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_S 7
#define GLLAN_TCLAN_CACHE_CTL_MIN_ALLOC_THRESH_M MAKEMASK(0x7F, 7)
#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_S 14
#define GLLAN_TCLAN_CACHE_CTL_CACHE_ENTRY_CNT_M MAKEMASK(0xFF, 14)
#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_S 22
#define GLLAN_TCLAN_CACHE_CTL_CACHE_DESC_LIM_M MAKEMASK(0x3FF, 22)
#define GLTCLAN_CQ_CNTX0(_CQ) (0x000F0800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX0_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX0_RING_ADDR_LSB_S 0
#define GLTCLAN_CQ_CNTX0_RING_ADDR_LSB_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX1(_CQ) (0x000F1000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX1_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX1_RING_ADDR_MSB_S 0
#define GLTCLAN_CQ_CNTX1_RING_ADDR_MSB_M MAKEMASK(0x1FFFFFF, 0)
#define GLTCLAN_CQ_CNTX10(_CQ) (0x000F5800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX10_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX10_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX10_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX11(_CQ) (0x000F6000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX11_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX11_CQ_CACHLINE_S 0
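/*
 * Illustrative sketch: GLCOMM_QTX_CNTX_CTL/GLCOMM_QTX_CNTX_DATA above form
 * an indirect window onto TX queue context.  Assuming a read command
 * latches the selected context into the DATA array (the command encoding
 * itself is not spelled out in this header), an access would select the
 * queue and command, kick CMD_EXEC, wait for CMD_IN_PROG to clear, then
 * read out the ten DATA words:
 *
 *	wr32(hw, GLCOMM_QTX_CNTX_CTL,
 *	    ((qid << GLCOMM_QTX_CNTX_CTL_QUEUE_ID_S) &
 *	     GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M) |
 *	    ((cmd << GLCOMM_QTX_CNTX_CTL_CMD_S) &
 *	     GLCOMM_QTX_CNTX_CTL_CMD_M) |
 *	    GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M);
 *	while (rd32(hw, GLCOMM_QTX_CNTX_STAT) &
 *	    GLCOMM_QTX_CNTX_STAT_CMD_IN_PROG_M)
 *		continue;
 *	for (i = 0; i <= GLCOMM_QTX_CNTX_DATA_MAX_INDEX; i++)
 *		ctx[i] = rd32(hw, GLCOMM_QTX_CNTX_DATA(i));
 */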
#define GLTCLAN_CQ_CNTX11_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX12(_CQ) (0x000F6800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX12_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX12_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX12_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX13(_CQ) (0x000F7000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX13_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX13_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX13_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX14(_CQ) (0x000F7800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX14_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX14_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX14_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX15(_CQ) (0x000F8000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX15_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX15_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX15_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX16(_CQ) (0x000F8800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX16_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX16_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX16_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX17(_CQ) (0x000F9000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX17_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX17_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX17_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX18(_CQ) (0x000F9800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX18_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX18_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX18_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX19(_CQ) (0x000FA000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX19_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX19_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX19_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX2(_CQ) (0x000F1800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX2_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX2_RING_LEN_S 0
#define GLTCLAN_CQ_CNTX2_RING_LEN_M MAKEMASK(0x3FFFF, 0)
#define GLTCLAN_CQ_CNTX20(_CQ) (0x000FA800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX20_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX20_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX20_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX21(_CQ) (0x000FB000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX21_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX21_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX21_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX3(_CQ) (0x000F2000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX3_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX3_GENERATION_S 0
#define GLTCLAN_CQ_CNTX3_GENERATION_M BIT(0)
#define GLTCLAN_CQ_CNTX3_CQ_WR_PTR_S 1
#define GLTCLAN_CQ_CNTX3_CQ_WR_PTR_M MAKEMASK(0x3FFFFF, 1)
#define GLTCLAN_CQ_CNTX4(_CQ) (0x000F2800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX4_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX4_PF_NUM_S 0
#define GLTCLAN_CQ_CNTX4_PF_NUM_M MAKEMASK(0x7, 0)
#define GLTCLAN_CQ_CNTX4_VMVF_NUM_S 3
#define GLTCLAN_CQ_CNTX4_VMVF_NUM_M MAKEMASK(0x3FF, 3)
#define GLTCLAN_CQ_CNTX4_VMVF_TYPE_S 13
#define GLTCLAN_CQ_CNTX4_VMVF_TYPE_M MAKEMASK(0x3, 13)
#define GLTCLAN_CQ_CNTX5(_CQ) (0x000F3000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX5_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX5_TPH_EN_S 0
#define GLTCLAN_CQ_CNTX5_TPH_EN_M BIT(0)
#define GLTCLAN_CQ_CNTX5_CPU_ID_S 1
#define GLTCLAN_CQ_CNTX5_CPU_ID_M MAKEMASK(0xFF, 1)
#define GLTCLAN_CQ_CNTX5_FLUSH_ON_ITR_DIS_S 9
#define GLTCLAN_CQ_CNTX5_FLUSH_ON_ITR_DIS_M BIT(9)
#define GLTCLAN_CQ_CNTX6(_CQ) (0x000F3800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX6_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX6_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX6_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX7(_CQ) (0x000F4000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX7_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX7_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX7_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX8(_CQ) (0x000F4800 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX8_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX8_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX8_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define GLTCLAN_CQ_CNTX9(_CQ) (0x000F5000 + ((_CQ) * 4)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLTCLAN_CQ_CNTX9_MAX_INDEX 511
#define GLTCLAN_CQ_CNTX9_CQ_CACHLINE_S 0
#define GLTCLAN_CQ_CNTX9_CQ_CACHLINE_M MAKEMASK(0xFFFFFFFF, 0)
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */
#define QTX_COMM_DBELL_MAX_INDEX 16383
#define QTX_COMM_DBELL_QTX_COMM_DBELL_S 0
#define QTX_COMM_DBELL_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
#define QTX_COMM_DBLQ_CNTX(_i, _DBLQ) (0x002D0000 + ((_i) * 1024 + (_DBLQ) * 4)) /* _i=0...4, _DBLQ=0...255 */ /* Reset Source: CORER */
#define QTX_COMM_DBLQ_CNTX_MAX_INDEX 4
#define QTX_COMM_DBLQ_CNTX_DATA_S 0
#define QTX_COMM_DBLQ_CNTX_DATA_M MAKEMASK(0xFFFFFFFF, 0)
#define QTX_COMM_DBLQ_DBELL(_DBLQ) (0x002D1400 + ((_DBLQ) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
#define QTX_COMM_DBLQ_DBELL_MAX_INDEX 255
#define QTX_COMM_DBLQ_DBELL_TAIL_S 0
#define QTX_COMM_DBLQ_DBELL_TAIL_M MAKEMASK(0x1FFF, 0)
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) /* _i=0...16383 */ /* Reset Source: CORER */
#define QTX_COMM_HEAD_MAX_INDEX 16383
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M MAKEMASK(0x1FFF, 0)
#define QTX_COMM_HEAD_RS_PENDING_S 16
#define QTX_COMM_HEAD_RS_PENDING_M BIT(16)
#define GL_FW_TOOL_ARQBAH 0x000801C0 /* Reset Source: EMPR */
#define GL_FW_TOOL_ARQBAH_ARQBAH_S 0
#define GL_FW_TOOL_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define GL_FW_TOOL_ARQBAL 0x000800C0 /* Reset Source: EMPR */
#define GL_FW_TOOL_ARQBAL_ARQBAL_LSB_S 0
#define GL_FW_TOOL_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define GL_FW_TOOL_ARQBAL_ARQBAL_S 6
#define GL_FW_TOOL_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define GL_FW_TOOL_ARQH 0x000803C0 /* Reset Source: EMPR */
#define GL_FW_TOOL_ARQH_ARQH_S 0
#define GL_FW_TOOL_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define GL_FW_TOOL_ARQLEN 0x000802C0 /* Reset Source: EMPR */
#define GL_FW_TOOL_ARQLEN_ARQLEN_S 0
#define GL_FW_TOOL_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define GL_FW_TOOL_ARQLEN_ARQVFE_S 28
#define GL_FW_TOOL_ARQLEN_ARQVFE_M BIT(28)
#define GL_FW_TOOL_ARQLEN_ARQOVFL_S 29
#define GL_FW_TOOL_ARQLEN_ARQOVFL_M BIT(29)
#define GL_FW_TOOL_ARQLEN_ARQCRIT_S 30
#define GL_FW_TOOL_ARQLEN_ARQCRIT_M BIT(30)
#define GL_FW_TOOL_ARQLEN_ARQENABLE_S 31
#define GL_FW_TOOL_ARQLEN_ARQENABLE_M BIT(31)
#define GL_FW_TOOL_ARQT 0x000804C0 /* Reset Source: EMPR */
#define GL_FW_TOOL_ARQT_ARQT_S 0
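/*
 * Address arithmetic note (illustrative): most register arrays above take
 * a single index scaled by the register stride, but QTX_COMM_DBLQ_CNTX is
 * two-dimensional.  For example, QTX_COMM_DBLQ_CNTX(2, 7) expands to
 * (0x002D0000 + (2 * 1024 + 7 * 4)) = 0x002D081C: the _i index selects one
 * of five 1 KB context planes and _DBLQ selects the 4-byte word belonging
 * to a given doorbell queue.
 */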
#define GL_FW_TOOL_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define GL_FW_TOOL_ATQBAH 0x00080140 /* Reset Source: EMPR */
#define GL_FW_TOOL_ATQBAH_ATQBAH_S 0
#define GL_FW_TOOL_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define GL_FW_TOOL_ATQBAL 0x00080040 /* Reset Source: EMPR */
#define GL_FW_TOOL_ATQBAL_ATQBAL_LSB_S 0
#define GL_FW_TOOL_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
#define GL_FW_TOOL_ATQBAL_ATQBAL_S 6
#define GL_FW_TOOL_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define GL_FW_TOOL_ATQH 0x00080340 /* Reset Source: EMPR */
#define GL_FW_TOOL_ATQH_ATQH_S 0
#define GL_FW_TOOL_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define GL_FW_TOOL_ATQLEN 0x00080240 /* Reset Source: EMPR */
#define GL_FW_TOOL_ATQLEN_ATQLEN_S 0
#define GL_FW_TOOL_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define GL_FW_TOOL_ATQLEN_ATQVFE_S 28
#define GL_FW_TOOL_ATQLEN_ATQVFE_M BIT(28)
#define GL_FW_TOOL_ATQLEN_ATQOVFL_S 29
#define GL_FW_TOOL_ATQLEN_ATQOVFL_M BIT(29)
#define GL_FW_TOOL_ATQLEN_ATQCRIT_S 30
#define GL_FW_TOOL_ATQLEN_ATQCRIT_M BIT(30)
#define GL_FW_TOOL_ATQLEN_ATQENABLE_S 31
#define GL_FW_TOOL_ATQLEN_ATQENABLE_M BIT(31)
#define GL_FW_TOOL_ATQT 0x00080440 /* Reset Source: EMPR */
#define GL_FW_TOOL_ATQT_ATQT_S 0
#define GL_FW_TOOL_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define GL_MBX_PASID 0x00231EC0 /* Reset Source: CORER */
#define GL_MBX_PASID_PASID_MODE_S 0
#define GL_MBX_PASID_PASID_MODE_M BIT(0)
#define GL_MBX_PASID_PASID_MODE_VALID_S 1
#define GL_MBX_PASID_PASID_MODE_VALID_M BIT(1)
#define PF_FW_ARQBAH 0x00080180 /* Reset Source: EMPR */
#define PF_FW_ARQBAH_ARQBAH_S 0
#define PF_FW_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF_FW_ARQBAL 0x00080080 /* Reset Source: EMPR */
#define PF_FW_ARQBAL_ARQBAL_LSB_S 0
#define PF_FW_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF_FW_ARQBAL_ARQBAL_S 6
#define PF_FW_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF_FW_ARQH 0x00080380 /* Reset Source: EMPR */
#define PF_FW_ARQH_ARQH_S 0
#define PF_FW_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define PF_FW_ARQLEN 0x00080280 /* Reset Source: EMPR */
#define PF_FW_ARQLEN_ARQLEN_S 0
#define PF_FW_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define PF_FW_ARQLEN_ARQVFE_S 28
#define PF_FW_ARQLEN_ARQVFE_M BIT(28)
#define PF_FW_ARQLEN_ARQOVFL_S 29
#define PF_FW_ARQLEN_ARQOVFL_M BIT(29)
#define PF_FW_ARQLEN_ARQCRIT_S 30
#define PF_FW_ARQLEN_ARQCRIT_M BIT(30)
#define PF_FW_ARQLEN_ARQENABLE_S 31
#define PF_FW_ARQLEN_ARQENABLE_M BIT(31)
#define PF_FW_ARQT 0x00080480 /* Reset Source: EMPR */
#define PF_FW_ARQT_ARQT_S 0
#define PF_FW_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define PF_FW_ATQBAH 0x00080100 /* Reset Source: EMPR */
#define PF_FW_ATQBAH_ATQBAH_S 0
#define PF_FW_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF_FW_ATQBAL 0x00080000 /* Reset Source: EMPR */
#define PF_FW_ATQBAL_ATQBAL_LSB_S 0
#define PF_FW_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF_FW_ATQBAL_ATQBAL_S 6
#define PF_FW_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF_FW_ATQH 0x00080300 /* Reset Source: EMPR */
#define PF_FW_ATQH_ATQH_S 0
#define PF_FW_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define PF_FW_ATQLEN 0x00080200 /* Reset Source: EMPR */
#define PF_FW_ATQLEN_ATQLEN_S 0
#define PF_FW_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define PF_FW_ATQLEN_ATQVFE_S 28
#define PF_FW_ATQLEN_ATQVFE_M BIT(28)
#define PF_FW_ATQLEN_ATQOVFL_S 29
#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
#define PF_FW_ATQLEN_ATQCRIT_S 30
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define PF_FW_ATQLEN_ATQENABLE_S 31
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400 /* Reset Source: EMPR */
#define PF_FW_ATQT_ATQT_S 0
#define PF_FW_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
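/*
 * Illustrative bring-up sketch for the PF firmware admin transmit queue
 * registers above.  The BAL/BAH split implies the ring base must be
 * 64-byte aligned: ATQBAL proper holds address bits 31:6 and ATQBAH bits
 * 63:32, with the low six bits carried by the separate ATQBAL_LSB field
 * (assumed zero for an aligned ring).  Assuming a DMA-able ring at
 * physical address ring_pa with nentries descriptors, and wr32() as the
 * driver's register write accessor:
 *
 *	wr32(hw, PF_FW_ATQBAL, (uint32_t)ring_pa);
 *	wr32(hw, PF_FW_ATQBAH, (uint32_t)(ring_pa >> 32));
 *	wr32(hw, PF_FW_ATQLEN,
 *	    ((nentries << PF_FW_ATQLEN_ATQLEN_S) & PF_FW_ATQLEN_ATQLEN_M) |
 *	    PF_FW_ATQLEN_ATQENABLE_M);
 *	wr32(hw, PF_FW_ATQH, 0);
 *	wr32(hw, PF_FW_ATQT, 0);
 */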
#define PF_MBX_ARQBAH 0x0022E400 /* Reset Source: CORER */
#define PF_MBX_ARQBAH_ARQBAH_S 0
#define PF_MBX_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF_MBX_ARQBAL 0x0022E380 /* Reset Source: CORER */
#define PF_MBX_ARQBAL_ARQBAL_LSB_S 0
#define PF_MBX_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF_MBX_ARQBAL_ARQBAL_S 6
#define PF_MBX_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF_MBX_ARQH 0x0022E500 /* Reset Source: CORER */
#define PF_MBX_ARQH_ARQH_S 0
#define PF_MBX_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define PF_MBX_ARQLEN 0x0022E480 /* Reset Source: PFR */
#define PF_MBX_ARQLEN_ARQLEN_S 0
#define PF_MBX_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define PF_MBX_ARQLEN_ARQVFE_S 28
#define PF_MBX_ARQLEN_ARQVFE_M BIT(28)
#define PF_MBX_ARQLEN_ARQOVFL_S 29
#define PF_MBX_ARQLEN_ARQOVFL_M BIT(29)
#define PF_MBX_ARQLEN_ARQCRIT_S 30
#define PF_MBX_ARQLEN_ARQCRIT_M BIT(30)
#define PF_MBX_ARQLEN_ARQENABLE_S 31
#define PF_MBX_ARQLEN_ARQENABLE_M BIT(31)
#define PF_MBX_ARQT 0x0022E580 /* Reset Source: CORER */
#define PF_MBX_ARQT_ARQT_S 0
#define PF_MBX_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define PF_MBX_ATQBAH 0x0022E180 /* Reset Source: CORER */
#define PF_MBX_ATQBAH_ATQBAH_S 0
#define PF_MBX_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF_MBX_ATQBAL 0x0022E100 /* Reset Source: CORER */
#define PF_MBX_ATQBAL_ATQBAL_S 6
#define PF_MBX_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF_MBX_ATQH 0x0022E280 /* Reset Source: CORER */
#define PF_MBX_ATQH_ATQH_S 0
#define PF_MBX_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define PF_MBX_ATQLEN 0x0022E200 /* Reset Source: PFR */
#define PF_MBX_ATQLEN_ATQLEN_S 0
#define PF_MBX_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQVFE_S 28
#define PF_MBX_ATQLEN_ATQVFE_M BIT(28)
#define PF_MBX_ATQLEN_ATQOVFL_S 29
#define PF_MBX_ATQLEN_ATQOVFL_M BIT(29)
#define PF_MBX_ATQLEN_ATQCRIT_S 30
#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30)
#define PF_MBX_ATQLEN_ATQENABLE_S 31
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300 /* Reset Source: CORER */
#define PF_MBX_ATQT_ATQT_S 0
#define PF_MBX_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define PF_SB_ARQBAH 0x0022FF00 /* Reset Source: CORER */
#define PF_SB_ARQBAH_ARQBAH_S 0
#define PF_SB_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF_SB_ARQBAL 0x0022FE80 /* Reset Source: CORER */
#define PF_SB_ARQBAL_ARQBAL_LSB_S 0
#define PF_SB_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF_SB_ARQBAL_ARQBAL_S 6
#define PF_SB_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF_SB_ARQH 0x00230000 /* Reset Source: CORER */
#define PF_SB_ARQH_ARQH_S 0
#define PF_SB_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define PF_SB_ARQLEN 0x0022FF80 /* Reset Source: PFR */
#define PF_SB_ARQLEN_ARQLEN_S 0
#define PF_SB_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define PF_SB_ARQLEN_ARQVFE_S 28
#define PF_SB_ARQLEN_ARQVFE_M BIT(28)
#define PF_SB_ARQLEN_ARQOVFL_S 29
#define PF_SB_ARQLEN_ARQOVFL_M BIT(29)
#define PF_SB_ARQLEN_ARQCRIT_S 30
#define PF_SB_ARQLEN_ARQCRIT_M BIT(30)
#define PF_SB_ARQLEN_ARQENABLE_S 31
#define PF_SB_ARQLEN_ARQENABLE_M BIT(31)
#define PF_SB_ARQT 0x00230080 /* Reset Source: CORER */
#define PF_SB_ARQT_ARQT_S 0
#define PF_SB_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define PF_SB_ATQBAH 0x0022FC80 /* Reset Source: CORER */
#define PF_SB_ATQBAH_ATQBAH_S 0
#define PF_SB_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF_SB_ATQBAL 0x0022FC00 /* Reset Source: CORER */
#define PF_SB_ATQBAL_ATQBAL_S 6
#define PF_SB_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF_SB_ATQH 0x0022FD80 /* Reset Source: CORER */
#define PF_SB_ATQH_ATQH_S 0
#define PF_SB_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define PF_SB_ATQLEN 0x0022FD00 /* Reset Source: PFR */
#define PF_SB_ATQLEN_ATQLEN_S 0
#define PF_SB_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define PF_SB_ATQLEN_ATQVFE_S 28
#define PF_SB_ATQLEN_ATQVFE_M BIT(28)
#define PF_SB_ATQLEN_ATQOVFL_S 29
#define PF_SB_ATQLEN_ATQOVFL_M BIT(29)
#define PF_SB_ATQLEN_ATQCRIT_S 30
#define PF_SB_ATQLEN_ATQCRIT_M BIT(30)
#define PF_SB_ATQLEN_ATQENABLE_S 31
#define PF_SB_ATQLEN_ATQENABLE_M BIT(31)
#define PF_SB_ATQT 0x0022FE00 /* Reset Source: CORER */
#define PF_SB_ATQT_ATQT_S 0
#define PF_SB_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define PF_SB_REM_DEV_CTL 0x002300F0 /* Reset Source: CORER */
#define PF_SB_REM_DEV_CTL_DEST_EN_S 0
#define PF_SB_REM_DEV_CTL_DEST_EN_M MAKEMASK(0xFFFF, 0)
#define PF0_FW_HLP_ARQBAH 0x000801C8 /* Reset Source: EMPR */
#define PF0_FW_HLP_ARQBAH_ARQBAH_S 0
#define PF0_FW_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_FW_HLP_ARQBAL 0x000800C8 /* Reset Source: EMPR */
#define PF0_FW_HLP_ARQBAL_ARQBAL_LSB_S 0
#define PF0_FW_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF0_FW_HLP_ARQBAL_ARQBAL_S 6
#define PF0_FW_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_FW_HLP_ARQH 0x000803C8 /* Reset Source: EMPR */
#define PF0_FW_HLP_ARQH_ARQH_S 0
#define PF0_FW_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define PF0_FW_HLP_ARQLEN 0x000802C8 /* Reset Source: EMPR */
#define PF0_FW_HLP_ARQLEN_ARQLEN_S 0
#define PF0_FW_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_FW_HLP_ARQLEN_ARQVFE_S 28
#define PF0_FW_HLP_ARQLEN_ARQVFE_M BIT(28)
#define PF0_FW_HLP_ARQLEN_ARQOVFL_S 29
#define PF0_FW_HLP_ARQLEN_ARQOVFL_M BIT(29)
#define PF0_FW_HLP_ARQLEN_ARQCRIT_S 30
#define PF0_FW_HLP_ARQLEN_ARQCRIT_M BIT(30)
#define PF0_FW_HLP_ARQLEN_ARQENABLE_S 31
#define PF0_FW_HLP_ARQLEN_ARQENABLE_M BIT(31)
#define PF0_FW_HLP_ARQT 0x000804C8 /* Reset Source: EMPR */
#define PF0_FW_HLP_ARQT_ARQT_S 0
#define PF0_FW_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define PF0_FW_HLP_ATQBAH 0x00080148 /* Reset Source: EMPR */
#define PF0_FW_HLP_ATQBAH_ATQBAH_S 0
#define PF0_FW_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_FW_HLP_ATQBAL 0x00080048 /* Reset Source: EMPR */
#define PF0_FW_HLP_ATQBAL_ATQBAL_LSB_S 0
#define PF0_FW_HLP_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF0_FW_HLP_ATQBAL_ATQBAL_S 6
#define PF0_FW_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_FW_HLP_ATQH 0x00080348 /* Reset Source: EMPR */
#define PF0_FW_HLP_ATQH_ATQH_S 0
#define PF0_FW_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define PF0_FW_HLP_ATQLEN 0x00080248 /* Reset Source: EMPR */
#define PF0_FW_HLP_ATQLEN_ATQLEN_S 0
#define PF0_FW_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_FW_HLP_ATQLEN_ATQVFE_S 28
#define PF0_FW_HLP_ATQLEN_ATQVFE_M BIT(28)
#define PF0_FW_HLP_ATQLEN_ATQOVFL_S 29
#define PF0_FW_HLP_ATQLEN_ATQOVFL_M BIT(29)
#define PF0_FW_HLP_ATQLEN_ATQCRIT_S 30
#define PF0_FW_HLP_ATQLEN_ATQCRIT_M BIT(30)
#define PF0_FW_HLP_ATQLEN_ATQENABLE_S 31
#define PF0_FW_HLP_ATQLEN_ATQENABLE_M BIT(31)
#define PF0_FW_HLP_ATQT 0x00080448 /* Reset Source: EMPR */
#define PF0_FW_HLP_ATQT_ATQT_S 0
#define PF0_FW_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define PF0_FW_PSM_ARQBAH 0x000801C4 /* Reset Source: EMPR */
#define PF0_FW_PSM_ARQBAH_ARQBAH_S 0
#define PF0_FW_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_FW_PSM_ARQBAL 0x000800C4 /* Reset Source: EMPR */
#define PF0_FW_PSM_ARQBAL_ARQBAL_LSB_S 0
#define PF0_FW_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF0_FW_PSM_ARQBAL_ARQBAL_S 6
#define PF0_FW_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_FW_PSM_ARQH 0x000803C4 /* Reset Source: EMPR */
#define PF0_FW_PSM_ARQH_ARQH_S 0
#define PF0_FW_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define PF0_FW_PSM_ARQLEN 0x000802C4 /* Reset Source: EMPR */
#define PF0_FW_PSM_ARQLEN_ARQLEN_S 0
#define PF0_FW_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_FW_PSM_ARQLEN_ARQVFE_S 28
#define PF0_FW_PSM_ARQLEN_ARQVFE_M BIT(28)
#define PF0_FW_PSM_ARQLEN_ARQOVFL_S 29
#define PF0_FW_PSM_ARQLEN_ARQOVFL_M BIT(29)
#define PF0_FW_PSM_ARQLEN_ARQCRIT_S 30
#define PF0_FW_PSM_ARQLEN_ARQCRIT_M BIT(30)
#define PF0_FW_PSM_ARQLEN_ARQENABLE_S 31
#define PF0_FW_PSM_ARQLEN_ARQENABLE_M BIT(31)
#define PF0_FW_PSM_ARQT 0x000804C4 /* Reset Source: EMPR */
#define PF0_FW_PSM_ARQT_ARQT_S 0
#define PF0_FW_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define PF0_FW_PSM_ATQBAH 0x00080144 /* Reset Source: EMPR */
#define PF0_FW_PSM_ATQBAH_ATQBAH_S 0
#define PF0_FW_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_FW_PSM_ATQBAL 0x00080044 /* Reset Source: EMPR */
#define PF0_FW_PSM_ATQBAL_ATQBAL_LSB_S 0
#define PF0_FW_PSM_ATQBAL_ATQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF0_FW_PSM_ATQBAL_ATQBAL_S 6
#define PF0_FW_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_FW_PSM_ATQH 0x00080344 /* Reset Source: EMPR */
#define PF0_FW_PSM_ATQH_ATQH_S 0
#define PF0_FW_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define PF0_FW_PSM_ATQLEN 0x00080244 /* Reset Source: EMPR */
#define PF0_FW_PSM_ATQLEN_ATQLEN_S 0
#define PF0_FW_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_FW_PSM_ATQLEN_ATQVFE_S 28
#define PF0_FW_PSM_ATQLEN_ATQVFE_M BIT(28)
#define PF0_FW_PSM_ATQLEN_ATQOVFL_S 29
#define PF0_FW_PSM_ATQLEN_ATQOVFL_M BIT(29)
#define PF0_FW_PSM_ATQLEN_ATQCRIT_S 30
#define PF0_FW_PSM_ATQLEN_ATQCRIT_M BIT(30)
#define PF0_FW_PSM_ATQLEN_ATQENABLE_S 31
#define PF0_FW_PSM_ATQLEN_ATQENABLE_M BIT(31)
#define PF0_FW_PSM_ATQT 0x00080444 /* Reset Source: EMPR */
#define PF0_FW_PSM_ATQT_ATQT_S 0
#define PF0_FW_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_CPM_ARQBAH 0x0022E5D8 /* Reset Source: CORER */
#define PF0_MBX_CPM_ARQBAH_ARQBAH_S 0
#define PF0_MBX_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_MBX_CPM_ARQBAL 0x0022E5D4 /* Reset Source: CORER */
#define PF0_MBX_CPM_ARQBAL_ARQBAL_LSB_S 0
#define PF0_MBX_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF0_MBX_CPM_ARQBAL_ARQBAL_S 6
#define PF0_MBX_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_MBX_CPM_ARQH 0x0022E5E0 /* Reset Source: CORER */
#define PF0_MBX_CPM_ARQH_ARQH_S 0
#define PF0_MBX_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_CPM_ARQLEN 0x0022E5DC /* Reset Source: PFR */
#define PF0_MBX_CPM_ARQLEN_ARQLEN_S 0
#define PF0_MBX_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_CPM_ARQLEN_ARQVFE_S 28
#define PF0_MBX_CPM_ARQLEN_ARQVFE_M BIT(28)
#define PF0_MBX_CPM_ARQLEN_ARQOVFL_S 29
#define PF0_MBX_CPM_ARQLEN_ARQOVFL_M BIT(29)
#define PF0_MBX_CPM_ARQLEN_ARQCRIT_S 30
#define PF0_MBX_CPM_ARQLEN_ARQCRIT_M BIT(30)
#define PF0_MBX_CPM_ARQLEN_ARQENABLE_S 31
#define PF0_MBX_CPM_ARQLEN_ARQENABLE_M BIT(31)
#define PF0_MBX_CPM_ARQT 0x0022E5E4 /* Reset Source: CORER */
#define PF0_MBX_CPM_ARQT_ARQT_S 0
#define PF0_MBX_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_CPM_ATQBAH 0x0022E5C4 /* Reset Source: CORER */
#define PF0_MBX_CPM_ATQBAH_ATQBAH_S 0
#define PF0_MBX_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_MBX_CPM_ATQBAL 0x0022E5C0 /* Reset Source: CORER */
#define PF0_MBX_CPM_ATQBAL_ATQBAL_S 6
#define PF0_MBX_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_MBX_CPM_ATQH 0x0022E5CC /* Reset Source: CORER */
#define PF0_MBX_CPM_ATQH_ATQH_S 0
#define PF0_MBX_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_CPM_ATQLEN 0x0022E5C8 /* Reset Source: PFR */
#define PF0_MBX_CPM_ATQLEN_ATQLEN_S 0
#define PF0_MBX_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_CPM_ATQLEN_ATQVFE_S 28
#define PF0_MBX_CPM_ATQLEN_ATQVFE_M BIT(28)
#define PF0_MBX_CPM_ATQLEN_ATQOVFL_S 29
#define PF0_MBX_CPM_ATQLEN_ATQOVFL_M BIT(29)
#define PF0_MBX_CPM_ATQLEN_ATQCRIT_S 30
#define PF0_MBX_CPM_ATQLEN_ATQCRIT_M BIT(30)
#define PF0_MBX_CPM_ATQLEN_ATQENABLE_S 31
#define PF0_MBX_CPM_ATQLEN_ATQENABLE_M BIT(31)
#define PF0_MBX_CPM_ATQT 0x0022E5D0 /* Reset Source: CORER */
#define PF0_MBX_CPM_ATQT_ATQT_S 0
#define PF0_MBX_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_HLP_ARQBAH 0x0022E600 /* Reset Source: CORER */
#define PF0_MBX_HLP_ARQBAH_ARQBAH_S 0
#define PF0_MBX_HLP_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_MBX_HLP_ARQBAL 0x0022E5FC /* Reset Source: CORER */
#define PF0_MBX_HLP_ARQBAL_ARQBAL_LSB_S 0
#define PF0_MBX_HLP_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF0_MBX_HLP_ARQBAL_ARQBAL_S 6
#define PF0_MBX_HLP_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_MBX_HLP_ARQH 0x0022E608 /* Reset Source: CORER */
#define PF0_MBX_HLP_ARQH_ARQH_S 0
#define PF0_MBX_HLP_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_HLP_ARQLEN 0x0022E604 /* Reset Source: PFR */
#define PF0_MBX_HLP_ARQLEN_ARQLEN_S 0
#define PF0_MBX_HLP_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_HLP_ARQLEN_ARQVFE_S 28
#define PF0_MBX_HLP_ARQLEN_ARQVFE_M BIT(28)
#define PF0_MBX_HLP_ARQLEN_ARQOVFL_S 29
#define PF0_MBX_HLP_ARQLEN_ARQOVFL_M BIT(29)
#define PF0_MBX_HLP_ARQLEN_ARQCRIT_S 30
#define PF0_MBX_HLP_ARQLEN_ARQCRIT_M BIT(30)
#define PF0_MBX_HLP_ARQLEN_ARQENABLE_S 31
#define PF0_MBX_HLP_ARQLEN_ARQENABLE_M BIT(31)
#define PF0_MBX_HLP_ARQT 0x0022E60C /* Reset Source: CORER */
#define PF0_MBX_HLP_ARQT_ARQT_S 0
#define PF0_MBX_HLP_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_HLP_ATQBAH 0x0022E5EC /* Reset Source: CORER */
#define PF0_MBX_HLP_ATQBAH_ATQBAH_S 0
#define PF0_MBX_HLP_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_MBX_HLP_ATQBAL 0x0022E5E8 /* Reset Source: CORER */
#define PF0_MBX_HLP_ATQBAL_ATQBAL_S 6
#define PF0_MBX_HLP_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_MBX_HLP_ATQH 0x0022E5F4 /* Reset Source: CORER */
#define PF0_MBX_HLP_ATQH_ATQH_S 0
#define PF0_MBX_HLP_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_HLP_ATQLEN 0x0022E5F0 /* Reset Source: PFR */
#define PF0_MBX_HLP_ATQLEN_ATQLEN_S 0
#define PF0_MBX_HLP_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_HLP_ATQLEN_ATQVFE_S 28
#define PF0_MBX_HLP_ATQLEN_ATQVFE_M BIT(28)
#define PF0_MBX_HLP_ATQLEN_ATQOVFL_S 29
#define PF0_MBX_HLP_ATQLEN_ATQOVFL_M BIT(29)
#define PF0_MBX_HLP_ATQLEN_ATQCRIT_S 30
#define PF0_MBX_HLP_ATQLEN_ATQCRIT_M BIT(30)
#define PF0_MBX_HLP_ATQLEN_ATQENABLE_S 31
#define PF0_MBX_HLP_ATQLEN_ATQENABLE_M BIT(31)
#define PF0_MBX_HLP_ATQT 0x0022E5F8 /* Reset Source: CORER */
#define PF0_MBX_HLP_ATQT_ATQT_S 0
#define PF0_MBX_HLP_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_PSM_ARQBAH 0x0022E628 /* Reset Source: CORER */
#define PF0_MBX_PSM_ARQBAH_ARQBAH_S 0
#define PF0_MBX_PSM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_MBX_PSM_ARQBAL 0x0022E624 /* Reset Source: CORER */
#define PF0_MBX_PSM_ARQBAL_ARQBAL_LSB_S 0
#define PF0_MBX_PSM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF0_MBX_PSM_ARQBAL_ARQBAL_S 6
#define PF0_MBX_PSM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_MBX_PSM_ARQH 0x0022E630 /* Reset Source: CORER */
#define PF0_MBX_PSM_ARQH_ARQH_S 0
#define PF0_MBX_PSM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_PSM_ARQLEN 0x0022E62C /* Reset Source: PFR */
#define PF0_MBX_PSM_ARQLEN_ARQLEN_S 0
#define PF0_MBX_PSM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_PSM_ARQLEN_ARQVFE_S 28
#define PF0_MBX_PSM_ARQLEN_ARQVFE_M BIT(28)
#define PF0_MBX_PSM_ARQLEN_ARQOVFL_S 29
#define PF0_MBX_PSM_ARQLEN_ARQOVFL_M BIT(29)
#define PF0_MBX_PSM_ARQLEN_ARQCRIT_S 30
#define PF0_MBX_PSM_ARQLEN_ARQCRIT_M BIT(30)
#define PF0_MBX_PSM_ARQLEN_ARQENABLE_S 31
#define PF0_MBX_PSM_ARQLEN_ARQENABLE_M BIT(31)
#define PF0_MBX_PSM_ARQT 0x0022E634 /* Reset Source: CORER */
#define PF0_MBX_PSM_ARQT_ARQT_S 0
#define PF0_MBX_PSM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_PSM_ATQBAH 0x0022E614 /* Reset Source: CORER */
#define PF0_MBX_PSM_ATQBAH_ATQBAH_S 0
#define PF0_MBX_PSM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_MBX_PSM_ATQBAL 0x0022E610 /* Reset Source: CORER */
#define PF0_MBX_PSM_ATQBAL_ATQBAL_S 6
#define PF0_MBX_PSM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_MBX_PSM_ATQH 0x0022E61C /* Reset Source: CORER */
#define PF0_MBX_PSM_ATQH_ATQH_S 0
#define PF0_MBX_PSM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_PSM_ATQLEN 0x0022E618 /* Reset Source: PFR */
#define PF0_MBX_PSM_ATQLEN_ATQLEN_S 0
#define PF0_MBX_PSM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_MBX_PSM_ATQLEN_ATQVFE_S 28
#define PF0_MBX_PSM_ATQLEN_ATQVFE_M BIT(28)
#define PF0_MBX_PSM_ATQLEN_ATQOVFL_S 29
#define PF0_MBX_PSM_ATQLEN_ATQOVFL_M BIT(29)
#define PF0_MBX_PSM_ATQLEN_ATQCRIT_S 30
#define PF0_MBX_PSM_ATQLEN_ATQCRIT_M BIT(30)
#define PF0_MBX_PSM_ATQLEN_ATQENABLE_S 31
#define PF0_MBX_PSM_ATQLEN_ATQENABLE_M BIT(31)
#define PF0_MBX_PSM_ATQT 0x0022E620 /* Reset Source: CORER */
#define PF0_MBX_PSM_ATQT_ATQT_S 0
#define PF0_MBX_PSM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define PF0_SB_CPM_ARQBAH 0x0022E650 /* Reset Source: CORER */
#define PF0_SB_CPM_ARQBAH_ARQBAH_S 0
#define PF0_SB_CPM_ARQBAH_ARQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_SB_CPM_ARQBAL 0x0022E64C /* Reset Source: CORER */
#define PF0_SB_CPM_ARQBAL_ARQBAL_LSB_S 0
#define PF0_SB_CPM_ARQBAL_ARQBAL_LSB_M MAKEMASK(0x3F, 0)
#define PF0_SB_CPM_ARQBAL_ARQBAL_S 6
#define PF0_SB_CPM_ARQBAL_ARQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_SB_CPM_ARQH 0x0022E658 /* Reset Source: CORER */
#define PF0_SB_CPM_ARQH_ARQH_S 0
#define PF0_SB_CPM_ARQH_ARQH_M MAKEMASK(0x3FF, 0)
#define PF0_SB_CPM_ARQLEN 0x0022E654 /* Reset Source: PFR */
#define PF0_SB_CPM_ARQLEN_ARQLEN_S 0
#define PF0_SB_CPM_ARQLEN_ARQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_SB_CPM_ARQLEN_ARQVFE_S 28
#define PF0_SB_CPM_ARQLEN_ARQVFE_M BIT(28)
#define PF0_SB_CPM_ARQLEN_ARQOVFL_S 29
#define PF0_SB_CPM_ARQLEN_ARQOVFL_M BIT(29)
#define PF0_SB_CPM_ARQLEN_ARQCRIT_S 30
#define PF0_SB_CPM_ARQLEN_ARQCRIT_M BIT(30)
#define PF0_SB_CPM_ARQLEN_ARQENABLE_S 31
#define PF0_SB_CPM_ARQLEN_ARQENABLE_M BIT(31)
#define PF0_SB_CPM_ARQT 0x0022E65C /* Reset Source: CORER */
#define PF0_SB_CPM_ARQT_ARQT_S 0
#define PF0_SB_CPM_ARQT_ARQT_M MAKEMASK(0x3FF, 0)
#define PF0_SB_CPM_ATQBAH 0x0022E63C /* Reset Source: CORER */
#define PF0_SB_CPM_ATQBAH_ATQBAH_S 0
#define PF0_SB_CPM_ATQBAH_ATQBAH_M MAKEMASK(0xFFFFFFFF, 0)
#define PF0_SB_CPM_ATQBAL 0x0022E638 /* Reset Source: CORER */
#define PF0_SB_CPM_ATQBAL_ATQBAL_S 6
#define PF0_SB_CPM_ATQBAL_ATQBAL_M MAKEMASK(0x3FFFFFF, 6)
#define PF0_SB_CPM_ATQH 0x0022E644 /* Reset Source: CORER */
#define PF0_SB_CPM_ATQH_ATQH_S 0
#define PF0_SB_CPM_ATQH_ATQH_M MAKEMASK(0x3FF, 0)
#define PF0_SB_CPM_ATQLEN 0x0022E640 /* Reset Source: PFR */
#define PF0_SB_CPM_ATQLEN_ATQLEN_S 0
#define PF0_SB_CPM_ATQLEN_ATQLEN_M MAKEMASK(0x3FF, 0)
#define PF0_SB_CPM_ATQLEN_ATQVFE_S 28
#define PF0_SB_CPM_ATQLEN_ATQVFE_M BIT(28)
#define PF0_SB_CPM_ATQLEN_ATQOVFL_S 29
#define PF0_SB_CPM_ATQLEN_ATQOVFL_M BIT(29)
#define PF0_SB_CPM_ATQLEN_ATQCRIT_S 30
#define PF0_SB_CPM_ATQLEN_ATQCRIT_M BIT(30)
#define PF0_SB_CPM_ATQLEN_ATQENABLE_S 31
#define PF0_SB_CPM_ATQLEN_ATQENABLE_M BIT(31)
#define PF0_SB_CPM_ATQT 0x0022E648 /* Reset Source: CORER */
#define PF0_SB_CPM_ATQT_ATQT_S 0
#define PF0_SB_CPM_ATQT_ATQT_M MAKEMASK(0x3FF, 0)
#define PF0_SB_CPM_REM_DEV_CTL 0x002300F4 /* Reset Source: CORER */
#define PF0_SB_CPM_REM_DEV_CTL_DEST_EN_S 0
#define PF0_SB_CPM_REM_DEV_CTL_DEST_EN_M MAKEMA