Index: head/sys/conf/files.amd64
===================================================================
--- head/sys/conf/files.amd64	(revision 339361)
+++ head/sys/conf/files.amd64	(revision 339362)
@@ -1,757 +1,757 @@
 # This file tells config what files go into building a kernel,
 # files marked standard are always included.
 #
 # $FreeBSD$
 #
 # The long compile-with and dependency lines are required because of
 # limitations in config: backslash-newline doesn't work in strings, and
 # dependency lines other than the first are silently ignored.
 #
 #
 cloudabi32_vdso.o optional compat_cloudabi32 \
 	dependency "$S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S" \
 	compile-with "${CC} -x assembler-with-cpp -m32 -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S -o ${.TARGET}" \
 	no-obj no-implicit-rule \
 	clean "cloudabi32_vdso.o"
 #
 cloudabi32_vdso_blob.o optional compat_cloudabi32 \
 	dependency "cloudabi32_vdso.o" \
 	compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi32_vdso.o ${.TARGET}" \
 	no-implicit-rule \
 	clean "cloudabi32_vdso_blob.o"
 #
 cloudabi64_vdso.o optional compat_cloudabi64 \
 	dependency "$S/contrib/cloudabi/cloudabi_vdso_x86_64.S" \
 	compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_x86_64.S -o ${.TARGET}" \
 	no-obj no-implicit-rule \
 	clean "cloudabi64_vdso.o"
 #
 cloudabi64_vdso_blob.o optional compat_cloudabi64 \
 	dependency "cloudabi64_vdso.o" \
 	compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi64_vdso.o ${.TARGET}" \
 	no-implicit-rule \
 	clean "cloudabi64_vdso_blob.o"
 #
 linux32_genassym.o optional compat_linux32 \
 	dependency "$S/amd64/linux32/linux32_genassym.c offset.inc" \
 	compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -c ${.IMPSRC}" \
 	no-obj no-implicit-rule \
 	clean "linux32_genassym.o"
 #
 linux32_assym.h optional compat_linux32 \
 	dependency "$S/kern/genassym.sh linux32_genassym.o" \
 	compile-with "sh $S/kern/genassym.sh linux32_genassym.o > ${.TARGET}" \
 	no-obj no-implicit-rule before-depend \
 	clean "linux32_assym.h"
 #
 linux32_locore.o optional compat_linux32 \
 	dependency "linux32_assym.h $S/amd64/linux32/linux32_locore.s" \
 	compile-with "${CC} -x assembler-with-cpp -DLOCORE -m32 -shared -s -pipe -I.
-I$S -Werror -Wall -fPIC -fno-common -nostdinc -nostdlib -Wl,-T$S/amd64/linux32/linux32_vdso.lds.s -Wl,-soname=linux32_vdso.so,--eh-frame-hdr,-warn-common ${.IMPSRC} -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "linux32_locore.o" # linux32_vdso.so optional compat_linux32 \ dependency "linux32_locore.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 linux32_locore.o ${.TARGET}" \ no-implicit-rule \ clean "linux32_vdso.so" # ia32_genassym.o standard \ dependency "$S/compat/ia32/ia32_genassym.c offset.inc" \ compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "ia32_genassym.o" # ia32_assym.h standard \ dependency "$S/kern/genassym.sh ia32_genassym.o" \ compile-with "env NM='${NM}' NMFLAGS='${NMFLAGS}' sh $S/kern/genassym.sh ia32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "ia32_assym.h" # font.h optional sc_dflt_font \ compile-with "uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'static u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'static u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'static u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" # atkbdmap.h optional atkbd_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${ATKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > atkbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "atkbdmap.h" # ukbdmap.h optional ukbd_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${UKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > ukbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "ukbdmap.h" # hpt27xx_lib.o optional hpt27xx \ dependency "$S/dev/hpt27xx/amd64-elf.hpt27xx_lib.o.uu" \ compile-with "uudecode < $S/dev/hpt27xx/amd64-elf.hpt27xx_lib.o.uu" \ no-implicit-rule # hptmvraid.o optional hptmv \ dependency "$S/dev/hptmv/amd64-elf.raid.o.uu" \ compile-with "uudecode < $S/dev/hptmv/amd64-elf.raid.o.uu" \ no-implicit-rule # hptnr_lib.o optional hptnr \ dependency "$S/dev/hptnr/amd64-elf.hptnr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptnr/amd64-elf.hptnr_lib.o.uu" \ no-implicit-rule # hptrr_lib.o optional hptrr \ dependency "$S/dev/hptrr/amd64-elf.hptrr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptrr/amd64-elf.hptrr_lib.o.uu" \ no-implicit-rule # amd64/acpica/acpi_machdep.c optional acpi acpi_wakecode.o optional acpi \ dependency "$S/amd64/acpica/acpi_wakecode.S assym.inc" \ compile-with "${NORMAL_S}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.o" acpi_wakecode.bin optional acpi \ dependency "acpi_wakecode.o" \ compile-with "${OBJCOPY} -S -O binary acpi_wakecode.o ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.bin" acpi_wakecode.h optional acpi \ dependency "acpi_wakecode.bin" \ compile-with "file2c -sx 'static char wakecode[] = {' '};' < acpi_wakecode.bin > ${.TARGET}" \ no-obj 
no-implicit-rule before-depend \ clean "acpi_wakecode.h" acpi_wakedata.h optional acpi \ dependency "acpi_wakecode.o" \ compile-with '${NM} -n --defined-only acpi_wakecode.o | while read offset dummy what; do echo "#define $${what} 0x$${offset}"; done > ${.TARGET}' \ no-obj no-implicit-rule before-depend \ clean "acpi_wakedata.h" # #amd64/amd64/apic_vector.S standard amd64/amd64/bios.c standard amd64/amd64/bpf_jit_machdep.c optional bpf_jitter amd64/amd64/copyout.c standard amd64/amd64/cpu_switch.S standard amd64/amd64/db_disasm.c optional ddb amd64/amd64/db_interface.c optional ddb amd64/amd64/db_trace.c optional ddb amd64/amd64/efirt_machdep.c optional efirt amd64/amd64/efirt_support.S optional efirt amd64/amd64/elf_machdep.c standard amd64/amd64/exception.S standard amd64/amd64/fpu.c standard amd64/amd64/gdb_machdep.c optional gdb amd64/amd64/in_cksum.c optional inet | inet6 amd64/amd64/initcpu.c standard amd64/amd64/io.c optional io amd64/amd64/locore.S standard no-obj amd64/amd64/xen-locore.S optional xenhvm amd64/amd64/machdep.c standard amd64/amd64/mem.c optional mem amd64/amd64/minidump_machdep.c standard amd64/amd64/mp_machdep.c optional smp amd64/amd64/mpboot.S optional smp amd64/amd64/pmap.c standard amd64/amd64/prof_machdep.c optional profiling-routine amd64/amd64/ptrace_machdep.c standard amd64/amd64/sigtramp.S standard amd64/amd64/support.S standard amd64/amd64/sys_machdep.c standard amd64/amd64/trap.c standard amd64/amd64/uio_machdep.c standard amd64/amd64/uma_machdep.c standard amd64/amd64/vm_machdep.c standard amd64/cloudabi32/cloudabi32_sysvec.c optional compat_cloudabi32 amd64/cloudabi64/cloudabi64_sysvec.c optional compat_cloudabi64 amd64/pci/pci_cfgreg.c optional pci cddl/contrib/opensolaris/common/atomic/amd64/opensolaris_atomic.S optional zfs | dtrace compile-with "${ZFS_S}" cddl/dev/dtrace/amd64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/amd64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/x86/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/dtrace/x86/dis_tables.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" cddl/dev/dtrace/x86/instr_size.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" crypto/aesni/aeskeys_amd64.S optional aesni crypto/aesni/aesni.c optional aesni aesni_ghash.o optional aesni \ dependency "$S/crypto/aesni/aesni_ghash.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_ghash.o" aesni_wrap.o optional aesni \ dependency "$S/crypto/aesni/aesni_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_wrap.o" crypto/blowfish/bf_enc.c optional crypto | ipsec | ipsec_support crypto/des/des_enc.c optional crypto | ipsec | \ ipsec_support | netsmb intel_sha1.o optional aesni \ dependency "$S/crypto/aesni/intel_sha1.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -msha ${.IMPSRC}" \ no-implicit-rule \ clean "intel_sha1.o" intel_sha256.o optional aesni \ dependency "$S/crypto/aesni/intel_sha256.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -msha ${.IMPSRC}" \ no-implicit-rule \ clean "intel_sha256.o" crypto/via/padlock.c optional padlock crypto/via/padlock_cipher.c optional padlock crypto/via/padlock_hash.c optional 
padlock
 dev/acpica/acpi_if.m standard
 dev/acpica/acpi_hpet.c optional acpi
 dev/acpica/acpi_pci.c optional acpi pci
 dev/acpica/acpi_pci_link.c optional acpi pci
 dev/acpica/acpi_pcib.c optional acpi pci
 dev/acpica/acpi_pcib_acpi.c optional acpi pci
 dev/acpica/acpi_pcib_pci.c optional acpi pci
 dev/acpica/acpi_timer.c optional acpi
 dev/acpi_support/acpi_wmi_if.m standard
 dev/agp/agp_amd64.c optional agp
 dev/agp/agp_i810.c optional agp
 dev/agp/agp_via.c optional agp
 dev/amdsbwd/amdsbwd.c optional amdsbwd
 dev/amdsmn/amdsmn.c optional amdsmn | amdtemp
 dev/amdtemp/amdtemp.c optional amdtemp
 dev/arcmsr/arcmsr.c optional arcmsr pci
 dev/asmc/asmc.c optional asmc isa
 dev/atkbdc/atkbd.c optional atkbd atkbdc
 dev/atkbdc/atkbd_atkbdc.c optional atkbd atkbdc
 dev/atkbdc/atkbdc.c optional atkbdc
 dev/atkbdc/atkbdc_isa.c optional atkbdc isa
 dev/atkbdc/atkbdc_subr.c optional atkbdc
 dev/atkbdc/psm.c optional psm atkbdc
 dev/bxe/bxe.c optional bxe pci
 dev/bxe/bxe_stats.c optional bxe pci
 dev/bxe/bxe_debug.c optional bxe pci
 dev/bxe/ecore_sp.c optional bxe pci
 dev/bxe/bxe_elink.c optional bxe pci
 dev/bxe/57710_init_values.c optional bxe pci
 dev/bxe/57711_init_values.c optional bxe pci
 dev/bxe/57712_init_values.c optional bxe pci
 dev/coretemp/coretemp.c optional coretemp
 dev/cpuctl/cpuctl.c optional cpuctl
 dev/dpms/dpms.c optional dpms
 # There are no systems with isa slots, so all ed isa entries should go..
 dev/ed/if_ed_3c503.c optional ed isa ed_3c503
 dev/ed/if_ed_isa.c optional ed isa
 dev/ed/if_ed_wd80x3.c optional ed isa
 dev/ed/if_ed_hpp.c optional ed isa ed_hpp
 dev/ed/if_ed_sic.c optional ed isa ed_sic
 dev/fb/fb.c optional fb | vga
 dev/fb/s3_pci.c optional s3pci
 dev/fb/vesa.c optional vga vesa
 dev/fb/vga.c optional vga
 dev/ichwd/ichwd.c optional ichwd
 dev/if_ndis/if_ndis.c optional ndis
 dev/if_ndis/if_ndis_pccard.c optional ndis pccard
 dev/if_ndis/if_ndis_pci.c optional ndis cardbus | ndis pci
 dev/if_ndis/if_ndis_usb.c optional ndis usb
 dev/imcsmb/imcsmb.c optional imcsmb
 dev/imcsmb/imcsmb_pci.c optional imcsmb pci
 dev/intel/spi.c optional intelspi
 dev/io/iodev.c optional io
 dev/ioat/ioat.c optional ioat pci
 dev/ioat/ioat_test.c optional ioat pci
 dev/ipmi/ipmi.c optional ipmi
 dev/ipmi/ipmi_acpi.c optional ipmi acpi
 dev/ipmi/ipmi_isa.c optional ipmi isa
 dev/ipmi/ipmi_kcs.c optional ipmi
 dev/ipmi/ipmi_smic.c optional ipmi
 dev/ipmi/ipmi_smbus.c optional ipmi smbus
 dev/ipmi/ipmi_smbios.c optional ipmi
 dev/ipmi/ipmi_ssif.c optional ipmi smbus
 dev/ipmi/ipmi_pci.c optional ipmi pci
 dev/ipmi/ipmi_linux.c optional ipmi compat_linux32
 dev/ixl/if_ixl.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/ixl_pf_main.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/ixl_pf_qmgr.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/ixl_pf_i2c.c optional ixl pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/if_ixlv.c optional iavf pci \
+dev/ixl/if_iavf.c optional iavf pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
-dev/ixl/ixlvc.c optional iavf pci \
+dev/ixl/iavf_vc.c optional iavf pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/ixl_txrx.c optional ixl pci | iavf pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/i40e_osdep.c optional ixl pci | iavf pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/i40e_lan_hmc.c optional ixl pci | iavf pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
 dev/ixl/i40e_hmc.c optional ixl pci | iavf pci \
 	compile-with "${NORMAL_C} -I$S/dev/ixl"
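The two -/+ pairs above are the only change this revision makes to files.amd64: the VF driver sources are renamed from if_ixlv.c/ixlvc.c to if_iavf.c/iavf_vc.c, while their build gating (`optional iavf pci`) and compile flags stay the same. The module name userland sees comes from the driver registration inside the C source, not from the file name, so the rename is invisible to kldload(8) and loader.conf(5). For reference, a condensed sketch of that registration, taken from the removed if_ixlv.c shown later in this diff (trimmed; the sysctl tunables and iflib glue are omitted):

/*
 * Condensed from the registration block of the removed if_ixlv.c
 * (see the full listing below); illustrative only, not new code.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>

#include "ixlv.h"	/* struct ixlv_sc, ixlv_register() */

static device_method_t ixlv_methods[] = {
	/* probe/attach/detach are all delegated to iflib */
	DEVMETHOD(device_register, ixlv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

/* The "iavf" name string, not the source file name, is user-visible. */
static driver_t ixlv_driver = {
	"iavf", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(iavf, pci, ixlv_driver, ixlv_devclass, 0, 0);
MODULE_VERSION(iavf, 1);
MODULE_DEPEND(iavf, pci, 1, 1, 1);
MODULE_DEPEND(iavf, ether, 1, 1, 1);
MODULE_DEPEND(iavf, iflib, 1, 1, 1);

A kernel config would presumably still enable this driver with `device iavf` (alongside `device pci` and `device iflib`), matching the `optional iavf pci` gating above.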
dev/ixl/i40e_common.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_nvm.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_adminq.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_dcb.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/fdc/fdc.c optional fdc dev/fdc/fdc_acpi.c optional fdc dev/fdc/fdc_isa.c optional fdc isa dev/fdc/fdc_pccard.c optional fdc pccard dev/gpio/bytgpio.c optional bytgpio dev/gpio/chvgpio.c optional chvgpio dev/hpt27xx/hpt27xx_os_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_osm_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_config.c optional hpt27xx dev/hptmv/entry.c optional hptmv dev/hptmv/mv.c optional hptmv dev/hptmv/gui_lib.c optional hptmv dev/hptmv/hptproc.c optional hptmv dev/hptmv/ioctl.c optional hptmv dev/hptnr/hptnr_os_bsd.c optional hptnr dev/hptnr/hptnr_osm_bsd.c optional hptnr dev/hptnr/hptnr_config.c optional hptnr dev/hptrr/hptrr_os_bsd.c optional hptrr dev/hptrr/hptrr_osm_bsd.c optional hptrr dev/hptrr/hptrr_config.c optional hptrr dev/hwpmc/hwpmc_amd.c optional hwpmc dev/hwpmc/hwpmc_intel.c optional hwpmc dev/hwpmc/hwpmc_core.c optional hwpmc dev/hwpmc/hwpmc_uncore.c optional hwpmc dev/hwpmc/hwpmc_tsc.c optional hwpmc dev/hwpmc/hwpmc_x86.c optional hwpmc dev/hyperv/input/hv_kbd.c optional hyperv dev/hyperv/input/hv_kbdc.c optional hyperv dev/hyperv/pcib/vmbus_pcib.c optional hyperv pci dev/hyperv/netvsc/hn_nvs.c optional hyperv dev/hyperv/netvsc/hn_rndis.c optional hyperv dev/hyperv/netvsc/if_hn.c optional hyperv dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c optional hyperv dev/hyperv/utilities/hv_kvp.c optional hyperv dev/hyperv/utilities/hv_snapshot.c optional hyperv dev/hyperv/utilities/vmbus_heartbeat.c optional hyperv dev/hyperv/utilities/vmbus_ic.c optional hyperv dev/hyperv/utilities/vmbus_shutdown.c optional hyperv dev/hyperv/utilities/vmbus_timesync.c optional hyperv dev/hyperv/vmbus/hyperv.c optional hyperv dev/hyperv/vmbus/hyperv_busdma.c optional hyperv dev/hyperv/vmbus/vmbus.c optional hyperv pci dev/hyperv/vmbus/vmbus_br.c optional hyperv dev/hyperv/vmbus/vmbus_chan.c optional hyperv dev/hyperv/vmbus/vmbus_et.c optional hyperv dev/hyperv/vmbus/vmbus_if.m optional hyperv dev/hyperv/vmbus/vmbus_res.c optional hyperv dev/hyperv/vmbus/vmbus_xact.c optional hyperv dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv dev/nctgpio/nctgpio.c optional nctgpio dev/nfe/if_nfe.c optional nfe pci dev/ntb/if_ntb/if_ntb.c optional if_ntb dev/ntb/ntb_transport.c optional ntb_transport | if_ntb dev/ntb/ntb.c optional ntb | ntb_transport | if_ntb | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_if.m optional ntb | ntb_transport | if_ntb | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_hw/ntb_hw_intel.c optional ntb_hw_intel | ntb_hw dev/ntb/ntb_hw/ntb_hw_plx.c optional ntb_hw_plx | ntb_hw dev/nvd/nvd.c optional nvd nvme dev/nvme/nvme.c optional nvme dev/nvme/nvme_ctrlr.c optional nvme dev/nvme/nvme_ctrlr_cmd.c optional nvme dev/nvme/nvme_ns.c optional nvme dev/nvme/nvme_ns_cmd.c optional nvme dev/nvme/nvme_qpair.c optional nvme dev/nvme/nvme_sim.c optional nvme scbus dev/nvme/nvme_sysctl.c optional nvme dev/nvme/nvme_test.c optional nvme dev/nvme/nvme_util.c optional nvme dev/nvram/nvram.c optional nvram isa dev/random/ivy.c optional rdrand_rng dev/random/nehemiah.c optional padlock_rng dev/qlxge/qls_dbg.c optional qlxge pci dev/qlxge/qls_dump.c optional qlxge pci dev/qlxge/qls_hw.c 
optional qlxge pci dev/qlxge/qls_ioctl.c optional qlxge pci dev/qlxge/qls_isr.c optional qlxge pci dev/qlxge/qls_os.c optional qlxge pci dev/qlxgb/qla_dbg.c optional qlxgb pci dev/qlxgb/qla_hw.c optional qlxgb pci dev/qlxgb/qla_ioctl.c optional qlxgb pci dev/qlxgb/qla_isr.c optional qlxgb pci dev/qlxgb/qla_misc.c optional qlxgb pci dev/qlxgb/qla_os.c optional qlxgb pci dev/qlxgbe/ql_dbg.c optional qlxgbe pci dev/qlxgbe/ql_hw.c optional qlxgbe pci dev/qlxgbe/ql_ioctl.c optional qlxgbe pci dev/qlxgbe/ql_isr.c optional qlxgbe pci dev/qlxgbe/ql_misc.c optional qlxgbe pci dev/qlxgbe/ql_os.c optional qlxgbe pci dev/qlxgbe/ql_reset.c optional qlxgbe pci dev/qlnx/qlnxe/ecore_cxt.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dbg_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dcbx.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dev.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_hw.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_ops.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_int.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_l2.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_mcp.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_sp_commands.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_spq.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_ioctl.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_os.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/sfxge/common/ef10_ev.c optional sfxge pci dev/sfxge/common/ef10_filter.c optional sfxge pci dev/sfxge/common/ef10_intr.c optional sfxge pci dev/sfxge/common/ef10_mac.c optional sfxge pci dev/sfxge/common/ef10_mcdi.c optional sfxge pci dev/sfxge/common/ef10_nic.c optional sfxge pci dev/sfxge/common/ef10_nvram.c optional sfxge pci dev/sfxge/common/ef10_phy.c optional sfxge pci dev/sfxge/common/ef10_rx.c optional sfxge pci dev/sfxge/common/ef10_tx.c optional sfxge pci dev/sfxge/common/ef10_vpd.c optional sfxge pci dev/sfxge/common/efx_bootcfg.c optional sfxge pci dev/sfxge/common/efx_crc32.c optional sfxge pci dev/sfxge/common/efx_ev.c optional sfxge pci dev/sfxge/common/efx_filter.c optional sfxge pci dev/sfxge/common/efx_hash.c optional sfxge pci dev/sfxge/common/efx_intr.c optional sfxge pci dev/sfxge/common/efx_lic.c optional sfxge pci dev/sfxge/common/efx_mac.c optional sfxge pci dev/sfxge/common/efx_mcdi.c optional sfxge pci dev/sfxge/common/efx_mon.c optional sfxge pci dev/sfxge/common/efx_nic.c optional sfxge pci dev/sfxge/common/efx_nvram.c optional sfxge pci dev/sfxge/common/efx_phy.c optional sfxge pci dev/sfxge/common/efx_port.c optional sfxge pci dev/sfxge/common/efx_rx.c optional sfxge pci dev/sfxge/common/efx_sram.c optional sfxge pci dev/sfxge/common/efx_tx.c optional sfxge pci dev/sfxge/common/efx_vpd.c optional sfxge pci dev/sfxge/common/hunt_nic.c optional sfxge pci dev/sfxge/common/mcdi_mon.c optional sfxge pci dev/sfxge/common/medford_nic.c optional sfxge pci dev/sfxge/common/siena_mac.c optional sfxge pci dev/sfxge/common/siena_mcdi.c optional sfxge pci dev/sfxge/common/siena_nic.c optional sfxge pci dev/sfxge/common/siena_nvram.c optional sfxge pci dev/sfxge/common/siena_phy.c optional sfxge pci dev/sfxge/common/siena_sram.c optional 
sfxge pci dev/sfxge/common/siena_vpd.c optional sfxge pci dev/sfxge/sfxge.c optional sfxge pci dev/sfxge/sfxge_dma.c optional sfxge pci dev/sfxge/sfxge_ev.c optional sfxge pci dev/sfxge/sfxge_intr.c optional sfxge pci dev/sfxge/sfxge_mcdi.c optional sfxge pci dev/sfxge/sfxge_nvram.c optional sfxge pci dev/sfxge/sfxge_port.c optional sfxge pci dev/sfxge/sfxge_rx.c optional sfxge pci dev/sfxge/sfxge_tx.c optional sfxge pci dev/sio/sio.c optional sio dev/sio/sio_isa.c optional sio isa dev/sio/sio_pccard.c optional sio pccard dev/sio/sio_pci.c optional sio pci dev/sio/sio_puc.c optional sio puc dev/smartpqi/smartpqi_cam.c optional smartpqi dev/smartpqi/smartpqi_cmd.c optional smartpqi dev/smartpqi/smartpqi_discovery.c optional smartpqi dev/smartpqi/smartpqi_event.c optional smartpqi dev/smartpqi/smartpqi_helper.c optional smartpqi dev/smartpqi/smartpqi_init.c optional smartpqi dev/smartpqi/smartpqi_intr.c optional smartpqi dev/smartpqi/smartpqi_ioctl.c optional smartpqi dev/smartpqi/smartpqi_main.c optional smartpqi dev/smartpqi/smartpqi_mem.c optional smartpqi dev/smartpqi/smartpqi_misc.c optional smartpqi dev/smartpqi/smartpqi_queue.c optional smartpqi dev/smartpqi/smartpqi_request.c optional smartpqi dev/smartpqi/smartpqi_response.c optional smartpqi dev/smartpqi/smartpqi_sis.c optional smartpqi dev/smartpqi/smartpqi_tag.c optional smartpqi dev/speaker/spkr.c optional speaker dev/syscons/apm/apm_saver.c optional apm_saver apm dev/syscons/scterm-teken.c optional sc dev/syscons/scvesactl.c optional sc vga vesa dev/syscons/scvgarndr.c optional sc vga dev/syscons/scvtb.c optional sc dev/tpm/tpm.c optional tpm dev/tpm/tpm_acpi.c optional tpm acpi dev/tpm/tpm_isa.c optional tpm isa dev/uart/uart_cpu_x86.c optional uart dev/viawd/viawd.c optional viawd dev/vmware/vmxnet3/if_vmx.c optional vmx dev/vmware/vmci/vmci.c optional vmci dev/vmware/vmci/vmci_datagram.c optional vmci dev/vmware/vmci/vmci_doorbell.c optional vmci dev/vmware/vmci/vmci_driver.c optional vmci dev/vmware/vmci/vmci_event.c optional vmci dev/vmware/vmci/vmci_hashtable.c optional vmci dev/vmware/vmci/vmci_kernel_if.c optional vmci dev/vmware/vmci/vmci_qpair.c optional vmci dev/vmware/vmci/vmci_queue_pair.c optional vmci dev/vmware/vmci/vmci_resource.c optional vmci dev/wbwd/wbwd.c optional wbwd dev/xen/pci/xen_acpi_pci.c optional xenhvm dev/xen/pci/xen_pci.c optional xenhvm dev/isci/isci.c optional isci dev/isci/isci_controller.c optional isci dev/isci/isci_domain.c optional isci dev/isci/isci_interrupt.c optional isci dev/isci/isci_io_request.c optional isci dev/isci/isci_logger.c optional isci dev/isci/isci_oem_parameters.c optional isci dev/isci/isci_remote_device.c optional isci dev/isci/isci_sysctl.c optional isci dev/isci/isci_task_request.c optional isci dev/isci/isci_timer.c optional isci dev/isci/scil/sati.c optional isci dev/isci/scil/sati_abort_task_set.c optional isci dev/isci/scil/sati_atapi.c optional isci dev/isci/scil/sati_device.c optional isci dev/isci/scil/sati_inquiry.c optional isci dev/isci/scil/sati_log_sense.c optional isci dev/isci/scil/sati_lun_reset.c optional isci dev/isci/scil/sati_mode_pages.c optional isci dev/isci/scil/sati_mode_select.c optional isci dev/isci/scil/sati_mode_sense.c optional isci dev/isci/scil/sati_mode_sense_10.c optional isci dev/isci/scil/sati_mode_sense_6.c optional isci dev/isci/scil/sati_move.c optional isci dev/isci/scil/sati_passthrough.c optional isci dev/isci/scil/sati_read.c optional isci dev/isci/scil/sati_read_buffer.c optional isci dev/isci/scil/sati_read_capacity.c 
optional isci dev/isci/scil/sati_reassign_blocks.c optional isci dev/isci/scil/sati_report_luns.c optional isci dev/isci/scil/sati_request_sense.c optional isci dev/isci/scil/sati_start_stop_unit.c optional isci dev/isci/scil/sati_synchronize_cache.c optional isci dev/isci/scil/sati_test_unit_ready.c optional isci dev/isci/scil/sati_unmap.c optional isci dev/isci/scil/sati_util.c optional isci dev/isci/scil/sati_verify.c optional isci dev/isci/scil/sati_write.c optional isci dev/isci/scil/sati_write_and_verify.c optional isci dev/isci/scil/sati_write_buffer.c optional isci dev/isci/scil/sati_write_long.c optional isci dev/isci/scil/sci_abstract_list.c optional isci dev/isci/scil/sci_base_controller.c optional isci dev/isci/scil/sci_base_domain.c optional isci dev/isci/scil/sci_base_iterator.c optional isci dev/isci/scil/sci_base_library.c optional isci dev/isci/scil/sci_base_logger.c optional isci dev/isci/scil/sci_base_memory_descriptor_list.c optional isci dev/isci/scil/sci_base_memory_descriptor_list_decorator.c optional isci dev/isci/scil/sci_base_object.c optional isci dev/isci/scil/sci_base_observer.c optional isci dev/isci/scil/sci_base_phy.c optional isci dev/isci/scil/sci_base_port.c optional isci dev/isci/scil/sci_base_remote_device.c optional isci dev/isci/scil/sci_base_request.c optional isci dev/isci/scil/sci_base_state_machine.c optional isci dev/isci/scil/sci_base_state_machine_logger.c optional isci dev/isci/scil/sci_base_state_machine_observer.c optional isci dev/isci/scil/sci_base_subject.c optional isci dev/isci/scil/sci_util.c optional isci dev/isci/scil/scic_sds_controller.c optional isci dev/isci/scil/scic_sds_library.c optional isci dev/isci/scil/scic_sds_pci.c optional isci dev/isci/scil/scic_sds_phy.c optional isci dev/isci/scil/scic_sds_port.c optional isci dev/isci/scil/scic_sds_port_configuration_agent.c optional isci dev/isci/scil/scic_sds_remote_device.c optional isci dev/isci/scil/scic_sds_remote_node_context.c optional isci dev/isci/scil/scic_sds_remote_node_table.c optional isci dev/isci/scil/scic_sds_request.c optional isci dev/isci/scil/scic_sds_sgpio.c optional isci dev/isci/scil/scic_sds_smp_remote_device.c optional isci dev/isci/scil/scic_sds_smp_request.c optional isci dev/isci/scil/scic_sds_ssp_request.c optional isci dev/isci/scil/scic_sds_stp_packet_request.c optional isci dev/isci/scil/scic_sds_stp_remote_device.c optional isci dev/isci/scil/scic_sds_stp_request.c optional isci dev/isci/scil/scic_sds_unsolicited_frame_control.c optional isci dev/isci/scil/scif_sas_controller.c optional isci dev/isci/scil/scif_sas_controller_state_handlers.c optional isci dev/isci/scil/scif_sas_controller_states.c optional isci dev/isci/scil/scif_sas_domain.c optional isci dev/isci/scil/scif_sas_domain_state_handlers.c optional isci dev/isci/scil/scif_sas_domain_states.c optional isci dev/isci/scil/scif_sas_high_priority_request_queue.c optional isci dev/isci/scil/scif_sas_internal_io_request.c optional isci dev/isci/scil/scif_sas_io_request.c optional isci dev/isci/scil/scif_sas_io_request_state_handlers.c optional isci dev/isci/scil/scif_sas_io_request_states.c optional isci dev/isci/scil/scif_sas_library.c optional isci dev/isci/scil/scif_sas_remote_device.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substates.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substates.c optional 
isci dev/isci/scil/scif_sas_remote_device_state_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_states.c optional isci dev/isci/scil/scif_sas_request.c optional isci dev/isci/scil/scif_sas_smp_activity_clear_affiliation.c optional isci dev/isci/scil/scif_sas_smp_io_request.c optional isci dev/isci/scil/scif_sas_smp_phy.c optional isci dev/isci/scil/scif_sas_smp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_io_request.c optional isci dev/isci/scil/scif_sas_stp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_task_request.c optional isci dev/isci/scil/scif_sas_task_request.c optional isci dev/isci/scil/scif_sas_task_request_state_handlers.c optional isci dev/isci/scil/scif_sas_task_request_states.c optional isci dev/isci/scil/scif_sas_timer.c optional isci isa/syscons_isa.c optional sc isa/vga_isa.c optional vga kern/kern_clocksource.c standard kern/imgact_aout.c optional compat_aout kern/imgact_gzip.c optional gzip kern/link_elf_obj.c standard libkern/x86/crc32_sse42.c standard # # IA32 binary support # #amd64/ia32/ia32_exception.S optional compat_freebsd32 amd64/ia32/ia32_reg.c optional compat_freebsd32 amd64/ia32/ia32_signal.c optional compat_freebsd32 amd64/ia32/ia32_sigtramp.S optional compat_freebsd32 amd64/ia32/ia32_syscall.c optional compat_freebsd32 amd64/ia32/ia32_misc.c optional compat_freebsd32 compat/ia32/ia32_sysvec.c optional compat_freebsd32 compat/linprocfs/linprocfs.c optional linprocfs compat/linsysfs/linsysfs.c optional linsysfs # # Linux/i386 binary support # amd64/linux32/linux32_dummy.c optional compat_linux32 amd64/linux32/linux32_machdep.c optional compat_linux32 amd64/linux32/linux32_support.s optional compat_linux32 \ dependency "linux32_assym.h" amd64/linux32/linux32_sysent.c optional compat_linux32 amd64/linux32/linux32_sysvec.c optional compat_linux32 compat/linux/linux_emul.c optional compat_linux32 compat/linux/linux_errno.c optional compat_linux32 compat/linux/linux_file.c optional compat_linux32 compat/linux/linux_fork.c optional compat_linux32 compat/linux/linux_futex.c optional compat_linux32 compat/linux/linux_getcwd.c optional compat_linux32 compat/linux/linux_ioctl.c optional compat_linux32 compat/linux/linux_ipc.c optional compat_linux32 compat/linux/linux_mib.c optional compat_linux32 compat/linux/linux_misc.c optional compat_linux32 compat/linux/linux_mmap.c optional compat_linux32 compat/linux/linux_signal.c optional compat_linux32 compat/linux/linux_socket.c optional compat_linux32 compat/linux/linux_stats.c optional compat_linux32 compat/linux/linux_sysctl.c optional compat_linux32 compat/linux/linux_time.c optional compat_linux32 compat/linux/linux_timer.c optional compat_linux32 compat/linux/linux_uid16.c optional compat_linux32 compat/linux/linux_util.c optional compat_linux32 compat/linux/linux_vdso.c optional compat_linux32 compat/linux/linux_common.c optional compat_linux32 compat/linux/linux_event.c optional compat_linux32 compat/linux/linux.c optional compat_linux32 dev/amr/amr_linux.c optional compat_linux32 amr dev/mfi/mfi_linux.c optional compat_linux32 mfi # # Windows NDIS driver support # compat/ndis/kern_ndis.c optional ndisapi pci compat/ndis/kern_windrv.c optional ndisapi pci compat/ndis/subr_hal.c optional ndisapi pci compat/ndis/subr_ndis.c optional ndisapi pci compat/ndis/subr_ntoskrnl.c optional ndisapi pci compat/ndis/subr_pe.c optional ndisapi pci compat/ndis/subr_usbd.c optional ndisapi pci compat/ndis/winx64_wrap.S optional ndisapi pci # # x86 real mode BIOS emulator, required by 
dpms/pci/vesa
 #
 compat/x86bios/x86bios.c optional x86bios | dpms | pci | vesa
 contrib/x86emu/x86emu.c optional x86bios | dpms | pci | vesa
 #
 # bvm console
 #
 dev/bvm/bvm_console.c optional bvmconsole
 dev/bvm/bvm_dbg.c optional bvmdebug
 #
 # x86 shared code between IA32 and AMD64 architectures
 #
 x86/acpica/OsdEnvironment.c optional acpi
 x86/acpica/acpi_apm.c optional acpi
 x86/acpica/acpi_wakeup.c optional acpi
 x86/acpica/madt.c optional acpi
 x86/acpica/srat.c optional acpi
 x86/bios/smbios.c optional smbios
 x86/bios/vpd.c optional vpd
 x86/cpufreq/powernow.c optional cpufreq
 x86/cpufreq/est.c optional cpufreq
 x86/cpufreq/hwpstate.c optional cpufreq
 x86/cpufreq/p4tcc.c optional cpufreq
 x86/iommu/busdma_dmar.c optional acpi acpi_dmar pci
 x86/iommu/intel_ctx.c optional acpi acpi_dmar pci
 x86/iommu/intel_drv.c optional acpi acpi_dmar pci
 x86/iommu/intel_fault.c optional acpi acpi_dmar pci
 x86/iommu/intel_gas.c optional acpi acpi_dmar pci
 x86/iommu/intel_idpgtbl.c optional acpi acpi_dmar pci
 x86/iommu/intel_intrmap.c optional acpi acpi_dmar pci
 x86/iommu/intel_qi.c optional acpi acpi_dmar pci
 x86/iommu/intel_quirks.c optional acpi acpi_dmar pci
 x86/iommu/intel_utils.c optional acpi acpi_dmar pci
 x86/isa/atpic.c optional atpic isa
 x86/isa/atrtc.c standard
 x86/isa/clock.c standard
 x86/isa/elcr.c optional atpic isa | mptable
 x86/isa/isa.c standard
 x86/isa/isa_dma.c standard
 x86/isa/nmi.c standard
 x86/isa/orm.c optional isa
 x86/pci/pci_bus.c optional pci
 x86/pci/qpi.c optional pci
 x86/x86/autoconf.c standard
 x86/x86/bus_machdep.c standard
 x86/x86/busdma_bounce.c standard
 x86/x86/busdma_machdep.c standard
 x86/x86/cpu_machdep.c standard
 x86/x86/dump_machdep.c standard
 x86/x86/fdt_machdep.c optional fdt
 x86/x86/identcpu.c standard
 x86/x86/intr_machdep.c standard
 x86/x86/io_apic.c standard
 x86/x86/legacy.c standard
 x86/x86/local_apic.c standard
 x86/x86/mca.c standard
 x86/x86/x86_mem.c optional mem
 x86/x86/mptable.c optional mptable
 x86/x86/mptable_pci.c optional mptable pci
 x86/x86/mp_x86.c optional smp
 x86/x86/mp_watchdog.c optional mp_watchdog smp
 x86/x86/msi.c optional pci
 x86/x86/nexus.c standard
 x86/x86/pvclock.c standard
 x86/x86/stack_machdep.c optional ddb | stack
 x86/x86/tsc.c standard
 x86/x86/ucode.c standard
 x86/x86/delay.c standard
 x86/xen/hvm.c optional xenhvm
 x86/xen/xen_intr.c optional xenhvm
 x86/xen/pv.c optional xenhvm
 x86/xen/pvcpu_enum.c optional xenhvm
 x86/xen/xen_apic.c optional xenhvm
 x86/xen/xenpv.c optional xenhvm
 x86/xen/xen_nexus.c optional xenhvm
 x86/xen/xen_msi.c optional xenhvm
 x86/xen/xen_pci_bus.c optional xenhvm
Index: head/sys/dev/ixl/if_ixlv.c
===================================================================
--- head/sys/dev/ixl/if_ixlv.c	(revision 339361)
+++ head/sys/dev/ixl/if_ixlv.c	(nonexistent)
@@ -1,2435 +0,0 @@
-/******************************************************************************
-
-  Copyright (c) 2013-2018, Intel Corporation
-  All rights reserved.
-
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions are met:
-
-   1. Redistributions of source code must retain the above copyright notice,
-      this list of conditions and the following disclaimer.
-
-   2. Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in the
-      documentation and/or other materials provided with the distribution.
-
-   3.
Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - POSSIBILITY OF SUCH DAMAGE. - -******************************************************************************/ -/*$FreeBSD$*/ - -#include "ixlv.h" - -/********************************************************************* - * Driver version - *********************************************************************/ -#define IAVF_DRIVER_VERSION_MAJOR 2 -#define IAVF_DRIVER_VERSION_MINOR 0 -#define IAVF_DRIVER_VERSION_BUILD 0 - -#define IAVF_DRIVER_VERSION_STRING \ - __XSTRING(IAVF_DRIVER_VERSION_MAJOR) "." \ - __XSTRING(IAVF_DRIVER_VERSION_MINOR) "." \ - __XSTRING(IAVF_DRIVER_VERSION_BUILD) "-k" - -/********************************************************************* - * PCI Device ID Table - * - * Used by probe to select devices to load on - * - * ( Vendor ID, Device ID, Branding String ) - *********************************************************************/ - -static pci_vendor_info_t ixlv_vendor_info_array[] = -{ - PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Virtual Function 700 Series"), - PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, "Intel(R) Ethernet Virtual Function 700 Series (X722)"), - PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, "Intel(R) Ethernet Adaptive Virtual Function"), - /* required last entry */ - PVID_END -}; - -/********************************************************************* - * Function prototypes - *********************************************************************/ -static void *ixlv_register(device_t dev); -static int ixlv_if_attach_pre(if_ctx_t ctx); -static int ixlv_if_attach_post(if_ctx_t ctx); -static int ixlv_if_detach(if_ctx_t ctx); -static int ixlv_if_shutdown(if_ctx_t ctx); -static int ixlv_if_suspend(if_ctx_t ctx); -static int ixlv_if_resume(if_ctx_t ctx); -static int ixlv_if_msix_intr_assign(if_ctx_t ctx, int msix); -static void ixlv_if_enable_intr(if_ctx_t ctx); -static void ixlv_if_disable_intr(if_ctx_t ctx); -static int ixlv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); -static int ixlv_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); -static int ixlv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); -static int ixlv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); -static void ixlv_if_queues_free(if_ctx_t ctx); -static void ixlv_if_update_admin_status(if_ctx_t ctx); -static void ixlv_if_multi_set(if_ctx_t ctx); -static int ixlv_if_mtu_set(if_ctx_t ctx, uint32_t mtu); -static void ixlv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); -static int ixlv_if_media_change(if_ctx_t ctx); -static 
int ixlv_if_promisc_set(if_ctx_t ctx, int flags); -static void ixlv_if_timer(if_ctx_t ctx, uint16_t qid); -static void ixlv_if_vlan_register(if_ctx_t ctx, u16 vtag); -static void ixlv_if_vlan_unregister(if_ctx_t ctx, u16 vtag); -static uint64_t ixlv_if_get_counter(if_ctx_t ctx, ift_counter cnt); -static void ixlv_if_stop(if_ctx_t ctx); - -static int ixlv_allocate_pci_resources(struct ixlv_sc *); -static int ixlv_reset_complete(struct i40e_hw *); -static int ixlv_setup_vc(struct ixlv_sc *); -static int ixlv_reset(struct ixlv_sc *); -static int ixlv_vf_config(struct ixlv_sc *); -static void ixlv_init_filters(struct ixlv_sc *); -static void ixlv_free_pci_resources(struct ixlv_sc *); -static void ixlv_free_filters(struct ixlv_sc *); -static void ixlv_setup_interface(device_t, struct ixlv_sc *); -static void ixlv_add_device_sysctls(struct ixlv_sc *); -static void ixlv_enable_adminq_irq(struct i40e_hw *); -static void ixlv_disable_adminq_irq(struct i40e_hw *); -static void ixlv_enable_queue_irq(struct i40e_hw *, int); -static void ixlv_disable_queue_irq(struct i40e_hw *, int); -static void ixlv_config_rss(struct ixlv_sc *); -static void ixlv_stop(struct ixlv_sc *); - -static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16); -static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr); -static int ixlv_msix_que(void *); -static int ixlv_msix_adminq(void *); -//static void ixlv_del_multi(struct ixlv_sc *sc); -static void ixlv_init_multi(struct ixlv_sc *sc); -static void ixlv_configure_itr(struct ixlv_sc *sc); - -static int ixlv_sysctl_rx_itr(SYSCTL_HANDLER_ARGS); -static int ixlv_sysctl_tx_itr(SYSCTL_HANDLER_ARGS); -static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS); -static int ixlv_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); -static int ixlv_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS); -static int ixlv_sysctl_vf_reset(SYSCTL_HANDLER_ARGS); -static int ixlv_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS); - -char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed); -static void ixlv_save_tunables(struct ixlv_sc *); -static enum i40e_status_code - ixlv_process_adminq(struct ixlv_sc *, u16 *); -static int ixlv_send_vc_msg(struct ixlv_sc *sc, u32 op); -static int ixlv_send_vc_msg_sleep(struct ixlv_sc *sc, u32 op); - -/********************************************************************* - * FreeBSD Device Interface Entry Points - *********************************************************************/ - -static device_method_t ixlv_methods[] = { - /* Device interface */ - DEVMETHOD(device_register, ixlv_register), - DEVMETHOD(device_probe, iflib_device_probe), - DEVMETHOD(device_attach, iflib_device_attach), - DEVMETHOD(device_detach, iflib_device_detach), - DEVMETHOD(device_shutdown, iflib_device_shutdown), - DEVMETHOD_END -}; - -static driver_t ixlv_driver = { - "iavf", ixlv_methods, sizeof(struct ixlv_sc), -}; - -devclass_t ixlv_devclass; -DRIVER_MODULE(iavf, pci, ixlv_driver, ixlv_devclass, 0, 0); -MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision", - pci, iavf, ixlv_vendor_info_array, - nitems(ixlv_vendor_info_array) - 1); -MODULE_VERSION(iavf, 1); - -MODULE_DEPEND(iavf, pci, 1, 1, 1); -MODULE_DEPEND(iavf, ether, 1, 1, 1); -MODULE_DEPEND(iavf, iflib, 1, 1, 1); - -MALLOC_DEFINE(M_IXLV, "iavf", "iavf driver allocations"); - -static device_method_t ixlv_if_methods[] = { - DEVMETHOD(ifdi_attach_pre, ixlv_if_attach_pre), - DEVMETHOD(ifdi_attach_post, ixlv_if_attach_post), - DEVMETHOD(ifdi_detach, ixlv_if_detach), - DEVMETHOD(ifdi_shutdown, 
ixlv_if_shutdown), - DEVMETHOD(ifdi_suspend, ixlv_if_suspend), - DEVMETHOD(ifdi_resume, ixlv_if_resume), - DEVMETHOD(ifdi_init, ixlv_if_init), - DEVMETHOD(ifdi_stop, ixlv_if_stop), - DEVMETHOD(ifdi_msix_intr_assign, ixlv_if_msix_intr_assign), - DEVMETHOD(ifdi_intr_enable, ixlv_if_enable_intr), - DEVMETHOD(ifdi_intr_disable, ixlv_if_disable_intr), - DEVMETHOD(ifdi_rx_queue_intr_enable, ixlv_if_rx_queue_intr_enable), - DEVMETHOD(ifdi_tx_queue_intr_enable, ixlv_if_tx_queue_intr_enable), - DEVMETHOD(ifdi_tx_queues_alloc, ixlv_if_tx_queues_alloc), - DEVMETHOD(ifdi_rx_queues_alloc, ixlv_if_rx_queues_alloc), - DEVMETHOD(ifdi_queues_free, ixlv_if_queues_free), - DEVMETHOD(ifdi_update_admin_status, ixlv_if_update_admin_status), - DEVMETHOD(ifdi_multi_set, ixlv_if_multi_set), - DEVMETHOD(ifdi_mtu_set, ixlv_if_mtu_set), - DEVMETHOD(ifdi_media_status, ixlv_if_media_status), - DEVMETHOD(ifdi_media_change, ixlv_if_media_change), - DEVMETHOD(ifdi_promisc_set, ixlv_if_promisc_set), - DEVMETHOD(ifdi_timer, ixlv_if_timer), - DEVMETHOD(ifdi_vlan_register, ixlv_if_vlan_register), - DEVMETHOD(ifdi_vlan_unregister, ixlv_if_vlan_unregister), - DEVMETHOD(ifdi_get_counter, ixlv_if_get_counter), - DEVMETHOD_END -}; - -static driver_t ixlv_if_driver = { - "iavf_if", ixlv_if_methods, sizeof(struct ixlv_sc) -}; - -/* -** TUNEABLE PARAMETERS: -*/ - -static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD, 0, - "iavf driver parameters"); - -/* - * Different method for processing TX descriptor - * completion. - */ -static int ixlv_enable_head_writeback = 0; -TUNABLE_INT("hw.iavf.enable_head_writeback", - &ixlv_enable_head_writeback); -SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, - &ixlv_enable_head_writeback, 0, - "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); - -static int ixlv_core_debug_mask = 0; -TUNABLE_INT("hw.iavf.core_debug_mask", - &ixlv_core_debug_mask); -SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN, - &ixlv_core_debug_mask, 0, - "Display debug statements that are printed in non-shared code"); - -static int ixlv_shared_debug_mask = 0; -TUNABLE_INT("hw.iavf.shared_debug_mask", - &ixlv_shared_debug_mask); -SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN, - &ixlv_shared_debug_mask, 0, - "Display debug statements that are printed in shared code"); - -int ixlv_rx_itr = IXL_ITR_8K; -TUNABLE_INT("hw.iavf.rx_itr", &ixlv_rx_itr); -SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN, - &ixlv_rx_itr, 0, "RX Interrupt Rate"); - -int ixlv_tx_itr = IXL_ITR_4K; -TUNABLE_INT("hw.iavf.tx_itr", &ixlv_tx_itr); -SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN, - &ixlv_tx_itr, 0, "TX Interrupt Rate"); - -extern struct if_txrx ixl_txrx_hwb; -extern struct if_txrx ixl_txrx_dwb; - -static struct if_shared_ctx ixlv_sctx_init = { - .isc_magic = IFLIB_MAGIC, - .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ - .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), - .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, - .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), - .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE, - .isc_rx_maxsize = 16384, - .isc_rx_nsegments = IXL_MAX_RX_SEGS, - .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, - .isc_nfl = 1, - .isc_ntxqs = 1, - .isc_nrxqs = 1, - - .isc_admin_intrcnt = 1, - .isc_vendor_info = ixlv_vendor_info_array, - .isc_driver_version = IAVF_DRIVER_VERSION_STRING, - .isc_driver = &ixlv_if_driver, - .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM 
| IFLIB_IS_VF, - - .isc_nrxd_min = {IXL_MIN_RING}, - .isc_ntxd_min = {IXL_MIN_RING}, - .isc_nrxd_max = {IXL_MAX_RING}, - .isc_ntxd_max = {IXL_MAX_RING}, - .isc_nrxd_default = {IXL_DEFAULT_RING}, - .isc_ntxd_default = {IXL_DEFAULT_RING}, -}; - -if_shared_ctx_t ixlv_sctx = &ixlv_sctx_init; - -/*** Functions ***/ -static void * -ixlv_register(device_t dev) -{ - return (ixlv_sctx); -} - -static int -ixlv_allocate_pci_resources(struct ixlv_sc *sc) -{ - struct i40e_hw *hw = &sc->hw; - device_t dev = iflib_get_dev(sc->vsi.ctx); - int rid; - - /* Map BAR0 */ - rid = PCIR_BAR(0); - sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, - &rid, RF_ACTIVE); - - if (!(sc->pci_mem)) { - device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); - return (ENXIO); - } - - /* Save off the PCI information */ - hw->vendor_id = pci_get_vendor(dev); - hw->device_id = pci_get_device(dev); - hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); - hw->subsystem_vendor_id = - pci_read_config(dev, PCIR_SUBVEND_0, 2); - hw->subsystem_device_id = - pci_read_config(dev, PCIR_SUBDEV_0, 2); - - hw->bus.device = pci_get_slot(dev); - hw->bus.func = pci_get_function(dev); - - /* Save off register access information */ - sc->osdep.mem_bus_space_tag = - rman_get_bustag(sc->pci_mem); - sc->osdep.mem_bus_space_handle = - rman_get_bushandle(sc->pci_mem); - sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem); - sc->osdep.flush_reg = I40E_VFGEN_RSTAT; - sc->osdep.dev = dev; - - sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle; - sc->hw.back = &sc->osdep; - - return (0); -} - -static int -ixlv_if_attach_pre(if_ctx_t ctx) -{ - device_t dev; - struct ixlv_sc *sc; - struct i40e_hw *hw; - struct ixl_vsi *vsi; - if_softc_ctx_t scctx; - int error = 0; - - dev = iflib_get_dev(ctx); - sc = iflib_get_softc(ctx); - - vsi = &sc->vsi; - vsi->back = sc; - sc->dev = dev; - hw = &sc->hw; - - vsi->dev = dev; - vsi->hw = &sc->hw; - vsi->num_vlans = 0; - vsi->ctx = ctx; - vsi->media = iflib_get_media(ctx); - vsi->shared = scctx = iflib_get_softc_ctx(ctx); - - ixlv_save_tunables(sc); - - /* Do PCI setup - map BAR0, etc */ - if (ixlv_allocate_pci_resources(sc)) { - device_printf(dev, "%s: Allocation of PCI resources failed\n", - __func__); - error = ENXIO; - goto err_early; - } - - ixlv_dbg_init(sc, "Allocated PCI resources and MSIX vectors\n"); - - /* - * XXX: This is called by init_shared_code in the PF driver, - * but the rest of that function does not support VFs. 
- */ - error = i40e_set_mac_type(hw); - if (error) { - device_printf(dev, "%s: set_mac_type failed: %d\n", - __func__, error); - goto err_pci_res; - } - - error = ixlv_reset_complete(hw); - if (error) { - device_printf(dev, "%s: Device is still being reset\n", - __func__); - goto err_pci_res; - } - - ixlv_dbg_init(sc, "VF Device is ready for configuration\n"); - - /* Sets up Admin Queue */ - error = ixlv_setup_vc(sc); - if (error) { - device_printf(dev, "%s: Error setting up PF comms, %d\n", - __func__, error); - goto err_pci_res; - } - - ixlv_dbg_init(sc, "PF API version verified\n"); - - /* Need API version before sending reset message */ - error = ixlv_reset(sc); - if (error) { - device_printf(dev, "VF reset failed; reload the driver\n"); - goto err_aq; - } - - ixlv_dbg_init(sc, "VF reset complete\n"); - - /* Ask for VF config from PF */ - error = ixlv_vf_config(sc); - if (error) { - device_printf(dev, "Error getting configuration from PF: %d\n", - error); - goto err_aq; - } - - device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n", - sc->vf_res->num_vsis, - sc->vf_res->num_queue_pairs, - sc->vf_res->max_vectors, - sc->vf_res->rss_key_size, - sc->vf_res->rss_lut_size); - ixlv_dbg_info(sc, "Capabilities=%b\n", - sc->vf_res->vf_cap_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS); - - /* got VF config message back from PF, now we can parse it */ - for (int i = 0; i < sc->vf_res->num_vsis; i++) { - /* XXX: We only use the first VSI we find */ - if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) - sc->vsi_res = &sc->vf_res->vsi_res[i]; - } - if (!sc->vsi_res) { - device_printf(dev, "%s: no LAN VSI found\n", __func__); - error = EIO; - goto err_res_buf; - } - vsi->id = sc->vsi_res->vsi_id; - - ixlv_dbg_init(sc, "Resource Acquisition complete\n"); - - /* If no mac address was assigned just make a random one */ - if (!ixlv_check_ether_addr(hw->mac.addr)) { - u8 addr[ETHER_ADDR_LEN]; - arc4rand(&addr, sizeof(addr), 0); - addr[0] &= 0xFE; - addr[0] |= 0x02; - bcopy(addr, hw->mac.addr, sizeof(addr)); - } - bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); - iflib_set_mac(ctx, hw->mac.addr); - - /* Allocate filter lists */ - ixlv_init_filters(sc); - - /* Fill out more iflib parameters */ - scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = - sc->vsi_res->num_queue_pairs; - if (vsi->enable_head_writeback) { - scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] - * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN); - scctx->isc_txrx = &ixl_txrx_hwb; - } else { - scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] - * sizeof(struct i40e_tx_desc), DBA_ALIGN); - scctx->isc_txrx = &ixl_txrx_dwb; - } - scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] - * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN); - scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR); - scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS; - scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS; - scctx->isc_tx_tso_size_max = IXL_TSO_SIZE; - scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE; - scctx->isc_rss_table_size = IXL_RSS_VSI_LUT_SIZE; - scctx->isc_tx_csum_flags = CSUM_OFFLOAD; - scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS; - - return (0); - -err_res_buf: - free(sc->vf_res, M_IXLV); -err_aq: - i40e_shutdown_adminq(hw); -err_pci_res: - ixlv_free_pci_resources(sc); -err_early: - return (error); -} - -static int -ixlv_if_attach_post(if_ctx_t ctx) -{ - device_t dev; - struct ixlv_sc *sc; - struct i40e_hw *hw; - struct ixl_vsi *vsi; - int error = 0; - - INIT_DBG_DEV(dev, "begin"); - - dev = 
iflib_get_dev(ctx); - sc = iflib_get_softc(ctx); - vsi = &sc->vsi; - vsi->ifp = iflib_get_ifp(ctx); - hw = &sc->hw; - - /* Save off determined number of queues for interface */ - vsi->num_rx_queues = vsi->shared->isc_nrxqsets; - vsi->num_tx_queues = vsi->shared->isc_ntxqsets; - - /* Setup the stack interface */ - ixlv_setup_interface(dev, sc); - - INIT_DBG_DEV(dev, "Interface setup complete"); - - /* Initialize statistics & add sysctls */ - bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); - ixlv_add_device_sysctls(sc); - - sc->init_state = IXLV_INIT_READY; - atomic_store_rel_32(&sc->queues_enabled, 0); - - /* We want AQ enabled early for init */ - ixlv_enable_adminq_irq(hw); - - INIT_DBG_DEV(dev, "end"); - - return (error); -} - -/** - * XXX: iflib always ignores the return value of detach() - * -> This means that this isn't allowed to fail - */ -static int -ixlv_if_detach(if_ctx_t ctx) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - struct i40e_hw *hw = &sc->hw; - device_t dev = sc->dev; - enum i40e_status_code status; - - INIT_DBG_DEV(dev, "begin"); - - /* Remove all the media and link information */ - ifmedia_removeall(vsi->media); - - ixlv_disable_adminq_irq(hw); - status = i40e_shutdown_adminq(&sc->hw); - if (status != I40E_SUCCESS) { - device_printf(dev, - "i40e_shutdown_adminq() failed with status %s\n", - i40e_stat_str(hw, status)); - } - - free(sc->vf_res, M_IXLV); - ixlv_free_pci_resources(sc); - ixlv_free_filters(sc); - - INIT_DBG_DEV(dev, "end"); - return (0); -} - -static int -ixlv_if_shutdown(if_ctx_t ctx) -{ - return (0); -} - -static int -ixlv_if_suspend(if_ctx_t ctx) -{ - return (0); -} - -static int -ixlv_if_resume(if_ctx_t ctx) -{ - return (0); -} - -static int -ixlv_send_vc_msg_sleep(struct ixlv_sc *sc, u32 op) -{ - int error = 0; - if_ctx_t ctx = sc->vsi.ctx; - - error = ixl_vc_send_cmd(sc, op); - if (error != 0) { - ixlv_dbg_vc(sc, "Error sending %b: %d\n", op, IXLV_FLAGS, error); - return (error); - } - - /* Don't wait for a response if the device is being detached. 
-	 */
-	if (!iflib_in_detach(ctx)) {
-		ixlv_dbg_vc(sc, "Sleeping for op %b\n", op, IXLV_FLAGS);
-		error = sx_sleep(ixl_vc_get_op_chan(sc, op),
-		    iflib_ctx_lock_get(ctx), PRI_MAX, "ixlvc", IXLV_AQ_TIMEOUT);
-
-		if (error == EWOULDBLOCK)
-			device_printf(sc->dev, "%b timed out\n", op, IXLV_FLAGS);
-	}
-
-	return (error);
-}
-
-static int
-ixlv_send_vc_msg(struct ixlv_sc *sc, u32 op)
-{
-	int error = 0;
-
-	error = ixl_vc_send_cmd(sc, op);
-	if (error != 0)
-		ixlv_dbg_vc(sc, "Error sending %b: %d\n", op, IXLV_FLAGS, error);
-
-	return (error);
-}
-
-static void
-ixlv_init_queues(struct ixl_vsi *vsi)
-{
-	if_softc_ctx_t scctx = vsi->shared;
-	struct ixl_tx_queue *tx_que = vsi->tx_queues;
-	struct ixl_rx_queue *rx_que = vsi->rx_queues;
-	struct rx_ring *rxr;
-
-	for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++)
-		ixl_init_tx_ring(vsi, tx_que);
-
-	for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
-		rxr = &rx_que->rxr;
-
-		if (scctx->isc_max_frame_size <= MCLBYTES)
-			rxr->mbuf_sz = MCLBYTES;
-		else
-			rxr->mbuf_sz = MJUMPAGESIZE;
-
-		wr32(vsi->hw, rxr->tail, 0);
-	}
-}
-
-void
-ixlv_if_init(if_ctx_t ctx)
-{
-	struct ixlv_sc *sc = iflib_get_softc(ctx);
-	struct ixl_vsi *vsi = &sc->vsi;
-	struct i40e_hw *hw = &sc->hw;
-	struct ifnet *ifp = iflib_get_ifp(ctx);
-	u8 tmpaddr[ETHER_ADDR_LEN];
-	int error = 0;
-
-	INIT_DBG_IF(ifp, "begin");
-
-	MPASS(sx_xlocked(iflib_ctx_lock_get(ctx)));
-
-	error = ixlv_reset_complete(hw);
-	if (error) {
-		device_printf(sc->dev, "%s: VF reset failed\n",
-		    __func__);
-	}
-
-	if (!i40e_check_asq_alive(hw)) {
-		ixlv_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n");
-		pci_enable_busmaster(sc->dev);
-		i40e_shutdown_adminq(hw);
-		i40e_init_adminq(hw);
-	}
-
-	/* Make sure queues are disabled */
-	ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_DISABLE_QUEUES);
-
-	bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN);
-	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
-	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
-		error = ixlv_del_mac_filter(sc, hw->mac.addr);
-		if (error == 0)
-			ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_DEL_MAC_FILTER);
-
-		bcopy(tmpaddr, hw->mac.addr, ETH_ALEN);
-	}
-
-	error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
-	if (!error || error == EEXIST)
-		ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_ADD_MAC_FILTER);
-	iflib_set_mac(ctx, hw->mac.addr);
-
-	/* Prepare the queues for operation */
-	ixlv_init_queues(vsi);
-
-	/* Set initial ITR values */
-	ixlv_configure_itr(sc);
-
-	ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_CONFIGURE_QUEUES);
-
-	/* Set up RSS */
-	ixlv_config_rss(sc);
-
-	/* Map vectors */
-	ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_MAP_VECTORS);
-
-	/* Init SW TX ring indices */
-	if (vsi->enable_head_writeback)
-		ixl_init_tx_cidx(vsi);
-	else
-		ixl_init_tx_rsqs(vsi);
-
-	/* Configure promiscuous mode */
-	ixlv_if_promisc_set(ctx, if_getflags(ifp));
-
-	/* Enable queues */
-	ixlv_send_vc_msg_sleep(sc, IXLV_FLAG_AQ_ENABLE_QUEUES);
-
-	sc->init_state = IXLV_RUNNING;
-}
-
-/*
- * ixlv_attach() helper function; initializes the admin queue
- * and attempts to establish contact with the PF by
- * retrying the initial "API version" message several times
- * or until the PF responds.
- */
-static int
-ixlv_setup_vc(struct ixlv_sc *sc)
-{
-	struct i40e_hw *hw = &sc->hw;
-	device_t dev = sc->dev;
-	int error = 0, ret_error = 0, asq_retries = 0;
-	bool send_api_ver_retried = 0;
-
-	/* Need to set these AQ parameters before initializing AQ */
-	hw->aq.num_arq_entries = IXL_AQ_LEN;
-	hw->aq.num_asq_entries = IXL_AQ_LEN;
-	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
-	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
-
-	for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
-		/* Initialize admin queue */
-		error = i40e_init_adminq(hw);
-		if (error) {
-			device_printf(dev, "%s: init_adminq failed: %d\n",
-			    __func__, error);
-			ret_error = 1;
-			continue;
-		}
-
-		ixlv_dbg_init(sc, "Initialized Admin Queue; starting"
-		    " send_api_ver attempt %d", i+1);
-
-retry_send:
-		/* Send VF's API version */
-		error = ixlv_send_api_ver(sc);
-		if (error) {
-			i40e_shutdown_adminq(hw);
-			ret_error = 2;
-			device_printf(dev, "%s: unable to send api"
-			    " version to PF on attempt %d, error %d\n",
-			    __func__, i+1, error);
-		}
-
-		asq_retries = 0;
-		while (!i40e_asq_done(hw)) {
-			if (++asq_retries > IXLV_AQ_MAX_ERR) {
-				i40e_shutdown_adminq(hw);
-				device_printf(dev, "Admin Queue timeout "
-				    "(waiting for send_api_ver), %d more tries...\n",
-				    IXLV_AQ_MAX_ERR - (i + 1));
-				ret_error = 3;
-				break;
-			}
-			i40e_msec_pause(10);
-		}
-		if (asq_retries > IXLV_AQ_MAX_ERR)
-			continue;
-
-		ixlv_dbg_init(sc, "Sent API version message to PF");
-
-		/* Verify that the VF accepts the PF's API version */
-		error = ixlv_verify_api_ver(sc);
-		if (error == ETIMEDOUT) {
-			if (!send_api_ver_retried) {
-				/* Resend message, one more time */
-				send_api_ver_retried = true;
-				device_printf(dev,
-				    "%s: Timeout while verifying API version on first"
-				    " try!\n", __func__);
-				goto retry_send;
-			} else {
-				device_printf(dev,
-				    "%s: Timeout while verifying API version on second"
-				    " try!\n", __func__);
-				ret_error = 4;
-				break;
-			}
-		}
-		if (error) {
-			device_printf(dev,
-			    "%s: Unable to verify API version,"
-			    " error %s\n", __func__, i40e_stat_str(hw, error));
-			ret_error = 5;
-		}
-		break;
-	}
-
-	if (ret_error >= 4)
-		i40e_shutdown_adminq(hw);
-	return (ret_error);
-}
-
-/*
- * ixlv_attach() helper function; asks the PF for this VF's
- * configuration, and saves the information if it receives it.
- */ -static int -ixlv_vf_config(struct ixlv_sc *sc) -{ - struct i40e_hw *hw = &sc->hw; - device_t dev = sc->dev; - int bufsz, error = 0, ret_error = 0; - int asq_retries, retried = 0; - -retry_config: - error = ixlv_send_vf_config_msg(sc); - if (error) { - device_printf(dev, - "%s: Unable to send VF config request, attempt %d," - " error %d\n", __func__, retried + 1, error); - ret_error = 2; - } - - asq_retries = 0; - while (!i40e_asq_done(hw)) { - if (++asq_retries > IXLV_AQ_MAX_ERR) { - device_printf(dev, "%s: Admin Queue timeout " - "(waiting for send_vf_config_msg), attempt %d\n", - __func__, retried + 1); - ret_error = 3; - goto fail; - } - i40e_msec_pause(10); - } - - ixlv_dbg_init(sc, "Sent VF config message to PF, attempt %d\n", - retried + 1); - - if (!sc->vf_res) { - bufsz = sizeof(struct virtchnl_vf_resource) + - (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); - sc->vf_res = malloc(bufsz, M_IXLV, M_NOWAIT); - if (!sc->vf_res) { - device_printf(dev, - "%s: Unable to allocate memory for VF configuration" - " message from PF on attempt %d\n", __func__, retried + 1); - ret_error = 1; - goto fail; - } - } - - /* Check for VF config response */ - error = ixlv_get_vf_config(sc); - if (error == ETIMEDOUT) { - /* The 1st time we timeout, send the configuration message again */ - if (!retried) { - retried++; - goto retry_config; - } - device_printf(dev, - "%s: ixlv_get_vf_config() timed out waiting for a response\n", - __func__); - } - if (error) { - device_printf(dev, - "%s: Unable to get VF configuration from PF after %d tries!\n", - __func__, retried + 1); - ret_error = 4; - } - goto done; - -fail: - free(sc->vf_res, M_IXLV); -done: - return (ret_error); -} - -static int -ixlv_if_msix_intr_assign(if_ctx_t ctx, int msix) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_rx_queue *rx_que = vsi->rx_queues; - struct ixl_tx_queue *tx_que = vsi->tx_queues; - int err, i, rid, vector = 0; - char buf[16]; - - MPASS(vsi->shared->isc_nrxqsets > 0); - MPASS(vsi->shared->isc_ntxqsets > 0); - - /* Admin Queue is vector 0 */ - rid = vector + 1; - err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, - ixlv_msix_adminq, sc, 0, "aq"); - if (err) { - iflib_irq_free(ctx, &vsi->irq); - device_printf(iflib_get_dev(ctx), - "Failed to register Admin Queue handler\n"); - return (err); - } - - /* Now set up the stations */ - for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) { - rid = vector + 1; - - snprintf(buf, sizeof(buf), "rxq%d", i); - err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, - IFLIB_INTR_RX, ixlv_msix_que, rx_que, rx_que->rxr.me, buf); - /* XXX: Does the driver work as expected if there are fewer num_rx_queues than - * what's expected in the iflib context? */ - if (err) { - device_printf(iflib_get_dev(ctx), - "Failed to allocate queue RX int vector %d, err: %d\n", i, err); - vsi->num_rx_queues = i + 1; - goto fail; - } - rx_que->msix = vector; - } - - bzero(buf, sizeof(buf)); - - for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) { - snprintf(buf, sizeof(buf), "txq%d", i); - iflib_softirq_alloc_generic(ctx, - &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq, - IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); - - /* TODO: Maybe call a strategy function for this to figure out which - * interrupts to map Tx queues to. I don't know if there's an immediately - * better way than this other than a user-supplied map, though. 
*/ - tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1; - } - - return (0); -fail: - iflib_irq_free(ctx, &vsi->irq); - rx_que = vsi->rx_queues; - for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) - iflib_irq_free(ctx, &rx_que->que_irq); - return (err); -} - -/* Enable all interrupts */ -static void -ixlv_if_enable_intr(if_ctx_t ctx) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - - ixlv_enable_intr(vsi); -} - -/* Disable all interrupts */ -static void -ixlv_if_disable_intr(if_ctx_t ctx) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - - ixlv_disable_intr(vsi); -} - -static int -ixlv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - struct i40e_hw *hw = vsi->hw; - struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid]; - - ixlv_enable_queue_irq(hw, rx_que->msix - 1); - return (0); -} - -static int -ixlv_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - struct i40e_hw *hw = vsi->hw; - struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid]; - - ixlv_enable_queue_irq(hw, tx_que->msix - 1); - return (0); -} - -static int -ixlv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - if_softc_ctx_t scctx = vsi->shared; - struct ixl_tx_queue *que; - int i, j, error = 0; - - MPASS(scctx->isc_ntxqsets > 0); - MPASS(ntxqs == 1); - MPASS(scctx->isc_ntxqsets == ntxqsets); - - /* Allocate queue structure memory */ - if (!(vsi->tx_queues = - (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXLV, M_NOWAIT | M_ZERO))) { - device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); - return (ENOMEM); - } - - for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { - struct tx_ring *txr = &que->txr; - - txr->me = i; - que->vsi = vsi; - - if (!vsi->enable_head_writeback) { - /* Allocate report status array */ - if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXLV, M_NOWAIT))) { - device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n"); - error = ENOMEM; - goto fail; - } - /* Init report status array */ - for (j = 0; j < scctx->isc_ntxd[0]; j++) - txr->tx_rsq[j] = QIDX_INVALID; - } - /* get the virtual and physical address of the hardware queues */ - txr->tail = I40E_QTX_TAIL1(txr->me); - txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs]; - txr->tx_paddr = paddrs[i * ntxqs]; - txr->que = que; - } - - return (0); -fail: - ixlv_if_queues_free(ctx); - return (error); -} - -static int -ixlv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_rx_queue *que; - int i, error = 0; - -#ifdef INVARIANTS - if_softc_ctx_t scctx = vsi->shared; - MPASS(scctx->isc_nrxqsets > 0); - MPASS(nrxqs == 1); - MPASS(scctx->isc_nrxqsets == nrxqsets); -#endif - - /* Allocate queue structure memory */ - if (!(vsi->rx_queues = - (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) * - nrxqsets, M_IXLV, M_NOWAIT | M_ZERO))) { - device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); - error = ENOMEM; - goto fail; - } - - for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) { - struct rx_ring *rxr = &que->rxr; - - rxr->me = i; - 
que->vsi = vsi; - - /* get the virtual and physical address of the hardware queues */ - rxr->tail = I40E_QRX_TAIL1(rxr->me); - rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs]; - rxr->rx_paddr = paddrs[i * nrxqs]; - rxr->que = que; - } - - return (0); -fail: - ixlv_if_queues_free(ctx); - return (error); -} - -static void -ixlv_if_queues_free(if_ctx_t ctx) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - - if (!vsi->enable_head_writeback) { - struct ixl_tx_queue *que; - int i = 0; - - for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) { - struct tx_ring *txr = &que->txr; - if (txr->tx_rsq != NULL) { - free(txr->tx_rsq, M_IXLV); - txr->tx_rsq = NULL; - } - } - } - - if (vsi->tx_queues != NULL) { - free(vsi->tx_queues, M_IXLV); - vsi->tx_queues = NULL; - } - if (vsi->rx_queues != NULL) { - free(vsi->rx_queues, M_IXLV); - vsi->rx_queues = NULL; - } -} - -static int -ixlv_check_aq_errors(struct ixlv_sc *sc) -{ - struct i40e_hw *hw = &sc->hw; - device_t dev = sc->dev; - u32 reg, oldreg; - u8 aq_error = false; - - /* check for Admin queue errors */ - oldreg = reg = rd32(hw, hw->aq.arq.len); - if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) { - device_printf(dev, "ARQ VF Error detected\n"); - reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; - aq_error = true; - } - if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) { - device_printf(dev, "ARQ Overflow Error detected\n"); - reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; - aq_error = true; - } - if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) { - device_printf(dev, "ARQ Critical Error detected\n"); - reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; - aq_error = true; - } - if (oldreg != reg) - wr32(hw, hw->aq.arq.len, reg); - - oldreg = reg = rd32(hw, hw->aq.asq.len); - if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) { - device_printf(dev, "ASQ VF Error detected\n"); - reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; - aq_error = true; - } - if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) { - device_printf(dev, "ASQ Overflow Error detected\n"); - reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; - aq_error = true; - } - if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) { - device_printf(dev, "ASQ Critical Error detected\n"); - reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; - aq_error = true; - } - if (oldreg != reg) - wr32(hw, hw->aq.asq.len, reg); - - if (aq_error) { - device_printf(dev, "WARNING: Stopping VF!\n"); - /* - * A VF reset might not be enough to fix a problem here; - * a PF reset could be required. - */ - sc->init_state = IXLV_RESET_REQUIRED; - ixlv_stop(sc); - ixlv_request_reset(sc); - } - - return (aq_error ? 
EIO : 0); -} - -static enum i40e_status_code -ixlv_process_adminq(struct ixlv_sc *sc, u16 *pending) -{ - enum i40e_status_code status = I40E_SUCCESS; - struct i40e_arq_event_info event; - struct i40e_hw *hw = &sc->hw; - struct virtchnl_msg *v_msg; - int error = 0, loop = 0; - u32 reg; - - error = ixlv_check_aq_errors(sc); - if (error) - return (I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR); - - event.buf_len = IXL_AQ_BUF_SZ; - event.msg_buf = sc->aq_buffer; - bzero(event.msg_buf, IXL_AQ_BUF_SZ); - v_msg = (struct virtchnl_msg *)&event.desc; - - /* clean and process any events */ - do { - status = i40e_clean_arq_element(hw, &event, pending); - /* - * Also covers normal case when i40e_clean_arq_element() - * returns "I40E_ERR_ADMIN_QUEUE_NO_WORK" - */ - if (status) - break; - ixlv_vc_completion(sc, v_msg->v_opcode, - v_msg->v_retval, event.msg_buf, event.msg_len); - bzero(event.msg_buf, IXL_AQ_BUF_SZ); - } while (*pending && (loop++ < IXL_ADM_LIMIT)); - - /* Re-enable admin queue interrupt cause */ - reg = rd32(hw, I40E_VFINT_ICR0_ENA1); - reg |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK; - wr32(hw, I40E_VFINT_ICR0_ENA1, reg); - - return (status); -} - -static void -ixlv_if_update_admin_status(if_ctx_t ctx) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct i40e_hw *hw = &sc->hw; - u16 pending; - - ixlv_process_adminq(sc, &pending); - ixlv_update_link_status(sc); - - /* - * If there are still messages to process, reschedule. - * Otherwise, re-enable the Admin Queue interrupt. - */ - if (pending > 0) - iflib_admin_intr_deferred(ctx); - else - ixlv_enable_adminq_irq(hw); -} - -static int -ixlv_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused) -{ - struct ixlv_sc *sc = arg; - int error = 0; - - if (ifma->ifma_addr->sa_family != AF_LINK) - return (0); - error = ixlv_add_mac_filter(sc, - (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr), - IXL_FILTER_MC); - - return (!error); -} - -static void -ixlv_if_multi_set(if_ctx_t ctx) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - int mcnt = 0; - - IOCTL_DEBUGOUT("ixlv_if_multi_set: begin"); - - mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR); - if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { - /* Delete MC filters and enable multicast promisc instead */ - ixlv_init_multi(sc); - sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC; - ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_CONFIGURE_PROMISC); - return; - } - - /* If there aren't too many filters, delete existing MC filters */ - ixlv_init_multi(sc); - - /* And (re-)install filters for all mcast addresses */ - mcnt = if_multi_apply(iflib_get_ifp(ctx), ixlv_mc_filter_apply, sc); - - if (mcnt > 0) - ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_ADD_MAC_FILTER); -} - -static int -ixlv_if_mtu_set(if_ctx_t ctx, uint32_t mtu) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - - IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); - if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - - ETHER_VLAN_ENCAP_LEN) - return (EINVAL); - - vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + - ETHER_VLAN_ENCAP_LEN; - - return (0); -} - -static void -ixlv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) -{ -#ifdef IXL_DEBUG - struct ifnet *ifp = iflib_get_ifp(ctx); -#endif - struct ixlv_sc *sc = iflib_get_softc(ctx); - - INIT_DBG_IF(ifp, "begin"); - - ixlv_update_link_status(sc); - - ifmr->ifm_status = IFM_AVALID; - ifmr->ifm_active = IFM_ETHER; - - if (!sc->link_up) - return; - - ifmr->ifm_status |= IFM_ACTIVE; - /* Hardware is always 
full-duplex */ - ifmr->ifm_active |= IFM_FDX; - - /* Based on the link speed reported by the PF over the AdminQ, choose a - * PHY type to report. This isn't 100% correct since we don't really - * know the underlying PHY type of the PF, but at least we can report - * a valid link speed... - */ - switch (sc->link_speed) { - case VIRTCHNL_LINK_SPEED_100MB: - ifmr->ifm_active |= IFM_100_TX; - break; - case VIRTCHNL_LINK_SPEED_1GB: - ifmr->ifm_active |= IFM_1000_T; - break; - case VIRTCHNL_LINK_SPEED_10GB: - ifmr->ifm_active |= IFM_10G_SR; - break; - case VIRTCHNL_LINK_SPEED_20GB: - case VIRTCHNL_LINK_SPEED_25GB: - ifmr->ifm_active |= IFM_25G_SR; - break; - case VIRTCHNL_LINK_SPEED_40GB: - ifmr->ifm_active |= IFM_40G_SR4; - break; - default: - ifmr->ifm_active |= IFM_UNKNOWN; - break; - } - - INIT_DBG_IF(ifp, "end"); -} - -static int -ixlv_if_media_change(if_ctx_t ctx) -{ - struct ifmedia *ifm = iflib_get_media(ctx); - - INIT_DEBUGOUT("ixl_media_change: begin"); - - if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) - return (EINVAL); - - if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n"); - return (ENODEV); -} - -static int -ixlv_if_promisc_set(if_ctx_t ctx, int flags) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ifnet *ifp = iflib_get_ifp(ctx); - - sc->promisc_flags = 0; - - if (flags & IFF_ALLMULTI || - if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR) - sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC; - if (flags & IFF_PROMISC) - sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC; - - ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_CONFIGURE_PROMISC); - - return (0); -} - -static void -ixlv_if_timer(if_ctx_t ctx, uint16_t qid) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct i40e_hw *hw = &sc->hw; - u32 val; - - if (qid != 0) - return; - - /* Check for when PF triggers a VF reset */ - val = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if (val != VIRTCHNL_VFR_VFACTIVE - && val != VIRTCHNL_VFR_COMPLETED) { - ixlv_dbg_info(sc, "reset in progress! 
(%d)\n", val); - return; - } - - /* Fire off the adminq task */ - iflib_admin_intr_deferred(ctx); - - /* Update stats */ - ixlv_request_stats(sc); -} - -static void -ixlv_if_vlan_register(if_ctx_t ctx, u16 vtag) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - struct ixlv_vlan_filter *v; - - if ((vtag == 0) || (vtag > 4095)) /* Invalid */ - return; - - ++vsi->num_vlans; - v = malloc(sizeof(struct ixlv_vlan_filter), M_IXLV, M_WAITOK | M_ZERO); - SLIST_INSERT_HEAD(sc->vlan_filters, v, next); - v->vlan = vtag; - v->flags = IXL_FILTER_ADD; - - ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_ADD_VLAN_FILTER); -} - -static void -ixlv_if_vlan_unregister(if_ctx_t ctx, u16 vtag) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - struct ixlv_vlan_filter *v; - int i = 0; - - if ((vtag == 0) || (vtag > 4095)) /* Invalid */ - return; - - SLIST_FOREACH(v, sc->vlan_filters, next) { - if (v->vlan == vtag) { - v->flags = IXL_FILTER_DEL; - ++i; - --vsi->num_vlans; - } - } - if (i) - ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_DEL_VLAN_FILTER); -} - -static uint64_t -ixlv_if_get_counter(if_ctx_t ctx, ift_counter cnt) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - struct ixl_vsi *vsi = &sc->vsi; - if_t ifp = iflib_get_ifp(ctx); - - switch (cnt) { - case IFCOUNTER_IPACKETS: - return (vsi->ipackets); - case IFCOUNTER_IERRORS: - return (vsi->ierrors); - case IFCOUNTER_OPACKETS: - return (vsi->opackets); - case IFCOUNTER_OERRORS: - return (vsi->oerrors); - case IFCOUNTER_COLLISIONS: - /* Collisions are by standard impossible in 40G/10G Ethernet */ - return (0); - case IFCOUNTER_IBYTES: - return (vsi->ibytes); - case IFCOUNTER_OBYTES: - return (vsi->obytes); - case IFCOUNTER_IMCASTS: - return (vsi->imcasts); - case IFCOUNTER_OMCASTS: - return (vsi->omcasts); - case IFCOUNTER_IQDROPS: - return (vsi->iqdrops); - case IFCOUNTER_OQDROPS: - return (vsi->oqdrops); - case IFCOUNTER_NOPROTO: - return (vsi->noproto); - default: - return (if_get_counter_default(ifp, cnt)); - } -} - - -static void -ixlv_free_pci_resources(struct ixlv_sc *sc) -{ - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_rx_queue *rx_que = vsi->rx_queues; - device_t dev = sc->dev; - - /* We may get here before stations are setup */ - if (rx_que == NULL) - goto early; - - /* Release all interrupts */ - iflib_irq_free(vsi->ctx, &vsi->irq); - - for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) - iflib_irq_free(vsi->ctx, &rx_que->que_irq); - -early: - if (sc->pci_mem != NULL) - bus_release_resource(dev, SYS_RES_MEMORY, - PCIR_BAR(0), sc->pci_mem); -} - - -/* -** Requests a VF reset from the PF. -** -** Requires the VF's Admin Queue to be initialized. 
-*/ -static int -ixlv_reset(struct ixlv_sc *sc) -{ - struct i40e_hw *hw = &sc->hw; - device_t dev = sc->dev; - int error = 0; - - /* Ask the PF to reset us if we are initiating */ - if (sc->init_state != IXLV_RESET_PENDING) - ixlv_request_reset(sc); - - i40e_msec_pause(100); - error = ixlv_reset_complete(hw); - if (error) { - device_printf(dev, "%s: VF reset failed\n", - __func__); - return (error); - } - pci_enable_busmaster(dev); - - error = i40e_shutdown_adminq(hw); - if (error) { - device_printf(dev, "%s: shutdown_adminq failed: %d\n", - __func__, error); - return (error); - } - - error = i40e_init_adminq(hw); - if (error) { - device_printf(dev, "%s: init_adminq failed: %d\n", - __func__, error); - return (error); - } - - ixlv_enable_adminq_irq(hw); - return (0); -} - -static int -ixlv_reset_complete(struct i40e_hw *hw) -{ - u32 reg; - - /* Wait up to ~10 seconds */ - for (int i = 0; i < 100; i++) { - reg = rd32(hw, I40E_VFGEN_RSTAT) & - I40E_VFGEN_RSTAT_VFR_STATE_MASK; - - if ((reg == VIRTCHNL_VFR_VFACTIVE) || - (reg == VIRTCHNL_VFR_COMPLETED)) - return (0); - i40e_msec_pause(100); - } - - return (EBUSY); -} - -static void -ixlv_setup_interface(device_t dev, struct ixlv_sc *sc) -{ - struct ixl_vsi *vsi = &sc->vsi; - if_ctx_t ctx = vsi->ctx; - struct ifnet *ifp = iflib_get_ifp(ctx); - - INIT_DBG_DEV(dev, "begin"); - - vsi->shared->isc_max_frame_size = - ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN - + ETHER_VLAN_ENCAP_LEN; -#if __FreeBSD_version >= 1100000 - if_setbaudrate(ifp, IF_Gbps(40)); -#else - if_initbaudrate(ifp, IF_Gbps(40)); -#endif - - ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); - ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO); -} - -/* -** Get a new filter and add it to the mac filter list. -*/ -static struct ixlv_mac_filter * -ixlv_get_mac_filter(struct ixlv_sc *sc) -{ - struct ixlv_mac_filter *f; - - f = malloc(sizeof(struct ixlv_mac_filter), - M_IXLV, M_NOWAIT | M_ZERO); - if (f) - SLIST_INSERT_HEAD(sc->mac_filters, f, next); - - return (f); -} - -/* -** Find the filter with matching MAC address -*/ -static struct ixlv_mac_filter * -ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr) -{ - struct ixlv_mac_filter *f; - bool match = FALSE; - - SLIST_FOREACH(f, sc->mac_filters, next) { - if (cmp_etheraddr(f->macaddr, macaddr)) { - match = TRUE; - break; - } - } - - if (!match) - f = NULL; - return (f); -} - -/* -** Admin Queue interrupt handler -*/ -static int -ixlv_msix_adminq(void *arg) -{ - struct ixlv_sc *sc = arg; - struct i40e_hw *hw = &sc->hw; - u32 reg, mask; - bool do_task = FALSE; - - ++sc->admin_irq; - - reg = rd32(hw, I40E_VFINT_ICR01); - /* - * For masking off interrupt causes that need to be handled before - * they can be re-enabled - */ - mask = rd32(hw, I40E_VFINT_ICR0_ENA1); - - /* Check on the cause */ - if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) { - mask &= ~I40E_VFINT_ICR0_ENA_ADMINQ_MASK; - do_task = TRUE; - } - - wr32(hw, I40E_VFINT_ICR0_ENA1, mask); - ixlv_enable_adminq_irq(hw); - - if (do_task) - return (FILTER_SCHEDULE_THREAD); - else - return (FILTER_HANDLED); -} - -void -ixlv_enable_intr(struct ixl_vsi *vsi) -{ - struct i40e_hw *hw = vsi->hw; - struct ixl_rx_queue *que = vsi->rx_queues; - - ixlv_enable_adminq_irq(hw); - for (int i = 0; i < vsi->num_rx_queues; i++, que++) - ixlv_enable_queue_irq(hw, que->rxr.me); -} - -void -ixlv_disable_intr(struct ixl_vsi *vsi) -{ - struct i40e_hw *hw = vsi->hw; - struct ixl_rx_queue *que = vsi->rx_queues; - - for (int i = 0; i < vsi->num_rx_queues; i++, que++) - ixlv_disable_queue_irq(hw, que->rxr.me); -} 
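- -/* - * MSI-X layout note (see ixlv_if_msix_intr_assign() above): vector 0 is - * reserved for the admin queue and RX queue i is assigned vector i + 1, - * so a queue's I40E_VFINT_DYN_CTLN1 register index can be derived either - * as msix - 1 (as in the per-queue intr_enable methods) or as rxr.me (as - * in the loops above); the two are always equal. - */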
- -static void -ixlv_disable_adminq_irq(struct i40e_hw *hw) -{ - wr32(hw, I40E_VFINT_DYN_CTL01, 0); - wr32(hw, I40E_VFINT_ICR0_ENA1, 0); - /* flush */ - rd32(hw, I40E_VFGEN_RSTAT); -} - -static void -ixlv_enable_adminq_irq(struct i40e_hw *hw) -{ - wr32(hw, I40E_VFINT_DYN_CTL01, - I40E_VFINT_DYN_CTL01_INTENA_MASK | - I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); - wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); - /* flush */ - rd32(hw, I40E_VFGEN_RSTAT); -} - -static void -ixlv_enable_queue_irq(struct i40e_hw *hw, int id) -{ - u32 reg; - - reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; - wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg); -} - -static void -ixlv_disable_queue_irq(struct i40e_hw *hw, int id) -{ - wr32(hw, I40E_VFINT_DYN_CTLN1(id), - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); - rd32(hw, I40E_VFGEN_RSTAT); -} - -static void -ixlv_configure_tx_itr(struct ixlv_sc *sc) -{ - struct i40e_hw *hw = &sc->hw; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_tx_queue *que = vsi->tx_queues; - - vsi->tx_itr_setting = sc->tx_itr; - - for (int i = 0; i < vsi->num_tx_queues; i++, que++) { - struct tx_ring *txr = &que->txr; - - wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i), - vsi->tx_itr_setting); - txr->itr = vsi->tx_itr_setting; - txr->latency = IXL_AVE_LATENCY; - } -} - -static void -ixlv_configure_rx_itr(struct ixlv_sc *sc) -{ - struct i40e_hw *hw = &sc->hw; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_rx_queue *que = vsi->rx_queues; - - vsi->rx_itr_setting = sc->rx_itr; - - for (int i = 0; i < vsi->num_rx_queues; i++, que++) { - struct rx_ring *rxr = &que->rxr; - - wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i), - vsi->rx_itr_setting); - rxr->itr = vsi->rx_itr_setting; - rxr->latency = IXL_AVE_LATENCY; - } -} - -/* - * Get initial ITR values from tunable values. - */ -static void -ixlv_configure_itr(struct ixlv_sc *sc) -{ - ixlv_configure_tx_itr(sc); - ixlv_configure_rx_itr(sc); -} - -/* -** Provide an update to the queue RX -** interrupt moderation value. -*/ -static void -ixlv_set_queue_rx_itr(struct ixl_rx_queue *que) -{ - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; - struct rx_ring *rxr = &que->rxr; - - /* Idle, do nothing */ - if (rxr->bytes == 0) - return; - - /* Update the hardware if needed */ - if (rxr->itr != vsi->rx_itr_setting) { - rxr->itr = vsi->rx_itr_setting; - wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, - que->rxr.me), rxr->itr); - } -} - -static int -ixlv_msix_que(void *arg) -{ - struct ixl_rx_queue *rx_que = arg; - - ++rx_que->irqs; - - ixlv_set_queue_rx_itr(rx_que); - // ixlv_set_queue_tx_itr(que); - - return (FILTER_SCHEDULE_THREAD); -} - -/********************************************************************* - * Multicast Initialization - * - * This routine is called by init to reset to a fresh state. - * - **********************************************************************/ -static void -ixlv_init_multi(struct ixlv_sc *sc) -{ - struct ixlv_mac_filter *f; - int mcnt = 0; - - /* First clear any multicast filters */ - SLIST_FOREACH(f, sc->mac_filters, next) { - if ((f->flags & IXL_FILTER_USED) - && (f->flags & IXL_FILTER_MC)) { - f->flags |= IXL_FILTER_DEL; - mcnt++; - } - } - if (mcnt > 0) - ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_DEL_MAC_FILTER); -} - -/* -** Note: this routine updates the OS on the link state; -** the real check of the hardware only happens with -** a link interrupt. 
-*/ -void -ixlv_update_link_status(struct ixlv_sc *sc) -{ - struct ixl_vsi *vsi = &sc->vsi; - u64 baudrate; - - if (sc->link_up){ - if (vsi->link_active == FALSE) { - vsi->link_active = TRUE; - baudrate = ixl_max_vc_speed_to_value(sc->link_speed); - ixlv_dbg_info(sc, "baudrate: %lu\n", baudrate); - iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate); - } - } else { /* Link down */ - if (vsi->link_active == TRUE) { - vsi->link_active = FALSE; - iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0); - } - } -} - -/********************************************************************* - * - * This routine disables all traffic on the adapter by issuing a - * global reset on the MAC and deallocates TX/RX buffers. - * - **********************************************************************/ - -static void -ixlv_stop(struct ixlv_sc *sc) -{ - struct ifnet *ifp; - - ifp = sc->vsi.ifp; - - ixlv_disable_intr(&sc->vsi); - - if (atomic_load_acq_32(&sc->queues_enabled)) - ixlv_send_vc_msg_sleep(sc, IXLV_FLAG_AQ_DISABLE_QUEUES); -} - -static void -ixlv_if_stop(if_ctx_t ctx) -{ - struct ixlv_sc *sc = iflib_get_softc(ctx); - - ixlv_stop(sc); -} - -static void -ixlv_config_rss_reg(struct ixlv_sc *sc) -{ - struct i40e_hw *hw = &sc->hw; - struct ixl_vsi *vsi = &sc->vsi; - u32 lut = 0; - u64 set_hena = 0, hena; - int i, j, que_id; - u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; -#ifdef RSS - u32 rss_hash_config; -#endif - - /* Don't set up RSS if using a single queue */ - if (vsi->num_rx_queues == 1) { - wr32(hw, I40E_VFQF_HENA(0), 0); - wr32(hw, I40E_VFQF_HENA(1), 0); - ixl_flush(hw); - return; - } - -#ifdef RSS - /* Fetch the configured RSS key */ - rss_getkey((uint8_t *) &rss_seed); -#else - ixl_get_default_rss_key(rss_seed); -#endif - - /* Fill out hash function seed */ - for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) - wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]); - - /* Enable PCTYPES for RSS: */ -#ifdef RSS - rss_hash_config = rss_gethashconfig(); - if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) - set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); - if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) - set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) - set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); - if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) - set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); - if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) - set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); - if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) - set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) - set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); -#else - set_hena = IXL_DEFAULT_RSS_HENA_XL710; -#endif - hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | - ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); - hena |= set_hena; - wr32(hw, I40E_VFQF_HENA(0), (u32)hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); - - /* Populate the LUT with max no. of queues in round robin fashion */ - for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) { - if (j == vsi->num_rx_queues) - j = 0; -#ifdef RSS - /* - * Fetch the RSS bucket id for the given indirection entry. - * Cap it at the number of configured buckets (which is - * num_queues.) 
- */ - que_id = rss_get_indirection_to_bucket(i); - que_id = que_id % vsi->num_queues; -#else - que_id = j; -#endif - /* lut = 4-byte sliding window of 4 lut entries */ - lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK); - /* On i = 3, we have 4 entries in lut; write to the register */ - if ((i & 3) == 3) { - wr32(hw, I40E_VFQF_HLUT(i >> 2), lut); - DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut); - } - } - ixl_flush(hw); -} - -static void -ixlv_config_rss_pf(struct ixlv_sc *sc) -{ - ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_CONFIG_RSS_KEY); - - ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_SET_RSS_HENA); - - ixlv_send_vc_msg(sc, IXLV_FLAG_AQ_CONFIG_RSS_LUT); -} - -/* -** ixlv_config_rss - setup RSS -** -** RSS keys and table are cleared on VF reset. -*/ -static void -ixlv_config_rss(struct ixlv_sc *sc) -{ - if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) { - ixlv_dbg_info(sc, "Setting up RSS using VF registers..."); - ixlv_config_rss_reg(sc); - } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { - ixlv_dbg_info(sc, "Setting up RSS using messages to PF..."); - ixlv_config_rss_pf(sc); - } else - device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n"); -} - -/* -** This routine adds new MAC filters to the sc's list; -** these are later added in hardware by sending a virtual -** channel message. -*/ -static int -ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags) -{ - struct ixlv_mac_filter *f; - - /* Does one already exist? */ - f = ixlv_find_mac_filter(sc, macaddr); - if (f != NULL) { - ixlv_dbg_filter(sc, "exists: " MAC_FORMAT "\n", - MAC_FORMAT_ARGS(macaddr)); - return (EEXIST); - } - - /* If not, get a new empty filter */ - f = ixlv_get_mac_filter(sc); - if (f == NULL) { - device_printf(sc->dev, "%s: no filters available!!\n", - __func__); - return (ENOMEM); - } - - ixlv_dbg_filter(sc, "marked: " MAC_FORMAT "\n", - MAC_FORMAT_ARGS(macaddr)); - - bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); - f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); - f->flags |= flags; - return (0); -} - -/* -** Marks a MAC filter for deletion. -*/ -static int -ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr) -{ - struct ixlv_mac_filter *f; - - f = ixlv_find_mac_filter(sc, macaddr); - if (f == NULL) - return (ENOENT); - - f->flags |= IXL_FILTER_DEL; - return (0); -} - -/* - * Re-uses the name from the PF driver. - */ -static void -ixlv_add_device_sysctls(struct ixlv_sc *sc) -{ - struct ixl_vsi *vsi = &sc->vsi; - device_t dev = sc->dev; - - struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); - struct sysctl_oid_list *ctx_list = - SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); - struct sysctl_oid *debug_node; - struct sysctl_oid_list *debug_list; - - SYSCTL_ADD_PROC(ctx, ctx_list, - OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, - sc, 0, ixlv_sysctl_current_speed, "A", "Current Port Speed"); - - SYSCTL_ADD_PROC(ctx, ctx_list, - OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW, - sc, 0, ixlv_sysctl_tx_itr, "I", - "Immediately set TX ITR value for all queues"); - - SYSCTL_ADD_PROC(ctx, ctx_list, - OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW, - sc, 0, ixlv_sysctl_rx_itr, "I", - "Immediately set RX ITR value for all queues"); - - /* Add sysctls meant to print debug information, but don't list them - * in "sysctl -a" output. 
*/ - debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, - OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls"); - debug_list = SYSCTL_CHILDREN(debug_node); - - SYSCTL_ADD_UINT(ctx, debug_list, - OID_AUTO, "shared_debug_mask", CTLFLAG_RW, - &sc->hw.debug_mask, 0, "Shared code debug message level"); - - SYSCTL_ADD_UINT(ctx, debug_list, - OID_AUTO, "core_debug_mask", CTLFLAG_RW, - &sc->dbg_mask, 0, "Non-shared code debug message level"); - - SYSCTL_ADD_PROC(ctx, debug_list, - OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD, - sc, 0, ixlv_sysctl_sw_filter_list, "A", "SW Filter List"); - - SYSCTL_ADD_PROC(ctx, debug_list, - OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD, - sc, 0, ixlv_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); - - SYSCTL_ADD_PROC(ctx, debug_list, - OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR, - sc, 0, ixlv_sysctl_vf_reset, "A", "Request a VF reset from PF"); - - SYSCTL_ADD_PROC(ctx, debug_list, - OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR, - sc, 0, ixlv_sysctl_vflr_reset, "A", "Request a VFLR reset from HW"); - - /* Add stats sysctls */ - ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi"); - ixl_add_queues_sysctls(dev, vsi); - -} - -static void -ixlv_init_filters(struct ixlv_sc *sc) -{ - sc->mac_filters = malloc(sizeof(struct mac_list), - M_IXLV, M_WAITOK | M_ZERO); - SLIST_INIT(sc->mac_filters); - sc->vlan_filters = malloc(sizeof(struct vlan_list), - M_IXLV, M_WAITOK | M_ZERO); - SLIST_INIT(sc->vlan_filters); -} - -static void -ixlv_free_filters(struct ixlv_sc *sc) -{ - struct ixlv_mac_filter *f; - struct ixlv_vlan_filter *v; - - while (!SLIST_EMPTY(sc->mac_filters)) { - f = SLIST_FIRST(sc->mac_filters); - SLIST_REMOVE_HEAD(sc->mac_filters, next); - free(f, M_IXLV); - } - free(sc->mac_filters, M_IXLV); - while (!SLIST_EMPTY(sc->vlan_filters)) { - v = SLIST_FIRST(sc->vlan_filters); - SLIST_REMOVE_HEAD(sc->vlan_filters, next); - free(v, M_IXLV); - } - free(sc->vlan_filters, M_IXLV); -} - -char * -ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed) -{ - int index; - - char *speeds[] = { - "Unknown", - "100 Mbps", - "1 Gbps", - "10 Gbps", - "40 Gbps", - "20 Gbps", - "25 Gbps", - }; - - switch (link_speed) { - case VIRTCHNL_LINK_SPEED_100MB: - index = 1; - break; - case VIRTCHNL_LINK_SPEED_1GB: - index = 2; - break; - case VIRTCHNL_LINK_SPEED_10GB: - index = 3; - break; - case VIRTCHNL_LINK_SPEED_40GB: - index = 4; - break; - case VIRTCHNL_LINK_SPEED_20GB: - index = 5; - break; - case VIRTCHNL_LINK_SPEED_25GB: - index = 6; - break; - case VIRTCHNL_LINK_SPEED_UNKNOWN: - default: - index = 0; - break; - } - - return speeds[index]; -} - -static int -ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS) -{ - struct ixlv_sc *sc = (struct ixlv_sc *)arg1; - int error = 0; - - error = sysctl_handle_string(oidp, - ixlv_vc_speed_to_string(sc->link_speed), - 8, req); - return (error); -} - -/* - * Sanity check and save off tunable values. 
- */ -static void -ixlv_save_tunables(struct ixlv_sc *sc) -{ - device_t dev = sc->dev; - - /* Save tunable information */ - sc->dbg_mask = ixlv_core_debug_mask; - sc->hw.debug_mask = ixlv_shared_debug_mask; - sc->vsi.enable_head_writeback = !!(ixlv_enable_head_writeback); - - if (ixlv_tx_itr < 0 || ixlv_tx_itr > IXL_MAX_ITR) { - device_printf(dev, "Invalid tx_itr value of %d set!\n", - ixlv_tx_itr); - device_printf(dev, "tx_itr must be between %d and %d, " - "inclusive\n", - 0, IXL_MAX_ITR); - device_printf(dev, "Using default value of %d instead\n", - IXL_ITR_4K); - sc->tx_itr = IXL_ITR_4K; - } else - sc->tx_itr = ixlv_tx_itr; - - if (ixlv_rx_itr < 0 || ixlv_rx_itr > IXL_MAX_ITR) { - device_printf(dev, "Invalid rx_itr value of %d set!\n", - ixlv_rx_itr); - device_printf(dev, "rx_itr must be between %d and %d, " - "inclusive\n", - 0, IXL_MAX_ITR); - device_printf(dev, "Using default value of %d instead\n", - IXL_ITR_8K); - sc->rx_itr = IXL_ITR_8K; - } else - sc->rx_itr = ixlv_rx_itr; -} - -/* - * Used to set the Tx ITR value for all of the VF's queues. - * Writes to the ITR registers immediately. - */ -static int -ixlv_sysctl_tx_itr(SYSCTL_HANDLER_ARGS) -{ - struct ixlv_sc *sc = (struct ixlv_sc *)arg1; - device_t dev = sc->dev; - int requested_tx_itr; - int error = 0; - - requested_tx_itr = sc->tx_itr; - error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); - if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { - device_printf(dev, - "Invalid TX itr value; value must be between 0 and %d\n", - IXL_MAX_ITR); - return (EINVAL); - } - - sc->tx_itr = requested_tx_itr; - ixlv_configure_tx_itr(sc); - - return (error); -} - -/* - * Used to set the Rx ITR value for all of the VF's queues. - * Writes to the ITR registers immediately. 
- */ -static int -ixlv_sysctl_rx_itr(SYSCTL_HANDLER_ARGS) -{ - struct ixlv_sc *sc = (struct ixlv_sc *)arg1; - device_t dev = sc->dev; - int requested_rx_itr; - int error = 0; - - requested_rx_itr = sc->rx_itr; - error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); - if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { - device_printf(dev, - "Invalid RX itr value; value must be between 0 and %d\n", - IXL_MAX_ITR); - return (EINVAL); - } - - sc->rx_itr = requested_rx_itr; - ixlv_configure_rx_itr(sc); - - return (error); -} - -static int -ixlv_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) -{ - struct ixlv_sc *sc = (struct ixlv_sc *)arg1; - struct ixlv_mac_filter *f; - struct ixlv_vlan_filter *v; - device_t dev = sc->dev; - int ftl_len, ftl_counter = 0, error = 0; - struct sbuf *buf; - - buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); - if (!buf) { - device_printf(dev, "Could not allocate sbuf for output.\n"); - return (ENOMEM); - } - - sbuf_printf(buf, "\n"); - - /* Print MAC filters */ - sbuf_printf(buf, "MAC Filters:\n"); - ftl_len = 0; - SLIST_FOREACH(f, sc->mac_filters, next) - ftl_len++; - if (ftl_len < 1) - sbuf_printf(buf, "(none)\n"); - else { - SLIST_FOREACH(f, sc->mac_filters, next) { - sbuf_printf(buf, - MAC_FORMAT ", flags %#06x\n", - MAC_FORMAT_ARGS(f->macaddr), f->flags); - } - } - - /* Print VLAN filters */ - sbuf_printf(buf, "VLAN Filters:\n"); - ftl_len = 0; - SLIST_FOREACH(v, sc->vlan_filters, next) - ftl_len++; - if (ftl_len < 1) - sbuf_printf(buf, "(none)"); - else { - SLIST_FOREACH(v, sc->vlan_filters, next) { - sbuf_printf(buf, - "%d, flags %#06x", - v->vlan, v->flags); - /* don't print '\n' for last entry */ - if (++ftl_counter != ftl_len) - sbuf_printf(buf, "\n"); - } - } - - error = sbuf_finish(buf); - if (error) - device_printf(dev, "Error finishing sbuf: %d\n", error); - - sbuf_delete(buf); - return (error); -} - -/* - * Print out mapping of TX queue indexes and Rx queue indexes - * to MSI-X vectors. 
- */ -static int -ixlv_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS) -{ - struct ixlv_sc *sc = (struct ixlv_sc *)arg1; - struct ixl_vsi *vsi = &sc->vsi; - device_t dev = sc->dev; - struct sbuf *buf; - int error = 0; - - struct ixl_rx_queue *rx_que = vsi->rx_queues; - struct ixl_tx_queue *tx_que = vsi->tx_queues; - - buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); - if (!buf) { - device_printf(dev, "Could not allocate sbuf for output.\n"); - return (ENOMEM); - } - - sbuf_cat(buf, "\n"); - for (int i = 0; i < vsi->num_rx_queues; i++) { - rx_que = &vsi->rx_queues[i]; - sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix); - } - for (int i = 0; i < vsi->num_tx_queues; i++) { - tx_que = &vsi->tx_queues[i]; - sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix); - } - - error = sbuf_finish(buf); - if (error) - device_printf(dev, "Error finishing sbuf: %d\n", error); - sbuf_delete(buf); - - return (error); -} - -#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING)) -static int -ixlv_sysctl_vf_reset(SYSCTL_HANDLER_ARGS) -{ - struct ixlv_sc *sc = (struct ixlv_sc *)arg1; - int do_reset = 0, error = 0; - - error = sysctl_handle_int(oidp, &do_reset, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); - - if (do_reset == 1) { - ixlv_reset(sc); - if (CTX_ACTIVE(sc->vsi.ctx)) - iflib_request_reset(sc->vsi.ctx); - } - - return (error); -} - -static int -ixlv_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS) -{ - struct ixlv_sc *sc = (struct ixlv_sc *)arg1; - device_t dev = sc->dev; - int do_reset = 0, error = 0; - - error = sysctl_handle_int(oidp, &do_reset, 0, req); - if ((error) || (req->newptr == NULL)) - return (error); - - if (do_reset == 1) { - if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) { - device_printf(dev, "PCIE FLR failed\n"); - error = EIO; - } - else if (CTX_ACTIVE(sc->vsi.ctx)) - iflib_request_reset(sc->vsi.ctx); - } - - return (error); -} -#undef CTX_ACTIVE Property changes on: head/sys/dev/ixl/if_ixlv.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/dev/ixl/ixlvc.c =================================================================== --- head/sys/dev/ixl/ixlvc.c (revision 339361) +++ head/sys/dev/ixl/ixlvc.c (nonexistent) @@ -1,1014 +0,0 @@ -/****************************************************************************** - - Copyright (c) 2013-2018, Intel Corporation - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - POSSIBILITY OF SUCH DAMAGE. - -******************************************************************************/ -/*$FreeBSD$*/ - -/* -** Virtual Channel support -** These are support functions for communication -** between the VF and PF drivers. -*/ - -#include "ixl.h" -#include "ixlv.h" - -/* busy wait delay in msec */ -#define IXLV_BUSY_WAIT_DELAY 10 -#define IXLV_BUSY_WAIT_COUNT 50 - -/* -** ixlv_send_pf_msg -** -** Send message to PF and print status on failure. -*/ -static int -ixlv_send_pf_msg(struct ixlv_sc *sc, - enum virtchnl_ops op, u8 *msg, u16 len) -{ - struct i40e_hw *hw = &sc->hw; - device_t dev = sc->dev; - i40e_status status; - int val_err; - - /* Validating message before sending it to the PF */ - val_err = virtchnl_vc_validate_vf_msg(&sc->version, op, msg, len); - if (val_err) - device_printf(dev, "Error validating msg to PF for op %d," - " msglen %d: error %d\n", op, len, val_err); - - if (!i40e_check_asq_alive(hw)) { - if (op != VIRTCHNL_OP_GET_STATS) - device_printf(dev, "Unable to send opcode %s to PF, " - "ASQ is not alive\n", ixl_vc_opcode_str(op)); - return (0); - } - - if (op != VIRTCHNL_OP_GET_STATS) - ixlv_dbg_vc(sc, - "Sending msg (op=%s[%d]) to PF\n", - ixl_vc_opcode_str(op), op); - - status = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL); - if (status && op != VIRTCHNL_OP_GET_STATS) - device_printf(dev, "Unable to send opcode %s to PF, " - "status %s, aq error %s\n", - ixl_vc_opcode_str(op), - i40e_stat_str(hw, status), - i40e_aq_str(hw, hw->aq.asq_last_status)); - - return (status); -} - -/* -** ixlv_send_api_ver -** -** Send API version admin queue message to the PF. The reply is not checked -** in this function. Returns 0 if the message was successfully -** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. -*/ -int -ixlv_send_api_ver(struct ixlv_sc *sc) -{ - struct virtchnl_version_info vvi; - - vvi.major = VIRTCHNL_VERSION_MAJOR; - vvi.minor = VIRTCHNL_VERSION_MINOR; - - return ixlv_send_pf_msg(sc, VIRTCHNL_OP_VERSION, - (u8 *)&vvi, sizeof(vvi)); -} - -/* -** ixlv_verify_api_ver -** -** Compare API versions with the PF. Must be called after admin queue is -** initialized. Returns 0 if API versions match, EIO if -** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty. 
-*/ -int -ixlv_verify_api_ver(struct ixlv_sc *sc) -{ - struct virtchnl_version_info *pf_vvi; - struct i40e_hw *hw = &sc->hw; - struct i40e_arq_event_info event; - device_t dev = sc->dev; - i40e_status err; - int retries = 0; - - event.buf_len = IXL_AQ_BUF_SZ; - event.msg_buf = malloc(event.buf_len, M_IXLV, M_WAITOK); - - for (;;) { - if (++retries > IXLV_AQ_MAX_ERR) - goto out_alloc; - - /* Initial delay here is necessary */ - i40e_msec_pause(100); - err = i40e_clean_arq_element(hw, &event, NULL); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) - continue; - else if (err) { - err = EIO; - goto out_alloc; - } - - if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != - VIRTCHNL_OP_VERSION) { - DDPRINTF(dev, "Received unexpected op response: %d\n", - le32toh(event.desc.cookie_high)); - /* Don't stop looking for expected response */ - continue; - } - - err = (i40e_status)le32toh(event.desc.cookie_low); - if (err) { - err = EIO; - goto out_alloc; - } else - break; - } - - pf_vvi = (struct virtchnl_version_info *)event.msg_buf; - if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || - ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && - (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) { - device_printf(dev, "Critical PF/VF API version mismatch!\n"); - err = EIO; - } else { - sc->version.major = pf_vvi->major; - sc->version.minor = pf_vvi->minor; - } - - /* Log PF/VF api versions */ - device_printf(dev, "PF API %d.%d / VF API %d.%d\n", - pf_vvi->major, pf_vvi->minor, - VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR); - -out_alloc: - free(event.msg_buf, M_IXLV); - return (err); -} - -/* -** ixlv_send_vf_config_msg -** -** Send VF configuration request admin queue message to the PF. The reply -** is not checked in this function. Returns 0 if the message was -** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. -*/ -int -ixlv_send_vf_config_msg(struct ixlv_sc *sc) -{ - u32 caps; - - caps = VIRTCHNL_VF_OFFLOAD_L2 | - VIRTCHNL_VF_OFFLOAD_RSS_PF | - VIRTCHNL_VF_OFFLOAD_VLAN; - - ixlv_dbg_info(sc, "Sending offload flags: 0x%b\n", - caps, IXLV_PRINTF_VF_OFFLOAD_FLAGS); - - if (sc->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) - return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, - NULL, 0); - else - return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, - (u8 *)&caps, sizeof(caps)); -} - -/* -** ixlv_get_vf_config -** -** Get VF configuration from PF and populate hw structure. Must be called after -** admin queue is initialized. Busy waits until response is received from PF, -** with maximum timeout. Response from PF is returned in the buffer for further -** processing by the caller. 
-*/ -int -ixlv_get_vf_config(struct ixlv_sc *sc) -{ - struct i40e_hw *hw = &sc->hw; - device_t dev = sc->dev; - struct i40e_arq_event_info event; - u16 len; - i40e_status err = 0; - u32 retries = 0; - - /* Note this assumes a single VSI */ - len = sizeof(struct virtchnl_vf_resource) + - sizeof(struct virtchnl_vsi_resource); - event.buf_len = len; - event.msg_buf = malloc(event.buf_len, M_IXLV, M_WAITOK); - - for (;;) { - err = i40e_clean_arq_element(hw, &event, NULL); - if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { - if (++retries <= IXLV_AQ_MAX_ERR) - i40e_msec_pause(10); - } else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != - VIRTCHNL_OP_GET_VF_RESOURCES) { - DDPRINTF(dev, "Received a response from PF," - " opcode %d, error %d", - le32toh(event.desc.cookie_high), - le32toh(event.desc.cookie_low)); - retries++; - continue; - } else { - err = (i40e_status)le32toh(event.desc.cookie_low); - if (err) { - device_printf(dev, "%s: Error returned from PF," - " opcode %d, error %d\n", __func__, - le32toh(event.desc.cookie_high), - le32toh(event.desc.cookie_low)); - err = EIO; - goto out_alloc; - } - /* We retrieved the config message, with no errors */ - break; - } - - if (retries > IXLV_AQ_MAX_ERR) { - INIT_DBG_DEV(dev, "Did not receive response after %d tries.", - retries); - err = ETIMEDOUT; - goto out_alloc; - } - } - - memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len)); - i40e_vf_parse_hw_config(hw, sc->vf_res); - -out_alloc: - free(event.msg_buf, M_IXLV); - return err; -} - -/* -** ixlv_configure_queues -** -** Request that the PF set up our queues. -*/ -int -ixlv_configure_queues(struct ixlv_sc *sc) -{ - device_t dev = sc->dev; - struct ixl_vsi *vsi = &sc->vsi; - if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx); - struct ixl_tx_queue *tx_que = vsi->tx_queues; - struct ixl_rx_queue *rx_que = vsi->rx_queues; - struct tx_ring *txr; - struct rx_ring *rxr; - int len, pairs; - - struct virtchnl_vsi_queue_config_info *vqci; - struct virtchnl_queue_pair_info *vqpi; - - /* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX - * queues of a pair need to be configured */ - pairs = max(vsi->num_tx_queues, vsi->num_rx_queues); - len = sizeof(struct virtchnl_vsi_queue_config_info) + - (sizeof(struct virtchnl_queue_pair_info) * pairs); - vqci = malloc(len, M_IXLV, M_NOWAIT | M_ZERO); - if (!vqci) { - device_printf(dev, "%s: unable to allocate memory\n", __func__); - return (ENOMEM); - } - vqci->vsi_id = sc->vsi_res->vsi_id; - vqci->num_queue_pairs = pairs; - vqpi = vqci->qpair; - /* Size check is not needed here - HW max is 16 queue pairs, and we - * can fit info for 31 of them into the AQ buffer before it overflows. 
- */ - // TODO: the above is wrong now; X722 VFs can have 256 queues - for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) { - txr = &tx_que->txr; - rxr = &rx_que->rxr; - - vqpi->txq.vsi_id = vqci->vsi_id; - vqpi->txq.queue_id = i; - vqpi->txq.ring_len = scctx->isc_ntxd[0]; - vqpi->txq.dma_ring_addr = txr->tx_paddr; - /* Enable Head writeback */ - if (!vsi->enable_head_writeback) { - vqpi->txq.headwb_enabled = 0; - vqpi->txq.dma_headwb_addr = 0; - } else { - vqpi->txq.headwb_enabled = 1; - vqpi->txq.dma_headwb_addr = txr->tx_paddr + - sizeof(struct i40e_tx_desc) * scctx->isc_ntxd[0]; - } - - vqpi->rxq.vsi_id = vqci->vsi_id; - vqpi->rxq.queue_id = i; - vqpi->rxq.ring_len = scctx->isc_nrxd[0]; - vqpi->rxq.dma_ring_addr = rxr->rx_paddr; - vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size; - vqpi->rxq.databuffer_size = rxr->mbuf_sz; - vqpi->rxq.splithdr_enabled = 0; - } - - ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES, - (u8 *)vqci, len); - free(vqci, M_IXLV); - - return (0); -} - -/* -** ixlv_enable_queues -** -** Request that the PF enable all of our queues. -*/ -int -ixlv_enable_queues(struct ixlv_sc *sc) -{ - struct virtchnl_queue_select vqs; - - vqs.vsi_id = sc->vsi_res->vsi_id; - /* XXX: In Linux PF, as long as neither of these is 0, - * every queue in VF VSI is enabled. */ - vqs.tx_queues = (1 << sc->vsi.num_tx_queues) - 1; - vqs.rx_queues = vqs.tx_queues; - ixlv_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); - return (0); -} - -/* -** ixlv_disable_queues -** -** Request that the PF disable all of our queues. -*/ -int -ixlv_disable_queues(struct ixlv_sc *sc) -{ - struct virtchnl_queue_select vqs; - - vqs.vsi_id = sc->vsi_res->vsi_id; - /* XXX: In Linux PF, as long as neither of these is 0, - * every queue in VF VSI is disabled. */ - vqs.tx_queues = (1 << sc->vsi.num_tx_queues) - 1; - vqs.rx_queues = vqs.tx_queues; - ixlv_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES, - (u8 *)&vqs, sizeof(vqs)); - return (0); -} - -/* -** ixlv_map_queues -** -** Request that the PF map queues to interrupt vectors. Misc causes, including -** admin queue, are always mapped to vector 0. -*/ -int -ixlv_map_queues(struct ixlv_sc *sc) -{ - struct virtchnl_irq_map_info *vm; - int i, q, len; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_rx_queue *rx_que = vsi->rx_queues; - if_softc_ctx_t scctx = vsi->shared; - device_t dev = sc->dev; - - // XXX: What happens if we only get 1 MSI-X vector? - MPASS(scctx->isc_vectors > 1); - - /* How many queue vectors, adminq uses one */ - // XXX: How do we know how many interrupt vectors we have? 
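- /* Note: iflib records the number of MSI-X vectors it allocated for the - * driver in scctx->isc_vectors (the same count is passed to - * ixlv_if_msix_intr_assign() as its msix argument), so everything beyond - * the adminq vector here is a queue vector. */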
- q = scctx->isc_vectors - 1; - - len = sizeof(struct virtchnl_irq_map_info) + - (scctx->isc_vectors * sizeof(struct virtchnl_vector_map)); - vm = malloc(len, M_IXLV, M_NOWAIT); - if (!vm) { - device_printf(dev, "%s: unable to allocate memory\n", __func__); - return (ENOMEM); - } - - vm->num_vectors = scctx->isc_vectors; - /* Queue vectors first */ - for (i = 0; i < q; i++, rx_que++) { - vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; - vm->vecmap[i].vector_id = i + 1; /* first is adminq */ - // TODO: Re-examine this - vm->vecmap[i].txq_map = (1 << rx_que->rxr.me); - vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me); - vm->vecmap[i].rxitr_idx = 0; - vm->vecmap[i].txitr_idx = 1; - } - - /* Misc vector last - this is only for AdminQ messages */ - vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; - vm->vecmap[i].vector_id = 0; - vm->vecmap[i].txq_map = 0; - vm->vecmap[i].rxq_map = 0; - vm->vecmap[i].rxitr_idx = 0; - vm->vecmap[i].txitr_idx = 0; - - ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP, - (u8 *)vm, len); - free(vm, M_IXLV); - - return (0); -} - -/* -** Scan the Filter List looking for vlans that need -** to be added, then create the data to hand to the AQ -** for handling. -*/ -int -ixlv_add_vlans(struct ixlv_sc *sc) -{ - struct virtchnl_vlan_filter_list *v; - struct ixlv_vlan_filter *f, *ftmp; - device_t dev = sc->dev; - int len, i = 0, cnt = 0; - - /* Get count of VLAN filters to add */ - SLIST_FOREACH(f, sc->vlan_filters, next) { - if (f->flags & IXL_FILTER_ADD) - cnt++; - } - - if (!cnt) /* no work... */ - return (ENOENT); - - len = sizeof(struct virtchnl_vlan_filter_list) + - (cnt * sizeof(u16)); - - if (len > IXL_AQ_BUF_SZ) { - device_printf(dev, "%s: Exceeded Max AQ Buf size\n", - __func__); - return (EFBIG); - } - - v = malloc(len, M_IXLV, M_NOWAIT); - if (!v) { - device_printf(dev, "%s: unable to allocate memory\n", - __func__); - return (ENOMEM); - } - - v->vsi_id = sc->vsi_res->vsi_id; - v->num_elements = cnt; - - /* Scan the filter array */ - SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { - if (f->flags & IXL_FILTER_ADD) { - bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); - f->flags = IXL_FILTER_USED; - i++; - } - if (i == cnt) - break; - } - - ixlv_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); - free(v, M_IXLV); - /* add stats? */ - return (0); -} - -/* -** Scan the Filter Table looking for vlans that need -** to be removed, then create the data to hand to the AQ -** for handling. -*/ -int -ixlv_del_vlans(struct ixlv_sc *sc) -{ - struct virtchnl_vlan_filter_list *v; - struct ixlv_vlan_filter *f, *ftmp; - device_t dev = sc->dev; - int len, i = 0, cnt = 0; - - /* Get count of VLAN filters to delete */ - SLIST_FOREACH(f, sc->vlan_filters, next) { - if (f->flags & IXL_FILTER_DEL) - cnt++; - } - - if (!cnt) /* no work... 
*/ - return (ENOENT); - - len = sizeof(struct virtchnl_vlan_filter_list) + - (cnt * sizeof(u16)); - - if (len > IXL_AQ_BUF_SZ) { - device_printf(dev, "%s: Exceeded Max AQ Buf size\n", - __func__); - return (EFBIG); - } - - v = malloc(len, M_IXLV, M_NOWAIT | M_ZERO); - if (!v) { - device_printf(dev, "%s: unable to allocate memory\n", - __func__); - return (ENOMEM); - } - - v->vsi_id = sc->vsi_res->vsi_id; - v->num_elements = cnt; - - /* Scan the filter array */ - SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { - if (f->flags & IXL_FILTER_DEL) { - bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); - i++; - SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next); - free(f, M_IXLV); - } - if (i == cnt) - break; - } - - ixlv_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); - free(v, M_IXLV); - /* add stats? */ - return (0); -} - - -/* -** This routine takes additions to the vsi filter -** table and creates an Admin Queue call to create -** the filters in the hardware. -*/ -int -ixlv_add_ether_filters(struct ixlv_sc *sc) -{ - struct virtchnl_ether_addr_list *a; - struct ixlv_mac_filter *f; - device_t dev = sc->dev; - int len, j = 0, cnt = 0; - enum i40e_status_code status; - - /* Get count of MAC addresses to add */ - SLIST_FOREACH(f, sc->mac_filters, next) { - if (f->flags & IXL_FILTER_ADD) - cnt++; - } - if (cnt == 0) { /* Should not happen... */ - ixlv_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__); - return (ENOENT); - } - - len = sizeof(struct virtchnl_ether_addr_list) + - (cnt * sizeof(struct virtchnl_ether_addr)); - - a = malloc(len, M_IXLV, M_NOWAIT | M_ZERO); - if (a == NULL) { - device_printf(dev, "%s: Failed to get memory for " - "virtchnl_ether_addr_list\n", __func__); - return (ENOMEM); - } - a->vsi_id = sc->vsi.id; - a->num_elements = cnt; - - /* Scan the filter array */ - SLIST_FOREACH(f, sc->mac_filters, next) { - if (f->flags & IXL_FILTER_ADD) { - bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN); - f->flags &= ~IXL_FILTER_ADD; - j++; - - ixlv_dbg_vc(sc, "ADD: " MAC_FORMAT "\n", - MAC_FORMAT_ARGS(f->macaddr)); - } - if (j == cnt) - break; - } - DDPRINTF(dev, "len %d, j %d, cnt %d", - len, j, cnt); - - status = ixlv_send_pf_msg(sc, - VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len); - /* add stats? */ - free(a, M_IXLV); - return (status); -} - -/* -** This routine takes filters flagged for deletion in the -** sc MAC filter list and creates an Admin Queue call -** to delete those filters in the hardware. 
-*/ -int -ixlv_del_ether_filters(struct ixlv_sc *sc) -{ - struct virtchnl_ether_addr_list *d; - struct ixlv_mac_filter *f, *f_temp; - device_t dev = sc->dev; - int len, j = 0, cnt = 0; - - /* Get count of MAC addresses to delete */ - SLIST_FOREACH(f, sc->mac_filters, next) { - if (f->flags & IXL_FILTER_DEL) - cnt++; - } - if (cnt == 0) { - ixlv_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__); - return (ENOENT); - } - - len = sizeof(struct virtchnl_ether_addr_list) + - (cnt * sizeof(struct virtchnl_ether_addr)); - - d = malloc(len, M_IXLV, M_NOWAIT | M_ZERO); - if (d == NULL) { - device_printf(dev, "%s: Failed to get memory for " - "virtchnl_ether_addr_list\n", __func__); - return (ENOMEM); - } - d->vsi_id = sc->vsi.id; - d->num_elements = cnt; - - /* Scan the filter array */ - SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) { - if (f->flags & IXL_FILTER_DEL) { - bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN); - ixlv_dbg_vc(sc, "DEL: " MAC_FORMAT "\n", - MAC_FORMAT_ARGS(f->macaddr)); - j++; - SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next); - free(f, M_IXLV); - } - if (j == cnt) - break; - } - ixlv_send_pf_msg(sc, - VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len); - /* add stats? */ - free(d, M_IXLV); - return (0); -} - -/* -** ixlv_request_reset -** Request that the PF reset this VF. No response is expected. -*/ -int -ixlv_request_reset(struct ixlv_sc *sc) -{ - /* - ** Set the reset status to "in progress" before - ** the request, this avoids any possibility of - ** a mistaken early detection of completion. - */ - wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS); - ixlv_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0); - return (0); -} - -/* -** ixlv_request_stats -** Request the statistics for this VF's VSI from PF. -*/ -int -ixlv_request_stats(struct ixlv_sc *sc) -{ - struct virtchnl_queue_select vqs; - int error = 0; - - vqs.vsi_id = sc->vsi_res->vsi_id; - /* Low priority, we don't need to error check */ - error = ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS, - (u8 *)&vqs, sizeof(vqs)); - if (error) - device_printf(sc->dev, "Error sending stats request to PF: %d\n", error); - - return (0); -} - -/* -** Updates driver's stats counters with VSI stats returned from PF. 
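**
** (On ixlv_request_reset() above: the VIRTCHNL_VFR_INPROGRESS write exists
** so a later poll of I40E_VFGEN_RSTAT can never read a stale "completed"
** value from a previous reset. The polling side, sketched here with a
** mask name that is an assumption from this driver family, lives in the
** reset/attach path rather than in this file:
**
**	reg = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK;
**	if (reg == VIRTCHNL_VFR_VFACTIVE || reg == VIRTCHNL_VFR_COMPLETED)
**		return (0);	/* reset finished */
**
** repeated with a pause until it succeeds or a retry budget runs out.)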
-*/ -void -ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) -{ - struct ixl_vsi *vsi = &sc->vsi; - uint64_t tx_discards; - - tx_discards = es->tx_discards; -#if 0 - for (int i = 0; i < vsi->num_queues; i++) - tx_discards += sc->vsi.queues[i].txr.br->br_drops; -#endif - - /* Update ifnet stats */ - IXL_SET_IPACKETS(vsi, es->rx_unicast + - es->rx_multicast + - es->rx_broadcast); - IXL_SET_OPACKETS(vsi, es->tx_unicast + - es->tx_multicast + - es->tx_broadcast); - IXL_SET_IBYTES(vsi, es->rx_bytes); - IXL_SET_OBYTES(vsi, es->tx_bytes); - IXL_SET_IMCASTS(vsi, es->rx_multicast); - IXL_SET_OMCASTS(vsi, es->tx_multicast); - - IXL_SET_OERRORS(vsi, es->tx_errors); - IXL_SET_IQDROPS(vsi, es->rx_discards); - IXL_SET_OQDROPS(vsi, tx_discards); - IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); - IXL_SET_COLLISIONS(vsi, 0); - - vsi->eth_stats = *es; -} - -int -ixlv_config_rss_key(struct ixlv_sc *sc) -{ - struct virtchnl_rss_key *rss_key_msg; - int msg_len, key_length; - u8 rss_seed[IXL_RSS_KEY_SIZE]; - -#ifdef RSS - /* Fetch the configured RSS key */ - rss_getkey((uint8_t *) &rss_seed); -#else - ixl_get_default_rss_key((u32 *)rss_seed); -#endif - - /* Send the fetched key */ - key_length = IXL_RSS_KEY_SIZE; - msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1; - rss_key_msg = malloc(msg_len, M_IXLV, M_NOWAIT | M_ZERO); - if (rss_key_msg == NULL) { - device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n"); - return (ENOMEM); - } - - rss_key_msg->vsi_id = sc->vsi_res->vsi_id; - rss_key_msg->key_len = key_length; - bcopy(rss_seed, &rss_key_msg->key[0], key_length); - - ixlv_dbg_vc(sc, "config_rss: vsi_id %d, key_len %d\n", - rss_key_msg->vsi_id, rss_key_msg->key_len); - - ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY, - (u8 *)rss_key_msg, msg_len); - - free(rss_key_msg, M_IXLV); - return (0); -} - -int -ixlv_set_rss_hena(struct ixlv_sc *sc) -{ - struct virtchnl_rss_hena hena; - struct i40e_hw *hw = &sc->hw; - - if (hw->mac.type == I40E_MAC_X722_VF) - hena.hena = IXL_DEFAULT_RSS_HENA_X722; - else - hena.hena = IXL_DEFAULT_RSS_HENA_XL710; - - ixlv_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA, - (u8 *)&hena, sizeof(hena)); - return (0); -} - -int -ixlv_config_rss_lut(struct ixlv_sc *sc) -{ - struct virtchnl_rss_lut *rss_lut_msg; - int msg_len; - u16 lut_length; - u32 lut; - int i, que_id; - - lut_length = IXL_RSS_VSI_LUT_SIZE; - msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1; - rss_lut_msg = malloc(msg_len, M_IXLV, M_NOWAIT | M_ZERO); - if (rss_lut_msg == NULL) { - device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n"); - return (ENOMEM); - } - - rss_lut_msg->vsi_id = sc->vsi_res->vsi_id; - /* Each LUT entry is a max of 1 byte, so this is easy */ - rss_lut_msg->lut_entries = lut_length; - - /* Populate the LUT with max no. of queues in round robin fashion */ - for (i = 0; i < lut_length; i++) { -#ifdef RSS - /* - * Fetch the RSS bucket id for the given indirection entry. - * Cap it at the number of configured buckets (which is - * num_queues.) 
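 *
 * (On the msg_len computations in both RSS routines: the trailing "- 1"
 * is there because virtchnl_rss_key and virtchnl_rss_lut end in a
 * one-element array, so sizeof() already counts one payload byte:
 *
 *	msg_len = sizeof(struct virtchnl_rss_key)
 *	    + sizeof(u8) * key_length - 1;
 *
 * Dropping the "- 1" would merely over-allocate by one byte, but the
 * length sent to the PF would then disagree with key_len/lut_entries.)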
- */ - que_id = rss_get_indirection_to_bucket(i); - que_id = que_id % sc->vsi.num_rx_queues; -#else - que_id = i % sc->vsi.num_rx_queues; -#endif - lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK; - rss_lut_msg->lut[i] = lut; - } - - ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT, - (u8 *)rss_lut_msg, msg_len); - - free(rss_lut_msg, M_IXLV); - return (0); -} - -int -ixlv_config_promisc_mode(struct ixlv_sc *sc) -{ - struct virtchnl_promisc_info pinfo; - - pinfo.vsi_id = sc->vsi_res->vsi_id; - pinfo.flags = sc->promisc_flags; - - ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, - (u8 *)&pinfo, sizeof(pinfo)); - return (0); -} - -/* -** ixlv_vc_completion -** -** Asynchronous completion function for admin queue messages. Rather than busy -** wait, we fire off our requests and assume that no errors will be returned. -** This function handles the reply messages. -*/ -void -ixlv_vc_completion(struct ixlv_sc *sc, - enum virtchnl_ops v_opcode, - enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) -{ - device_t dev = sc->dev; - - if (v_opcode != VIRTCHNL_OP_GET_STATS) - ixlv_dbg_vc(sc, "%s: opcode %s\n", __func__, - ixl_vc_opcode_str(v_opcode)); - - if (v_opcode == VIRTCHNL_OP_EVENT) { - struct virtchnl_pf_event *vpe = - (struct virtchnl_pf_event *)msg; - - switch (vpe->event) { - case VIRTCHNL_EVENT_LINK_CHANGE: - ixlv_dbg_vc(sc, "Link change: status %d, speed %s\n", - vpe->event_data.link_event.link_status, - ixlv_vc_speed_to_string(vpe->event_data.link_event.link_speed)); - sc->link_up = - vpe->event_data.link_event.link_status; - sc->link_speed = - vpe->event_data.link_event.link_speed; - ixlv_update_link_status(sc); - break; - case VIRTCHNL_EVENT_RESET_IMPENDING: - device_printf(dev, "PF initiated reset!\n"); - sc->init_state = IXLV_RESET_PENDING; - ixlv_if_init(sc->vsi.ctx); - break; - default: - ixlv_dbg_vc(sc, "Unknown event %d from AQ\n", - vpe->event); - break; - } - - return; - } - - /* Catch-all error response */ - if (v_retval) { - device_printf(dev, - "%s: AQ returned error %s to our request %s!\n", - __func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode)); - } - - switch (v_opcode) { - case VIRTCHNL_OP_GET_STATS: - ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg); - break; - case VIRTCHNL_OP_ADD_ETH_ADDR: - if (v_retval) { - device_printf(dev, "WARNING: Error adding VF mac filter!\n"); - device_printf(dev, "WARNING: Device may not receive traffic!\n"); - } - break; - case VIRTCHNL_OP_DEL_ETH_ADDR: - break; - case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - break; - case VIRTCHNL_OP_ADD_VLAN: - break; - case VIRTCHNL_OP_DEL_VLAN: - break; - case VIRTCHNL_OP_ENABLE_QUEUES: - atomic_store_rel_32(&sc->queues_enabled, 1); - wakeup_one(&sc->enable_queues_chan); - break; - case VIRTCHNL_OP_DISABLE_QUEUES: - atomic_store_rel_32(&sc->queues_enabled, 0); - wakeup_one(&sc->disable_queues_chan); - break; - case VIRTCHNL_OP_CONFIG_VSI_QUEUES: - break; - case VIRTCHNL_OP_CONFIG_IRQ_MAP: - break; - case VIRTCHNL_OP_CONFIG_RSS_KEY: - break; - case VIRTCHNL_OP_SET_RSS_HENA: - break; - case VIRTCHNL_OP_CONFIG_RSS_LUT: - break; - default: - ixlv_dbg_vc(sc, - "Received unexpected message %s from PF.\n", - ixl_vc_opcode_str(v_opcode)); - break; - } -} - -int -ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request) -{ - - switch (request) { - case IXLV_FLAG_AQ_MAP_VECTORS: - return ixlv_map_queues(sc); - - case IXLV_FLAG_AQ_ADD_MAC_FILTER: - return ixlv_add_ether_filters(sc); - - case IXLV_FLAG_AQ_ADD_VLAN_FILTER: - return ixlv_add_vlans(sc); - - case 
IXLV_FLAG_AQ_DEL_MAC_FILTER: - return ixlv_del_ether_filters(sc); - - case IXLV_FLAG_AQ_DEL_VLAN_FILTER: - return ixlv_del_vlans(sc); - - case IXLV_FLAG_AQ_CONFIGURE_QUEUES: - return ixlv_configure_queues(sc); - - case IXLV_FLAG_AQ_DISABLE_QUEUES: - return ixlv_disable_queues(sc); - - case IXLV_FLAG_AQ_ENABLE_QUEUES: - return ixlv_enable_queues(sc); - - case IXLV_FLAG_AQ_CONFIG_RSS_KEY: - return ixlv_config_rss_key(sc); - - case IXLV_FLAG_AQ_SET_RSS_HENA: - return ixlv_set_rss_hena(sc); - - case IXLV_FLAG_AQ_CONFIG_RSS_LUT: - return ixlv_config_rss_lut(sc); - - case IXLV_FLAG_AQ_CONFIGURE_PROMISC: - return ixlv_config_promisc_mode(sc); - } - - return (0); -} - -void * -ixl_vc_get_op_chan(struct ixlv_sc *sc, uint32_t request) -{ - switch (request) { - case IXLV_FLAG_AQ_ENABLE_QUEUES: - return (&sc->enable_queues_chan); - case IXLV_FLAG_AQ_DISABLE_QUEUES: - return (&sc->disable_queues_chan); - default: - return (NULL); - } -} Property changes on: head/sys/dev/ixl/ixlvc.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/dev/ixl/ixlv.h =================================================================== --- head/sys/dev/ixl/ixlv.h (revision 339361) +++ head/sys/dev/ixl/ixlv.h (nonexistent) @@ -1,218 +0,0 @@ -/****************************************************************************** - - Copyright (c) 2013-2018, Intel Corporation - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - POSSIBILITY OF SUCH DAMAGE. 
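
(On ixl_vc_send_cmd() and ixl_vc_get_op_chan() above: they are two halves
of a fire-then-sleep protocol. Only the enable/disable-queue requests
expose a wait channel, and ixlv_vc_completion() wakes it with
wakeup_one(). A sketch of how a caller pairs them; the real pairing is
done by the iavf_send_vc_msg_sleep() helper added later in this change,
and the wmesg string here is invented:

	chan = ixl_vc_get_op_chan(sc, request);
	error = ixl_vc_send_cmd(sc, request);
	if (error == 0 && chan != NULL)
		error = tsleep(chan, 0, "ixlvvc", IXLV_AQ_TIMEOUT);

tsleep(9) returns 0 when woken and EWOULDBLOCK when the timeout expires.)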
- -******************************************************************************/ -/*$FreeBSD$*/ - - -#ifndef _IXLV_H_ -#define _IXLV_H_ - -#include "ixl.h" - -#define IXLV_AQ_MAX_ERR 200 -#define IXLV_MAX_FILTERS 128 -#define IXLV_MAX_QUEUES 16 -#define IXLV_AQ_TIMEOUT (1 * hz) - -#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0) -#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) -#define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) -#define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) -#define IXLV_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) -#define IXLV_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) -#define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) -#define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) -#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) -#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9) -#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10) -#define IXLV_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11) -#define IXLV_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12) -#define IXLV_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13) -#define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14) - -/* printf %b flag args */ -#define IXLV_FLAGS \ - "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \ - "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \ - "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \ - "\12CONFIGURE_PROMISC\13GET_STATS\14CONFIG_RSS_KEY" \ - "\15SET_RSS_HENA\16GET_RSS_HENA_CAPS\17CONFIG_RSS_LUT" -#define IXLV_PRINTF_VF_OFFLOAD_FLAGS \ - "\20\1L2" \ - "\2IWARP" \ - "\3RSVD" \ - "\4RSS_AQ" \ - "\5RSS_REG" \ - "\6WB_ON_ITR" \ - "\7REQ_QUEUES" \ - "\21VLAN" \ - "\22RX_POLLING" \ - "\23RSS_PCTYPE_V2" \ - "\24RSS_PF" \ - "\25ENCAP" \ - "\26ENCAP_CSUM" \ - "\27RX_ENCAP_CSUM" - -MALLOC_DECLARE(M_IXLV); - -/* Driver state */ -enum ixlv_state_t { - IXLV_RESET_REQUIRED, - IXLV_RESET_PENDING, - IXLV_INIT_READY, - IXLV_RUNNING, -}; - -/* Structs */ - -struct ixlv_mac_filter { - SLIST_ENTRY(ixlv_mac_filter) next; - u8 macaddr[ETHER_ADDR_LEN]; - u16 flags; -}; -SLIST_HEAD(mac_list, ixlv_mac_filter); - -struct ixlv_vlan_filter { - SLIST_ENTRY(ixlv_vlan_filter) next; - u16 vlan; - u16 flags; -}; -SLIST_HEAD(vlan_list, ixlv_vlan_filter); - -/* Software controller structure */ -struct ixlv_sc { - struct ixl_vsi vsi; - - struct i40e_hw hw; - struct i40e_osdep osdep; - device_t dev; - - struct resource *pci_mem; - - enum ixlv_state_t init_state; - - struct ifmedia media; - struct virtchnl_version_info version; - enum ixl_dbg_mask dbg_mask; - u16 promisc_flags; - - bool link_up; - enum virtchnl_link_speed link_speed; - - /* Tunable settings */ - int tx_itr; - int rx_itr; - int dynamic_tx_itr; - int dynamic_rx_itr; - - /* Filter lists */ - struct mac_list *mac_filters; - struct vlan_list *vlan_filters; - - /* Virtual comm channel */ - struct virtchnl_vf_resource *vf_res; - struct virtchnl_vsi_resource *vsi_res; - - /* Misc stats maintained by the driver */ - u64 admin_irq; - - /* Buffer used for reading AQ responses */ - u8 aq_buffer[IXL_AQ_BUF_SZ]; - - /* State flag used in init/stop */ - u32 queues_enabled; - u8 enable_queues_chan; - u8 disable_queues_chan; -}; - -/* -** This checks for a zero mac addr, something that will be likely -** unless the Admin on the Host has created one. -*/ -static inline bool -ixlv_check_ether_addr(u8 *addr) -{ - bool status = TRUE; - - if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 && - addr[3] == 0 && addr[4]== 0 && addr[5] == 0)) - status = FALSE; - return (status); -} - -/* Debug printing */ -#define ixlv_dbg(sc, m, s, ...) 
ixl_debug_core(sc->dev, sc->dbg_mask, m, s, ##__VA_ARGS__) -#define ixlv_dbg_init(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IXLV_DBG_INIT, s, ##__VA_ARGS__) -#define ixlv_dbg_info(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IXLV_DBG_INFO, s, ##__VA_ARGS__) -#define ixlv_dbg_vc(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IXLV_DBG_VC, s, ##__VA_ARGS__) -#define ixlv_dbg_filter(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IXLV_DBG_FILTER, s, ##__VA_ARGS__) - -/* -** VF Common function prototypes -*/ -void ixlv_if_init(if_ctx_t ctx); - -int ixlv_send_api_ver(struct ixlv_sc *); -int ixlv_verify_api_ver(struct ixlv_sc *); -int ixlv_send_vf_config_msg(struct ixlv_sc *); -int ixlv_get_vf_config(struct ixlv_sc *); -void ixlv_init(void *); -int ixlv_reinit_locked(struct ixlv_sc *); -int ixlv_configure_queues(struct ixlv_sc *); -int ixlv_enable_queues(struct ixlv_sc *); -int ixlv_disable_queues(struct ixlv_sc *); -int ixlv_map_queues(struct ixlv_sc *); -void ixlv_enable_intr(struct ixl_vsi *); -void ixlv_disable_intr(struct ixl_vsi *); -int ixlv_add_ether_filters(struct ixlv_sc *); -int ixlv_del_ether_filters(struct ixlv_sc *); -int ixlv_request_stats(struct ixlv_sc *); -int ixlv_request_reset(struct ixlv_sc *); -void ixlv_vc_completion(struct ixlv_sc *, - enum virtchnl_ops, enum virtchnl_status_code, - u8 *, u16); -int ixlv_add_ether_filter(struct ixlv_sc *); -int ixlv_add_vlans(struct ixlv_sc *); -int ixlv_del_vlans(struct ixlv_sc *); -void ixlv_update_stats_counters(struct ixlv_sc *, - struct i40e_eth_stats *); -void ixlv_update_link_status(struct ixlv_sc *); -int ixlv_get_default_rss_key(u32 *, bool); -int ixlv_config_rss_key(struct ixlv_sc *); -int ixlv_set_rss_hena(struct ixlv_sc *); -int ixlv_config_rss_lut(struct ixlv_sc *); -int ixlv_config_promisc_mode(struct ixlv_sc *); - -int ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request); -char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed); -void *ixl_vc_get_op_chan(struct ixlv_sc *sc, uint32_t request); -#endif /* _IXLV_H_ */ Property changes on: head/sys/dev/ixl/ixlv.h ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/dev/ixl/iavf.h =================================================================== --- head/sys/dev/ixl/iavf.h (nonexistent) +++ head/sys/dev/ixl/iavf.h (revision 339362) @@ -0,0 +1,218 @@ +/****************************************************************************** + + Copyright (c) 2013-2018, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
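
(The IXLV_FLAGS string above, and its IAVF_FLAGS twin below, are printf(9)
%b descriptors: the leading \20 selects hexadecimal output and each
\<n>NAME pair labels bit n, counted from 1. For example:

	/* prints: flags 0x5<ENABLE_QUEUES,ADD_MAC_FILTER> */
	device_printf(dev, "flags %b\n", 0x5, IAVF_FLAGS);

which is how a pending-request bitmask can be logged readably.)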
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + + +#ifndef _IAVF_H_ +#define _IAVF_H_ + +#include "ixl.h" + +#define IAVF_AQ_MAX_ERR 200 +#define IAVF_MAX_FILTERS 128 +#define IAVF_MAX_QUEUES 16 +#define IAVF_AQ_TIMEOUT (1 * hz) + +#define IAVF_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0) +#define IAVF_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) +#define IAVF_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) +#define IAVF_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) +#define IAVF_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) +#define IAVF_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) +#define IAVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) +#define IAVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) +#define IAVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) +#define IAVF_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9) +#define IAVF_FLAG_AQ_GET_STATS (u32)(1 << 10) +#define IAVF_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11) +#define IAVF_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12) +#define IAVF_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13) +#define IAVF_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14) + +/* printf %b flag args */ +#define IAVF_FLAGS \ + "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \ + "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \ + "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \ + "\12CONFIGURE_PROMISC\13GET_STATS\14CONFIG_RSS_KEY" \ + "\15SET_RSS_HENA\16GET_RSS_HENA_CAPS\17CONFIG_RSS_LUT" +#define IAVF_PRINTF_VF_OFFLOAD_FLAGS \ + "\20\1L2" \ + "\2IWARP" \ + "\3RSVD" \ + "\4RSS_AQ" \ + "\5RSS_REG" \ + "\6WB_ON_ITR" \ + "\7REQ_QUEUES" \ + "\21VLAN" \ + "\22RX_POLLING" \ + "\23RSS_PCTYPE_V2" \ + "\24RSS_PF" \ + "\25ENCAP" \ + "\26ENCAP_CSUM" \ + "\27RX_ENCAP_CSUM" + +MALLOC_DECLARE(M_IAVF); + +/* Driver state */ +enum iavf_state_t { + IAVF_RESET_REQUIRED, + IAVF_RESET_PENDING, + IAVF_INIT_READY, + IAVF_RUNNING, +}; + +/* Structs */ + +struct iavf_mac_filter { + SLIST_ENTRY(iavf_mac_filter) next; + u8 macaddr[ETHER_ADDR_LEN]; + u16 flags; +}; +SLIST_HEAD(mac_list, iavf_mac_filter); + +struct iavf_vlan_filter { + SLIST_ENTRY(iavf_vlan_filter) next; + u16 vlan; + u16 flags; +}; +SLIST_HEAD(vlan_list, iavf_vlan_filter); + +/* Software controller structure */ +struct iavf_sc { + struct ixl_vsi vsi; + + struct i40e_hw hw; + struct i40e_osdep osdep; + device_t dev; + + struct resource *pci_mem; + + enum iavf_state_t init_state; + + struct ifmedia media; + struct virtchnl_version_info version; + enum ixl_dbg_mask dbg_mask; + u16 promisc_flags; + + bool link_up; + enum virtchnl_link_speed link_speed; + + /* Tunable settings */ + int tx_itr; + int rx_itr; + int dynamic_tx_itr; + int dynamic_rx_itr; + + /* Filter lists */ + struct mac_list *mac_filters; + struct vlan_list *vlan_filters; + + /* Virtual comm channel */ + struct virtchnl_vf_resource *vf_res; + struct virtchnl_vsi_resource *vsi_res; + + /* Misc stats maintained by the driver */ + u64 admin_irq; + + /* Buffer used for reading AQ responses */ + u8 aq_buffer[IXL_AQ_BUF_SZ]; + + /* State flag used in init/stop */ + u32 queues_enabled; + u8 
enable_queues_chan; + u8 disable_queues_chan; +}; + +/* +** This checks for a zero mac addr, something that will be likely +** unless the Admin on the Host has created one. +*/ +static inline bool +iavf_check_ether_addr(u8 *addr) +{ + bool status = TRUE; + + if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 && + addr[3] == 0 && addr[4]== 0 && addr[5] == 0)) + status = FALSE; + return (status); +} + +/* Debug printing */ +#define iavf_dbg(sc, m, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, m, s, ##__VA_ARGS__) +#define iavf_dbg_init(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_INIT, s, ##__VA_ARGS__) +#define iavf_dbg_info(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_INFO, s, ##__VA_ARGS__) +#define iavf_dbg_vc(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_VC, s, ##__VA_ARGS__) +#define iavf_dbg_filter(sc, s, ...) ixl_debug_core(sc->dev, sc->dbg_mask, IAVF_DBG_FILTER, s, ##__VA_ARGS__) + +/* +** VF Common function prototypes +*/ +void iavf_if_init(if_ctx_t ctx); + +int iavf_send_api_ver(struct iavf_sc *); +int iavf_verify_api_ver(struct iavf_sc *); +int iavf_send_vf_config_msg(struct iavf_sc *); +int iavf_get_vf_config(struct iavf_sc *); +void iavf_init(void *); +int iavf_reinit_locked(struct iavf_sc *); +int iavf_configure_queues(struct iavf_sc *); +int iavf_enable_queues(struct iavf_sc *); +int iavf_disable_queues(struct iavf_sc *); +int iavf_map_queues(struct iavf_sc *); +void iavf_enable_intr(struct ixl_vsi *); +void iavf_disable_intr(struct ixl_vsi *); +int iavf_add_ether_filters(struct iavf_sc *); +int iavf_del_ether_filters(struct iavf_sc *); +int iavf_request_stats(struct iavf_sc *); +int iavf_request_reset(struct iavf_sc *); +void iavf_vc_completion(struct iavf_sc *, + enum virtchnl_ops, enum virtchnl_status_code, + u8 *, u16); +int iavf_add_ether_filter(struct iavf_sc *); +int iavf_add_vlans(struct iavf_sc *); +int iavf_del_vlans(struct iavf_sc *); +void iavf_update_stats_counters(struct iavf_sc *, + struct i40e_eth_stats *); +void iavf_update_link_status(struct iavf_sc *); +int iavf_get_default_rss_key(u32 *, bool); +int iavf_config_rss_key(struct iavf_sc *); +int iavf_set_rss_hena(struct iavf_sc *); +int iavf_config_rss_lut(struct iavf_sc *); +int iavf_config_promisc_mode(struct iavf_sc *); + +int ixl_vc_send_cmd(struct iavf_sc *sc, uint32_t request); +char *iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed); +void *ixl_vc_get_op_chan(struct iavf_sc *sc, uint32_t request); +#endif /* _IAVF_H_ */ Property changes on: head/sys/dev/ixl/iavf.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/ixl/iavf_vc.c =================================================================== --- head/sys/dev/ixl/iavf_vc.c (nonexistent) +++ head/sys/dev/ixl/iavf_vc.c (revision 339362) @@ -0,0 +1,1014 @@ +/****************************************************************************** + + Copyright (c) 2013-2018, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
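
(On iavf_check_ether_addr() above: it returns TRUE only when the address
has at least one nonzero byte, since the PF hands the VF an all-zero MAC
unless the host administrator assigned one. An equivalent, shorter
predicate, as a sketch using memcmp():

	static const u8 zero_mac[ETHER_ADDR_LEN] = { 0 };

	valid = (memcmp(addr, zero_mac, ETHER_ADDR_LEN) != 0);

The open-coded byte comparison keeps the inline free of any assumption
about what is in scope when the header is included.)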
+ + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +/* +** Virtual Channel support +** These are support functions to communication +** between the VF and PF drivers. +*/ + +#include "ixl.h" +#include "iavf.h" + +/* busy wait delay in msec */ +#define IAVF_BUSY_WAIT_DELAY 10 +#define IAVF_BUSY_WAIT_COUNT 50 + +/* +** iavf_send_pf_msg +** +** Send message to PF and print status if failure. +*/ +static int +iavf_send_pf_msg(struct iavf_sc *sc, + enum virtchnl_ops op, u8 *msg, u16 len) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + i40e_status status; + int val_err; + + /* Validating message before sending it to the PF */ + val_err = virtchnl_vc_validate_vf_msg(&sc->version, op, msg, len); + if (val_err) + device_printf(dev, "Error validating msg to PF for op %d," + " msglen %d: error %d\n", op, len, val_err); + + if (!i40e_check_asq_alive(hw)) { + if (op != VIRTCHNL_OP_GET_STATS) + device_printf(dev, "Unable to send opcode %s to PF, " + "ASQ is not alive\n", ixl_vc_opcode_str(op)); + return (0); + } + + if (op != VIRTCHNL_OP_GET_STATS) + iavf_dbg_vc(sc, + "Sending msg (op=%s[%d]) to PF\n", + ixl_vc_opcode_str(op), op); + + status = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL); + if (status && op != VIRTCHNL_OP_GET_STATS) + device_printf(dev, "Unable to send opcode %s to PF, " + "status %s, aq error %s\n", + ixl_vc_opcode_str(op), + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + + return (status); +} + +/* +** iavf_send_api_ver +** +** Send API version admin queue message to the PF. The reply is not checked +** in this function. Returns 0 if the message was successfully +** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. +*/ +int +iavf_send_api_ver(struct iavf_sc *sc) +{ + struct virtchnl_version_info vvi; + + vvi.major = VIRTCHNL_VERSION_MAJOR; + vvi.minor = VIRTCHNL_VERSION_MINOR; + + return iavf_send_pf_msg(sc, VIRTCHNL_OP_VERSION, + (u8 *)&vvi, sizeof(vvi)); +} + +/* +** iavf_verify_api_ver +** +** Compare API versions with the PF. Must be called after admin queue is +** initialized. Returns 0 if API versions match, EIO if +** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty. 
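**
** (The rule reduces to: the PF may be older or equal, never newer. As a
** standalone predicate over the reply:
**
**	if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
**	    (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
**	    pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
**		return (EIO);	/* PF speaks a newer API than this VF */
**
** which is exactly the comparison the function body performs.)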
+*/ +int +iavf_verify_api_ver(struct iavf_sc *sc) +{ + struct virtchnl_version_info *pf_vvi; + struct i40e_hw *hw = &sc->hw; + struct i40e_arq_event_info event; + device_t dev = sc->dev; + i40e_status err; + int retries = 0; + + event.buf_len = IXL_AQ_BUF_SZ; + event.msg_buf = malloc(event.buf_len, M_IAVF, M_WAITOK); + + for (;;) { + if (++retries > IAVF_AQ_MAX_ERR) + goto out_alloc; + + /* Initial delay here is necessary */ + i40e_msec_pause(100); + err = i40e_clean_arq_element(hw, &event, NULL); + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) + continue; + else if (err) { + err = EIO; + goto out_alloc; + } + + if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != + VIRTCHNL_OP_VERSION) { + DDPRINTF(dev, "Received unexpected op response: %d\n", + le32toh(event.desc.cookie_high)); + /* Don't stop looking for expected response */ + continue; + } + + err = (i40e_status)le32toh(event.desc.cookie_low); + if (err) { + err = EIO; + goto out_alloc; + } else + break; + } + + pf_vvi = (struct virtchnl_version_info *)event.msg_buf; + if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || + ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && + (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) { + device_printf(dev, "Critical PF/VF API version mismatch!\n"); + err = EIO; + } else { + sc->version.major = pf_vvi->major; + sc->version.minor = pf_vvi->minor; + } + + /* Log PF/VF api versions */ + device_printf(dev, "PF API %d.%d / VF API %d.%d\n", + pf_vvi->major, pf_vvi->minor, + VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR); + +out_alloc: + free(event.msg_buf, M_IAVF); + return (err); +} + +/* +** iavf_send_vf_config_msg +** +** Send VF configuration request admin queue message to the PF. The reply +** is not checked in this function. Returns 0 if the message was +** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. +*/ +int +iavf_send_vf_config_msg(struct iavf_sc *sc) +{ + u32 caps; + + caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_PF | + VIRTCHNL_VF_OFFLOAD_VLAN; + + iavf_dbg_info(sc, "Sending offload flags: 0x%b\n", + caps, IAVF_PRINTF_VF_OFFLOAD_FLAGS); + + if (sc->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) + return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, + NULL, 0); + else + return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, + (u8 *)&caps, sizeof(caps)); +} + +/* +** iavf_get_vf_config +** +** Get VF configuration from PF and populate hw structure. Must be called after +** admin queue is initialized. Busy waits until response is received from PF, +** with maximum timeout. Response from PF is returned in the buffer for further +** processing by the caller. 
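**
** (This function and iavf_verify_api_ver() above share one receive
** discipline: pause, drain one admin-receive-queue element, skip replies
** for unrelated opcodes, and give up after IAVF_AQ_MAX_ERR tries. The
** skeleton, with expected_op standing for the awaited opcode:
**
**	for (retries = 0; retries <= IAVF_AQ_MAX_ERR; retries++) {
**		err = i40e_clean_arq_element(hw, &event, NULL);
**		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
**			i40e_msec_pause(10);	/* nothing queued yet */
**			continue;
**		}
**		if (le32toh(event.desc.cookie_high) != expected_op)
**			continue;	/* unrelated reply; keep looking */
**		err = le32toh(event.desc.cookie_low);	/* PF status */
**		break;
**	}
**
** The two real loops differ only in their timing and error handling.)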
+*/ +int +iavf_get_vf_config(struct iavf_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + struct i40e_arq_event_info event; + u16 len; + i40e_status err = 0; + u32 retries = 0; + + /* Note this assumes a single VSI */ + len = sizeof(struct virtchnl_vf_resource) + + sizeof(struct virtchnl_vsi_resource); + event.buf_len = len; + event.msg_buf = malloc(event.buf_len, M_IAVF, M_WAITOK); + + for (;;) { + err = i40e_clean_arq_element(hw, &event, NULL); + if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { + if (++retries <= IAVF_AQ_MAX_ERR) + i40e_msec_pause(10); + } else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != + VIRTCHNL_OP_GET_VF_RESOURCES) { + DDPRINTF(dev, "Received a response from PF," + " opcode %d, error %d", + le32toh(event.desc.cookie_high), + le32toh(event.desc.cookie_low)); + retries++; + continue; + } else { + err = (i40e_status)le32toh(event.desc.cookie_low); + if (err) { + device_printf(dev, "%s: Error returned from PF," + " opcode %d, error %d\n", __func__, + le32toh(event.desc.cookie_high), + le32toh(event.desc.cookie_low)); + err = EIO; + goto out_alloc; + } + /* We retrieved the config message, with no errors */ + break; + } + + if (retries > IAVF_AQ_MAX_ERR) { + INIT_DBG_DEV(dev, "Did not receive response after %d tries.", + retries); + err = ETIMEDOUT; + goto out_alloc; + } + } + + memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len)); + i40e_vf_parse_hw_config(hw, sc->vf_res); + +out_alloc: + free(event.msg_buf, M_IAVF); + return err; +} + +/* +** iavf_configure_queues +** +** Request that the PF set up our queues. +*/ +int +iavf_configure_queues(struct iavf_sc *sc) +{ + device_t dev = sc->dev; + struct ixl_vsi *vsi = &sc->vsi; + if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx); + struct ixl_tx_queue *tx_que = vsi->tx_queues; + struct ixl_rx_queue *rx_que = vsi->rx_queues; + struct tx_ring *txr; + struct rx_ring *rxr; + int len, pairs; + + struct virtchnl_vsi_queue_config_info *vqci; + struct virtchnl_queue_pair_info *vqpi; + + /* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX + * queues of a pair need to be configured */ + pairs = max(vsi->num_tx_queues, vsi->num_rx_queues); + len = sizeof(struct virtchnl_vsi_queue_config_info) + + (sizeof(struct virtchnl_queue_pair_info) * pairs); + vqci = malloc(len, M_IAVF, M_NOWAIT | M_ZERO); + if (!vqci) { + device_printf(dev, "%s: unable to allocate memory\n", __func__); + return (ENOMEM); + } + vqci->vsi_id = sc->vsi_res->vsi_id; + vqci->num_queue_pairs = pairs; + vqpi = vqci->qpair; + /* Size check is not needed here - HW max is 16 queue pairs, and we + * can fit info for 31 of them into the AQ buffer before it overflows. 
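 *
 * (The capacity follows from the structure sizes; as a sketch, the number
 * of pairs one message can carry is
 *
 *	max_pairs = (IXL_AQ_BUF_SZ
 *	    - sizeof(struct virtchnl_vsi_queue_config_info))
 *	    / sizeof(struct virtchnl_queue_pair_info);
 *
 * so a VF with more queues than that would need to split the
 * configuration across several VIRTCHNL_OP_CONFIG_VSI_QUEUES messages.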
+ */ + // TODO: the above is wrong now; X722 VFs can have 256 queues + for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) { + txr = &tx_que->txr; + rxr = &rx_que->rxr; + + vqpi->txq.vsi_id = vqci->vsi_id; + vqpi->txq.queue_id = i; + vqpi->txq.ring_len = scctx->isc_ntxd[0]; + vqpi->txq.dma_ring_addr = txr->tx_paddr; + /* Enable Head writeback */ + if (!vsi->enable_head_writeback) { + vqpi->txq.headwb_enabled = 0; + vqpi->txq.dma_headwb_addr = 0; + } else { + vqpi->txq.headwb_enabled = 1; + vqpi->txq.dma_headwb_addr = txr->tx_paddr + + sizeof(struct i40e_tx_desc) * scctx->isc_ntxd[0]; + } + + vqpi->rxq.vsi_id = vqci->vsi_id; + vqpi->rxq.queue_id = i; + vqpi->rxq.ring_len = scctx->isc_nrxd[0]; + vqpi->rxq.dma_ring_addr = rxr->rx_paddr; + vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size; + vqpi->rxq.databuffer_size = rxr->mbuf_sz; + vqpi->rxq.splithdr_enabled = 0; + } + + iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + (u8 *)vqci, len); + free(vqci, M_IAVF); + + return (0); +} + +/* +** iavf_enable_queues +** +** Request that the PF enable all of our queues. +*/ +int +iavf_enable_queues(struct iavf_sc *sc) +{ + struct virtchnl_queue_select vqs; + + vqs.vsi_id = sc->vsi_res->vsi_id; + /* XXX: In Linux PF, as long as neither of these is 0, + * every queue in VF VSI is enabled. */ + vqs.tx_queues = (1 << sc->vsi.num_tx_queues) - 1; + vqs.rx_queues = vqs.tx_queues; + iavf_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); + return (0); +} + +/* +** iavf_disable_queues +** +** Request that the PF disable all of our queues. +*/ +int +iavf_disable_queues(struct iavf_sc *sc) +{ + struct virtchnl_queue_select vqs; + + vqs.vsi_id = sc->vsi_res->vsi_id; + /* XXX: In Linux PF, as long as neither of these is 0, + * every queue in VF VSI is disabled. */ + vqs.tx_queues = (1 << sc->vsi.num_tx_queues) - 1; + vqs.rx_queues = vqs.tx_queues; + iavf_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES, + (u8 *)&vqs, sizeof(vqs)); + return (0); +} + +/* +** iavf_map_queues +** +** Request that the PF map queues to interrupt vectors. Misc causes, including +** admin queue, are always mapped to vector 0. +*/ +int +iavf_map_queues(struct iavf_sc *sc) +{ + struct virtchnl_irq_map_info *vm; + int i, q, len; + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_rx_queue *rx_que = vsi->rx_queues; + if_softc_ctx_t scctx = vsi->shared; + device_t dev = sc->dev; + + // XXX: What happens if we only get 1 MSI-X vector? + MPASS(scctx->isc_vectors > 1); + + /* How many queue vectors, adminq uses one */ + // XXX: How do we know how many interrupt vectors we have? 
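	/*
	 * Resulting layout, matching the loop below: with isc_vectors == N,
	 * vecmap[0..N-2] map one RX/TX queue pair each onto MSI-X vectors
	 * 1..N-1, and the final vecmap[N-1] entry maps no queues onto
	 * vector 0, which is reserved for admin queue interrupts.
	 */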
+ q = scctx->isc_vectors - 1; + + len = sizeof(struct virtchnl_irq_map_info) + + (scctx->isc_vectors * sizeof(struct virtchnl_vector_map)); + vm = malloc(len, M_IAVF, M_NOWAIT); + if (!vm) { + device_printf(dev, "%s: unable to allocate memory\n", __func__); + return (ENOMEM); + } + + vm->num_vectors = scctx->isc_vectors; + /* Queue vectors first */ + for (i = 0; i < q; i++, rx_que++) { + vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; + vm->vecmap[i].vector_id = i + 1; /* first is adminq */ + // TODO: Re-examine this + vm->vecmap[i].txq_map = (1 << rx_que->rxr.me); + vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me); + vm->vecmap[i].rxitr_idx = 0; + vm->vecmap[i].txitr_idx = 1; + } + + /* Misc vector last - this is only for AdminQ messages */ + vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; + vm->vecmap[i].vector_id = 0; + vm->vecmap[i].txq_map = 0; + vm->vecmap[i].rxq_map = 0; + vm->vecmap[i].rxitr_idx = 0; + vm->vecmap[i].txitr_idx = 0; + + iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP, + (u8 *)vm, len); + free(vm, M_IAVF); + + return (0); +} + +/* +** Scan the Filter List looking for vlans that need +** to be added, then create the data to hand to the AQ +** for handling. +*/ +int +iavf_add_vlans(struct iavf_sc *sc) +{ + struct virtchnl_vlan_filter_list *v; + struct iavf_vlan_filter *f, *ftmp; + device_t dev = sc->dev; + int len, i = 0, cnt = 0; + + /* Get count of VLAN filters to add */ + SLIST_FOREACH(f, sc->vlan_filters, next) { + if (f->flags & IXL_FILTER_ADD) + cnt++; + } + + if (!cnt) /* no work... */ + return (ENOENT); + + len = sizeof(struct virtchnl_vlan_filter_list) + + (cnt * sizeof(u16)); + + if (len > IXL_AQ_BUF_SZ) { + device_printf(dev, "%s: Exceeded Max AQ Buf size\n", + __func__); + return (EFBIG); + } + + v = malloc(len, M_IAVF, M_NOWAIT); + if (!v) { + device_printf(dev, "%s: unable to allocate memory\n", + __func__); + return (ENOMEM); + } + + v->vsi_id = sc->vsi_res->vsi_id; + v->num_elements = cnt; + + /* Scan the filter array */ + SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { + if (f->flags & IXL_FILTER_ADD) { + bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); + f->flags = IXL_FILTER_USED; + i++; + } + if (i == cnt) + break; + } + + iavf_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); + free(v, M_IAVF); + /* add stats? */ + return (0); +} + +/* +** Scan the Filter Table looking for vlans that need +** to be removed, then create the data to hand to the AQ +** for handling. +*/ +int +iavf_del_vlans(struct iavf_sc *sc) +{ + struct virtchnl_vlan_filter_list *v; + struct iavf_vlan_filter *f, *ftmp; + device_t dev = sc->dev; + int len, i = 0, cnt = 0; + + /* Get count of VLAN filters to delete */ + SLIST_FOREACH(f, sc->vlan_filters, next) { + if (f->flags & IXL_FILTER_DEL) + cnt++; + } + + if (!cnt) /* no work... 
*/ + return (ENOENT); + + len = sizeof(struct virtchnl_vlan_filter_list) + + (cnt * sizeof(u16)); + + if (len > IXL_AQ_BUF_SZ) { + device_printf(dev, "%s: Exceeded Max AQ Buf size\n", + __func__); + return (EFBIG); + } + + v = malloc(len, M_IAVF, M_NOWAIT | M_ZERO); + if (!v) { + device_printf(dev, "%s: unable to allocate memory\n", + __func__); + return (ENOMEM); + } + + v->vsi_id = sc->vsi_res->vsi_id; + v->num_elements = cnt; + + /* Scan the filter array */ + SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { + if (f->flags & IXL_FILTER_DEL) { + bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); + i++; + SLIST_REMOVE(sc->vlan_filters, f, iavf_vlan_filter, next); + free(f, M_IAVF); + } + if (i == cnt) + break; + } + + iavf_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); + free(v, M_IAVF); + /* add stats? */ + return (0); +} + + +/* +** This routine takes additions to the vsi filter +** table and creates an Admin Queue call to create +** the filters in the hardware. +*/ +int +iavf_add_ether_filters(struct iavf_sc *sc) +{ + struct virtchnl_ether_addr_list *a; + struct iavf_mac_filter *f; + device_t dev = sc->dev; + int len, j = 0, cnt = 0; + enum i40e_status_code status; + + /* Get count of MAC addresses to add */ + SLIST_FOREACH(f, sc->mac_filters, next) { + if (f->flags & IXL_FILTER_ADD) + cnt++; + } + if (cnt == 0) { /* Should not happen... */ + iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__); + return (ENOENT); + } + + len = sizeof(struct virtchnl_ether_addr_list) + + (cnt * sizeof(struct virtchnl_ether_addr)); + + a = malloc(len, M_IAVF, M_NOWAIT | M_ZERO); + if (a == NULL) { + device_printf(dev, "%s: Failed to get memory for " + "virtchnl_ether_addr_list\n", __func__); + return (ENOMEM); + } + a->vsi_id = sc->vsi.id; + a->num_elements = cnt; + + /* Scan the filter array */ + SLIST_FOREACH(f, sc->mac_filters, next) { + if (f->flags & IXL_FILTER_ADD) { + bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN); + f->flags &= ~IXL_FILTER_ADD; + j++; + + iavf_dbg_vc(sc, "ADD: " MAC_FORMAT "\n", + MAC_FORMAT_ARGS(f->macaddr)); + } + if (j == cnt) + break; + } + DDPRINTF(dev, "len %d, j %d, cnt %d", + len, j, cnt); + + status = iavf_send_pf_msg(sc, + VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len); + /* add stats? */ + free(a, M_IAVF); + return (status); +} + +/* +** This routine takes filters flagged for deletion in the +** sc MAC filter list and creates an Admin Queue call +** to delete those filters in the hardware. 
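**
** (The EFBIG guard in both VLAN paths bounds how many ids fit in one
** message; as a sketch, the capacity works out to
**
**	max_vlans = (IXL_AQ_BUF_SZ
**	    - sizeof(struct virtchnl_vlan_filter_list)) / sizeof(u16);
**
** and anything beyond it would have to be split across several requests,
** which neither path attempts.)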
+*/ +int +iavf_del_ether_filters(struct iavf_sc *sc) +{ + struct virtchnl_ether_addr_list *d; + struct iavf_mac_filter *f, *f_temp; + device_t dev = sc->dev; + int len, j = 0, cnt = 0; + + /* Get count of MAC addresses to delete */ + SLIST_FOREACH(f, sc->mac_filters, next) { + if (f->flags & IXL_FILTER_DEL) + cnt++; + } + if (cnt == 0) { + iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__); + return (ENOENT); + } + + len = sizeof(struct virtchnl_ether_addr_list) + + (cnt * sizeof(struct virtchnl_ether_addr)); + + d = malloc(len, M_IAVF, M_NOWAIT | M_ZERO); + if (d == NULL) { + device_printf(dev, "%s: Failed to get memory for " + "virtchnl_ether_addr_list\n", __func__); + return (ENOMEM); + } + d->vsi_id = sc->vsi.id; + d->num_elements = cnt; + + /* Scan the filter array */ + SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) { + if (f->flags & IXL_FILTER_DEL) { + bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN); + iavf_dbg_vc(sc, "DEL: " MAC_FORMAT "\n", + MAC_FORMAT_ARGS(f->macaddr)); + j++; + SLIST_REMOVE(sc->mac_filters, f, iavf_mac_filter, next); + free(f, M_IAVF); + } + if (j == cnt) + break; + } + iavf_send_pf_msg(sc, + VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len); + /* add stats? */ + free(d, M_IAVF); + return (0); +} + +/* +** iavf_request_reset +** Request that the PF reset this VF. No response is expected. +*/ +int +iavf_request_reset(struct iavf_sc *sc) +{ + /* + ** Set the reset status to "in progress" before + ** the request, this avoids any possibility of + ** a mistaken early detection of completion. + */ + wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS); + iavf_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0); + return (0); +} + +/* +** iavf_request_stats +** Request the statistics for this VF's VSI from PF. +*/ +int +iavf_request_stats(struct iavf_sc *sc) +{ + struct virtchnl_queue_select vqs; + int error = 0; + + vqs.vsi_id = sc->vsi_res->vsi_id; + /* Low priority, we don't need to error check */ + error = iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS, + (u8 *)&vqs, sizeof(vqs)); + if (error) + device_printf(sc->dev, "Error sending stats request to PF: %d\n", error); + + return (0); +} + +/* +** Updates driver's stats counters with VSI stats returned from PF. 
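**
** (Note that the delete paths free entries while walking the list, so
** they use SLIST_FOREACH_SAFE, which caches the next pointer before the
** loop body runs; a plain SLIST_FOREACH would read f->next after
** free(f, M_IAVF) and dereference freed memory.)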
+*/ +void +iavf_update_stats_counters(struct iavf_sc *sc, struct i40e_eth_stats *es) +{ + struct ixl_vsi *vsi = &sc->vsi; + uint64_t tx_discards; + + tx_discards = es->tx_discards; +#if 0 + for (int i = 0; i < vsi->num_queues; i++) + tx_discards += sc->vsi.queues[i].txr.br->br_drops; +#endif + + /* Update ifnet stats */ + IXL_SET_IPACKETS(vsi, es->rx_unicast + + es->rx_multicast + + es->rx_broadcast); + IXL_SET_OPACKETS(vsi, es->tx_unicast + + es->tx_multicast + + es->tx_broadcast); + IXL_SET_IBYTES(vsi, es->rx_bytes); + IXL_SET_OBYTES(vsi, es->tx_bytes); + IXL_SET_IMCASTS(vsi, es->rx_multicast); + IXL_SET_OMCASTS(vsi, es->tx_multicast); + + IXL_SET_OERRORS(vsi, es->tx_errors); + IXL_SET_IQDROPS(vsi, es->rx_discards); + IXL_SET_OQDROPS(vsi, tx_discards); + IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); + IXL_SET_COLLISIONS(vsi, 0); + + vsi->eth_stats = *es; +} + +int +iavf_config_rss_key(struct iavf_sc *sc) +{ + struct virtchnl_rss_key *rss_key_msg; + int msg_len, key_length; + u8 rss_seed[IXL_RSS_KEY_SIZE]; + +#ifdef RSS + /* Fetch the configured RSS key */ + rss_getkey((uint8_t *) &rss_seed); +#else + ixl_get_default_rss_key((u32 *)rss_seed); +#endif + + /* Send the fetched key */ + key_length = IXL_RSS_KEY_SIZE; + msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1; + rss_key_msg = malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO); + if (rss_key_msg == NULL) { + device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n"); + return (ENOMEM); + } + + rss_key_msg->vsi_id = sc->vsi_res->vsi_id; + rss_key_msg->key_len = key_length; + bcopy(rss_seed, &rss_key_msg->key[0], key_length); + + iavf_dbg_vc(sc, "config_rss: vsi_id %d, key_len %d\n", + rss_key_msg->vsi_id, rss_key_msg->key_len); + + iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY, + (u8 *)rss_key_msg, msg_len); + + free(rss_key_msg, M_IAVF); + return (0); +} + +int +iavf_set_rss_hena(struct iavf_sc *sc) +{ + struct virtchnl_rss_hena hena; + struct i40e_hw *hw = &sc->hw; + + if (hw->mac.type == I40E_MAC_X722_VF) + hena.hena = IXL_DEFAULT_RSS_HENA_X722; + else + hena.hena = IXL_DEFAULT_RSS_HENA_XL710; + + iavf_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA, + (u8 *)&hena, sizeof(hena)); + return (0); +} + +int +iavf_config_rss_lut(struct iavf_sc *sc) +{ + struct virtchnl_rss_lut *rss_lut_msg; + int msg_len; + u16 lut_length; + u32 lut; + int i, que_id; + + lut_length = IXL_RSS_VSI_LUT_SIZE; + msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1; + rss_lut_msg = malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO); + if (rss_lut_msg == NULL) { + device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n"); + return (ENOMEM); + } + + rss_lut_msg->vsi_id = sc->vsi_res->vsi_id; + /* Each LUT entry is a max of 1 byte, so this is easy */ + rss_lut_msg->lut_entries = lut_length; + + /* Populate the LUT with max no. of queues in round robin fashion */ + for (i = 0; i < lut_length; i++) { +#ifdef RSS + /* + * Fetch the RSS bucket id for the given indirection entry. + * Cap it at the number of configured buckets (which is + * num_queues.) 
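 *
 * (Without the RSS option the fallback below is plain round-robin; with
 * num_rx_queues == 4, for example, a 64-entry LUT comes out as
 * 0,1,2,3,0,1,2,3,... so hash buckets land evenly across the queues.)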
+ */ + que_id = rss_get_indirection_to_bucket(i); + que_id = que_id % sc->vsi.num_rx_queues; +#else + que_id = i % sc->vsi.num_rx_queues; +#endif + lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK; + rss_lut_msg->lut[i] = lut; + } + + iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT, + (u8 *)rss_lut_msg, msg_len); + + free(rss_lut_msg, M_IAVF); + return (0); +} + +int +iavf_config_promisc_mode(struct iavf_sc *sc) +{ + struct virtchnl_promisc_info pinfo; + + pinfo.vsi_id = sc->vsi_res->vsi_id; + pinfo.flags = sc->promisc_flags; + + iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + (u8 *)&pinfo, sizeof(pinfo)); + return (0); +} + +/* +** iavf_vc_completion +** +** Asynchronous completion function for admin queue messages. Rather than busy +** wait, we fire off our requests and assume that no errors will be returned. +** This function handles the reply messages. +*/ +void +iavf_vc_completion(struct iavf_sc *sc, + enum virtchnl_ops v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) +{ + device_t dev = sc->dev; + + if (v_opcode != VIRTCHNL_OP_GET_STATS) + iavf_dbg_vc(sc, "%s: opcode %s\n", __func__, + ixl_vc_opcode_str(v_opcode)); + + if (v_opcode == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = + (struct virtchnl_pf_event *)msg; + + switch (vpe->event) { + case VIRTCHNL_EVENT_LINK_CHANGE: + iavf_dbg_vc(sc, "Link change: status %d, speed %s\n", + vpe->event_data.link_event.link_status, + iavf_vc_speed_to_string(vpe->event_data.link_event.link_speed)); + sc->link_up = + vpe->event_data.link_event.link_status; + sc->link_speed = + vpe->event_data.link_event.link_speed; + iavf_update_link_status(sc); + break; + case VIRTCHNL_EVENT_RESET_IMPENDING: + device_printf(dev, "PF initiated reset!\n"); + sc->init_state = IAVF_RESET_PENDING; + iavf_if_init(sc->vsi.ctx); + break; + default: + iavf_dbg_vc(sc, "Unknown event %d from AQ\n", + vpe->event); + break; + } + + return; + } + + /* Catch-all error response */ + if (v_retval) { + device_printf(dev, + "%s: AQ returned error %s to our request %s!\n", + __func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode)); + } + + switch (v_opcode) { + case VIRTCHNL_OP_GET_STATS: + iavf_update_stats_counters(sc, (struct i40e_eth_stats *)msg); + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + if (v_retval) { + device_printf(dev, "WARNING: Error adding VF mac filter!\n"); + device_printf(dev, "WARNING: Device may not receive traffic!\n"); + } + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + break; + case VIRTCHNL_OP_ADD_VLAN: + break; + case VIRTCHNL_OP_DEL_VLAN: + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + atomic_store_rel_32(&sc->queues_enabled, 1); + wakeup_one(&sc->enable_queues_chan); + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + atomic_store_rel_32(&sc->queues_enabled, 0); + wakeup_one(&sc->disable_queues_chan); + break; + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: + break; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + break; + case VIRTCHNL_OP_SET_RSS_HENA: + break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + break; + default: + iavf_dbg_vc(sc, + "Received unexpected message %s from PF.\n", + ixl_vc_opcode_str(v_opcode)); + break; + } +} + +int +ixl_vc_send_cmd(struct iavf_sc *sc, uint32_t request) +{ + + switch (request) { + case IAVF_FLAG_AQ_MAP_VECTORS: + return iavf_map_queues(sc); + + case IAVF_FLAG_AQ_ADD_MAC_FILTER: + return iavf_add_ether_filters(sc); + + case IAVF_FLAG_AQ_ADD_VLAN_FILTER: + return iavf_add_vlans(sc); + + case 
IAVF_FLAG_AQ_DEL_MAC_FILTER: + return iavf_del_ether_filters(sc); + + case IAVF_FLAG_AQ_DEL_VLAN_FILTER: + return iavf_del_vlans(sc); + + case IAVF_FLAG_AQ_CONFIGURE_QUEUES: + return iavf_configure_queues(sc); + + case IAVF_FLAG_AQ_DISABLE_QUEUES: + return iavf_disable_queues(sc); + + case IAVF_FLAG_AQ_ENABLE_QUEUES: + return iavf_enable_queues(sc); + + case IAVF_FLAG_AQ_CONFIG_RSS_KEY: + return iavf_config_rss_key(sc); + + case IAVF_FLAG_AQ_SET_RSS_HENA: + return iavf_set_rss_hena(sc); + + case IAVF_FLAG_AQ_CONFIG_RSS_LUT: + return iavf_config_rss_lut(sc); + + case IAVF_FLAG_AQ_CONFIGURE_PROMISC: + return iavf_config_promisc_mode(sc); + } + + return (0); +} + +void * +ixl_vc_get_op_chan(struct iavf_sc *sc, uint32_t request) +{ + switch (request) { + case IAVF_FLAG_AQ_ENABLE_QUEUES: + return (&sc->enable_queues_chan); + case IAVF_FLAG_AQ_DISABLE_QUEUES: + return (&sc->disable_queues_chan); + default: + return (NULL); + } +} Property changes on: head/sys/dev/ixl/iavf_vc.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/dev/ixl/if_iavf.c =================================================================== --- head/sys/dev/ixl/if_iavf.c (nonexistent) +++ head/sys/dev/ixl/if_iavf.c (revision 339362) @@ -0,0 +1,2435 @@ +/****************************************************************************** + + Copyright (c) 2013-2018, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "iavf.h" + +/********************************************************************* + * Driver version + *********************************************************************/ +#define IAVF_DRIVER_VERSION_MAJOR 2 +#define IAVF_DRIVER_VERSION_MINOR 0 +#define IAVF_DRIVER_VERSION_BUILD 0 + +#define IAVF_DRIVER_VERSION_STRING \ + __XSTRING(IAVF_DRIVER_VERSION_MAJOR) "." \ + __XSTRING(IAVF_DRIVER_VERSION_MINOR) "." 
\ + __XSTRING(IAVF_DRIVER_VERSION_BUILD) "-k" + +/********************************************************************* + * PCI Device ID Table + * + * Used by probe to select devices to load on + * + * ( Vendor ID, Device ID, Branding String ) + *********************************************************************/ + +static pci_vendor_info_t iavf_vendor_info_array[] = +{ + PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, "Intel(R) Ethernet Virtual Function 700 Series"), + PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, "Intel(R) Ethernet Virtual Function 700 Series (X722)"), + PVID(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, "Intel(R) Ethernet Adaptive Virtual Function"), + /* required last entry */ + PVID_END +}; + +/********************************************************************* + * Function prototypes + *********************************************************************/ +static void *iavf_register(device_t dev); +static int iavf_if_attach_pre(if_ctx_t ctx); +static int iavf_if_attach_post(if_ctx_t ctx); +static int iavf_if_detach(if_ctx_t ctx); +static int iavf_if_shutdown(if_ctx_t ctx); +static int iavf_if_suspend(if_ctx_t ctx); +static int iavf_if_resume(if_ctx_t ctx); +static int iavf_if_msix_intr_assign(if_ctx_t ctx, int msix); +static void iavf_if_enable_intr(if_ctx_t ctx); +static void iavf_if_disable_intr(if_ctx_t ctx); +static int iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); +static int iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); +static int iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); +static int iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); +static void iavf_if_queues_free(if_ctx_t ctx); +static void iavf_if_update_admin_status(if_ctx_t ctx); +static void iavf_if_multi_set(if_ctx_t ctx); +static int iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu); +static void iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); +static int iavf_if_media_change(if_ctx_t ctx); +static int iavf_if_promisc_set(if_ctx_t ctx, int flags); +static void iavf_if_timer(if_ctx_t ctx, uint16_t qid); +static void iavf_if_vlan_register(if_ctx_t ctx, u16 vtag); +static void iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag); +static uint64_t iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt); +static void iavf_if_stop(if_ctx_t ctx); + +static int iavf_allocate_pci_resources(struct iavf_sc *); +static int iavf_reset_complete(struct i40e_hw *); +static int iavf_setup_vc(struct iavf_sc *); +static int iavf_reset(struct iavf_sc *); +static int iavf_vf_config(struct iavf_sc *); +static void iavf_init_filters(struct iavf_sc *); +static void iavf_free_pci_resources(struct iavf_sc *); +static void iavf_free_filters(struct iavf_sc *); +static void iavf_setup_interface(device_t, struct iavf_sc *); +static void iavf_add_device_sysctls(struct iavf_sc *); +static void iavf_enable_adminq_irq(struct i40e_hw *); +static void iavf_disable_adminq_irq(struct i40e_hw *); +static void iavf_enable_queue_irq(struct i40e_hw *, int); +static void iavf_disable_queue_irq(struct i40e_hw *, int); +static void iavf_config_rss(struct iavf_sc *); +static void iavf_stop(struct iavf_sc *); + +static int iavf_add_mac_filter(struct iavf_sc *, u8 *, u16); +static int iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr); +static int iavf_msix_que(void *); +static int iavf_msix_adminq(void *); +//static void iavf_del_multi(struct iavf_sc *sc); +static void iavf_init_multi(struct iavf_sc *sc); +static void 
iavf_configure_itr(struct iavf_sc *sc); + +static int iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS); +static int iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS); +static int iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS); +static int iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); +static int iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS); +static int iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS); +static int iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS); + +char *iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed); +static void iavf_save_tunables(struct iavf_sc *); +static enum i40e_status_code + iavf_process_adminq(struct iavf_sc *, u16 *); +static int iavf_send_vc_msg(struct iavf_sc *sc, u32 op); +static int iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op); + +/********************************************************************* + * FreeBSD Device Interface Entry Points + *********************************************************************/ + +static device_method_t iavf_methods[] = { + /* Device interface */ + DEVMETHOD(device_register, iavf_register), + DEVMETHOD(device_probe, iflib_device_probe), + DEVMETHOD(device_attach, iflib_device_attach), + DEVMETHOD(device_detach, iflib_device_detach), + DEVMETHOD(device_shutdown, iflib_device_shutdown), + DEVMETHOD_END +}; + +static driver_t iavf_driver = { + "iavf", iavf_methods, sizeof(struct iavf_sc), +}; + +devclass_t iavf_devclass; +DRIVER_MODULE(iavf, pci, iavf_driver, iavf_devclass, 0, 0); +MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision", + pci, iavf, iavf_vendor_info_array, + nitems(iavf_vendor_info_array) - 1); +MODULE_VERSION(iavf, 1); + +MODULE_DEPEND(iavf, pci, 1, 1, 1); +MODULE_DEPEND(iavf, ether, 1, 1, 1); +MODULE_DEPEND(iavf, iflib, 1, 1, 1); + +MALLOC_DEFINE(M_IAVF, "iavf", "iavf driver allocations"); + +static device_method_t iavf_if_methods[] = { + DEVMETHOD(ifdi_attach_pre, iavf_if_attach_pre), + DEVMETHOD(ifdi_attach_post, iavf_if_attach_post), + DEVMETHOD(ifdi_detach, iavf_if_detach), + DEVMETHOD(ifdi_shutdown, iavf_if_shutdown), + DEVMETHOD(ifdi_suspend, iavf_if_suspend), + DEVMETHOD(ifdi_resume, iavf_if_resume), + DEVMETHOD(ifdi_init, iavf_if_init), + DEVMETHOD(ifdi_stop, iavf_if_stop), + DEVMETHOD(ifdi_msix_intr_assign, iavf_if_msix_intr_assign), + DEVMETHOD(ifdi_intr_enable, iavf_if_enable_intr), + DEVMETHOD(ifdi_intr_disable, iavf_if_disable_intr), + DEVMETHOD(ifdi_rx_queue_intr_enable, iavf_if_rx_queue_intr_enable), + DEVMETHOD(ifdi_tx_queue_intr_enable, iavf_if_tx_queue_intr_enable), + DEVMETHOD(ifdi_tx_queues_alloc, iavf_if_tx_queues_alloc), + DEVMETHOD(ifdi_rx_queues_alloc, iavf_if_rx_queues_alloc), + DEVMETHOD(ifdi_queues_free, iavf_if_queues_free), + DEVMETHOD(ifdi_update_admin_status, iavf_if_update_admin_status), + DEVMETHOD(ifdi_multi_set, iavf_if_multi_set), + DEVMETHOD(ifdi_mtu_set, iavf_if_mtu_set), + DEVMETHOD(ifdi_media_status, iavf_if_media_status), + DEVMETHOD(ifdi_media_change, iavf_if_media_change), + DEVMETHOD(ifdi_promisc_set, iavf_if_promisc_set), + DEVMETHOD(ifdi_timer, iavf_if_timer), + DEVMETHOD(ifdi_vlan_register, iavf_if_vlan_register), + DEVMETHOD(ifdi_vlan_unregister, iavf_if_vlan_unregister), + DEVMETHOD(ifdi_get_counter, iavf_if_get_counter), + DEVMETHOD_END +}; + +static driver_t iavf_if_driver = { + "iavf_if", iavf_if_methods, sizeof(struct iavf_sc) +}; + +/* +** TUNEABLE PARAMETERS: +*/ + +static SYSCTL_NODE(_hw, OID_AUTO, iavf, CTLFLAG_RD, 0, + "iavf driver parameters"); + +/* + * Different method for processing TX descriptor + * completion. 
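+ *
+ * With head writeback, the hardware writes the ring's consumed-descriptor
+ * index back to host memory and the driver reads that one value;
+ * otherwise the driver checks the descriptors themselves for completion
+ * status (see ixl_txrx_hwb/ixl_txrx_dwb below). Illustrative example
+ * (not part of this commit): the tunable can be set at boot from
+ * /boot/loader.conf with
+ *
+ *   hw.iavf.enable_head_writeback="1"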
+ */ +static int iavf_enable_head_writeback = 0; +TUNABLE_INT("hw.iavf.enable_head_writeback", + &iavf_enable_head_writeback); +SYSCTL_INT(_hw_iavf, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, + &iavf_enable_head_writeback, 0, + "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); + +static int iavf_core_debug_mask = 0; +TUNABLE_INT("hw.iavf.core_debug_mask", + &iavf_core_debug_mask); +SYSCTL_INT(_hw_iavf, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN, + &iavf_core_debug_mask, 0, + "Display debug statements that are printed in non-shared code"); + +static int iavf_shared_debug_mask = 0; +TUNABLE_INT("hw.iavf.shared_debug_mask", + &iavf_shared_debug_mask); +SYSCTL_INT(_hw_iavf, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN, + &iavf_shared_debug_mask, 0, + "Display debug statements that are printed in shared code"); + +int iavf_rx_itr = IXL_ITR_8K; +TUNABLE_INT("hw.iavf.rx_itr", &iavf_rx_itr); +SYSCTL_INT(_hw_iavf, OID_AUTO, rx_itr, CTLFLAG_RDTUN, + &iavf_rx_itr, 0, "RX Interrupt Rate"); + +int iavf_tx_itr = IXL_ITR_4K; +TUNABLE_INT("hw.iavf.tx_itr", &iavf_tx_itr); +SYSCTL_INT(_hw_iavf, OID_AUTO, tx_itr, CTLFLAG_RDTUN, + &iavf_tx_itr, 0, "TX Interrupt Rate"); + +extern struct if_txrx ixl_txrx_hwb; +extern struct if_txrx ixl_txrx_dwb; + +static struct if_shared_ctx iavf_sctx_init = { + .isc_magic = IFLIB_MAGIC, + .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ + .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), + .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, + .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), + .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE, + .isc_rx_maxsize = 16384, + .isc_rx_nsegments = IXL_MAX_RX_SEGS, + .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, + .isc_nfl = 1, + .isc_ntxqs = 1, + .isc_nrxqs = 1, + + .isc_admin_intrcnt = 1, + .isc_vendor_info = iavf_vendor_info_array, + .isc_driver_version = IAVF_DRIVER_VERSION_STRING, + .isc_driver = &iavf_if_driver, + .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_IS_VF, + + .isc_nrxd_min = {IXL_MIN_RING}, + .isc_ntxd_min = {IXL_MIN_RING}, + .isc_nrxd_max = {IXL_MAX_RING}, + .isc_ntxd_max = {IXL_MAX_RING}, + .isc_nrxd_default = {IXL_DEFAULT_RING}, + .isc_ntxd_default = {IXL_DEFAULT_RING}, +}; + +if_shared_ctx_t iavf_sctx = &iavf_sctx_init; + +/*** Functions ***/ +static void * +iavf_register(device_t dev) +{ + return (iavf_sctx); +} + +static int +iavf_allocate_pci_resources(struct iavf_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = iflib_get_dev(sc->vsi.ctx); + int rid; + + /* Map BAR0 */ + rid = PCIR_BAR(0); + sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); + + if (!(sc->pci_mem)) { + device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); + return (ENXIO); + } + + /* Save off the PCI information */ + hw->vendor_id = pci_get_vendor(dev); + hw->device_id = pci_get_device(dev); + hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); + hw->subsystem_vendor_id = + pci_read_config(dev, PCIR_SUBVEND_0, 2); + hw->subsystem_device_id = + pci_read_config(dev, PCIR_SUBDEV_0, 2); + + hw->bus.device = pci_get_slot(dev); + hw->bus.func = pci_get_function(dev); + + /* Save off register access information */ + sc->osdep.mem_bus_space_tag = + rman_get_bustag(sc->pci_mem); + sc->osdep.mem_bus_space_handle = + rman_get_bushandle(sc->pci_mem); + sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem); + sc->osdep.flush_reg = I40E_VFGEN_RSTAT; + sc->osdep.dev = dev; + + sc->hw.hw_addr 
= (u8 *) &sc->osdep.mem_bus_space_handle; + sc->hw.back = &sc->osdep; + + return (0); +} + +static int +iavf_if_attach_pre(if_ctx_t ctx) +{ + device_t dev; + struct iavf_sc *sc; + struct i40e_hw *hw; + struct ixl_vsi *vsi; + if_softc_ctx_t scctx; + int error = 0; + + dev = iflib_get_dev(ctx); + sc = iflib_get_softc(ctx); + + vsi = &sc->vsi; + vsi->back = sc; + sc->dev = dev; + hw = &sc->hw; + + vsi->dev = dev; + vsi->hw = &sc->hw; + vsi->num_vlans = 0; + vsi->ctx = ctx; + vsi->media = iflib_get_media(ctx); + vsi->shared = scctx = iflib_get_softc_ctx(ctx); + + iavf_save_tunables(sc); + + /* Do PCI setup - map BAR0, etc */ + if (iavf_allocate_pci_resources(sc)) { + device_printf(dev, "%s: Allocation of PCI resources failed\n", + __func__); + error = ENXIO; + goto err_early; + } + + iavf_dbg_init(sc, "Allocated PCI resources and MSIX vectors\n"); + + /* + * XXX: This is called by init_shared_code in the PF driver, + * but the rest of that function does not support VFs. + */ + error = i40e_set_mac_type(hw); + if (error) { + device_printf(dev, "%s: set_mac_type failed: %d\n", + __func__, error); + goto err_pci_res; + } + + error = iavf_reset_complete(hw); + if (error) { + device_printf(dev, "%s: Device is still being reset\n", + __func__); + goto err_pci_res; + } + + iavf_dbg_init(sc, "VF Device is ready for configuration\n"); + + /* Sets up Admin Queue */ + error = iavf_setup_vc(sc); + if (error) { + device_printf(dev, "%s: Error setting up PF comms, %d\n", + __func__, error); + goto err_pci_res; + } + + iavf_dbg_init(sc, "PF API version verified\n"); + + /* Need API version before sending reset message */ + error = iavf_reset(sc); + if (error) { + device_printf(dev, "VF reset failed; reload the driver\n"); + goto err_aq; + } + + iavf_dbg_init(sc, "VF reset complete\n"); + + /* Ask for VF config from PF */ + error = iavf_vf_config(sc); + if (error) { + device_printf(dev, "Error getting configuration from PF: %d\n", + error); + goto err_aq; + } + + device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n", + sc->vf_res->num_vsis, + sc->vf_res->num_queue_pairs, + sc->vf_res->max_vectors, + sc->vf_res->rss_key_size, + sc->vf_res->rss_lut_size); + iavf_dbg_info(sc, "Capabilities=%b\n", + sc->vf_res->vf_cap_flags, IAVF_PRINTF_VF_OFFLOAD_FLAGS); + + /* got VF config message back from PF, now we can parse it */ + for (int i = 0; i < sc->vf_res->num_vsis; i++) { + /* XXX: We only use the first VSI we find */ + if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + sc->vsi_res = &sc->vf_res->vsi_res[i]; + } + if (!sc->vsi_res) { + device_printf(dev, "%s: no LAN VSI found\n", __func__); + error = EIO; + goto err_res_buf; + } + vsi->id = sc->vsi_res->vsi_id; + + iavf_dbg_init(sc, "Resource Acquisition complete\n"); + + /* If no mac address was assigned just make a random one */ + if (!iavf_check_ether_addr(hw->mac.addr)) { + u8 addr[ETHER_ADDR_LEN]; + arc4rand(&addr, sizeof(addr), 0); + addr[0] &= 0xFE; + addr[0] |= 0x02; + bcopy(addr, hw->mac.addr, sizeof(addr)); + } + bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); + iflib_set_mac(ctx, hw->mac.addr); + + /* Allocate filter lists */ + iavf_init_filters(sc); + + /* Fill out more iflib parameters */ + scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = + sc->vsi_res->num_queue_pairs; + if (vsi->enable_head_writeback) { + scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] + * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN); + scctx->isc_txrx = &ixl_txrx_hwb; + } else { + scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] 
+ * sizeof(struct i40e_tx_desc), DBA_ALIGN); + scctx->isc_txrx = &ixl_txrx_dwb; + } + scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] + * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN); + scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR); + scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS; + scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS; + scctx->isc_tx_tso_size_max = IXL_TSO_SIZE; + scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE; + scctx->isc_rss_table_size = IXL_RSS_VSI_LUT_SIZE; + scctx->isc_tx_csum_flags = CSUM_OFFLOAD; + scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS; + + return (0); + +err_res_buf: + free(sc->vf_res, M_IAVF); +err_aq: + i40e_shutdown_adminq(hw); +err_pci_res: + iavf_free_pci_resources(sc); +err_early: + return (error); +} + +static int +iavf_if_attach_post(if_ctx_t ctx) +{ + device_t dev; + struct iavf_sc *sc; + struct i40e_hw *hw; + struct ixl_vsi *vsi; + int error = 0; + + INIT_DBG_DEV(dev, "begin"); + + dev = iflib_get_dev(ctx); + sc = iflib_get_softc(ctx); + vsi = &sc->vsi; + vsi->ifp = iflib_get_ifp(ctx); + hw = &sc->hw; + + /* Save off determined number of queues for interface */ + vsi->num_rx_queues = vsi->shared->isc_nrxqsets; + vsi->num_tx_queues = vsi->shared->isc_ntxqsets; + + /* Setup the stack interface */ + iavf_setup_interface(dev, sc); + + INIT_DBG_DEV(dev, "Interface setup complete"); + + /* Initialize statistics & add sysctls */ + bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); + iavf_add_device_sysctls(sc); + + sc->init_state = IAVF_INIT_READY; + atomic_store_rel_32(&sc->queues_enabled, 0); + + /* We want AQ enabled early for init */ + iavf_enable_adminq_irq(hw); + + INIT_DBG_DEV(dev, "end"); + + return (error); +} + +/** + * XXX: iflib always ignores the return value of detach() + * -> This means that this isn't allowed to fail + */ +static int +iavf_if_detach(if_ctx_t ctx) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + enum i40e_status_code status; + + INIT_DBG_DEV(dev, "begin"); + + /* Remove all the media and link information */ + ifmedia_removeall(vsi->media); + + iavf_disable_adminq_irq(hw); + status = i40e_shutdown_adminq(&sc->hw); + if (status != I40E_SUCCESS) { + device_printf(dev, + "i40e_shutdown_adminq() failed with status %s\n", + i40e_stat_str(hw, status)); + } + + free(sc->vf_res, M_IAVF); + iavf_free_pci_resources(sc); + iavf_free_filters(sc); + + INIT_DBG_DEV(dev, "end"); + return (0); +} + +static int +iavf_if_shutdown(if_ctx_t ctx) +{ + return (0); +} + +static int +iavf_if_suspend(if_ctx_t ctx) +{ + return (0); +} + +static int +iavf_if_resume(if_ctx_t ctx) +{ + return (0); +} + +static int +iavf_send_vc_msg_sleep(struct iavf_sc *sc, u32 op) +{ + int error = 0; + if_ctx_t ctx = sc->vsi.ctx; + + error = ixl_vc_send_cmd(sc, op); + if (error != 0) { + iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error); + return (error); + } + + /* Don't wait for a response if the device is being detached. 
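+	 *
+	 * The iflib ctx lock is held here; sx_sleep() releases it while
+	 * waiting and reacquires it before returning, and the virtchnl
+	 * completion path is expected to wake the per-op channel. A
+	 * timeout surfaces as EWOULDBLOCK and is reported below.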
*/ + if (!iflib_in_detach(ctx)) { + iavf_dbg_vc(sc, "Sleeping for op %b\n", op, IAVF_FLAGS); + error = sx_sleep(ixl_vc_get_op_chan(sc, op), + iflib_ctx_lock_get(ctx), PRI_MAX, "iavf_vc", IAVF_AQ_TIMEOUT); + + if (error == EWOULDBLOCK) + device_printf(sc->dev, "%b timed out\n", op, IAVF_FLAGS); + } + + return (error); +} + +static int +iavf_send_vc_msg(struct iavf_sc *sc, u32 op) +{ + int error = 0; + + error = ixl_vc_send_cmd(sc, op); + if (error != 0) + iavf_dbg_vc(sc, "Error sending %b: %d\n", op, IAVF_FLAGS, error); + + return (error); +} + +static void +iavf_init_queues(struct ixl_vsi *vsi) +{ + if_softc_ctx_t scctx = vsi->shared; + struct ixl_tx_queue *tx_que = vsi->tx_queues; + struct ixl_rx_queue *rx_que = vsi->rx_queues; + struct rx_ring *rxr; + + for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) + ixl_init_tx_ring(vsi, tx_que); + + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) { + rxr = &rx_que->rxr; + + if (scctx->isc_max_frame_size <= MCLBYTES) + rxr->mbuf_sz = MCLBYTES; + else + rxr->mbuf_sz = MJUMPAGESIZE; + + wr32(vsi->hw, rxr->tail, 0); + } +} + +void +iavf_if_init(if_ctx_t ctx) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + struct i40e_hw *hw = &sc->hw; + struct ifnet *ifp = iflib_get_ifp(ctx); + u8 tmpaddr[ETHER_ADDR_LEN]; + int error = 0; + + INIT_DBG_IF(ifp, "begin"); + + MPASS(sx_xlocked(iflib_ctx_lock_get(ctx))); + + error = iavf_reset_complete(hw); + if (error) { + device_printf(sc->dev, "%s: VF reset failed\n", + __func__); + } + + if (!i40e_check_asq_alive(hw)) { + iavf_dbg_info(sc, "ASQ is not alive, re-initializing AQ\n"); + pci_enable_busmaster(sc->dev); + i40e_shutdown_adminq(hw); + i40e_init_adminq(hw); + } + + /* Make sure queues are disabled */ + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DISABLE_QUEUES); + + bcopy(IF_LLADDR(ifp), tmpaddr, ETHER_ADDR_LEN); + if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && + (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { + error = iavf_del_mac_filter(sc, hw->mac.addr); + if (error == 0) + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER); + + bcopy(tmpaddr, hw->mac.addr, ETH_ALEN); + } + + error = iavf_add_mac_filter(sc, hw->mac.addr, 0); + if (!error || error == EEXIST) + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER); + iflib_set_mac(ctx, hw->mac.addr); + + /* Prepare the queues for operation */ + iavf_init_queues(vsi); + + /* Set initial ITR values */ + iavf_configure_itr(sc); + + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_QUEUES); + + /* Set up RSS */ + iavf_config_rss(sc); + + /* Map vectors */ + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_MAP_VECTORS); + + /* Init SW TX ring indices */ + if (vsi->enable_head_writeback) + ixl_init_tx_cidx(vsi); + else + ixl_init_tx_rsqs(vsi); + + /* Configure promiscuous mode */ + iavf_if_promisc_set(ctx, if_getflags(ifp)); + + /* Enable queues */ + iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_ENABLE_QUEUES); + + sc->init_state = IAVF_RUNNING; +} + +/* + * iavf_attach() helper function; initalizes the admin queue + * and attempts to establish contact with the PF by + * retrying the initial "API version" message several times + * or until the PF responds. 
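+ *
+ * In outline, each of up to IAVF_AQ_MAX_ERR attempts does:
+ *   1. i40e_init_adminq()
+ *   2. iavf_send_api_ver(), then poll i40e_asq_done() in 10 ms steps
+ *   3. iavf_verify_api_ver(); on the first ETIMEDOUT the message is
+ *      resent once before giving up.
+ * A nonzero return value encodes which step failed.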
+ */ +static int +iavf_setup_vc(struct iavf_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + int error = 0, ret_error = 0, asq_retries = 0; + bool send_api_ver_retried = 0; + + /* Need to set these AQ paramters before initializing AQ */ + hw->aq.num_arq_entries = IXL_AQ_LEN; + hw->aq.num_asq_entries = IXL_AQ_LEN; + hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; + hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; + + for (int i = 0; i < IAVF_AQ_MAX_ERR; i++) { + /* Initialize admin queue */ + error = i40e_init_adminq(hw); + if (error) { + device_printf(dev, "%s: init_adminq failed: %d\n", + __func__, error); + ret_error = 1; + continue; + } + + iavf_dbg_init(sc, "Initialized Admin Queue; starting" + " send_api_ver attempt %d", i+1); + +retry_send: + /* Send VF's API version */ + error = iavf_send_api_ver(sc); + if (error) { + i40e_shutdown_adminq(hw); + ret_error = 2; + device_printf(dev, "%s: unable to send api" + " version to PF on attempt %d, error %d\n", + __func__, i+1, error); + } + + asq_retries = 0; + while (!i40e_asq_done(hw)) { + if (++asq_retries > IAVF_AQ_MAX_ERR) { + i40e_shutdown_adminq(hw); + device_printf(dev, "Admin Queue timeout " + "(waiting for send_api_ver), %d more tries...\n", + IAVF_AQ_MAX_ERR - (i + 1)); + ret_error = 3; + break; + } + i40e_msec_pause(10); + } + if (asq_retries > IAVF_AQ_MAX_ERR) + continue; + + iavf_dbg_init(sc, "Sent API version message to PF"); + + /* Verify that the VF accepts the PF's API version */ + error = iavf_verify_api_ver(sc); + if (error == ETIMEDOUT) { + if (!send_api_ver_retried) { + /* Resend message, one more time */ + send_api_ver_retried = true; + device_printf(dev, + "%s: Timeout while verifying API version on first" + " try!\n", __func__); + goto retry_send; + } else { + device_printf(dev, + "%s: Timeout while verifying API version on second" + " try!\n", __func__); + ret_error = 4; + break; + } + } + if (error) { + device_printf(dev, + "%s: Unable to verify API version," + " error %s\n", __func__, i40e_stat_str(hw, error)); + ret_error = 5; + } + break; + } + + if (ret_error >= 4) + i40e_shutdown_adminq(hw); + return (ret_error); +} + +/* + * iavf_attach() helper function; asks the PF for this VF's + * configuration, and saves the information if it receives it. 
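+ *
+ * The flow below: send the config request, poll i40e_asq_done() in
+ * 10 ms steps, allocate a vf_res buffer sized for I40E_MAX_VF_VSI
+ * VSIs, then fetch the reply with iavf_get_vf_config(); the first
+ * ETIMEDOUT triggers one full resend of the request.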
+ */ +static int +iavf_vf_config(struct iavf_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + int bufsz, error = 0, ret_error = 0; + int asq_retries, retried = 0; + +retry_config: + error = iavf_send_vf_config_msg(sc); + if (error) { + device_printf(dev, + "%s: Unable to send VF config request, attempt %d," + " error %d\n", __func__, retried + 1, error); + ret_error = 2; + } + + asq_retries = 0; + while (!i40e_asq_done(hw)) { + if (++asq_retries > IAVF_AQ_MAX_ERR) { + device_printf(dev, "%s: Admin Queue timeout " + "(waiting for send_vf_config_msg), attempt %d\n", + __func__, retried + 1); + ret_error = 3; + goto fail; + } + i40e_msec_pause(10); + } + + iavf_dbg_init(sc, "Sent VF config message to PF, attempt %d\n", + retried + 1); + + if (!sc->vf_res) { + bufsz = sizeof(struct virtchnl_vf_resource) + + (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); + sc->vf_res = malloc(bufsz, M_IAVF, M_NOWAIT); + if (!sc->vf_res) { + device_printf(dev, + "%s: Unable to allocate memory for VF configuration" + " message from PF on attempt %d\n", __func__, retried + 1); + ret_error = 1; + goto fail; + } + } + + /* Check for VF config response */ + error = iavf_get_vf_config(sc); + if (error == ETIMEDOUT) { + /* The 1st time we timeout, send the configuration message again */ + if (!retried) { + retried++; + goto retry_config; + } + device_printf(dev, + "%s: iavf_get_vf_config() timed out waiting for a response\n", + __func__); + } + if (error) { + device_printf(dev, + "%s: Unable to get VF configuration from PF after %d tries!\n", + __func__, retried + 1); + ret_error = 4; + } + goto done; + +fail: + free(sc->vf_res, M_IAVF); +done: + return (ret_error); +} + +static int +iavf_if_msix_intr_assign(if_ctx_t ctx, int msix) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_rx_queue *rx_que = vsi->rx_queues; + struct ixl_tx_queue *tx_que = vsi->tx_queues; + int err, i, rid, vector = 0; + char buf[16]; + + MPASS(vsi->shared->isc_nrxqsets > 0); + MPASS(vsi->shared->isc_ntxqsets > 0); + + /* Admin Que is vector 0*/ + rid = vector + 1; + err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, + iavf_msix_adminq, sc, 0, "aq"); + if (err) { + iflib_irq_free(ctx, &vsi->irq); + device_printf(iflib_get_dev(ctx), + "Failed to register Admin Que handler"); + return (err); + } + + /* Now set up the stations */ + for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) { + rid = vector + 1; + + snprintf(buf, sizeof(buf), "rxq%d", i); + err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, + IFLIB_INTR_RX, iavf_msix_que, rx_que, rx_que->rxr.me, buf); + /* XXX: Does the driver work as expected if there are fewer num_rx_queues than + * what's expected in the iflib context? */ + if (err) { + device_printf(iflib_get_dev(ctx), + "Failed to allocate queue RX int vector %d, err: %d\n", i, err); + vsi->num_rx_queues = i + 1; + goto fail; + } + rx_que->msix = vector; + } + + bzero(buf, sizeof(buf)); + + for (i = 0; i < vsi->shared->isc_ntxqsets; i++, tx_que++) { + snprintf(buf, sizeof(buf), "txq%d", i); + iflib_softirq_alloc_generic(ctx, + &vsi->rx_queues[i % vsi->shared->isc_nrxqsets].que_irq, + IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); + + /* TODO: Maybe call a strategy function for this to figure out which + * interrupts to map Tx queues to. I don't know if there's an immediately + * better way than this other than a user-supplied map, though. 
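+	 *
+	 * For now the policy is plain round-robin: TX queue i shares the
+	 * MSI-X vector of RX queue (i % nrxqsets), so with equal queue
+	 * counts each TX/RX pair shares one interrupt.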
*/ + tx_que->msix = (i % vsi->shared->isc_nrxqsets) + 1; + } + + return (0); +fail: + iflib_irq_free(ctx, &vsi->irq); + rx_que = vsi->rx_queues; + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) + iflib_irq_free(ctx, &rx_que->que_irq); + return (err); +} + +/* Enable all interrupts */ +static void +iavf_if_enable_intr(if_ctx_t ctx) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + + iavf_enable_intr(vsi); +} + +/* Disable all interrupts */ +static void +iavf_if_disable_intr(if_ctx_t ctx) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + + iavf_disable_intr(vsi); +} + +static int +iavf_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + struct i40e_hw *hw = vsi->hw; + struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid]; + + iavf_enable_queue_irq(hw, rx_que->msix - 1); + return (0); +} + +static int +iavf_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + struct i40e_hw *hw = vsi->hw; + struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid]; + + iavf_enable_queue_irq(hw, tx_que->msix - 1); + return (0); +} + +static int +iavf_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + if_softc_ctx_t scctx = vsi->shared; + struct ixl_tx_queue *que; + int i, j, error = 0; + + MPASS(scctx->isc_ntxqsets > 0); + MPASS(ntxqs == 1); + MPASS(scctx->isc_ntxqsets == ntxqsets); + + /* Allocate queue structure memory */ + if (!(vsi->tx_queues = + (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IAVF, M_NOWAIT | M_ZERO))) { + device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); + return (ENOMEM); + } + + for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { + struct tx_ring *txr = &que->txr; + + txr->me = i; + que->vsi = vsi; + + if (!vsi->enable_head_writeback) { + /* Allocate report status array */ + if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IAVF, M_NOWAIT))) { + device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n"); + error = ENOMEM; + goto fail; + } + /* Init report status array */ + for (j = 0; j < scctx->isc_ntxd[0]; j++) + txr->tx_rsq[j] = QIDX_INVALID; + } + /* get the virtual and physical address of the hardware queues */ + txr->tail = I40E_QTX_TAIL1(txr->me); + txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs]; + txr->tx_paddr = paddrs[i * ntxqs]; + txr->que = que; + } + + return (0); +fail: + iavf_if_queues_free(ctx); + return (error); +} + +static int +iavf_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_rx_queue *que; + int i, error = 0; + +#ifdef INVARIANTS + if_softc_ctx_t scctx = vsi->shared; + MPASS(scctx->isc_nrxqsets > 0); + MPASS(nrxqs == 1); + MPASS(scctx->isc_nrxqsets == nrxqsets); +#endif + + /* Allocate queue structure memory */ + if (!(vsi->rx_queues = + (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) * + nrxqsets, M_IAVF, M_NOWAIT | M_ZERO))) { + device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); + error = ENOMEM; + goto fail; + } + + for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) { + struct rx_ring *rxr = &que->rxr; + + rxr->me = i; + 
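+		/*
+		 * rxr->me doubles as the queue index used to derive the
+		 * per-queue tail register offset just below.
+		 */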
que->vsi = vsi; + + /* get the virtual and physical address of the hardware queues */ + rxr->tail = I40E_QRX_TAIL1(rxr->me); + rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs]; + rxr->rx_paddr = paddrs[i * nrxqs]; + rxr->que = que; + } + + return (0); +fail: + iavf_if_queues_free(ctx); + return (error); +} + +static void +iavf_if_queues_free(if_ctx_t ctx) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + + if (!vsi->enable_head_writeback) { + struct ixl_tx_queue *que; + int i = 0; + + for (i = 0, que = vsi->tx_queues; i < vsi->shared->isc_ntxqsets; i++, que++) { + struct tx_ring *txr = &que->txr; + if (txr->tx_rsq != NULL) { + free(txr->tx_rsq, M_IAVF); + txr->tx_rsq = NULL; + } + } + } + + if (vsi->tx_queues != NULL) { + free(vsi->tx_queues, M_IAVF); + vsi->tx_queues = NULL; + } + if (vsi->rx_queues != NULL) { + free(vsi->rx_queues, M_IAVF); + vsi->rx_queues = NULL; + } +} + +static int +iavf_check_aq_errors(struct iavf_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + u32 reg, oldreg; + u8 aq_error = false; + + /* check for Admin queue errors */ + oldreg = reg = rd32(hw, hw->aq.arq.len); + if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) { + device_printf(dev, "ARQ VF Error detected\n"); + reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; + aq_error = true; + } + if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) { + device_printf(dev, "ARQ Overflow Error detected\n"); + reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; + aq_error = true; + } + if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) { + device_printf(dev, "ARQ Critical Error detected\n"); + reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; + aq_error = true; + } + if (oldreg != reg) + wr32(hw, hw->aq.arq.len, reg); + + oldreg = reg = rd32(hw, hw->aq.asq.len); + if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) { + device_printf(dev, "ASQ VF Error detected\n"); + reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; + aq_error = true; + } + if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) { + device_printf(dev, "ASQ Overflow Error detected\n"); + reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; + aq_error = true; + } + if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) { + device_printf(dev, "ASQ Critical Error detected\n"); + reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; + aq_error = true; + } + if (oldreg != reg) + wr32(hw, hw->aq.asq.len, reg); + + if (aq_error) { + device_printf(dev, "WARNING: Stopping VF!\n"); + /* + * A VF reset might not be enough to fix a problem here; + * a PF reset could be required. + */ + sc->init_state = IAVF_RESET_REQUIRED; + iavf_stop(sc); + iavf_request_reset(sc); + } + + return (aq_error ? 
EIO : 0); +} + +static enum i40e_status_code +iavf_process_adminq(struct iavf_sc *sc, u16 *pending) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_arq_event_info event; + struct i40e_hw *hw = &sc->hw; + struct virtchnl_msg *v_msg; + int error = 0, loop = 0; + u32 reg; + + error = iavf_check_aq_errors(sc); + if (error) + return (I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR); + + event.buf_len = IXL_AQ_BUF_SZ; + event.msg_buf = sc->aq_buffer; + bzero(event.msg_buf, IXL_AQ_BUF_SZ); + v_msg = (struct virtchnl_msg *)&event.desc; + + /* clean and process any events */ + do { + status = i40e_clean_arq_element(hw, &event, pending); + /* + * Also covers normal case when i40e_clean_arq_element() + * returns "I40E_ERR_ADMIN_QUEUE_NO_WORK" + */ + if (status) + break; + iavf_vc_completion(sc, v_msg->v_opcode, + v_msg->v_retval, event.msg_buf, event.msg_len); + bzero(event.msg_buf, IXL_AQ_BUF_SZ); + } while (*pending && (loop++ < IXL_ADM_LIMIT)); + + /* Re-enable admin queue interrupt cause */ + reg = rd32(hw, I40E_VFINT_ICR0_ENA1); + reg |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK; + wr32(hw, I40E_VFINT_ICR0_ENA1, reg); + + return (status); +} + +static void +iavf_if_update_admin_status(if_ctx_t ctx) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct i40e_hw *hw = &sc->hw; + u16 pending; + + iavf_process_adminq(sc, &pending); + iavf_update_link_status(sc); + + /* + * If there are still messages to process, reschedule. + * Otherwise, re-enable the Admin Queue interrupt. + */ + if (pending > 0) + iflib_admin_intr_deferred(ctx); + else + iavf_enable_adminq_irq(hw); +} + +static int +iavf_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused) +{ + struct iavf_sc *sc = arg; + int error = 0; + + if (ifma->ifma_addr->sa_family != AF_LINK) + return (0); + error = iavf_add_mac_filter(sc, + (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr), + IXL_FILTER_MC); + + return (!error); +} + +static void +iavf_if_multi_set(if_ctx_t ctx) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + int mcnt = 0; + + IOCTL_DEBUGOUT("iavf_if_multi_set: begin"); + + mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR); + if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { + /* Delete MC filters and enable mulitcast promisc instead */ + iavf_init_multi(sc); + sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC; + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC); + return; + } + + /* If there aren't too many filters, delete existing MC filters */ + iavf_init_multi(sc); + + /* And (re-)install filters for all mcast addresses */ + mcnt = if_multi_apply(iflib_get_ifp(ctx), iavf_mc_filter_apply, sc); + + if (mcnt > 0) + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_MAC_FILTER); +} + +static int +iavf_if_mtu_set(if_ctx_t ctx, uint32_t mtu) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + + IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); + if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - + ETHER_VLAN_ENCAP_LEN) + return (EINVAL); + + vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + ETHER_VLAN_ENCAP_LEN; + + return (0); +} + +static void +iavf_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) +{ +#ifdef IXL_DEBUG + struct ifnet *ifp = iflib_get_ifp(ctx); +#endif + struct iavf_sc *sc = iflib_get_softc(ctx); + + INIT_DBG_IF(ifp, "begin"); + + iavf_update_link_status(sc); + + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; + + if (!sc->link_up) + return; + + ifmr->ifm_status |= IFM_ACTIVE; + /* Hardware is always 
full-duplex */ + ifmr->ifm_active |= IFM_FDX; + + /* Based on the link speed reported by the PF over the AdminQ, choose a + * PHY type to report. This isn't 100% correct since we don't really + * know the underlying PHY type of the PF, but at least we can report + * a valid link speed... + */ + switch (sc->link_speed) { + case VIRTCHNL_LINK_SPEED_100MB: + ifmr->ifm_active |= IFM_100_TX; + break; + case VIRTCHNL_LINK_SPEED_1GB: + ifmr->ifm_active |= IFM_1000_T; + break; + case VIRTCHNL_LINK_SPEED_10GB: + ifmr->ifm_active |= IFM_10G_SR; + break; + case VIRTCHNL_LINK_SPEED_20GB: + case VIRTCHNL_LINK_SPEED_25GB: + ifmr->ifm_active |= IFM_25G_SR; + break; + case VIRTCHNL_LINK_SPEED_40GB: + ifmr->ifm_active |= IFM_40G_SR4; + break; + default: + ifmr->ifm_active |= IFM_UNKNOWN; + break; + } + + INIT_DBG_IF(ifp, "end"); +} + +static int +iavf_if_media_change(if_ctx_t ctx) +{ + struct ifmedia *ifm = iflib_get_media(ctx); + + INIT_DEBUGOUT("ixl_media_change: begin"); + + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) + return (EINVAL); + + if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n"); + return (ENODEV); +} + +static int +iavf_if_promisc_set(if_ctx_t ctx, int flags) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ifnet *ifp = iflib_get_ifp(ctx); + + sc->promisc_flags = 0; + + if (flags & IFF_ALLMULTI || + if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR) + sc->promisc_flags |= FLAG_VF_MULTICAST_PROMISC; + if (flags & IFF_PROMISC) + sc->promisc_flags |= FLAG_VF_UNICAST_PROMISC; + + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIGURE_PROMISC); + + return (0); +} + +static void +iavf_if_timer(if_ctx_t ctx, uint16_t qid) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct i40e_hw *hw = &sc->hw; + u32 val; + + if (qid != 0) + return; + + /* Check for when PF triggers a VF reset */ + val = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + if (val != VIRTCHNL_VFR_VFACTIVE + && val != VIRTCHNL_VFR_COMPLETED) { + iavf_dbg_info(sc, "reset in progress! 
(%d)\n", val); + return; + } + + /* Fire off the adminq task */ + iflib_admin_intr_deferred(ctx); + + /* Update stats */ + iavf_request_stats(sc); +} + +static void +iavf_if_vlan_register(if_ctx_t ctx, u16 vtag) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + struct iavf_vlan_filter *v; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + ++vsi->num_vlans; + v = malloc(sizeof(struct iavf_vlan_filter), M_IAVF, M_WAITOK | M_ZERO); + SLIST_INSERT_HEAD(sc->vlan_filters, v, next); + v->vlan = vtag; + v->flags = IXL_FILTER_ADD; + + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_ADD_VLAN_FILTER); +} + +static void +iavf_if_vlan_unregister(if_ctx_t ctx, u16 vtag) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + struct iavf_vlan_filter *v; + int i = 0; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + SLIST_FOREACH(v, sc->vlan_filters, next) { + if (v->vlan == vtag) { + v->flags = IXL_FILTER_DEL; + ++i; + --vsi->num_vlans; + } + } + if (i) + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_VLAN_FILTER); +} + +static uint64_t +iavf_if_get_counter(if_ctx_t ctx, ift_counter cnt) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &sc->vsi; + if_t ifp = iflib_get_ifp(ctx); + + switch (cnt) { + case IFCOUNTER_IPACKETS: + return (vsi->ipackets); + case IFCOUNTER_IERRORS: + return (vsi->ierrors); + case IFCOUNTER_OPACKETS: + return (vsi->opackets); + case IFCOUNTER_OERRORS: + return (vsi->oerrors); + case IFCOUNTER_COLLISIONS: + /* Collisions are by standard impossible in 40G/10G Ethernet */ + return (0); + case IFCOUNTER_IBYTES: + return (vsi->ibytes); + case IFCOUNTER_OBYTES: + return (vsi->obytes); + case IFCOUNTER_IMCASTS: + return (vsi->imcasts); + case IFCOUNTER_OMCASTS: + return (vsi->omcasts); + case IFCOUNTER_IQDROPS: + return (vsi->iqdrops); + case IFCOUNTER_OQDROPS: + return (vsi->oqdrops); + case IFCOUNTER_NOPROTO: + return (vsi->noproto); + default: + return (if_get_counter_default(ifp, cnt)); + } +} + + +static void +iavf_free_pci_resources(struct iavf_sc *sc) +{ + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_rx_queue *rx_que = vsi->rx_queues; + device_t dev = sc->dev; + + /* We may get here before stations are setup */ + if (rx_que == NULL) + goto early; + + /* Release all interrupts */ + iflib_irq_free(vsi->ctx, &vsi->irq); + + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) + iflib_irq_free(vsi->ctx, &rx_que->que_irq); + +early: + if (sc->pci_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(0), sc->pci_mem); +} + + +/* +** Requests a VF reset from the PF. +** +** Requires the VF's Admin Queue to be initialized. 
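+**
+** In outline: if this VF initiated the reset, first ask the PF via
+** iavf_request_reset(); pause 100 ms; poll I40E_VFGEN_RSTAT until the
+** PF reports VFR_VFACTIVE or VFR_COMPLETED (iavf_reset_complete()
+** gives up after roughly 10 seconds); then restart the Admin Queue.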
+*/ +static int +iavf_reset(struct iavf_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + int error = 0; + + /* Ask the PF to reset us if we are initiating */ + if (sc->init_state != IAVF_RESET_PENDING) + iavf_request_reset(sc); + + i40e_msec_pause(100); + error = iavf_reset_complete(hw); + if (error) { + device_printf(dev, "%s: VF reset failed\n", + __func__); + return (error); + } + pci_enable_busmaster(dev); + + error = i40e_shutdown_adminq(hw); + if (error) { + device_printf(dev, "%s: shutdown_adminq failed: %d\n", + __func__, error); + return (error); + } + + error = i40e_init_adminq(hw); + if (error) { + device_printf(dev, "%s: init_adminq failed: %d\n", + __func__, error); + return (error); + } + + iavf_enable_adminq_irq(hw); + return (0); +} + +static int +iavf_reset_complete(struct i40e_hw *hw) +{ + u32 reg; + + /* Wait up to ~10 seconds */ + for (int i = 0; i < 100; i++) { + reg = rd32(hw, I40E_VFGEN_RSTAT) & + I40E_VFGEN_RSTAT_VFR_STATE_MASK; + + if ((reg == VIRTCHNL_VFR_VFACTIVE) || + (reg == VIRTCHNL_VFR_COMPLETED)) + return (0); + i40e_msec_pause(100); + } + + return (EBUSY); +} + +static void +iavf_setup_interface(device_t dev, struct iavf_sc *sc) +{ + struct ixl_vsi *vsi = &sc->vsi; + if_ctx_t ctx = vsi->ctx; + struct ifnet *ifp = iflib_get_ifp(ctx); + + INIT_DBG_DEV(dev, "begin"); + + vsi->shared->isc_max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + ETHER_VLAN_ENCAP_LEN; +#if __FreeBSD_version >= 1100000 + if_setbaudrate(ifp, IF_Gbps(40)); +#else + if_initbaudrate(ifp, IF_Gbps(40)); +#endif + + ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO); +} + +/* +** Get a new filter and add it to the mac filter list. +*/ +static struct iavf_mac_filter * +iavf_get_mac_filter(struct iavf_sc *sc) +{ + struct iavf_mac_filter *f; + + f = malloc(sizeof(struct iavf_mac_filter), + M_IAVF, M_NOWAIT | M_ZERO); + if (f) + SLIST_INSERT_HEAD(sc->mac_filters, f, next); + + return (f); +} + +/* +** Find the filter with matching MAC address +*/ +static struct iavf_mac_filter * +iavf_find_mac_filter(struct iavf_sc *sc, u8 *macaddr) +{ + struct iavf_mac_filter *f; + bool match = FALSE; + + SLIST_FOREACH(f, sc->mac_filters, next) { + if (cmp_etheraddr(f->macaddr, macaddr)) { + match = TRUE; + break; + } + } + + if (!match) + f = NULL; + return (f); +} + +/* +** Admin Queue interrupt handler +*/ +static int +iavf_msix_adminq(void *arg) +{ + struct iavf_sc *sc = arg; + struct i40e_hw *hw = &sc->hw; + u32 reg, mask; + bool do_task = FALSE; + + ++sc->admin_irq; + + reg = rd32(hw, I40E_VFINT_ICR01); + /* + * For masking off interrupt causes that need to be handled before + * they can be re-enabled + */ + mask = rd32(hw, I40E_VFINT_ICR0_ENA1); + + /* Check on the cause */ + if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) { + mask &= ~I40E_VFINT_ICR0_ENA_ADMINQ_MASK; + do_task = TRUE; + } + + wr32(hw, I40E_VFINT_ICR0_ENA1, mask); + iavf_enable_adminq_irq(hw); + + if (do_task) + return (FILTER_SCHEDULE_THREAD); + else + return (FILTER_HANDLED); +} + +void +iavf_enable_intr(struct ixl_vsi *vsi) +{ + struct i40e_hw *hw = vsi->hw; + struct ixl_rx_queue *que = vsi->rx_queues; + + iavf_enable_adminq_irq(hw); + for (int i = 0; i < vsi->num_rx_queues; i++, que++) + iavf_enable_queue_irq(hw, que->rxr.me); +} + +void +iavf_disable_intr(struct ixl_vsi *vsi) +{ + struct i40e_hw *hw = vsi->hw; + struct ixl_rx_queue *que = vsi->rx_queues; + + for (int i = 0; i < vsi->num_rx_queues; i++, que++) + iavf_disable_queue_irq(hw, que->rxr.me); +} 
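+
+/*
+ * Most of the interrupt register helpers below end with a read of
+ * I40E_VFGEN_RSTAT, used as a posted-write flush. A minimal sketch of
+ * that idiom (illustrative only; this helper is not part of the
+ * driver):
+ *
+ *	static inline void
+ *	iavf_flush_reg_writes(struct i40e_hw *hw)
+ *	{
+ *		(void)rd32(hw, I40E_VFGEN_RSTAT);
+ *	}
+ */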
+ +static void +iavf_disable_adminq_irq(struct i40e_hw *hw) +{ + wr32(hw, I40E_VFINT_DYN_CTL01, 0); + wr32(hw, I40E_VFINT_ICR0_ENA1, 0); + /* flush */ + rd32(hw, I40E_VFGEN_RSTAT); +} + +static void +iavf_enable_adminq_irq(struct i40e_hw *hw) +{ + wr32(hw, I40E_VFINT_DYN_CTL01, + I40E_VFINT_DYN_CTL01_INTENA_MASK | + I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); + wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); + /* flush */ + rd32(hw, I40E_VFGEN_RSTAT); +} + +static void +iavf_enable_queue_irq(struct i40e_hw *hw, int id) +{ + u32 reg; + + reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; + wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg); +} + +static void +iavf_disable_queue_irq(struct i40e_hw *hw, int id) +{ + wr32(hw, I40E_VFINT_DYN_CTLN1(id), + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); + rd32(hw, I40E_VFGEN_RSTAT); +} + +static void +iavf_configure_tx_itr(struct iavf_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_tx_queue *que = vsi->tx_queues; + + vsi->tx_itr_setting = sc->tx_itr; + + for (int i = 0; i < vsi->num_tx_queues; i++, que++) { + struct tx_ring *txr = &que->txr; + + wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i), + vsi->tx_itr_setting); + txr->itr = vsi->tx_itr_setting; + txr->latency = IXL_AVE_LATENCY; + } +} + +static void +iavf_configure_rx_itr(struct iavf_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_rx_queue *que = vsi->rx_queues; + + vsi->rx_itr_setting = sc->rx_itr; + + for (int i = 0; i < vsi->num_rx_queues; i++, que++) { + struct rx_ring *rxr = &que->rxr; + + wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i), + vsi->rx_itr_setting); + rxr->itr = vsi->rx_itr_setting; + rxr->latency = IXL_AVE_LATENCY; + } +} + +/* + * Get initial ITR values from tunable values. + */ +static void +iavf_configure_itr(struct iavf_sc *sc) +{ + iavf_configure_tx_itr(sc); + iavf_configure_rx_itr(sc); +} + +/* +** Provide a update to the queue RX +** interrupt moderation value. +*/ +static void +iavf_set_queue_rx_itr(struct ixl_rx_queue *que) +{ + struct ixl_vsi *vsi = que->vsi; + struct i40e_hw *hw = vsi->hw; + struct rx_ring *rxr = &que->rxr; + + /* Idle, do nothing */ + if (rxr->bytes == 0) + return; + + /* Update the hardware if needed */ + if (rxr->itr != vsi->rx_itr_setting) { + rxr->itr = vsi->rx_itr_setting; + wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, + que->rxr.me), rxr->itr); + } +} + +static int +iavf_msix_que(void *arg) +{ + struct ixl_rx_queue *rx_que = arg; + + ++rx_que->irqs; + + iavf_set_queue_rx_itr(rx_que); + // iavf_set_queue_tx_itr(que); + + return (FILTER_SCHEDULE_THREAD); +} + +/********************************************************************* + * Multicast Initialization + * + * This routine is called by init to reset a fresh state. + * + **********************************************************************/ +static void +iavf_init_multi(struct iavf_sc *sc) +{ + struct iavf_mac_filter *f; + int mcnt = 0; + + /* First clear any multicast filters */ + SLIST_FOREACH(f, sc->mac_filters, next) { + if ((f->flags & IXL_FILTER_USED) + && (f->flags & IXL_FILTER_MC)) { + f->flags |= IXL_FILTER_DEL; + mcnt++; + } + } + if (mcnt > 0) + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_DEL_MAC_FILTER); +} + +/* +** Note: this routine updates the OS on the link state +** the real check of the hardware only happens with +** a link interrupt. 
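+**
+** sc->link_up and sc->link_speed are cached from virtchnl link events;
+** this routine just reconciles them with vsi->link_active and calls
+** iflib_link_state_change() with a baudrate derived from the reported
+** virtchnl speed.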
+*/ +void +iavf_update_link_status(struct iavf_sc *sc) +{ + struct ixl_vsi *vsi = &sc->vsi; + u64 baudrate; + + if (sc->link_up){ + if (vsi->link_active == FALSE) { + vsi->link_active = TRUE; + baudrate = ixl_max_vc_speed_to_value(sc->link_speed); + iavf_dbg_info(sc, "baudrate: %lu\n", baudrate); + iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate); + } + } else { /* Link down */ + if (vsi->link_active == TRUE) { + vsi->link_active = FALSE; + iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0); + } + } +} + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC and deallocates TX/RX buffers. + * + **********************************************************************/ + +static void +iavf_stop(struct iavf_sc *sc) +{ + struct ifnet *ifp; + + ifp = sc->vsi.ifp; + + iavf_disable_intr(&sc->vsi); + + if (atomic_load_acq_32(&sc->queues_enabled)) + iavf_send_vc_msg_sleep(sc, IAVF_FLAG_AQ_DISABLE_QUEUES); +} + +static void +iavf_if_stop(if_ctx_t ctx) +{ + struct iavf_sc *sc = iflib_get_softc(ctx); + + iavf_stop(sc); +} + +static void +iavf_config_rss_reg(struct iavf_sc *sc) +{ + struct i40e_hw *hw = &sc->hw; + struct ixl_vsi *vsi = &sc->vsi; + u32 lut = 0; + u64 set_hena = 0, hena; + int i, j, que_id; + u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; +#ifdef RSS + u32 rss_hash_config; +#endif + + /* Don't set up RSS if using a single queue */ + if (vsi->num_rx_queues == 1) { + wr32(hw, I40E_VFQF_HENA(0), 0); + wr32(hw, I40E_VFQF_HENA(1), 0); + ixl_flush(hw); + return; + } + +#ifdef RSS + /* Fetch the configured RSS key */ + rss_getkey((uint8_t *) &rss_seed); +#else + ixl_get_default_rss_key(rss_seed); +#endif + + /* Fill out hash function seed */ + for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) + wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]); + + /* Enable PCTYPES for RSS: */ +#ifdef RSS + rss_hash_config = rss_gethashconfig(); + if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) + set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); + if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) + set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); + if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) + set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); + if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) + set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); + if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) + set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); + if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) + set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); + if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) + set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); +#else + set_hena = IXL_DEFAULT_RSS_HENA_XL710; +#endif + hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | + ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); + hena |= set_hena; + wr32(hw, I40E_VFQF_HENA(0), (u32)hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + + /* Populate the LUT with max no. of queues in round robin fashion */ + for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) { + if (j == vsi->num_rx_queues) + j = 0; +#ifdef RSS + /* + * Fetch the RSS bucket id for the given indirection entry. + * Cap it at the number of configured buckets (which is + * num_queues.) 
+ */ + que_id = rss_get_indirection_to_bucket(i); + que_id = que_id % vsi->num_queues; +#else + que_id = j; +#endif + /* lut = 4-byte sliding window of 4 lut entries */ + lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK); + /* On i = 3, we have 4 entries in lut; write to the register */ + if ((i & 3) == 3) { + wr32(hw, I40E_VFQF_HLUT(i >> 2), lut); + DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut); + } + } + ixl_flush(hw); +} + +static void +iavf_config_rss_pf(struct iavf_sc *sc) +{ + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_KEY); + + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_SET_RSS_HENA); + + iavf_send_vc_msg(sc, IAVF_FLAG_AQ_CONFIG_RSS_LUT); +} + +/* +** iavf_config_rss - setup RSS +** +** RSS keys and table are cleared on VF reset. +*/ +static void +iavf_config_rss(struct iavf_sc *sc) +{ + if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) { + iavf_dbg_info(sc, "Setting up RSS using VF registers..."); + iavf_config_rss_reg(sc); + } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + iavf_dbg_info(sc, "Setting up RSS using messages to PF..."); + iavf_config_rss_pf(sc); + } else + device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n"); +} + +/* +** This routine adds new MAC filters to the sc's list; +** these are later added in hardware by sending a virtual +** channel message. +*/ +static int +iavf_add_mac_filter(struct iavf_sc *sc, u8 *macaddr, u16 flags) +{ + struct iavf_mac_filter *f; + + /* Does one already exist? */ + f = iavf_find_mac_filter(sc, macaddr); + if (f != NULL) { + iavf_dbg_filter(sc, "exists: " MAC_FORMAT "\n", + MAC_FORMAT_ARGS(macaddr)); + return (EEXIST); + } + + /* If not, get a new empty filter */ + f = iavf_get_mac_filter(sc); + if (f == NULL) { + device_printf(sc->dev, "%s: no filters available!!\n", + __func__); + return (ENOMEM); + } + + iavf_dbg_filter(sc, "marked: " MAC_FORMAT "\n", + MAC_FORMAT_ARGS(macaddr)); + + bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); + f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); + f->flags |= flags; + return (0); +} + +/* +** Marks a MAC filter for deletion. +*/ +static int +iavf_del_mac_filter(struct iavf_sc *sc, u8 *macaddr) +{ + struct iavf_mac_filter *f; + + f = iavf_find_mac_filter(sc, macaddr); + if (f == NULL) + return (ENOENT); + + f->flags |= IXL_FILTER_DEL; + return (0); +} + +/* + * Re-uses the name from the PF driver. + */ +static void +iavf_add_device_sysctls(struct iavf_sc *sc) +{ + struct ixl_vsi *vsi = &sc->vsi; + device_t dev = sc->dev; + + struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); + struct sysctl_oid_list *ctx_list = + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); + struct sysctl_oid *debug_node; + struct sysctl_oid_list *debug_list; + + SYSCTL_ADD_PROC(ctx, ctx_list, + OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, + sc, 0, iavf_sysctl_current_speed, "A", "Current Port Speed"); + + SYSCTL_ADD_PROC(ctx, ctx_list, + OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW, + sc, 0, iavf_sysctl_tx_itr, "I", + "Immediately set TX ITR value for all queues"); + + SYSCTL_ADD_PROC(ctx, ctx_list, + OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW, + sc, 0, iavf_sysctl_rx_itr, "I", + "Immediately set RX ITR value for all queues"); + + /* Add sysctls meant to print debug information, but don't list them + * in "sysctl -a" output. 
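+	 * CTLFLAG_SKIP keeps the "debug" node out of "sysctl -a" style
+	 * listings while it remains reachable by explicit name, e.g.
+	 * (assuming unit 0):
+	 *
+	 *   sysctl dev.iavf.0.debug.filter_list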
*/ + debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, + OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls"); + debug_list = SYSCTL_CHILDREN(debug_node); + + SYSCTL_ADD_UINT(ctx, debug_list, + OID_AUTO, "shared_debug_mask", CTLFLAG_RW, + &sc->hw.debug_mask, 0, "Shared code debug message level"); + + SYSCTL_ADD_UINT(ctx, debug_list, + OID_AUTO, "core_debug_mask", CTLFLAG_RW, + &sc->dbg_mask, 0, "Non-shared code debug message level"); + + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD, + sc, 0, iavf_sysctl_sw_filter_list, "A", "SW Filter List"); + + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD, + sc, 0, iavf_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); + + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "do_vf_reset", CTLTYPE_INT | CTLFLAG_WR, + sc, 0, iavf_sysctl_vf_reset, "A", "Request a VF reset from PF"); + + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "do_vflr_reset", CTLTYPE_INT | CTLFLAG_WR, + sc, 0, iavf_sysctl_vflr_reset, "A", "Request a VFLR reset from HW"); + + /* Add stats sysctls */ + ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi"); + ixl_add_queues_sysctls(dev, vsi); + +} + +static void +iavf_init_filters(struct iavf_sc *sc) +{ + sc->mac_filters = malloc(sizeof(struct mac_list), + M_IAVF, M_WAITOK | M_ZERO); + SLIST_INIT(sc->mac_filters); + sc->vlan_filters = malloc(sizeof(struct vlan_list), + M_IAVF, M_WAITOK | M_ZERO); + SLIST_INIT(sc->vlan_filters); +} + +static void +iavf_free_filters(struct iavf_sc *sc) +{ + struct iavf_mac_filter *f; + struct iavf_vlan_filter *v; + + while (!SLIST_EMPTY(sc->mac_filters)) { + f = SLIST_FIRST(sc->mac_filters); + SLIST_REMOVE_HEAD(sc->mac_filters, next); + free(f, M_IAVF); + } + free(sc->mac_filters, M_IAVF); + while (!SLIST_EMPTY(sc->vlan_filters)) { + v = SLIST_FIRST(sc->vlan_filters); + SLIST_REMOVE_HEAD(sc->vlan_filters, next); + free(v, M_IAVF); + } + free(sc->vlan_filters, M_IAVF); +} + +char * +iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed) +{ + int index; + + char *speeds[] = { + "Unknown", + "100 Mbps", + "1 Gbps", + "10 Gbps", + "40 Gbps", + "20 Gbps", + "25 Gbps", + }; + + switch (link_speed) { + case VIRTCHNL_LINK_SPEED_100MB: + index = 1; + break; + case VIRTCHNL_LINK_SPEED_1GB: + index = 2; + break; + case VIRTCHNL_LINK_SPEED_10GB: + index = 3; + break; + case VIRTCHNL_LINK_SPEED_40GB: + index = 4; + break; + case VIRTCHNL_LINK_SPEED_20GB: + index = 5; + break; + case VIRTCHNL_LINK_SPEED_25GB: + index = 6; + break; + case VIRTCHNL_LINK_SPEED_UNKNOWN: + default: + index = 0; + break; + } + + return speeds[index]; +} + +static int +iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS) +{ + struct iavf_sc *sc = (struct iavf_sc *)arg1; + int error = 0; + + error = sysctl_handle_string(oidp, + iavf_vc_speed_to_string(sc->link_speed), + 8, req); + return (error); +} + +/* + * Sanity check and save off tunable values. 
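+ *
+ * Out-of-range ITR tunables are not rejected outright; they are
+ * replaced with the defaults (IXL_ITR_4K for TX, IXL_ITR_8K for RX)
+ * after printing a warning.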
+ */ +static void +iavf_save_tunables(struct iavf_sc *sc) +{ + device_t dev = sc->dev; + + /* Save tunable information */ + sc->dbg_mask = iavf_core_debug_mask; + sc->hw.debug_mask = iavf_shared_debug_mask; + sc->vsi.enable_head_writeback = !!(iavf_enable_head_writeback); + + if (iavf_tx_itr < 0 || iavf_tx_itr > IXL_MAX_ITR) { + device_printf(dev, "Invalid tx_itr value of %d set!\n", + iavf_tx_itr); + device_printf(dev, "tx_itr must be between %d and %d, " + "inclusive\n", + 0, IXL_MAX_ITR); + device_printf(dev, "Using default value of %d instead\n", + IXL_ITR_4K); + sc->tx_itr = IXL_ITR_4K; + } else + sc->tx_itr = iavf_tx_itr; + + if (iavf_rx_itr < 0 || iavf_rx_itr > IXL_MAX_ITR) { + device_printf(dev, "Invalid rx_itr value of %d set!\n", + iavf_rx_itr); + device_printf(dev, "rx_itr must be between %d and %d, " + "inclusive\n", + 0, IXL_MAX_ITR); + device_printf(dev, "Using default value of %d instead\n", + IXL_ITR_8K); + sc->rx_itr = IXL_ITR_8K; + } else + sc->rx_itr = iavf_rx_itr; +} + +/* + * Used to set the Tx ITR value for all of the VF's queues. + * Writes to the ITR registers immediately. + */ +static int +iavf_sysctl_tx_itr(SYSCTL_HANDLER_ARGS) +{ + struct iavf_sc *sc = (struct iavf_sc *)arg1; + device_t dev = sc->dev; + int requested_tx_itr; + int error = 0; + + requested_tx_itr = sc->tx_itr; + error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { + device_printf(dev, + "Invalid TX itr value; value must be between 0 and %d\n", + IXL_MAX_ITR); + return (EINVAL); + } + + sc->tx_itr = requested_tx_itr; + iavf_configure_tx_itr(sc); + + return (error); +} + +/* + * Used to set the Rx ITR value for all of the VF's queues. + * Writes to the ITR registers immediately. 
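+ *
+ * Usage example (assuming unit 0; the value must be within
+ * [0, IXL_MAX_ITR]):
+ *
+ *   sysctl dev.iavf.0.rx_itr=62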
+ */ +static int +iavf_sysctl_rx_itr(SYSCTL_HANDLER_ARGS) +{ + struct iavf_sc *sc = (struct iavf_sc *)arg1; + device_t dev = sc->dev; + int requested_rx_itr; + int error = 0; + + requested_rx_itr = sc->rx_itr; + error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { + device_printf(dev, + "Invalid RX itr value; value must be between 0 and %d\n", + IXL_MAX_ITR); + return (EINVAL); + } + + sc->rx_itr = requested_rx_itr; + iavf_configure_rx_itr(sc); + + return (error); +} + +static int +iavf_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) +{ + struct iavf_sc *sc = (struct iavf_sc *)arg1; + struct iavf_mac_filter *f; + struct iavf_vlan_filter *v; + device_t dev = sc->dev; + int ftl_len, ftl_counter = 0, error = 0; + struct sbuf *buf; + + buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); + if (!buf) { + device_printf(dev, "Could not allocate sbuf for output.\n"); + return (ENOMEM); + } + + sbuf_printf(buf, "\n"); + + /* Print MAC filters */ + sbuf_printf(buf, "MAC Filters:\n"); + ftl_len = 0; + SLIST_FOREACH(f, sc->mac_filters, next) + ftl_len++; + if (ftl_len < 1) + sbuf_printf(buf, "(none)\n"); + else { + SLIST_FOREACH(f, sc->mac_filters, next) { + sbuf_printf(buf, + MAC_FORMAT ", flags %#06x\n", + MAC_FORMAT_ARGS(f->macaddr), f->flags); + } + } + + /* Print VLAN filters */ + sbuf_printf(buf, "VLAN Filters:\n"); + ftl_len = 0; + SLIST_FOREACH(v, sc->vlan_filters, next) + ftl_len++; + if (ftl_len < 1) + sbuf_printf(buf, "(none)"); + else { + SLIST_FOREACH(v, sc->vlan_filters, next) { + sbuf_printf(buf, + "%d, flags %#06x", + v->vlan, v->flags); + /* don't print '\n' for last entry */ + if (++ftl_counter != ftl_len) + sbuf_printf(buf, "\n"); + } + } + + error = sbuf_finish(buf); + if (error) + device_printf(dev, "Error finishing sbuf: %d\n", error); + + sbuf_delete(buf); + return (error); +} + +/* + * Print out mapping of TX queue indexes and Rx queue indexes + * to MSI-X vectors. 
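+ *
+ * Illustrative output for two queue pairs (the AQ is on vector 0):
+ *
+ *   (rxq   0): 1
+ *   (rxq   1): 2
+ *   (txq   0): 1
+ *   (txq   1): 2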
+
+/*
+ * Print out mapping of TX queue indexes and Rx queue indexes
+ * to MSI-X vectors.
+ */
+static int
+iavf_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
+{
+	struct iavf_sc *sc = (struct iavf_sc *)arg1;
+	struct ixl_vsi *vsi = &sc->vsi;
+	device_t dev = sc->dev;
+	struct sbuf *buf;
+	int error = 0;
+
+	struct ixl_rx_queue *rx_que = vsi->rx_queues;
+	struct ixl_tx_queue *tx_que = vsi->tx_queues;
+
+	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+	if (!buf) {
+		device_printf(dev, "Could not allocate sbuf for output.\n");
+		return (ENOMEM);
+	}
+
+	sbuf_cat(buf, "\n");
+	for (int i = 0; i < vsi->num_rx_queues; i++) {
+		rx_que = &vsi->rx_queues[i];
+		sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
+	}
+	for (int i = 0; i < vsi->num_tx_queues; i++) {
+		tx_que = &vsi->tx_queues[i];
+		sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
+	}
+
+	error = sbuf_finish(buf);
+	if (error)
+		device_printf(dev, "Error finishing sbuf: %d\n", error);
+	sbuf_delete(buf);
+
+	return (error);
+}
+
+#define CTX_ACTIVE(ctx) ((if_getdrvflags(iflib_get_ifp(ctx)) & IFF_DRV_RUNNING))
+static int
+iavf_sysctl_vf_reset(SYSCTL_HANDLER_ARGS)
+{
+	struct iavf_sc *sc = (struct iavf_sc *)arg1;
+	int do_reset = 0, error = 0;
+
+	error = sysctl_handle_int(oidp, &do_reset, 0, req);
+	if ((error) || (req->newptr == NULL))
+		return (error);
+
+	if (do_reset == 1) {
+		iavf_reset(sc);
+		if (CTX_ACTIVE(sc->vsi.ctx))
+			iflib_request_reset(sc->vsi.ctx);
+	}
+
+	return (error);
+}
+
+static int
+iavf_sysctl_vflr_reset(SYSCTL_HANDLER_ARGS)
+{
+	struct iavf_sc *sc = (struct iavf_sc *)arg1;
+	device_t dev = sc->dev;
+	int do_reset = 0, error = 0;
+
+	error = sysctl_handle_int(oidp, &do_reset, 0, req);
+	if ((error) || (req->newptr == NULL))
+		return (error);
+
+	if (do_reset == 1) {
+		if (!pcie_flr(dev, max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) {
+			device_printf(dev, "PCIE FLR failed\n");
+			error = EIO;
+		}
+		else if (CTX_ACTIVE(sc->vsi.ctx))
+			iflib_request_reset(sc->vsi.ctx);
+	}
+
+	return (error);
+}
+#undef CTX_ACTIVE

Property changes on: head/sys/dev/ixl/if_iavf.c
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Index: head/sys/dev/ixl/ixl.h
===================================================================
--- head/sys/dev/ixl/ixl.h	(revision 339361)
+++ head/sys/dev/ixl/ixl.h	(revision 339362)
@@ -1,549 +1,549 @@
/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_H_ #define _IXL_H_ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include "opt_ixl.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #include #endif #include "ifdi_if.h" #include "i40e_type.h" #include "i40e_prototype.h" #include "ixl_debug.h" #define PVIDV(vendor, devid, name) \ PVID(vendor, devid, name " - " IXL_DRIVER_VERSION_STRING) /* Tunables */ /* * Ring Descriptors Valid Range: 32-4096 Default Value: 1024 This value is the * number of tx/rx descriptors allocated by the driver. Increasing this * value allows the driver to queue more operations. * * Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes. * The driver currently always uses 32 byte Rx descriptors. */ #define IXL_DEFAULT_RING 1024 #define IXL_MAX_RING 4096 #define IXL_MIN_RING 64 #define IXL_RING_INCREMENT 32 #define IXL_AQ_LEN 256 #define IXL_AQ_LEN_MAX 1024 /* Alignment for rings */ #define DBA_ALIGN 128 #define MAX_MULTICAST_ADDR 128 #define IXL_MSIX_BAR 3 #define IXL_ADM_LIMIT 2 #define IXL_TSO_SIZE ((255*1024)-1) #define IXL_TX_BUF_SZ ((u32) 1514) #define IXL_AQ_BUF_SZ ((u32) 4096) #define IXL_RX_ITR 0 #define IXL_TX_ITR 1 #define IXL_ITR_NONE 3 #define IXL_QUEUE_EOL 0x7FF #define IXL_MIN_FRAME 17 #define IXL_MAX_FRAME 9728 #define IXL_MAX_TX_SEGS 8 #define IXL_MAX_RX_SEGS 5 #define IXL_MAX_TSO_SEGS 128 #define IXL_SPARSE_CHAIN 7 #define IXL_MIN_TSO_MSS 64 #define IXL_MAX_TSO_MSS 9668 #define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1) #define IXL_RSS_KEY_SIZE_REG 13 #define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4) #define IXL_RSS_VSI_LUT_SIZE 64 /* X722 -> VSI, X710 -> VF */ #define IXL_RSS_VSI_LUT_ENTRY_MASK 0x3F #define IXL_RSS_VF_LUT_ENTRY_MASK 0xF #define IXL_VF_MAX_BUFFER 0x3F80 #define IXL_VF_MAX_HDR_BUFFER 0x840 #define IXL_VF_MAX_FRAME 0x3FFF /* ERJ: hardware can support ~2k (SW5+) filters between all functions */ #define IXL_MAX_FILTERS 256 #define IXL_NVM_VERSION_LO_SHIFT 0 #define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT) #define IXL_NVM_VERSION_HI_SHIFT 12 #define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT) /* * Interrupt Moderation parameters * Multiply ITR values by 2 for real ITR value */ #define IXL_MAX_ITR 0x0FF0 #define IXL_ITR_100K 0x0005 #define IXL_ITR_20K 0x0019 #define IXL_ITR_8K 0x003E #define IXL_ITR_4K 0x007A #define IXL_ITR_1K 0x01F4 #define IXL_ITR_DYNAMIC 0x8000 #define IXL_LOW_LATENCY 0 #define IXL_AVE_LATENCY 1 #define IXL_BULK_LATENCY 2 /* MacVlan Flags */ #define IXL_FILTER_USED 
(u16)(1 << 0) #define IXL_FILTER_VLAN (u16)(1 << 1) #define IXL_FILTER_ADD (u16)(1 << 2) #define IXL_FILTER_DEL (u16)(1 << 3) #define IXL_FILTER_MC (u16)(1 << 4) /* used in the vlan field of the filter when not a vlan */ #define IXL_VLAN_ANY -1 #define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) #define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6) #define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO) #define IXL_VF_RESET_TIMEOUT 100 #define IXL_VSI_DATA_PORT 0x01 -#define IXLV_MAX_QUEUES 16 +#define IAVF_MAX_QUEUES 16 #define IXL_MAX_VSI_QUEUES (2 * (I40E_VSILAN_QTABLE_MAX_INDEX + 1)) #define IXL_RX_CTX_BASE_UNITS 128 #define IXL_TX_CTX_BASE_UNITS 128 #define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS 0xAA #define IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK 0x20 #define IXL_GLGEN_VFLRSTAT_INDEX(glb_vf) ((glb_vf) / 32) #define IXL_GLGEN_VFLRSTAT_MASK(glb_vf) (1 << ((glb_vf) % 32)) #define IXL_MAX_ITR_IDX 3 #define IXL_END_OF_INTR_LNKLST 0x7FF #define IXL_DEFAULT_RSS_HENA_BASE (\ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) #define IXL_DEFAULT_RSS_HENA_XL710 IXL_DEFAULT_RSS_HENA_BASE #define IXL_DEFAULT_RSS_HENA_X722 (\ IXL_DEFAULT_RSS_HENA_BASE | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) #define IXL_CAPS \ (IFCAP_TSO4 | IFCAP_TSO6 | \ IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \ IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \ IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | \ IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO) #define IXL_CSUM_TCP \ (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP) #define IXL_CSUM_UDP \ (CSUM_IP_UDP|CSUM_IP6_UDP) #define IXL_CSUM_SCTP \ (CSUM_IP_SCTP|CSUM_IP6_SCTP) /* Pre-11 counter(9) compatibility */ #if __FreeBSD_version >= 1100036 #define IXL_SET_IPACKETS(vsi, count) (vsi)->ipackets = (count) #define IXL_SET_IERRORS(vsi, count) (vsi)->ierrors = (count) #define IXL_SET_OPACKETS(vsi, count) (vsi)->opackets = (count) #define IXL_SET_OERRORS(vsi, count) (vsi)->oerrors = (count) #define IXL_SET_COLLISIONS(vsi, count) /* Do nothing; collisions is always 0. 
*/ #define IXL_SET_IBYTES(vsi, count) (vsi)->ibytes = (count) #define IXL_SET_OBYTES(vsi, count) (vsi)->obytes = (count) #define IXL_SET_IMCASTS(vsi, count) (vsi)->imcasts = (count) #define IXL_SET_OMCASTS(vsi, count) (vsi)->omcasts = (count) #define IXL_SET_IQDROPS(vsi, count) (vsi)->iqdrops = (count) #define IXL_SET_OQDROPS(vsi, count) (vsi)->oqdrops = (count) #define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count) #else #define IXL_SET_IPACKETS(vsi, count) (vsi)->ifp->if_ipackets = (count) #define IXL_SET_IERRORS(vsi, count) (vsi)->ifp->if_ierrors = (count) #define IXL_SET_OPACKETS(vsi, count) (vsi)->ifp->if_opackets = (count) #define IXL_SET_OERRORS(vsi, count) (vsi)->ifp->if_oerrors = (count) #define IXL_SET_COLLISIONS(vsi, count) (vsi)->ifp->if_collisions = (count) #define IXL_SET_IBYTES(vsi, count) (vsi)->ifp->if_ibytes = (count) #define IXL_SET_OBYTES(vsi, count) (vsi)->ifp->if_obytes = (count) #define IXL_SET_IMCASTS(vsi, count) (vsi)->ifp->if_imcasts = (count) #define IXL_SET_OMCASTS(vsi, count) (vsi)->ifp->if_omcasts = (count) #define IXL_SET_IQDROPS(vsi, count) (vsi)->ifp->if_iqdrops = (count) #define IXL_SET_OQDROPS(vsi, odrops) (vsi)->ifp->if_snd.ifq_drops = (odrops) #define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count) #endif /* For stats sysctl naming */ #define QUEUE_NAME_LEN 32 #define IXL_DEV_ERR(_dev, _format, ...) \ device_printf(_dev, "%s: " _format " (%s:%d)\n", __func__, ##__VA_ARGS__, __FILE__, __LINE__) /* ***************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * ***************************************************************************** */ typedef struct _ixl_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } ixl_vendor_info_t; /* ** This struct has multiple uses, multicast ** addresses, vlans, and mac filters all use it. 
*/
struct ixl_mac_filter {
	SLIST_ENTRY(ixl_mac_filter) next;
	u8	macaddr[ETHER_ADDR_LEN];
	s16	vlan;
	u16	flags;
};

/*
 * The Transmit ring control struct
 */
struct tx_ring {
	struct ixl_tx_queue	*que;
	u32			tail;
	struct i40e_tx_desc	*tx_base;
	u64			tx_paddr;
	u32			latency;
	u32			packets;
	u32			me;
	/*
	 * For reporting completed packet status
	 * in descriptor writeback mode
	 */
	qidx_t			*tx_rsq;
	qidx_t			tx_rs_cidx;
	qidx_t			tx_rs_pidx;
	qidx_t			tx_cidx_processed;

	/* Used for Dynamic ITR calculation */
	u32			itr;
	u32			bytes;

	/* Soft Stats */
	u64			tx_bytes;
	u64			tx_packets;
	u64			mss_too_small;
};

/*
 * The Receive ring control struct
 */
struct rx_ring {
	struct ixl_rx_queue	*que;
	union i40e_rx_desc	*rx_base;
	uint64_t		rx_paddr;
	bool			discard;
	u32			itr;
	u32			latency;
	u32			mbuf_sz;
	u32			tail;
	u32			me;

	/* Used for Dynamic ITR calculation */
	u32			packets;
	u32			bytes;

	/* Soft stats */
	u64			rx_packets;
	u64			rx_bytes;
	u64			desc_errs;
};

/*
** Driver queue structs
*/
struct ixl_tx_queue {
	struct ixl_vsi		*vsi;
	struct tx_ring		txr;
	struct if_irq		que_irq;
	u32			msix;
	/* Stats */
	u64			irqs;
	u64			tso;
};

struct ixl_rx_queue {
	struct ixl_vsi		*vsi;
	struct rx_ring		rxr;
	struct if_irq		que_irq;
	u32			msix;	/* This queue's MSIX vector */
	/* Stats */
	u64			irqs;
};

/*
** Virtual Station Interface
*/
SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
struct ixl_vsi {
	if_ctx_t		ctx;
	if_softc_ctx_t		shared;
	struct ifnet		*ifp;
	device_t		dev;
	struct i40e_hw		*hw;
	struct ifmedia		*media;

	int			num_rx_queues;
	int			num_tx_queues;

	void			*back;
	enum i40e_vsi_type	type;
	int			id;
	u32			rx_itr_setting;
	u32			tx_itr_setting;
	bool			enable_head_writeback;

	u16			vsi_num;
	bool			link_active;
	u16			seid;
	u16			uplink_seid;
	u16			downlink_seid;

	struct ixl_tx_queue	*tx_queues;	/* TX queue array */
	struct ixl_rx_queue	*rx_queues;	/* RX queue array */
	struct if_irq		irq;
	u32			link_speed;

	/* MAC/VLAN Filter list */
	struct ixl_ftl_head	ftl;
	u16			num_macs;

	/* Contains readylist & stat counter id */
	struct i40e_aqc_vsi_properties_data info;

	u16			num_vlans;

	/* Per-VSI stats from hardware */
	struct i40e_eth_stats	eth_stats;
	struct i40e_eth_stats	eth_stats_offsets;
	bool			stat_offsets_loaded;
	/* VSI stat counters */
	u64			ipackets;
	u64			ierrors;
	u64			opackets;
	u64			oerrors;
	u64			ibytes;
	u64			obytes;
	u64			imcasts;
	u64			omcasts;
	u64			iqdrops;
	u64			oqdrops;
	u64			noproto;

	/* Driver statistics */
	u64			hw_filters_del;
	u64			hw_filters_add;

	/* Misc. */
	u64			flags;

	/* Stats sysctls for this VSI */
	struct sysctl_oid	*vsi_node;
};

/*
** Creates new filter with given MAC address and VLAN ID
*/
static inline struct ixl_mac_filter *
ixl_new_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct ixl_mac_filter	*f;

	/* create a new empty filter */
	f = malloc(sizeof(struct ixl_mac_filter),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (f) {
		SLIST_INSERT_HEAD(&vsi->ftl, f, next);
		bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
		f->vlan = vlan;
		f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
	}

	return (f);
}

/*
** Compare two ethernet addresses
*/
static inline bool
cmp_etheraddr(const u8 *ea1, const u8 *ea2)
{
	return (bcmp(ea1, ea2, 6) == 0);
}

/*
 * Return next largest power of 2, unsigned
 *
 * Public domain, from Bit Twiddling Hacks
 */
static inline u32
next_power_of_two(u32 n)
{
	n--;
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	n++;

	/* Next power of two > 0 is 1 */
	n += (n == 0);

	return (n);
}
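/*
 * The OR cascade above smears the most-significant set bit into every lower
 * bit position, so n becomes all-ones below its MSB; the final increment
 * then carries into the next power of two. A quick trace (illustrative,
 * not part of the header):
 *
 *	u32 n = 1000;		// 0b11_1110_1000
 *	n--;			// 999 = 0b11_1110_0111
 *	n |= n >> 1;		// 0b11_1111_0111
 *	n |= n >> 2;		// 0b11_1111_1111
 *	n |= n >> 4;		// already saturated below bit 9
 *	n |= n >> 8;		// unchanged
 *	n |= n >> 16;		// unchanged
 *	n++;			// 1024 = 0b100_0000_0000
 *
 * The initial decrement keeps exact powers of two (e.g. 512) from being
 * doubled, and the trailing "n += (n == 0)" maps an input of 0 to 1.
 */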
/*
 * Info for stats sysctls
 */
struct ixl_sysctl_info {
	u64	*stat;
	char	*name;
	char	*description;
};

extern const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN];

/* Common function prototypes between PF/VF driver */
void	ixl_debug_core(device_t dev, u32 enabled_mask, u32 mask, char *fmt, ...);
void	ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que);
void	ixl_get_default_rss_key(u32 *);
const char *	i40e_vc_stat_str(struct i40e_hw *hw,
    enum virtchnl_status_code stat_err);
void	ixl_init_tx_rsqs(struct ixl_vsi *vsi);
void	ixl_init_tx_cidx(struct ixl_vsi *vsi);
u64	ixl_max_vc_speed_to_value(u8 link_speeds);
void	ixl_add_vsi_sysctls(device_t dev, struct ixl_vsi *vsi,
    struct sysctl_ctx_list *ctx, const char *sysctl_name);
void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *child,
    struct i40e_eth_stats *eth_stats);
void	ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi);

#endif /* _IXL_H_ */
Index: head/sys/dev/ixl/ixl_debug.h
===================================================================
--- head/sys/dev/ixl/ixl_debug.h	(revision 339361)
+++ head/sys/dev/ixl/ixl_debug.h	(revision 339362)
@@ -1,123 +1,123 @@
/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_DEBUG_H_ #define _IXL_DEBUG_H_ #define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x" #define MAC_FORMAT_ARGS(mac_addr) \ (mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \ (mac_addr)[4], (mac_addr)[5] #define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off") #ifdef IXL_DEBUG #define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__) #define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__) #define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__) /* Defines for printing generic debug information */ #define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__) #define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__) #define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__) /* Defines for printing specific debug information */ #define DEBUG_INIT 1 #define DEBUG_IOCTL 1 #define DEBUG_HW 1 #define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__) #define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__) #define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__) #define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__) #define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \ if_printf(ifp, S "\n", ##__VA_ARGS__) #define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__) #define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__) #else /* no IXL_DEBUG */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define DPRINTF(...) #define DDPRINTF(...) #define IDPRINTF(...) #define INIT_DEBUGOUT(...) #define INIT_DBG_DEV(...) #define INIT_DBG_IF(...) #define IOCTL_DEBUGOUT(...) #define IOCTL_DBG_IF2(...) #define IOCTL_DBG_IF(...) #define HW_DEBUGOUT(...) 
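/*
 * Usage sketch (illustrative call sites, not part of this header): when the
 * kernel is built with IXL_DEBUG, these macros expand to printf-style output
 * tagged with the calling function's name, e.g.
 *
 *	INIT_DBG_DEV(dev, "allocated %d queues", nqueues);
 *	IOCTL_DBG_IF(ifp, "ioctl %lx not supported", command);
 *
 * Without IXL_DEBUG they expand to nothing, so call sites compile away
 * entirely in production builds.
 */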
#endif /* IXL_DEBUG */

enum ixl_dbg_mask {
	IXL_DBG_INFO			= 0x00000001,
	IXL_DBG_EN_DIS			= 0x00000002,
	IXL_DBG_AQ			= 0x00000004,
	IXL_DBG_NVMUPD			= 0x00000008,
	IXL_DBG_FILTER			= 0x00000010,

	IXL_DEBUG_RSS			= 0x00000100,

	IXL_DBG_IOV			= 0x00001000,
	IXL_DBG_IOV_VC			= 0x00002000,

	IXL_DBG_SWITCH_INFO		= 0x00010000,
	IXL_DBG_I2C			= 0x00020000,

	IXL_DBG_ALL			= 0xFFFFFFFF
};

-enum ixlv_dbg_mask {
-	IXLV_DBG_INFO			= 0x00000001,
-	IXLV_DBG_EN_DIS			= 0x00000002,
-	IXLV_DBG_AQ			= 0x00000004,
-	IXLV_DBG_INIT			= 0x00000008,
-	IXLV_DBG_FILTER			= 0x00000010,
+enum iavf_dbg_mask {
+	IAVF_DBG_INFO			= 0x00000001,
+	IAVF_DBG_EN_DIS			= 0x00000002,
+	IAVF_DBG_AQ			= 0x00000004,
+	IAVF_DBG_INIT			= 0x00000008,
+	IAVF_DBG_FILTER			= 0x00000010,

-	IXLV_DEBUG_RSS			= 0x00000100,
+	IAVF_DEBUG_RSS			= 0x00000100,

-	IXLV_DBG_VC			= 0x00001000,
+	IAVF_DBG_VC			= 0x00001000,

-	IXLV_DBG_SWITCH_INFO		= 0x00010000,
+	IAVF_DBG_SWITCH_INFO		= 0x00010000,

-	IXLV_DBG_ALL			= 0xFFFFFFFF
+	IAVF_DBG_ALL			= 0xFFFFFFFF
};

#endif /* _IXL_DEBUG_H_ */
Index: head/sys/dev/ixl/ixl_pf_iov.c
===================================================================
--- head/sys/dev/ixl/ixl_pf_iov.c	(revision 339361)
+++ head/sys/dev/ixl/ixl_pf_iov.c	(revision 339362)
@@ -1,1969 +1,1969 @@
/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf_iov.h" /* Private functions */ static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val); static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg); static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg); static bool ixl_zero_mac(const uint8_t *addr); static bool ixl_bcast_mac(const uint8_t *addr); static int ixl_vc_opcode_level(uint16_t opcode); static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr); static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf); static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi); static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf); static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len); static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op); static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line); static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info); static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info); static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue, enum i40e_queue_type *last_type, uint16_t *last_queue); static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector); static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues); static 
int ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable); static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err); /* * TODO: Move pieces of this into iflib and call the rest in a handler? * * e.g. ixl_if_iov_set_schema * * It's odd to do pci_iov_detach() there while doing pci_iov_attach() * in the driver. */ void ixl_initialize_sriov(struct ixl_pf *pf) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; nvlist_t *pf_schema, *vf_schema; int iov_error; pf_schema = pci_iov_schema_alloc_node(); vf_schema = pci_iov_schema_alloc_node(); pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", IOV_SCHEMA_HASDEFAULT, TRUE); pci_iov_schema_add_bool(vf_schema, "allow-set-mac", IOV_SCHEMA_HASDEFAULT, FALSE); pci_iov_schema_add_bool(vf_schema, "allow-promisc", IOV_SCHEMA_HASDEFAULT, FALSE); pci_iov_schema_add_uint16(vf_schema, "num-queues", IOV_SCHEMA_HASDEFAULT, - max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IXLV_MAX_QUEUES))); + max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IAVF_MAX_QUEUES))); iov_error = pci_iov_attach(dev, pf_schema, vf_schema); if (iov_error != 0) { device_printf(dev, "Failed to initialize SR-IOV (error=%d)\n", iov_error); } else device_printf(dev, "SR-IOV ready\n"); } /* * Allocate the VSI for a VF. */ static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf) { device_t dev; struct i40e_hw *hw; struct ixl_vsi *vsi; struct i40e_vsi_context vsi_ctx; int i; enum i40e_status_code code; hw = &pf->hw; vsi = &pf->vsi; dev = pf->dev; vsi_ctx.pf_num = hw->pf_id; vsi_ctx.uplink_seid = pf->veb_seid; vsi_ctx.connection_type = IXL_VSI_DATA_PORT; vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num; vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF; bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID); if (pf->enable_vf_loopback) vsi_ctx.info.switch_id = htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID); vsi_ctx.info.sec_flags = 0; if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF) vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_NOTHING; vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); /* XXX: Only scattered allocation is supported for VFs right now */ for (i = 0; i < vf->qtag.num_active; i++) vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i]; for (; i < nitems(vsi_ctx.info.queue_mapping); i++) vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK); vsi_ctx.info.tc_mapping[0] = htole16( (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL); if (code != I40E_SUCCESS) return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); vf->vsi.seid = vsi_ctx.seid; vf->vsi.vsi_num = vsi_ctx.vsi_number; vf->vsi.num_rx_queues = vf->qtag.num_active; vf->vsi.num_tx_queues = vf->qtag.num_active; code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL); if (code != I40E_SUCCESS) return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL); if (code != I40E_SUCCESS) { device_printf(dev, "Failed to disable BW limit: %d\n", ixl_adminq_err_to_errno(hw->aq.asq_last_status)); return 
(ixl_adminq_err_to_errno(hw->aq.asq_last_status)); } memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info)); return (0); } static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; int error; hw = &pf->hw; error = ixl_vf_alloc_vsi(pf, vf); if (error != 0) return (error); /* Let VF receive broadcast Ethernet frames */ error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL); if (error) device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n"); /* Re-add VF's MAC/VLAN filters to its VSI */ ixl_reconfigure_filters(&vf->vsi); /* Reset stats? */ vf->vsi.hw_filters_add = 0; vf->vsi.hw_filters_del = 0; return (0); } static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val) { uint32_t qtable; int index, shift; /* * Two queues are mapped in a single register, so we have to do some * gymnastics to convert the queue number into a register index and * shift. */ index = qnum / 2; shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT; qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num)); qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift); qtable |= val << shift; i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable); } static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t qtable; int i; hw = &pf->hw; /* * Contiguous mappings aren't actually supported by the hardware, * so we have to use non-contiguous mappings. */ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num), I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); /* Enable LAN traffic on this VF */ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num), I40E_VPLAN_MAPENA_TXRX_ENA_MASK); /* Program index of each VF queue into PF queue space * (This is only needed if QTABLE is enabled) */ for (i = 0; i < vf->vsi.num_tx_queues; i++) { qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) << I40E_VPLAN_QTABLE_QINDEX_SHIFT; wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable); } for (; i < IXL_MAX_VSI_QUEUES; i++) wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), I40E_VPLAN_QTABLE_QINDEX_MASK); /* Map queues allocated to VF to its VSI; * This mapping matches the VF-wide mapping since the VF * is only given a single VSI */ for (i = 0; i < vf->vsi.num_tx_queues; i++) ixl_vf_map_vsi_queue(hw, vf, i, ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i)); /* Set rest of VSI queues as unused. */ for (; i < IXL_MAX_VSI_QUEUES; i++) ixl_vf_map_vsi_queue(hw, vf, i, I40E_VSILAN_QTABLE_QINDEX_0_MASK); ixl_flush(hw); } static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi) { struct i40e_hw *hw; hw = &pf->hw; if (vsi->seid == 0) return; i40e_aq_delete_element(hw, vsi->seid, NULL); } static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg) { wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); ixl_flush(hw); } static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg) { wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); ixl_flush(hw); } static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfint_reg, vpint_reg; int i; hw = &pf->hw; ixl_vf_vsi_release(pf, &vf->vsi); /* Index 0 has a special register. */ ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num)); for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num); ixl_vf_disable_queue_intr(hw, vfint_reg); } /* Index 0 has a special register. 
*/ ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num)); for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num); ixl_vf_unregister_intr(hw, vpint_reg); } vf->vsi.num_tx_queues = 0; vf->vsi.num_rx_queues = 0; } static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; int i; uint16_t global_vf_num; uint32_t ciad; hw = &pf->hw; global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS | (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { ciad = rd32(hw, I40E_PF_PCI_CIAD); if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0) return (0); DELAY(1); } return (ETIMEDOUT); } static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfrtrig; hw = &pf->hw; ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num); vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); ixl_flush(hw); ixl_reinit_vf(pf, vf); ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num); } static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfrstat, vfrtrig; int i, error; hw = &pf->hw; error = ixl_flush_pcie(pf, vf); if (error != 0) device_printf(pf->dev, "Timed out waiting for PCIe activity to stop on VF-%d\n", vf->vf_num); for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { DELAY(10); vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num)); if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK) break; } if (i == IXL_VF_RESET_TIMEOUT) device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num); wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED); vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); if (vf->vsi.seid != 0) ixl_disable_rings(pf, &vf->vsi, &vf->qtag); ixl_pf_qmgr_clear_queue_flags(&vf->qtag); ixl_vf_release_resources(pf, vf); ixl_vf_setup_vsi(pf, vf); ixl_vf_map_queues(pf, vf); wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE); ixl_flush(hw); } static int ixl_vc_opcode_level(uint16_t opcode) { switch (opcode) { case VIRTCHNL_OP_GET_STATS: return (10); default: return (5); } } static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len) { struct i40e_hw *hw; int global_vf_id; hw = &pf->hw; global_vf_id = hw->func_caps.vf_base_id + vf->vf_num; I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op), "Sending msg (op=%s[%d], status=%d) to VF-%d\n", ixl_vc_opcode_str(op), op, status, vf->vf_num); i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL); } static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op) { ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0); } static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line) { I40E_VC_DEBUG(pf, 1, "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n", ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status), status, vf->vf_num, file, line); ixl_send_vf_msg(pf, vf, op, status, NULL, 0); } static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_version_info reply; if (msg_size != sizeof(struct virtchnl_version_info)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION, I40E_ERR_PARAM); 
return; } vf->version = ((struct virtchnl_version_info *)msg)->minor; reply.major = VIRTCHNL_VERSION_MAJOR; reply.minor = VIRTCHNL_VERSION_MINOR; ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply, sizeof(reply)); } static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { if (msg_size != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF, I40E_ERR_PARAM); return; } ixl_reset_vf(pf, vf); /* No response to a reset message. */ } static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vf_resource reply; if ((vf->version == 0 && msg_size != 0) || (vf->version == 1 && msg_size != 4)) { device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size," " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR, vf->version); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES, I40E_ERR_PARAM); return; } bzero(&reply, sizeof(reply)); if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_VLAN; else /* Force VF RSS setup by PF in 1.1+ VFs */ reply.vf_cap_flags = *(u32 *)msg & ( VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF | VIRTCHNL_VF_OFFLOAD_VLAN); reply.num_vsis = 1; reply.num_queue_pairs = vf->vsi.num_tx_queues; reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf; reply.rss_key_size = 52; reply.rss_lut_size = 64; reply.vsi_res[0].vsi_id = vf->vsi.vsi_num; reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues; memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN); ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES, I40E_SUCCESS, &reply, sizeof(reply)); } static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info) { struct i40e_hw *hw; struct i40e_hmc_obj_txq txq; uint16_t global_queue_num, global_vf_num; enum i40e_status_code status; uint32_t qtx_ctl; hw = &pf->hw; global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id); global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; bzero(&txq, sizeof(txq)); DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n", vf->vf_num, global_queue_num, info->queue_id, global_vf_num); status = i40e_clear_lan_tx_queue_context(hw, global_queue_num); if (status != I40E_SUCCESS) return (EINVAL); txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS; txq.head_wb_ena = info->headwb_enabled; txq.head_wb_addr = info->dma_headwb_addr; txq.qlen = info->ring_len; txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]); txq.rdylist_act = 0; status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq); if (status != I40E_SUCCESS) return (EINVAL); qtx_ctl = I40E_QTX_CTL_VF_QUEUE | (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) | (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT); wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl); ixl_flush(hw); ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true); return (0); } static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info) { struct i40e_hw *hw; struct i40e_hmc_obj_rxq rxq; uint16_t global_queue_num; enum i40e_status_code status; hw = &pf->hw; global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id); bzero(&rxq, sizeof(rxq)); DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n", vf->vf_num, global_queue_num, info->queue_id); if (info->databuffer_size > IXL_VF_MAX_BUFFER) return (EINVAL); if 
(info->max_pkt_size > IXL_VF_MAX_FRAME || info->max_pkt_size < ETHER_MIN_LEN) return (EINVAL); if (info->splithdr_enabled) { if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER) return (EINVAL); rxq.hsplit_0 = info->rx_split_pos & (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP); rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; rxq.dtype = 2; } status = i40e_clear_lan_rx_queue_context(hw, global_queue_num); if (status != I40E_SUCCESS) return (EINVAL); rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS; rxq.qlen = info->ring_len; rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; rxq.dsize = 1; rxq.crcstrip = 1; rxq.l2tsel = 1; rxq.rxmax = info->max_pkt_size; rxq.tphrdesc_ena = 1; rxq.tphwdesc_ena = 1; rxq.tphdata_ena = 1; rxq.tphhead_ena = 1; rxq.lrxqthresh = 1; rxq.prefena = 1; status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq); if (status != I40E_SUCCESS) return (EINVAL); ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false); return (0); } static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vsi_queue_config_info *info; struct virtchnl_queue_pair_info *pair; uint16_t expected_msg_size; int i; if (msg_size < sizeof(*info)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } info = msg; if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) { device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n", vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair); if (msg_size != expected_msg_size) { device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n", vf->vf_num, msg_size, expected_msg_size); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (info->vsi_id != vf->vsi.vsi_num) { device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", vf->vf_num, info->vsi_id, vf->vsi.vsi_num); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } for (i = 0; i < info->num_queue_pairs; i++) { pair = &info->qpair[i]; if (pair->txq.vsi_id != vf->vsi.vsi_num || pair->rxq.vsi_id != vf->vsi.vsi_num || pair->txq.queue_id != pair->rxq.queue_id || pair->txq.queue_id >= vf->vsi.num_tx_queues) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES); } static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue, enum i40e_queue_type *last_type, uint16_t *last_queue) { uint32_t offset, qctl; uint16_t itr_indx; if (cur_type == I40E_QUEUE_TYPE_RX) { offset = I40E_QINT_RQCTL(cur_queue); itr_indx = vector->rxitr_idx; } else { offset = I40E_QINT_TQCTL(cur_queue); itr_indx = vector->txitr_idx; } qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (*last_type << 
I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | I40E_QINT_RQCTL_CAUSE_ENA_MASK | (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT)); wr32(&pf->hw, offset, qctl); *last_type = cur_type; *last_queue = cur_queue; } static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector) { struct i40e_hw *hw; u_int qindex; enum i40e_queue_type type, last_type; uint32_t lnklst_reg; uint16_t rxq_map, txq_map, cur_queue, last_queue; hw = &pf->hw; rxq_map = vector->rxq_map; txq_map = vector->txq_map; last_queue = IXL_END_OF_INTR_LNKLST; last_type = I40E_QUEUE_TYPE_RX; /* * The datasheet says to optimize performance, RX queues and TX queues * should be interleaved in the interrupt linked list, so we process * both at once here. */ while ((rxq_map != 0) || (txq_map != 0)) { if (txq_map != 0) { qindex = ffs(txq_map) - 1; type = I40E_QUEUE_TYPE_TX; cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex); ixl_vf_set_qctl(pf, vector, type, cur_queue, &last_type, &last_queue); txq_map &= ~(1 << qindex); } if (rxq_map != 0) { qindex = ffs(rxq_map) - 1; type = I40E_QUEUE_TYPE_RX; cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex); ixl_vf_set_qctl(pf, vector, type, cur_queue, &last_type, &last_queue); rxq_map &= ~(1 << qindex); } } if (vector->vector_id == 0) lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num); else lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id, vf->vf_num); wr32(hw, lnklst_reg, (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) | (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); ixl_flush(hw); } static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_irq_map_info *map; struct virtchnl_vector_map *vector; struct i40e_hw *hw; int i, largest_txq, largest_rxq; hw = &pf->hw; if (msg_size < sizeof(*map)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } map = msg; if (map->num_vectors == 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } for (i = 0; i < map->num_vectors; i++) { vector = &map->vecmap[i]; if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) || vector->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } if (vector->rxq_map != 0) { largest_rxq = fls(vector->rxq_map) - 1; if (largest_rxq >= vf->vsi.num_rx_queues) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } } if (vector->txq_map != 0) { largest_txq = fls(vector->txq_map) - 1; if (largest_txq >= vf->vsi.num_tx_queues) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } } if (vector->rxitr_idx > IXL_MAX_ITR_IDX || vector->txitr_idx > IXL_MAX_ITR_IDX) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } ixl_vf_config_vector(pf, vf, vector); } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP); } static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_queue_select *select; int error = 0; if (msg_size != sizeof(*select)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_PARAM); return; } select = msg; if (select->vsi_id != vf->vsi.vsi_num || select->rx_queues == 0 || select->tx_queues == 0) { i40e_send_vf_nack(pf, vf, 
VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_PARAM); return; } /* Enable TX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->tx_queues) { /* Warn if queue is out of VF allocation range */ if (i >= vf->vsi.num_tx_queues) { device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) continue; /* Warn if this queue is already marked as enabled */ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) ixl_dbg_iov(pf, "VF %d: TX ring %d is already enabled!\n", vf->vf_num, i); error = ixl_enable_tx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true); } } /* Enable RX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->rx_queues) { /* Warn if queue is out of VF allocation range */ if (i >= vf->vsi.num_rx_queues) { device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) continue; /* Warn if this queue is already marked as enabled */ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) ixl_dbg_iov(pf, "VF %d: RX ring %d is already enabled!\n", vf->vf_num, i); error = ixl_enable_rx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false); } } if (error) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_TIMEOUT); return; } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES); } static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_queue_select *select; int error = 0; if (msg_size != sizeof(*select)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_PARAM); return; } select = msg; if (select->vsi_id != vf->vsi.vsi_num || select->rx_queues == 0 || select->tx_queues == 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_PARAM); return; } /* Disable TX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->tx_queues) { /* Warn if queue is out of VF allocation range */ if (i >= vf->vsi.num_tx_queues) { device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) continue; /* Warn if this queue is already marked as disabled */ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) { ixl_dbg_iov(pf, "VF %d: TX ring %d is already disabled!\n", vf->vf_num, i); continue; } error = ixl_disable_tx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true); } } /* Disable RX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->rx_queues) { /* Warn if queue is out of VF allocation range */ if (i >= vf->vsi.num_rx_queues) { device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) continue; /* Warn if this queue is already marked as disabled */ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) { ixl_dbg_iov(pf, "VF %d: RX ring %d is already disabled!\n", vf->vf_num, i); continue; } error = ixl_disable_rx_ring(pf, &vf->qtag, i); if (error) break; else
ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false); } } if (error) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_TIMEOUT); return; } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES); } static bool ixl_zero_mac(const uint8_t *addr) { uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; return (cmp_etheraddr(addr, zero)); } static bool ixl_bcast_mac(const uint8_t *addr) { static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; return (cmp_etheraddr(addr, ixl_bcast_addr)); } static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr) { if (ixl_zero_mac(addr) || ixl_bcast_mac(addr)) return (EINVAL); /* * If the VF is not allowed to change its MAC address, don't let it * set a MAC filter for an address that is not a multicast address and * is not its assigned MAC. */ if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) && !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac))) return (EPERM); return (0); } static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_ether_addr_list *addr_list; struct virtchnl_ether_addr *addr; struct ixl_vsi *vsi; int i; size_t expected_size; vsi = &vf->vsi; if (msg_size < sizeof(*addr_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } addr_list = msg; expected_size = sizeof(*addr_list) + addr_list->num_elements * sizeof(*addr); if (addr_list->num_elements == 0 || addr_list->vsi_id != vsi->vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } for (i = 0; i < addr_list->num_elements; i++) { if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY); } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR); } static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_ether_addr_list *addr_list; struct virtchnl_ether_addr *addr; size_t expected_size; int i; if (msg_size < sizeof(*addr_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } addr_list = msg; expected_size = sizeof(*addr_list) + addr_list->num_elements * sizeof(*addr); if (addr_list->num_elements == 0 || addr_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY); } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR); } static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_vsi_context vsi_ctx; vsi_ctx.seid = vf->vsi.seid; bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL)); } static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vlan_filter_list *filter_list; enum 
i40e_status_code code; size_t expected_size; int i; if (msg_size < sizeof(*filter_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } filter_list = msg; expected_size = sizeof(*filter_list) + filter_list->num_elements * sizeof(uint16_t); if (filter_list->num_elements == 0 || filter_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) { if (filter_list->vlan_id[i] > EVL_VLID_MASK) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } } code = ixl_vf_enable_vlan_strip(pf, vf); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); } for (i = 0; i < filter_list->num_elements; i++) ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN); } static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vlan_filter_list *filter_list; int i; size_t expected_size; if (msg_size < sizeof(*filter_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } filter_list = msg; expected_size = sizeof(*filter_list) + filter_list->num_elements * sizeof(uint16_t); if (filter_list->num_elements == 0 || filter_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) { if (filter_list->vlan_id[i] > EVL_VLID_MASK) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } } if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN); } static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_promisc_info *info; struct i40e_hw *hw = &pf->hw; enum i40e_status_code code; if (msg_size != sizeof(*info)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { /* * Do the same thing as the Linux PF driver -- lie to the VF */ ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE); return; } info = msg; if (info->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid, info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE); if (code != I40E_SUCCESS) { device_printf(pf->dev, "i40e_aq_set_vsi_unicast_promiscuous (seid %d) failed: status %s," " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code), i40e_aq_str(hw, hw->aq.asq_last_status)); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid, info->flags & FLAG_VF_MULTICAST_PROMISC, NULL); if (code != I40E_SUCCESS) { device_printf(pf->dev, "i40e_aq_set_vsi_multicast_promiscuous (seid %d) failed: status %s," " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code), i40e_aq_str(hw, hw->aq.asq_last_status)); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } 
static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_queue_select *queue;

	if (msg_size != sizeof(*queue)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	queue = msg;
	if (queue->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_update_eth_stats(&vf->vsi);
	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}

static void
ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_key *key;
	struct i40e_aqc_get_set_rss_key_data key_data;
	enum i40e_status_code status;

	hw = &pf->hw;

	if (msg_size < sizeof(*key)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	key = msg;

	/* 52 bytes: 40-byte standard key plus 12-byte extended hash key */
	if (key->key_len > 52) {
		device_printf(pf->dev,
		    "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
		    vf->vf_num, key->key_len, 52);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	if (key->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev,
		    "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out hash using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		bzero(&key_data, sizeof(key_data));
		if (key->key_len <= 40)
			bcopy(key->key, key_data.standard_rss_key,
			    key->key_len);
		else {
			bcopy(key->key, key_data.standard_rss_key, 40);
			bcopy(&key->key[40], key_data.extended_hash_key,
			    key->key_len - 40);
		}
		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
		if (status) {
			device_printf(pf->dev,
			    "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (key->key_len / 4); i++)
			i40e_write_rx_ctl(hw,
			    I40E_VFQF_HKEY1(i, vf->vf_num),
			    ((u32 *)key->key)[i]);
	}

	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
	    vf->vf_num, key->key[0]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
}
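For X722 the key is programmed through the admin queue, and the bcopy() split above mirrors the layout of i40e_aqc_get_set_rss_key_data: a 40-byte standard key followed by a 12-byte extended hash key. Roughly, for a maximal 52-byte key (sketch only):

/* Sketch: how a full 52-byte RSS key maps onto the AQ structure. */
struct i40e_aqc_get_set_rss_key_data kd;
bzero(&kd, sizeof(kd));
bcopy(key->key, kd.standard_rss_key, 40);	/* bytes  0..39 */
bcopy(&key->key[40], kd.extended_hash_key, 12);	/* bytes 40..51 */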
static void
ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_lut *lut;
	enum i40e_status_code status;

	hw = &pf->hw;

	if (msg_size < sizeof(*lut)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	lut = msg;

	if (lut->lut_entries > 64) {
		device_printf(pf->dev,
		    "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
		    vf->vf_num, lut->lut_entries, 64);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	if (lut->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev,
		    "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out LUT using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false,
		    lut->lut, lut->lut_entries);
		if (status) {
			device_printf(pf->dev,
			    "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (lut->lut_entries / 4); i++)
			i40e_write_rx_ctl(hw,
			    I40E_VFQF_HLUT1(i, vf->vf_num),
			    ((u32 *)lut->lut)[i]);
	}

	DDPRINTF(pf->dev,
	    "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
	    vf->vf_num, lut->lut[0], lut->lut_entries);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
}

static void
ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_hena *hena;

	hw = &pf->hw;

	if (msg_size < sizeof(*hena)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
		    I40E_ERR_PARAM);
		return;
	}

	hena = msg;

	/* Set HENA */
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num),
	    (u32)hena->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num),
	    (u32)(hena->hena >> 32));

	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
	    vf->vf_num, hena->hena);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
}

static void
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct virtchnl_pf_event event;
	struct i40e_hw *hw;

	hw = &pf->hw;
	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.severity = PF_EVENT_SEVERITY_INFO;
	event.event_data.link_event.link_status = pf->vsi.link_active;
	event.event_data.link_event.link_speed =
	    (enum virtchnl_link_speed)hw->phy.link_info.link_speed;

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS,
	    &event, sizeof(event));
}

void
ixl_broadcast_link_state(struct ixl_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_vfs; i++)
		ixl_notify_vf_link_state(pf, &pf->vfs[i]);
}
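ixl_vf_set_rss_hena_msg() above splits the 64-bit hash-enable word across two 32-bit VFQF_HENA registers, low half first. A worked example with an arbitrary value (illustrative only):

/* Illustrative only: suppose hena->hena == 0x0000000500001234ULL. */
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), 0x00001234);	/* low 32 bits */
i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), 0x00000005);	/* high 32 bits */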
void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
	struct ixl_vf *vf;
	void *msg;
	uint16_t vf_num, msg_size;
	uint32_t opcode;

	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
	opcode = le32toh(event->desc.cookie_high);

	if (vf_num >= pf->num_vfs) {
		device_printf(pf->dev, "Got msg from illegal VF: %d\n",
		    vf_num);
		return;
	}

	vf = &pf->vfs[vf_num];
	msg = event->msg_buf;
	msg_size = event->msg_len;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
	    "Got msg %s(%d) from%sVF-%d of size %d\n",
	    ixl_vc_opcode_str(opcode), opcode,
	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
	    vf_num, msg_size);

	/* This must be a stray msg from a previously destroyed VF. */
	if (!(vf->vf_flags & VF_FLAG_ENABLED))
		return;

	switch (opcode) {
	case VIRTCHNL_OP_VERSION:
		ixl_vf_version_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ixl_vf_reset_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
		break;

	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
	default:
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
		break;
	}
}
/*
 * Handle any VFs that have reset themselves via a Function Level Reset (FLR).
 */
void
ixl_handle_vflr(struct ixl_pf *pf)
{
	struct ixl_vf *vf;
	struct i40e_hw *hw;
	uint16_t global_vf_num;
	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
	int i;

	hw = &pf->hw;

	ixl_dbg_iov(pf, "%s: begin\n", __func__);

	/* Re-enable VFLR interrupt cause so driver doesn't miss a
	 * reset interrupt for another VF */
	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
	ixl_flush(hw);

	for (i = 0; i < pf->num_vfs; i++) {
		global_vf_num = hw->func_caps.vf_base_id + i;

		vf = &pf->vfs[i];
		if (!(vf->vf_flags & VF_FLAG_ENABLED))
			continue;

		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
		if (vflrstat & vflrstat_mask) {
			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
			    vflrstat_mask);
			ixl_dbg_iov(pf, "Reinitializing VF-%d\n", i);
			ixl_reinit_vf(pf, vf);
			ixl_dbg_iov(pf, "Reinitializing VF-%d done\n", i);
		}
	}
}

static int
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
{
	switch (err) {
	case I40E_AQ_RC_EPERM:
		return (EPERM);
	case I40E_AQ_RC_ENOENT:
		return (ENOENT);
	case I40E_AQ_RC_ESRCH:
		return (ESRCH);
	case I40E_AQ_RC_EINTR:
		return (EINTR);
	case I40E_AQ_RC_EIO:
		return (EIO);
	case I40E_AQ_RC_ENXIO:
		return (ENXIO);
	case I40E_AQ_RC_E2BIG:
		return (E2BIG);
	case I40E_AQ_RC_EAGAIN:
		return (EAGAIN);
	case I40E_AQ_RC_ENOMEM:
		return (ENOMEM);
	case I40E_AQ_RC_EACCES:
		return (EACCES);
	case I40E_AQ_RC_EFAULT:
		return (EFAULT);
	case I40E_AQ_RC_EBUSY:
		return (EBUSY);
	case I40E_AQ_RC_EEXIST:
		return (EEXIST);
	case I40E_AQ_RC_EINVAL:
		return (EINVAL);
	case I40E_AQ_RC_ENOTTY:
		return (ENOTTY);
	case I40E_AQ_RC_ENOSPC:
		return (ENOSPC);
	case I40E_AQ_RC_ENOSYS:
		return (ENOSYS);
	case I40E_AQ_RC_ERANGE:
		return (ERANGE);
	case I40E_AQ_RC_EFLUSHED:
		return (EINVAL);	/* No exact equivalent in errno.h */
	case I40E_AQ_RC_BAD_ADDR:
		return (EFAULT);
	case I40E_AQ_RC_EMODE:
		return (EPERM);
	case I40E_AQ_RC_EFBIG:
		return (EFBIG);
	default:
		return (EINVAL);
	}
}

static int
ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_vsi_context ctxt;
	int error;

	memset(&ctxt, 0, sizeof(ctxt));

	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.connection_type = IXL_VSI_DATA_PORT;

	ctxt.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id = (enable) ?
	    htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) : 0;

	/* error is set to 0 on success */
	error = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (error) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", error, hw->aq.asq_last_status);
	}

	return (error);
}
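Each I40E_GLGEN_VFLRSTAT register appears to track 32 VFs, so the index/mask macros used in ixl_handle_vflr() presumably reduce to an ordinary bitmap lookup. A sketch under that assumption:

/* Sketch, assuming 32 VFs per VFLRSTAT register (layout not verified here). */
uint32_t idx = global_vf_num / 32;		/* which VFLRSTAT register */
uint32_t mask = 1u << (global_vf_num % 32);	/* the VF's bit within it */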
int
ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct i40e_hw *hw;
	struct ixl_vsi *pf_vsi;
	enum i40e_status_code ret;
	int i, error;

	hw = &pf->hw;
	pf_vsi = &pf->vsi;

	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL,
	    M_NOWAIT | M_ZERO);
	if (pf->vfs == NULL) {
		error = ENOMEM;
		goto fail;
	}

	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_init(&pf->vfs[i].ctx);

	/*
	 * Add the VEB and ...
	 * - do nothing: VEPA mode
	 * - enable loopback mode on connected VSIs: VEB mode
	 */
	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
	    1, FALSE, &pf->veb_seid, FALSE, NULL);
	if (ret != I40E_SUCCESS) {
		error = hw->aq.asq_last_status;
		device_printf(dev, "i40e_aq_add_veb failed; status %s error %s",
		    i40e_stat_str(hw, ret), i40e_aq_str(hw, error));
		goto fail;
	}
	if (pf->enable_vf_loopback)
		ixl_config_pf_vsi_loopback(pf, true);

	/*
	 * Adding a VEB brings back the default MAC filter(s). Remove them,
	 * and let the driver add the proper filters back.
	 */
	ixl_del_default_hw_filters(pf_vsi);
	ixl_reconfigure_filters(pf_vsi);

	pf->num_vfs = num_vfs;
	return (0);

fail:
	free(pf->vfs, M_IXL);
	pf->vfs = NULL;
	return (error);
}

void
ixl_if_iov_uninit(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	struct ifnet *ifp;
	struct ixl_vf *vfs;
	int i, num_vfs;

	hw = &pf->hw;
	vsi = &pf->vsi;
	ifp = vsi->ifp;

	for (i = 0; i < pf->num_vfs; i++) {
		if (pf->vfs[i].vsi.seid != 0)
			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
		ixl_free_mac_filters(&pf->vfs[i].vsi);
		ixl_dbg_iov(pf, "VF %d: %d released\n",
		    i, pf->vfs[i].qtag.num_allocated);
		ixl_dbg_iov(pf, "Unallocated total: %d\n",
		    ixl_pf_qmgr_get_num_free(&pf->qmgr));
	}

	if (pf->veb_seid != 0) {
		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
		pf->veb_seid = 0;
	}
	/* Reset PF VSI loopback mode */
	if (pf->enable_vf_loopback)
		ixl_config_pf_vsi_loopback(pf, false);

	vfs = pf->vfs;
	num_vfs = pf->num_vfs;

	pf->vfs = NULL;
	pf->num_vfs = 0;

	/* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_free(&vfs[i].ctx);
	free(vfs, M_IXL);
}

static int
ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
{
	device_t dev = pf->dev;
	int error;

	/* Validate, and clamp value if invalid */
	if (num_queues < 1 || num_queues > 16)
		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
		    num_queues, vf->vf_num);
	if (num_queues < 1) {
		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
		num_queues = 1;
-	} else if (num_queues > IXLV_MAX_QUEUES) {
-		device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IXLV_MAX_QUEUES);
-		num_queues = IXLV_MAX_QUEUES;
+	} else if (num_queues > IAVF_MAX_QUEUES) {
+		device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IAVF_MAX_QUEUES);
+		num_queues = IAVF_MAX_QUEUES;
	}

	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
	if (error) {
		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
		    num_queues, vf->vf_num);
		return (ENOSPC);
	}

	ixl_dbg_iov(pf, "VF %d: %d allocated, %d active\n",
	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
	ixl_dbg_iov(pf, "Unallocated total: %d\n",
	    ixl_pf_qmgr_get_num_free(&pf->qmgr));

	return (0);
}
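The nvlist consumed by ixl_if_iov_vf_add() below carries the per-VF knobs an administrator normally sets through iovctl(8). A host-side sketch of building such a list with libnv (purely illustrative; the values are examples, not defaults):

/* Illustrative only: the nvlist keys the VF-add path consumes. */
nvlist_t *nvl = nvlist_create(0);
uint8_t example_mac[ETHER_ADDR_LEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

nvlist_add_number(nvl, "num-queues", 4);
nvlist_add_binary(nvl, "mac-addr", example_mac, sizeof(example_mac));
nvlist_add_bool(nvl, "allow-set-mac", false);
nvlist_add_bool(nvl, "mac-anti-spoof", true);
nvlist_add_bool(nvl, "allow-promisc", false);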
"mac-addr", &size); bcopy(mac, vf->mac, ETHER_ADDR_LEN); if (nvlist_get_bool(params, "allow-set-mac")) vf->vf_flags |= VF_FLAG_SET_MAC_CAP; } else /* * If the administrator has not specified a MAC address then * we must allow the VF to choose one. */ vf->vf_flags |= VF_FLAG_SET_MAC_CAP; if (nvlist_get_bool(params, "mac-anti-spoof")) vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF; if (nvlist_get_bool(params, "allow-promisc")) vf->vf_flags |= VF_FLAG_PROMISC_CAP; vf->vf_flags |= VF_FLAG_VLAN_CAP; /* VF needs to be reset before it can be used */ ixl_reset_vf(pf, vf); out: if (error == 0) { snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum); ixl_add_vsi_sysctls(dev, &vf->vsi, &vf->ctx, sysctl_name); } return (error); } Index: head/sys/modules/iavf/Makefile =================================================================== --- head/sys/modules/iavf/Makefile (revision 339361) +++ head/sys/modules/iavf/Makefile (revision 339362) @@ -1,18 +1,18 @@ #$FreeBSD$ .PATH: ${SRCTOP}/sys/dev/ixl KMOD = if_iavf SRCS = device_if.h bus_if.h pci_if.h ifdi_if.h SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h opt_global.h -SRCS += if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c +SRCS += if_iavf.c iavf_vc.c ixl_txrx.c i40e_osdep.c # Shared source SRCS += i40e_common.c i40e_nvm.c i40e_adminq.c # Debug messages / sysctls # CFLAGS += -DIXL_DEBUG # Enable asserts and other debugging facilities # CFLAGS += -DINVARIANTS -DINVARIANTS_SUPPORT -DWITNESS .include