Index: head/sys/amd64/conf/GENERIC =================================================================== --- head/sys/amd64/conf/GENERIC (revision 335337) +++ head/sys/amd64/conf/GENERIC (revision 335338) @@ -1,380 +1,380 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/amd64 # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu HAMMER ident GENERIC makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options PREEMPTION # Enable kernel thread preemption options VIMAGE # Subsystem virtualization, e.g. VNET options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC # IP (v4/v6) security options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 options TCP_OFFLOAD # TCP offload options TCP_BLACKBOX # Enhanced TCP event logging options TCP_HHOOK # hhook(9) framework for TCP options TCP_RFC7413 # TCP Fast Open options SCTP # Stream Control Transmission Protocol options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options QUOTA # Enable disk quotas for UFS options MD_ROOT # MD is a potential root device options NFSCL # Network Filesystem Client options NFSD # Network Filesystem Server options NFSLOCKD # Network Lock Manager options NFS_ROOT # NFS usable as /, requires NFSCL options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options GEOM_PART_GPT # GUID Partition Tables. options GEOM_RAID # Soft RAID functionality. options GEOM_LABEL # Provides labelization options COMPAT_FREEBSD32 # Compatible with i386 binaries options COMPAT_FREEBSD4 # Compatible with FreeBSD4 options COMPAT_FREEBSD5 # Compatible with FreeBSD5 options COMPAT_FREEBSD6 # Compatible with FreeBSD6 options COMPAT_FREEBSD7 # Compatible with FreeBSD7 options COMPAT_FREEBSD9 # Compatible with FreeBSD9 options COMPAT_FREEBSD10 # Compatible with FreeBSD10 options COMPAT_FREEBSD11 # Compatible with FreeBSD11 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. 
options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_FRAME # Ensure frames are compiled in options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel options RACCT # Resource accounting framework options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default options RCTL # Resource limits # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options BUF_TRACKING # Track buffer history options DDB # Support DDB. options FULL_BUF_TRACKING # Track more buffer history options GDB # Support remote GDB. options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones # Kernel dump features. options EKCD # Support for encrypted kernel dumps options GZIO # gzip-compressed kernel and user dumps options ZSTDIO # zstd-compressed kernel and user dumps options NETDUMP # netdump(4) client support # Make an SMP-capable kernel by default options SMP # Symmetric MultiProcessor Kernel options EARLY_AP_STARTUP # CPU frequency control device cpufreq # Bus support. device acpi options ACPI_DMAR device pci options PCI_HP # PCI-Express native HotPlug options PCI_IOV # PCI SR-IOV support # Floppy drives device fdc # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA # SCSI Controllers device ahc # AHA2940 and onboard AIC7xxx devices device ahd # AHA39320/29320 and onboard AIC79xx devices device esp # AMD Am53C974 (Tekram DC-390(T)) device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic family #device ispfw # Firmware for QLogic HBAs- normally a module device mpt # LSI-Logic MPT-Fusion device mps # LSI-Logic MPT-Fusion 2 device mpr # LSI-Logic MPT-Fusion 3 #device ncr # NCR/Symbios Logic device sym # NCR/Symbios Logic (newer chipsets + those of `ncr') device trm # Tekram DC395U/UW/F DC315U adapters device adv # Advansys SCSI adapters device adw # Advansys wide SCSI adapters device aic # Adaptec 15[012]x SCSI adapters, AIC-6[23]60. 
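A side note on the INCLUDE_CONFIG_FILE option above: it embeds this configuration file in the kernel image, where it can be read back at runtime through the kern.conftxt sysctl. The userland program below is a minimal sketch of that (not part of this changeset); it only works against a kernel built with INCLUDE_CONFIG_FILE.

/*
 * Sketch: dump the config file embedded in the running kernel.
 * Assumes the kernel was built with "options INCLUDE_CONFIG_FILE".
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t len = 0;
	char *buf;

	/* First call asks only for the required buffer size. */
	if (sysctlbyname("kern.conftxt", NULL, &len, NULL, 0) != 0) {
		perror("kern.conftxt (kernel built without INCLUDE_CONFIG_FILE?)");
		return (1);
	}
	buf = malloc(len);
	if (buf == NULL)
		return (1);
	if (sysctlbyname("kern.conftxt", buf, &len, NULL, 0) != 0) {
		perror("kern.conftxt");
		free(buf);
		return (1);
	}
	fwrite(buf, 1, len, stdout);	/* Print the embedded config text. */
	free(buf);
	return (0);
}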
device bt # Buslogic/Mylex MultiMaster SCSI adapters device isci # Intel C600 SAS controller device ocs_fc # Emulex FC adapters # ATA/SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) device ch # SCSI media changers device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) device ses # Enclosure Services (SES and SAF-TE) #device ctl # CAM Target Layer # RAID controllers interfaced to the SCSI subsystem device amr # AMI MegaRAID device arcmsr # Areca SATA II RAID device ciss # Compaq Smart RAID 5* device dpt # DPT Smartcache III, IV - See NOTES for options device hptmv # Highpoint RocketRAID 182x device hptnr # Highpoint DC7280, R750 device hptrr # Highpoint RocketRAID 17xx, 22xx, 23xx, 25xx device hpt27xx # Highpoint RocketRAID 27xx device iir # Intel Integrated RAID device ips # IBM (Adaptec) ServeRAID device mly # Mylex AcceleRAID/eXtremeRAID device twa # 3ware 9000 series PATA/SATA RAID device smartpqi # Microsemi smartpqi driver device tws # LSI 3ware 9750 SATA+SAS 6Gb/s RAID controller # RAID controllers device aac # Adaptec FSA RAID device aacp # SCSI passthrough for aac (requires CAM) device aacraid # Adaptec by PMC RAID device ida # Compaq Smart RAID device mfi # LSI MegaRAID SAS device mlx # Mylex DAC960 family device mrsas # LSI/Avago MegaRAID SAS/SATA, 6Gb/s and 12Gb/s device pmspcv # PMC-Sierra SAS/SATA Controller driver #XXX pointer/int warnings #device pst # Promise Supertrak SX6000 device twe # 3ware ATA RAID # NVM Express (NVMe) support device nvme # base NVMe driver device nvd # expose NVMe namespaces as disks, depends on nvme # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device kbdmux # keyboard multiplexer device vga # VGA video card driver options VESA # Add support for VESA BIOS Extensions (VBE) device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc options SC_PIXEL_MODE # add support for the raster text mode # vt is the new video console driver device vt device vt_vga device vt_efifb device agp # support several AGP chipsets # PCCARD (PCMCIA) support # PCMCIA and cardbus bridge support device cbb # cardbus (yenta) bridge device pccard # PC Card (16-bit) bus device cardbus # CardBus (32-bit) bus # Serial (COM) ports device uart # Generic UART driver # Parallel port device ppc device ppbus # Parallel port bus (required) device lpt # Printer device ppi # Parallel port interface device #device vpo # Requires scbus and da device puc # Multi I/O cards and multi-channel UARTs # PCI Ethernet NICs. device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE device de # DEC/Intel DC21x4x (``Tulip'') device em # Intel PRO/1000 Gigabit Ethernet Family device ix # Intel PRO/10GbE PCIE PF Ethernet device ixv # Intel PRO/10GbE PCIE VF Ethernet device ixl # Intel XL710 40Gbe PCIE Ethernet -options IXL_IW # Enable iWARP Client Interface in ixl(4) -device ixlv # Intel XL710 40Gbe VF PCIE Ethernet +#options IXL_IW # Enable iWARP Client Interface in ixl(4) +#device ixlv # Intel XL710 40Gbe VF PCIE Ethernet device le # AMD Am7900 LANCE and Am79C9xx PCnet device ti # Alteon Networks Tigon I/II gigabit Ethernet device txp # 3Com 3cR990 (``Typhoon'') device vx # 3Com 3c590, 3c595 (``Vortex'') # PCI Ethernet NICs that use the common MII bus controller code. 
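The two hunks above disable the IXL_IW iWARP client interface and the ixlv(4) VF driver in GENERIC; the matching files.amd64 build entries are commented out further down in this changeset. As an illustrative sketch only (the configuration name is hypothetical, and this is unlikely to build at this revision while those build entries remain disabled), a custom kernel configuration could re-enable them once the iflib-based driver regains that support:

include		GENERIC
ident		MYKERNEL		# hypothetical custom configuration
options 	IXL_IW			# iWARP Client Interface in ixl(4)
device		ixlv			# Intel XL710 40Gbe VF PCIE Ethernet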
# NOTE: Be sure to keep the 'device miibus' line in order to use these NICs! device miibus # MII bus support device ae # Attansic/Atheros L2 FastEthernet device age # Attansic/Atheros L1 Gigabit Ethernet device alc # Atheros AR8131/AR8132 Ethernet device ale # Atheros AR8121/AR8113/AR8114 Ethernet device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet device bfe # Broadcom BCM440x 10/100 Ethernet device bge # Broadcom BCM570xx Gigabit Ethernet device cas # Sun Cassini/Cassini+ and NS DP83065 Saturn device dc # DEC/Intel 21143 and various workalikes device et # Agere ET1310 10/100/Gigabit Ethernet device fxp # Intel EtherExpress PRO/100B (82557, 82558) device gem # Sun GEM/Sun ERI/Apple GMAC device hme # Sun HME (Happy Meal Ethernet) device jme # JMicron JMC250 Gigabit/JMC260 Fast Ethernet device lge # Level 1 LXT1001 gigabit Ethernet device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet device nfe # nVidia nForce MCP on-board Ethernet device nge # NatSemi DP83820 gigabit Ethernet device pcn # AMD Am79C97x PCI 10/100 (precedence over 'le') device re # RealTek 8139C+/8169/8169S/8110S device rl # RealTek 8129/8139 device sf # Adaptec AIC-6915 (``Starfire'') device sge # Silicon Integrated Systems SiS190/191 device sis # Silicon Integrated Systems SiS 900/SiS 7016 device sk # SysKonnect SK-984x & SK-982x gigabit Ethernet device ste # Sundance ST201 (D-Link DFE-550TX) device stge # Sundance/Tamarack TC9021 gigabit Ethernet device tl # Texas Instruments ThunderLAN device tx # SMC EtherPower II (83c170 ``EPIC'') device vge # VIA VT612x gigabit Ethernet device vr # VIA Rhine, Rhine II device wb # Winbond W89C840F device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs options IEEE80211_AMPDU_AGE # age frames in AMPDU reorder q's options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device wlan_amrr # AMRR transmit rate control algorithm device an # Aironet 4500/4800 802.11 wireless NICs. device ath # Atheros NICs device ath_pci # Atheros pci/cardbus glue device ath_hal # pci/cardbus chip support options AH_SUPPORT_AR5416 # enable AR5416 tx/rx descriptors options AH_AR5416_INTERRUPT_MITIGATION # AR5416 interrupt mitigation options ATH_ENABLE_11N # Enable 802.11n support for AR5416 and later device ath_rate_sample # SampleRate tx rate control for ath #device bwi # Broadcom BCM430x/BCM431x wireless NICs. #device bwn # Broadcom BCM43xx wireless NICs. device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. device iwn # Intel 4965/1000/5000/6000 wireless NICs. device malo # Marvell Libertas wireless NICs. device mwl # Marvell 88W8363 802.11n wireless NICs. device ral # Ralink Technology RT2500 wireless NICs. device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs. device wpi # Intel 3945ABG wireless NICs. # Pseudo devices. device loop # Network loopback device random # Entropy device device padlock_rng # VIA Padlock RNG device rdrand_rng # Intel Bull Mountain RNG device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device firmware # firmware assist module # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. 
device bpf # Berkeley packet filter # USB support options USB_DEBUG # enable debug msgs device uhci # UHCI PCI->USB interface device ohci # OHCI PCI->USB interface device ehci # EHCI PCI->USB interface (USB 2.0) device xhci # XHCI PCI->USB interface (USB 3.0) device usb # USB Bus (required) device ukbd # Keyboard device umass # Disks/Mass storage - Requires scbus and da # Sound support device sound # Generic sound driver (required) device snd_cmi # CMedia CMI8338/CMI8738 device snd_csa # Crystal Semiconductor CS461x/428x device snd_emu10kx # Creative SoundBlaster Live! and Audigy device snd_es137x # Ensoniq AudioPCI ES137x device snd_hda # Intel High Definition Audio device snd_ich # Intel, NVidia and other ICH AC'97 Audio device snd_via8233 # VIA VT8233x Audio # MMC/SD device mmc # MMC/SD bus device mmcsd # MMC/SD memory card device sdhci # Generic PCI SD Host Controller # VirtIO support device virtio # Generic VirtIO bus (required) device virtio_pci # VirtIO PCI device device vtnet # VirtIO Ethernet device device virtio_blk # VirtIO Block device device virtio_scsi # VirtIO SCSI device device virtio_balloon # VirtIO Memory Balloon device # HyperV drivers and enhancement support device hyperv # HyperV drivers # Xen HVM Guest Optimizations # NOTE: XENHVM depends on xenpci. They must be added or removed together. options XENHVM # Xen HVM kernel infrastructure device xenpci # Xen HVM Hypervisor services driver # VMware support device vmx # VMware VMXNET3 Ethernet # Netmap provides direct access to TX/RX rings on supported NICs device netmap # netmap(4) support # The crypto framework is required by IPSEC device crypto # Required by IPSEC Index: head/sys/conf/files.amd64 =================================================================== --- head/sys/conf/files.amd64 (revision 335337) +++ head/sys/conf/files.amd64 (revision 335338) @@ -1,757 +1,757 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. 
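The comment above explains why many entries below carry long, single-line compile-with and dependency strings. For orientation only, a custom-object entry has roughly the shape sketched here (the file and option names are hypothetical, not part of this change):

# example_lib.o	optional	exampledrv \
#	dependency	"$S/dev/example/example_lib.c" \
#	compile-with	"${CC} -c ${CFLAGS} ${.IMPSRC}" \
#	no-implicit-rule \
#	clean		"example_lib.o"

That is: the object to build, the kernel option that pulls it in, its source dependency, the literal command used to build it, and what to remove on clean.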
# # cloudabi32_vdso.o optional compat_cloudabi32 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S" \ compile-with "${CC} -x assembler-with-cpp -m32 -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi32_vdso.o" # cloudabi32_vdso_blob.o optional compat_cloudabi32 \ dependency "cloudabi32_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi32_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi32_vdso_blob.o" # cloudabi64_vdso.o optional compat_cloudabi64 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_x86_64.S" \ compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_x86_64.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi64_vdso.o" # cloudabi64_vdso_blob.o optional compat_cloudabi64 \ dependency "cloudabi64_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi64_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi64_vdso_blob.o" # linux32_genassym.o optional compat_linux32 \ dependency "$S/amd64/linux32/linux32_genassym.c" \ compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "linux32_genassym.o" # linux32_assym.h optional compat_linux32 \ dependency "$S/kern/genassym.sh linux32_genassym.o" \ compile-with "sh $S/kern/genassym.sh linux32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "linux32_assym.h" # linux32_locore.o optional compat_linux32 \ dependency "linux32_assym.h $S/amd64/linux32/linux32_locore.s" \ compile-with "${CC} -x assembler-with-cpp -DLOCORE -m32 -shared -s -pipe -I. 
-I$S -Werror -Wall -fPIC -fno-common -nostdinc -nostdlib -Wl,-T$S/amd64/linux32/linux32_vdso.lds.s -Wl,-soname=linux32_vdso.so,--eh-frame-hdr,-warn-common ${.IMPSRC} -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "linux32_locore.o" # linux32_vdso.so optional compat_linux32 \ dependency "linux32_locore.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 linux32_locore.o ${.TARGET}" \ no-implicit-rule \ clean "linux32_vdso.so" # ia32_genassym.o standard \ dependency "$S/compat/ia32/ia32_genassym.c" \ compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "ia32_genassym.o" # ia32_assym.h standard \ dependency "$S/kern/genassym.sh ia32_genassym.o" \ compile-with "env NM='${NM}' NMFLAGS='${NMFLAGS}' sh $S/kern/genassym.sh ia32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "ia32_assym.h" # font.h optional sc_dflt_font \ compile-with "uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'static u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'static u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'static u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \ no-obj no-implicit-rule before-depend \ clean "font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8" # atkbdmap.h optional atkbd_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${ATKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > atkbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "atkbdmap.h" # ukbdmap.h optional ukbd_dflt_keymap \ compile-with "kbdcontrol -P ${S:S/sys$/share/}/vt/keymaps -P ${S:S/sys$/share/}/syscons/keymaps -L ${UKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > ukbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "ukbdmap.h" # hpt27xx_lib.o optional hpt27xx \ dependency "$S/dev/hpt27xx/amd64-elf.hpt27xx_lib.o.uu" \ compile-with "uudecode < $S/dev/hpt27xx/amd64-elf.hpt27xx_lib.o.uu" \ no-implicit-rule # hptmvraid.o optional hptmv \ dependency "$S/dev/hptmv/amd64-elf.raid.o.uu" \ compile-with "uudecode < $S/dev/hptmv/amd64-elf.raid.o.uu" \ no-implicit-rule # hptnr_lib.o optional hptnr \ dependency "$S/dev/hptnr/amd64-elf.hptnr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptnr/amd64-elf.hptnr_lib.o.uu" \ no-implicit-rule # hptrr_lib.o optional hptrr \ dependency "$S/dev/hptrr/amd64-elf.hptrr_lib.o.uu" \ compile-with "uudecode < $S/dev/hptrr/amd64-elf.hptrr_lib.o.uu" \ no-implicit-rule # amd64/acpica/acpi_machdep.c optional acpi acpi_wakecode.o optional acpi \ dependency "$S/amd64/acpica/acpi_wakecode.S assym.inc" \ compile-with "${NORMAL_S}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.o" acpi_wakecode.bin optional acpi \ dependency "acpi_wakecode.o" \ compile-with "${OBJCOPY} -S -O binary acpi_wakecode.o ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.bin" acpi_wakecode.h optional acpi \ dependency "acpi_wakecode.bin" \ compile-with "file2c -sx 'static char wakecode[] = {' '};' < acpi_wakecode.bin > ${.TARGET}" \ no-obj no-implicit-rule 
before-depend \ clean "acpi_wakecode.h" acpi_wakedata.h optional acpi \ dependency "acpi_wakecode.o" \ compile-with '${NM} -n --defined-only acpi_wakecode.o | while read offset dummy what; do echo "#define $${what} 0x$${offset}"; done > ${.TARGET}' \ no-obj no-implicit-rule before-depend \ clean "acpi_wakedata.h" # #amd64/amd64/apic_vector.S standard amd64/amd64/atomic.c standard amd64/amd64/bios.c standard amd64/amd64/bpf_jit_machdep.c optional bpf_jitter amd64/amd64/cpu_switch.S standard amd64/amd64/db_disasm.c optional ddb amd64/amd64/db_interface.c optional ddb amd64/amd64/db_trace.c optional ddb amd64/amd64/efirt_machdep.c optional efirt amd64/amd64/elf_machdep.c standard amd64/amd64/exception.S standard amd64/amd64/fpu.c standard amd64/amd64/gdb_machdep.c optional gdb amd64/amd64/in_cksum.c optional inet | inet6 amd64/amd64/initcpu.c standard amd64/amd64/io.c optional io amd64/amd64/locore.S standard no-obj amd64/amd64/xen-locore.S optional xenhvm amd64/amd64/machdep.c standard amd64/amd64/mem.c optional mem amd64/amd64/minidump_machdep.c standard amd64/amd64/mp_machdep.c optional smp amd64/amd64/mpboot.S optional smp amd64/amd64/pmap.c standard amd64/amd64/prof_machdep.c optional profiling-routine amd64/amd64/ptrace_machdep.c standard amd64/amd64/sigtramp.S standard amd64/amd64/support.S standard amd64/amd64/sys_machdep.c standard amd64/amd64/trap.c standard amd64/amd64/uio_machdep.c standard amd64/amd64/uma_machdep.c standard amd64/amd64/vm_machdep.c standard amd64/cloudabi32/cloudabi32_sysvec.c optional compat_cloudabi32 amd64/cloudabi64/cloudabi64_sysvec.c optional compat_cloudabi64 amd64/pci/pci_cfgreg.c optional pci cddl/contrib/opensolaris/common/atomic/amd64/opensolaris_atomic.S optional zfs | dtrace compile-with "${ZFS_S}" cddl/dev/dtrace/amd64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/amd64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/x86/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" cddl/dev/dtrace/x86/dis_tables.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" cddl/dev/dtrace/x86/instr_size.c optional dtrace_fbt | dtraceall compile-with "${DTRACE_C}" crypto/aesni/aeskeys_amd64.S optional aesni crypto/aesni/aesni.c optional aesni aesni_ghash.o optional aesni \ dependency "$S/crypto/aesni/aesni_ghash.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes -mpclmul ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_ghash.o" aesni_wrap.o optional aesni \ dependency "$S/crypto/aesni/aesni_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${NO_WCAST_QUAL} ${PROF} -mmmx -msse -msse4 -maes ${.IMPSRC}" \ no-implicit-rule \ clean "aesni_wrap.o" crypto/blowfish/bf_enc.c optional crypto | ipsec | ipsec_support crypto/des/des_enc.c optional crypto | ipsec | \ ipsec_support | netsmb intel_sha1.o optional aesni \ dependency "$S/crypto/aesni/intel_sha1.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -msha ${.IMPSRC}" \ no-implicit-rule \ clean "intel_sha1.o" intel_sha256.o optional aesni \ dependency "$S/crypto/aesni/intel_sha256.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} ${WERROR} ${PROF} -mmmx -msse -msse4 -msha ${.IMPSRC}" \ no-implicit-rule \ clean "intel_sha256.o" crypto/via/padlock.c optional padlock crypto/via/padlock_cipher.c optional padlock crypto/via/padlock_hash.c optional padlock dev/acpica/acpi_if.m standard dev/acpica/acpi_hpet.c 
optional acpi dev/acpica/acpi_pci.c optional acpi pci dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pcib_acpi.c optional acpi pci dev/acpica/acpi_pcib_pci.c optional acpi pci dev/acpica/acpi_timer.c optional acpi dev/acpi_support/acpi_wmi_if.m standard dev/agp/agp_amd64.c optional agp dev/agp/agp_i810.c optional agp dev/agp/agp_via.c optional agp dev/amdsbwd/amdsbwd.c optional amdsbwd dev/amdsmn/amdsmn.c optional amdsmn | amdtemp dev/amdtemp/amdtemp.c optional amdtemp dev/arcmsr/arcmsr.c optional arcmsr pci dev/asmc/asmc.c optional asmc isa dev/atkbdc/atkbd.c optional atkbd atkbdc dev/atkbdc/atkbd_atkbdc.c optional atkbd atkbdc dev/atkbdc/atkbdc.c optional atkbdc dev/atkbdc/atkbdc_isa.c optional atkbdc isa dev/atkbdc/atkbdc_subr.c optional atkbdc dev/atkbdc/psm.c optional psm atkbdc dev/bxe/bxe.c optional bxe pci dev/bxe/bxe_stats.c optional bxe pci dev/bxe/bxe_debug.c optional bxe pci dev/bxe/ecore_sp.c optional bxe pci dev/bxe/bxe_elink.c optional bxe pci dev/bxe/57710_init_values.c optional bxe pci dev/bxe/57711_init_values.c optional bxe pci dev/bxe/57712_init_values.c optional bxe pci dev/coretemp/coretemp.c optional coretemp dev/cpuctl/cpuctl.c optional cpuctl dev/dpms/dpms.c optional dpms # There are no systems with isa slots, so all ed isa entries should go.. dev/ed/if_ed_3c503.c optional ed isa ed_3c503 dev/ed/if_ed_isa.c optional ed isa dev/ed/if_ed_wd80x3.c optional ed isa dev/ed/if_ed_hpp.c optional ed isa ed_hpp dev/ed/if_ed_sic.c optional ed isa ed_sic dev/fb/fb.c optional fb | vga dev/fb/s3_pci.c optional s3pci dev/fb/vesa.c optional vga vesa dev/fb/vga.c optional vga dev/ichwd/ichwd.c optional ichwd dev/if_ndis/if_ndis.c optional ndis dev/if_ndis/if_ndis_pccard.c optional ndis pccard dev/if_ndis/if_ndis_pci.c optional ndis cardbus | ndis pci dev/if_ndis/if_ndis_usb.c optional ndis usb dev/imcsmb/imcsmb.c optional imcsmb dev/imcsmb/imcsmb_pci.c optional imcsmb pci dev/intel/spi.c optional intelspi dev/io/iodev.c optional io dev/ioat/ioat.c optional ioat pci dev/ioat/ioat_test.c optional ioat pci dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_isa.c optional ipmi isa dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_smic.c optional ipmi dev/ipmi/ipmi_smbus.c optional ipmi smbus dev/ipmi/ipmi_smbios.c optional ipmi dev/ipmi/ipmi_ssif.c optional ipmi smbus dev/ipmi/ipmi_pci.c optional ipmi pci dev/ipmi/ipmi_linux.c optional ipmi compat_linux32 dev/ixl/if_ixl.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_main.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_qmgr.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_i2c.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" -dev/ixl/ixl_iw.c optional ixl pci \ - compile-with "${NORMAL_C} -I$S/dev/ixl" -dev/ixl/if_ixlv.c optional ixlv pci \ - compile-with "${NORMAL_C} -I$S/dev/ixl" +#dev/ixl/ixl_iw.c optional ixl pci \ +# compile-with "${NORMAL_C} -I$S/dev/ixl" +#dev/ixl/if_ixlv.c optional ixlv pci \ +# compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixlvc.c optional ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_txrx.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_osdep.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_lan_hmc.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} 
-I$S/dev/ixl" dev/ixl/i40e_hmc.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_common.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_nvm.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_adminq.c optional ixl pci | ixlv pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_dcb.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/fdc/fdc.c optional fdc dev/fdc/fdc_acpi.c optional fdc dev/fdc/fdc_isa.c optional fdc isa dev/fdc/fdc_pccard.c optional fdc pccard dev/gpio/bytgpio.c optional bytgpio dev/gpio/chvgpio.c optional chvgpio dev/hpt27xx/hpt27xx_os_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_osm_bsd.c optional hpt27xx dev/hpt27xx/hpt27xx_config.c optional hpt27xx dev/hptmv/entry.c optional hptmv dev/hptmv/mv.c optional hptmv dev/hptmv/gui_lib.c optional hptmv dev/hptmv/hptproc.c optional hptmv dev/hptmv/ioctl.c optional hptmv dev/hptnr/hptnr_os_bsd.c optional hptnr dev/hptnr/hptnr_osm_bsd.c optional hptnr dev/hptnr/hptnr_config.c optional hptnr dev/hptrr/hptrr_os_bsd.c optional hptrr dev/hptrr/hptrr_osm_bsd.c optional hptrr dev/hptrr/hptrr_config.c optional hptrr dev/hwpmc/hwpmc_amd.c optional hwpmc dev/hwpmc/hwpmc_intel.c optional hwpmc dev/hwpmc/hwpmc_core.c optional hwpmc dev/hwpmc/hwpmc_uncore.c optional hwpmc dev/hwpmc/hwpmc_tsc.c optional hwpmc dev/hwpmc/hwpmc_x86.c optional hwpmc dev/hyperv/input/hv_kbd.c optional hyperv dev/hyperv/input/hv_kbdc.c optional hyperv dev/hyperv/pcib/vmbus_pcib.c optional hyperv pci dev/hyperv/netvsc/hn_nvs.c optional hyperv dev/hyperv/netvsc/hn_rndis.c optional hyperv dev/hyperv/netvsc/if_hn.c optional hyperv dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c optional hyperv dev/hyperv/utilities/hv_kvp.c optional hyperv dev/hyperv/utilities/hv_snapshot.c optional hyperv dev/hyperv/utilities/vmbus_heartbeat.c optional hyperv dev/hyperv/utilities/vmbus_ic.c optional hyperv dev/hyperv/utilities/vmbus_shutdown.c optional hyperv dev/hyperv/utilities/vmbus_timesync.c optional hyperv dev/hyperv/vmbus/hyperv.c optional hyperv dev/hyperv/vmbus/hyperv_busdma.c optional hyperv dev/hyperv/vmbus/vmbus.c optional hyperv pci dev/hyperv/vmbus/vmbus_br.c optional hyperv dev/hyperv/vmbus/vmbus_chan.c optional hyperv dev/hyperv/vmbus/vmbus_et.c optional hyperv dev/hyperv/vmbus/vmbus_if.m optional hyperv dev/hyperv/vmbus/vmbus_res.c optional hyperv dev/hyperv/vmbus/vmbus_xact.c optional hyperv dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv dev/nctgpio/nctgpio.c optional nctgpio dev/nfe/if_nfe.c optional nfe pci dev/ntb/if_ntb/if_ntb.c optional if_ntb dev/ntb/ntb_transport.c optional ntb_transport | if_ntb dev/ntb/ntb.c optional ntb | ntb_transport | if_ntb | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_if.m optional ntb | ntb_transport | if_ntb | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_hw/ntb_hw_intel.c optional ntb_hw_intel | ntb_hw dev/ntb/ntb_hw/ntb_hw_plx.c optional ntb_hw_plx | ntb_hw dev/nvd/nvd.c optional nvd nvme dev/nvme/nvme.c optional nvme dev/nvme/nvme_ctrlr.c optional nvme dev/nvme/nvme_ctrlr_cmd.c optional nvme dev/nvme/nvme_ns.c optional nvme dev/nvme/nvme_ns_cmd.c optional nvme dev/nvme/nvme_qpair.c optional nvme dev/nvme/nvme_sim.c optional nvme scbus dev/nvme/nvme_sysctl.c optional nvme dev/nvme/nvme_test.c optional nvme dev/nvme/nvme_util.c optional nvme dev/nvram/nvram.c optional nvram isa dev/random/ivy.c optional rdrand_rng dev/random/nehemiah.c optional 
padlock_rng dev/qlxge/qls_dbg.c optional qlxge pci dev/qlxge/qls_dump.c optional qlxge pci dev/qlxge/qls_hw.c optional qlxge pci dev/qlxge/qls_ioctl.c optional qlxge pci dev/qlxge/qls_isr.c optional qlxge pci dev/qlxge/qls_os.c optional qlxge pci dev/qlxgb/qla_dbg.c optional qlxgb pci dev/qlxgb/qla_hw.c optional qlxgb pci dev/qlxgb/qla_ioctl.c optional qlxgb pci dev/qlxgb/qla_isr.c optional qlxgb pci dev/qlxgb/qla_misc.c optional qlxgb pci dev/qlxgb/qla_os.c optional qlxgb pci dev/qlxgbe/ql_dbg.c optional qlxgbe pci dev/qlxgbe/ql_hw.c optional qlxgbe pci dev/qlxgbe/ql_ioctl.c optional qlxgbe pci dev/qlxgbe/ql_isr.c optional qlxgbe pci dev/qlxgbe/ql_misc.c optional qlxgbe pci dev/qlxgbe/ql_os.c optional qlxgbe pci dev/qlxgbe/ql_reset.c optional qlxgbe pci dev/qlnx/qlnxe/ecore_cxt.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dbg_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dcbx.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dev.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_hw.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_ops.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_int.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_l2.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_mcp.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_sp_commands.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_spq.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_ioctl.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_os.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/sfxge/common/ef10_ev.c optional sfxge pci dev/sfxge/common/ef10_filter.c optional sfxge pci dev/sfxge/common/ef10_intr.c optional sfxge pci dev/sfxge/common/ef10_mac.c optional sfxge pci dev/sfxge/common/ef10_mcdi.c optional sfxge pci dev/sfxge/common/ef10_nic.c optional sfxge pci dev/sfxge/common/ef10_nvram.c optional sfxge pci dev/sfxge/common/ef10_phy.c optional sfxge pci dev/sfxge/common/ef10_rx.c optional sfxge pci dev/sfxge/common/ef10_tx.c optional sfxge pci dev/sfxge/common/ef10_vpd.c optional sfxge pci dev/sfxge/common/efx_bootcfg.c optional sfxge pci dev/sfxge/common/efx_crc32.c optional sfxge pci dev/sfxge/common/efx_ev.c optional sfxge pci dev/sfxge/common/efx_filter.c optional sfxge pci dev/sfxge/common/efx_hash.c optional sfxge pci dev/sfxge/common/efx_intr.c optional sfxge pci dev/sfxge/common/efx_lic.c optional sfxge pci dev/sfxge/common/efx_mac.c optional sfxge pci dev/sfxge/common/efx_mcdi.c optional sfxge pci dev/sfxge/common/efx_mon.c optional sfxge pci dev/sfxge/common/efx_nic.c optional sfxge pci dev/sfxge/common/efx_nvram.c optional sfxge pci dev/sfxge/common/efx_phy.c optional sfxge pci dev/sfxge/common/efx_port.c optional sfxge pci dev/sfxge/common/efx_rx.c optional sfxge pci dev/sfxge/common/efx_sram.c optional sfxge pci dev/sfxge/common/efx_tx.c optional sfxge pci dev/sfxge/common/efx_vpd.c optional sfxge pci dev/sfxge/common/hunt_nic.c optional sfxge pci dev/sfxge/common/mcdi_mon.c optional sfxge pci dev/sfxge/common/medford_nic.c optional sfxge pci dev/sfxge/common/siena_mac.c optional sfxge pci dev/sfxge/common/siena_mcdi.c optional sfxge pci dev/sfxge/common/siena_nic.c optional sfxge pci dev/sfxge/common/siena_nvram.c 
optional sfxge pci dev/sfxge/common/siena_phy.c optional sfxge pci dev/sfxge/common/siena_sram.c optional sfxge pci dev/sfxge/common/siena_vpd.c optional sfxge pci dev/sfxge/sfxge.c optional sfxge pci dev/sfxge/sfxge_dma.c optional sfxge pci dev/sfxge/sfxge_ev.c optional sfxge pci dev/sfxge/sfxge_intr.c optional sfxge pci dev/sfxge/sfxge_mcdi.c optional sfxge pci dev/sfxge/sfxge_nvram.c optional sfxge pci dev/sfxge/sfxge_port.c optional sfxge pci dev/sfxge/sfxge_rx.c optional sfxge pci dev/sfxge/sfxge_tx.c optional sfxge pci dev/sio/sio.c optional sio dev/sio/sio_isa.c optional sio isa dev/sio/sio_pccard.c optional sio pccard dev/sio/sio_pci.c optional sio pci dev/sio/sio_puc.c optional sio puc dev/smartpqi/smartpqi_cam.c optional smartpqi dev/smartpqi/smartpqi_cmd.c optional smartpqi dev/smartpqi/smartpqi_discovery.c optional smartpqi dev/smartpqi/smartpqi_event.c optional smartpqi dev/smartpqi/smartpqi_helper.c optional smartpqi dev/smartpqi/smartpqi_init.c optional smartpqi dev/smartpqi/smartpqi_intr.c optional smartpqi dev/smartpqi/smartpqi_ioctl.c optional smartpqi dev/smartpqi/smartpqi_main.c optional smartpqi dev/smartpqi/smartpqi_mem.c optional smartpqi dev/smartpqi/smartpqi_misc.c optional smartpqi dev/smartpqi/smartpqi_queue.c optional smartpqi dev/smartpqi/smartpqi_request.c optional smartpqi dev/smartpqi/smartpqi_response.c optional smartpqi dev/smartpqi/smartpqi_sis.c optional smartpqi dev/smartpqi/smartpqi_tag.c optional smartpqi dev/speaker/spkr.c optional speaker dev/syscons/apm/apm_saver.c optional apm_saver apm dev/syscons/scterm-teken.c optional sc dev/syscons/scvesactl.c optional sc vga vesa dev/syscons/scvgarndr.c optional sc vga dev/syscons/scvtb.c optional sc dev/tpm/tpm.c optional tpm dev/tpm/tpm_acpi.c optional tpm acpi dev/tpm/tpm_isa.c optional tpm isa dev/uart/uart_cpu_x86.c optional uart dev/viawd/viawd.c optional viawd dev/vmware/vmxnet3/if_vmx.c optional vmx dev/vmware/vmci/vmci.c optional vmci dev/vmware/vmci/vmci_datagram.c optional vmci dev/vmware/vmci/vmci_doorbell.c optional vmci dev/vmware/vmci/vmci_driver.c optional vmci dev/vmware/vmci/vmci_event.c optional vmci dev/vmware/vmci/vmci_hashtable.c optional vmci dev/vmware/vmci/vmci_kernel_if.c optional vmci dev/vmware/vmci/vmci_qpair.c optional vmci dev/vmware/vmci/vmci_queue_pair.c optional vmci dev/vmware/vmci/vmci_resource.c optional vmci dev/wbwd/wbwd.c optional wbwd dev/xen/pci/xen_acpi_pci.c optional xenhvm dev/xen/pci/xen_pci.c optional xenhvm dev/isci/isci.c optional isci dev/isci/isci_controller.c optional isci dev/isci/isci_domain.c optional isci dev/isci/isci_interrupt.c optional isci dev/isci/isci_io_request.c optional isci dev/isci/isci_logger.c optional isci dev/isci/isci_oem_parameters.c optional isci dev/isci/isci_remote_device.c optional isci dev/isci/isci_sysctl.c optional isci dev/isci/isci_task_request.c optional isci dev/isci/isci_timer.c optional isci dev/isci/scil/sati.c optional isci dev/isci/scil/sati_abort_task_set.c optional isci dev/isci/scil/sati_atapi.c optional isci dev/isci/scil/sati_device.c optional isci dev/isci/scil/sati_inquiry.c optional isci dev/isci/scil/sati_log_sense.c optional isci dev/isci/scil/sati_lun_reset.c optional isci dev/isci/scil/sati_mode_pages.c optional isci dev/isci/scil/sati_mode_select.c optional isci dev/isci/scil/sati_mode_sense.c optional isci dev/isci/scil/sati_mode_sense_10.c optional isci dev/isci/scil/sati_mode_sense_6.c optional isci dev/isci/scil/sati_move.c optional isci dev/isci/scil/sati_passthrough.c optional isci 
dev/isci/scil/sati_read.c optional isci dev/isci/scil/sati_read_buffer.c optional isci dev/isci/scil/sati_read_capacity.c optional isci dev/isci/scil/sati_reassign_blocks.c optional isci dev/isci/scil/sati_report_luns.c optional isci dev/isci/scil/sati_request_sense.c optional isci dev/isci/scil/sati_start_stop_unit.c optional isci dev/isci/scil/sati_synchronize_cache.c optional isci dev/isci/scil/sati_test_unit_ready.c optional isci dev/isci/scil/sati_unmap.c optional isci dev/isci/scil/sati_util.c optional isci dev/isci/scil/sati_verify.c optional isci dev/isci/scil/sati_write.c optional isci dev/isci/scil/sati_write_and_verify.c optional isci dev/isci/scil/sati_write_buffer.c optional isci dev/isci/scil/sati_write_long.c optional isci dev/isci/scil/sci_abstract_list.c optional isci dev/isci/scil/sci_base_controller.c optional isci dev/isci/scil/sci_base_domain.c optional isci dev/isci/scil/sci_base_iterator.c optional isci dev/isci/scil/sci_base_library.c optional isci dev/isci/scil/sci_base_logger.c optional isci dev/isci/scil/sci_base_memory_descriptor_list.c optional isci dev/isci/scil/sci_base_memory_descriptor_list_decorator.c optional isci dev/isci/scil/sci_base_object.c optional isci dev/isci/scil/sci_base_observer.c optional isci dev/isci/scil/sci_base_phy.c optional isci dev/isci/scil/sci_base_port.c optional isci dev/isci/scil/sci_base_remote_device.c optional isci dev/isci/scil/sci_base_request.c optional isci dev/isci/scil/sci_base_state_machine.c optional isci dev/isci/scil/sci_base_state_machine_logger.c optional isci dev/isci/scil/sci_base_state_machine_observer.c optional isci dev/isci/scil/sci_base_subject.c optional isci dev/isci/scil/sci_util.c optional isci dev/isci/scil/scic_sds_controller.c optional isci dev/isci/scil/scic_sds_library.c optional isci dev/isci/scil/scic_sds_pci.c optional isci dev/isci/scil/scic_sds_phy.c optional isci dev/isci/scil/scic_sds_port.c optional isci dev/isci/scil/scic_sds_port_configuration_agent.c optional isci dev/isci/scil/scic_sds_remote_device.c optional isci dev/isci/scil/scic_sds_remote_node_context.c optional isci dev/isci/scil/scic_sds_remote_node_table.c optional isci dev/isci/scil/scic_sds_request.c optional isci dev/isci/scil/scic_sds_sgpio.c optional isci dev/isci/scil/scic_sds_smp_remote_device.c optional isci dev/isci/scil/scic_sds_smp_request.c optional isci dev/isci/scil/scic_sds_ssp_request.c optional isci dev/isci/scil/scic_sds_stp_packet_request.c optional isci dev/isci/scil/scic_sds_stp_remote_device.c optional isci dev/isci/scil/scic_sds_stp_request.c optional isci dev/isci/scil/scic_sds_unsolicited_frame_control.c optional isci dev/isci/scil/scif_sas_controller.c optional isci dev/isci/scil/scif_sas_controller_state_handlers.c optional isci dev/isci/scil/scif_sas_controller_states.c optional isci dev/isci/scil/scif_sas_domain.c optional isci dev/isci/scil/scif_sas_domain_state_handlers.c optional isci dev/isci/scil/scif_sas_domain_states.c optional isci dev/isci/scil/scif_sas_high_priority_request_queue.c optional isci dev/isci/scil/scif_sas_internal_io_request.c optional isci dev/isci/scil/scif_sas_io_request.c optional isci dev/isci/scil/scif_sas_io_request_state_handlers.c optional isci dev/isci/scil/scif_sas_io_request_states.c optional isci dev/isci/scil/scif_sas_library.c optional isci dev/isci/scil/scif_sas_remote_device.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_ready_substates.c optional isci 
dev/isci/scil/scif_sas_remote_device_starting_substate_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_starting_substates.c optional isci dev/isci/scil/scif_sas_remote_device_state_handlers.c optional isci dev/isci/scil/scif_sas_remote_device_states.c optional isci dev/isci/scil/scif_sas_request.c optional isci dev/isci/scil/scif_sas_smp_activity_clear_affiliation.c optional isci dev/isci/scil/scif_sas_smp_io_request.c optional isci dev/isci/scil/scif_sas_smp_phy.c optional isci dev/isci/scil/scif_sas_smp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_io_request.c optional isci dev/isci/scil/scif_sas_stp_remote_device.c optional isci dev/isci/scil/scif_sas_stp_task_request.c optional isci dev/isci/scil/scif_sas_task_request.c optional isci dev/isci/scil/scif_sas_task_request_state_handlers.c optional isci dev/isci/scil/scif_sas_task_request_states.c optional isci dev/isci/scil/scif_sas_timer.c optional isci isa/syscons_isa.c optional sc isa/vga_isa.c optional vga kern/kern_clocksource.c standard kern/imgact_aout.c optional compat_aout kern/imgact_gzip.c optional gzip kern/link_elf_obj.c standard libkern/x86/crc32_sse42.c standard # # IA32 binary support # #amd64/ia32/ia32_exception.S optional compat_freebsd32 amd64/ia32/ia32_reg.c optional compat_freebsd32 amd64/ia32/ia32_signal.c optional compat_freebsd32 amd64/ia32/ia32_sigtramp.S optional compat_freebsd32 amd64/ia32/ia32_syscall.c optional compat_freebsd32 amd64/ia32/ia32_misc.c optional compat_freebsd32 compat/ia32/ia32_sysvec.c optional compat_freebsd32 compat/linprocfs/linprocfs.c optional linprocfs compat/linsysfs/linsysfs.c optional linsysfs # # Linux/i386 binary support # amd64/linux32/linux32_dummy.c optional compat_linux32 amd64/linux32/linux32_machdep.c optional compat_linux32 amd64/linux32/linux32_support.s optional compat_linux32 \ dependency "linux32_assym.h" amd64/linux32/linux32_sysent.c optional compat_linux32 amd64/linux32/linux32_sysvec.c optional compat_linux32 compat/linux/linux_emul.c optional compat_linux32 compat/linux/linux_errno.c optional compat_linux32 compat/linux/linux_file.c optional compat_linux32 compat/linux/linux_fork.c optional compat_linux32 compat/linux/linux_futex.c optional compat_linux32 compat/linux/linux_getcwd.c optional compat_linux32 compat/linux/linux_ioctl.c optional compat_linux32 compat/linux/linux_ipc.c optional compat_linux32 compat/linux/linux_mib.c optional compat_linux32 compat/linux/linux_misc.c optional compat_linux32 compat/linux/linux_mmap.c optional compat_linux32 compat/linux/linux_signal.c optional compat_linux32 compat/linux/linux_socket.c optional compat_linux32 compat/linux/linux_stats.c optional compat_linux32 compat/linux/linux_sysctl.c optional compat_linux32 compat/linux/linux_time.c optional compat_linux32 compat/linux/linux_timer.c optional compat_linux32 compat/linux/linux_uid16.c optional compat_linux32 compat/linux/linux_util.c optional compat_linux32 compat/linux/linux_vdso.c optional compat_linux32 compat/linux/linux_common.c optional compat_linux32 compat/linux/linux_event.c optional compat_linux32 compat/linux/linux.c optional compat_linux32 dev/amr/amr_linux.c optional compat_linux32 amr dev/mfi/mfi_linux.c optional compat_linux32 mfi # # Windows NDIS driver support # compat/ndis/kern_ndis.c optional ndisapi pci compat/ndis/kern_windrv.c optional ndisapi pci compat/ndis/subr_hal.c optional ndisapi pci compat/ndis/subr_ndis.c optional ndisapi pci compat/ndis/subr_ntoskrnl.c optional ndisapi pci compat/ndis/subr_pe.c optional ndisapi pci 
compat/ndis/subr_usbd.c optional ndisapi pci compat/ndis/winx64_wrap.S optional ndisapi pci # # x86 real mode BIOS emulator, required by dpms/pci/vesa # compat/x86bios/x86bios.c optional x86bios | dpms | pci | vesa contrib/x86emu/x86emu.c optional x86bios | dpms | pci | vesa # # bvm console # dev/bvm/bvm_console.c optional bvmconsole dev/bvm/bvm_dbg.c optional bvmdebug # # x86 shared code between IA32 and AMD64 architectures # x86/acpica/OsdEnvironment.c optional acpi x86/acpica/acpi_apm.c optional acpi x86/acpica/acpi_wakeup.c optional acpi x86/acpica/madt.c optional acpi x86/acpica/srat.c optional acpi x86/bios/smbios.c optional smbios x86/bios/vpd.c optional vpd x86/cpufreq/powernow.c optional cpufreq x86/cpufreq/est.c optional cpufreq x86/cpufreq/hwpstate.c optional cpufreq x86/cpufreq/p4tcc.c optional cpufreq x86/iommu/busdma_dmar.c optional acpi acpi_dmar pci x86/iommu/intel_ctx.c optional acpi acpi_dmar pci x86/iommu/intel_drv.c optional acpi acpi_dmar pci x86/iommu/intel_fault.c optional acpi acpi_dmar pci x86/iommu/intel_gas.c optional acpi acpi_dmar pci x86/iommu/intel_idpgtbl.c optional acpi acpi_dmar pci x86/iommu/intel_intrmap.c optional acpi acpi_dmar pci x86/iommu/intel_qi.c optional acpi acpi_dmar pci x86/iommu/intel_quirks.c optional acpi acpi_dmar pci x86/iommu/intel_utils.c optional acpi acpi_dmar pci x86/isa/atpic.c optional atpic isa x86/isa/atrtc.c standard x86/isa/clock.c standard x86/isa/elcr.c optional atpic isa | mptable x86/isa/isa.c standard x86/isa/isa_dma.c standard x86/isa/nmi.c standard x86/isa/orm.c optional isa x86/pci/pci_bus.c optional pci x86/pci/qpi.c optional pci x86/x86/autoconf.c standard x86/x86/bus_machdep.c standard x86/x86/busdma_bounce.c standard x86/x86/busdma_machdep.c standard x86/x86/cpu_machdep.c standard x86/x86/dump_machdep.c standard x86/x86/fdt_machdep.c optional fdt x86/x86/identcpu.c standard x86/x86/intr_machdep.c standard x86/x86/io_apic.c standard x86/x86/legacy.c standard x86/x86/local_apic.c standard x86/x86/mca.c standard x86/x86/x86_mem.c optional mem x86/x86/mptable.c optional mptable x86/x86/mptable_pci.c optional mptable pci x86/x86/mp_x86.c optional smp x86/x86/mp_watchdog.c optional mp_watchdog smp x86/x86/msi.c optional pci x86/x86/nexus.c standard x86/x86/pvclock.c standard x86/x86/stack_machdep.c optional ddb | stack x86/x86/tsc.c standard x86/x86/delay.c standard x86/xen/hvm.c optional xenhvm x86/xen/xen_intr.c optional xenhvm x86/xen/pv.c optional xenhvm x86/xen/pvcpu_enum.c optional xenhvm x86/xen/xen_apic.c optional xenhvm x86/xen/xenpv.c optional xenhvm x86/xen/xen_nexus.c optional xenhvm x86/xen/xen_msi.c optional xenhvm x86/xen/xen_pci_bus.c optional xenhvm Index: head/sys/dev/ixl/i40e_osdep.c =================================================================== --- head/sys/dev/ixl/i40e_osdep.c (revision 335337) +++ head/sys/dev/ixl/i40e_osdep.c (revision 335338) @@ -1,278 +1,278 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include #include "ixl.h" /******************************************************************** * Manage DMA'able memory. *******************************************************************/ static void i40e_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs->ds_addr; return; } i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size) { mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); return(mem->va == NULL); } i40e_status i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem) { free(mem->va, M_DEVBUF); mem->va = NULL; return(0); } i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem, enum i40e_memory_type type __unused, u64 size, u32 alignment) { device_t dev = ((struct i40e_osdep *)hw->back)->dev; int err; err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ alignment, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &mem->tag); if (err != 0) { device_printf(dev, "i40e_allocate_dma: bus_dma_tag_create failed, " "error %u\n", err); goto fail_0; } err = bus_dmamem_alloc(mem->tag, (void **)&mem->va, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map); if (err != 0) { device_printf(dev, "i40e_allocate_dma: bus_dmamem_alloc failed, " "error %u\n", err); goto fail_1; } err = bus_dmamap_load(mem->tag, mem->map, mem->va, size, i40e_dmamap_cb, &mem->pa, BUS_DMA_NOWAIT); if (err != 0) { device_printf(dev, "i40e_allocate_dma: bus_dmamap_load failed, " "error %u\n", err); goto fail_2; } mem->nseg = 1; mem->size = size; bus_dmamap_sync(mem->tag, mem->map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); return (0); fail_2: bus_dmamem_free(mem->tag, mem->va, mem->map); fail_1: bus_dma_tag_destroy(mem->tag); fail_0: mem->map = NULL; mem->tag = NULL; return (err); } i40e_status i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem) { bus_dmamap_sync(mem->tag, mem->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(mem->tag, mem->map); bus_dmamem_free(mem->tag, mem->va, mem->map); bus_dma_tag_destroy(mem->tag); - return (0); + return (I40E_SUCCESS); } void i40e_init_spinlock(struct i40e_spinlock *lock) { mtx_init(&lock->mutex, "mutex", "ixl spinlock", MTX_DEF | MTX_DUPOK); } void i40e_acquire_spinlock(struct 
i40e_spinlock *lock) { mtx_lock(&lock->mutex); } void i40e_release_spinlock(struct i40e_spinlock *lock) { mtx_unlock(&lock->mutex); } void i40e_destroy_spinlock(struct i40e_spinlock *lock) { if (mtx_initialized(&lock->mutex)) mtx_destroy(&lock->mutex); } void i40e_msec_pause(int msecs) { int ticks_to_pause = (msecs * hz) / 1000; int start_ticks = ticks; if (cold || SCHEDULER_STOPPED()) { i40e_msec_delay(msecs); return; } while (1) { kern_yield(PRI_USER); int yielded_ticks = ticks - start_ticks; if (yielded_ticks > ticks_to_pause) break; else if (yielded_ticks < 0 && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) { break; } } } /* * Helper function for debug statement printing */ void i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...) { va_list args; device_t dev; if (!(mask & ((struct i40e_hw *)hw)->debug_mask)) return; dev = ((struct i40e_osdep *)hw->back)->dev; /* Re-implement device_printf() */ device_print_prettyname(dev); va_start(args, fmt); vprintf(fmt, args); va_end(args); } const char * ixl_vc_opcode_str(uint16_t op) { switch (op) { case VIRTCHNL_OP_VERSION: return ("VERSION"); case VIRTCHNL_OP_RESET_VF: return ("RESET_VF"); case VIRTCHNL_OP_GET_VF_RESOURCES: return ("GET_VF_RESOURCES"); case VIRTCHNL_OP_CONFIG_TX_QUEUE: return ("CONFIG_TX_QUEUE"); case VIRTCHNL_OP_CONFIG_RX_QUEUE: return ("CONFIG_RX_QUEUE"); case VIRTCHNL_OP_CONFIG_VSI_QUEUES: return ("CONFIG_VSI_QUEUES"); case VIRTCHNL_OP_CONFIG_IRQ_MAP: return ("CONFIG_IRQ_MAP"); case VIRTCHNL_OP_ENABLE_QUEUES: return ("ENABLE_QUEUES"); case VIRTCHNL_OP_DISABLE_QUEUES: return ("DISABLE_QUEUES"); case VIRTCHNL_OP_ADD_ETH_ADDR: return ("ADD_ETH_ADDR"); case VIRTCHNL_OP_DEL_ETH_ADDR: return ("DEL_ETH_ADDR"); case VIRTCHNL_OP_ADD_VLAN: return ("ADD_VLAN"); case VIRTCHNL_OP_DEL_VLAN: return ("DEL_VLAN"); case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: return ("CONFIG_PROMISCUOUS_MODE"); case VIRTCHNL_OP_GET_STATS: return ("GET_STATS"); case VIRTCHNL_OP_RSVD: return ("RSVD"); case VIRTCHNL_OP_EVENT: return ("EVENT"); case VIRTCHNL_OP_CONFIG_RSS_KEY: return ("CONFIG_RSS_KEY"); case VIRTCHNL_OP_CONFIG_RSS_LUT: return ("CONFIG_RSS_LUT"); case VIRTCHNL_OP_GET_RSS_HENA_CAPS: return ("GET_RSS_HENA_CAPS"); case VIRTCHNL_OP_SET_RSS_HENA: return ("SET_RSS_HENA"); default: return ("UNKNOWN"); } } u16 i40e_read_pci_cfg(struct i40e_hw *hw, u32 reg) { u16 value; value = pci_read_config(((struct i40e_osdep *)hw->back)->dev, reg, 2); return (value); } void i40e_write_pci_cfg(struct i40e_hw *hw, u32 reg, u16 value) { pci_write_config(((struct i40e_osdep *)hw->back)->dev, reg, value, 2); return; } Index: head/sys/dev/ixl/if_ixl.c =================================================================== --- head/sys/dev/ixl/if_ixl.c (revision 335337) +++ head/sys/dev/ixl/if_ixl.c (revision 335338) @@ -1,816 +1,1710 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl.h" #include "ixl_pf.h" #ifdef IXL_IW #include "ixl_iw.h" #include "ixl_iw_int.h" #endif #ifdef PCI_IOV #include "ixl_pf_iov.h" #endif /********************************************************************* * Driver version *********************************************************************/ #define IXL_DRIVER_VERSION_MAJOR 1 #define IXL_DRIVER_VERSION_MINOR 9 #define IXL_DRIVER_VERSION_BUILD 9 -char ixl_driver_version[] = __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." - __XSTRING(IXL_DRIVER_VERSION_MINOR) "." - __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k"; +#define IXL_DRIVER_VERSION_STRING \ + __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." \ + __XSTRING(IXL_DRIVER_VERSION_MINOR) "." \ + __XSTRING(IXL_DRIVER_VERSION_BUILD) "-iflib-k" /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on - * Last field stores an index into ixl_strings - * Last entry must be all 0s * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } + * ( Vendor ID, Device ID, Branding String ) *********************************************************************/ -static ixl_vendor_info_t ixl_vendor_info_array[] = +static pci_vendor_info_t ixl_vendor_info_array[] = { - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, 0, 0, 0}, + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"), + 
PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"), + PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"), /* required last entry */ - {0, 0, 0, 0, 0} + PVID_END }; /********************************************************************* - * Table of branding strings - *********************************************************************/ - -static char *ixl_strings[] = { - "Intel(R) Ethernet Connection 700 Series PF Driver" -}; - - -/********************************************************************* * Function prototypes *********************************************************************/ -static int ixl_probe(device_t); -static int ixl_attach(device_t); -static int ixl_detach(device_t); -static int ixl_shutdown(device_t); +/*** IFLIB interface ***/ +static void *ixl_register(device_t dev); +static int ixl_if_attach_pre(if_ctx_t ctx); +static int ixl_if_attach_post(if_ctx_t ctx); +static int ixl_if_detach(if_ctx_t ctx); +static int ixl_if_shutdown(if_ctx_t ctx); +static int ixl_if_suspend(if_ctx_t ctx); +static int ixl_if_resume(if_ctx_t ctx); +static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix); +static void ixl_if_enable_intr(if_ctx_t ctx); +static void ixl_if_disable_intr(if_ctx_t ctx); +static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); +static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); +static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); +static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); +static void ixl_if_queues_free(if_ctx_t ctx); +static void ixl_if_update_admin_status(if_ctx_t ctx); +static void ixl_if_multi_set(if_ctx_t ctx); +static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu); +static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); +static int ixl_if_media_change(if_ctx_t ctx); +static int ixl_if_promisc_set(if_ctx_t ctx, int flags); +static void ixl_if_timer(if_ctx_t ctx, uint16_t qid); +static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag); +static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag); +static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt); +static void ixl_if_vflr_handle(if_ctx_t ctx); +// static void 
ixl_if_link_intr_enable(if_ctx_t ctx); +static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req); +static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data); -static int ixl_save_pf_tunables(struct ixl_pf *); +/*** Other ***/ +static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int); +static void ixl_save_pf_tunables(struct ixl_pf *); +static int ixl_allocate_pci_resources(struct ixl_pf *); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixl_methods[] = { /* Device interface */ - DEVMETHOD(device_probe, ixl_probe), - DEVMETHOD(device_attach, ixl_attach), - DEVMETHOD(device_detach, ixl_detach), - DEVMETHOD(device_shutdown, ixl_shutdown), + DEVMETHOD(device_register, ixl_register), + DEVMETHOD(device_probe, iflib_device_probe), + DEVMETHOD(device_attach, iflib_device_attach), + DEVMETHOD(device_detach, iflib_device_detach), + DEVMETHOD(device_shutdown, iflib_device_shutdown), #ifdef PCI_IOV DEVMETHOD(pci_iov_init, ixl_iov_init), DEVMETHOD(pci_iov_uninit, ixl_iov_uninit), DEVMETHOD(pci_iov_add_vf, ixl_add_vf), #endif - {0, 0} + DEVMETHOD_END }; static driver_t ixl_driver = { "ixl", ixl_methods, sizeof(struct ixl_pf), }; devclass_t ixl_devclass; DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); +MODULE_VERSION(ixl, 3); -MODULE_VERSION(ixl, 1); - MODULE_DEPEND(ixl, pci, 1, 1, 1); MODULE_DEPEND(ixl, ether, 1, 1, 1); -#if defined(DEV_NETMAP) && __FreeBSD_version >= 1100000 -MODULE_DEPEND(ixl, netmap, 1, 1, 1); -#endif /* DEV_NETMAP */ +MODULE_DEPEND(ixl, iflib, 1, 1, 1); +static device_method_t ixl_if_methods[] = { + DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre), + DEVMETHOD(ifdi_attach_post, ixl_if_attach_post), + DEVMETHOD(ifdi_detach, ixl_if_detach), + DEVMETHOD(ifdi_shutdown, ixl_if_shutdown), + DEVMETHOD(ifdi_suspend, ixl_if_suspend), + DEVMETHOD(ifdi_resume, ixl_if_resume), + DEVMETHOD(ifdi_init, ixl_if_init), + DEVMETHOD(ifdi_stop, ixl_if_stop), + DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign), + DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr), + DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr), + //DEVMETHOD(ifdi_link_intr_enable, ixl_if_link_intr_enable), + DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable), + DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable), + DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc), + DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc), + DEVMETHOD(ifdi_queues_free, ixl_if_queues_free), + DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status), + DEVMETHOD(ifdi_multi_set, ixl_if_multi_set), + DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set), + DEVMETHOD(ifdi_media_status, ixl_if_media_status), + DEVMETHOD(ifdi_media_change, ixl_if_media_change), + DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set), + DEVMETHOD(ifdi_timer, ixl_if_timer), + DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register), + DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister), + DEVMETHOD(ifdi_get_counter, ixl_if_get_counter), + DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle), + DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req), + DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl), + // ifdi_led_func + // ifdi_debug + DEVMETHOD_END +}; + +static driver_t ixl_if_driver = { + "ixl_if", ixl_if_methods, sizeof(struct ixl_pf) +}; + /* ** TUNEABLE PARAMETERS: */ static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, "IXL driver parameters"); /* - * MSIX should be 
the default for best performance, - * but this allows it to be forced off for testing. - */ -static int ixl_enable_msix = 1; -TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix); -SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0, - "Enable MSI-X interrupts"); - -/* -** Number of descriptors per ring -** - TX and RX sizes are independently configurable -*/ -static int ixl_tx_ring_size = IXL_DEFAULT_RING; -TUNABLE_INT("hw.ixl.tx_ring_size", &ixl_tx_ring_size); -SYSCTL_INT(_hw_ixl, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN, - &ixl_tx_ring_size, 0, "TX Descriptor Ring Size"); - -static int ixl_rx_ring_size = IXL_DEFAULT_RING; -TUNABLE_INT("hw.ixl.rx_ring_size", &ixl_rx_ring_size); -SYSCTL_INT(_hw_ixl, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN, - &ixl_rx_ring_size, 0, "RX Descriptor Ring Size"); - -/* -** This can be set manually, if left as 0 the -** number of queues will be calculated based -** on cpus and msix vectors available. -*/ -static int ixl_max_queues = 0; -TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues); -SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN, - &ixl_max_queues, 0, "Number of Queues"); - -/* * Leave this on unless you need to send flow control * frames (or other control frames) from software */ static int ixl_enable_tx_fc_filter = 1; TUNABLE_INT("hw.ixl.enable_tx_fc_filter", &ixl_enable_tx_fc_filter); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, &ixl_enable_tx_fc_filter, 0, "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources"); +static int ixl_i2c_access_method = 0; +TUNABLE_INT("hw.ixl.i2c_access_method", + &ixl_i2c_access_method); +SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN, + &ixl_i2c_access_method, 0, + IXL_SYSCTL_HELP_I2C_METHOD); + /* * Different method for processing TX descriptor * completion. 
*/ static int ixl_enable_head_writeback = 1; TUNABLE_INT("hw.ixl.enable_head_writeback", &ixl_enable_head_writeback); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, &ixl_enable_head_writeback, 0, "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); static int ixl_core_debug_mask = 0; TUNABLE_INT("hw.ixl.core_debug_mask", &ixl_core_debug_mask); SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN, &ixl_core_debug_mask, 0, "Display debug statements that are printed in non-shared code"); static int ixl_shared_debug_mask = 0; TUNABLE_INT("hw.ixl.shared_debug_mask", &ixl_shared_debug_mask); SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN, &ixl_shared_debug_mask, 0, "Display debug statements that are printed in shared code"); +#if 0 /* ** Controls for Interrupt Throttling ** - true/false for dynamic adjustment ** - default values for static ITR */ -static int ixl_dynamic_rx_itr = 1; +static int ixl_dynamic_rx_itr = 0; TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); -static int ixl_dynamic_tx_itr = 1; +static int ixl_dynamic_tx_itr = 0; TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); +#endif static int ixl_rx_itr = IXL_ITR_8K; TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN, &ixl_rx_itr, 0, "RX Interrupt Rate"); static int ixl_tx_itr = IXL_ITR_4K; TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN, &ixl_tx_itr, 0, "TX Interrupt Rate"); #ifdef IXL_IW int ixl_enable_iwarp = 0; TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN, &ixl_enable_iwarp, 0, "iWARP enabled"); #if __FreeBSD_version < 1100000 int ixl_limit_iwarp_msix = 1; #else int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX; #endif TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix); SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN, &ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP"); #endif -#ifdef DEV_NETMAP -#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */ -#include -#endif /* DEV_NETMAP */ +extern struct if_txrx ixl_txrx_hwb; +extern struct if_txrx ixl_txrx_dwb; -/********************************************************************* - * Device identification routine - * - * ixl_probe determines if the driver should be loaded on - * the hardware based on PCI vendor/device id of the device. 
- * - * return BUS_PROBE_DEFAULT on success, positive on failure - *********************************************************************/ +static struct if_shared_ctx ixl_sctx_init = { + .isc_magic = IFLIB_MAGIC, + .isc_q_align = PAGE_SIZE, + .isc_tx_maxsize = IXL_TSO_SIZE, + .isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, -static int -ixl_probe(device_t dev) -{ - ixl_vendor_info_t *ent; + .isc_rx_maxsize = 16384, + .isc_rx_nsegments = IXL_MAX_RX_SEGS, + .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, + .isc_nfl = 1, + .isc_ntxqs = 1, + .isc_nrxqs = 1, - u16 pci_vendor_id, pci_device_id; - u16 pci_subvendor_id, pci_subdevice_id; - char device_name[256]; + .isc_admin_intrcnt = 1, + .isc_vendor_info = ixl_vendor_info_array, + .isc_driver_version = IXL_DRIVER_VERSION_STRING, + .isc_driver = &ixl_if_driver, + .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_ADMIN_ALWAYS_RUN, -#if 0 - INIT_DEBUGOUT("ixl_probe: begin"); -#endif - pci_vendor_id = pci_get_vendor(dev); - if (pci_vendor_id != I40E_INTEL_VENDOR_ID) - return (ENXIO); + .isc_nrxd_min = {IXL_MIN_RING}, + .isc_ntxd_min = {IXL_MIN_RING}, + .isc_nrxd_max = {IXL_MAX_RING}, + .isc_ntxd_max = {IXL_MAX_RING}, + .isc_nrxd_default = {IXL_DEFAULT_RING}, + .isc_ntxd_default = {IXL_DEFAULT_RING}, +}; - pci_device_id = pci_get_device(dev); - pci_subvendor_id = pci_get_subvendor(dev); - pci_subdevice_id = pci_get_subdevice(dev); +if_shared_ctx_t ixl_sctx = &ixl_sctx_init; - ent = ixl_vendor_info_array; - while (ent->vendor_id != 0) { - if ((pci_vendor_id == ent->vendor_id) && - (pci_device_id == ent->device_id) && - - ((pci_subvendor_id == ent->subvendor_id) || - (ent->subvendor_id == 0)) && - - ((pci_subdevice_id == ent->subdevice_id) || - (ent->subdevice_id == 0))) { - sprintf(device_name, "%s, Version - %s", - ixl_strings[ent->index], - ixl_driver_version); - device_set_desc_copy(dev, device_name); - return (BUS_PROBE_DEFAULT); - } - ent++; - } - return (ENXIO); +/*** Functions ***/ +static void * +ixl_register(device_t dev) +{ + return (ixl_sctx); } -/* - * Sanity check and save off tunable values. 
- */ static int -ixl_save_pf_tunables(struct ixl_pf *pf) +ixl_allocate_pci_resources(struct ixl_pf *pf) { - device_t dev = pf->dev; + int rid; + struct i40e_hw *hw = &pf->hw; + device_t dev = iflib_get_dev(pf->vsi.ctx); - /* Save tunable information */ - pf->enable_msix = ixl_enable_msix; - pf->max_queues = ixl_max_queues; - pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter; - pf->dynamic_rx_itr = ixl_dynamic_rx_itr; - pf->dynamic_tx_itr = ixl_dynamic_tx_itr; - pf->dbg_mask = ixl_core_debug_mask; - pf->hw.debug_mask = ixl_shared_debug_mask; -#ifdef DEV_NETMAP - if (ixl_enable_head_writeback == 0) - device_printf(dev, "Head writeback mode cannot be disabled " - "when netmap is enabled\n"); - pf->vsi.enable_head_writeback = 1; -#else - pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback); -#endif + /* Map BAR0 */ + rid = PCIR_BAR(0); + pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); + + if (!(pf->pci_mem)) { + device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); + return (ENXIO); + } - ixl_vsi_setup_rings_size(&pf->vsi, ixl_tx_ring_size, ixl_rx_ring_size); + /* Save off the PCI information */ + hw->vendor_id = pci_get_vendor(dev); + hw->device_id = pci_get_device(dev); + hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); + hw->subsystem_vendor_id = + pci_read_config(dev, PCIR_SUBVEND_0, 2); + hw->subsystem_device_id = + pci_read_config(dev, PCIR_SUBDEV_0, 2); - if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) { - device_printf(dev, "Invalid tx_itr value of %d set!\n", - ixl_tx_itr); - device_printf(dev, "tx_itr must be between %d and %d, " - "inclusive\n", - 0, IXL_MAX_ITR); - device_printf(dev, "Using default value of %d instead\n", - IXL_ITR_4K); - pf->tx_itr = IXL_ITR_4K; - } else - pf->tx_itr = ixl_tx_itr; + hw->bus.device = pci_get_slot(dev); + hw->bus.func = pci_get_function(dev); - if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) { - device_printf(dev, "Invalid rx_itr value of %d set!\n", - ixl_rx_itr); - device_printf(dev, "rx_itr must be between %d and %d, " - "inclusive\n", - 0, IXL_MAX_ITR); - device_printf(dev, "Using default value of %d instead\n", - IXL_ITR_8K); - pf->rx_itr = IXL_ITR_8K; - } else - pf->rx_itr = ixl_rx_itr; + /* Save off register access information */ + pf->osdep.mem_bus_space_tag = + rman_get_bustag(pf->pci_mem); + pf->osdep.mem_bus_space_handle = + rman_get_bushandle(pf->pci_mem); + pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); + pf->osdep.flush_reg = I40E_GLGEN_STAT; + pf->osdep.dev = dev; - return (0); -} + pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; + pf->hw.back = &pf->osdep; + + return (0); + } -/********************************************************************* - * Device initialization routine - * - * The attach entry point is called when the driver is being loaded. - * This routine identifies the type of hardware, allocates all resources - * and initializes the hardware. 
- * - * return 0 on success, positive on failure - *********************************************************************/ - static int -ixl_attach(device_t dev) +ixl_if_attach_pre(if_ctx_t ctx) { - struct ixl_pf *pf; - struct i40e_hw *hw; - struct ixl_vsi *vsi; + device_t dev; + struct ixl_pf *pf; + struct i40e_hw *hw; + struct ixl_vsi *vsi; + if_softc_ctx_t scctx; + struct i40e_filter_control_settings filter; enum i40e_status_code status; - int error = 0; + int error = 0; - INIT_DEBUGOUT("ixl_attach: begin"); + INIT_DEBUGOUT("ixl_if_attach_pre: begin"); /* Allocate, clear, and link in our primary soft structure */ - pf = device_get_softc(dev); - pf->dev = pf->osdep.dev = dev; + dev = iflib_get_dev(ctx); + pf = iflib_get_softc(ctx); + vsi = &pf->vsi; + vsi->back = pf; + pf->dev = dev; hw = &pf->hw; /* ** Note this assumes we have a single embedded VSI, ** this could be enhanced later to allocate multiple */ - vsi = &pf->vsi; - vsi->dev = pf->dev; - vsi->back = pf; + //vsi->dev = pf->dev; + vsi->hw = &pf->hw; + vsi->id = 0; + vsi->num_vlans = 0; + vsi->ctx = ctx; + vsi->media = iflib_get_media(ctx); + vsi->shared = scctx = iflib_get_softc_ctx(ctx); /* Save tunable values */ - error = ixl_save_pf_tunables(pf); - if (error) - return (error); + ixl_save_pf_tunables(pf); - /* Core Lock Init*/ - IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev)); - - /* Set up the timer callout */ - callout_init_mtx(&pf->timer, &pf->pf_mtx, 0); - /* Do PCI setup - map BAR0, etc */ if (ixl_allocate_pci_resources(pf)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; - goto err_out; + goto err_pci_res; } /* Establish a clean starting point */ i40e_clear_hw(hw); status = i40e_pf_reset(hw); if (status) { device_printf(dev, "PF reset failure %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } /* Initialize the shared code */ status = i40e_init_shared_code(hw); if (status) { device_printf(dev, "Unable to initialize shared code, error %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } /* Set up the admin queue */ hw->aq.num_arq_entries = IXL_AQ_LEN; hw->aq.num_asq_entries = IXL_AQ_LEN; hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; status = i40e_init_adminq(hw); if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "Unable to initialize Admin Queue, error %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } ixl_print_nvm_version(pf); if (status == I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "The driver for the device stopped " "because the NVM image is newer than expected.\n"); device_printf(dev, "You must install the most recent version of " "the network driver.\n"); error = EIO; goto err_out; } if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) { device_printf(dev, "The driver for the device detected " "a newer version of the NVM image than expected.\n"); device_printf(dev, "Please install the most recent version " "of the network driver.\n"); } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) { device_printf(dev, "The driver for the device detected " "an older version of the NVM image than expected.\n"); device_printf(dev, "Please update the NVM image.\n"); } /* Clear PXE mode */ i40e_clear_pxe_mode(hw); /* Get capabilities from the device */ error = ixl_get_hw_capabilities(pf); if (error) { - device_printf(dev, "HW capabilities failure!\n"); + device_printf(dev, "get_hw_capabilities failed: %d\n", + error); goto err_get_cap; } - /* - 
* Allocate interrupts and figure out number of queues to use - * for PF interface - */ - pf->msix = ixl_init_msix(pf); - /* Set up host memory cache */ status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (status) { device_printf(dev, "init_lan_hmc failed: %s\n", i40e_stat_str(hw, status)); goto err_get_cap; } - status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (status) { device_printf(dev, "configure_lan_hmc failed: %s\n", i40e_stat_str(hw, status)); goto err_mac_hmc; } - /* Init queue allocation manager */ - error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp); - if (error) { - device_printf(dev, "Failed to init queue manager for PF queues, error %d\n", - error); - goto err_mac_hmc; - } - /* reserve a contiguous allocation for the PF's VSI */ - error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag); - if (error) { - device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", - error); - goto err_mac_hmc; - } - device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", - pf->qtag.num_allocated, pf->qtag.num_active); - /* Disable LLDP from the firmware for certain NVM versions */ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4)) { i40e_aq_stop_lldp(hw, TRUE, NULL); pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED; } /* Get MAC addresses from hardware */ i40e_get_mac_addr(hw, hw->mac.addr); error = i40e_validate_mac_addr(hw->mac.addr); if (error) { device_printf(dev, "validate_mac_addr failed: %d\n", error); goto err_mac_hmc; } bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); + iflib_set_mac(ctx, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); + /* Set up the device filtering */ + bzero(&filter, sizeof(filter)); + filter.enable_ethtype = TRUE; + filter.enable_macvlan = TRUE; + filter.enable_fdir = FALSE; + filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; + if (i40e_set_filter_control(hw, &filter)) + device_printf(dev, "i40e_set_filter_control() failed\n"); + /* Query device FW LLDP status */ ixl_get_fw_lldp_status(pf); /* Tell FW to apply DCB config on link up */ - if ((hw->mac.type != I40E_MAC_X722) - && ((pf->hw.aq.api_maj_ver > 1) - || (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver >= 7))) - i40e_aq_set_dcb_parameters(hw, true, NULL); + i40e_aq_set_dcb_parameters(hw, true, NULL); - /* Initialize mac filter list for VSI */ - SLIST_INIT(&vsi->ftl); - - /* Set up SW VSI and allocate queue memory and rings */ - if (ixl_setup_stations(pf)) { - device_printf(dev, "setup stations failed!\n"); - error = ENOMEM; - goto err_mac_hmc; + /* Fill out iflib parameters */ + if (hw->mac.type == I40E_MAC_X722) + scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128; + else + scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; + if (vsi->enable_head_writeback) { + scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] + * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN); + scctx->isc_txrx = &ixl_txrx_hwb; + } else { + scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] + * sizeof(struct i40e_tx_desc), DBA_ALIGN); + scctx->isc_txrx = &ixl_txrx_dwb; } + scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] + * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN); + scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR); + scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS; + scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS; + scctx->isc_tx_tso_size_max = IXL_TSO_SIZE; + scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE; + 
scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size; + scctx->isc_tx_csum_flags = CSUM_OFFLOAD; + scctx->isc_capenable = IXL_CAPS; + INIT_DEBUGOUT("ixl_if_attach_pre: end"); + return (0); + +err_mac_hmc: + i40e_shutdown_lan_hmc(hw); +err_get_cap: + i40e_shutdown_adminq(hw); +err_out: + ixl_free_pci_resources(pf); +err_pci_res: + return (error); +} + +static int +ixl_if_attach_post(if_ctx_t ctx) +{ + device_t dev; + struct ixl_pf *pf; + struct i40e_hw *hw; + struct ixl_vsi *vsi; + int error = 0; + enum i40e_status_code status; + + INIT_DEBUGOUT("ixl_if_attach_post: begin"); + + dev = iflib_get_dev(ctx); + pf = iflib_get_softc(ctx); + vsi = &pf->vsi; + vsi->ifp = iflib_get_ifp(ctx); + hw = &pf->hw; + /* Setup OS network interface / ifnet */ - if (ixl_setup_interface(dev, vsi)) { + if (ixl_setup_interface(dev, pf)) { device_printf(dev, "interface setup failed!\n"); error = EIO; - goto err_late; + goto err; } /* Determine link state */ if (ixl_attach_get_link_status(pf)) { error = EINVAL; - goto err_late; + goto err; } error = ixl_switch_config(pf); if (error) { device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error); - goto err_late; + goto err; } + /* Add protocol filters to list */ + ixl_init_filters(vsi); + + /* Init queue allocation manager */ + error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp); + if (error) { + device_printf(dev, "Failed to init queue manager for PF queues, error %d\n", + error); + goto err; + } + /* reserve a contiguous allocation for the PF's VSI */ + error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, + max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag); + if (error) { + device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", + error); + goto err; + } + device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", + pf->qtag.num_allocated, pf->qtag.num_active); + /* Limit PHY interrupts to link, autoneg, and modules failure */ status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL); if (status) { device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s," " aq_err %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); - goto err_late; + goto err; } - /* Get the bus configuration and set the shared code's config */ + /* Get the bus configuration and set the shared code */ ixl_get_bus_info(pf); - /* - * In MSI-X mode, initialize the Admin Queue interrupt, - * so userland tools can communicate with the adapter regardless of - * the ifnet interface's status. 
- */ - if (pf->msix > 1) { - error = ixl_setup_adminq_msix(pf); - if (error) { - device_printf(dev, "ixl_setup_adminq_msix() error: %d\n", - error); - goto err_late; - } - error = ixl_setup_adminq_tq(pf); - if (error) { - device_printf(dev, "ixl_setup_adminq_tq() error: %d\n", - error); - goto err_late; - } - ixl_configure_intr0_msix(pf); - ixl_enable_intr0(hw); - - error = ixl_setup_queue_msix(vsi); - if (error) - device_printf(dev, "ixl_setup_queue_msix() error: %d\n", - error); - error = ixl_setup_queue_tqs(vsi); - if (error) - device_printf(dev, "ixl_setup_queue_tqs() error: %d\n", - error); - } else { - error = ixl_setup_legacy(pf); - - error = ixl_setup_adminq_tq(pf); - if (error) { - device_printf(dev, "ixl_setup_adminq_tq() error: %d\n", - error); - goto err_late; - } - - error = ixl_setup_queue_tqs(vsi); - if (error) - device_printf(dev, "ixl_setup_queue_tqs() error: %d\n", - error); + /* Keep admin queue interrupts active while driver is loaded */ + if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { + ixl_configure_intr0_msix(pf); + ixl_enable_intr0(hw); } - if (error) { - device_printf(dev, "interrupt setup error: %d\n", error); - } - /* Set initial advertised speed sysctl value */ ixl_set_initial_advertised_speeds(pf); /* Initialize statistics & add sysctls */ ixl_add_device_sysctls(pf); - ixl_pf_reset_stats(pf); ixl_update_stats_counters(pf); ixl_add_hw_stats(pf); - /* Register for VLAN events */ - vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, - ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); - vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, - ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); + hw->phy.get_link_info = true; + i40e_get_link_status(hw, &pf->link_up); + ixl_update_link_status(pf); #ifdef PCI_IOV ixl_initialize_sriov(pf); #endif -#ifdef DEV_NETMAP - if (vsi->num_rx_desc == vsi->num_tx_desc) { - vsi->queues[0].num_desc = vsi->num_rx_desc; - ixl_netmap_attach(vsi); - } else - device_printf(dev, - "Netmap is not supported when RX and TX descriptor ring sizes differ\n"); - -#endif /* DEV_NETMAP */ - #ifdef IXL_IW if (hw->func_caps.iwarp && ixl_enable_iwarp) { pf->iw_enabled = (pf->iw_msix > 0) ? true : false; if (pf->iw_enabled) { error = ixl_iw_pf_attach(pf); if (error) { device_printf(dev, "interfacing to iwarp driver failed: %d\n", error); - goto err_late; + goto err; } else device_printf(dev, "iWARP ready\n"); } else device_printf(dev, "iwarp disabled on this device (no msix vectors)\n"); } else { pf->iw_enabled = false; device_printf(dev, "The device is not iWARP enabled\n"); } #endif - INIT_DEBUGOUT("ixl_attach: end"); + INIT_DBG_DEV(dev, "end"); return (0); -err_late: - if (vsi->ifp != NULL) { - ether_ifdetach(vsi->ifp); - if_free(vsi->ifp); - } -err_mac_hmc: - i40e_shutdown_lan_hmc(hw); -err_get_cap: - i40e_shutdown_adminq(hw); -err_out: - ixl_free_pci_resources(pf); - ixl_free_vsi(vsi); - IXL_PF_LOCK_DESTROY(pf); +err: + INIT_DEBUGOUT("end: error %d", error); + /* ixl_if_detach() is called on error from this */ return (error); } -/********************************************************************* - * Device removal routine - * - * The detach entry point is called when the driver is being removed. - * This routine stops the adapter and deallocates all the resources - * that were allocated for driver operation. 
- * - * return 0 on success, positive on failure - *********************************************************************/ - static int -ixl_detach(device_t dev) +ixl_if_detach(if_ctx_t ctx) { - struct ixl_pf *pf = device_get_softc(dev); - struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = &pf->hw; + device_t dev = pf->dev; enum i40e_status_code status; #if defined(PCI_IOV) || defined(IXL_IW) int error; #endif - INIT_DEBUGOUT("ixl_detach: begin"); + INIT_DBG_DEV(dev, "begin"); - /* Make sure VLANS are not using driver */ - if (vsi->ifp->if_vlantrunk != NULL) { - device_printf(dev, "Vlan in use, detach first\n"); - return (EBUSY); +#ifdef IXL_IW + if (ixl_enable_iwarp && pf->iw_enabled) { + error = ixl_iw_pf_detach(pf); + if (error == EBUSY) { + device_printf(dev, "iwarp in use; stop it first.\n"); + return (error); + } } - +#endif #ifdef PCI_IOV error = pci_iov_detach(dev); if (error != 0) { device_printf(dev, "SR-IOV in use; detach first.\n"); return (error); } #endif - /* Remove all previously allocated media types */ - ifmedia_removeall(&vsi->media); + ifmedia_removeall(vsi->media); - ether_ifdetach(vsi->ifp); - if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) - ixl_stop(pf); - /* Shutdown LAN HMC */ - status = i40e_shutdown_lan_hmc(hw); - if (status) - device_printf(dev, - "Shutdown LAN HMC failed with code %d\n", status); + if (hw->hmc.hmc_obj) { + status = i40e_shutdown_lan_hmc(hw); + if (status) + device_printf(dev, + "i40e_shutdown_lan_hmc() failed with status %s\n", + i40e_stat_str(hw, status)); + } - /* Teardown LAN queue resources */ - ixl_teardown_queue_msix(vsi); - ixl_free_queue_tqs(vsi); /* Shutdown admin queue */ ixl_disable_intr0(hw); - ixl_teardown_adminq_msix(pf); - ixl_free_adminq_tq(pf); status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, - "Shutdown Admin queue failed with code %d\n", status); + "i40e_shutdown_adminq() failed with status %s\n", + i40e_stat_str(hw, status)); - /* Unregister VLAN events */ - if (vsi->vlan_attach != NULL) - EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); - if (vsi->vlan_detach != NULL) - EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); + ixl_pf_qmgr_destroy(&pf->qmgr); + ixl_free_pci_resources(pf); + ixl_free_mac_filters(vsi); + INIT_DBG_DEV(dev, "end"); + return (0); +} - callout_drain(&pf->timer); +/* TODO: Do shutdown-specific stuff here */ +static int +ixl_if_shutdown(if_ctx_t ctx) +{ + int error = 0; + INIT_DEBUGOUT("ixl_if_shutdown: begin"); + + /* TODO: Call ixl_if_stop()? */ + + /* TODO: Then setup low power mode */ + + return (error); +} + +static int +ixl_if_suspend(if_ctx_t ctx) +{ + int error = 0; + + INIT_DEBUGOUT("ixl_if_suspend: begin"); + + /* TODO: Call ixl_if_stop()? 
*/ + + /* TODO: Then setup low power mode */ + + return (error); +} + +static int +ixl_if_resume(if_ctx_t ctx) +{ + struct ifnet *ifp = iflib_get_ifp(ctx); + + INIT_DEBUGOUT("ixl_if_resume: begin"); + + /* Read & clear wake-up registers */ + + /* Required after D3->D0 transition */ + if (ifp->if_flags & IFF_UP) + ixl_if_init(ctx); + + return (0); +} + +/* Set Report Status queue fields to 0 */ +static void +ixl_init_tx_rsqs(struct ixl_vsi *vsi) +{ + if_softc_ctx_t scctx = vsi->shared; + struct ixl_tx_queue *tx_que; + int i, j; + + for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) { + struct tx_ring *txr = &tx_que->txr; + + txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0; + + for (j = 0; j < scctx->isc_ntxd[0]; j++) + txr->tx_rsq[j] = QIDX_INVALID; + } +} + +static void +ixl_init_tx_cidx(struct ixl_vsi *vsi) +{ + struct ixl_tx_queue *tx_que; + int i; + + for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) { + struct tx_ring *txr = &tx_que->txr; + + txr->tx_cidx_processed = 0; + } +} + +void +ixl_if_init(if_ctx_t ctx) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = &pf->hw; + device_t dev = iflib_get_dev(ctx); + u8 tmpaddr[ETHER_ADDR_LEN]; + int ret; + + /* + * If the aq is dead here, it probably means something outside of the driver + * did something to the adapter, like a PF reset. + * So rebuild the driver's state here if that occurs. + */ + if (!i40e_check_asq_alive(&pf->hw)) { + device_printf(dev, "Admin Queue is down; resetting...\n"); + ixl_teardown_hw_structs(pf); + ixl_reset(pf); + } + + /* Get the latest mac address... User might use a LAA */ + bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN); + if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && + (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { + ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); + bcopy(tmpaddr, hw->mac.addr, ETH_ALEN); + ret = i40e_aq_mac_address_write(hw, + I40E_AQC_WRITE_TYPE_LAA_ONLY, + hw->mac.addr, NULL); + if (ret) { + device_printf(dev, "LLA address change failed!!\n"); + return; + } + ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); + } + + iflib_set_mac(ctx, hw->mac.addr); + + /* Prepare the VSI: rings, hmc contexts, etc... */ + if (ixl_initialize_vsi(vsi)) { + device_printf(dev, "initialize vsi failed!!\n"); + return; + } + + // TODO: Call iflib setup multicast filters here? 
+ // It's called in ixgbe in D5213 + ixl_if_multi_set(ctx); + + /* Set up RSS */ + ixl_config_rss(pf); + + /* Set up MSI/X routing and the ITR settings */ + if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { + ixl_configure_queue_intr_msix(pf); + ixl_configure_itr(pf); + } else + ixl_configure_legacy(pf); + + if (vsi->enable_head_writeback) + ixl_init_tx_cidx(vsi); + else + ixl_init_tx_rsqs(vsi); + + ixl_enable_rings(vsi); + + i40e_aq_set_default_vsi(hw, vsi->seid, NULL); + + ixl_reconfigure_filters(vsi); + #ifdef IXL_IW if (ixl_enable_iwarp && pf->iw_enabled) { - error = ixl_iw_pf_detach(pf); - if (error == EBUSY) { - device_printf(dev, "iwarp in use; stop it first.\n"); - return (error); - } + ret = ixl_iw_pf_init(pf); + if (ret) + device_printf(dev, + "initialize iwarp failed, code %d\n", ret); } #endif +} -#ifdef DEV_NETMAP - netmap_detach(vsi->ifp); -#endif /* DEV_NETMAP */ - ixl_pf_qmgr_destroy(&pf->qmgr); - ixl_free_pci_resources(pf); - bus_generic_detach(dev); - if_free(vsi->ifp); - ixl_free_vsi(vsi); - IXL_PF_LOCK_DESTROY(pf); +void +ixl_if_stop(if_ctx_t ctx) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + + INIT_DEBUGOUT("ixl_if_stop: begin\n"); + + // TODO: This may need to be reworked +#ifdef IXL_IW + /* Stop iWARP device */ + if (ixl_enable_iwarp && pf->iw_enabled) + ixl_iw_pf_stop(pf); +#endif + + ixl_disable_rings_intr(vsi); + ixl_disable_rings(vsi); +} + +static int +ixl_if_msix_intr_assign(if_ctx_t ctx, int msix) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_rx_queue *rx_que = vsi->rx_queues; + struct ixl_tx_queue *tx_que = vsi->tx_queues; + int err, i, rid, vector = 0; + char buf[16]; + + /* Admin Que must use vector 0*/ + rid = vector + 1; + err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, + ixl_msix_adminq, pf, 0, "aq"); + if (err) { + iflib_irq_free(ctx, &vsi->irq); + device_printf(iflib_get_dev(ctx), + "Failed to register Admin que handler"); + return (err); + } + // TODO: Re-enable this at some point + // iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov"); + + /* Now set up the stations */ + for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++, rx_que++) { + rid = vector + 1; + + snprintf(buf, sizeof(buf), "rxq%d", i); + err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, + IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf); + /* XXX: Does the driver work as expected if there are fewer num_rx_queues than + * what's expected in the iflib context? */ + if (err) { + device_printf(iflib_get_dev(ctx), + "Failed to allocate q int %d err: %d", i, err); + vsi->num_rx_queues = i + 1; + goto fail; + } + rx_que->msix = vector; + } + + bzero(buf, sizeof(buf)); + + for (i = 0; i < vsi->num_tx_queues; i++, tx_que++) { + snprintf(buf, sizeof(buf), "txq%d", i); + iflib_softirq_alloc_generic(ctx, + &vsi->rx_queues[i % vsi->num_rx_queues].que_irq, + IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); + + /* TODO: Maybe call a strategy function for this to figure out which + * interrupts to map Tx queues to. I don't know if there's an immediately + * better way than this other than a user-supplied map, though. 
*/ + tx_que->msix = (i % vsi->num_rx_queues) + 1; + } + return (0); +fail: + iflib_irq_free(ctx, &vsi->irq); + rx_que = vsi->rx_queues; + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) + iflib_irq_free(ctx, &rx_que->que_irq); + return (err); } -/********************************************************************* +/* + * Enable all interrupts * - * Shutdown entry point + * Called in: + * iflib_init_locked, after ixl_if_init() + */ +static void +ixl_if_enable_intr(if_ctx_t ctx) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = vsi->hw; + struct ixl_rx_queue *que = vsi->rx_queues; + + ixl_enable_intr0(hw); + /* Enable queue interrupts */ + for (int i = 0; i < vsi->num_rx_queues; i++, que++) + /* TODO: Queue index parameter is probably wrong */ + ixl_enable_queue(hw, que->rxr.me); +} + +/* + * Disable queue interrupts * - **********************************************************************/ + * Other interrupt causes need to remain active. + */ +static void +ixl_if_disable_intr(if_ctx_t ctx) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = vsi->hw; + struct ixl_rx_queue *rx_que = vsi->rx_queues; + if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) + ixl_disable_queue(hw, rx_que->msix - 1); + } else { + // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF + // stops queues from triggering interrupts + wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); + } +} + static int -ixl_shutdown(device_t dev) +ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { - struct ixl_pf *pf = device_get_softc(dev); - ixl_stop(pf); + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = vsi->hw; + struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid]; + + ixl_enable_queue(hw, rx_que->msix - 1); return (0); +} + +static int +ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = vsi->hw; + struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid]; + + ixl_enable_queue(hw, tx_que->msix - 1); + + return (0); +} + +static int +ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + if_softc_ctx_t scctx = vsi->shared; + struct ixl_tx_queue *que; + // int i; + int i, j, error = 0; + + MPASS(vsi->num_tx_queues > 0); + MPASS(ntxqs == 1); + MPASS(vsi->num_tx_queues == ntxqsets); + + /* Allocate queue structure memory */ + if (!(vsi->tx_queues = + (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) { + device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); + return (ENOMEM); + } + + for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { + struct tx_ring *txr = &que->txr; + + txr->me = i; + que->vsi = vsi; + + if (!vsi->enable_head_writeback) { + /* Allocate report status array */ + if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) { + device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n"); + error = ENOMEM; + goto fail; + } + /* Init report status array */ + for (j = 0; j < scctx->isc_ntxd[0]; j++) + txr->tx_rsq[j] = QIDX_INVALID; + } + /* get the virtual and physical address of the hardware queues */ + txr->tail = I40E_QTX_TAIL(txr->me); + txr->tx_base = (struct i40e_tx_desc *)vaddrs[i 
* ntxqs]; + txr->tx_paddr = paddrs[i * ntxqs]; + txr->que = que; + } + + return (0); +fail: + ixl_if_queues_free(ctx); + return (error); +} + +static int +ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct ixl_rx_queue *que; + int i, error = 0; + + MPASS(vsi->num_rx_queues > 0); + MPASS(nrxqs == 1); + MPASS(vsi->num_rx_queues == nrxqsets); + + /* Allocate queue structure memory */ + if (!(vsi->rx_queues = + (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) * + nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) { + device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); + error = ENOMEM; + goto fail; + } + + for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) { + struct rx_ring *rxr = &que->rxr; + + rxr->me = i; + que->vsi = vsi; + + /* get the virtual and physical address of the hardware queues */ + rxr->tail = I40E_QRX_TAIL(rxr->me); + rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs]; + rxr->rx_paddr = paddrs[i * nrxqs]; + rxr->que = que; + } + + return (0); +fail: + ixl_if_queues_free(ctx); + return (error); +} + +static void +ixl_if_queues_free(if_ctx_t ctx) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + + if (vsi->enable_head_writeback) { + struct ixl_tx_queue *que; + int i = 0; + + for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) { + struct tx_ring *txr = &que->txr; + if (txr->tx_rsq != NULL) { + free(txr->tx_rsq, M_IXL); + txr->tx_rsq = NULL; + } + } + } + + if (vsi->tx_queues != NULL) { + free(vsi->tx_queues, M_IXL); + vsi->tx_queues = NULL; + } + if (vsi->rx_queues != NULL) { + free(vsi->rx_queues, M_IXL); + vsi->rx_queues = NULL; + } +} + +void +ixl_update_link_status(struct ixl_pf *pf) +{ + struct ixl_vsi *vsi = &pf->vsi; + u64 baudrate; + + if (pf->link_up) { + if (vsi->link_active == FALSE) { + vsi->link_active = TRUE; + baudrate = ixl_max_aq_speed_to_value(pf->link_speed); + iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate); + ixl_link_up_msg(pf); +#ifdef PCI_IOV + ixl_broadcast_link_state(pf); +#endif + + } + } else { /* Link down */ + if (vsi->link_active == TRUE) { + vsi->link_active = FALSE; + iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0); +#ifdef PCI_IOV + ixl_broadcast_link_state(pf); +#endif + } + } +} + +static int +ixl_process_adminq(struct ixl_pf *pf, u16 *pending) +{ + enum i40e_status_code status = I40E_SUCCESS; + struct i40e_arq_event_info event; + struct i40e_hw *hw = &pf->hw; + device_t dev = pf->dev; + u16 opcode; + u32 loop = 0, reg; + + event.buf_len = IXL_AQ_BUF_SZ; + event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO); + if (!event.msg_buf) { + device_printf(dev, "%s: Unable to allocate memory for Admin" + " Queue event!\n", __func__); + return (ENOMEM); + } + + /* clean and process any events */ + do { + status = i40e_clean_arq_element(hw, &event, pending); + if (status) + break; + opcode = LE16_TO_CPU(event.desc.opcode); + ixl_dbg(pf, IXL_DBG_AQ, + "Admin Queue event: %#06x\n", opcode); + switch (opcode) { + case i40e_aqc_opc_get_link_status: + ixl_link_event(pf, &event); + break; + case i40e_aqc_opc_send_msg_to_pf: +#ifdef PCI_IOV + ixl_handle_vf_msg(pf, &event); +#endif + break; + /* + * This should only occur on no-drop queues, which + * aren't currently configured. 
+ */ + case i40e_aqc_opc_event_lan_overflow: + device_printf(dev, "LAN overflow event\n"); + break; + default: + break; + } + } while (*pending && (loop++ < IXL_ADM_LIMIT)); + + free(event.msg_buf, M_IXL); + + /* Re-enable admin queue interrupt cause */ + reg = rd32(hw, I40E_PFINT_ICR0_ENA); + reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, reg); + + return (status); +} + +static void +ixl_if_update_admin_status(if_ctx_t ctx) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct i40e_hw *hw = &pf->hw; + u16 pending; + + if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) + ixl_handle_empr_reset(pf); + + if (pf->state & IXL_PF_STATE_MDD_PENDING) + ixl_handle_mdd_event(pf); + +#ifdef PCI_IOV + if (pf->state & IXL_PF_STATE_VF_RESET_REQ) + iflib_iov_intr_deferred(ctx); +#endif + + ixl_process_adminq(pf, &pending); + ixl_update_link_status(pf); + + /* + * If there are still messages to process, reschedule ourselves. + * Otherwise, re-enable our interrupt and go to sleep. + */ + if (pending > 0) + iflib_admin_intr_deferred(ctx); + else + ixl_enable_intr0(hw); +} + +static void +ixl_if_multi_set(if_ctx_t ctx) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = vsi->hw; + int mcnt = 0, flags; + + IOCTL_DEBUGOUT("ixl_if_multi_set: begin"); + + mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR); + /* delete existing MC filters */ + ixl_del_multi(vsi); + + if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { + i40e_aq_set_vsi_multicast_promiscuous(hw, + vsi->seid, TRUE, NULL); + return; + } + /* (re-)install filters for all mcast addresses */ + mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi); + + if (mcnt > 0) { + flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); + ixl_add_hw_filters(vsi, flags, mcnt); + } + + IOCTL_DEBUGOUT("ixl_if_multi_set: end"); +} + +static int +ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + + IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); + if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - + ETHER_VLAN_ENCAP_LEN) + return (EINVAL); + + vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + ETHER_VLAN_ENCAP_LEN; + + return (0); +} + +static void +ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct i40e_hw *hw = &pf->hw; + + INIT_DEBUGOUT("ixl_media_status: begin"); + + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; + + if (!pf->link_up) { + return; + } + + ifmr->ifm_status |= IFM_ACTIVE; + /* Hardware is always full-duplex */ + ifmr->ifm_active |= IFM_FDX; + + switch (hw->phy.link_info.phy_type) { + /* 100 M */ + case I40E_PHY_TYPE_100BASE_TX: + ifmr->ifm_active |= IFM_100_TX; + break; + /* 1 G */ + case I40E_PHY_TYPE_1000BASE_T: + ifmr->ifm_active |= IFM_1000_T; + break; + case I40E_PHY_TYPE_1000BASE_SX: + ifmr->ifm_active |= IFM_1000_SX; + break; + case I40E_PHY_TYPE_1000BASE_LX: + ifmr->ifm_active |= IFM_1000_LX; + break; + case I40E_PHY_TYPE_1000BASE_T_OPTICAL: + ifmr->ifm_active |= IFM_1000_T; + break; + /* 10 G */ + case I40E_PHY_TYPE_10GBASE_SFPP_CU: + ifmr->ifm_active |= IFM_10G_TWINAX; + break; + case I40E_PHY_TYPE_10GBASE_SR: + ifmr->ifm_active |= IFM_10G_SR; + break; + case I40E_PHY_TYPE_10GBASE_LR: + ifmr->ifm_active |= IFM_10G_LR; + break; + case I40E_PHY_TYPE_10GBASE_T: + ifmr->ifm_active |= IFM_10G_T; + break; + case I40E_PHY_TYPE_XAUI: + case 
I40E_PHY_TYPE_XFI: + ifmr->ifm_active |= IFM_10G_TWINAX; + break; + case I40E_PHY_TYPE_10GBASE_AOC: + ifmr->ifm_active |= IFM_10G_AOC; + break; + /* 25 G */ + case I40E_PHY_TYPE_25GBASE_KR: + ifmr->ifm_active |= IFM_25G_KR; + break; + case I40E_PHY_TYPE_25GBASE_CR: + ifmr->ifm_active |= IFM_25G_CR; + break; + case I40E_PHY_TYPE_25GBASE_SR: + ifmr->ifm_active |= IFM_25G_SR; + break; + case I40E_PHY_TYPE_25GBASE_LR: + ifmr->ifm_active |= IFM_25G_LR; + break; + case I40E_PHY_TYPE_25GBASE_AOC: + ifmr->ifm_active |= IFM_25G_AOC; + break; + case I40E_PHY_TYPE_25GBASE_ACC: + ifmr->ifm_active |= IFM_25G_ACC; + break; + /* 40 G */ + case I40E_PHY_TYPE_40GBASE_CR4: + case I40E_PHY_TYPE_40GBASE_CR4_CU: + ifmr->ifm_active |= IFM_40G_CR4; + break; + case I40E_PHY_TYPE_40GBASE_SR4: + ifmr->ifm_active |= IFM_40G_SR4; + break; + case I40E_PHY_TYPE_40GBASE_LR4: + ifmr->ifm_active |= IFM_40G_LR4; + break; + case I40E_PHY_TYPE_XLAUI: + ifmr->ifm_active |= IFM_OTHER; + break; + case I40E_PHY_TYPE_1000BASE_KX: + ifmr->ifm_active |= IFM_1000_KX; + break; + case I40E_PHY_TYPE_SGMII: + ifmr->ifm_active |= IFM_1000_SGMII; + break; + /* ERJ: What's the difference between these? */ + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + ifmr->ifm_active |= IFM_10G_CR1; + break; + case I40E_PHY_TYPE_10GBASE_KX4: + ifmr->ifm_active |= IFM_10G_KX4; + break; + case I40E_PHY_TYPE_10GBASE_KR: + ifmr->ifm_active |= IFM_10G_KR; + break; + case I40E_PHY_TYPE_SFI: + ifmr->ifm_active |= IFM_10G_SFI; + break; + /* Our single 20G media type */ + case I40E_PHY_TYPE_20GBASE_KR2: + ifmr->ifm_active |= IFM_20G_KR2; + break; + case I40E_PHY_TYPE_40GBASE_KR4: + ifmr->ifm_active |= IFM_40G_KR4; + break; + case I40E_PHY_TYPE_XLPPI: + case I40E_PHY_TYPE_40GBASE_AOC: + ifmr->ifm_active |= IFM_40G_XLPPI; + break; + /* Unknown to driver */ + default: + ifmr->ifm_active |= IFM_UNKNOWN; + break; + } + /* Report flow control status as well */ + if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) + ifmr->ifm_active |= IFM_ETH_TXPAUSE; + if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) + ifmr->ifm_active |= IFM_ETH_RXPAUSE; +} + +static int +ixl_if_media_change(if_ctx_t ctx) +{ + struct ifmedia *ifm = iflib_get_media(ctx); + + INIT_DEBUGOUT("ixl_media_change: begin"); + + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) + return (EINVAL); + + if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n"); + return (ENODEV); +} + +static int +ixl_if_promisc_set(if_ctx_t ctx, int flags) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct ifnet *ifp = iflib_get_ifp(ctx); + struct i40e_hw *hw = vsi->hw; + int err; + bool uni = FALSE, multi = FALSE; + + if (flags & IFF_PROMISC) + uni = multi = TRUE; + else if (flags & IFF_ALLMULTI || + if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR) + multi = TRUE; + + err = i40e_aq_set_vsi_unicast_promiscuous(hw, + vsi->seid, uni, NULL, true); + if (err) + return (err); + err = i40e_aq_set_vsi_multicast_promiscuous(hw, + vsi->seid, multi, NULL); + return (err); +} + +static void +ixl_if_timer(if_ctx_t ctx, uint16_t qid) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + //struct i40e_hw *hw = &pf->hw; + //struct ixl_tx_queue *que = &vsi->tx_queues[qid]; + #if 0 + u32 mask; + + /* + ** Check status of the queues + */ + mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); + + /* If queue param has outstanding work, trigger sw irq */ + // TODO: TX queues in iflib don't use HW interrupts; does this do anything? 
+ if (que->busy) + wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask); +#endif + + if (qid != 0) + return; + + /* Fire off the adminq task */ + iflib_admin_intr_deferred(ctx); + + /* Update stats */ + ixl_update_stats_counters(pf); +} + +static void +ixl_if_vlan_register(if_ctx_t ctx, u16 vtag) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = vsi->hw; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + ++vsi->num_vlans; + ixl_add_filter(vsi, hw->mac.addr, vtag); +} + +static void +ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + struct i40e_hw *hw = vsi->hw; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + --vsi->num_vlans; + ixl_del_filter(vsi, hw->mac.addr, vtag); +} + +static uint64_t +ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ixl_vsi *vsi = &pf->vsi; + if_t ifp = iflib_get_ifp(ctx); + + switch (cnt) { + case IFCOUNTER_IPACKETS: + return (vsi->ipackets); + case IFCOUNTER_IERRORS: + return (vsi->ierrors); + case IFCOUNTER_OPACKETS: + return (vsi->opackets); + case IFCOUNTER_OERRORS: + return (vsi->oerrors); + case IFCOUNTER_COLLISIONS: + /* Collisions are by standard impossible in 40G/10G Ethernet */ + return (0); + case IFCOUNTER_IBYTES: + return (vsi->ibytes); + case IFCOUNTER_OBYTES: + return (vsi->obytes); + case IFCOUNTER_IMCASTS: + return (vsi->imcasts); + case IFCOUNTER_OMCASTS: + return (vsi->omcasts); + case IFCOUNTER_IQDROPS: + return (vsi->iqdrops); + case IFCOUNTER_OQDROPS: + return (vsi->oqdrops); + case IFCOUNTER_NOPROTO: + return (vsi->noproto); + default: + return (if_get_counter_default(ifp, cnt)); + } +} + +static void +ixl_if_vflr_handle(if_ctx_t ctx) +{ + IXL_DEV_ERR(iflib_get_dev(ctx), ""); + + // TODO: call ixl_handle_vflr() +} + +static int +ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + + if (pf->read_i2c_byte == NULL) + return (EINVAL); + + for (int i = 0; i < req->len; i++) + if (pf->read_i2c_byte(pf, req->offset + i, + req->dev_addr, &req->data[i])) + return (EIO); + return (0); +} + +static int +ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data) +{ + struct ixl_pf *pf = iflib_get_softc(ctx); + struct ifdrv *ifd = (struct ifdrv *)data; + int error = 0; + + /* NVM update command */ + if (ifd->ifd_cmd == I40E_NVM_ACCESS) + error = ixl_handle_nvmupd_cmd(pf, ifd); + else + error = EINVAL; + + return (error); +} + +static int +ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused) +{ + struct ixl_vsi *vsi = arg; + + if (ifma->ifma_addr->sa_family != AF_LINK) + return (0); + ixl_add_mc_filter(vsi, + (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); + return (1); +} + +/* + * Sanity check and save off tunable values. 
+ */ +static void +ixl_save_pf_tunables(struct ixl_pf *pf) +{ + device_t dev = pf->dev; + + /* Save tunable information */ + pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter; + pf->dbg_mask = ixl_core_debug_mask; + pf->hw.debug_mask = ixl_shared_debug_mask; + pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback); +#if 0 + pf->dynamic_rx_itr = ixl_dynamic_rx_itr; + pf->dynamic_tx_itr = ixl_dynamic_tx_itr; +#endif + + if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0) + pf->i2c_access_method = 0; + else + pf->i2c_access_method = ixl_i2c_access_method; + + if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) { + device_printf(dev, "Invalid tx_itr value of %d set!\n", + ixl_tx_itr); + device_printf(dev, "tx_itr must be between %d and %d, " + "inclusive\n", + 0, IXL_MAX_ITR); + device_printf(dev, "Using default value of %d instead\n", + IXL_ITR_4K); + pf->tx_itr = IXL_ITR_4K; + } else + pf->tx_itr = ixl_tx_itr; + + if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) { + device_printf(dev, "Invalid rx_itr value of %d set!\n", + ixl_rx_itr); + device_printf(dev, "rx_itr must be between %d and %d, " + "inclusive\n", + 0, IXL_MAX_ITR); + device_printf(dev, "Using default value of %d instead\n", + IXL_ITR_8K); + pf->rx_itr = IXL_ITR_8K; + } else + pf->rx_itr = ixl_rx_itr; } Index: head/sys/dev/ixl/if_ixlv.c =================================================================== --- head/sys/dev/ixl/if_ixlv.c (revision 335337) +++ head/sys/dev/ixl/if_ixlv.c (revision 335338) @@ -1,3244 +1,3362 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl.h" #include "ixlv.h" /********************************************************************* * Driver version *********************************************************************/ #define IXLV_DRIVER_VERSION_MAJOR 1 #define IXLV_DRIVER_VERSION_MINOR 5 #define IXLV_DRIVER_VERSION_BUILD 4 char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "." 
__XSTRING(IXLV_DRIVER_VERSION_MINOR) "." - __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-k"; + __XSTRING(IXLV_DRIVER_VERSION_BUILD) "-iflib-k"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on - * Last field stores an index into ixlv_strings - * Last entry must be all 0s * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } + * ( Vendor ID, Device ID, Branding String ) *********************************************************************/ -static ixl_vendor_info_t ixlv_vendor_info_array[] = +static pci_vendor_info_t ixlv_vendor_info_array[] = { {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0}, /* required last entry */ - {0, 0, 0, 0, 0} + PVID_END }; /********************************************************************* - * Table of branding strings - *********************************************************************/ - -static char *ixlv_strings[] = { - "Intel(R) Ethernet Connection 700 Series VF Driver" -}; - - -/********************************************************************* * Function prototypes *********************************************************************/ -static int ixlv_probe(device_t); -static int ixlv_attach(device_t); -static int ixlv_detach(device_t); -static int ixlv_shutdown(device_t); -static void ixlv_init_locked(struct ixlv_sc *); +static void *ixlv_register(device_t dev); +static int ixlv_if_attach_pre(if_ctx_t ctx); +static int ixlv_if_attach_post(if_ctx_t ctx); +static int ixlv_if_detach(if_ctx_t ctx); +static int ixlv_if_shutdown(if_ctx_t ctx); +static int ixlv_if_suspend(if_ctx_t ctx); +static int ixlv_if_resume(if_ctx_t ctx); +static int ixlv_if_msix_intr_assign(if_ctx_t ctx, int msix); +static void ixlv_if_enable_intr(if_ctx_t ctx); +static void ixlv_if_disable_intr(if_ctx_t ctx); +static int ixlv_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); +static int ixlv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); +static int ixlv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); +static void ixlv_if_queues_free(if_ctx_t ctx); +static void ixlv_if_update_admin_status(if_ctx_t ctx); +static void ixlv_if_multi_set(if_ctx_t ctx); +static int ixlv_if_mtu_set(if_ctx_t ctx, uint32_t mtu); +static void ixlv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); +static int ixlv_if_media_change(if_ctx_t ctx); +static int ixlv_if_promisc_set(if_ctx_t ctx, int flags); +static void ixlv_if_timer(if_ctx_t ctx, uint16_t qid); +static void ixlv_if_vlan_register(if_ctx_t ctx, u16 vtag); +static void ixlv_if_vlan_unregister(if_ctx_t ctx, u16 vtag); +static uint64_t ixlv_if_get_counter(if_ctx_t ctx, ift_counter cnt); +static void ixlv_if_stop(if_ctx_t ctx); + static int ixlv_allocate_pci_resources(struct ixlv_sc *); +static int ixlv_reset_complete(struct i40e_hw *); +static int ixlv_setup_vc(struct ixlv_sc *); +static int ixlv_reset(struct ixlv_sc *); +static int ixlv_vf_config(struct ixlv_sc *); +static void ixlv_init_filters(struct ixlv_sc *); static void ixlv_free_pci_resources(struct ixlv_sc *); -static int ixlv_assign_msix(struct ixlv_sc *); -static int ixlv_init_msix(struct ixlv_sc *); -static int ixlv_init_taskqueue(struct ixlv_sc *); -static int ixlv_setup_queues(struct ixlv_sc *); +static void ixlv_free_filters(struct ixlv_sc *); +static void 
ixlv_setup_interface(device_t, struct ixl_vsi *); +static void ixlv_add_sysctls(struct ixlv_sc *); +static void ixlv_enable_adminq_irq(struct i40e_hw *); +static void ixlv_disable_adminq_irq(struct i40e_hw *); +static void ixlv_enable_queue_irq(struct i40e_hw *, int); +static void ixlv_disable_queue_irq(struct i40e_hw *, int); static void ixlv_config_rss(struct ixlv_sc *); static void ixlv_stop(struct ixlv_sc *); -static void ixlv_add_multi(struct ixl_vsi *); -static void ixlv_del_multi(struct ixl_vsi *); -static void ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que); -static void ixlv_free_queues(struct ixl_vsi *); -static int ixlv_setup_interface(device_t, struct ixlv_sc *); -static int ixlv_teardown_adminq_msix(struct ixlv_sc *); -static int ixlv_media_change(struct ifnet *); -static void ixlv_media_status(struct ifnet *, struct ifmediareq *); - -static void ixlv_local_timer(void *); - static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16); static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr); -static void ixlv_init_filters(struct ixlv_sc *); -static void ixlv_free_filters(struct ixlv_sc *); - -static void ixlv_msix_que(void *); -static void ixlv_msix_adminq(void *); -static void ixlv_do_adminq(void *, int); +static int ixlv_msix_que(void *); +static int ixlv_msix_adminq(void *); static void ixlv_do_adminq_locked(struct ixlv_sc *sc); -static void ixlv_handle_que(void *, int); -static int ixlv_reset(struct ixlv_sc *); -static int ixlv_reset_complete(struct i40e_hw *); -static void ixlv_set_queue_rx_itr(struct ixl_queue *); -static void ixlv_set_queue_tx_itr(struct ixl_queue *); static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *, enum i40e_status_code); static void ixlv_configure_itr(struct ixlv_sc *); -static void ixlv_enable_adminq_irq(struct i40e_hw *); -static void ixlv_disable_adminq_irq(struct i40e_hw *); -static void ixlv_enable_queue_irq(struct i40e_hw *, int); -static void ixlv_disable_queue_irq(struct i40e_hw *, int); - static void ixlv_setup_vlan_filters(struct ixlv_sc *); -static void ixlv_register_vlan(void *, struct ifnet *, u16); -static void ixlv_unregister_vlan(void *, struct ifnet *, u16); -static void ixlv_init_hw(struct ixlv_sc *); -static int ixlv_setup_vc(struct ixlv_sc *); -static int ixlv_vf_config(struct ixlv_sc *); - -static void ixlv_cap_txcsum_tso(struct ixl_vsi *, - struct ifnet *, int); - static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed); static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS); -static void ixlv_add_sysctls(struct ixlv_sc *); +// static void ixlv_add_sysctls(struct ixlv_sc *); #ifdef IXL_DEBUG static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); #endif /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixlv_methods[] = { /* Device interface */ - DEVMETHOD(device_probe, ixlv_probe), - DEVMETHOD(device_attach, ixlv_attach), - DEVMETHOD(device_detach, ixlv_detach), - DEVMETHOD(device_shutdown, ixlv_shutdown), - {0, 0} + DEVMETHOD(device_register, ixlv_register), + DEVMETHOD(device_probe, iflib_device_probe), + DEVMETHOD(device_attach, iflib_device_attach), + DEVMETHOD(device_detach, iflib_device_detach), + DEVMETHOD(device_shutdown, iflib_device_shutdown), + DEVMETHOD_END }; static driver_t ixlv_driver = { "ixlv", ixlv_methods, sizeof(struct ixlv_sc), }; 
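For readers less familiar with iflib, the split above is the standard two-level registration: the PCI driver_t only forwards probe/attach/detach/shutdown to iflib, device_register hands iflib the driver's if_shared_ctx (defined further down as ixlv_sctx_init), and all driver-specific work happens in the ifdi_* methods registered next. A minimal sketch of the pattern follows; the foo_* names are illustrative stand-ins, not symbols from this change.

/*
 * Hypothetical skeleton of an iflib driver registration (illustrative only).
 */
static void *
foo_register(device_t dev)
{
	/* iflib reads PCI IDs, ring limits, etc. from this if_shared_ctx */
	return (&foo_sctx_init);
}

static device_method_t foo_methods[] = {
	/* PCI entry points are thin wrappers; iflib drives the real work */
	DEVMETHOD(device_register, foo_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD_END
};

static device_method_t foo_if_methods[] = {
	/* iflib calls back into the driver through these ifdi_* methods */
	DEVMETHOD(ifdi_attach_pre, foo_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, foo_if_attach_post),
	DEVMETHOD(ifdi_detach, foo_if_detach),
	DEVMETHOD_END
};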
devclass_t ixlv_devclass; DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0); MODULE_DEPEND(ixlv, pci, 1, 1, 1); MODULE_DEPEND(ixlv, ether, 1, 1, 1); +MODULE_DEPEND(ixlv, iflib, 1, 1, 1); +static device_method_t ixlv_if_methods[] = { + DEVMETHOD(ifdi_attach_pre, ixlv_if_attach_pre), + DEVMETHOD(ifdi_attach_post, ixlv_if_attach_post), + DEVMETHOD(ifdi_detach, ixlv_if_detach), + DEVMETHOD(ifdi_shutdown, ixlv_if_shutdown), + DEVMETHOD(ifdi_suspend, ixlv_if_suspend), + DEVMETHOD(ifdi_resume, ixlv_if_resume), + DEVMETHOD(ifdi_init, ixlv_if_init), + DEVMETHOD(ifdi_stop, ixlv_if_stop), + DEVMETHOD(ifdi_msix_intr_assign, ixlv_if_msix_intr_assign), + DEVMETHOD(ifdi_intr_enable, ixlv_if_enable_intr), + DEVMETHOD(ifdi_intr_disable, ixlv_if_disable_intr), + DEVMETHOD(ifdi_queue_intr_enable, ixlv_if_queue_intr_enable), + DEVMETHOD(ifdi_tx_queues_alloc, ixlv_if_tx_queues_alloc), + DEVMETHOD(ifdi_rx_queues_alloc, ixlv_if_rx_queues_alloc), + DEVMETHOD(ifdi_queues_free, ixlv_if_queues_free), + DEVMETHOD(ifdi_update_admin_status, ixlv_if_update_admin_status), + DEVMETHOD(ifdi_multi_set, ixlv_if_multi_set), + DEVMETHOD(ifdi_mtu_set, ixlv_if_mtu_set), + // DEVMETHOD(ifdi_crcstrip_set, ixlv_if_crcstrip_set), + DEVMETHOD(ifdi_media_status, ixlv_if_media_status), + DEVMETHOD(ifdi_media_change, ixlv_if_media_change), + DEVMETHOD(ifdi_promisc_set, ixlv_if_promisc_set), + DEVMETHOD(ifdi_timer, ixlv_if_timer), + DEVMETHOD(ifdi_vlan_register, ixlv_if_vlan_register), + DEVMETHOD(ifdi_vlan_unregister, ixlv_if_vlan_unregister), + DEVMETHOD(ifdi_get_counter, ixlv_if_get_counter), + DEVMETHOD_END +}; + +static driver_t ixlv_if_driver = { + "ixlv_if", ixlv_if_methods, sizeof(struct ixlv_sc) +}; + /* ** TUNEABLE PARAMETERS: */ static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0, "IXLV driver parameters"); /* ** Number of descriptors per ring: ** - TX and RX sizes are independently configurable */ static int ixlv_tx_ring_size = IXL_DEFAULT_RING; TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size); SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN, &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size"); static int ixlv_rx_ring_size = IXL_DEFAULT_RING; TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size); SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN, &ixlv_rx_ring_size, 0, "RX Descriptor Ring Size"); /* Set to zero to auto calculate */ int ixlv_max_queues = 0; TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues); SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN, &ixlv_max_queues, 0, "Number of Queues"); /* -** Number of entries in Tx queue buf_ring. -** Increasing this will reduce the number of -** errors when transmitting fragmented UDP -** packets. -*/ -static int ixlv_txbrsz = DEFAULT_TXBRSZ; -TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz); -SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN, - &ixlv_txbrsz, 0, "TX Buf Ring Size"); -/* * Different method for processing TX descriptor * completion.
*/ static int ixlv_enable_head_writeback = 0; TUNABLE_INT("hw.ixlv.enable_head_writeback", &ixlv_enable_head_writeback); SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, &ixlv_enable_head_writeback, 0, "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); /* ** Controls for Interrupt Throttling ** - true/false for dynamic adjustment ** - default values for static ITR */ int ixlv_dynamic_rx_itr = 0; TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); int ixlv_dynamic_tx_itr = 0; TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); int ixlv_rx_itr = IXL_ITR_8K; TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN, &ixlv_rx_itr, 0, "RX Interrupt Rate"); int ixlv_tx_itr = IXL_ITR_4K; TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN, &ixlv_tx_itr, 0, "TX Interrupt Rate"); -/********************************************************************* - * Device identification routine - * - * ixlv_probe determines if the driver should be loaded on - * the hardware based on PCI vendor/device id of the device. - * - * return BUS_PROBE_DEFAULT on success, positive on failure - *********************************************************************/ +extern struct if_txrx ixl_txrx; -static int -ixlv_probe(device_t dev) -{ - ixl_vendor_info_t *ent; +static struct if_shared_ctx ixlv_sctx_init = { + .isc_magic = IFLIB_MAGIC, + .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ + .isc_tx_maxsize = IXL_TSO_SIZE, - u16 pci_vendor_id, pci_device_id; - u16 pci_subvendor_id, pci_subdevice_id; - char device_name[256]; + .isc_tx_maxsegsize = PAGE_SIZE, -#if 0 - INIT_DEBUGOUT("ixlv_probe: begin"); -#endif + // TODO: Review the rx_maxsize and rx_maxsegsize params + // Where are they used in iflib? + .isc_rx_maxsize = 16384, + .isc_rx_nsegments = 1, + .isc_rx_maxsegsize = 16384, + // TODO: What is isc_nfl for? 
+ .isc_nfl = 1, + .isc_ntxqs = 1, + .isc_nrxqs = 1, - pci_vendor_id = pci_get_vendor(dev); - if (pci_vendor_id != I40E_INTEL_VENDOR_ID) - return (ENXIO); + .isc_admin_intrcnt = 1, + .isc_vendor_info = ixlv_vendor_info_array, + .isc_driver_version = ixlv_driver_version, + .isc_driver = &ixlv_if_driver, - pci_device_id = pci_get_device(dev); - pci_subvendor_id = pci_get_subvendor(dev); - pci_subdevice_id = pci_get_subdevice(dev); + .isc_nrxd_min = {IXL_MIN_RING}, + .isc_ntxd_min = {IXL_MIN_RING}, + .isc_nrxd_max = {IXL_MAX_RING}, + .isc_ntxd_max = {IXL_MAX_RING}, + .isc_nrxd_default = {IXL_DEFAULT_RING}, + .isc_ntxd_default = {IXL_DEFAULT_RING}, +}; - ent = ixlv_vendor_info_array; - while (ent->vendor_id != 0) { - if ((pci_vendor_id == ent->vendor_id) && - (pci_device_id == ent->device_id) && +if_shared_ctx_t ixlv_sctx = &ixlv_sctx_init; - ((pci_subvendor_id == ent->subvendor_id) || - (ent->subvendor_id == 0)) && +/*** Functions ***/ - ((pci_subdevice_id == ent->subdevice_id) || - (ent->subdevice_id == 0))) { - sprintf(device_name, "%s, Version - %s", - ixlv_strings[ent->index], - ixlv_driver_version); - device_set_desc_copy(dev, device_name); - return (BUS_PROBE_DEFAULT); - } - ent++; - } - return (ENXIO); -} +static void * +ixlv_register(device_t dev) +{ + return (ixlv_sctx); + } -/********************************************************************* - * Device initialization routine - * - * The attach entry point is called when the driver is being loaded. - * This routine identifies the type of hardware, allocates all resources - * and initializes the hardware. - * - * return 0 on success, positive on failure - *********************************************************************/ - static int -ixlv_attach(device_t dev) +ixlv_if_attach_pre(if_ctx_t ctx) { + device_t dev; struct ixlv_sc *sc; struct i40e_hw *hw; struct ixl_vsi *vsi; - int error = 0; + if_softc_ctx_t scctx; + int error = 0; INIT_DBG_DEV(dev, "begin"); - /* Allocate, clear, and link in our primary soft structure */ - sc = device_get_softc(dev); - sc->dev = sc->osdep.dev = dev; + dev = iflib_get_dev(ctx); + sc = iflib_get_softc(ctx); hw = &sc->hw; + /* + ** Note this assumes we have a single embedded VSI, + ** this could be enhanced later to allocate multiple + */ vsi = &sc->vsi; vsi->dev = dev; + vsi->back = sc; + vsi->hw = &sc->hw; + // vsi->id = 0; + vsi->num_vlans = 0; + vsi->ctx = ctx; + vsi->media = iflib_get_media(ctx); + vsi->shared = scctx = iflib_get_softc_ctx(ctx); + sc->dev = dev; /* Initialize hw struct */ ixlv_init_hw(sc); + /* + * These are the same across all current ixl models + */ + vsi->shared->isc_tx_nsegments = IXL_MAX_TX_SEGS; + vsi->shared->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR); + vsi->shared->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS; + vsi->shared->isc_tx_tso_size_max = IXL_TSO_SIZE; + vsi->shared->isc_tx_tso_segsize_max = PAGE_SIZE; - /* Allocate filter lists */ - ixlv_init_filters(sc); - /* Save this tunable */ vsi->enable_head_writeback = ixlv_enable_head_writeback; - /* Core Lock Init */ - mtx_init(&sc->mtx, device_get_nameunit(dev), - "IXL SC Lock", MTX_DEF); + scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] + * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN); + scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] + * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN); + /* XXX: No idea what this does */ + /* TODO: This value may depend on resources received */ + scctx->isc_max_txqsets = scctx->isc_max_rxqsets = 16; - /* Set up the timer callout */ - callout_init_mtx(&sc->timer, &sc->mtx, 
0); - /* Do PCI setup - map BAR0, etc */ if (ixlv_allocate_pci_resources(sc)) { device_printf(dev, "%s: Allocation of PCI resources failed\n", __func__); error = ENXIO; goto err_early; } INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors"); + /* XXX: This is called by init_shared_code in the PF driver */ error = i40e_set_mac_type(hw); if (error) { device_printf(dev, "%s: set_mac_type failed: %d\n", __func__, error); goto err_pci_res; } error = ixlv_reset_complete(hw); if (error) { device_printf(dev, "%s: Device is still being reset\n", __func__); goto err_pci_res; } INIT_DBG_DEV(dev, "VF Device is ready for configuration"); + /* Sets up Admin Queue */ error = ixlv_setup_vc(sc); if (error) { device_printf(dev, "%s: Error setting up PF comms, %d\n", __func__, error); goto err_pci_res; } INIT_DBG_DEV(dev, "PF API version verified"); /* Need API version before sending reset message */ error = ixlv_reset(sc); if (error) { device_printf(dev, "VF reset failed; reload the driver\n"); goto err_aq; } INIT_DBG_DEV(dev, "VF reset complete"); /* Ask for VF config from PF */ error = ixlv_vf_config(sc); if (error) { device_printf(dev, "Error getting configuration from PF: %d\n", error); goto err_aq; } device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n", sc->vf_res->num_vsis, sc->vf_res->num_queue_pairs, sc->vf_res->max_vectors, sc->vf_res->rss_key_size, sc->vf_res->rss_lut_size); #ifdef IXL_DEBUG device_printf(dev, "Offload flags: 0x%b\n", sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS); #endif /* got VF config message back from PF, now we can parse it */ for (int i = 0; i < sc->vf_res->num_vsis; i++) { if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) sc->vsi_res = &sc->vf_res->vsi_res[i]; } if (!sc->vsi_res) { device_printf(dev, "%s: no LAN VSI found\n", __func__); error = EIO; goto err_res_buf; } + vsi->id = sc->vsi_res->vsi_id; INIT_DBG_DEV(dev, "Resource Acquisition complete"); /* If no mac address was assigned just make a random one */ if (!ixlv_check_ether_addr(hw->mac.addr)) { u8 addr[ETHER_ADDR_LEN]; arc4rand(&addr, sizeof(addr), 0); addr[0] &= 0xFE; addr[0] |= 0x02; bcopy(addr, hw->mac.addr, sizeof(addr)); } + bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); + iflib_set_mac(ctx, hw->mac.addr); - /* Now that the number of queues for this VF is known, set up interrupts */ - sc->msix = ixlv_init_msix(sc); - /* We fail without MSIX support */ - if (sc->msix == 0) { - error = ENXIO; - goto err_res_buf; - } + // TODO: Is this still safe to call? 
+ // ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size); - vsi->id = sc->vsi_res->vsi_id; - vsi->back = (void *)sc; - vsi->flags |= IXL_FLAGS_IS_VF | IXL_FLAGS_USES_MSIX; + /* Allocate filter lists */ + ixlv_init_filters(sc); - ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size); + /* Fill out more iflib parameters */ + scctx->isc_txrx = &ixl_txrx; + // TODO: Probably needs changing + vsi->shared->isc_rss_table_size = sc->hw.func_caps.rss_table_size; + scctx->isc_tx_csum_flags = CSUM_OFFLOAD; + scctx->isc_capenable = IXL_CAPS; - /* This allocates the memory and early settings */ - if (ixlv_setup_queues(sc) != 0) { - device_printf(dev, "%s: setup queues failed!\n", - __func__); - error = EIO; - goto out; - } + INIT_DBG_DEV(dev, "end"); + return (0); +err_res_buf: + free(sc->vf_res, M_DEVBUF); +err_aq: + i40e_shutdown_adminq(hw); +err_pci_res: + ixlv_free_pci_resources(sc); +err_early: + ixlv_free_filters(sc); + INIT_DBG_DEV(dev, "end: error %d", error); + return (error); +} - /* Do queue interrupt setup */ - if (ixlv_assign_msix(sc) != 0) { - device_printf(dev, "%s: allocating queue interrupts failed!\n", - __func__); - error = ENXIO; - goto out; - } +static int +ixlv_if_attach_post(if_ctx_t ctx) +{ + device_t dev; + struct ixlv_sc *sc; + struct i40e_hw *hw; + struct ixl_vsi *vsi; + int error = 0; - INIT_DBG_DEV(dev, "Queue memory and interrupts setup"); + INIT_DBG_DEV(dev, "begin"); + dev = iflib_get_dev(ctx); + vsi = iflib_get_softc(ctx); + vsi->ifp = iflib_get_ifp(ctx); + sc = (struct ixlv_sc *)vsi->back; + hw = &sc->hw; + /* Setup the stack interface */ if (ixlv_setup_interface(dev, sc) != 0) { device_printf(dev, "%s: setup interface failed!\n", __func__); error = EIO; goto out; } INIT_DBG_DEV(dev, "Interface setup complete"); - /* Start AdminQ taskqueue */ - ixlv_init_taskqueue(sc); - - /* We expect a link state message, so schedule the AdminQ task now */ - taskqueue_enqueue(sc->tq, &sc->aq_irq); - - /* Initialize stats */ + /* Initialize statistics & add sysctls */ bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); ixlv_add_sysctls(sc); - /* Register for VLAN events */ - vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, - ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST); - vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, - ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST); - /* We want AQ enabled early */ ixlv_enable_adminq_irq(hw); - - /* Set things up to run init */ - sc->init_state = IXLV_INIT_READY; - - ixl_vc_init_mgr(sc, &sc->vc_mgr); - INIT_DBG_DEV(dev, "end"); return (error); - +// TODO: Check if any failures can happen above +#if 0 out: - ixlv_free_queues(vsi); - ixlv_teardown_adminq_msix(sc); -err_res_buf: free(sc->vf_res, M_DEVBUF); -err_aq: i40e_shutdown_adminq(hw); -err_pci_res: ixlv_free_pci_resources(sc); -err_early: - mtx_destroy(&sc->mtx); ixlv_free_filters(sc); INIT_DBG_DEV(dev, "end: error %d", error); return (error); +#endif } -/********************************************************************* - * Device removal routine - * - * The detach entry point is called when the driver is being removed. - * This routine stops the adapter and deallocates all the resources - * that were allocated for driver operation. 
- * - * return 0 on success, positive on failure - *********************************************************************/ - static int -ixlv_detach(device_t dev) +ixlv_if_detach(if_ctx_t ctx) { - struct ixlv_sc *sc = device_get_softc(dev); - struct ixl_vsi *vsi = &sc->vsi; - struct i40e_hw *hw = &sc->hw; - enum i40e_status_code status; + struct ixl_vsi *vsi = iflib_get_softc(ctx); + struct ixlv_sc *sc = vsi->back; + struct i40e_hw *hw = &sc->hw; + device_t dev = sc->dev; + enum i40e_status_code status; INIT_DBG_DEV(dev, "begin"); - /* Make sure VLANS are not using driver */ - if (vsi->ifp->if_vlantrunk != NULL) { - if_printf(vsi->ifp, "Vlan in use, detach first\n"); - return (EBUSY); - } - /* Remove all the media and link information */ ifmedia_removeall(&sc->media); - /* Stop driver */ - ether_ifdetach(vsi->ifp); - if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) { - mtx_lock(&sc->mtx); - ixlv_stop(sc); - mtx_unlock(&sc->mtx); - } - - /* Unregister VLAN events */ - if (vsi->vlan_attach != NULL) - EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach); - if (vsi->vlan_detach != NULL) - EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach); - /* Drain VC mgr */ callout_drain(&sc->vc_mgr.callout); ixlv_disable_adminq_irq(hw); - ixlv_teardown_adminq_msix(sc); - /* Drain admin queue taskqueue */ - taskqueue_free(sc->tq); status = i40e_shutdown_adminq(&sc->hw); if (status != I40E_SUCCESS) { device_printf(dev, "i40e_shutdown_adminq() failed with status %s\n", i40e_stat_str(hw, status)); } - if_free(vsi->ifp); free(sc->vf_res, M_DEVBUF); - ixlv_free_queues(vsi); ixlv_free_pci_resources(sc); ixlv_free_filters(sc); - bus_generic_detach(dev); - mtx_destroy(&sc->mtx); INIT_DBG_DEV(dev, "end"); return (0); } -/********************************************************************* - * - * Shutdown entry point - * - **********************************************************************/ - +/* TODO: Do shutdown-specific stuff here */ static int -ixlv_shutdown(device_t dev) +ixlv_if_shutdown(if_ctx_t ctx) { - struct ixlv_sc *sc = device_get_softc(dev); + int error = 0; INIT_DBG_DEV(dev, "begin"); - mtx_lock(&sc->mtx); - ixlv_stop(sc); - mtx_unlock(&sc->mtx); + /* TODO: Call ixl_if_stop()? */ - INIT_DBG_DEV(dev, "end"); - return (0); + return (error); } -/* - * Configure TXCSUM(IPV6) and TSO(4/6) - * - the hardware handles these together so we - * need to tweak them - */ -static void -ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) +/* TODO: What is a VF supposed to do in suspend/resume? 
*/ +static int +ixlv_if_suspend(if_ctx_t ctx) { - /* Enable/disable TXCSUM/TSO4 */ - if (!(ifp->if_capenable & IFCAP_TXCSUM) - && !(ifp->if_capenable & IFCAP_TSO4)) { - if (mask & IFCAP_TXCSUM) { - ifp->if_capenable |= IFCAP_TXCSUM; - /* enable TXCSUM, restore TSO if previously enabled */ - if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { - vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; - ifp->if_capenable |= IFCAP_TSO4; - } - } - else if (mask & IFCAP_TSO4) { - ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); - vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; - if_printf(ifp, - "TSO4 requires txcsum, enabling both...\n"); - } - } else if((ifp->if_capenable & IFCAP_TXCSUM) - && !(ifp->if_capenable & IFCAP_TSO4)) { - if (mask & IFCAP_TXCSUM) - ifp->if_capenable &= ~IFCAP_TXCSUM; - else if (mask & IFCAP_TSO4) - ifp->if_capenable |= IFCAP_TSO4; - } else if((ifp->if_capenable & IFCAP_TXCSUM) - && (ifp->if_capenable & IFCAP_TSO4)) { - if (mask & IFCAP_TXCSUM) { - vsi->flags |= IXL_FLAGS_KEEP_TSO4; - ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); - if_printf(ifp, - "TSO4 requires txcsum, disabling both...\n"); - } else if (mask & IFCAP_TSO4) - ifp->if_capenable &= ~IFCAP_TSO4; - } + int error = 0; - /* Enable/disable TXCSUM_IPV6/TSO6 */ - if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6) - && !(ifp->if_capenable & IFCAP_TSO6)) { - if (mask & IFCAP_TXCSUM_IPV6) { - ifp->if_capenable |= IFCAP_TXCSUM_IPV6; - if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { - vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; - ifp->if_capenable |= IFCAP_TSO6; - } - } else if (mask & IFCAP_TSO6) { - ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); - vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; - if_printf(ifp, - "TSO6 requires txcsum6, enabling both...\n"); - } - } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6) - && !(ifp->if_capenable & IFCAP_TSO6)) { - if (mask & IFCAP_TXCSUM_IPV6) - ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6; - else if (mask & IFCAP_TSO6) - ifp->if_capenable |= IFCAP_TSO6; - } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) - && (ifp->if_capenable & IFCAP_TSO6)) { - if (mask & IFCAP_TXCSUM_IPV6) { - vsi->flags |= IXL_FLAGS_KEEP_TSO6; - ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); - if_printf(ifp, - "TSO6 requires txcsum6, disabling both...\n"); - } else if (mask & IFCAP_TSO6) - ifp->if_capenable &= ~IFCAP_TSO6; - } + INIT_DBG_DEV(dev, "begin"); + + /* TODO: Call ixl_if_stop()? */ + + return (error); } -/********************************************************************* - * Ioctl entry point - * - * ixlv_ioctl is called when the user wants to configure the - * interface. 
- * - * return 0 on success, positive on failure - **********************************************************************/ +static int +ixlv_if_resume(if_ctx_t ctx) +{ + struct ifnet *ifp = iflib_get_ifp(ctx); + INIT_DBG_DEV(dev, "begin"); + + /* Read & clear wake-up registers */ + + /* Required after D3->D0 transition */ + if (ifp->if_flags & IFF_UP) + ixlv_if_init(ctx); + + return (0); +} + +#if 0 static int ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ixl_vsi *vsi = ifp->if_softc; struct ixlv_sc *sc = vsi->back; struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; bool avoid_reset = FALSE; #endif int error = 0; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif #if defined(INET) || defined(INET6) /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ixlv_init(vsi); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; #endif case SIOCSIFMTU: IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)"); mtx_lock(&sc->mtx); if (ifr->ifr_mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { error = EINVAL; IOCTL_DBG_IF(ifp, "mtu too large"); } else { IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu); // ERJ: Interestingly enough, these types don't match ifp->if_mtu = (u_long)ifr->ifr_mtu; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixlv_init_locked(sc); } mtx_unlock(&sc->mtx); break; case SIOCSIFFLAGS: IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)"); mtx_lock(&sc->mtx); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ixlv_init_locked(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixlv_stop(sc); sc->if_flags = ifp->if_flags; mtx_unlock(&sc->mtx); break; case SIOCADDMULTI: IOCTL_DBG_IF2(ifp, "SIOCADDMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mtx_lock(&sc->mtx); ixlv_disable_intr(vsi); ixlv_add_multi(vsi); ixlv_enable_intr(vsi); mtx_unlock(&sc->mtx); } break; case SIOCDELMULTI: IOCTL_DBG_IF2(ifp, "SIOCDELMULTI"); if (sc->init_state == IXLV_RUNNING) { mtx_lock(&sc->mtx); ixlv_disable_intr(vsi); ixlv_del_multi(vsi); ixlv_enable_intr(vsi); mtx_unlock(&sc->mtx); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)"); ixlv_cap_txcsum_tso(vsi, ifp, mask); if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWFILTER) ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixlv_init(vsi); } VLAN_CAPABILITIES(ifp); break; } default: IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command); error = ether_ioctl(ifp, command, data); break; } return 
(error); } +#endif /* ** To do a reinit on the VF is unfortunately more complicated ** than a physical device, we must have the PF more or less ** completely recreate our memory, so many things that were ** done only once at attach in traditional drivers now must be ** redone at each reinitialization. This function does that ** 'prelude' so we can then call the normal locked init code. */ int ixlv_reinit_locked(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; struct ifnet *ifp = vsi->ifp; struct ixlv_mac_filter *mf, *mf_temp; struct ixlv_vlan_filter *vf; int error = 0; INIT_DBG_IF(ifp, "begin"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixlv_stop(sc); error = ixlv_reset(sc); INIT_DBG_IF(ifp, "VF was reset"); /* set the state in case we went thru RESET */ sc->init_state = IXLV_RUNNING; /* ** Resetting the VF drops all filters from hardware; ** we need to mark them to be re-added in init. */ SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) { if (mf->flags & IXL_FILTER_DEL) { SLIST_REMOVE(sc->mac_filters, mf, ixlv_mac_filter, next); free(mf, M_DEVBUF); } else mf->flags |= IXL_FILTER_ADD; } if (vsi->num_vlans != 0) SLIST_FOREACH(vf, sc->vlan_filters, next) vf->flags = IXL_FILTER_ADD; else { /* clean any stale filters */ while (!SLIST_EMPTY(sc->vlan_filters)) { vf = SLIST_FIRST(sc->vlan_filters); SLIST_REMOVE_HEAD(sc->vlan_filters, next); free(vf, M_DEVBUF); } } ixlv_enable_adminq_irq(hw); ixl_vc_flush(&sc->vc_mgr); INIT_DBG_IF(ifp, "end"); return (error); } static void ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg, enum i40e_status_code code) { struct ixlv_sc *sc; sc = arg; /* * Ignore "Adapter Stopped" message as that happens if an ifconfig down * happens while a command is in progress, so we don't print an error * in that case. */ if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) { if_printf(sc->vsi.ifp, "Error %s waiting for PF to complete operation %d\n", i40e_stat_str(&sc->hw, code), cmd->request); } } -static void -ixlv_init_locked(struct ixlv_sc *sc) +void +ixlv_if_init(if_ctx_t ctx) { - struct i40e_hw *hw = &sc->hw; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; - struct ifnet *ifp = vsi->ifp; - int error = 0; + struct ixl_vsi *vsi = iflib_get_softc(ctx); + if_softc_ctx_t scctx = vsi->shared; + struct ixlv_sc *sc = vsi->back; + struct i40e_hw *hw = &sc->hw; + struct ifnet *ifp = iflib_get_ifp(ctx); + struct ixl_tx_queue *tx_que = vsi->tx_queues; + struct ixl_rx_queue *rx_que = vsi->rx_queues; + int error = 0; + INIT_DBG_IF(ifp, "begin"); IXLV_CORE_LOCK_ASSERT(sc); /* Do a reinit first if an init has already been done */ if ((sc->init_state == IXLV_RUNNING) || (sc->init_state == IXLV_RESET_REQUIRED) || (sc->init_state == IXLV_RESET_PENDING)) error = ixlv_reinit_locked(sc); /* Don't bother with init if we failed reinit */ if (error) goto init_done; /* Remove existing MAC filter if new MAC addr is set */ if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) { error = ixlv_del_mac_filter(sc, hw->mac.addr); if (error == 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd, IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, sc); } /* Check for an LAA mac address... 
*/ bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN); - ifp->if_hwassist = 0; - if (ifp->if_capenable & IFCAP_TSO) - ifp->if_hwassist |= CSUM_TSO; - if (ifp->if_capenable & IFCAP_TXCSUM) - ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP); - if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) - ifp->if_hwassist |= CSUM_OFFLOAD_IPV6; - /* Add mac filter for this VF to PF */ if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) { error = ixlv_add_mac_filter(sc, hw->mac.addr, 0); if (!error || error == EEXIST) ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd, IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, sc); } /* Setup vlan's if needed */ ixlv_setup_vlan_filters(sc); + // TODO: Functionize /* Prepare the queues for operation */ - for (int i = 0; i < vsi->num_queues; i++, que++) { - struct rx_ring *rxr = &que->rxr; + for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) { + // TODO: Necessary? Correct? + ixl_init_tx_ring(vsi, tx_que); + } + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) { + struct rx_ring *rxr = &rx_que->rxr; - ixl_init_tx_ring(que); - - if (vsi->max_frame_size <= MCLBYTES) + if (scctx->isc_max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; - ixl_init_rx_ring(que); } /* Set initial ITR values */ ixlv_configure_itr(sc); /* Configure queues */ ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd, IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc); /* Set up RSS */ ixlv_config_rss(sc); /* Map vectors */ ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd, IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc); /* Enable queues */ ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd, IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc); - /* Start the local timer */ - callout_reset(&sc->timer, hz, ixlv_local_timer, sc); - sc->init_state = IXLV_RUNNING; init_done: INIT_DBG_IF(ifp, "end"); return; } -/* -** Init entry point for the stack -*/ +#if 0 void ixlv_init(void *arg) { struct ixl_vsi *vsi = (struct ixl_vsi *)arg; struct ixlv_sc *sc = vsi->back; int retries = 0; /* Prevent init from running again while waiting for AQ calls * made in init_locked() to complete. */ mtx_lock(&sc->mtx); if (sc->init_in_progress) { mtx_unlock(&sc->mtx); return; } else sc->init_in_progress = true; ixlv_init_locked(sc); mtx_unlock(&sc->mtx); /* Wait for init_locked to finish */ while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) && ++retries < IXLV_MAX_INIT_WAIT) { i40e_msec_pause(25); } if (retries >= IXLV_MAX_INIT_WAIT) { if_printf(vsi->ifp, "Init failed to complete in allotted time!\n"); } mtx_lock(&sc->mtx); sc->init_in_progress = false; mtx_unlock(&sc->mtx); } /* * ixlv_attach() helper function; gathers information about * the (virtual) hardware for use elsewhere in the driver. */ static void ixlv_init_hw(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; /* Save off the information about this board */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); } +#endif /* * ixlv_attach() helper function; initalizes the admin queue * and attempts to establish contact with the PF by * retrying the initial "API version" message several times * or until the PF responds. 
*/ static int ixlv_setup_vc(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; int error = 0, ret_error = 0, asq_retries = 0; bool send_api_ver_retried = 0; /* Need to set these AQ paramters before initializing AQ */ hw->aq.num_arq_entries = IXL_AQ_LEN; hw->aq.num_asq_entries = IXL_AQ_LEN; hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) { /* Initialize admin queue */ error = i40e_init_adminq(hw); if (error) { device_printf(dev, "%s: init_adminq failed: %d\n", __func__, error); ret_error = 1; continue; } INIT_DBG_DEV(dev, "Initialized Admin Queue; starting" " send_api_ver attempt %d", i+1); retry_send: /* Send VF's API version */ error = ixlv_send_api_ver(sc); if (error) { i40e_shutdown_adminq(hw); ret_error = 2; device_printf(dev, "%s: unable to send api" " version to PF on attempt %d, error %d\n", __func__, i+1, error); } asq_retries = 0; while (!i40e_asq_done(hw)) { if (++asq_retries > IXLV_AQ_MAX_ERR) { i40e_shutdown_adminq(hw); device_printf(dev, "Admin Queue timeout " "(waiting for send_api_ver), %d more tries...\n", IXLV_AQ_MAX_ERR - (i + 1)); ret_error = 3; break; } i40e_msec_pause(10); } if (asq_retries > IXLV_AQ_MAX_ERR) continue; INIT_DBG_DEV(dev, "Sent API version message to PF"); /* Verify that the VF accepts the PF's API version */ error = ixlv_verify_api_ver(sc); if (error == ETIMEDOUT) { if (!send_api_ver_retried) { /* Resend message, one more time */ send_api_ver_retried = true; device_printf(dev, "%s: Timeout while verifying API version on first" " try!\n", __func__); goto retry_send; } else { device_printf(dev, "%s: Timeout while verifying API version on second" " try!\n", __func__); ret_error = 4; break; } } if (error) { device_printf(dev, "%s: Unable to verify API version," " error %s\n", __func__, i40e_stat_str(hw, error)); ret_error = 5; } break; } if (ret_error >= 4) i40e_shutdown_adminq(hw); return (ret_error); } /* * ixlv_attach() helper function; asks the PF for this VF's * configuration, and saves the information if it receives it. 
 */ static int ixlv_vf_config(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; int bufsz, error = 0, ret_error = 0; int asq_retries, retried = 0; retry_config: error = ixlv_send_vf_config_msg(sc); if (error) { device_printf(dev, "%s: Unable to send VF config request, attempt %d," " error %d\n", __func__, retried + 1, error); ret_error = 2; } asq_retries = 0; while (!i40e_asq_done(hw)) { if (++asq_retries > IXLV_AQ_MAX_ERR) { device_printf(dev, "%s: Admin Queue timeout " "(waiting for send_vf_config_msg), attempt %d\n", __func__, retried + 1); ret_error = 3; goto fail; } i40e_msec_pause(10); } INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d", retried + 1); if (!sc->vf_res) { bufsz = sizeof(struct virtchnl_vf_resource) + (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT); if (!sc->vf_res) { device_printf(dev, "%s: Unable to allocate memory for VF configuration" " message from PF on attempt %d\n", __func__, retried + 1); ret_error = 1; goto fail; } } /* Check for VF config response */ error = ixlv_get_vf_config(sc); if (error == ETIMEDOUT) { /* The 1st time we timeout, send the configuration message again */ if (!retried) { retried++; goto retry_config; } device_printf(dev, "%s: ixlv_get_vf_config() timed out waiting for a response\n", __func__); } if (error) { device_printf(dev, "%s: Unable to get VF configuration from PF after %d tries!\n", __func__, retried + 1); ret_error = 4; } goto done; fail: free(sc->vf_res, M_DEVBUF); done: return (ret_error); } -/* - * Allocate MSI/X vectors, setup the AQ vector early - */ static int -ixlv_init_msix(struct ixlv_sc *sc) +ixlv_if_msix_intr_assign(if_ctx_t ctx, int msix) { - device_t dev = sc->dev; - int rid, want, vectors, queues, available; - int auto_max_queues; + struct ixl_vsi *vsi = iflib_get_softc(ctx); + struct ixlv_sc *sc = vsi->back; + struct ixl_rx_queue *que = vsi->rx_queues; + struct ixl_tx_queue *tx_que = vsi->tx_queues; + int err, i, rid, vector = 0; + char buf[16]; - rid = PCIR_BAR(IXL_MSIX_BAR); - sc->msix_mem = bus_alloc_resource_any(dev, - SYS_RES_MEMORY, &rid, RF_ACTIVE); - if (!sc->msix_mem) { - /* May not be enabled */ - device_printf(sc->dev, - "Unable to map MSIX table\n"); - goto fail; + /* Admin Queue is vector 0 */ + rid = vector + 1; + + err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, + ixlv_msix_adminq, sc, 0, "aq"); + if (err) { + iflib_irq_free(ctx, &vsi->irq); + device_printf(iflib_get_dev(ctx), "Failed to register Admin Queue handler\n"); + return (err); } + sc->admvec = vector; + ++vector; - available = pci_msix_count(dev); - if (available == 0) { /* system has msix disabled */ - bus_release_resource(dev, SYS_RES_MEMORY, - rid, sc->msix_mem); - sc->msix_mem = NULL; - goto fail; + /* Now set up the stations */ + for (i = 0; i < vsi->num_rx_queues; i++, vector++, que++) { + rid = vector + 1; + + snprintf(buf, sizeof(buf), "rxq%d", i); + err = iflib_irq_alloc_generic(ctx, &que->que_irq, rid, IFLIB_INTR_RX, + ixlv_msix_que, que, que->rxr.me, buf); + if (err) { + device_printf(iflib_get_dev(ctx), "Failed to allocate q int %d err: %d\n", i, err); + vsi->num_rx_queues = i + 1; + goto fail; + } + que->msix = vector; } - /* Clamp queues to number of CPUs and # of MSI-X vectors available */ - auto_max_queues = min(mp_ncpus, available - 1); - /* Clamp queues to # assigned to VF by PF */ - auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs); + for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++,
tx_que++) { + snprintf(buf, sizeof(buf), "txq%d", i); + rid = que->msix + 1; + iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); + } - /* Override with tunable value if tunable is less than autoconfig count */ - if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues)) - queues = ixlv_max_queues; - /* Use autoconfig amount if that's lower */ - else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) { - device_printf(dev, "ixlv_max_queues (%d) is too large, using " - "autoconfig amount (%d)...\n", - ixlv_max_queues, auto_max_queues); - queues = auto_max_queues; + return (0); +fail: + iflib_irq_free(ctx, &vsi->irq); + que = vsi->rx_queues; + for (int i = 0; i < vsi->num_rx_queues; i++, que++) + iflib_irq_free(ctx, &que->que_irq); + return (err); +} + +/* Enable all interrupts */ +static void +ixlv_if_enable_intr(if_ctx_t ctx) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + + ixlv_enable_intr(vsi); +} + +/* Disable all interrupts */ +static void +ixlv_if_disable_intr(if_ctx_t ctx) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + + ixlv_disable_intr(vsi); +} + +/* Enable queue interrupt */ +static int +ixlv_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + struct i40e_hw *hw = vsi->hw; + struct ixl_rx_queue *que = &vsi->rx_queues[rxqid]; + + ixlv_enable_queue_irq(hw, que->rxr.me); + + return (0); +} + +static int +ixlv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + struct ixl_tx_queue *que; + int i; + + MPASS(vsi->num_tx_queues > 0); + MPASS(ntxqs == 1); + MPASS(vsi->num_tx_queues == ntxqsets); + + /* Allocate queue structure memory */ + if (!(vsi->tx_queues = + (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXLV, M_NOWAIT | M_ZERO))) { + device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); + return (ENOMEM); } - /* Limit maximum auto-configured queues to 8 if no user value is set */ - else - queues = min(auto_max_queues, 8); -#ifdef RSS - /* If we're doing RSS, clamp at the number of RSS buckets */ - if (queues > rss_getnumbuckets()) - queues = rss_getnumbuckets(); + for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { + struct tx_ring *txr = &que->txr; + txr->me = i; + que->vsi = vsi; + + /* get the virtual and physical address of the hardware queues */ + txr->tail = I40E_QTX_TAIL1(txr->me); + txr->tx_base = (struct i40e_tx_desc *)vaddrs[i]; + txr->tx_paddr = paddrs[i]; + txr->que = que; + } + + // TODO: Do a config_gtask_init for admin queue here? 
+ // iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod, "mod_task"); + + device_printf(iflib_get_dev(ctx), "%s: allocated for %d txqs\n", __func__, vsi->num_tx_queues); + return (0); +} + +static int +ixlv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + struct ixl_rx_queue *que; + int i; + + MPASS(vsi->num_rx_queues > 0); + MPASS(nrxqs == 1); + MPASS(vsi->num_rx_queues == nrxqsets); + + /* Allocate queue structure memory */ + if (!(vsi->rx_queues = + (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) * + nrxqsets, M_IXLV, M_NOWAIT | M_ZERO))) { + device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); + return (ENOMEM); + } + + for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) { + struct rx_ring *rxr = &que->rxr; + + rxr->me = i; + que->vsi = vsi; + + /* get the virtual and physical address of the hardware queues */ + rxr->tail = I40E_QRX_TAIL1(rxr->me); + rxr->rx_base = (union i40e_rx_desc *)vaddrs[i]; + rxr->rx_paddr = paddrs[i]; + rxr->que = que; + } + + device_printf(iflib_get_dev(ctx), "%s: allocated for %d rxqs\n", __func__, vsi->num_rx_queues); + return (0); +} + +static void +ixlv_if_queues_free(if_ctx_t ctx) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + + if (vsi->tx_queues != NULL) { + free(vsi->tx_queues, M_IXLV); + vsi->tx_queues = NULL; + } + if (vsi->rx_queues != NULL) { + free(vsi->rx_queues, M_IXLV); + vsi->rx_queues = NULL; + } +} + +// TODO: Implement +static void +ixlv_if_update_admin_status(if_ctx_t ctx) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + //struct ixlv_sc *sc = vsi->back; + //struct i40e_hw *hw = &sc->hw; + //struct i40e_arq_event_info event; + //i40e_status ret; + //u32 loop = 0; + //u16 opcode + u16 result = 0; + //u64 baudrate; + + /* TODO: Split up + * - Update admin queue stuff + * - Update link status + * - Enqueue aq task + * - Re-enable admin intr + */ + +/* TODO: Does VF reset need to be handled here? */ +#if 0 + if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { + /* Flag cleared at end of this function */ + ixl_handle_empr_reset(pf); + return; + } #endif - /* - ** Want one vector (RX/TX pair) per queue - ** plus an additional for the admin queue. 
- */ - want = queues + 1; - if (want <= available) /* Have enough */ - vectors = want; - else { - device_printf(sc->dev, - "MSIX Configuration Problem, " - "%d vectors available but %d wanted!\n", - available, want); - goto fail; +#if 0 + event.buf_len = IXL_AQ_BUF_SZ; + event.msg_buf = malloc(event.buf_len, + M_IXLV, M_NOWAIT | M_ZERO); + if (!event.msg_buf) { + device_printf(pf->dev, "%s: Unable to allocate memory for Admin" + " Queue event!\n", __func__); + return; } -#ifdef RSS + /* clean and process any events */ + do { + ret = i40e_clean_arq_element(hw, &event, &result); + if (ret) + break; + opcode = LE16_TO_CPU(event.desc.opcode); + ixl_dbg(pf, IXL_DBG_AQ, + "Admin Queue event: %#06x\n", opcode); + switch (opcode) { + case i40e_aqc_opc_get_link_status: + ixl_link_event(pf, &event); + break; + case i40e_aqc_opc_send_msg_to_pf: +#ifdef PCI_IOV + ixl_handle_vf_msg(pf, &event); +#endif + break; + case i40e_aqc_opc_event_lan_overflow: + break; + default: +#ifdef IXL_DEBUG + printf("AdminQ unknown event %x\n", opcode); +#endif + break; + } + + } while (result && (loop++ < IXL_ADM_LIMIT)); + + free(event.msg_buf, M_IXLV); +#endif + +#if 0 + /* XXX: This updates the link status */ + if (pf->link_up) { + if (vsi->link_active == FALSE) { + vsi->link_active = TRUE; + baudrate = ixl_max_aq_speed_to_value(pf->link_speed); + iflib_link_state_change(ctx, LINK_STATE_UP, baudrate); + ixl_link_up_msg(pf); + // ixl_ping_all_vfs(adapter); + } + } else { /* Link down */ + if (vsi->link_active == TRUE) { + vsi->link_active = FALSE; + iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); + // ixl_ping_all_vfs(adapter); + } + } +#endif + /* - * If we're doing RSS, the number of queues needs to - * match the number of RSS buckets that are configured. - * - * + If there's more queues than RSS buckets, we'll end - * up with queues that get no traffic. - * - * + If there's more RSS buckets than queues, we'll end - * up having multiple RSS buckets map to the same queue, - * so there'll be some contention. - */ - if (queues != rss_getnumbuckets()) { - device_printf(dev, - "%s: queues (%d) != RSS buckets (%d)" - "; performance will be impacted.\n", - __func__, queues, rss_getnumbuckets()); + * If there are still messages to process, reschedule ourselves. + * Otherwise, re-enable our interrupt and go to sleep. 
+ */ + if (result > 0) + iflib_admin_intr_deferred(ctx); + else + /* TODO: Link/adminq interrupt should be re-enabled in IFDI_LINK_INTR_ENABLE */ + ixlv_enable_intr(vsi); +} + +static void +ixlv_if_multi_set(if_ctx_t ctx) +{ + // struct ixl_vsi *vsi = iflib_get_softc(ctx); + // struct i40e_hw *hw = vsi->hw; + // struct ixlv_sc *sc = vsi->back; + // int mcnt = 0, flags; + + IOCTL_DEBUGOUT("ixl_if_multi_set: begin"); + + // TODO: Implement +#if 0 + mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR); + /* delete existing MC filters */ + ixlv_del_multi(vsi); + + if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { + // Set promiscuous mode (multicast) + // TODO: This needs to get handled somehow +#if 0 + ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, + IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete, sc); +#endif + return; } + /* (re-)install filters for all mcast addresses */ + mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi); + + if (mcnt > 0) { + flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); + ixlv_add_hw_filters(vsi, flags, mcnt); + } #endif - if (pci_alloc_msix(dev, &vectors) == 0) { - device_printf(sc->dev, - "Using MSIX interrupts with %d vectors\n", vectors); - sc->msix = vectors; - sc->vsi.num_queues = queues; + IOCTL_DEBUGOUT("ixl_if_multi_set: end"); +} + +static void +ixlv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back; + struct i40e_hw *hw = &sc->hw; + + INIT_DEBUGOUT("ixl_media_status: begin"); + + hw->phy.get_link_info = TRUE; + i40e_get_link_status(hw, &sc->link_up); + + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; + + if (!sc->link_up) { + return; } - /* Next we need to setup the vector for the Admin Queue */ - rid = 1; /* zero vector + 1 */ - sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, - &rid, RF_SHAREABLE | RF_ACTIVE); - if (sc->res == NULL) { - device_printf(dev, "Unable to allocate" - " bus resource: AQ interrupt \n"); - goto fail; + ifmr->ifm_status |= IFM_ACTIVE; + /* Hardware is always full-duplex */ + ifmr->ifm_active |= IFM_FDX; + + // TODO: Check another variable to get link speed +#if 0 + switch (hw->phy.link_info.phy_type) { + /* 100 M */ + case I40E_PHY_TYPE_100BASE_TX: + ifmr->ifm_active |= IFM_100_TX; + break; + /* 1 G */ + case I40E_PHY_TYPE_1000BASE_T: + ifmr->ifm_active |= IFM_1000_T; + break; + case I40E_PHY_TYPE_1000BASE_SX: + ifmr->ifm_active |= IFM_1000_SX; + break; + case I40E_PHY_TYPE_1000BASE_LX: + ifmr->ifm_active |= IFM_1000_LX; + break; + case I40E_PHY_TYPE_1000BASE_T_OPTICAL: + ifmr->ifm_active |= IFM_OTHER; + break; + /* 10 G */ + case I40E_PHY_TYPE_10GBASE_SFPP_CU: + ifmr->ifm_active |= IFM_10G_TWINAX; + break; + case I40E_PHY_TYPE_10GBASE_SR: + ifmr->ifm_active |= IFM_10G_SR; + break; + case I40E_PHY_TYPE_10GBASE_LR: + ifmr->ifm_active |= IFM_10G_LR; + break; + case I40E_PHY_TYPE_10GBASE_T: + ifmr->ifm_active |= IFM_10G_T; + break; + case I40E_PHY_TYPE_XAUI: + case I40E_PHY_TYPE_XFI: + case I40E_PHY_TYPE_10GBASE_AOC: + ifmr->ifm_active |= IFM_OTHER; + break; + /* 25 G */ + case I40E_PHY_TYPE_25GBASE_KR: + ifmr->ifm_active |= IFM_25G_KR; + break; + case I40E_PHY_TYPE_25GBASE_CR: + ifmr->ifm_active |= IFM_25G_CR; + break; + case I40E_PHY_TYPE_25GBASE_SR: + ifmr->ifm_active |= IFM_25G_SR; + break; + case I40E_PHY_TYPE_25GBASE_LR: + ifmr->ifm_active |= IFM_UNKNOWN; + break; + /* 40 G */ + case I40E_PHY_TYPE_40GBASE_CR4: + case I40E_PHY_TYPE_40GBASE_CR4_CU: + 
ifmr->ifm_active |= IFM_40G_CR4; + break; + case I40E_PHY_TYPE_40GBASE_SR4: + ifmr->ifm_active |= IFM_40G_SR4; + break; + case I40E_PHY_TYPE_40GBASE_LR4: + ifmr->ifm_active |= IFM_40G_LR4; + break; + case I40E_PHY_TYPE_XLAUI: + ifmr->ifm_active |= IFM_OTHER; + break; + case I40E_PHY_TYPE_1000BASE_KX: + ifmr->ifm_active |= IFM_1000_KX; + break; + case I40E_PHY_TYPE_SGMII: + ifmr->ifm_active |= IFM_1000_SGMII; + break; + /* ERJ: What's the difference between these? */ + case I40E_PHY_TYPE_10GBASE_CR1_CU: + case I40E_PHY_TYPE_10GBASE_CR1: + ifmr->ifm_active |= IFM_10G_CR1; + break; + case I40E_PHY_TYPE_10GBASE_KX4: + ifmr->ifm_active |= IFM_10G_KX4; + break; + case I40E_PHY_TYPE_10GBASE_KR: + ifmr->ifm_active |= IFM_10G_KR; + break; + case I40E_PHY_TYPE_SFI: + ifmr->ifm_active |= IFM_10G_SFI; + break; + /* Our single 20G media type */ + case I40E_PHY_TYPE_20GBASE_KR2: + ifmr->ifm_active |= IFM_20G_KR2; + break; + case I40E_PHY_TYPE_40GBASE_KR4: + ifmr->ifm_active |= IFM_40G_KR4; + break; + case I40E_PHY_TYPE_XLPPI: + case I40E_PHY_TYPE_40GBASE_AOC: + ifmr->ifm_active |= IFM_40G_XLPPI; + break; + /* Unknown to driver */ + default: + ifmr->ifm_active |= IFM_UNKNOWN; + break; } - if (bus_setup_intr(dev, sc->res, - INTR_TYPE_NET | INTR_MPSAFE, NULL, - ixlv_msix_adminq, sc, &sc->tag)) { - sc->res = NULL; - device_printf(dev, "Failed to register AQ handler"); - goto fail; - } - bus_describe_intr(dev, sc->res, sc->tag, "adminq"); + /* Report flow control status as well */ + if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) + ifmr->ifm_active |= IFM_ETH_TXPAUSE; + if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) + ifmr->ifm_active |= IFM_ETH_RXPAUSE; + #endif +} - return (vectors); +static int +ixlv_if_media_change(if_ctx_t ctx) +{ + struct ifmedia *ifm = iflib_get_media(ctx); -fail: - /* The VF driver MUST use MSIX */ - return (0); + INIT_DEBUGOUT("ixl_media_change: begin"); + + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) + return (EINVAL); + + if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n"); + return (ENODEV); } +// TODO: Rework static int +ixlv_if_promisc_set(if_ctx_t ctx, int flags) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + struct ifnet *ifp = iflib_get_ifp(ctx); + struct i40e_hw *hw = vsi->hw; + int err; + bool uni = FALSE, multi = FALSE; + + if (flags & IFF_ALLMULTI || + if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR) + multi = TRUE; + if (flags & IFF_PROMISC) + uni = TRUE; + + err = i40e_aq_set_vsi_unicast_promiscuous(hw, + vsi->seid, uni, NULL, false); + if (err) + return (err); + err = i40e_aq_set_vsi_multicast_promiscuous(hw, + vsi->seid, multi, NULL); + return (err); +} + +static void +ixlv_if_timer(if_ctx_t ctx, uint16_t qid) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + struct ixlv_sc *sc = vsi->back; + //struct i40e_hw *hw = &sc->hw; + //struct ixl_tx_queue *que = &vsi->tx_queues[qid]; + //u32 mask; + +#if 0 + /* + ** Check status of the queues + */ + mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); + + /* If queue param has outstanding work, trigger sw irq */ + // TODO: TX queues in iflib don't use HW interrupts; does this do anything? + if (que->busy) + wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask); + #endif + + // XXX: Is this timer per-queue? 
+ if (qid != 0) + return; + + /* Fire off the adminq task */ + iflib_admin_intr_deferred(ctx); + + /* Update stats */ + ixlv_request_stats(sc); +} + +static void +ixlv_if_vlan_register(if_ctx_t ctx, u16 vtag) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + //struct i40e_hw *hw = vsi->hw; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + ++vsi->num_vlans; + // TODO: Redo + // ixlv_add_filter(vsi, hw->mac.addr, vtag); +} + +static void +ixlv_if_vlan_unregister(if_ctx_t ctx, u16 vtag) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + //struct i40e_hw *hw = vsi->hw; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + --vsi->num_vlans; + // TODO: Redo + // ixlv_del_filter(vsi, hw->mac.addr, vtag); +} + +static uint64_t +ixlv_if_get_counter(if_ctx_t ctx, ift_counter cnt) +{ + struct ixl_vsi *vsi = iflib_get_softc(ctx); + if_t ifp = iflib_get_ifp(ctx); + + switch (cnt) { + case IFCOUNTER_IPACKETS: + return (vsi->ipackets); + case IFCOUNTER_IERRORS: + return (vsi->ierrors); + case IFCOUNTER_OPACKETS: + return (vsi->opackets); + case IFCOUNTER_OERRORS: + return (vsi->oerrors); + case IFCOUNTER_COLLISIONS: + /* Collisions are by standard impossible in 40G/10G Ethernet */ + return (0); + case IFCOUNTER_IBYTES: + return (vsi->ibytes); + case IFCOUNTER_OBYTES: + return (vsi->obytes); + case IFCOUNTER_IMCASTS: + return (vsi->imcasts); + case IFCOUNTER_OMCASTS: + return (vsi->omcasts); + case IFCOUNTER_IQDROPS: + return (vsi->iqdrops); + case IFCOUNTER_OQDROPS: + return (vsi->oqdrops); + case IFCOUNTER_NOPROTO: + return (vsi->noproto); + default: + return (if_get_counter_default(ifp, cnt)); + } +} + +static int ixlv_allocate_pci_resources(struct ixlv_sc *sc) { + struct i40e_hw *hw = &sc->hw; + device_t dev = iflib_get_dev(sc->vsi.ctx); int rid; - device_t dev = sc->dev; + /* Map BAR0 */ rid = PCIR_BAR(0); sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(sc->pci_mem)) { - device_printf(dev, "Unable to allocate bus resource: memory\n"); + device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); return (ENXIO); - } + } + + /* Save off the PCI information */ + hw->vendor_id = pci_get_vendor(dev); + hw->device_id = pci_get_device(dev); + hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); + hw->subsystem_vendor_id = + pci_read_config(dev, PCIR_SUBVEND_0, 2); + hw->subsystem_device_id = + pci_read_config(dev, PCIR_SUBDEV_0, 2); + hw->bus.device = pci_get_slot(dev); + hw->bus.func = pci_get_function(dev); + + /* Save off register access information */ sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem); sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem); sc->osdep.flush_reg = I40E_VFGEN_RSTAT; + sc->osdep.dev = dev; + sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle; sc->hw.back = &sc->osdep; - ixl_set_busmaster(dev); - ixl_set_msix_enable(dev); - /* Disable adminq interrupts (just in case) */ - ixlv_disable_adminq_irq(&sc->hw); + /* TODO: Probably not necessary */ + // ixlv_disable_adminq_irq(&sc->hw); - return (0); -} - -/* - * Free MSI-X related resources for a single queue - */ + return (0); + } + static void -ixlv_free_msix_resources(struct ixlv_sc *sc, struct ixl_queue *que) +ixlv_free_pci_resources(struct ixlv_sc *sc) { + struct ixl_vsi *vsi = &sc->vsi; + struct ixl_rx_queue *rx_que = vsi->rx_queues; device_t dev = sc->dev; + /* We may get here before stations are setup */ + // TODO: Check if we can still check against sc->msix + if 
((sc->msix > 0) || (rx_que == NULL)) + goto early; + /* - ** Release all msix queue resources: + ** Release all msix VSI resources: */ - if (que->tag != NULL) { - bus_teardown_intr(dev, que->res, que->tag); - que->tag = NULL; - } - if (que->res != NULL) { - int rid = que->msix + 1; - bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); - que->res = NULL; - } - if (que->tq != NULL) { - taskqueue_free(que->tq); - que->tq = NULL; - } -} + iflib_irq_free(vsi->ctx, &vsi->irq); -static void -ixlv_free_pci_resources(struct ixlv_sc *sc) -{ - device_t dev = sc->dev; + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) + iflib_irq_free(vsi->ctx, &rx_que->que_irq); - pci_release_msi(dev); - - if (sc->msix_mem != NULL) - bus_release_resource(dev, SYS_RES_MEMORY, - PCIR_BAR(IXL_MSIX_BAR), sc->msix_mem); - +early: if (sc->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->pci_mem); } -/* - * Create taskqueue and tasklet for Admin Queue interrupts. - */ -static int -ixlv_init_taskqueue(struct ixlv_sc *sc) -{ - int error = 0; - TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc); - - sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT, - taskqueue_thread_enqueue, &sc->tq); - taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq", - device_get_nameunit(sc->dev)); - - return (error); -} - -/********************************************************************* - * - * Setup MSIX Interrupt resources and handlers for the VSI queues - * - **********************************************************************/ -static int -ixlv_assign_msix(struct ixlv_sc *sc) -{ - device_t dev = sc->dev; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; - struct tx_ring *txr; - int error, rid, vector = 1; -#ifdef RSS - cpuset_t cpu_mask; -#endif - - for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { - int cpu_id = i; - rid = vector + 1; - txr = &que->txr; - que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, - RF_SHAREABLE | RF_ACTIVE); - if (que->res == NULL) { - device_printf(dev,"Unable to allocate" - " bus resource: que interrupt [%d]\n", vector); - return (ENXIO); - } - /* Set the handler function */ - error = bus_setup_intr(dev, que->res, - INTR_TYPE_NET | INTR_MPSAFE, NULL, - ixlv_msix_que, que, &que->tag); - if (error) { - que->tag = NULL; - device_printf(dev, "Failed to register que handler"); - return (error); - } - bus_describe_intr(dev, que->res, que->tag, "que %d", i); - /* Bind the vector to a CPU */ -#ifdef RSS - cpu_id = rss_getcpu(i % rss_getnumbuckets()); -#endif - bus_bind_intr(dev, que->res, cpu_id); - que->msix = vector; - TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); - TASK_INIT(&que->task, 0, ixlv_handle_que, que); - que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT, - taskqueue_thread_enqueue, &que->tq); -#ifdef RSS - CPU_SETOF(cpu_id, &cpu_mask); - taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, - &cpu_mask, "%s (bucket %d)", - device_get_nameunit(dev), cpu_id); -#else - taskqueue_start_threads(&que->tq, 1, PI_NET, - "%s que", device_get_nameunit(dev)); -#endif - - } - - return (0); -} - /* ** Requests a VF reset from the PF. ** ** Requires the VF's Admin Queue to be initialized. 
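** [Editor's note] Completion is detected by ixlv_reset_complete() below, which
** polls the I40E_VFGEN_RSTAT register for the VFACTIVE/COMPLETED states for up
** to roughly ten seconds (100 polls, 100 ms apart) before giving up with EBUSY.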
*/ static int ixlv_reset(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; int error = 0; /* Ask the PF to reset us if we are initiating */ if (sc->init_state != IXLV_RESET_PENDING) ixlv_request_reset(sc); i40e_msec_pause(100); error = ixlv_reset_complete(hw); if (error) { device_printf(dev, "%s: VF reset failed\n", __func__); return (error); } error = i40e_shutdown_adminq(hw); if (error) { device_printf(dev, "%s: shutdown_adminq failed: %d\n", __func__, error); return (error); } error = i40e_init_adminq(hw); if (error) { device_printf(dev, "%s: init_adminq failed: %d\n", __func__, error); return(error); } return (0); } static int ixlv_reset_complete(struct i40e_hw *hw) { u32 reg; /* Wait up to ~10 seconds */ for (int i = 0; i < 100; i++) { reg = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; if ((reg == VIRTCHNL_VFR_VFACTIVE) || (reg == VIRTCHNL_VFR_COMPLETED)) return (0); i40e_msec_pause(100); } return (EBUSY); } - -/********************************************************************* - * - * Setup networking device structure and register an interface. - * - **********************************************************************/ -static int -ixlv_setup_interface(device_t dev, struct ixlv_sc *sc) +static void +ixlv_setup_interface(device_t dev, struct ixl_vsi *vsi) { - struct ifnet *ifp; - struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; + if_ctx_t ctx = vsi->ctx; + struct ixlv_sc *sc = vsi->back; + struct ifnet *ifp = iflib_get_ifp(ctx); + uint64_t cap; + //struct ixl_queue *que = vsi->queues; INIT_DBG_DEV(dev, "begin"); - ifp = vsi->ifp = if_alloc(IFT_ETHER); - if (ifp == NULL) { - device_printf(dev, "%s: could not allocate ifnet" - " structure!\n", __func__); - return (-1); - } - - if_initname(ifp, device_get_name(dev), device_get_unit(dev)); - - ifp->if_mtu = ETHERMTU; + /* initialize fast path functions */ + cap = IXL_CAPS; + if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); + if_setcapabilitiesbit(ifp, cap, 0); + if_setcapenable(ifp, if_getcapabilities(ifp)); + /* TODO: Remove VLAN_ENCAP_LEN? */ + vsi->shared->isc_max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + ETHER_VLAN_ENCAP_LEN; #if __FreeBSD_version >= 1100000 - ifp->if_baudrate = IF_Gbps(40); + if_setbaudrate(ifp, IF_Gbps(40)); #else if_initbaudrate(ifp, IF_Gbps(40)); #endif - ifp->if_init = ixlv_init; - ifp->if_softc = vsi; - ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; - ifp->if_ioctl = ixlv_ioctl; -#if __FreeBSD_version >= 1100000 - if_setgetcounterfn(ifp, ixl_get_counter); -#endif - - ifp->if_transmit = ixl_mq_start; - - ifp->if_qflush = ixl_qflush; - ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2; - - ether_ifattach(ifp, sc->hw.mac.addr); - - vsi->max_frame_size = - ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN - + ETHER_VLAN_ENCAP_LEN; - - ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN); - ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS; - ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE; - /* - * Tell the upper layer(s) we support long frames. 
- */ - ifp->if_hdrlen = sizeof(struct ether_vlan_header); - - ifp->if_capabilities |= IFCAP_HWCSUM; - ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; - ifp->if_capabilities |= IFCAP_TSO; - ifp->if_capabilities |= IFCAP_JUMBO_MTU; - - ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING - | IFCAP_VLAN_HWTSO - | IFCAP_VLAN_MTU - | IFCAP_VLAN_HWCSUM - | IFCAP_LRO; - ifp->if_capenable = ifp->if_capabilities; - - /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the ixl driver you can ** enable this and get full hardware tag filtering. */ - ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; + if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0); - /* - * Specify the media types supported by this adapter and register - * callbacks to update media and link information - */ - ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change, - ixlv_media_status); - /* Media types based on reported link speed over AdminQ */ ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); INIT_DBG_DEV(dev, "end"); return (0); } +#if 0 /* ** Allocate and setup a single queue */ static int ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que) { device_t dev = sc->dev; struct tx_ring *txr; struct rx_ring *rxr; int rsize, tsize; int error = I40E_SUCCESS; txr = &que->txr; txr->que = que; txr->tail = I40E_QTX_TAIL1(que->me); /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), que->me); mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); /* * Create the TX descriptor ring * * In Head Writeback mode, the descriptor ring is one bigger * than the number of descriptors for space for the HW to * write back index of last completed descriptor. */ if (sc->vsi.enable_head_writeback) { tsize = roundup2((que->num_tx_desc * sizeof(struct i40e_tx_desc)) + sizeof(u32), DBA_ALIGN); } else { tsize = roundup2((que->num_tx_desc * sizeof(struct i40e_tx_desc)), DBA_ALIGN); } if (i40e_allocate_dma_mem(&sc->hw, &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_destroy_tx_mtx; } txr->base = (struct i40e_tx_desc *)txr->dma.va; bzero((void *)txr->base, tsize); /* Now allocate transmit soft structs for the ring */ if (ixl_allocate_tx_data(que)) { device_printf(dev, "Critical Failure setting up TX structures\n"); error = ENOMEM; goto err_free_tx_dma; } /* Allocate a buf ring */ txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF, M_WAITOK, &txr->mtx); if (txr->br == NULL) { device_printf(dev, "Critical Failure setting up TX buf ring\n"); error = ENOMEM; goto err_free_tx_data; } /* * Next the RX queues... 
*/ rsize = roundup2(que->num_rx_desc * sizeof(union i40e_rx_desc), DBA_ALIGN); rxr = &que->rxr; rxr->que = que; rxr->tail = I40E_QRX_TAIL1(que->me); /* Initialize the RX side lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), que->me); mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); if (i40e_allocate_dma_mem(&sc->hw, &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA? device_printf(dev, "Unable to allocate RX Descriptor memory\n"); error = ENOMEM; goto err_destroy_rx_mtx; } rxr->base = (union i40e_rx_desc *)rxr->dma.va; bzero((void *)rxr->base, rsize); /* Allocate receive soft structs for the ring */ if (ixl_allocate_rx_data(que)) { device_printf(dev, "Critical Failure setting up receive structs\n"); error = ENOMEM; goto err_free_rx_dma; } return (0); err_free_rx_dma: i40e_free_dma_mem(&sc->hw, &rxr->dma); err_destroy_rx_mtx: mtx_destroy(&rxr->mtx); /* err_free_tx_buf_ring */ buf_ring_free(txr->br, M_DEVBUF); err_free_tx_data: ixl_free_que_tx(que); err_free_tx_dma: i40e_free_dma_mem(&sc->hw, &txr->dma); err_destroy_tx_mtx: mtx_destroy(&txr->mtx); return (error); } +#endif /* ** Allocate and setup the interface queues */ static int ixlv_setup_queues(struct ixlv_sc *sc) { device_t dev = sc->dev; struct ixl_vsi *vsi; struct ixl_queue *que; int i; int error = I40E_SUCCESS; vsi = &sc->vsi; vsi->back = (void *)sc; vsi->hw = &sc->hw; vsi->num_vlans = 0; /* Get memory for the station queues */ if (!(vsi->queues = (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); return ENOMEM; } for (i = 0; i < vsi->num_queues; i++) { que = &vsi->queues[i]; que->num_tx_desc = vsi->num_tx_desc; que->num_rx_desc = vsi->num_rx_desc; que->me = i; que->vsi = vsi; if (ixlv_setup_queue(sc, que)) { error = ENOMEM; goto err_free_queues; } } return (0); err_free_queues: while (i--) ixlv_free_queue(sc, &vsi->queues[i]); free(vsi->queues, M_DEVBUF); return (error); } +#if 0 /* ** This routine is run via an vlan config EVENT, ** it enables us to use the HW Filter table since ** we can get the vlan id. This just creates the ** entry in the soft version of the VFTA, init will ** repopulate the real table. */ static void ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = arg; struct ixlv_sc *sc = vsi->back; struct ixlv_vlan_filter *v; if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; /* Sanity check - make sure it doesn't already exist */ SLIST_FOREACH(v, sc->vlan_filters, next) { if (v->vlan == vtag) return; } mtx_lock(&sc->mtx); ++vsi->num_vlans; v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INSERT_HEAD(sc->vlan_filters, v, next); v->vlan = vtag; v->flags = IXL_FILTER_ADD; ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc); mtx_unlock(&sc->mtx); return; } /* ** This routine is run via an vlan ** unconfig EVENT, remove our entry ** in the soft vfta. 
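** [Editor's note] Both of these legacy vlan_config/vlan_unconfig EVENT handlers
** are compiled out (#if 0) by the iflib conversion; VLAN add/remove now arrives
** through the ixlv_if_vlan_register()/ixlv_if_vlan_unregister() methods above.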
*/ static void ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = arg; struct ixlv_sc *sc = vsi->back; struct ixlv_vlan_filter *v; int i = 0; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; mtx_lock(&sc->mtx); SLIST_FOREACH(v, sc->vlan_filters, next) { if (v->vlan == vtag) { v->flags = IXL_FILTER_DEL; ++i; --vsi->num_vlans; } } if (i) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd, IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc); mtx_unlock(&sc->mtx); return; } +#endif /* ** Get a new filter and add it to the mac filter list. */ static struct ixlv_mac_filter * ixlv_get_mac_filter(struct ixlv_sc *sc) { struct ixlv_mac_filter *f; f = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (f) SLIST_INSERT_HEAD(sc->mac_filters, f, next); return (f); } /* ** Find the filter with matching MAC address */ static struct ixlv_mac_filter * ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr) { struct ixlv_mac_filter *f; bool match = FALSE; SLIST_FOREACH(f, sc->mac_filters, next) { if (cmp_etheraddr(f->macaddr, macaddr)) { match = TRUE; break; } } if (!match) f = NULL; return (f); } -static int -ixlv_teardown_adminq_msix(struct ixlv_sc *sc) -{ - device_t dev = sc->dev; - int error = 0; - - if (sc->tag != NULL) { - bus_teardown_intr(dev, sc->res, sc->tag); - if (error) { - device_printf(dev, "bus_teardown_intr() for" - " interrupt 0 failed\n"); - // return (ENXIO); - } - sc->tag = NULL; - } - if (sc->res != NULL) { - bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res); - if (error) { - device_printf(dev, "bus_release_resource() for" - " interrupt 0 failed\n"); - // return (ENXIO); - } - sc->res = NULL; - } - - return (0); - -} - /* ** Admin Queue interrupt handler */ -static void +static int ixlv_msix_adminq(void *arg) { struct ixlv_sc *sc = arg; struct i40e_hw *hw = &sc->hw; - u32 reg, mask; + // device_t dev = sc->dev; + u32 reg; + bool do_task = FALSE; + ++sc->admin_irq; + reg = rd32(hw, I40E_VFINT_ICR01); mask = rd32(hw, I40E_VFINT_ICR0_ENA1); reg = rd32(hw, I40E_VFINT_DYN_CTL01); reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, reg); - /* schedule task */ - taskqueue_enqueue(sc->tq, &sc->aq_irq); - return; + /* Check on the cause */ + if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) + do_task = TRUE; + + if (do_task) + iflib_admin_intr_deferred(sc->vsi.ctx); + else + ixlv_enable_adminq_irq(hw); + + return (FILTER_HANDLED); } void ixlv_enable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; - struct ixl_queue *que = vsi->queues; + struct ixl_rx_queue *que = vsi->rx_queues; ixlv_enable_adminq_irq(hw); - for (int i = 0; i < vsi->num_queues; i++, que++) - ixlv_enable_queue_irq(hw, que->me); + for (int i = 0; i < vsi->num_rx_queues; i++, que++) + ixlv_enable_queue_irq(hw, que->rxr.me); } void ixlv_disable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; - struct ixl_queue *que = vsi->queues; + struct ixl_rx_queue *que = vsi->rx_queues; ixlv_disable_adminq_irq(hw); - for (int i = 0; i < vsi->num_queues; i++, que++) - ixlv_disable_queue_irq(hw, que->me); + for (int i = 0; i < vsi->num_rx_queues; i++, que++) + ixlv_disable_queue_irq(hw, que->rxr.me); } - static void ixlv_disable_adminq_irq(struct i40e_hw *hw) { wr32(hw, I40E_VFINT_DYN_CTL01, 0); wr32(hw, I40E_VFINT_ICR0_ENA1, 0); /* flush */ rd32(hw, I40E_VFGEN_RSTAT); - return; } static void ixlv_enable_adminq_irq(struct i40e_hw *hw) { wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | 
I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); /* flush */ rd32(hw, I40E_VFGEN_RSTAT); - return; } static void ixlv_enable_queue_irq(struct i40e_hw *hw, int id) { u32 reg; reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK | I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg); } static void ixlv_disable_queue_irq(struct i40e_hw *hw, int id) { wr32(hw, I40E_VFINT_DYN_CTLN1(id), I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); rd32(hw, I40E_VFGEN_RSTAT); return; } /* * Get initial ITR values from tunable values. */ static void ixlv_configure_itr(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_rx_queue *rx_que = vsi->rx_queues; vsi->rx_itr_setting = ixlv_rx_itr; - vsi->tx_itr_setting = ixlv_tx_itr; + //vsi->tx_itr_setting = ixlv_tx_itr; - for (int i = 0; i < vsi->num_queues; i++, que++) { - struct tx_ring *txr = &que->txr; - struct rx_ring *rxr = &que->rxr; + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) { + struct rx_ring *rxr = &rx_que->rxr; wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; rxr->latency = IXL_AVE_LATENCY; +#if 0 + struct tx_ring *txr = &que->txr; wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; txr->latency = IXL_AVE_LATENCY; +#endif } } /* ** Provide an update to the queue RX ** interrupt moderation value. */ static void -ixlv_set_queue_rx_itr(struct ixl_queue *que) +ixlv_set_queue_rx_itr(struct ixl_rx_queue *que) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct rx_ring *rxr = &que->rxr; u16 rx_itr; u16 rx_latency = 0; int rx_bytes; /* Idle, do nothing */ if (rxr->bytes == 0) return; if (ixlv_dynamic_rx_itr) { rx_bytes = rxr->bytes/rxr->itr; rx_itr = rxr->itr; /* Adjust latency range */ switch (rxr->latency) { case IXL_LOW_LATENCY: if (rx_bytes > 10) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (rx_bytes > 20) { rx_latency = IXL_BULK_LATENCY; rx_itr = IXL_ITR_8K; } else if (rx_bytes <= 10) { rx_latency = IXL_LOW_LATENCY; rx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (rx_bytes <= 20) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; } rxr->latency = rx_latency; if (rx_itr != rxr->itr) { /* do an exponential smoothing */ rx_itr = (10 * rx_itr * rxr->itr) / ((9 * rx_itr) + rxr->itr); rxr->itr = min(rx_itr, IXL_MAX_ITR); wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, - que->me), rxr->itr); + que->rxr.me), rxr->itr); } } else { /* We may have toggled to non-dynamic */ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) vsi->rx_itr_setting = ixlv_rx_itr; /* Update the hardware if needed */ if (rxr->itr != vsi->rx_itr_setting) { rxr->itr = vsi->rx_itr_setting; wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, - que->me), rxr->itr); + que->rxr.me), rxr->itr); } } rxr->bytes = 0; rxr->packets = 0; return; } /* ** Provide an update to the queue TX ** interrupt moderation value.
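** [Editor's note] The TX path below mirrors the RX logic above. When dynamic ITR
** is enabled, the new interval is blended with the current one using
**     new = (10 * target * current) / (9 * target + current)
** so the value drifts toward the target instead of jumping. For example, moving
** from IXL_ITR_20K (0x19 = 25) toward IXL_ITR_8K (0x3E = 62) gives
** (10 * 62 * 25) / (9 * 62 + 25) = 15500 / 583, i.e. 26 after the first pass.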
*/ static void -ixlv_set_queue_tx_itr(struct ixl_queue *que) +ixlv_set_queue_tx_itr(struct ixl_tx_queue *que) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; u16 tx_latency = 0; int tx_bytes; /* Idle, do nothing */ if (txr->bytes == 0) return; if (ixlv_dynamic_tx_itr) { tx_bytes = txr->bytes/txr->itr; tx_itr = txr->itr; switch (txr->latency) { case IXL_LOW_LATENCY: if (tx_bytes > 10) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (tx_bytes > 20) { tx_latency = IXL_BULK_LATENCY; tx_itr = IXL_ITR_8K; } else if (tx_bytes <= 10) { tx_latency = IXL_LOW_LATENCY; tx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (tx_bytes <= 20) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; } txr->latency = tx_latency; if (tx_itr != txr->itr) { /* do an exponential smoothing */ tx_itr = (10 * tx_itr * txr->itr) / ((9 * tx_itr) + txr->itr); txr->itr = min(tx_itr, IXL_MAX_ITR); wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, - que->me), txr->itr); + que->txr.me), txr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) vsi->tx_itr_setting = ixlv_tx_itr; /* Update the hardware if needed */ if (txr->itr != vsi->tx_itr_setting) { txr->itr = vsi->tx_itr_setting; wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, - que->me), txr->itr); + que->txr.me), txr->itr); } } txr->bytes = 0; txr->packets = 0; return; } - +#if 0 /* ** ** MSIX Interrupt Handlers and Tasklets ** */ static void ixlv_handle_que(void *context, int pending) { struct ixl_queue *que = context; struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; struct ifnet *ifp = vsi->ifp; bool more; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { more = ixl_rxeof(que, IXL_RX_LIMIT); mtx_lock(&txr->mtx); ixl_txeof(que); if (!drbr_empty(ifp, txr->br)) ixl_mq_start_locked(ifp, txr); mtx_unlock(&txr->mtx); if (more) { taskqueue_enqueue(que->tq, &que->task); return; } } /* Reenable this interrupt - hmmm */ ixlv_enable_queue_irq(hw, que->me); return; } +#endif -/********************************************************************* - * - * MSIX Queue Interrupt Service routine - * - **********************************************************************/ -static void +static int ixlv_msix_que(void *arg) { - struct ixl_queue *que = arg; - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; - struct tx_ring *txr = &que->txr; - bool more_tx, more_rx; + struct ixl_rx_queue *que = arg; - /* Spurious interrupts are ignored */ - if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) - return; - ++que->irqs; - more_rx = ixl_rxeof(que, IXL_RX_LIMIT); - - mtx_lock(&txr->mtx); - more_tx = ixl_txeof(que); - /* - ** Make certain that if the stack - ** has anything queued the task gets - ** scheduled to handle it. - */ - if (!drbr_empty(vsi->ifp, txr->br)) - more_tx = 1; - mtx_unlock(&txr->mtx); - ixlv_set_queue_rx_itr(que); ixlv_set_queue_tx_itr(que); - if (more_tx || more_rx) - taskqueue_enqueue(que->tq, &que->task); - else - ixlv_enable_queue_irq(hw, que->me); - - return; + return (FILTER_SCHEDULE_THREAD); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. 
* **********************************************************************/ static void ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct ixl_vsi *vsi = ifp->if_softc; struct ixlv_sc *sc = vsi->back; INIT_DBG_IF(ifp, "begin"); mtx_lock(&sc->mtx); ixlv_update_link_status(sc); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!sc->link_up) { mtx_unlock(&sc->mtx); INIT_DBG_IF(ifp, "end: link not up"); return; } ifmr->ifm_status |= IFM_ACTIVE; /* Hardware is always full-duplex */ ifmr->ifm_active |= IFM_FDX; /* Based on the link speed reported by the PF over the AdminQ, choose a * PHY type to report. This isn't 100% correct since we don't really * know the underlying PHY type of the PF, but at least we can report * a valid link speed... */ switch (sc->link_speed) { case VIRTCHNL_LINK_SPEED_100MB: ifmr->ifm_active |= IFM_100_TX; break; case VIRTCHNL_LINK_SPEED_1GB: ifmr->ifm_active |= IFM_1000_T; break; case VIRTCHNL_LINK_SPEED_10GB: ifmr->ifm_active |= IFM_10G_SR; break; case VIRTCHNL_LINK_SPEED_20GB: case VIRTCHNL_LINK_SPEED_25GB: ifmr->ifm_active |= IFM_25G_SR; break; case VIRTCHNL_LINK_SPEED_40GB: ifmr->ifm_active |= IFM_40G_SR4; break; default: ifmr->ifm_active |= IFM_UNKNOWN; break; } mtx_unlock(&sc->mtx); INIT_DBG_IF(ifp, "end"); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. * **********************************************************************/ static int ixlv_media_change(struct ifnet * ifp) { struct ixl_vsi *vsi = ifp->if_softc; struct ifmedia *ifm = &vsi->media; INIT_DBG_IF(ifp, "begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if_printf(ifp, "Changing speed is not supported\n"); INIT_DBG_IF(ifp, "end"); return (ENODEV); } +#if 0 /********************************************************************* * Multicast Initialization * * This routine is called by init to reset a fresh state. * **********************************************************************/ static void ixlv_init_multi(struct ixl_vsi *vsi) { struct ixlv_mac_filter *f; struct ixlv_sc *sc = vsi->back; int mcnt = 0; IOCTL_DBG_IF(vsi->ifp, "begin"); /* First clear any multicast filters */ SLIST_FOREACH(f, sc->mac_filters, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { f->flags |= IXL_FILTER_DEL; mcnt++; } } if (mcnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd, IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, sc); IOCTL_DBG_IF(vsi->ifp, "end"); } static void ixlv_add_multi(struct ixl_vsi *vsi) { struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct ixlv_sc *sc = vsi->back; int mcnt = 0; IOCTL_DBG_IF(ifp, "begin"); if_maddr_rlock(ifp); /* ** Get a count, to decide if we ** simply use multicast promiscuous. 
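** [Editor's note] If more than MAX_MULTICAST_ADDR (128) groups are joined, the
** code below stops programming individual filters and requests multicast
** promiscuous mode instead (though the TODO below notes that a VF may not be
** permitted to do so).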
*/ CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcnt++; } if_maddr_runlock(ifp); /* TODO: Remove -- cannot set promiscuous mode in a VF */ if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete all multicast filters */ ixlv_init_multi(vsi); sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC; ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete, sc); IOCTL_DEBUGOUT("%s: end: too many filters", __func__); return; } mcnt = 0; if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (!ixlv_add_mac_filter(sc, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr), IXL_FILTER_MC)) mcnt++; } if_maddr_runlock(ifp); /* ** Notify AQ task that sw filters need to be ** added to hw list */ if (mcnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, sc); IOCTL_DBG_IF(ifp, "end"); } static void ixlv_del_multi(struct ixl_vsi *vsi) { struct ixlv_mac_filter *f; struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct ixlv_sc *sc = vsi->back; int mcnt = 0; bool match = FALSE; IOCTL_DBG_IF(ifp, "begin"); /* Search for removed multicast addresses */ if_maddr_rlock(ifp); SLIST_FOREACH(f, sc->mac_filters, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { /* check if mac address in filter is in sc's list */ match = FALSE; CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); if (cmp_etheraddr(f->macaddr, mc_addr)) { match = TRUE; break; } } /* if this filter is not in the sc's list, remove it */ if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) { f->flags |= IXL_FILTER_DEL; mcnt++; IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); } else if (match == FALSE) IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); } } if_maddr_runlock(ifp); if (mcnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd, IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, sc); IOCTL_DBG_IF(ifp, "end"); } -/********************************************************************* - * Timer routine - * - * This routine checks for link status,updates statistics, - * and runs the watchdog check. - * - **********************************************************************/ - static void ixlv_local_timer(void *arg) { struct ixlv_sc *sc = arg; struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; u32 val; IXLV_CORE_LOCK_ASSERT(sc); /* If Reset is in progress just bail */ if (sc->init_state == IXLV_RESET_PENDING) return; /* Check for when PF triggers a VF reset */ val = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; if (val != VIRTCHNL_VFR_VFACTIVE && val != VIRTCHNL_VFR_COMPLETED) { DDPRINTF(sc->dev, "reset in progress! (%d)", val); return; } ixlv_request_stats(sc); /* clean and process any events */ taskqueue_enqueue(sc->tq, &sc->aq_irq); /* Increment stat when a queue shows hung */ if (ixl_queue_hang_check(vsi)) sc->watchdog_events++; callout_reset(&sc->timer, hz, ixlv_local_timer, sc); } /* ** Note: this routine updates the OS on the link state ** the real check of the hardware only happens with ** a link interrupt. 
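** [Editor's note] For a VF there is no link interrupt as such: link state is
** learned from link-change events the PF delivers over the Admin Queue, which
** update sc->link_up and sc->link_speed before this routine is called.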
*/ void ixlv_update_link_status(struct ixlv_sc *sc) { struct ixl_vsi *vsi = &sc->vsi; struct ifnet *ifp = vsi->ifp; if (sc->link_up){ if (vsi->link_active == FALSE) { if (bootverbose) if_printf(ifp,"Link is Up, %s\n", ixlv_vc_speed_to_string(sc->link_speed)); vsi->link_active = TRUE; if_link_state_change(ifp, LINK_STATE_UP); } } else { /* Link down */ if (vsi->link_active == TRUE) { if (bootverbose) if_printf(ifp,"Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); vsi->link_active = FALSE; } } return; } +#endif /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * **********************************************************************/ static void ixlv_stop(struct ixlv_sc *sc) { struct ifnet *ifp; int start; ifp = sc->vsi.ifp; INIT_DBG_IF(ifp, "begin"); - IXLV_CORE_LOCK_ASSERT(sc); - ixl_vc_flush(&sc->vc_mgr); ixlv_disable_queues(sc); start = ticks; while ((ifp->if_drv_flags & IFF_DRV_RUNNING) && ((ticks - start) < hz/10)) ixlv_do_adminq_locked(sc); /* Stop the local timer */ callout_stop(&sc->timer); INIT_DBG_IF(ifp, "end"); } -/* Free a single queue struct */ static void -ixlv_free_queue(struct ixlv_sc *sc, struct ixl_queue *que) +ixlv_if_stop(if_ctx_t ctx) { - struct tx_ring *txr = &que->txr; - struct rx_ring *rxr = &que->rxr; + struct ixl_vsi *vsi = iflib_get_softc(ctx); - if (!mtx_initialized(&txr->mtx)) /* uninitialized */ - return; - IXL_TX_LOCK(txr); - if (txr->br) - buf_ring_free(txr->br, M_DEVBUF); - ixl_free_que_tx(que); - if (txr->base) - i40e_free_dma_mem(&sc->hw, &txr->dma); - IXL_TX_UNLOCK(txr); - IXL_TX_LOCK_DESTROY(txr); - - if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ - return; - IXL_RX_LOCK(rxr); - ixl_free_que_rx(que); - if (rxr->base) - i40e_free_dma_mem(&sc->hw, &rxr->dma); - IXL_RX_UNLOCK(rxr); - IXL_RX_LOCK_DESTROY(rxr); + ixlv_stop(sc); } -/********************************************************************* - * - * Free all station queue structs. 
- * - **********************************************************************/ static void -ixlv_free_queues(struct ixl_vsi *vsi) -{ - struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back; - struct ixl_queue *que = vsi->queues; - - for (int i = 0; i < vsi->num_queues; i++, que++) { - /* First, free the MSI-X resources */ - ixlv_free_msix_resources(sc, que); - /* Then free other queue data */ - ixlv_free_queue(sc, que); - } - - free(vsi->queues, M_DEVBUF); -} - -static void ixlv_config_rss_reg(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; u32 lut = 0; u64 set_hena = 0, hena; int i, j, que_id; u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; #ifdef RSS u32 rss_hash_config; #endif /* Don't set up RSS if using a single queue */ - if (vsi->num_queues == 1) { + if (vsi->num_rx_queues == 1) { wr32(hw, I40E_VFQF_HENA(0), 0); wr32(hw, I40E_VFQF_HENA(1), 0); ixl_flush(hw); return; } #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #else ixl_get_default_rss_key(rss_seed); #endif /* Fill out hash function seed */ for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]); /* Enable PCTYPES for RSS: */ #ifdef RSS rss_hash_config = rss_gethashconfig(); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); #else set_hena = IXL_DEFAULT_RSS_HENA_XL710; #endif hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); hena |= set_hena; wr32(hw, I40E_VFQF_HENA(0), (u32)hena); wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. of queues in round robin fashion */ for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) { - if (j == vsi->num_queues) + if (j == vsi->num_rx_queues) j = 0; #ifdef RSS /* * Fetch the RSS bucket id for the given indirection entry. * Cap it at the number of configured buckets (which is * num_queues.) */ que_id = rss_get_indirection_to_bucket(i); que_id = que_id % vsi->num_queues; #else que_id = j; #endif /* lut = 4-byte sliding window of 4 lut entries */ lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK); /* On i = 3, we have 4 entries in lut; write to the register */ if ((i & 3) == 3) { wr32(hw, I40E_VFQF_HLUT(i >> 2), lut); DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut); } } ixl_flush(hw); } static void ixlv_config_rss_pf(struct ixlv_sc *sc) { ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd, IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc); ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd, IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc); ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd, IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc); } /* ** ixlv_config_rss - setup RSS ** ** RSS keys and table are cleared on VF reset. 
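** [Editor's note] Two paths are possible: with VIRTCHNL_VF_OFFLOAD_RSS_REG the VF
** programs its own HKEY/HLUT/HENA registers (ixlv_config_rss_reg), while with
** VIRTCHNL_VF_OFFLOAD_RSS_PF it queues virtchnl requests asking the PF to do it
** (ixlv_config_rss_pf). The register path packs four 8-bit queue indices into
** each I40E_VFQF_HLUT write, filling the 64-entry table round-robin across the
** RX queues.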
*/ static void ixlv_config_rss(struct ixlv_sc *sc) { if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) { DDPRINTF(sc->dev, "Setting up RSS using VF registers..."); ixlv_config_rss_reg(sc); } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { DDPRINTF(sc->dev, "Setting up RSS using messages to PF..."); ixlv_config_rss_pf(sc); } else device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n"); } /* ** This routine refreshes vlan filters, called by init ** it scans the filter table and then updates the AQ */ static void ixlv_setup_vlan_filters(struct ixlv_sc *sc) { struct ixl_vsi *vsi = &sc->vsi; struct ixlv_vlan_filter *f; int cnt = 0; if (vsi->num_vlans == 0) return; /* ** Scan the filter table for vlan entries, ** and if found call for the AQ update. */ SLIST_FOREACH(f, sc->vlan_filters, next) if (f->flags & IXL_FILTER_ADD) cnt++; if (cnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc); } /* ** This routine adds new MAC filters to the sc's list; ** these are later added in hardware by sending a virtual ** channel message. */ static int ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags) { struct ixlv_mac_filter *f; /* Does one already exist? */ f = ixlv_find_mac_filter(sc, macaddr); if (f != NULL) { IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT, MAC_FORMAT_ARGS(macaddr)); return (EEXIST); } /* If not, get a new empty filter */ f = ixlv_get_mac_filter(sc); if (f == NULL) { if_printf(sc->vsi.ifp, "%s: no filters available!!\n", __func__); return (ENOMEM); } IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT, MAC_FORMAT_ARGS(macaddr)); bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); f->flags |= flags; return (0); } /* ** Marks a MAC filter for deletion. 
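** [Editor's note] As with the add path above, this only flags the soft-state
** entry (IXL_FILTER_DEL); the hardware filter is removed later, when the
** corresponding virtual channel request is sent to the PF.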
*/ static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr) { struct ixlv_mac_filter *f; f = ixlv_find_mac_filter(sc, macaddr); if (f == NULL) return (ENOENT); f->flags |= IXL_FILTER_DEL; return (0); } -/* -** Tasklet handler for MSIX Adminq interrupts -** - done outside interrupt context since it might sleep -*/ static void -ixlv_do_adminq(void *context, int pending) -{ - struct ixlv_sc *sc = context; - - mtx_lock(&sc->mtx); - ixlv_do_adminq_locked(sc); - mtx_unlock(&sc->mtx); - return; -} - -static void ixlv_do_adminq_locked(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct i40e_arq_event_info event; struct virtchnl_msg *v_msg; device_t dev = sc->dev; u16 result = 0; u32 reg, oldreg; i40e_status ret; bool aq_error = false; - IXLV_CORE_LOCK_ASSERT(sc); - event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = sc->aq_buffer; v_msg = (struct virtchnl_msg *)&event.desc; do { ret = i40e_clean_arq_element(hw, &event, &result); if (ret) break; ixlv_vc_completion(sc, v_msg->v_opcode, v_msg->v_retval, event.msg_buf, event.msg_len); if (result != 0) bzero(event.msg_buf, IXL_AQ_BUF_SZ); } while (result); /* check for Admin queue errors */ oldreg = reg = rd32(hw, hw->aq.arq.len); if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) { device_printf(dev, "ARQ VF Error detected\n"); reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; aq_error = true; } if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) { device_printf(dev, "ARQ Overflow Error detected\n"); reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; aq_error = true; } if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) { device_printf(dev, "ARQ Critical Error detected\n"); reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; aq_error = true; } if (oldreg != reg) wr32(hw, hw->aq.arq.len, reg); oldreg = reg = rd32(hw, hw->aq.asq.len); if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) { device_printf(dev, "ASQ VF Error detected\n"); reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; aq_error = true; } if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) { device_printf(dev, "ASQ Overflow Error detected\n"); reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; aq_error = true; } if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) { device_printf(dev, "ASQ Critical Error detected\n"); reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; aq_error = true; } if (oldreg != reg) wr32(hw, hw->aq.asq.len, reg); if (aq_error) { /* Need to reset adapter */ device_printf(dev, "WARNING: Resetting!\n"); sc->init_state = IXLV_RESET_REQUIRED; ixlv_stop(sc); - ixlv_init_locked(sc); + // TODO: Make stop/init calls match + ixlv_if_init(sc->vsi.ctx); } ixlv_enable_adminq_irq(hw); } static void ixlv_add_sysctls(struct ixlv_sc *sc) { device_t dev = sc->dev; struct ixl_vsi *vsi = &sc->vsi; struct i40e_eth_stats *es = &vsi->eth_stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); - struct sysctl_oid *vsi_node, *queue_node; - struct sysctl_oid_list *vsi_list, *queue_list; + struct sysctl_oid *vsi_node; // *queue_node; + struct sysctl_oid_list *vsi_list; // *queue_list; #define QUEUE_NAME_LEN 32 - char queue_namebuf[QUEUE_NAME_LEN]; + //char queue_namebuf[QUEUE_NAME_LEN]; +#if 0 struct ixl_queue *queues = vsi->queues; - struct tx_ring *txr; + struct tX_ring *txr; struct rx_ring *rxr; +#endif /* Driver statistics sysctls */ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq", CTLFLAG_RD, &sc->admin_irq, "Admin Queue IRQ Handled"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size", CTLFLAG_RD, 
&vsi->num_tx_desc, 0, "TX ring size"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size", CTLFLAG_RD, &vsi->num_rx_desc, 0, "RX ring size"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ixlv_sysctl_current_speed, "A", "Current Port Speed"); /* VSI statistics sysctls */ vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi", CTLFLAG_RD, NULL, "VSI-specific statistics"); vsi_list = SYSCTL_CHILDREN(vsi_node); struct ixl_sysctl_info ctls[] = { {&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, {&es->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received"}, {&es->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received"}, {&es->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received"}, {&es->rx_discards, "rx_discards", "Discarded RX packets"}, {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"}, {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"}, {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"}, {&es->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted"}, {&es->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted"}, {&es->tx_errors, "tx_errors", "TX packet errors"}, // end {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != NULL) { SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } +#if 0 /* Queue sysctls */ for (int q = 0; q < vsi->num_queues; q++) { snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); txr = &(queues[q].txr); rxr = &(queues[q].rxr); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), "m_defrag() failed"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped", CTLFLAG_RD, &(queues[q].dropped_pkts), "Driver dropped packets"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs", CTLFLAG_RD, &(queues[q].irqs), "irqs on this queue"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &(queues[q].tso), "TSO"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed", CTLFLAG_RD, &(queues[q].tx_dmamap_failed), "Driver tx dma failure in xmit"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &(txr->no_desc), "Queue No Descriptor Available"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &(txr->total_packets), "Queue Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &(txr->tx_bytes), "Queue Bytes Transmitted"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &(rxr->rx_packets), "Queue Packets Received"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &(rxr->rx_bytes), "Queue Bytes Received"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr", CTLFLAG_RD, &(rxr->itr), 0, "Queue Rx ITR Interval"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr", CTLFLAG_RD, &(txr->itr), 0, "Queue Tx ITR Interval"); #ifdef IXL_DEBUG /* Examine queue state */ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head", CTLTYPE_UINT | CTLFLAG_RD, &queues[q], sizeof(struct ixl_queue), ixlv_sysctl_qtx_tail_handler, "IU", "Queue Transmit Descriptor Tail"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head", CTLTYPE_UINT | CTLFLAG_RD, &queues[q], sizeof(struct ixl_queue), ixlv_sysctl_qrx_tail_handler, "IU", "Queue Receive Descriptor Tail"); 
SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer", CTLFLAG_RD, &(txr.watchdog_timer), 0, "Ticks before watchdog event is triggered"); #endif } +#endif } static void ixlv_init_filters(struct ixlv_sc *sc) { sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INIT(sc->mac_filters); sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INIT(sc->vlan_filters); - return; } static void ixlv_free_filters(struct ixlv_sc *sc) { struct ixlv_mac_filter *f; struct ixlv_vlan_filter *v; while (!SLIST_EMPTY(sc->mac_filters)) { f = SLIST_FIRST(sc->mac_filters); SLIST_REMOVE_HEAD(sc->mac_filters, next); free(f, M_DEVBUF); } free(sc->mac_filters, M_DEVBUF); while (!SLIST_EMPTY(sc->vlan_filters)) { v = SLIST_FIRST(sc->vlan_filters); SLIST_REMOVE_HEAD(sc->vlan_filters, next); free(v, M_DEVBUF); } free(sc->vlan_filters, M_DEVBUF); - return; } static char * ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed) { int index; char *speeds[] = { "Unknown", "100 Mbps", "1 Gbps", "10 Gbps", "40 Gbps", "20 Gbps", "25 Gbps", }; switch (link_speed) { case VIRTCHNL_LINK_SPEED_100MB: index = 1; break; case VIRTCHNL_LINK_SPEED_1GB: index = 2; break; case VIRTCHNL_LINK_SPEED_10GB: index = 3; break; case VIRTCHNL_LINK_SPEED_40GB: index = 4; break; case VIRTCHNL_LINK_SPEED_20GB: index = 5; break; case VIRTCHNL_LINK_SPEED_25GB: index = 6; break; case VIRTCHNL_LINK_SPEED_UNKNOWN: default: index = 0; break; } return speeds[index]; } static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS) { struct ixlv_sc *sc = (struct ixlv_sc *)arg1; int error = 0; error = sysctl_handle_string(oidp, ixlv_vc_speed_to_string(sc->link_speed), 8, req); return (error); } #ifdef IXL_DEBUG /** * ixlv_sysctl_qtx_tail_handler * Retrieves I40E_QTX_TAIL1 value from hardware * for a sysctl. */ static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS) { struct ixl_queue *que; int error; u32 val; que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; val = rd32(que->vsi->hw, que->txr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } /** * ixlv_sysctl_qrx_tail_handler * Retrieves I40E_QRX_TAIL1 value from hardware * for a sysctl. */ static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS) { struct ixl_queue *que; int error; u32 val; que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; val = rd32(que->vsi->hw, que->rxr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } #endif Index: head/sys/dev/ixl/ixl.h =================================================================== --- head/sys/dev/ixl/ixl.h (revision 335337) +++ head/sys/dev/ixl/ixl.h (revision 335338) @@ -1,712 +1,550 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_H_ #define _IXL_H_ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include "opt_ixl.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #include #endif +#include "ifdi_if.h" #include "i40e_type.h" #include "i40e_prototype.h" +#include "ixl_debug.h" -#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x" -#define MAC_FORMAT_ARGS(mac_addr) \ - (mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \ - (mac_addr)[4], (mac_addr)[5] -#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off") +#define PVIDV(vendor, devid, name) \ + PVID(vendor, devid, name " - " IXL_DRIVER_VERSION_STRING) -#ifdef IXL_DEBUG - -#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__) -#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__) -#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__) - -/* Defines for printing generic debug information */ -#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__) -#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__) -#define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__) - -/* Defines for printing specific debug information */ -#define DEBUG_INIT 1 -#define DEBUG_IOCTL 1 -#define DEBUG_HW 1 - -#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__) -#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__) -#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__) - -#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__) -#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \ - if_printf(ifp, S "\n", ##__VA_ARGS__) -#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__) - -#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__) - -#else /* no IXL_DEBUG */ -#define DEBUG_INIT 0 -#define DEBUG_IOCTL 0 -#define DEBUG_HW 0 - -#define DPRINTF(...) -#define DDPRINTF(...) -#define IDPRINTF(...) - -#define INIT_DEBUGOUT(...) -#define INIT_DBG_DEV(...) -#define INIT_DBG_IF(...) -#define IOCTL_DEBUGOUT(...) -#define IOCTL_DBG_IF2(...) -#define IOCTL_DBG_IF(...) 
-#define HW_DEBUGOUT(...) -#endif /* IXL_DEBUG */ - -enum ixl_dbg_mask { - IXL_DBG_INFO = 0x00000001, - IXL_DBG_EN_DIS = 0x00000002, - IXL_DBG_AQ = 0x00000004, - IXL_DBG_NVMUPD = 0x00000008, - - IXL_DBG_IOCTL_KNOWN = 0x00000010, - IXL_DBG_IOCTL_UNKNOWN = 0x00000020, - IXL_DBG_IOCTL_ALL = 0x00000030, - - I40E_DEBUG_RSS = 0x00000100, - - IXL_DBG_IOV = 0x00001000, - IXL_DBG_IOV_VC = 0x00002000, - - IXL_DBG_SWITCH_INFO = 0x00010000, - IXL_DBG_I2C = 0x00020000, - - IXL_DBG_ALL = 0xFFFFFFFF -}; - /* Tunables */ /* * Ring Descriptors Valid Range: 32-4096 Default Value: 1024 This value is the * number of tx/rx descriptors allocated by the driver. Increasing this * value allows the driver to queue more operations. * * Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes. * The driver currently always uses 32 byte Rx descriptors. */ #define IXL_DEFAULT_RING 1024 #define IXL_MAX_RING 4096 #define IXL_MIN_RING 64 #define IXL_RING_INCREMENT 32 #define IXL_AQ_LEN 256 #define IXL_AQ_LEN_MAX 1024 -/* -** Default number of entries in Tx queue buf_ring. -*/ -#define DEFAULT_TXBRSZ 4096 - /* Alignment for rings */ #define DBA_ALIGN 128 -/* - * This is the max watchdog interval, ie. the time that can - * pass between any two TX clean operations, such only happening - * when the TX hardware is functioning. - * - * XXX: Watchdog currently counts down in units of (hz) - * Set this to just (hz) if you want queues to hang under a little bit of stress - */ -#define IXL_WATCHDOG (10 * hz) - -/* - * This parameters control when the driver calls the routine to reclaim - * transmit descriptors. - */ -#define IXL_TX_CLEANUP_THRESHOLD (que->num_tx_desc / 8) -#define IXL_TX_OP_THRESHOLD (que->num_tx_desc / 32) - #define MAX_MULTICAST_ADDR 128 #define IXL_MSIX_BAR 3 #define IXL_ADM_LIMIT 2 -#define IXL_TSO_SIZE 65535 +// TODO: Find out which TSO_SIZE to use +//#define IXL_TSO_SIZE 65535 +#define IXL_TSO_SIZE ((255*1024)-1) +#define IXL_TX_BUF_SZ ((u32) 1514) #define IXL_AQ_BUF_SZ ((u32) 4096) -#define IXL_RX_HDR 128 -#define IXL_RX_LIMIT 512 #define IXL_RX_ITR 0 #define IXL_TX_ITR 1 #define IXL_ITR_NONE 3 #define IXL_QUEUE_EOL 0x7FF #define IXL_MAX_FRAME 9728 #define IXL_MAX_TX_SEGS 8 +#define IXL_MAX_RX_SEGS 5 #define IXL_MAX_TSO_SEGS 128 #define IXL_SPARSE_CHAIN 7 -#define IXL_QUEUE_HUNG 0x80000000 #define IXL_MIN_TSO_MSS 64 #define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1) #define IXL_RSS_KEY_SIZE_REG 13 #define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4) #define IXL_RSS_VSI_LUT_SIZE 64 /* X722 -> VSI, X710 -> VF */ #define IXL_RSS_VSI_LUT_ENTRY_MASK 0x3F #define IXL_RSS_VF_LUT_ENTRY_MASK 0xF #define IXL_VF_MAX_BUFFER 0x3F80 #define IXL_VF_MAX_HDR_BUFFER 0x840 #define IXL_VF_MAX_FRAME 0x3FFF /* ERJ: hardware can support ~2k (SW5+) filters between all functions */ #define IXL_MAX_FILTERS 256 -#define IXL_MAX_TX_BUSY 10 #define IXL_NVM_VERSION_LO_SHIFT 0 #define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT) #define IXL_NVM_VERSION_HI_SHIFT 12 #define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT) /* * Interrupt Moderation parameters * Multiply ITR values by 2 for real ITR value */ #define IXL_MAX_ITR 0x0FF0 #define IXL_ITR_100K 0x0005 #define IXL_ITR_20K 0x0019 #define IXL_ITR_8K 0x003E #define IXL_ITR_4K 0x007A #define IXL_ITR_1K 0x01F4 #define IXL_ITR_DYNAMIC 0x8000 #define IXL_LOW_LATENCY 0 #define IXL_AVE_LATENCY 1 #define IXL_BULK_LATENCY 2 /* MacVlan Flags */ #define IXL_FILTER_USED (u16)(1 << 0) #define IXL_FILTER_VLAN (u16)(1 << 1) #define IXL_FILTER_ADD (u16)(1 << 2) #define 
IXL_FILTER_DEL (u16)(1 << 3) #define IXL_FILTER_MC (u16)(1 << 4) /* used in the vlan field of the filter when not a vlan */ #define IXL_VLAN_ANY -1 #define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) #define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6) #define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO) -/* Misc flags for ixl_vsi.flags */ -#define IXL_FLAGS_KEEP_TSO4 (1 << 0) -#define IXL_FLAGS_KEEP_TSO6 (1 << 1) -#define IXL_FLAGS_USES_MSIX (1 << 2) -#define IXL_FLAGS_IS_VF (1 << 3) - #define IXL_VF_RESET_TIMEOUT 100 #define IXL_VSI_DATA_PORT 0x01 #define IXLV_MAX_QUEUES 16 #define IXL_MAX_VSI_QUEUES (2 * (I40E_VSILAN_QTABLE_MAX_INDEX + 1)) #define IXL_RX_CTX_BASE_UNITS 128 #define IXL_TX_CTX_BASE_UNITS 128 +#if 0 #define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \ I40E_VPINT_LNKLSTN(((vector) - 1) + \ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) #define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \ I40E_VFINT_DYN_CTLN(((vector) - 1) + \ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) +#endif #define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS 0xAA #define IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK 0x20 #define IXL_GLGEN_VFLRSTAT_INDEX(glb_vf) ((glb_vf) / 32) #define IXL_GLGEN_VFLRSTAT_MASK(glb_vf) (1 << ((glb_vf) % 32)) #define IXL_MAX_ITR_IDX 3 #define IXL_END_OF_INTR_LNKLST 0x7FF #define IXL_DEFAULT_RSS_HENA_BASE (\ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) #define IXL_DEFAULT_RSS_HENA_XL710 IXL_DEFAULT_RSS_HENA_BASE #define IXL_DEFAULT_RSS_HENA_X722 (\ IXL_DEFAULT_RSS_HENA_BASE | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) -#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx) -#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) -#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) -#define IXL_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx) -#define IXL_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED) +#define IXL_CAPS \ + (IFCAP_TSO4 | IFCAP_TSO6 | \ + IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | \ + IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | \ + IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO | \ + IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | \ + IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU | IFCAP_LRO) -#define IXL_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx) -#define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) -#define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) +#define IXL_CSUM_TCP \ + (CSUM_IP_TCP|CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP6_TCP) +#define IXL_CSUM_UDP \ + (CSUM_IP_UDP|CSUM_IP6_UDP) +#define IXL_CSUM_SCTP \ + (CSUM_IP_SCTP|CSUM_IP6_SCTP) /* Pre-11 counter(9) compatibility */ #if __FreeBSD_version >= 1100036 #define IXL_SET_IPACKETS(vsi, count) (vsi)->ipackets = (count) #define IXL_SET_IERRORS(vsi, count) (vsi)->ierrors = (count) #define IXL_SET_OPACKETS(vsi, count) (vsi)->opackets = (count) #define IXL_SET_OERRORS(vsi, count) 
(vsi)->oerrors = (count) #define IXL_SET_COLLISIONS(vsi, count) /* Do nothing; collisions is always 0. */ #define IXL_SET_IBYTES(vsi, count) (vsi)->ibytes = (count) #define IXL_SET_OBYTES(vsi, count) (vsi)->obytes = (count) #define IXL_SET_IMCASTS(vsi, count) (vsi)->imcasts = (count) #define IXL_SET_OMCASTS(vsi, count) (vsi)->omcasts = (count) #define IXL_SET_IQDROPS(vsi, count) (vsi)->iqdrops = (count) #define IXL_SET_OQDROPS(vsi, count) (vsi)->oqdrops = (count) #define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count) #else #define IXL_SET_IPACKETS(vsi, count) (vsi)->ifp->if_ipackets = (count) #define IXL_SET_IERRORS(vsi, count) (vsi)->ifp->if_ierrors = (count) #define IXL_SET_OPACKETS(vsi, count) (vsi)->ifp->if_opackets = (count) #define IXL_SET_OERRORS(vsi, count) (vsi)->ifp->if_oerrors = (count) #define IXL_SET_COLLISIONS(vsi, count) (vsi)->ifp->if_collisions = (count) #define IXL_SET_IBYTES(vsi, count) (vsi)->ifp->if_ibytes = (count) #define IXL_SET_OBYTES(vsi, count) (vsi)->ifp->if_obytes = (count) #define IXL_SET_IMCASTS(vsi, count) (vsi)->ifp->if_imcasts = (count) #define IXL_SET_OMCASTS(vsi, count) (vsi)->ifp->if_omcasts = (count) #define IXL_SET_IQDROPS(vsi, count) (vsi)->ifp->if_iqdrops = (count) #define IXL_SET_OQDROPS(vsi, odrops) (vsi)->ifp->if_snd.ifq_drops = (odrops) #define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count) #endif +#define IXL_DEV_ERR(_dev, _format, ...) \ + device_printf(_dev, "%s: " _format " (%s:%d)\n", __func__, ##__VA_ARGS__, __FILE__, __LINE__) + /* ***************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * ***************************************************************************** */ typedef struct _ixl_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } ixl_vendor_info_t; - -struct ixl_tx_buf { - u32 eop_index; - struct mbuf *m_head; - bus_dmamap_t map; - bus_dma_tag_t tag; -}; - -struct ixl_rx_buf { - struct mbuf *m_head; - struct mbuf *m_pack; - struct mbuf *fmp; - bus_dmamap_t hmap; - bus_dmamap_t pmap; -}; - /* ** This struct has multiple uses, multicast ** addresses, vlans, and mac filters all use it. 
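** Each entry holds a MAC address, a VLAN ID (IXL_VLAN_ANY when the
** filter is not tied to a VLAN), and IXL_FILTER_* state flags.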
*/ struct ixl_mac_filter { SLIST_ENTRY(ixl_mac_filter) next; u8 macaddr[ETHER_ADDR_LEN]; s16 vlan; u16 flags; }; /* * The Transmit ring control struct */ struct tx_ring { - struct ixl_queue *que; - struct mtx mtx; + struct ixl_tx_queue *que; u32 tail; - struct i40e_tx_desc *base; - struct i40e_dma_mem dma; - u16 next_avail; - u16 next_to_clean; - u16 atr_rate; - u16 atr_count; - u32 itr; + struct i40e_tx_desc *tx_base; + u64 tx_paddr; u32 latency; - struct ixl_tx_buf *buffers; - volatile u16 avail; - u32 cmd; - bus_dma_tag_t tx_tag; - bus_dma_tag_t tso_tag; - char mtx_name[16]; - struct buf_ring *br; - s32 watchdog_timer; + u32 packets; + u32 me; + /* + * For reporting completed packet status + * in descriptor writeback mode + */ + qidx_t *tx_rsq; + qidx_t tx_rs_cidx; + qidx_t tx_rs_pidx; + qidx_t tx_cidx_processed; /* Used for Dynamic ITR calculation */ - u32 packets; + u32 itr; u32 bytes; /* Soft Stats */ u64 tx_bytes; - u64 no_desc; - u64 total_packets; + u64 tx_packets; + u64 mss_too_small; }; /* * The Receive ring control struct */ struct rx_ring { - struct ixl_queue *que; - struct mtx mtx; - union i40e_rx_desc *base; - struct i40e_dma_mem dma; - struct lro_ctrl lro; - bool lro_enabled; - bool hdr_split; + struct ixl_rx_queue *que; + union i40e_rx_desc *rx_base; + uint64_t rx_paddr; bool discard; - u32 next_refresh; - u32 next_check; u32 itr; u32 latency; - char mtx_name[16]; - struct ixl_rx_buf *buffers; u32 mbuf_sz; u32 tail; - bus_dma_tag_t htag; - bus_dma_tag_t ptag; + u32 me; /* Used for Dynamic ITR calculation */ u32 packets; u32 bytes; /* Soft stats */ - u64 split; u64 rx_packets; u64 rx_bytes; u64 desc_errs; - u64 not_done; }; /* -** Driver queue struct: this is the interrupt container -** for the associated tx and rx ring pair. +** Driver queue structs */ -struct ixl_queue { +struct ixl_tx_queue { struct ixl_vsi *vsi; - u32 me; - u32 msix; /* This queue's MSIX vector */ - u32 eims; /* This queue's EIMS bit */ - struct resource *res; - void *tag; - int num_tx_desc; /* both tx and rx */ - int num_rx_desc; /* both tx and rx */ -#ifdef DEV_NETMAP - int num_desc; /* for compatibility with current netmap code in kernel */ -#endif struct tx_ring txr; - struct rx_ring rxr; - struct task task; - struct task tx_task; - struct taskqueue *tq; - - /* Queue stats */ + struct if_irq que_irq; + u32 msix; + /* Stats */ u64 irqs; u64 tso; - u64 mbuf_defrag_failed; - u64 mbuf_hdr_failed; - u64 mbuf_pkt_failed; - u64 tx_dmamap_failed; - u64 dropped_pkts; - u64 mss_too_small; }; +struct ixl_rx_queue { + struct ixl_vsi *vsi; + struct rx_ring rxr; + struct if_irq que_irq; + u32 msix; /* This queue's MSIX vector */ + /* Stats */ + u64 irqs; +}; + /* ** Virtual Station Interface */ SLIST_HEAD(ixl_ftl_head, ixl_mac_filter); struct ixl_vsi { - void *back; + if_ctx_t ctx; + if_softc_ctx_t shared; struct ifnet *ifp; - device_t dev; + //device_t dev; struct i40e_hw *hw; - struct ifmedia media; + struct ifmedia *media; +#define num_rx_queues shared->isc_nrxqsets +#define num_tx_queues shared->isc_ntxqsets + + void *back; enum i40e_vsi_type type; + // TODO: Remove? 
+ u64 que_mask; int id; - u16 num_queues; - int num_tx_desc; - int num_rx_desc; u32 rx_itr_setting; u32 tx_itr_setting; - u16 max_frame_size; bool enable_head_writeback; - struct ixl_queue *queues; /* head of queues */ - u16 vsi_num; bool link_active; u16 seid; u16 uplink_seid; u16 downlink_seid; + struct ixl_tx_queue *tx_queues; /* TX queue array */ + struct ixl_rx_queue *rx_queues; /* RX queue array */ + struct if_irq irq; + u32 link_speed; + /* MAC/VLAN Filter list */ struct ixl_ftl_head ftl; u16 num_macs; /* Contains readylist & stat counter id */ struct i40e_aqc_vsi_properties_data info; - eventhandler_tag vlan_attach; - eventhandler_tag vlan_detach; u16 num_vlans; /* Per-VSI stats from hardware */ struct i40e_eth_stats eth_stats; struct i40e_eth_stats eth_stats_offsets; bool stat_offsets_loaded; /* VSI stat counters */ u64 ipackets; u64 ierrors; u64 opackets; u64 oerrors; u64 ibytes; u64 obytes; u64 imcasts; u64 omcasts; u64 iqdrops; u64 oqdrops; u64 noproto; /* Driver statistics */ u64 hw_filters_del; u64 hw_filters_add; /* Misc. */ u64 flags; + /* Stats sysctls for this VSI */ struct sysctl_oid *vsi_node; }; /* -** Find the number of unrefreshed RX descriptors +** Creates new filter with given MAC address and VLAN ID */ -static inline u16 -ixl_rx_unrefreshed(struct ixl_queue *que) -{ - struct rx_ring *rxr = &que->rxr; - - if (rxr->next_check > rxr->next_refresh) - return (rxr->next_check - rxr->next_refresh - 1); - else - return ((que->num_rx_desc + rxr->next_check) - - rxr->next_refresh - 1); -} - -/* -** Find the next available unused filter -*/ static inline struct ixl_mac_filter * -ixl_get_filter(struct ixl_vsi *vsi) +ixl_new_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; /* create a new empty filter */ f = malloc(sizeof(struct ixl_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); - if (f) + if (f) { SLIST_INSERT_HEAD(&vsi->ftl, f, next); + bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); + f->vlan = vlan; + f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); + } return (f); } /* ** Compare two ethernet addresses */ static inline bool cmp_etheraddr(const u8 *ea1, const u8 *ea2) { - bool cmp = FALSE; - - if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) && - (ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) && - (ea1[4] == ea2[4]) && (ea1[5] == ea2[5])) - cmp = TRUE; - - return (cmp); + return (bcmp(ea1, ea2, 6) == 0); } /* * Return next largest power of 2, unsigned * * Public domain, from Bit Twiddling Hacks */ static inline u32 next_power_of_two(u32 n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; /* Next power of two > 0 is 1 */ n += (n == 0); return (n); } /* * Info for stats sysctls */ struct ixl_sysctl_info { u64 *stat; char *name; char *description; }; extern const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN]; -/********************************************************************* - * TXRX Function prototypes - *********************************************************************/ -int ixl_allocate_tx_data(struct ixl_queue *); -int ixl_allocate_rx_data(struct ixl_queue *); -void ixl_init_tx_ring(struct ixl_queue *); -int ixl_init_rx_ring(struct ixl_queue *); -bool ixl_rxeof(struct ixl_queue *, int); -bool ixl_txeof(struct ixl_queue *); -void ixl_free_que_tx(struct ixl_queue *); -void ixl_free_que_rx(struct ixl_queue *); - -int ixl_mq_start(struct ifnet *, struct mbuf *); -int ixl_mq_start_locked(struct ifnet *, struct tx_ring *); -void ixl_deferred_mq_start(void *, int); - -void ixl_vsi_setup_rings_size(struct ixl_vsi *, int, int); -int 
ixl_queue_hang_check(struct ixl_vsi *); -void ixl_free_vsi(struct ixl_vsi *); -void ixl_qflush(struct ifnet *); - /* Common function prototypes between PF/VF driver */ -#if __FreeBSD_version >= 1100000 -uint64_t ixl_get_counter(if_t ifp, ift_counter cnt); -#endif -void ixl_get_default_rss_key(u32 *); +void ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que); +void ixl_set_queue_rx_itr(struct ixl_rx_queue *que); +void ixl_get_default_rss_key(u32 *); const char * i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err); -void ixl_set_busmaster(device_t); -void ixl_set_msix_enable(device_t); +u64 ixl_max_aq_speed_to_value(u8); #endif /* _IXL_H_ */ Index: head/sys/dev/ixl/ixl_debug.h =================================================================== --- head/sys/dev/ixl/ixl_debug.h (nonexistent) +++ head/sys/dev/ixl/ixl_debug.h (revision 335338) @@ -0,0 +1,110 @@ +/****************************************************************************** + + Copyright (c) 2013-2016, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXL_DEBUG_H_ +#define _IXL_DEBUG_H_ + +#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC_FORMAT_ARGS(mac_addr) \ + (mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \ + (mac_addr)[4], (mac_addr)[5] +#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off") + +#ifdef IXL_DEBUG + +#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__) +#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__) +#define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__) + +/* Defines for printing generic debug information */ +#define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__) +#define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__) +#define IDPRINTF(...) 
_IF_DBG_PRINTF(__VA_ARGS__) + +/* Defines for printing specific debug information */ +#define DEBUG_INIT 1 +#define DEBUG_IOCTL 1 +#define DEBUG_HW 1 + +#define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__) +#define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__) +#define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__) + +#define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__) +#define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \ + if_printf(ifp, S "\n", ##__VA_ARGS__) +#define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__) + +#define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__) + +#else /* no IXL_DEBUG */ +#define DEBUG_INIT 0 +#define DEBUG_IOCTL 0 +#define DEBUG_HW 0 + +#define DPRINTF(...) +#define DDPRINTF(...) +#define IDPRINTF(...) + +#define INIT_DEBUGOUT(...) +#define INIT_DBG_DEV(...) +#define INIT_DBG_IF(...) +#define IOCTL_DEBUGOUT(...) +#define IOCTL_DBG_IF2(...) +#define IOCTL_DBG_IF(...) +#define HW_DEBUGOUT(...) +#endif /* IXL_DEBUG */ + +enum ixl_dbg_mask { + IXL_DBG_INFO = 0x00000001, + IXL_DBG_EN_DIS = 0x00000002, + IXL_DBG_AQ = 0x00000004, + IXL_DBG_NVMUPD = 0x00000008, + + IXL_DBG_IOCTL_KNOWN = 0x00000010, + IXL_DBG_IOCTL_UNKNOWN = 0x00000020, + IXL_DBG_IOCTL_ALL = 0x00000030, + + I40E_DEBUG_RSS = 0x00000100, + + IXL_DBG_IOV = 0x00001000, + IXL_DBG_IOV_VC = 0x00002000, + + IXL_DBG_SWITCH_INFO = 0x00010000, + IXL_DBG_I2C = 0x00020000, + + IXL_DBG_ALL = 0xFFFFFFFF +}; + +#endif /* _IXL_DEBUG_H_ */ Property changes on: head/sys/dev/ixl/ixl_debug.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/dev/ixl/ixl_pf.h =================================================================== --- head/sys/dev/ixl/ixl_pf.h (revision 335337) +++ head/sys/dev/ixl/ixl_pf.h (revision 335338) @@ -1,363 +1,407 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_PF_H_ #define _IXL_PF_H_ #include "ixl.h" #include "ixl_pf_qmgr.h" #define VF_FLAG_ENABLED 0x01 #define VF_FLAG_SET_MAC_CAP 0x02 #define VF_FLAG_VLAN_CAP 0x04 #define VF_FLAG_PROMISC_CAP 0x08 #define VF_FLAG_MAC_ANTI_SPOOF 0x10 -#define IXL_PF_STATE_EMPR_RESETTING (1 << 0) -#define IXL_PF_STATE_FW_LLDP_DISABLED (1 << 1) +#define IXL_ICR0_CRIT_ERR_MASK \ + (I40E_PFINT_ICR0_PCI_EXCEPTION_MASK | \ + I40E_PFINT_ICR0_ECC_ERR_MASK | \ + I40E_PFINT_ICR0_PE_CRITERR_MASK) +/* VF Interrupts */ +#define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \ + I40E_VPINT_LNKLSTN(((vector) - 1) + \ + (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) + +#define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \ + I40E_VFINT_DYN_CTLN(((vector) - 1) + \ + (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) + +/* Used in struct ixl_pf's state field */ +enum ixl_pf_state { + IXL_PF_STATE_ADAPTER_RESETTING = (1 << 0), + IXL_PF_STATE_MDD_PENDING = (1 << 1), + IXL_PF_STATE_PF_RESET_REQ = (1 << 2), + IXL_PF_STATE_VF_RESET_REQ = (1 << 3), + IXL_PF_STATE_PF_CRIT_ERR = (1 << 4), + IXL_PF_STATE_CORE_RESET_REQ = (1 << 5), + IXL_PF_STATE_GLOB_RESET_REQ = (1 << 6), + IXL_PF_STATE_EMP_RESET_REQ = (1 << 7), + IXL_PF_STATE_FW_LLDP_DISABLED = (1 << 8), +}; + struct ixl_vf { struct ixl_vsi vsi; - uint32_t vf_flags; + u32 vf_flags; + u32 num_mdd_events; - uint8_t mac[ETHER_ADDR_LEN]; - uint16_t vf_num; - uint32_t version; + u8 mac[ETHER_ADDR_LEN]; + u16 vf_num; + u32 version; struct ixl_pf_qtag qtag; struct sysctl_ctx_list ctx; }; /* Physical controller structure */ struct ixl_pf { + /* + * This is first so that iflib_get_softc can return + * either the VSI or the PF structures. 
+ */ + struct ixl_vsi vsi; + struct i40e_hw hw; struct i40e_osdep osdep; device_t dev; - struct ixl_vsi vsi; struct resource *pci_mem; - struct resource *msix_mem; - /* - * Interrupt resources: this set is - * either used for legacy, or for Link - * when doing MSIX - */ - void *tag; - struct resource *res; - - struct callout timer; - int msix; #ifdef IXL_IW int iw_msix; bool iw_enabled; #endif int if_flags; - int state; - bool init_in_progress; + u32 state; u8 supported_speeds; struct ixl_pf_qmgr qmgr; struct ixl_pf_qtag qtag; /* Tunable values */ bool enable_msix; int max_queues; bool enable_tx_fc_filter; int dynamic_rx_itr; int dynamic_tx_itr; int tx_itr; int rx_itr; - struct mtx pf_mtx; - - u32 qbase; - u32 admvec; - struct task adminq; - struct taskqueue *tq; - bool link_up; u32 link_speed; int advertised_speed; int fc; /* link flow ctrl setting */ enum ixl_dbg_mask dbg_mask; bool has_i2c; /* Misc stats maintained by the driver */ u64 watchdog_events; u64 admin_irq; /* Statistics from hw */ struct i40e_hw_port_stats stats; struct i40e_hw_port_stats stats_offsets; bool stat_offsets_loaded; + /* I2C access methods */ + u8 i2c_access_method; + s32 (*read_i2c_byte)(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 *data); + s32 (*write_i2c_byte)(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 data); + /* SR-IOV */ struct ixl_vf *vfs; int num_vfs; uint16_t veb_seid; struct task vflr_task; int vc_debug_lvl; }; /* * Defines used for NVM update ioctls. * This value is used in the Solaris tool, too. */ #define I40E_NVM_ACCESS \ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5) #define IXL_DEFAULT_PHY_INT_MASK \ ((~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL \ | I40E_AQ_EVENT_MEDIA_NA)) & 0x3FF) /*** Sysctl help messages; displayed with "sysctl -d" ***/ #define IXL_SYSCTL_HELP_SET_ADVERTISE \ "\nControl advertised link speed.\n" \ "Flags:\n" \ "\t 0x1 - advertise 100M\n" \ "\t 0x2 - advertise 1G\n" \ "\t 0x4 - advertise 10G\n" \ "\t 0x8 - advertise 20G\n" \ "\t0x10 - advertise 25G\n" \ "\t0x20 - advertise 40G\n\n" \ "Set to 0 to disable link.\n" \ "Use \"sysctl -x\" to view flags properly." #define IXL_SYSCTL_HELP_SUPPORTED_SPEED \ "\nSupported link speeds.\n" \ "Flags:\n" \ "\t 0x1 - 100M\n" \ "\t 0x2 - 1G\n" \ "\t 0x4 - 10G\n" \ "\t 0x8 - 20G\n" \ "\t0x10 - 25G\n" \ "\t0x20 - 40G\n\n" \ "Use \"sysctl -x\" to view flags properly." #define IXL_SYSCTL_HELP_FC \ "\nSet flow control mode using the values below.\n" \ "\t0 - off\n" \ "\t1 - rx pause\n" \ "\t2 - tx pause\n" \ "\t3 - tx and rx pause" #define IXL_SYSCTL_HELP_LINK_STATUS \ "\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \ -" the response." \ +" the response." 
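/*
 * Illustrative sketch (not part of this change): one way the i2c_access_method
 * tunable could be mapped onto the read_i2c_byte/write_i2c_byte function
 * pointers that this commit adds to struct ixl_pf above.  The helper name
 * ixl_set_i2c_access_method() and the fallback choice for method 0 ("best
 * available") are assumptions; only the _bb/_reg/_aq accessors and the 0-3
 * method values come from this change.
 */
static void
ixl_set_i2c_access_method(struct ixl_pf *pf)
{
	switch (pf->i2c_access_method) {
	case 3:		/* Admin Queue command (710 devices, FW 1.7+) */
		pf->read_i2c_byte = ixl_read_i2c_byte_aq;
		pf->write_i2c_byte = ixl_write_i2c_byte_aq;
		break;
	case 2:		/* register access via I2CCMD */
		pf->read_i2c_byte = ixl_read_i2c_byte_reg;
		pf->write_i2c_byte = ixl_write_i2c_byte_reg;
		break;
	case 1:		/* bit-bang via I2CPARAMS */
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	default:	/* 0: "best available"; bit-banging is assumed as the safe fallback */
		pf->read_i2c_byte = ixl_read_i2c_byte_bb;
		pf->write_i2c_byte = ixl_write_i2c_byte_bb;
		break;
	}
}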
#define IXL_SYSCTL_HELP_FW_LLDP \ "\nFW LLDP engine:\n" \ "\t0 - disable\n" \ "\t1 - enable\n" +#define IXL_SYSCTL_HELP_READ_I2C \ +"\nRead a byte from I2C bus\n" \ +"Input: 32-bit value\n" \ +"\tbits 0-7: device address (0xA0 or 0xA2)\n" \ +"\tbits 8-15: offset (0-255)\n" \ +"\tbits 16-31: unused\n" \ +"Output: 8-bit value read" + +#define IXL_SYSCTL_HELP_WRITE_I2C \ +"\nWrite a byte to the I2C bus\n" \ +"Input: 32-bit value\n" \ +"\tbits 0-7: device address (0xA0 or 0xA2)\n" \ +"\tbits 8-15: offset (0-255)\n" \ +"\tbits 16-23: value to write\n" \ +"\tbits 24-31: unused\n" \ +"Output: 8-bit value written" + +#define IXL_SYSCTL_HELP_I2C_METHOD \ +"\nI2C access method that driver will use:\n" \ +"\t0 - best available method\n" \ +"\t1 - bit bang via I2CPARAMS register\n" \ +"\t2 - register read/write via I2CCMD register\n" \ +"\t3 - Use Admin Queue command (best)\n" \ +"Using the Admin Queue is only supported on 710 devices with FW version 1.7 or higher" + extern const char * const ixl_fc_string[6]; MALLOC_DECLARE(M_IXL); /*** Functions / Macros ***/ /* Adjust the level here to 10 or over to print stats messages */ #define I40E_VC_DEBUG(p, level, ...) \ do { \ if (level < 10) \ ixl_dbg(p, IXL_DBG_IOV_VC, ##__VA_ARGS__); \ } while (0) #define i40e_send_vf_nack(pf, vf, op, st) \ ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__) -#define IXL_PF_LOCK_INIT(_sc, _name) \ - mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF) -#define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx) -#define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx) -#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx) -#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED) - /* Debug printing */ #define ixl_dbg(p, m, s, ...) ixl_debug_core(p, m, s, ##__VA_ARGS__) void ixl_debug_core(struct ixl_pf *, enum ixl_dbg_mask, char *, ...); /* For stats sysctl naming */ #define QUEUE_NAME_LEN 32 /* For netmap(4) compatibility */ #define ixl_disable_intr(vsi) ixl_disable_rings_intr(vsi) -/* - * PF-only function declarations - */ - -int ixl_setup_interface(device_t, struct ixl_vsi *); +/* PF-only function declarations */ +int ixl_setup_interface(device_t, struct ixl_pf *); void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *); char * ixl_aq_speed_to_str(enum i40e_aq_link_speed); void ixl_handle_que(void *context, int pending); void ixl_init(void *); void ixl_local_timer(void *); void ixl_register_vlan(void *, struct ifnet *, u16); void ixl_unregister_vlan(void *, struct ifnet *, u16); -void ixl_intr(void *); -void ixl_msix_que(void *); -void ixl_msix_adminq(void *); +int ixl_intr(void *); +int ixl_msix_que(void *); +int ixl_msix_adminq(void *); void ixl_do_adminq(void *, int); int ixl_res_alloc_cmp(const void *, const void *); char * ixl_switch_res_type_string(u8); char * ixl_switch_element_string(struct sbuf *, struct i40e_aqc_switch_config_element_resp *); void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_hw_port_stats *); void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_eth_stats *); void ixl_media_status(struct ifnet *, struct ifmediareq *); int ixl_media_change(struct ifnet *); int ixl_ioctl(struct ifnet *, u_long, caddr_t); void ixl_enable_queue(struct i40e_hw *, int); void ixl_disable_queue(struct i40e_hw *, int); void ixl_enable_intr0(struct i40e_hw *); void ixl_disable_intr0(struct i40e_hw *); void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf); void ixl_stat_update48(struct i40e_hw *, u32, 
u32, bool, u64 *, u64 *); void ixl_stat_update32(struct i40e_hw *, u32, bool, u64 *, u64 *); void ixl_stop(struct ixl_pf *); void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name); int ixl_get_hw_capabilities(struct ixl_pf *); void ixl_link_up_msg(struct ixl_pf *); void ixl_update_link_status(struct ixl_pf *); -int ixl_allocate_pci_resources(struct ixl_pf *); int ixl_setup_stations(struct ixl_pf *); int ixl_switch_config(struct ixl_pf *); void ixl_stop_locked(struct ixl_pf *); int ixl_teardown_hw_structs(struct ixl_pf *); int ixl_reset(struct ixl_pf *); void ixl_init_locked(struct ixl_pf *); void ixl_set_rss_key(struct ixl_pf *); void ixl_set_rss_pctypes(struct ixl_pf *); void ixl_set_rss_hlut(struct ixl_pf *); int ixl_setup_adminq_msix(struct ixl_pf *); int ixl_setup_adminq_tq(struct ixl_pf *); int ixl_teardown_adminq_msix(struct ixl_pf *); void ixl_configure_intr0_msix(struct ixl_pf *); void ixl_configure_queue_intr_msix(struct ixl_pf *); void ixl_free_adminq_tq(struct ixl_pf *); int ixl_setup_legacy(struct ixl_pf *); int ixl_init_msix(struct ixl_pf *); void ixl_configure_itr(struct ixl_pf *); void ixl_configure_legacy(struct ixl_pf *); void ixl_free_pci_resources(struct ixl_pf *); void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *); void ixl_config_rss(struct ixl_pf *); int ixl_set_advertised_speeds(struct ixl_pf *, int, bool); void ixl_set_initial_advertised_speeds(struct ixl_pf *); void ixl_print_nvm_version(struct ixl_pf *pf); void ixl_add_device_sysctls(struct ixl_pf *); void ixl_handle_mdd_event(struct ixl_pf *); void ixl_add_hw_stats(struct ixl_pf *); void ixl_update_stats_counters(struct ixl_pf *); void ixl_pf_reset_stats(struct ixl_pf *); void ixl_get_bus_info(struct ixl_pf *pf); int ixl_aq_get_link_status(struct ixl_pf *, struct i40e_aqc_get_link_status *); int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *); void ixl_handle_empr_reset(struct ixl_pf *); int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up); int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up); -void ixl_set_queue_rx_itr(struct ixl_queue *); -void ixl_set_queue_tx_itr(struct ixl_queue *); +void ixl_set_queue_rx_itr(struct ixl_rx_queue *); +void ixl_set_queue_tx_itr(struct ixl_tx_queue *); void ixl_add_filter(struct ixl_vsi *, const u8 *, s16 vlan); void ixl_del_filter(struct ixl_vsi *, const u8 *, s16 vlan); void ixl_reconfigure_filters(struct ixl_vsi *vsi); int ixl_disable_rings(struct ixl_vsi *); int ixl_disable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_disable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16); int ixl_enable_rings(struct ixl_vsi *); int ixl_enable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_enable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16); void ixl_update_eth_stats(struct ixl_vsi *); void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int); int ixl_initialize_vsi(struct ixl_vsi *); void ixl_add_ifmedia(struct ixl_vsi *, u64); int ixl_setup_queue_msix(struct ixl_vsi *); int ixl_setup_queue_tqs(struct ixl_vsi *); int ixl_teardown_queue_msix(struct ixl_vsi *); void ixl_free_queue_tqs(struct ixl_vsi *); void ixl_enable_intr(struct ixl_vsi *); void ixl_disable_rings_intr(struct ixl_vsi *); void ixl_set_promisc(struct ixl_vsi *); void ixl_add_multi(struct ixl_vsi *); void ixl_del_multi(struct ixl_vsi *); void 
ixl_setup_vlan_filters(struct ixl_vsi *); void ixl_init_filters(struct ixl_vsi *); void ixl_add_hw_filters(struct ixl_vsi *, int, int); void ixl_del_hw_filters(struct ixl_vsi *, int); struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *, const u8 *, s16); void ixl_add_mc_filter(struct ixl_vsi *, u8 *); void ixl_free_mac_filters(struct ixl_vsi *vsi); void ixl_update_vsi_stats(struct ixl_vsi *); void ixl_vsi_reset_stats(struct ixl_vsi *); -int ixl_vsi_setup_queues(struct ixl_vsi *vsi); void ixl_vsi_free_queues(struct ixl_vsi *vsi); +void ixl_if_init(if_ctx_t ctx); +void ixl_if_stop(if_ctx_t ctx); + /* * I2C Function prototypes */ int ixl_find_i2c_interface(struct ixl_pf *); -s32 ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset, +s32 ixl_read_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 *data); -s32 ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset, +s32 ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 data); +s32 ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 data); int ixl_get_fw_lldp_status(struct ixl_pf *pf); int ixl_attach_get_link_status(struct ixl_pf *); +u64 ixl_max_aq_speed_to_value(u8); +void ixl_handle_vflr(void *, int); #endif /* _IXL_PF_H_ */ Index: head/sys/dev/ixl/ixl_pf_i2c.c =================================================================== --- head/sys/dev/ixl/ixl_pf_i2c.c (revision 335337) +++ head/sys/dev/ixl/ixl_pf_i2c.c (revision 335338) @@ -1,605 +1,743 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf.h" #define IXL_I2C_T_RISE 1 #define IXL_I2C_T_FALL 1 #define IXL_I2C_T_SU_DATA 1 #define IXL_I2C_T_SU_STA 5 #define IXL_I2C_T_SU_STO 4 #define IXL_I2C_T_HD_STA 4 #define IXL_I2C_T_LOW 5 #define IXL_I2C_T_HIGH 4 #define IXL_I2C_T_BUF 5 #define IXL_I2C_CLOCK_STRETCHING_TIMEOUT 500 #define IXL_I2C_REG(_hw) \ - I40E_GLGEN_I2CPARAMS(((struct i40e_osdep *)(_hw)->back)->i2c_intfc_num) + I40E_GLGEN_I2CPARAMS(_hw->func_caps.mdio_port_num) - +/* I2C bit-banging functions */ static s32 ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data); static bool ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl); static void ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl); static void ixl_lower_i2c_clk(struct ixl_pf *pf, u32 *i2cctl); static s32 ixl_clock_out_i2c_bit(struct ixl_pf *pf, bool data); static s32 ixl_get_i2c_ack(struct ixl_pf *pf); static s32 ixl_clock_out_i2c_byte(struct ixl_pf *pf, u8 data); static s32 ixl_clock_in_i2c_bit(struct ixl_pf *pf, bool *data); static s32 ixl_clock_in_i2c_byte(struct ixl_pf *pf, u8 *data); static void ixl_i2c_bus_clear(struct ixl_pf *pf); static void ixl_i2c_start(struct ixl_pf *pf); static void ixl_i2c_stop(struct ixl_pf *pf); +static s32 ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum); + /** * ixl_i2c_bus_clear - Clears the I2C bus * @hw: pointer to hardware structure * * Clears the I2C bus by sending nine clock pulses. * Used when data line is stuck low. **/ static void ixl_i2c_bus_clear(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); u32 i; DEBUGFUNC("ixl_i2c_bus_clear"); ixl_i2c_start(pf); ixl_set_i2c_data(pf, &i2cctl, 1); for (i = 0; i < 9; i++) { ixl_raise_i2c_clk(pf, &i2cctl); /* Min high period of clock is 4us */ i40e_usec_delay(IXL_I2C_T_HIGH); ixl_lower_i2c_clk(pf, &i2cctl); /* Min low period of clock is 4.7us*/ i40e_usec_delay(IXL_I2C_T_LOW); } ixl_i2c_start(pf); /* Put the i2c bus back to default state */ ixl_i2c_stop(pf); } /** * ixl_i2c_stop - Sets I2C stop condition * @hw: pointer to hardware structure * * Sets I2C stop condition (Low -> High on SDA while SCL is High) **/ static void ixl_i2c_stop(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); DEBUGFUNC("ixl_i2c_stop"); /* Stop condition must begin with data low and clock high */ ixl_set_i2c_data(pf, &i2cctl, 0); ixl_raise_i2c_clk(pf, &i2cctl); /* Setup time for stop condition (4us) */ i40e_usec_delay(IXL_I2C_T_SU_STO); ixl_set_i2c_data(pf, &i2cctl, 1); /* bus free time between stop and start (4.7us)*/ i40e_usec_delay(IXL_I2C_T_BUF); } /** * ixl_clock_in_i2c_byte - Clocks in one byte via I2C * @hw: pointer to hardware structure * @data: data byte to clock in * * Clocks in one byte data via I2C data/clock **/ static s32 ixl_clock_in_i2c_byte(struct ixl_pf *pf, u8 *data) { s32 i; bool bit = 0; DEBUGFUNC("ixl_clock_in_i2c_byte"); for (i = 7; i >= 0; i--) { ixl_clock_in_i2c_bit(pf, &bit); *data |= bit << i; } return I40E_SUCCESS; } /** * ixl_clock_in_i2c_bit - Clocks in one bit via I2C data/clock * @hw: pointer to hardware structure * @data: read data value * * Clocks in one bit via I2C data/clock **/ static s32 ixl_clock_in_i2c_bit(struct ixl_pf *pf, bool *data) { struct i40e_hw *hw = &pf->hw; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); DEBUGFUNC("ixl_clock_in_i2c_bit"); ixl_raise_i2c_clk(pf, &i2cctl); /* Minimum high period of clock is 4us */ i40e_usec_delay(IXL_I2C_T_HIGH); i2cctl = rd32(hw, 
IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); i2cctl = rd32(hw, IXL_I2C_REG(hw)); *data = ixl_get_i2c_data(pf, &i2cctl); ixl_lower_i2c_clk(pf, &i2cctl); /* Minimum low period of clock is 4.7 us */ i40e_usec_delay(IXL_I2C_T_LOW); return I40E_SUCCESS; } /** * ixl_get_i2c_ack - Polls for I2C ACK * @hw: pointer to hardware structure * * Clocks in/out one bit via I2C data/clock **/ static s32 ixl_get_i2c_ack(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; s32 status = I40E_SUCCESS; u32 i = 0; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); u32 timeout = 10; bool ack = 1; ixl_raise_i2c_clk(pf, &i2cctl); /* Minimum high period of clock is 4us */ i40e_usec_delay(IXL_I2C_T_HIGH); i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); /* Poll for ACK. Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { i2cctl = rd32(hw, IXL_I2C_REG(hw)); ack = ixl_get_i2c_data(pf, &i2cctl); i40e_usec_delay(1); if (!ack) break; } if (ack) { ixl_dbg(pf, IXL_DBG_I2C, "I2C ack was not received.\n"); status = I40E_ERR_PHY; } ixl_lower_i2c_clk(pf, &i2cctl); /* Minimum low period of clock is 4.7 us */ i40e_usec_delay(IXL_I2C_T_LOW); return status; } /** * ixl_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock * @hw: pointer to hardware structure * @data: data value to write * * Clocks out one bit via I2C data/clock **/ static s32 ixl_clock_out_i2c_bit(struct ixl_pf *pf, bool data) { struct i40e_hw *hw = &pf->hw; s32 status; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); status = ixl_set_i2c_data(pf, &i2cctl, data); if (status == I40E_SUCCESS) { ixl_raise_i2c_clk(pf, &i2cctl); /* Minimum high period of clock is 4us */ i40e_usec_delay(IXL_I2C_T_HIGH); ixl_lower_i2c_clk(pf, &i2cctl); /* Minimum low period of clock is 4.7 us. * This also takes care of the data hold time. 
*/ i40e_usec_delay(IXL_I2C_T_LOW); } else { status = I40E_ERR_PHY; ixl_dbg(pf, IXL_DBG_I2C, "I2C data was not set to %#x\n", data); } return status; } /** * ixl_clock_out_i2c_byte - Clocks out one byte via I2C * @hw: pointer to hardware structure * @data: data byte clocked out * * Clocks out one byte data via I2C data/clock **/ static s32 ixl_clock_out_i2c_byte(struct ixl_pf *pf, u8 data) { struct i40e_hw *hw = &pf->hw; s32 status = I40E_SUCCESS; s32 i; u32 i2cctl; bool bit; DEBUGFUNC("ixl_clock_out_i2c_byte"); for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; status = ixl_clock_out_i2c_bit(pf, bit); if (status != I40E_SUCCESS) break; } /* Release SDA line (set high) */ i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK; i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK); wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); return status; } /** * ixl_lower_i2c_clk - Lowers the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Lowers the I2C clock line '1'->'0' **/ static void ixl_lower_i2c_clk(struct ixl_pf *pf, u32 *i2cctl) { struct i40e_hw *hw = &pf->hw; *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_MASK); *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK); wr32(hw, IXL_I2C_REG(hw), *i2cctl); ixl_flush(hw); /* SCL fall time (300ns) */ i40e_usec_delay(IXL_I2C_T_FALL); } /** * ixl_raise_i2c_clk - Raises the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Raises the I2C clock line '0'->'1' **/ static void ixl_raise_i2c_clk(struct ixl_pf *pf, u32 *i2cctl) { struct i40e_hw *hw = &pf->hw; u32 i = 0; u32 timeout = IXL_I2C_CLOCK_STRETCHING_TIMEOUT; u32 i2cctl_r = 0; for (i = 0; i < timeout; i++) { *i2cctl |= I40E_GLGEN_I2CPARAMS_CLK_MASK; *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK); wr32(hw, IXL_I2C_REG(hw), *i2cctl); ixl_flush(hw); /* SCL rise time (1000ns) */ i40e_usec_delay(IXL_I2C_T_RISE); i2cctl_r = rd32(hw, IXL_I2C_REG(hw)); if (i2cctl_r & I40E_GLGEN_I2CPARAMS_CLK_IN_MASK) break; } } /** * ixl_get_i2c_data - Reads the I2C SDA data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Returns the I2C data bit value **/ static bool ixl_get_i2c_data(struct ixl_pf *pf, u32 *i2cctl) { bool data; if (*i2cctl & I40E_GLGEN_I2CPARAMS_DATA_IN_MASK) data = 1; else data = 0; return data; } /** * ixl_set_i2c_data - Sets the I2C data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * @data: I2C data value (0 or 1) to set * * Sets the I2C data bit **/ static s32 ixl_set_i2c_data(struct ixl_pf *pf, u32 *i2cctl, bool data) { struct i40e_hw *hw = &pf->hw; s32 status = I40E_SUCCESS; DEBUGFUNC("ixl_set_i2c_data"); if (data) *i2cctl |= I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK; else *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK); *i2cctl &= ~(I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK); wr32(hw, IXL_I2C_REG(hw), *i2cctl); ixl_flush(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ i40e_usec_delay(IXL_I2C_T_RISE + IXL_I2C_T_FALL + IXL_I2C_T_SU_DATA); /* Verify data was set correctly */ *i2cctl = rd32(hw, IXL_I2C_REG(hw)); if (data != ixl_get_i2c_data(pf, i2cctl)) { status = I40E_ERR_PHY; ixl_dbg(pf, IXL_DBG_I2C, "Error - I2C data was not set to %X.\n", data); } return status; } /** * ixl_i2c_start - Sets I2C start condition * Sets I2C start condition (High -> Low on SDA while SCL is High) **/ static void ixl_i2c_start(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); 
DEBUGFUNC("ixl_i2c_start"); /* Start condition must begin with data and clock high */ ixl_set_i2c_data(pf, &i2cctl, 1); ixl_raise_i2c_clk(pf, &i2cctl); /* Setup time for start condition (4.7us) */ i40e_usec_delay(IXL_I2C_T_SU_STA); ixl_set_i2c_data(pf, &i2cctl, 0); /* Hold time for start condition (4us) */ i40e_usec_delay(IXL_I2C_T_HD_STA); ixl_lower_i2c_clk(pf, &i2cctl); /* Minimum low period of clock is 4.7 us */ i40e_usec_delay(IXL_I2C_T_LOW); } /** - * ixl_read_i2c_byte - Reads 8 bit word over I2C + * ixl_read_i2c_byte_bb - Reads 8 bit word over I2C **/ s32 -ixl_read_i2c_byte(struct ixl_pf *pf, u8 byte_offset, +ixl_read_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 *data) { struct i40e_hw *hw = &pf->hw; u32 max_retry = 10; u32 retry = 0; bool nack = 1; s32 status; *data = 0; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); do { ixl_i2c_start(pf); /* Device Address and write indication */ status = ixl_clock_out_i2c_byte(pf, dev_addr); if (status != I40E_SUCCESS) { ixl_dbg(pf, IXL_DBG_I2C, "dev_addr clock out error\n"); goto fail; } status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) { ixl_dbg(pf, IXL_DBG_I2C, "dev_addr i2c ack error\n"); goto fail; } status = ixl_clock_out_i2c_byte(pf, byte_offset); if (status != I40E_SUCCESS) { ixl_dbg(pf, IXL_DBG_I2C, "byte_offset clock out error\n"); goto fail; } status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) { ixl_dbg(pf, IXL_DBG_I2C, "byte_offset i2c ack error\n"); goto fail; } ixl_i2c_start(pf); /* Device Address and read indication */ status = ixl_clock_out_i2c_byte(pf, (dev_addr | 0x1)); if (status != I40E_SUCCESS) goto fail; status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) goto fail; status = ixl_clock_in_i2c_byte(pf, data); if (status != I40E_SUCCESS) goto fail; status = ixl_clock_out_i2c_bit(pf, nack); if (status != I40E_SUCCESS) goto fail; ixl_i2c_stop(pf); status = I40E_SUCCESS; goto done; fail: ixl_i2c_bus_clear(pf); i40e_msec_delay(100); retry++; if (retry < max_retry) - ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying.\n"); + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error - Retrying\n"); else - ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error.\n"); + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error\n"); } while (retry < max_retry); done: i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl &= ~I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); return status; } /** - * ixl_write_i2c_byte - Writes 8 bit word over I2C + * ixl_write_i2c_byte_bb - Writes 8 bit word over I2C **/ s32 -ixl_write_i2c_byte(struct ixl_pf *pf, u8 byte_offset, +ixl_write_i2c_byte_bb(struct ixl_pf *pf, u8 byte_offset, u8 dev_addr, u8 data) { struct i40e_hw *hw = &pf->hw; s32 status = I40E_SUCCESS; u32 max_retry = 1; u32 retry = 0; u32 i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl |= I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); do { ixl_i2c_start(pf); status = ixl_clock_out_i2c_byte(pf, dev_addr); if (status != I40E_SUCCESS) goto fail; status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) goto fail; status = ixl_clock_out_i2c_byte(pf, byte_offset); if (status != I40E_SUCCESS) goto fail; status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) goto fail; status = ixl_clock_out_i2c_byte(pf, data); if (status != I40E_SUCCESS) goto fail; status = ixl_get_i2c_ack(pf); if (status != I40E_SUCCESS) goto fail; ixl_i2c_stop(pf); goto write_byte_out; fail: ixl_i2c_bus_clear(pf); 
i40e_msec_delay(100); retry++; if (retry < max_retry) - ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying.\n"); + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error - Retrying\n"); else - ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error.\n"); + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error\n"); } while (retry < max_retry); write_byte_out: i2cctl = rd32(hw, IXL_I2C_REG(hw)); i2cctl &= ~I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK; wr32(hw, IXL_I2C_REG(hw), i2cctl); ixl_flush(hw); return status; } +/** + * ixl_read_i2c_byte_reg - Reads 8 bit word over I2C using the I2CCMD register + **/ +s32 +ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + struct i40e_hw *hw = &pf->hw; + u32 reg = 0; + s32 status; + *data = 0; + + reg |= (byte_offset << I40E_GLGEN_I2CCMD_REGADD_SHIFT); + reg |= (((dev_addr >> 1) & 0x7) << I40E_GLGEN_I2CCMD_PHYADD_SHIFT); + reg |= I40E_GLGEN_I2CCMD_OP_MASK; + wr32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num), reg); + + status = ixl_wait_for_i2c_completion(hw, hw->func_caps.mdio_port_num); + + /* Get data from I2C register */ + reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num)); + + /* Retrieve data read from EEPROM */ + *data = (u8)(reg & 0xff); + + if (status) + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read error\n"); + return status; +} + +/** + * ixl_write_i2c_byte_reg - Writes 8 bit word over I2C using the I2CCMD register + **/ +s32 +ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 data) +{ + struct i40e_hw *hw = &pf->hw; + s32 status = I40E_SUCCESS; + u32 reg = 0; + u8 upperbyte = 0; + u16 datai2c = 0; + + status = ixl_read_i2c_byte_reg(pf, byte_offset + 1, dev_addr, &upperbyte); + datai2c = ((u16)upperbyte << 8) | (u16)data; + reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num)); + + /* Form write command */ + reg &= ~I40E_GLGEN_I2CCMD_PHYADD_MASK; + reg |= (((dev_addr >> 1) & 0x7) << I40E_GLGEN_I2CCMD_PHYADD_SHIFT); + reg &= ~I40E_GLGEN_I2CCMD_REGADD_MASK; + reg |= (byte_offset << I40E_GLGEN_I2CCMD_REGADD_SHIFT); + reg &= ~I40E_GLGEN_I2CCMD_DATA_MASK; + reg |= (datai2c << I40E_GLGEN_I2CCMD_DATA_SHIFT); + reg &= ~I40E_GLGEN_I2CCMD_OP_MASK; + + /* Write command to registers controlling I2C - data and address.
*/ + wr32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num), reg); + + status = ixl_wait_for_i2c_completion(hw, hw->func_caps.mdio_port_num); + + if (status) + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write error\n"); + return status; +} + +/** + * ixl_wait_for_i2c_completion + **/ +static s32 +ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum) +{ + s32 status = 0; + u32 timeout = 100; + u32 reg; + do { + reg = rd32(hw, I40E_GLGEN_I2CCMD(portnum)); + if ((reg & I40E_GLGEN_I2CCMD_R_MASK) != 0) + break; + i40e_usec_delay(10); + } while (timeout-- > 0); + + if (timeout == 0) + return I40E_ERR_TIMEOUT; + else + return status; +} + +/** + * ixl_read_i2c_byte - Reads 8 bit word over I2C using a hardware register + **/ +s32 +ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + struct i40e_hw *hw = &pf->hw; + s32 status = I40E_SUCCESS; + u32 reg; + + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + dev_addr, + byte_offset, + ®, NULL); + + if (status) + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte read status %s, error %s\n", + i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); + else + *data = (u8)reg; + + return status; +} + +/** + * ixl_write_i2c_byte - Writes 8 bit word over I2C using a hardware register + **/ +s32 +ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset, + u8 dev_addr, u8 data) +{ + struct i40e_hw *hw = &pf->hw; + s32 status = I40E_SUCCESS; + + status = i40e_aq_set_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + dev_addr, + byte_offset, + data, NULL); + + if (status) + ixl_dbg(pf, IXL_DBG_I2C, "I2C byte write status %s, error %s\n", + i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); + + return status; +} Index: head/sys/dev/ixl/ixl_pf_iov.c =================================================================== --- head/sys/dev/ixl/ixl_pf_iov.c (revision 335337) +++ head/sys/dev/ixl/ixl_pf_iov.c (revision 335338) @@ -1,1900 +1,1906 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf_iov.h" /* Private functions */ static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val); static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg); static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg); static bool ixl_zero_mac(const uint8_t *addr); static bool ixl_bcast_mac(const uint8_t *addr); static int ixl_vc_opcode_level(uint16_t opcode); static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr); static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf); static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi); static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf); static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len); static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op); static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line); static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info); static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info); static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue, enum i40e_queue_type *last_type, uint16_t *last_queue); static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector); static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf); static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size); static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues); static 
int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err); void ixl_initialize_sriov(struct ixl_pf *pf) { + return; +#if 0 device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; nvlist_t *pf_schema, *vf_schema; int iov_error; - /* SR-IOV is only supported when MSI-X is in use. */ - if (pf->msix <= 1) - return; - pf_schema = pci_iov_schema_alloc_node(); vf_schema = pci_iov_schema_alloc_node(); pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", IOV_SCHEMA_HASDEFAULT, TRUE); pci_iov_schema_add_bool(vf_schema, "allow-set-mac", IOV_SCHEMA_HASDEFAULT, FALSE); pci_iov_schema_add_bool(vf_schema, "allow-promisc", IOV_SCHEMA_HASDEFAULT, FALSE); pci_iov_schema_add_uint16(vf_schema, "num-queues", IOV_SCHEMA_HASDEFAULT, max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES); iov_error = pci_iov_attach(dev, pf_schema, vf_schema); if (iov_error != 0) { device_printf(dev, "Failed to initialize SR-IOV (error=%d)\n", iov_error); } else device_printf(dev, "SR-IOV ready\n"); pf->vc_debug_lvl = 1; +#endif } + /* * Allocate the VSI for a VF. */ static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf) { device_t dev; struct i40e_hw *hw; struct ixl_vsi *vsi; struct i40e_vsi_context vsi_ctx; int i; enum i40e_status_code code; hw = &pf->hw; vsi = &pf->vsi; dev = pf->dev; vsi_ctx.pf_num = hw->pf_id; vsi_ctx.uplink_seid = pf->veb_seid; vsi_ctx.connection_type = IXL_VSI_DATA_PORT; vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num; vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF; bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID); vsi_ctx.info.switch_id = htole16(0); vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID); vsi_ctx.info.sec_flags = 0; if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF) vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_NOTHING; vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID); vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG); /* ERJ: Only scattered allocation is supported for VFs right now */ for (i = 0; i < vf->qtag.num_active; i++) vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i]; for (; i < nitems(vsi_ctx.info.queue_mapping); i++) vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK); vsi_ctx.info.tc_mapping[0] = htole16( (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | - (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); + ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)); code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL); if (code != I40E_SUCCESS) return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); vf->vsi.seid = vsi_ctx.seid; vf->vsi.vsi_num = vsi_ctx.vsi_number; - // vf->vsi.first_queue = vf->qtag.qidx[0]; - vf->vsi.num_queues = vf->qtag.num_active; + // TODO: How to deal with num tx queues / num rx queues split? 
+ // I don't think just assigning this variable is going to work + vf->vsi.num_rx_queues = vf->qtag.num_active; + vf->vsi.num_tx_queues = vf->qtag.num_active; code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL); if (code != I40E_SUCCESS) return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL); if (code != I40E_SUCCESS) { device_printf(dev, "Failed to disable BW limit: %d\n", ixl_adminq_err_to_errno(hw->aq.asq_last_status)); return (ixl_adminq_err_to_errno(hw->aq.asq_last_status)); } memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info)); return (0); } static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; int error; hw = &pf->hw; error = ixl_vf_alloc_vsi(pf, vf); if (error != 0) return (error); vf->vsi.hw_filters_add = 0; vf->vsi.hw_filters_del = 0; - ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY); + // ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY); ixl_reconfigure_filters(&vf->vsi); return (0); } static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val) { uint32_t qtable; int index, shift; /* * Two queues are mapped in a single register, so we have to do some * gymnastics to convert the queue number into a register index and * shift. */ index = qnum / 2; shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT; qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num)); qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift); qtable |= val << shift; i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable); } static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t qtable; int i; hw = &pf->hw; /* * Contiguous mappings aren't actually supported by the hardware, * so we have to use non-contiguous mappings. */ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num), I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); /* Enable LAN traffic on this VF */ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num), I40E_VPLAN_MAPENA_TXRX_ENA_MASK); /* Program index of each VF queue into PF queue space * (This is only needed if QTABLE is enabled) */ - for (i = 0; i < vf->vsi.num_queues; i++) { + for (i = 0; i < vf->vsi.num_tx_queues; i++) { qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) << I40E_VPLAN_QTABLE_QINDEX_SHIFT; wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable); } for (; i < IXL_MAX_VSI_QUEUES; i++) wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), I40E_VPLAN_QTABLE_QINDEX_MASK); /* Map queues allocated to VF to its VSI; * This mapping matches the VF-wide mapping since the VF * is only given a single VSI */ - for (i = 0; i < vf->vsi.num_queues; i++) + for (i = 0; i < vf->vsi.num_tx_queues; i++) ixl_vf_map_vsi_queue(hw, vf, i, ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i)); /* Set rest of VSI queues as unused. 
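* Each leftover QTABLE slot is programmed with the all-ones QINDEX mask value.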
*/ for (; i < IXL_MAX_VSI_QUEUES; i++) ixl_vf_map_vsi_queue(hw, vf, i, I40E_VSILAN_QTABLE_QINDEX_0_MASK); ixl_flush(hw); } static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi) { struct i40e_hw *hw; hw = &pf->hw; if (vsi->seid == 0) return; i40e_aq_delete_element(hw, vsi->seid, NULL); } static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg) { wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK); ixl_flush(hw); } static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg) { wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK | I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK); ixl_flush(hw); } static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfint_reg, vpint_reg; int i; hw = &pf->hw; ixl_vf_vsi_release(pf, &vf->vsi); /* Index 0 has a special register. */ ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num)); for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i , vf->vf_num); ixl_vf_disable_queue_intr(hw, vfint_reg); } /* Index 0 has a special register. */ ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num)); for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) { vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num); ixl_vf_unregister_intr(hw, vpint_reg); } - vf->vsi.num_queues = 0; + vf->vsi.num_tx_queues = 0; + vf->vsi.num_rx_queues = 0; } static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; int i; uint16_t global_vf_num; uint32_t ciad; hw = &pf->hw; global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS | (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)); for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { ciad = rd32(hw, I40E_PF_PCI_CIAD); if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0) return (0); DELAY(1); } return (ETIMEDOUT); } static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfrtrig; hw = &pf->hw; vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); ixl_flush(hw); ixl_reinit_vf(pf, vf); } static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_hw *hw; uint32_t vfrstat, vfrtrig; int i, error; hw = &pf->hw; error = ixl_flush_pcie(pf, vf); if (error != 0) device_printf(pf->dev, "Timed out waiting for PCIe activity to stop on VF-%d\n", vf->vf_num); for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) { DELAY(10); vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num)); if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK) break; } if (i == IXL_VF_RESET_TIMEOUT) device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num); wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED); vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num)); vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK; wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig); if (vf->vsi.seid != 0) ixl_disable_rings(&vf->vsi); ixl_vf_release_resources(pf, vf); ixl_vf_setup_vsi(pf, vf); ixl_vf_map_queues(pf, vf); wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE); ixl_flush(hw); } static int ixl_vc_opcode_level(uint16_t opcode) { switch (opcode) { case VIRTCHNL_OP_GET_STATS: return (10); default: return (5); } } static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len) { struct i40e_hw *hw; int global_vf_id; hw = &pf->hw; global_vf_id = 
hw->func_caps.vf_base_id + vf->vf_num; I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op), "Sending msg (op=%s[%d], status=%d) to VF-%d\n", ixl_vc_opcode_str(op), op, status, vf->vf_num); i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL); } static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op) { ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0); } static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line) { I40E_VC_DEBUG(pf, 1, "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n", ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status), status, vf->vf_num, file, line); ixl_send_vf_msg(pf, vf, op, status, NULL, 0); } static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_version_info reply; if (msg_size != sizeof(struct virtchnl_version_info)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION, I40E_ERR_PARAM); return; } vf->version = ((struct virtchnl_version_info *)msg)->minor; reply.major = VIRTCHNL_VERSION_MAJOR; reply.minor = VIRTCHNL_VERSION_MINOR; ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply, sizeof(reply)); } static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { if (msg_size != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF, I40E_ERR_PARAM); return; } ixl_reset_vf(pf, vf); /* No response to a reset message. */ } static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vf_resource reply; if ((vf->version == 0 && msg_size != 0) || (vf->version == 1 && msg_size != 4)) { device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size," " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR, vf->version); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES, I40E_ERR_PARAM); return; } bzero(&reply, sizeof(reply)); if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_VLAN; else /* Force VF RSS setup by PF in 1.1+ VFs */ reply.vf_cap_flags = *(u32 *)msg & ( VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF | VIRTCHNL_VF_OFFLOAD_VLAN); reply.num_vsis = 1; - reply.num_queue_pairs = vf->vsi.num_queues; + reply.num_queue_pairs = vf->vsi.num_tx_queues; reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf; reply.rss_key_size = 52; reply.rss_lut_size = 64; reply.vsi_res[0].vsi_id = vf->vsi.vsi_num; reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; - reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues; + reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues; memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN); ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES, I40E_SUCCESS, &reply, sizeof(reply)); } static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info) { struct i40e_hw *hw; struct i40e_hmc_obj_txq txq; uint16_t global_queue_num, global_vf_num; enum i40e_status_code status; uint32_t qtx_ctl; hw = &pf->hw; global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id); global_vf_num = hw->func_caps.vf_base_id + vf->vf_num; bzero(&txq, sizeof(txq)); DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n", vf->vf_num, global_queue_num, info->queue_id, global_vf_num); status = i40e_clear_lan_tx_queue_context(hw, global_queue_num); if (status != I40E_SUCCESS) return (EINVAL); txq.base = 
info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS; txq.head_wb_ena = info->headwb_enabled; txq.head_wb_addr = info->dma_headwb_addr; txq.qlen = info->ring_len; txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]); txq.rdylist_act = 0; status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq); if (status != I40E_SUCCESS) return (EINVAL); qtx_ctl = I40E_QTX_CTL_VF_QUEUE | (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) | (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT); wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl); ixl_flush(hw); ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true); return (0); } static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info) { struct i40e_hw *hw; struct i40e_hmc_obj_rxq rxq; uint16_t global_queue_num; enum i40e_status_code status; hw = &pf->hw; global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id); bzero(&rxq, sizeof(rxq)); DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n", vf->vf_num, global_queue_num, info->queue_id); if (info->databuffer_size > IXL_VF_MAX_BUFFER) return (EINVAL); if (info->max_pkt_size > IXL_VF_MAX_FRAME || info->max_pkt_size < ETHER_MIN_LEN) return (EINVAL); if (info->splithdr_enabled) { if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER) return (EINVAL); rxq.hsplit_0 = info->rx_split_pos & (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP | I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP); rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; rxq.dtype = 2; } status = i40e_clear_lan_rx_queue_context(hw, global_queue_num); if (status != I40E_SUCCESS) return (EINVAL); rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS; rxq.qlen = info->ring_len; rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT; rxq.dsize = 1; rxq.crcstrip = 1; rxq.l2tsel = 1; rxq.rxmax = info->max_pkt_size; rxq.tphrdesc_ena = 1; rxq.tphwdesc_ena = 1; rxq.tphdata_ena = 1; rxq.tphhead_ena = 1; rxq.lrxqthresh = 2; rxq.prefena = 1; status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq); if (status != I40E_SUCCESS) return (EINVAL); ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false); return (0); } static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vsi_queue_config_info *info; struct virtchnl_queue_pair_info *pair; uint16_t expected_msg_size; int i; if (msg_size < sizeof(*info)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } info = msg; - if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) { + if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) { device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n", - vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues); + vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair); if (msg_size != expected_msg_size) { device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n", vf->vf_num, msg_size, expected_msg_size); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (info->vsi_id != vf->vsi.vsi_num) { device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", vf->vf_num, info->vsi_id, vf->vsi.vsi_num); 
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } for (i = 0; i < info->num_queue_pairs; i++) { pair = &info->qpair[i]; if (pair->txq.vsi_id != vf->vsi.vsi_num || pair->rxq.vsi_id != vf->vsi.vsi_num || pair->txq.queue_id != pair->rxq.queue_id || - pair->txq.queue_id >= vf->vsi.num_queues) { + pair->txq.queue_id >= vf->vsi.num_tx_queues) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM); return; } } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES); } static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue, enum i40e_queue_type *last_type, uint16_t *last_queue) { uint32_t offset, qctl; uint16_t itr_indx; if (cur_type == I40E_QUEUE_TYPE_RX) { offset = I40E_QINT_RQCTL(cur_queue); itr_indx = vector->rxitr_idx; } else { offset = I40E_QINT_TQCTL(cur_queue); itr_indx = vector->txitr_idx; } qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) | (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | I40E_QINT_RQCTL_CAUSE_ENA_MASK | (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT)); wr32(&pf->hw, offset, qctl); *last_type = cur_type; *last_queue = cur_queue; } static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector) { struct i40e_hw *hw; u_int qindex; enum i40e_queue_type type, last_type; uint32_t lnklst_reg; uint16_t rxq_map, txq_map, cur_queue, last_queue; hw = &pf->hw; rxq_map = vector->rxq_map; txq_map = vector->txq_map; last_queue = IXL_END_OF_INTR_LNKLST; last_type = I40E_QUEUE_TYPE_RX; /* * The datasheet says to optimize performance, RX queues and TX queues * should be interleaved in the interrupt linked list, so we process * both at once here. 
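	 * Each pass pulls the lowest set bit out of txq_map and then rxq_map,
	 * linking the corresponding PF queues together via ixl_vf_set_qctl().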
*/ while ((rxq_map != 0) || (txq_map != 0)) { if (txq_map != 0) { qindex = ffs(txq_map) - 1; type = I40E_QUEUE_TYPE_TX; cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex); ixl_vf_set_qctl(pf, vector, type, cur_queue, &last_type, &last_queue); txq_map &= ~(1 << qindex); } if (rxq_map != 0) { qindex = ffs(rxq_map) - 1; type = I40E_QUEUE_TYPE_RX; cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex); ixl_vf_set_qctl(pf, vector, type, cur_queue, &last_type, &last_queue); rxq_map &= ~(1 << qindex); } } if (vector->vector_id == 0) lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num); else lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id, vf->vf_num); wr32(hw, lnklst_reg, (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) | (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)); ixl_flush(hw); } static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_irq_map_info *map; struct virtchnl_vector_map *vector; struct i40e_hw *hw; int i, largest_txq, largest_rxq; hw = &pf->hw; if (msg_size < sizeof(*map)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } map = msg; if (map->num_vectors == 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } for (i = 0; i < map->num_vectors; i++) { vector = &map->vecmap[i]; if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) || vector->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } if (vector->rxq_map != 0) { largest_rxq = fls(vector->rxq_map) - 1; - if (largest_rxq >= vf->vsi.num_queues) { + if (largest_rxq >= vf->vsi.num_rx_queues) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } } if (vector->txq_map != 0) { largest_txq = fls(vector->txq_map) - 1; - if (largest_txq >= vf->vsi.num_queues) { + if (largest_txq >= vf->vsi.num_tx_queues) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } } if (vector->rxitr_idx > IXL_MAX_ITR_IDX || vector->txitr_idx > IXL_MAX_ITR_IDX) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM); return; } ixl_vf_config_vector(pf, vf, vector); } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP); } static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_queue_select *select; int error = 0; if (msg_size != sizeof(*select)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_PARAM); return; } select = msg; if (select->vsi_id != vf->vsi.vsi_num || select->rx_queues == 0 || select->tx_queues == 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_PARAM); return; } /* Enable TX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->tx_queues) { /* Warn if queue is out of VF allocation range */ - if (i >= vf->vsi.num_queues) { + if (i >= vf->vsi.num_tx_queues) { device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) continue; /* Warn if this queue is already marked as enabled */ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n", vf->vf_num, i); error = ixl_enable_tx_ring(pf, 
&vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true); } } /* Enable RX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->rx_queues) { /* Warn if queue is out of VF allocation range */ - if (i >= vf->vsi.num_queues) { + if (i >= vf->vsi.num_rx_queues) { device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) continue; /* Warn if this queue is already marked as enabled */ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n", vf->vf_num, i); error = ixl_enable_rx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false); } } if (error) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES, I40E_ERR_TIMEOUT); return; } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES); } static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_queue_select *select; int error = 0; if (msg_size != sizeof(*select)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_PARAM); return; } select = msg; if (select->vsi_id != vf->vsi.vsi_num || select->rx_queues == 0 || select->tx_queues == 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_PARAM); return; } /* Disable TX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->tx_queues) { /* Warn if queue is out of VF allocation range */ - if (i >= vf->vsi.num_queues) { + if (i >= vf->vsi.num_tx_queues) { device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true)) continue; /* Warn if this queue is already marked as disabled */ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) { device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n", vf->vf_num, i); continue; } error = ixl_disable_tx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true); } } /* Enable RX rings selected by the VF */ for (int i = 0; i < 32; i++) { if ((1 << i) & select->rx_queues) { /* Warn if queue is out of VF allocation range */ - if (i >= vf->vsi.num_queues) { + if (i >= vf->vsi.num_rx_queues) { device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n", vf->vf_num, i); break; } /* Skip this queue if it hasn't been configured */ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false)) continue; /* Warn if this queue is already marked as disabled */ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) { device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n", vf->vf_num, i); continue; } error = ixl_disable_rx_ring(pf, &vf->qtag, i); if (error) break; else ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false); } } if (error) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES, I40E_ERR_TIMEOUT); return; } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES); } static bool ixl_zero_mac(const uint8_t *addr) { uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0}; return (cmp_etheraddr(addr, zero)); } static bool ixl_bcast_mac(const uint8_t *addr) { + static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = + {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; return (cmp_etheraddr(addr, ixl_bcast_addr)); } static int 
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr) { if (ixl_zero_mac(addr) || ixl_bcast_mac(addr)) return (EINVAL); /* * If the VF is not allowed to change its MAC address, don't let it * set a MAC filter for an address that is not a multicast address and * is not its assigned MAC. */ if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) && !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac))) return (EPERM); return (0); } static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_ether_addr_list *addr_list; struct virtchnl_ether_addr *addr; struct ixl_vsi *vsi; int i; size_t expected_size; vsi = &vf->vsi; if (msg_size < sizeof(*addr_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } addr_list = msg; expected_size = sizeof(*addr_list) + addr_list->num_elements * sizeof(*addr); if (addr_list->num_elements == 0 || addr_list->vsi_id != vsi->vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } for (i = 0; i < addr_list->num_elements; i++) { if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY); } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR); } static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_ether_addr_list *addr_list; struct virtchnl_ether_addr *addr; size_t expected_size; int i; if (msg_size < sizeof(*addr_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } addr_list = msg; expected_size = sizeof(*addr_list) + addr_list->num_elements * sizeof(*addr); if (addr_list->num_elements == 0 || addr_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM); return; } } for (i = 0; i < addr_list->num_elements; i++) { addr = &addr_list->list[i]; ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY); } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR); } static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf) { struct i40e_vsi_context vsi_ctx; vsi_ctx.seid = vf->vsi.seid; bzero(&vsi_ctx.info, sizeof(vsi_ctx.info)); vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID); vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL)); } static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vlan_filter_list *filter_list; enum i40e_status_code code; size_t expected_size; int i; if (msg_size < sizeof(*filter_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } filter_list = msg; expected_size = sizeof(*filter_list) + filter_list->num_elements * sizeof(uint16_t); if (filter_list->num_elements == 0 || filter_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { i40e_send_vf_nack(pf, vf, 
VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) { if (filter_list->vlan_id[i] > EVL_VLID_MASK) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } } code = ixl_vf_enable_vlan_strip(pf, vf); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); } for (i = 0; i < filter_list->num_elements; i++) ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN); } static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_vlan_filter_list *filter_list; int i; size_t expected_size; if (msg_size < sizeof(*filter_list)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } filter_list = msg; expected_size = sizeof(*filter_list) + filter_list->num_elements * sizeof(uint16_t); if (filter_list->num_elements == 0 || filter_list->vsi_id != vf->vsi.vsi_num || msg_size != expected_size) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) { if (filter_list->vlan_id[i] > EVL_VLID_MASK) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } } if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN, I40E_ERR_PARAM); return; } for (i = 0; i < filter_list->num_elements; i++) ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN); } static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_promisc_info *info; enum i40e_status_code code; if (msg_size != sizeof(*info)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } info = msg; if (info->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; } code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id, info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); return; } code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id, info->flags & FLAG_VF_MULTICAST_PROMISC, NULL); if (code != I40E_SUCCESS) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code); return; } ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE); } static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct virtchnl_queue_select *queue; if (msg_size != sizeof(*queue)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS, I40E_ERR_PARAM); return; } queue = msg; if (queue->vsi_id != vf->vsi.vsi_num) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS, I40E_ERR_PARAM); return; } ixl_update_eth_stats(&vf->vsi); ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS, I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats)); } static void ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_hw *hw; struct virtchnl_rss_key *key; struct i40e_aqc_get_set_rss_key_data key_data; enum i40e_status_code status; hw = &pf->hw; if (msg_size < sizeof(*key)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, I40E_ERR_PARAM); return; } key = msg; if (key->key_len > 52) { 
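		/* 52 bytes is the 40-byte standard RSS key plus the 12-byte extended
		 * hash key, i.e. the rss_key_size advertised in GET_VF_RESOURCES. */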
device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n", vf->vf_num, key->key_len, 52); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, I40E_ERR_PARAM); return; } if (key->vsi_id != vf->vsi.vsi_num) { device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", vf->vf_num, key->vsi_id, vf->vsi.vsi_num); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, I40E_ERR_PARAM); return; } /* Fill out hash using MAC-dependent method */ if (hw->mac.type == I40E_MAC_X722) { bzero(&key_data, sizeof(key_data)); if (key->key_len <= 40) bcopy(key->key, key_data.standard_rss_key, key->key_len); else { bcopy(key->key, key_data.standard_rss_key, 40); bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40); } status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data); if (status) { device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY, I40E_ERR_ADMIN_QUEUE_ERROR); return; } } else { for (int i = 0; i < (key->key_len / 4); i++) i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]); } DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!", vf->vf_num, key->key[0]); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY); } static void ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_hw *hw; struct virtchnl_rss_lut *lut; enum i40e_status_code status; hw = &pf->hw; if (msg_size < sizeof(*lut)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, I40E_ERR_PARAM); return; } lut = msg; if (lut->lut_entries > 64) { device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n", vf->vf_num, lut->lut_entries, 64); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, I40E_ERR_PARAM); return; } if (lut->vsi_id != vf->vsi.vsi_num) { device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n", vf->vf_num, lut->vsi_id, vf->vsi.vsi_num); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, I40E_ERR_PARAM); return; } /* Fill out LUT using MAC-dependent method */ if (hw->mac.type == I40E_MAC_X722) { status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries); if (status) { device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT, I40E_ERR_ADMIN_QUEUE_ERROR); return; } } else { for (int i = 0; i < (lut->lut_entries / 4); i++) i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]); } DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!", vf->vf_num, lut->lut[0], lut->lut_entries); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT); } static void ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size) { struct i40e_hw *hw; struct virtchnl_rss_hena *hena; hw = &pf->hw; if (msg_size < sizeof(*hena)) { i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA, I40E_ERR_PARAM); return; } hena = msg; /* Set HENA */ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena); i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32)); DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx", vf->vf_num, hena->hena); ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA); } static void 
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf) { struct virtchnl_pf_event event; struct i40e_hw *hw; hw = &pf->hw; event.event = VIRTCHNL_EVENT_LINK_CHANGE; event.severity = PF_EVENT_SEVERITY_INFO; event.event_data.link_event.link_status = pf->vsi.link_active; event.event_data.link_event.link_speed = (enum virtchnl_link_speed)hw->phy.link_info.link_speed; ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event, sizeof(event)); } void ixl_broadcast_link_state(struct ixl_pf *pf) { int i; for (i = 0; i < pf->num_vfs; i++) ixl_notify_vf_link_state(pf, &pf->vfs[i]); } void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event) { struct ixl_vf *vf; void *msg; uint16_t vf_num, msg_size; uint32_t opcode; vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id; opcode = le32toh(event->desc.cookie_high); if (vf_num >= pf->num_vfs) { device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num); return; } vf = &pf->vfs[vf_num]; msg = event->msg_buf; msg_size = event->msg_len; I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode), "Got msg %s(%d) from%sVF-%d of size %d\n", ixl_vc_opcode_str(opcode), opcode, (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ", vf_num, msg_size); /* This must be a stray msg from a previously destroyed VF. */ if (!(vf->vf_flags & VF_FLAG_ENABLED)) return; switch (opcode) { case VIRTCHNL_OP_VERSION: ixl_vf_version_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_RESET_VF: ixl_vf_reset_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_GET_VF_RESOURCES: ixl_vf_get_resources_msg(pf, vf, msg, msg_size); /* Notify VF of link state after it obtains queues, as this is * the last thing it will do as part of initialization */ ixl_notify_vf_link_state(pf, vf); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: ixl_vf_config_vsi_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: ixl_vf_config_irq_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_ENABLE_QUEUES: ixl_vf_enable_queues_msg(pf, vf, msg, msg_size); /* Notify VF of link state after it obtains queues, as this is * the last thing it will do as part of initialization */ ixl_notify_vf_link_state(pf, vf); break; case VIRTCHNL_OP_DISABLE_QUEUES: ixl_vf_disable_queues_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_ADD_ETH_ADDR: ixl_vf_add_mac_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_DEL_ETH_ADDR: ixl_vf_del_mac_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_ADD_VLAN: ixl_vf_add_vlan_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_DEL_VLAN: ixl_vf_del_vlan_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: ixl_vf_config_promisc_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_GET_STATS: ixl_vf_get_stats_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size); break; case VIRTCHNL_OP_SET_RSS_HENA: ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size); break; /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */ case VIRTCHNL_OP_CONFIG_TX_QUEUE: case VIRTCHNL_OP_CONFIG_RX_QUEUE: default: i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED); break; } } /* Handle any VFs that have reset themselves via a Function Level Reset(FLR). 
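 * Each enabled VF's bit in GLGEN_VFLRSTAT is checked; if set, the bit is
 * cleared and the VF is reinitialized via ixl_reinit_vf().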
*/ void ixl_handle_vflr(void *arg, int pending) { struct ixl_pf *pf; struct ixl_vf *vf; struct i40e_hw *hw; uint16_t global_vf_num; uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0; int i; pf = arg; hw = &pf->hw; - IXL_PF_LOCK(pf); + /* TODO: May need to lock this */ for (i = 0; i < pf->num_vfs; i++) { global_vf_num = hw->func_caps.vf_base_id + i; vf = &pf->vfs[i]; if (!(vf->vf_flags & VF_FLAG_ENABLED)) continue; vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num); vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num); vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index)); if (vflrstat & vflrstat_mask) { wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index), vflrstat_mask); ixl_reinit_vf(pf, vf); } } + atomic_clear_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ); icr0 = rd32(hw, I40E_PFINT_ICR0_ENA); icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, icr0); ixl_flush(hw); - IXL_PF_UNLOCK(pf); + // IXL_PF_UNLOCK() } static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err) { switch (err) { case I40E_AQ_RC_EPERM: return (EPERM); case I40E_AQ_RC_ENOENT: return (ENOENT); case I40E_AQ_RC_ESRCH: return (ESRCH); case I40E_AQ_RC_EINTR: return (EINTR); case I40E_AQ_RC_EIO: return (EIO); case I40E_AQ_RC_ENXIO: return (ENXIO); case I40E_AQ_RC_E2BIG: return (E2BIG); case I40E_AQ_RC_EAGAIN: return (EAGAIN); case I40E_AQ_RC_ENOMEM: return (ENOMEM); case I40E_AQ_RC_EACCES: return (EACCES); case I40E_AQ_RC_EFAULT: return (EFAULT); case I40E_AQ_RC_EBUSY: return (EBUSY); case I40E_AQ_RC_EEXIST: return (EEXIST); case I40E_AQ_RC_EINVAL: return (EINVAL); case I40E_AQ_RC_ENOTTY: return (ENOTTY); case I40E_AQ_RC_ENOSPC: return (ENOSPC); case I40E_AQ_RC_ENOSYS: return (ENOSYS); case I40E_AQ_RC_ERANGE: return (ERANGE); case I40E_AQ_RC_EFLUSHED: return (EINVAL); /* No exact equivalent in errno.h */ case I40E_AQ_RC_BAD_ADDR: return (EFAULT); case I40E_AQ_RC_EMODE: return (EPERM); case I40E_AQ_RC_EFBIG: return (EFBIG); default: return (EINVAL); } } int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) { struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *pf_vsi; enum i40e_status_code ret; int i, error; pf = device_get_softc(dev); hw = &pf->hw; pf_vsi = &pf->vsi; - IXL_PF_LOCK(pf); + //IXL_PF_LOCK(pf); pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT | M_ZERO); if (pf->vfs == NULL) { error = ENOMEM; goto fail; } for (i = 0; i < num_vfs; i++) sysctl_ctx_init(&pf->vfs[i].ctx); ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid, 1, FALSE, &pf->veb_seid, FALSE, NULL); if (ret != I40E_SUCCESS) { error = ixl_adminq_err_to_errno(hw->aq.asq_last_status); device_printf(dev, "add_veb failed; code=%d error=%d", ret, error); goto fail; } pf->num_vfs = num_vfs; - IXL_PF_UNLOCK(pf); + //IXL_PF_UNLOCK(pf); return (0); fail: free(pf->vfs, M_IXL); pf->vfs = NULL; - IXL_PF_UNLOCK(pf); + //IXL_PF_UNLOCK(pf); return (error); } void ixl_iov_uninit(device_t dev) { struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *vsi; struct ifnet *ifp; struct ixl_vf *vfs; int i, num_vfs; pf = device_get_softc(dev); hw = &pf->hw; vsi = &pf->vsi; ifp = vsi->ifp; - IXL_PF_LOCK(pf); + //IXL_PF_LOCK(pf); for (i = 0; i < pf->num_vfs; i++) { if (pf->vfs[i].vsi.seid != 0) i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL); ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag); ixl_free_mac_filters(&pf->vfs[i].vsi); DDPRINTF(dev, "VF %d: %d released\n", i, pf->vfs[i].qtag.num_allocated); DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr)); } if (pf->veb_seid != 
0) { i40e_aq_delete_element(hw, pf->veb_seid, NULL); pf->veb_seid = 0; } vfs = pf->vfs; num_vfs = pf->num_vfs; pf->vfs = NULL; pf->num_vfs = 0; - IXL_PF_UNLOCK(pf); + //IXL_PF_UNLOCK(pf); /* Do this after the unlock as sysctl_ctx_free might sleep. */ for (i = 0; i < num_vfs; i++) sysctl_ctx_free(&vfs[i].ctx); free(vfs, M_IXL); } static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues) { device_t dev = pf->dev; int error; /* Validate, and clamp value if invalid */ if (num_queues < 1 || num_queues > 16) device_printf(dev, "Invalid num-queues (%d) for VF %d\n", num_queues, vf->vf_num); if (num_queues < 1) { device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num); num_queues = 1; } else if (num_queues > 16) { device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num); num_queues = 16; } error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag); if (error) { device_printf(dev, "Error allocating %d queues for VF %d's VSI\n", num_queues, vf->vf_num); return (ENOSPC); } DDPRINTF(dev, "VF %d: %d allocated, %d active", vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active); DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr)); return (0); } int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { char sysctl_name[QUEUE_NAME_LEN]; struct ixl_pf *pf; struct ixl_vf *vf; const void *mac; size_t size; int error; int vf_num_queues; pf = device_get_softc(dev); vf = &pf->vfs[vfnum]; - IXL_PF_LOCK(pf); + //IXL_PF_LOCK(pf); vf->vf_num = vfnum; vf->vsi.back = pf; vf->vf_flags = VF_FLAG_ENABLED; SLIST_INIT(&vf->vsi.ftl); /* Reserve queue allocation from PF */ vf_num_queues = nvlist_get_number(params, "num-queues"); error = ixl_vf_reserve_queues(pf, vf, vf_num_queues); if (error != 0) goto out; error = ixl_vf_setup_vsi(pf, vf); if (error != 0) goto out; if (nvlist_exists_binary(params, "mac-addr")) { mac = nvlist_get_binary(params, "mac-addr", &size); bcopy(mac, vf->mac, ETHER_ADDR_LEN); if (nvlist_get_bool(params, "allow-set-mac")) vf->vf_flags |= VF_FLAG_SET_MAC_CAP; } else /* * If the administrator has not specified a MAC address then * we must allow the VF to choose one. */ vf->vf_flags |= VF_FLAG_SET_MAC_CAP; if (nvlist_get_bool(params, "mac-anti-spoof")) vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF; if (nvlist_get_bool(params, "allow-promisc")) vf->vf_flags |= VF_FLAG_PROMISC_CAP; vf->vf_flags |= VF_FLAG_VLAN_CAP; ixl_reset_vf(pf, vf); out: - IXL_PF_UNLOCK(pf); + //IXL_PF_UNLOCK(pf); if (error == 0) { snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum); ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name); } return (error); } Index: head/sys/dev/ixl/ixl_pf_main.c =================================================================== --- head/sys/dev/ixl/ixl_pf_main.c (revision 335337) +++ head/sys/dev/ixl/ixl_pf_main.c (revision 335338) @@ -1,6551 +1,5049 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf.h" #ifdef PCI_IOV #include "ixl_pf_iov.h" #endif #ifdef IXL_IW #include "ixl_iw.h" #include "ixl_iw_int.h" #endif -#ifdef DEV_NETMAP -#include -#include -#include -#endif /* DEV_NETMAP */ - -static int ixl_vsi_setup_queue(struct ixl_vsi *, struct ixl_queue *, int); -static u64 ixl_max_aq_speed_to_value(u8); static u8 ixl_convert_sysctl_aq_link_speed(u8, bool); static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool); +static void ixl_del_default_hw_filters(struct ixl_vsi *); /* Sysctls */ static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS); /* Debug Sysctls */ static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS); static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS); +static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS); #ifdef IXL_DEBUG static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS); static int 
ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS); #endif #ifdef IXL_IW extern int ixl_enable_iwarp; extern int ixl_limit_iwarp_msix; #endif const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; const char * const ixl_fc_string[6] = { "None", "Rx", "Tx", "Full", "Priority", "Default" }; static char *ixl_fec_string[3] = { "CL108 RS-FEC", "CL74 FC-FEC/BASE-R", "None" }; MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); void ixl_debug_core(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...) { va_list args; if (!(mask & pf->dbg_mask)) return; /* Re-implement device_printf() */ device_print_prettyname(pf->dev); va_start(args, fmt); vprintf(fmt, args); va_end(args); } /* ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string */ void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf) { u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24); u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF); u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF); sbuf_printf(buf, "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d", hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, hw->aq.api_maj_ver, hw->aq.api_min_ver, (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >> IXL_NVM_VERSION_HI_SHIFT, (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >> IXL_NVM_VERSION_LO_SHIFT, hw->nvm.eetrack, oem_ver, oem_build, oem_patch); } void ixl_print_nvm_version(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *sbuf; sbuf = sbuf_new_auto(); ixl_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); device_printf(dev, "%s\n", sbuf_data(sbuf)); sbuf_delete(sbuf); } static void ixl_configure_tx_itr(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_tx_queue *que = vsi->tx_queues; vsi->tx_itr_setting = pf->tx_itr; - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < vsi->num_tx_queues; i++, que++) { struct tx_ring *txr = &que->txr; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; txr->latency = IXL_AVE_LATENCY; } } static void ixl_configure_rx_itr(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; + struct ixl_rx_queue *que = vsi->rx_queues; vsi->rx_itr_setting = pf->rx_itr; - for (int i = 0; i < vsi->num_queues; i++, que++) { + for (int i = 0; i < vsi->num_rx_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; rxr->latency = IXL_AVE_LATENCY; } } /* * Write PF ITR values to queue ITR registers. */ void ixl_configure_itr(struct ixl_pf *pf) { ixl_configure_tx_itr(pf); ixl_configure_rx_itr(pf); } - /********************************************************************* - * Init entry point * - * This routine is used in two ways. It is used by the stack as - * init entry point in network interface structure. It is also used - * by the driver as a hw/sw initialization routine to get to a - * consistent state. 
- * - * return 0 on success, positive on failure - **********************************************************************/ -void -ixl_init_locked(struct ixl_pf *pf) -{ - struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; - struct ifnet *ifp = vsi->ifp; - device_t dev = pf->dev; - struct i40e_filter_control_settings filter; - u8 tmpaddr[ETHER_ADDR_LEN]; - int ret; - - INIT_DEBUGOUT("ixl_init_locked: begin"); - IXL_PF_LOCK_ASSERT(pf); - - ixl_stop_locked(pf); - - /* - * If the aq is dead here, it probably means something outside of the driver - * did something to the adapter, like a PF reset. - * So rebuild the driver's state here if that occurs. - */ - if (!i40e_check_asq_alive(&pf->hw)) { - device_printf(dev, "Admin Queue is down; resetting...\n"); - ixl_teardown_hw_structs(pf); - ixl_reset(pf); - } - - /* Get the latest mac address... User might use a LAA */ - bcopy(IF_LLADDR(vsi->ifp), tmpaddr, - ETH_ALEN); - if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && - (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { - device_printf(dev, "ixl_init_locked: reconfigure MAC addr\n"); - ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); - bcopy(tmpaddr, hw->mac.addr, - ETH_ALEN); - ret = i40e_aq_mac_address_write(hw, - I40E_AQC_WRITE_TYPE_LAA_ONLY, - hw->mac.addr, NULL); - if (ret) { - device_printf(dev, "LLA address" - "change failed!!\n"); - return; - } - } - - ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); - - /* Set the various hardware offload abilities */ - ifp->if_hwassist = 0; - if (ifp->if_capenable & IFCAP_TSO) - ifp->if_hwassist |= CSUM_TSO; - if (ifp->if_capenable & IFCAP_TXCSUM) - ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); - if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) - ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); - - /* Set up the device filtering */ - bzero(&filter, sizeof(filter)); - filter.enable_ethtype = TRUE; - filter.enable_macvlan = TRUE; - filter.enable_fdir = FALSE; - filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; - if (i40e_set_filter_control(hw, &filter)) - device_printf(dev, "i40e_set_filter_control() failed\n"); - - /* Prepare the VSI: rings, hmc contexts, etc... 
*/ - if (ixl_initialize_vsi(vsi)) { - device_printf(dev, "initialize vsi failed!!\n"); - return; - } - - /* Set up RSS */ - ixl_config_rss(pf); - - /* Add protocol filters to list */ - ixl_init_filters(vsi); - - /* Setup vlan's if needed */ - ixl_setup_vlan_filters(vsi); - - /* Set up MSI/X routing and the ITR settings */ - if (pf->msix > 1) { - ixl_configure_queue_intr_msix(pf); - ixl_configure_itr(pf); - } else - ixl_configure_legacy(pf); - - ixl_enable_rings(vsi); - - i40e_aq_set_default_vsi(hw, vsi->seid, NULL); - - ixl_reconfigure_filters(vsi); - - /* And now turn on interrupts */ - ixl_enable_intr(vsi); - - /* Get link info */ - hw->phy.get_link_info = TRUE; - i40e_get_link_status(hw, &pf->link_up); - ixl_update_link_status(pf); - - /* Start the local timer */ - callout_reset(&pf->timer, hz, ixl_local_timer, pf); - - /* Now inform the stack we're ready */ - ifp->if_drv_flags |= IFF_DRV_RUNNING; - -#ifdef IXL_IW - if (ixl_enable_iwarp && pf->iw_enabled) { - ret = ixl_iw_pf_init(pf); - if (ret) - device_printf(dev, - "initialize iwarp failed, code %d\n", ret); - } -#endif -} - - -/********************************************************************* - * * Get the hardware capabilities * **********************************************************************/ int ixl_get_hw_capabilities(struct ixl_pf *pf) { struct i40e_aqc_list_capabilities_element_resp *buf; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; - int error, len; - u16 needed; - bool again = TRUE; + enum i40e_status_code status; + int len, i2c_intfc_num; + bool again = TRUE; + u16 needed; len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp); retry: if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *) malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate cap memory\n"); return (ENOMEM); } /* This populates the hw struct */ - error = i40e_aq_discover_capabilities(hw, buf, len, + status = i40e_aq_discover_capabilities(hw, buf, len, &needed, i40e_aqc_opc_list_func_capabilities, NULL); free(buf, M_DEVBUF); if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) && (again == TRUE)) { /* retry once with a larger buffer */ again = FALSE; len = needed; goto retry; - } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { - device_printf(dev, "capability discovery failed: %d\n", - pf->hw.aq.asq_last_status); + } else if (status != I40E_SUCCESS) { + device_printf(dev, "capability discovery failed; status %s, error %s\n", + i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (ENODEV); } - /* Capture this PF's starting queue pair */ - pf->qbase = hw->func_caps.base_queue; - -#ifdef IXL_DEBUG - device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, " - "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n", - hw->pf_id, hw->func_caps.num_vfs, - hw->func_caps.num_msix_vectors, - hw->func_caps.num_msix_vectors_vf, - hw->func_caps.fd_filters_guaranteed, - hw->func_caps.fd_filters_best_effort, - hw->func_caps.num_tx_qp, - hw->func_caps.num_rx_qp, - hw->func_caps.base_queue); -#endif - struct i40e_osdep *osdep = (struct i40e_osdep *)hw->back; - osdep->i2c_intfc_num = ixl_find_i2c_interface(pf); - if (osdep->i2c_intfc_num != -1) + /* + * Some devices have both MDIO and I2C; since this isn't reported + * by the FW, check registers to see if an I2C interface exists. 
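+	 * The pf->i2c_access_method setting handled below then selects how I2C
+	 * is accessed: bit-banging, direct register I/O, or admin queue commands
+	 * (method 0 picks between the latter two based on MAC type and API version).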
+ */ + i2c_intfc_num = ixl_find_i2c_interface(pf); + if (i2c_intfc_num != -1) pf->has_i2c = true; + /* Determine functions to use for driver I2C accesses */ + switch (pf->i2c_access_method) { + case 0: { + if (hw->mac.type == I40E_MAC_XL710 && + hw->aq.api_maj_ver == 1 && + hw->aq.api_min_ver >= 7) { + pf->read_i2c_byte = ixl_read_i2c_byte_aq; + pf->write_i2c_byte = ixl_write_i2c_byte_aq; + } else { + pf->read_i2c_byte = ixl_read_i2c_byte_reg; + pf->write_i2c_byte = ixl_write_i2c_byte_reg; + } + break; + } + case 3: + pf->read_i2c_byte = ixl_read_i2c_byte_aq; + pf->write_i2c_byte = ixl_write_i2c_byte_aq; + break; + case 2: + pf->read_i2c_byte = ixl_read_i2c_byte_reg; + pf->write_i2c_byte = ixl_write_i2c_byte_reg; + break; + case 1: + pf->read_i2c_byte = ixl_read_i2c_byte_bb; + pf->write_i2c_byte = ixl_write_i2c_byte_bb; + break; + default: + /* Should not happen */ + device_printf(dev, "Error setting I2C access functions\n"); + break; + } + /* Print a subset of the capability information. */ device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n", hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors, hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp, (hw->func_caps.mdio_port_mode == 2) ? "I2C" : (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" : (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" : "MDIO shared"); - return (error); + return (0); } -void -ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask) -{ - device_t dev = vsi->dev; - - /* Enable/disable TXCSUM/TSO4 */ - if (!(ifp->if_capenable & IFCAP_TXCSUM) - && !(ifp->if_capenable & IFCAP_TSO4)) { - if (mask & IFCAP_TXCSUM) { - ifp->if_capenable |= IFCAP_TXCSUM; - /* enable TXCSUM, restore TSO if previously enabled */ - if (vsi->flags & IXL_FLAGS_KEEP_TSO4) { - vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; - ifp->if_capenable |= IFCAP_TSO4; - } - } - else if (mask & IFCAP_TSO4) { - ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4); - vsi->flags &= ~IXL_FLAGS_KEEP_TSO4; - device_printf(dev, - "TSO4 requires txcsum, enabling both...\n"); - } - } else if((ifp->if_capenable & IFCAP_TXCSUM) - && !(ifp->if_capenable & IFCAP_TSO4)) { - if (mask & IFCAP_TXCSUM) - ifp->if_capenable &= ~IFCAP_TXCSUM; - else if (mask & IFCAP_TSO4) - ifp->if_capenable |= IFCAP_TSO4; - } else if((ifp->if_capenable & IFCAP_TXCSUM) - && (ifp->if_capenable & IFCAP_TSO4)) { - if (mask & IFCAP_TXCSUM) { - vsi->flags |= IXL_FLAGS_KEEP_TSO4; - ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4); - device_printf(dev, - "TSO4 requires txcsum, disabling both...\n"); - } else if (mask & IFCAP_TSO4) - ifp->if_capenable &= ~IFCAP_TSO4; - } - - /* Enable/disable TXCSUM_IPV6/TSO6 */ - if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6) - && !(ifp->if_capenable & IFCAP_TSO6)) { - if (mask & IFCAP_TXCSUM_IPV6) { - ifp->if_capenable |= IFCAP_TXCSUM_IPV6; - if (vsi->flags & IXL_FLAGS_KEEP_TSO6) { - vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; - ifp->if_capenable |= IFCAP_TSO6; - } - } else if (mask & IFCAP_TSO6) { - ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); - vsi->flags &= ~IXL_FLAGS_KEEP_TSO6; - device_printf(dev, - "TSO6 requires txcsum6, enabling both...\n"); - } - } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6) - && !(ifp->if_capenable & IFCAP_TSO6)) { - if (mask & IFCAP_TXCSUM_IPV6) - ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6; - else if (mask & IFCAP_TSO6) - ifp->if_capenable |= IFCAP_TSO6; - } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) - && (ifp->if_capenable & IFCAP_TSO6)) { - if (mask & IFCAP_TXCSUM_IPV6) { - 
vsi->flags |= IXL_FLAGS_KEEP_TSO6; - ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); - device_printf(dev, - "TSO6 requires txcsum6, disabling both...\n"); - } else if (mask & IFCAP_TSO6) - ifp->if_capenable &= ~IFCAP_TSO6; - } -} - /* For the set_advertise sysctl */ void ixl_set_initial_advertised_speeds(struct ixl_pf *pf) { device_t dev = pf->dev; int err; /* Make sure to initialize the device to the complete list of * supported speeds on driver load, to ensure unloading and * reloading the driver will restore this value. */ err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true); if (err) { /* Non-fatal error */ device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n", __func__, err); return; } pf->advertised_speed = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); } int ixl_teardown_hw_structs(struct ixl_pf *pf) { enum i40e_status_code status = 0; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; /* Shutdown LAN HMC */ if (hw->hmc.hmc_obj) { status = i40e_shutdown_lan_hmc(hw); if (status) { device_printf(dev, - "init: LAN HMC shutdown failure; status %d\n", status); + "init: LAN HMC shutdown failure; status %s\n", + i40e_stat_str(hw, status)); goto err_out; } } /* Shutdown admin queue */ ixl_disable_intr0(hw); status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, - "init: Admin Queue shutdown failure; status %d\n", status); + "init: Admin Queue shutdown failure; status %s\n", + i40e_stat_str(hw, status)); err_out: return (status); } int ixl_reset(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; - u8 set_fc_err_mask; + u32 reg; int error = 0; // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary i40e_clear_hw(hw); error = i40e_pf_reset(hw); if (error) { device_printf(dev, "init: PF reset failure\n"); error = EIO; goto err_out; } error = i40e_init_adminq(hw); if (error) { device_printf(dev, "init: Admin queue init failure;" " status code %d\n", error); error = EIO; goto err_out; } i40e_clear_pxe_mode(hw); +#if 0 error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "init: Error retrieving HW capabilities;" " status code %d\n", error); goto err_out; } error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (error) { device_printf(dev, "init: LAN HMC init failed; status code %d\n", error); error = EIO; goto err_out; } error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (error) { device_printf(dev, "init: LAN HMC config failed; status code %d\n", error); error = EIO; goto err_out; } // XXX: possible fix for panic, but our failure recovery is still broken error = ixl_switch_config(pf); if (error) { device_printf(dev, "init: ixl_switch_config() failed: %d\n", error); goto err_out; } error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL); if (error) { device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d," " aq_err %d\n", error, hw->aq.asq_last_status); error = EIO; goto err_out; } error = i40e_set_fc(hw, &set_fc_err_mask, true); if (error) { device_printf(dev, "init: setting link flow control failed; retcode %d," " fc_err_mask 0x%02x\n", error, set_fc_err_mask); goto err_out; } // XXX: (Rebuild VSIs?) 
/* Firmware delay workaround */ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) { i40e_msec_delay(75); error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); if (error) { device_printf(dev, "init: link restart failed, aq_err %d\n", hw->aq.asq_last_status); goto err_out; } } /* Re-enable admin queue interrupt */ if (pf->msix > 1) { ixl_configure_intr0_msix(pf); ixl_enable_intr0(hw); } err_out: return (error); -} +#endif + // TODO: Fix second parameter + ixl_rebuild_hw_structs_after_reset(pf, false); -/* -** MSIX Interrupt Handlers and Tasklets -*/ -void -ixl_handle_que(void *context, int pending) -{ - struct ixl_queue *que = context; - struct ixl_vsi *vsi = que->vsi; - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - struct i40e_hw *hw = vsi->hw; - struct tx_ring *txr = &que->txr; - struct ifnet *ifp = vsi->ifp; - bool more; + /* The PF reset should have cleared any critical errors */ + atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR); + atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ); + + reg = rd32(hw, I40E_PFINT_ICR0_ENA); + reg |= IXL_ICR0_CRIT_ERR_MASK; + wr32(hw, I40E_PFINT_ICR0_ENA, reg); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - more = ixl_rxeof(que, IXL_RX_LIMIT); - IXL_TX_LOCK(txr); - ixl_txeof(que); - if (!drbr_empty(ifp, txr->br)) - ixl_mq_start_locked(ifp, txr); - IXL_TX_UNLOCK(txr); - if (more) { - taskqueue_enqueue(que->tq, &que->task); - return; - } - } - - /* Re-enable queue interrupt */ - if (pf->msix > 1) - ixl_enable_queue(hw, que->me); - else - ixl_enable_intr0(hw); + err_out: + return (error); } - -/********************************************************************* - * - * Legacy Interrupt Service routine - * - **********************************************************************/ -void +/* + * TODO: Make sure this properly handles admin queue / single rx queue intr + */ +int ixl_intr(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; - struct ifnet *ifp = vsi->ifp; - struct tx_ring *txr = &que->txr; + struct ixl_rx_queue *que = vsi->rx_queues; u32 icr0; - bool more; - pf->admin_irq++; + // pf->admin_irq++ + ++que->irqs; +// TODO: Check against proper field +#if 0 /* Clear PBA at start of ISR if using legacy interrupts */ if (pf->msix == 0) wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)); +#endif icr0 = rd32(hw, I40E_PFINT_ICR0); #ifdef PCI_IOV if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) - taskqueue_enqueue(pf->tq, &pf->vflr_task); + iflib_iov_intr_deferred(vsi->ctx); #endif + // TODO!: Do the stuff that's done in ixl_msix_adminq here, too! if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) - taskqueue_enqueue(pf->tq, &pf->adminq); - - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - ++que->irqs; - - more = ixl_rxeof(que, IXL_RX_LIMIT); - - IXL_TX_LOCK(txr); - ixl_txeof(que); - if (!drbr_empty(vsi->ifp, txr->br)) - ixl_mq_start_locked(ifp, txr); - IXL_TX_UNLOCK(txr); - - if (more) - taskqueue_enqueue(que->tq, &que->task); - } - + iflib_admin_intr_deferred(vsi->ctx); + + // TODO: Is intr0 enabled somewhere else? 
ixl_enable_intr0(hw); + + if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) + return (FILTER_SCHEDULE_THREAD); + else + return (FILTER_HANDLED); } /********************************************************************* * * MSIX VSI Interrupt Service routine * **********************************************************************/ -void +int ixl_msix_que(void *arg) { - struct ixl_queue *que = arg; - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; - struct tx_ring *txr = &que->txr; - bool more_tx, more_rx; + struct ixl_rx_queue *que = arg; - /* Protect against spurious interrupts */ - if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)) - return; - ++que->irqs; - more_rx = ixl_rxeof(que, IXL_RX_LIMIT); - - IXL_TX_LOCK(txr); - more_tx = ixl_txeof(que); - /* - ** Make certain that if the stack - ** has anything queued the task gets - ** scheduled to handle it. - */ - if (!drbr_empty(vsi->ifp, txr->br)) - more_tx = 1; - IXL_TX_UNLOCK(txr); - ixl_set_queue_rx_itr(que); - ixl_set_queue_tx_itr(que); + // ixl_set_queue_tx_itr(que); - if (more_tx || more_rx) - taskqueue_enqueue(que->tq, &que->task); - else - ixl_enable_queue(hw, que->me); - - return; + return (FILTER_SCHEDULE_THREAD); } /********************************************************************* * * MSIX Admin Queue Interrupt Service routine * **********************************************************************/ -void +int ixl_msix_adminq(void *arg) { struct ixl_pf *pf = arg; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u32 reg, mask, rstat_reg; bool do_task = FALSE; + DDPRINTF(dev, "begin"); + ++pf->admin_irq; reg = rd32(hw, I40E_PFINT_ICR0); + // For masking off interrupt causes that need to be handled before + // they can be re-enabled mask = rd32(hw, I40E_PFINT_ICR0_ENA); /* Check on the cause */ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) { - mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK; + mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; do_task = TRUE; } if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) { - ixl_handle_mdd_event(pf); - mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK; + mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; + atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING); + do_task = TRUE; } if (reg & I40E_PFINT_ICR0_GRST_MASK) { + mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK; device_printf(dev, "Reset Requested!\n"); rstat_reg = rd32(hw, I40E_GLGEN_RSTAT); rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK) >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT; device_printf(dev, "Reset type: "); switch (rstat_reg) { /* These others might be handled similarly to an EMPR reset */ case I40E_RESET_CORER: printf("CORER\n"); break; case I40E_RESET_GLOBR: printf("GLOBR\n"); break; case I40E_RESET_EMPR: printf("EMPR\n"); - atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); break; default: printf("POR\n"); break; } /* overload admin queue task to check reset progress */ + atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING); do_task = TRUE; } - if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) { - device_printf(dev, "ECC Error detected!\n"); + /* + * PE / PCI / ECC exceptions are all handled in the same way: + * mask out these three causes, then request a PF reset + * + * TODO: I think at least ECC error requires a GLOBR, not PFR + */ + if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) + device_printf(dev, "ECC Error detected!\n"); + if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) + device_printf(dev, "PCI Exception detected!\n"); + if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK) + device_printf(dev, "Critical Protocol Engine Error detected!\n"); + /* Checks against the 
conditions above */ + if (reg & IXL_ICR0_CRIT_ERR_MASK) { + mask &= ~IXL_ICR0_CRIT_ERR_MASK; + atomic_set_32(&pf->state, + IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR); + do_task = TRUE; } + // TODO: Linux driver never re-enables this interrupt once it has been detected + // Then what is supposed to happen? A PF reset? Should it never happen? + // TODO: Parse out this error into something human readable if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) { reg = rd32(hw, I40E_PFHMC_ERRORINFO); if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) { device_printf(dev, "HMC Error detected!\n"); device_printf(dev, "INFO 0x%08x\n", reg); reg = rd32(hw, I40E_PFHMC_ERRORDATA); device_printf(dev, "DATA 0x%08x\n", reg); wr32(hw, I40E_PFHMC_ERRORINFO, 0); } } - if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) { - device_printf(dev, "PCI Exception detected!\n"); - } - #ifdef PCI_IOV if (reg & I40E_PFINT_ICR0_VFLR_MASK) { mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK; - taskqueue_enqueue(pf->tq, &pf->vflr_task); + atomic_set_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ); + do_task = TRUE; } #endif + wr32(hw, I40E_PFINT_ICR0_ENA, mask); if (do_task) - taskqueue_enqueue(pf->tq, &pf->adminq); + return (FILTER_SCHEDULE_THREAD); else - ixl_enable_intr0(hw); + return (FILTER_HANDLED); } -void -ixl_set_promisc(struct ixl_vsi *vsi) -{ - struct ifnet *ifp = vsi->ifp; - struct i40e_hw *hw = vsi->hw; - int err, mcnt = 0; - bool uni = FALSE, multi = FALSE; - - if (ifp->if_flags & IFF_PROMISC) - uni = multi = TRUE; - else if (ifp->if_flags & IFF_ALLMULTI) - multi = TRUE; - else { /* Need to count the multicast addresses */ - struct ifmultiaddr *ifma; - if_maddr_rlock(ifp); - CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { - if (ifma->ifma_addr->sa_family != AF_LINK) - continue; - if (mcnt == MAX_MULTICAST_ADDR) { - multi = TRUE; - break; - } - mcnt++; - } - if_maddr_runlock(ifp); - } - - err = i40e_aq_set_vsi_unicast_promiscuous(hw, - vsi->seid, uni, NULL, TRUE); - err = i40e_aq_set_vsi_multicast_promiscuous(hw, - vsi->seid, multi, NULL); - return; -} - /********************************************************************* * Filter Routines * * Routines for multicast and vlan filter management. * *********************************************************************/ void ixl_add_multi(struct ixl_vsi *vsi) { struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct i40e_hw *hw = vsi->hw; int mcnt = 0, flags; IOCTL_DEBUGOUT("ixl_add_multi: begin"); if_maddr_rlock(ifp); /* ** First just get a count, to decide if we ** we simply use multicast promiscuous. 
*/ CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcnt++; } if_maddr_runlock(ifp); if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete existing MC filters */ ixl_del_hw_filters(vsi, mcnt); i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); return; } mcnt = 0; if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; ixl_add_mc_filter(vsi, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); mcnt++; } if_maddr_runlock(ifp); if (mcnt > 0) { flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); ixl_add_hw_filters(vsi, flags, mcnt); } IOCTL_DEBUGOUT("ixl_add_multi: end"); - return; } void ixl_del_multi(struct ixl_vsi *vsi) { struct ifnet *ifp = vsi->ifp; struct ifmultiaddr *ifma; struct ixl_mac_filter *f; int mcnt = 0; bool match = FALSE; IOCTL_DEBUGOUT("ixl_del_multi: begin"); /* Search for removed multicast addresses */ if_maddr_rlock(ifp); SLIST_FOREACH(f, &vsi->ftl, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { match = FALSE; CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); if (cmp_etheraddr(f->macaddr, mc_addr)) { match = TRUE; break; } } if (match == FALSE) { f->flags |= IXL_FILTER_DEL; mcnt++; } } } if_maddr_runlock(ifp); if (mcnt > 0) ixl_del_hw_filters(vsi, mcnt); } -/********************************************************************* - * Timer routine - * - * This routine checks for link status, updates statistics, - * and runs the watchdog check. - * - * Only runs when the driver is configured UP and RUNNING. - * - **********************************************************************/ - void -ixl_local_timer(void *arg) -{ - struct ixl_pf *pf = arg; - - IXL_PF_LOCK_ASSERT(pf); - - /* Fire off the adminq task */ - taskqueue_enqueue(pf->tq, &pf->adminq); - - /* Update stats */ - ixl_update_stats_counters(pf); - - /* Increment stat when a queue shows hung */ - if (ixl_queue_hang_check(&pf->vsi)) - pf->watchdog_events++; - - callout_reset(&pf->timer, hz, ixl_local_timer, pf); -} - -void ixl_link_up_msg(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ifnet *ifp = pf->vsi.ifp; char *req_fec_string, *neg_fec_string; u8 fec_abilities; fec_abilities = hw->phy.link_info.req_fec_info; /* If both RS and KR are requested, only show RS */ if (fec_abilities & I40E_AQ_REQUEST_FEC_RS) req_fec_string = ixl_fec_string[0]; else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR) req_fec_string = ixl_fec_string[1]; else req_fec_string = ixl_fec_string[2]; if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) neg_fec_string = ixl_fec_string[0]; else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) neg_fec_string = ixl_fec_string[1]; else neg_fec_string = ixl_fec_string[2]; log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", ifp->if_xname, ixl_aq_speed_to_str(hw->phy.link_info.link_speed), req_fec_string, neg_fec_string, (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False", (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX && hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ? ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ? 
ixl_fc_string[1] : ixl_fc_string[0]); } /* -** Note: this routine updates the OS on the link state -** the real check of the hardware only happens with -** a link interrupt. -*/ -void -ixl_update_link_status(struct ixl_pf *pf) -{ - struct ixl_vsi *vsi = &pf->vsi; - struct ifnet *ifp = vsi->ifp; - device_t dev = pf->dev; - - if (pf->link_up) { - if (vsi->link_active == FALSE) { - vsi->link_active = TRUE; -#if __FreeBSD_version >= 1100000 - ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->link_speed); -#else - if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->link_speed)); -#endif - if_link_state_change(ifp, LINK_STATE_UP); - ixl_link_up_msg(pf); -#ifdef PCI_IOV - ixl_broadcast_link_state(pf); -#endif - } - } else { /* Link down */ - if (vsi->link_active == TRUE) { - if (bootverbose) - device_printf(dev, "Link is Down\n"); - if_link_state_change(ifp, LINK_STATE_DOWN); - vsi->link_active = FALSE; -#ifdef PCI_IOV - ixl_broadcast_link_state(pf); -#endif - } - } -} - -/********************************************************************* - * - * This routine disables all traffic on the adapter by issuing a - * global reset on the MAC and deallocates TX/RX buffers. - * - **********************************************************************/ - -void -ixl_stop_locked(struct ixl_pf *pf) -{ - struct ixl_vsi *vsi = &pf->vsi; - struct ifnet *ifp = vsi->ifp; - - INIT_DEBUGOUT("ixl_stop: begin\n"); - - IXL_PF_LOCK_ASSERT(pf); - -#ifdef IXL_IW - /* Stop iWARP device */ - if (ixl_enable_iwarp && pf->iw_enabled) - ixl_iw_pf_stop(pf); -#endif - - /* Stop the local timer */ - callout_stop(&pf->timer); - - ixl_disable_rings_intr(vsi); - ixl_disable_rings(vsi); - - /* Tell the stack that the interface is no longer active */ - ifp->if_drv_flags &= ~(IFF_DRV_RUNNING); -} - -void -ixl_stop(struct ixl_pf *pf) -{ - IXL_PF_LOCK(pf); - ixl_stop_locked(pf); - IXL_PF_UNLOCK(pf); -} - -/********************************************************************* - * - * Setup MSIX Interrupt resources and handlers for the VSI - * - **********************************************************************/ -int -ixl_setup_legacy(struct ixl_pf *pf) -{ - device_t dev = pf->dev; - int error, rid = 0; - - if (pf->msix == 1) - rid = 1; - pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, - &rid, RF_SHAREABLE | RF_ACTIVE); - if (pf->res == NULL) { - device_printf(dev, "bus_alloc_resource_any() for" - " legacy/msi interrupt\n"); - return (ENXIO); - } - - /* Set the handler function */ - error = bus_setup_intr(dev, pf->res, - INTR_TYPE_NET | INTR_MPSAFE, NULL, - ixl_intr, pf, &pf->tag); - if (error) { - pf->res = NULL; - device_printf(dev, "bus_setup_intr() for legacy/msi" - " interrupt handler failed, error %d\n", error); - return (ENXIO); - } - error = bus_describe_intr(dev, pf->res, pf->tag, "irq"); - if (error) { - /* non-fatal */ - device_printf(dev, "bus_describe_intr() for Admin Queue" - " interrupt name failed, error %d\n", error); - } - - return (0); -} - -int -ixl_setup_adminq_tq(struct ixl_pf *pf) -{ - device_t dev = pf->dev; - int error = 0; - - /* Tasklet for Admin Queue interrupts */ - TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf); -#ifdef PCI_IOV - /* VFLR Tasklet */ - TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf); -#endif - /* Create and start Admin Queue taskqueue */ - pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT, - taskqueue_thread_enqueue, &pf->tq); - if (!pf->tq) { - device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n"); - return (ENOMEM); - } - error = taskqueue_start_threads(&pf->tq, 1, 
PI_NET, "%s aq", - device_get_nameunit(dev)); - if (error) { - device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n", - error); - taskqueue_free(pf->tq); - return (error); - } - return (0); -} - -int -ixl_setup_queue_tqs(struct ixl_vsi *vsi) -{ - struct ixl_queue *que = vsi->queues; - device_t dev = vsi->dev; -#ifdef RSS - int cpu_id = 0; - cpuset_t cpu_mask; -#endif - - /* Create queue tasks and start queue taskqueues */ - for (int i = 0; i < vsi->num_queues; i++, que++) { - TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que); - TASK_INIT(&que->task, 0, ixl_handle_que, que); - que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT, - taskqueue_thread_enqueue, &que->tq); -#ifdef RSS - CPU_SETOF(cpu_id, &cpu_mask); - taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET, - &cpu_mask, "%s (bucket %d)", - device_get_nameunit(dev), cpu_id); -#else - taskqueue_start_threads(&que->tq, 1, PI_NET, - "%s (que %d)", device_get_nameunit(dev), que->me); -#endif - } - - return (0); -} - -void -ixl_free_adminq_tq(struct ixl_pf *pf) -{ - if (pf->tq) { - taskqueue_free(pf->tq); - pf->tq = NULL; - } -} - -void -ixl_free_queue_tqs(struct ixl_vsi *vsi) -{ - struct ixl_queue *que = vsi->queues; - - for (int i = 0; i < vsi->num_queues; i++, que++) { - if (que->tq) { - taskqueue_free(que->tq); - que->tq = NULL; - } - } -} - -int -ixl_setup_adminq_msix(struct ixl_pf *pf) -{ - device_t dev = pf->dev; - int rid, error = 0; - - /* Admin IRQ rid is 1, vector is 0 */ - rid = 1; - /* Get interrupt resource from bus */ - pf->res = bus_alloc_resource_any(dev, - SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); - if (!pf->res) { - device_printf(dev, "bus_alloc_resource_any() for Admin Queue" - " interrupt failed [rid=%d]\n", rid); - return (ENXIO); - } - /* Then associate interrupt with handler */ - error = bus_setup_intr(dev, pf->res, - INTR_TYPE_NET | INTR_MPSAFE, NULL, - ixl_msix_adminq, pf, &pf->tag); - if (error) { - pf->res = NULL; - device_printf(dev, "bus_setup_intr() for Admin Queue" - " interrupt handler failed, error %d\n", error); - return (ENXIO); - } - error = bus_describe_intr(dev, pf->res, pf->tag, "aq"); - if (error) { - /* non-fatal */ - device_printf(dev, "bus_describe_intr() for Admin Queue" - " interrupt name failed, error %d\n", error); - } - pf->admvec = 0; - - return (0); -} - -/* - * Allocate interrupt resources from bus and associate an interrupt handler - * to those for the VSI's queues. 
- */ -int -ixl_setup_queue_msix(struct ixl_vsi *vsi) -{ - device_t dev = vsi->dev; - struct ixl_queue *que = vsi->queues; - struct tx_ring *txr; - int error, rid, vector = 1; - - /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */ - for (int i = 0; i < vsi->num_queues; i++, vector++, que++) { - int cpu_id = i; - rid = vector + 1; - txr = &que->txr; - que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, - RF_SHAREABLE | RF_ACTIVE); - if (!que->res) { - device_printf(dev, "bus_alloc_resource_any() for" - " Queue %d interrupt failed [rid=%d]\n", - que->me, rid); - return (ENXIO); - } - /* Set the handler function */ - error = bus_setup_intr(dev, que->res, - INTR_TYPE_NET | INTR_MPSAFE, NULL, - ixl_msix_que, que, &que->tag); - if (error) { - device_printf(dev, "bus_setup_intr() for Queue %d" - " interrupt handler failed, error %d\n", - que->me, error); - bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); - return (error); - } - error = bus_describe_intr(dev, que->res, que->tag, "q%d", i); - if (error) { - device_printf(dev, "bus_describe_intr() for Queue %d" - " interrupt name failed, error %d\n", - que->me, error); - } - /* Bind the vector to a CPU */ -#ifdef RSS - cpu_id = rss_getcpu(i % rss_getnumbuckets()); -#endif - error = bus_bind_intr(dev, que->res, cpu_id); - if (error) { - device_printf(dev, "bus_bind_intr() for Queue %d" - " to CPU %d failed, error %d\n", - que->me, cpu_id, error); - } - que->msix = vector; - } - - return (0); -} - -/* - * Allocate MSI/X vectors from the OS. - * Returns 0 for legacy, 1 for MSI, >1 for MSIX. - */ -int -ixl_init_msix(struct ixl_pf *pf) -{ - device_t dev = pf->dev; - struct i40e_hw *hw = &pf->hw; -#ifdef IXL_IW -#if __FreeBSD_version >= 1100000 - cpuset_t cpu_set; -#endif -#endif - int auto_max_queues; - int rid, want, vectors, queues, available; -#ifdef IXL_IW - int iw_want=0, iw_vectors; - - pf->iw_msix = 0; -#endif - - /* Override by tuneable */ - if (!pf->enable_msix) - goto no_msix; - - /* First try MSI/X */ - rid = PCIR_BAR(IXL_MSIX_BAR); - pf->msix_mem = bus_alloc_resource_any(dev, - SYS_RES_MEMORY, &rid, RF_ACTIVE); - if (!pf->msix_mem) { - /* May not be enabled */ - device_printf(pf->dev, - "Unable to map MSIX table\n"); - goto no_msix; - } - - available = pci_msix_count(dev); - if (available < 2) { - /* system has msix disabled (0), or only one vector (1) */ - device_printf(pf->dev, "Less than two MSI-X vectors available\n"); - bus_release_resource(dev, SYS_RES_MEMORY, - rid, pf->msix_mem); - pf->msix_mem = NULL; - goto no_msix; - } - - /* Clamp max number of queues based on: - * - # of MSI-X vectors available - * - # of cpus available - * - # of queues that can be assigned to the LAN VSI - */ - auto_max_queues = min(mp_ncpus, available - 1); - if (hw->mac.type == I40E_MAC_X722) - auto_max_queues = min(auto_max_queues, 128); - else - auto_max_queues = min(auto_max_queues, 64); - - /* Override with tunable value if tunable is less than autoconfig count */ - if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues)) - queues = pf->max_queues; - /* Use autoconfig amount if that's lower */ - else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) { - device_printf(dev, "ixl_max_queues (%d) is too large, using " - "autoconfig amount (%d)...\n", - pf->max_queues, auto_max_queues); - queues = auto_max_queues; - } - /* Limit maximum auto-configured queues to 8 if no user value is set */ - else - queues = min(auto_max_queues, 8); - -#ifdef RSS - /* If we're doing RSS, clamp at the number of RSS buckets */ - 
if (queues > rss_getnumbuckets()) - queues = rss_getnumbuckets(); -#endif - - /* - ** Want one vector (RX/TX pair) per queue - ** plus an additional for the admin queue. - */ - want = queues + 1; - if (want <= available) /* Have enough */ - vectors = want; - else { - device_printf(pf->dev, - "MSIX Configuration Problem, " - "%d vectors available but %d wanted!\n", - available, want); - pf->msix_mem = NULL; - goto no_msix; /* Will go to Legacy setup */ - } - -#ifdef IXL_IW - if (ixl_enable_iwarp && hw->func_caps.iwarp) { -#if __FreeBSD_version >= 1100000 - if(bus_get_cpus(dev, INTR_CPUS, sizeof(cpu_set), &cpu_set) == 0) - { - iw_want = min(CPU_COUNT(&cpu_set), IXL_IW_MAX_MSIX); - } -#endif - if(!iw_want) - iw_want = min(mp_ncpus, IXL_IW_MAX_MSIX); - if(ixl_limit_iwarp_msix > 0) - iw_want = min(iw_want, ixl_limit_iwarp_msix); - else - iw_want = min(iw_want, 1); - - available -= vectors; - if (available > 0) { - iw_vectors = (available >= iw_want) ? - iw_want : available; - vectors += iw_vectors; - } else - iw_vectors = 0; - } -#endif - - ixl_set_msix_enable(dev); - if (pci_alloc_msix(dev, &vectors) == 0) { - device_printf(pf->dev, - "Using MSIX interrupts with %d vectors\n", vectors); - pf->msix = vectors; -#ifdef IXL_IW - if (ixl_enable_iwarp && hw->func_caps.iwarp) - { - pf->iw_msix = iw_vectors; - device_printf(pf->dev, - "Reserving %d MSIX interrupts for iWARP CEQ and AEQ\n", - iw_vectors); - } -#endif - - pf->vsi.num_queues = queues; -#ifdef RSS - /* - * If we're doing RSS, the number of queues needs to - * match the number of RSS buckets that are configured. - * - * + If there's more queues than RSS buckets, we'll end - * up with queues that get no traffic. - * - * + If there's more RSS buckets than queues, we'll end - * up having multiple RSS buckets map to the same queue, - * so there'll be some contention. - */ - if (queues != rss_getnumbuckets()) { - device_printf(dev, - "%s: queues (%d) != RSS buckets (%d)" - "; performance will be impacted.\n", - __func__, queues, rss_getnumbuckets()); - } -#endif - return (vectors); - } -no_msix: - vectors = pci_msi_count(dev); - pf->vsi.num_queues = 1; - pf->max_queues = 1; - if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) - device_printf(pf->dev, "Using an MSI interrupt\n"); - else { - vectors = 0; - device_printf(pf->dev, "Using a Legacy interrupt\n"); - } - return (vectors); -} - -/* * Configure admin queue/misc interrupt cause registers in hardware. */ void ixl_configure_intr0_msix(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 reg; /* First set up the adminq - vector 0 */ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */ rd32(hw, I40E_PFINT_ICR0); /* read to clear */ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); /* * 0x7FF is the end of the queue list. * This means we won't use MSI-X vector 0 for a queue interrupt * in MSIX mode. */ wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E); wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK | I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK); wr32(hw, I40E_PFINT_STAT_CTL0, 0); } /* * Configure queue interrupt cause registers in hardware. 
+ * + * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL */ void ixl_configure_queue_intr_msix(struct ixl_pf *pf) { - struct i40e_hw *hw = &pf->hw; + struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; u32 reg; u16 vector = 1; - for (int i = 0; i < vsi->num_queues; i++, vector++) { + // TODO: See if max is really necessary + for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) { + /* Make sure interrupt is disabled */ wr32(hw, I40E_PFINT_DYN_CTLN(i), 0); - /* First queue type is RX / 0 */ - wr32(hw, I40E_PFINT_LNKLSTN(i), i); + /* Set linked list head to point to corresponding RX queue + * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */ + reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) + & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) | + ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) + & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK); + wr32(hw, I40E_PFINT_LNKLSTN(i), reg); reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(i), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_TQCTL(i), reg); } } /* - * Configure for MSI single vector operation + * Configure for single interrupt vector operation */ void ixl_configure_legacy(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *que = vsi->queues; - struct rx_ring *rxr = &que->rxr; - struct tx_ring *txr = &que->txr; u32 reg; +// TODO: Fix +#if 0 /* Configure ITR */ vsi->tx_itr_setting = pf->tx_itr; wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; vsi->rx_itr_setting = pf->rx_itr; wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; + /* XXX: Assuming only 1 queue in single interrupt mode */ +#endif + vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting; /* Setup "other" causes */ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK | I40E_PFINT_ICR0_ENA_GRST_MASK | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK ; wr32(hw, I40E_PFINT_ICR0_ENA, reg); /* No ITR for non-queue interrupts */ wr32(hw, I40E_PFINT_STAT_CTL0, IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT); /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ wr32(hw, I40E_PFINT_LNKLST0, 0); /* Associate the queue pair to the vector and enable the q int */ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT); wr32(hw, I40E_QINT_RQCTL(0), reg); reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(0), reg); } -int -ixl_allocate_pci_resources(struct ixl_pf *pf) -{ - int rid; - struct i40e_hw *hw = &pf->hw; - device_t dev = pf->dev; - - /* Map BAR0 */ - rid = PCIR_BAR(0); - pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, - &rid, RF_ACTIVE); - - if (!(pf->pci_mem)) { - device_printf(dev, "Unable to 
allocate bus resource: PCI memory\n"); - return (ENXIO); - } - /* Ensure proper PCI device operation */ - ixl_set_busmaster(dev); - - /* Save off the PCI information */ - hw->vendor_id = pci_get_vendor(dev); - hw->device_id = pci_get_device(dev); - hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); - hw->subsystem_vendor_id = - pci_read_config(dev, PCIR_SUBVEND_0, 2); - hw->subsystem_device_id = - pci_read_config(dev, PCIR_SUBDEV_0, 2); - - hw->bus.device = pci_get_slot(dev); - hw->bus.func = pci_get_function(dev); - - /* Save off register access information */ - pf->osdep.mem_bus_space_tag = - rman_get_bustag(pf->pci_mem); - pf->osdep.mem_bus_space_handle = - rman_get_bushandle(pf->pci_mem); - pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); - pf->osdep.flush_reg = I40E_GLGEN_STAT; - pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; - - pf->hw.back = &pf->osdep; - - return (0); -} - -/* - * Teardown and release the admin queue/misc vector - * interrupt. - */ -int -ixl_teardown_adminq_msix(struct ixl_pf *pf) -{ - device_t dev = pf->dev; - int rid, error = 0; - - if (pf->admvec) /* we are doing MSIX */ - rid = pf->admvec + 1; - else - (pf->msix != 0) ? (rid = 1):(rid = 0); - - if (pf->tag != NULL) { - bus_teardown_intr(dev, pf->res, pf->tag); - if (error) { - device_printf(dev, "bus_teardown_intr() for" - " interrupt 0 failed\n"); - // return (ENXIO); - } - pf->tag = NULL; - } - if (pf->res != NULL) { - bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res); - if (error) { - device_printf(dev, "bus_release_resource() for" - " interrupt 0 failed [rid=%d]\n", rid); - // return (ENXIO); - } - pf->res = NULL; - } - - return (0); -} - -int -ixl_teardown_queue_msix(struct ixl_vsi *vsi) -{ - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - struct ixl_queue *que = vsi->queues; - device_t dev = vsi->dev; - int rid, error = 0; - - /* We may get here before stations are setup */ - if ((pf->msix < 2) || (que == NULL)) - return (0); - - /* Release all MSIX queue resources */ - for (int i = 0; i < vsi->num_queues; i++, que++) { - rid = que->msix + 1; - if (que->tag != NULL) { - error = bus_teardown_intr(dev, que->res, que->tag); - if (error) { - device_printf(dev, "bus_teardown_intr() for" - " Queue %d interrupt failed\n", - que->me); - // return (ENXIO); - } - que->tag = NULL; - } - if (que->res != NULL) { - error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); - if (error) { - device_printf(dev, "bus_release_resource() for" - " Queue %d interrupt failed [rid=%d]\n", - que->me, rid); - // return (ENXIO); - } - que->res = NULL; - } - } - - return (0); -} - void ixl_free_pci_resources(struct ixl_pf *pf) { - device_t dev = pf->dev; - int memrid; + struct ixl_vsi *vsi = &pf->vsi; + device_t dev = iflib_get_dev(vsi->ctx); + struct ixl_rx_queue *rx_que = vsi->rx_queues; - ixl_teardown_queue_msix(&pf->vsi); - ixl_teardown_adminq_msix(pf); + /* We may get here before stations are setup */ + if (rx_que == NULL) + goto early; - if (pf->msix > 0) - pci_release_msi(dev); - - memrid = PCIR_BAR(IXL_MSIX_BAR); + /* + ** Release all msix VSI resources: + */ + iflib_irq_free(vsi->ctx, &vsi->irq); - if (pf->msix_mem != NULL) - bus_release_resource(dev, SYS_RES_MEMORY, - memrid, pf->msix_mem); - + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) + iflib_irq_free(vsi->ctx, &rx_que->que_irq); +early: if (pf->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), pf->pci_mem); - - return; } void ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types) { /* Display supported 
media types */ if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) || phy_types & (I40E_CAP_PHY_TYPE_XFI) || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) || phy_types & (I40E_CAP_PHY_TYPE_XLAUI) || phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_SFI)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | 
IFM_40G_KR4, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL); if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC)) - ifmedia_add(&vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL); + ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL); } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ int -ixl_setup_interface(device_t dev, struct ixl_vsi *vsi) +ixl_setup_interface(device_t dev, struct ixl_pf *pf) { - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - struct ifnet *ifp; - struct i40e_hw *hw = vsi->hw; - struct ixl_queue *que = vsi->queues; + struct ixl_vsi *vsi = &pf->vsi; + if_ctx_t ctx = vsi->ctx; + struct i40e_hw *hw = &pf->hw; + struct ifnet *ifp = iflib_get_ifp(ctx); struct i40e_aq_get_phy_abilities_resp abilities; enum i40e_status_code aq_error = 0; - INIT_DEBUGOUT("ixl_setup_interface: begin"); + uint64_t cap; - ifp = vsi->ifp = if_alloc(IFT_ETHER); - if (ifp == NULL) { - device_printf(dev, "can not allocate ifnet structure\n"); - return (-1); - } - if_initname(ifp, device_get_name(dev), device_get_unit(dev)); - ifp->if_mtu = ETHERMTU; - ifp->if_init = ixl_init; - ifp->if_softc = vsi; - ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; - ifp->if_ioctl = ixl_ioctl; + INIT_DBG_DEV(dev, "begin"); -#if __FreeBSD_version >= 1100036 - if_setgetcounterfn(ifp, ixl_get_counter); -#endif - - ifp->if_transmit = ixl_mq_start; - - ifp->if_qflush = ixl_qflush; - - ifp->if_snd.ifq_maxlen = que->num_tx_desc - 2; - - vsi->max_frame_size = + /* initialize fast path functions */ + cap = IXL_CAPS; + if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); + if_setcapabilitiesbit(ifp, cap, 0); + if_setcapenable(ifp, if_getcapabilities(ifp)); + /* TODO: Remove VLAN_ENCAP_LEN? */ + vsi->shared->isc_max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; - /* Set TSO limits */ - ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN); - ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS; - ifp->if_hw_tsomaxsegsize = IXL_MAX_DMA_SEG_SIZE; - /* - * Tell the upper layer(s) we support long frames. 
- */ - ifp->if_hdrlen = sizeof(struct ether_vlan_header); - - ifp->if_capabilities |= IFCAP_HWCSUM; - ifp->if_capabilities |= IFCAP_HWCSUM_IPV6; - ifp->if_capabilities |= IFCAP_TSO; - ifp->if_capabilities |= IFCAP_JUMBO_MTU; - ifp->if_capabilities |= IFCAP_LRO; - - /* VLAN capabilties */ - ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING - | IFCAP_VLAN_HWTSO - | IFCAP_VLAN_MTU - | IFCAP_VLAN_HWCSUM; - ifp->if_capenable = ifp->if_capabilities; - - /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the ixl driver you can ** enable this and get full hardware tag filtering. */ - ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; + if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0); - /* - * Specify the media types supported by this adapter and register - * callbacks to update media and link information - */ - ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change, - ixl_media_status); - aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities, NULL); /* May need delay to detect fiber correctly */ if (aq_error == I40E_ERR_UNKNOWN_PHY) { + /* TODO: Maybe just retry this in a task... */ i40e_msec_delay(200); aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, TRUE, &abilities, NULL); } if (aq_error) { if (aq_error == I40E_ERR_UNKNOWN_PHY) device_printf(dev, "Unknown PHY type detected!\n"); else device_printf(dev, "Error getting supported media types, err %d," " AQ error %d\n", aq_error, hw->aq.asq_last_status); } else { pf->supported_speeds = abilities.link_speed; #if __FreeBSD_version >= 1100000 - ifp->if_baudrate = ixl_max_aq_speed_to_value(pf->supported_speeds); + if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds)); #else if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds)); #endif ixl_add_ifmedia(vsi, hw->phy.phy_types); } /* Use autoselect media by default */ - ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); - ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO); + ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO); - ether_ifattach(ifp, hw->mac.addr); - return (0); } /* ** Run when the Admin Queue gets a link state change interrupt. */ void ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e) { - struct i40e_hw *hw = &pf->hw; - device_t dev = pf->dev; + struct i40e_hw *hw = &pf->hw; + device_t dev = iflib_get_dev(pf->vsi.ctx); struct i40e_aqc_get_link_status *status = (struct i40e_aqc_get_link_status *)&e->desc.params.raw; /* Request link status from adapter */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); /* Print out message if an unqualified module is found */ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && (pf->advertised_speed) && (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && (!(status->link_info & I40E_AQ_LINK_UP))) device_printf(dev, "Link failed because " "an unqualified module was detected!\n"); - /* Update OS link info */ - ixl_update_link_status(pf); + /* OS link info is updated elsewhere */ } /********************************************************************* * * Get Firmware Switch configuration * - this will need to be more robust when more complex * switch configurations are enabled. 
* **********************************************************************/ int ixl_switch_config(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; - device_t dev = vsi->dev; + device_t dev = iflib_get_dev(vsi->ctx); struct i40e_aqc_get_switch_config_resp *sw_config; u8 aq_buf[I40E_AQ_LARGE_BUF]; int ret; u16 next = 0; memset(&aq_buf, 0, sizeof(aq_buf)); sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; ret = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (ret) { device_printf(dev, "aq_get_switch_config() failed, error %d," " aq_error %d\n", ret, pf->hw.aq.asq_last_status); return (ret); } if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) { device_printf(dev, "Switch config: header reported: %d in structure, %d total\n", sw_config->header.num_reported, sw_config->header.num_total); for (int i = 0; i < sw_config->header.num_reported; i++) { device_printf(dev, "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i, sw_config->element[i].element_type, sw_config->element[i].seid, sw_config->element[i].uplink_seid, sw_config->element[i].downlink_seid); } } /* Simplified due to a single VSI */ vsi->uplink_seid = sw_config->element[0].uplink_seid; vsi->downlink_seid = sw_config->element[0].downlink_seid; vsi->seid = sw_config->element[0].seid; return (ret); } /********************************************************************* * * Initialize the VSI: this handles contexts, which means things * like the number of descriptors, buffer size, * plus we init the rings thru this function. * **********************************************************************/ int ixl_initialize_vsi(struct ixl_vsi *vsi) { - struct ixl_pf *pf = vsi->back; - struct ixl_queue *que = vsi->queues; - device_t dev = vsi->dev; + struct ixl_pf *pf = vsi->back; + if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx); + struct ixl_tx_queue *tx_que = vsi->tx_queues; + struct ixl_rx_queue *rx_que = vsi->rx_queues; + device_t dev = iflib_get_dev(vsi->ctx); struct i40e_hw *hw = vsi->hw; struct i40e_vsi_context ctxt; int tc_queues; int err = 0; memset(&ctxt, 0, sizeof(ctxt)); ctxt.seid = vsi->seid; if (pf->veb_seid != 0) ctxt.uplink_seid = pf->veb_seid; ctxt.pf_num = hw->pf_id; err = i40e_aq_get_vsi_params(hw, &ctxt, NULL); if (err) { device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d" " aq_error %d\n", err, hw->aq.asq_last_status); return (err); } ixl_dbg(pf, IXL_DBG_SWITCH_INFO, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, " "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, " "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid, ctxt.uplink_seid, ctxt.vsi_number, ctxt.vsis_allocated, ctxt.vsis_unallocated, ctxt.flags, ctxt.pf_num, ctxt.vf_num, ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits); /* ** Set the queue and traffic class bits ** - when multiple traffic classes are supported ** this will need to be more robust. */ ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG; /* In contig mode, que_mapping[0] is first queue index used by this VSI */ ctxt.info.queue_mapping[0] = 0; /* * This VSI will only use traffic class 0; start traffic class 0's * queue allocation at queue 0, and assign it 2^tc_queues queues (though * the driver may not use all of them). 
*/ - tc_queues = bsrl(pf->qtag.num_allocated); + tc_queues = fls(pf->qtag.num_allocated) - 1; ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) | ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) & I40E_AQ_VSI_TC_QUE_NUMBER_MASK); /* Set VLAN receive stripping mode */ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID; ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL; + // TODO: Call function to get this cap bit, instead if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING) ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH; else ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING; #ifdef IXL_IW /* Set TCP Enable for iWARP capable VSI */ if (ixl_enable_iwarp && pf->iw_enabled) { ctxt.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; } #endif /* Save VSI number and info for use later */ vsi->vsi_num = ctxt.vsi_number; bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info)); /* Reset VSI statistics */ ixl_vsi_reset_stats(vsi); vsi->hw_filters_add = 0; vsi->hw_filters_del = 0; ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF); err = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (err) { device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d," " aq_error %d\n", err, hw->aq.asq_last_status); return (err); } - for (int i = 0; i < vsi->num_queues; i++, que++) { - struct tx_ring *txr = &que->txr; - struct rx_ring *rxr = &que->rxr; + for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) { + struct tx_ring *txr = &tx_que->txr; struct i40e_hmc_obj_txq tctx; - struct i40e_hmc_obj_rxq rctx; u32 txctl; - u16 size; /* Setup the HMC TX Context */ - size = que->num_tx_desc * sizeof(struct i40e_tx_desc); bzero(&tctx, sizeof(tctx)); tctx.new_context = 1; - tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS); - tctx.qlen = que->num_tx_desc; + tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS); + tctx.qlen = scctx->isc_ntxd[0]; tctx.fc_ena = 0; /* Disable FCoE */ /* * This value needs to pulled from the VSI that this queue * is assigned to. Index into array is traffic class. 
*/ tctx.rdylist = vsi->info.qs_handle[0]; /* * Set these to enable Head Writeback * - Address is last entry in TX ring (reserved for HWB index) * Leave these as 0 for Descriptor Writeback */ if (vsi->enable_head_writeback) { tctx.head_wb_ena = 1; - tctx.head_wb_addr = txr->dma.pa + - (que->num_tx_desc * sizeof(struct i40e_tx_desc)); + tctx.head_wb_addr = txr->tx_paddr + + (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc)); + } else { + tctx.head_wb_ena = 0; + tctx.head_wb_addr = 0; } tctx.rdylist_act = 0; err = i40e_clear_lan_tx_queue_context(hw, i); if (err) { device_printf(dev, "Unable to clear TX context\n"); break; } err = i40e_set_lan_tx_queue_context(hw, i, &tctx); if (err) { device_printf(dev, "Unable to set TX context\n"); break; } /* Associate the ring with this PF */ txctl = I40E_QTX_CTL_PF_QUEUE; txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & I40E_QTX_CTL_PF_INDX_MASK); wr32(hw, I40E_QTX_CTL(i), txctl); ixl_flush(hw); /* Do ring (re)init */ - ixl_init_tx_ring(que); + ixl_init_tx_ring(vsi, tx_que); + } + for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) { + struct rx_ring *rxr = &rx_que->rxr; + struct i40e_hmc_obj_rxq rctx; /* Next setup the HMC RX Context */ - if (vsi->max_frame_size <= MCLBYTES) + if (scctx->isc_max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len; /* Set up an RX context for the HMC */ memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq)); rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT; /* ignore header split for now */ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; - rctx.rxmax = (vsi->max_frame_size < max_rxmax) ? - vsi->max_frame_size : max_rxmax; + rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ? + scctx->isc_max_frame_size : max_rxmax; rctx.dtype = 0; rctx.dsize = 1; /* do 32byte descriptors */ rctx.hsplit_0 = 0; /* no header split */ - rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS); - rctx.qlen = que->num_rx_desc; + rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS); + rctx.qlen = scctx->isc_nrxd[0]; rctx.tphrdesc_ena = 1; rctx.tphwdesc_ena = 1; rctx.tphdata_ena = 0; /* Header Split related */ rctx.tphhead_ena = 0; /* Header Split related */ - rctx.lrxqthresh = 2; /* Interrupt at <128 desc avail */ + rctx.lrxqthresh = 1; /* Interrupt at <64 desc avail */ rctx.crcstrip = 1; rctx.l2tsel = 1; rctx.showiv = 1; /* Strip inner VLAN header */ rctx.fc_ena = 0; /* Disable FCoE */ rctx.prefena = 1; /* Prefetch descriptors */ err = i40e_clear_lan_rx_queue_context(hw, i); if (err) { device_printf(dev, "Unable to clear RX context %d\n", i); break; } err = i40e_set_lan_rx_queue_context(hw, i, &rctx); if (err) { device_printf(dev, "Unable to set RX context %d\n", i); break; } - err = ixl_init_rx_ring(que); - if (err) { - device_printf(dev, "Fail in init_rx_ring %d\n", i); - break; - } -#ifdef DEV_NETMAP - /* preserve queue */ - if (vsi->ifp->if_capenable & IFCAP_NETMAP) { - struct netmap_adapter *na = NA(vsi->ifp); - struct netmap_kring *kring = na->rx_rings[i]; - int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); - wr32(vsi->hw, I40E_QRX_TAIL(que->me), t); - } else -#endif /* DEV_NETMAP */ - wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_rx_desc - 1); + wr32(vsi->hw, I40E_QRX_TAIL(i), 0); } return (err); } - - - void -ixl_vsi_free_queues(struct ixl_vsi *vsi) -{ - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - struct ixl_queue *que = vsi->queues; - - if (NULL == vsi->queues) - return; - - for (int i = 0; i < vsi->num_queues; i++, que++) { - struct 
tx_ring *txr = &que->txr; - struct rx_ring *rxr = &que->rxr; - - if (!mtx_initialized(&txr->mtx)) /* uninitialized */ - continue; - IXL_TX_LOCK(txr); - if (txr->br) - buf_ring_free(txr->br, M_DEVBUF); - ixl_free_que_tx(que); - if (txr->base) - i40e_free_dma_mem(&pf->hw, &txr->dma); - IXL_TX_UNLOCK(txr); - IXL_TX_LOCK_DESTROY(txr); - - if (!mtx_initialized(&rxr->mtx)) /* uninitialized */ - continue; - IXL_RX_LOCK(rxr); - ixl_free_que_rx(que); - if (rxr->base) - i40e_free_dma_mem(&pf->hw, &rxr->dma); - IXL_RX_UNLOCK(rxr); - IXL_RX_LOCK_DESTROY(rxr); - } -} - - -/********************************************************************* - * - * Free all VSI structs. - * - **********************************************************************/ -void -ixl_free_vsi(struct ixl_vsi *vsi) -{ - - /* Free station queues */ - ixl_vsi_free_queues(vsi); - if (vsi->queues) - free(vsi->queues, M_DEVBUF); - - /* Free VSI filter list */ - ixl_free_mac_filters(vsi); -} - -void ixl_free_mac_filters(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; while (!SLIST_EMPTY(&vsi->ftl)) { f = SLIST_FIRST(&vsi->ftl); SLIST_REMOVE_HEAD(&vsi->ftl, next); free(f, M_DEVBUF); } } /* - * Fill out fields in queue struct and setup tx/rx memory and structs - */ -static int -ixl_vsi_setup_queue(struct ixl_vsi *vsi, struct ixl_queue *que, int index) -{ - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - device_t dev = pf->dev; - struct i40e_hw *hw = &pf->hw; - struct tx_ring *txr = &que->txr; - struct rx_ring *rxr = &que->rxr; - int error = 0; - int rsize, tsize; - - que->num_tx_desc = vsi->num_tx_desc; - que->num_rx_desc = vsi->num_rx_desc; - que->me = index; - que->vsi = vsi; - - txr->que = que; - txr->tail = I40E_QTX_TAIL(que->me); - - /* Initialize the TX lock */ - snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", - device_get_nameunit(dev), que->me); - mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); - /* - * Create the TX descriptor ring - * - * In Head Writeback mode, the descriptor ring is one bigger - * than the number of descriptors for space for the HW to - * write back index of last completed descriptor. 
- */ - if (vsi->enable_head_writeback) { - tsize = roundup2((que->num_tx_desc * - sizeof(struct i40e_tx_desc)) + - sizeof(u32), DBA_ALIGN); - } else { - tsize = roundup2((que->num_tx_desc * - sizeof(struct i40e_tx_desc)), DBA_ALIGN); - } - if (i40e_allocate_dma_mem(hw, - &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { - device_printf(dev, - "Unable to allocate TX Descriptor memory\n"); - error = ENOMEM; - goto err_destroy_tx_mtx; - } - txr->base = (struct i40e_tx_desc *)txr->dma.va; - bzero((void *)txr->base, tsize); - /* Now allocate transmit soft structs for the ring */ - if (ixl_allocate_tx_data(que)) { - device_printf(dev, - "Critical Failure setting up TX structures\n"); - error = ENOMEM; - goto err_free_tx_dma; - } - /* Allocate a buf ring */ - txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF, - M_NOWAIT, &txr->mtx); - if (txr->br == NULL) { - device_printf(dev, - "Critical Failure setting up TX buf ring\n"); - error = ENOMEM; - goto err_free_tx_data; - } - - rsize = roundup2(que->num_rx_desc * - sizeof(union i40e_rx_desc), DBA_ALIGN); - rxr->que = que; - rxr->tail = I40E_QRX_TAIL(que->me); - - /* Initialize the RX side lock */ - snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", - device_get_nameunit(dev), que->me); - mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); - - if (i40e_allocate_dma_mem(hw, - &rxr->dma, i40e_mem_reserved, rsize, 4096)) { - device_printf(dev, - "Unable to allocate RX Descriptor memory\n"); - error = ENOMEM; - goto err_destroy_rx_mtx; - } - rxr->base = (union i40e_rx_desc *)rxr->dma.va; - bzero((void *)rxr->base, rsize); - /* Allocate receive soft structs for the ring*/ - if (ixl_allocate_rx_data(que)) { - device_printf(dev, - "Critical Failure setting up receive structs\n"); - error = ENOMEM; - goto err_free_rx_dma; - } - - return (0); - -err_free_rx_dma: - i40e_free_dma_mem(&pf->hw, &rxr->dma); -err_destroy_rx_mtx: - mtx_destroy(&rxr->mtx); - /* err_free_tx_buf_ring */ - buf_ring_free(txr->br, M_DEVBUF); -err_free_tx_data: - ixl_free_que_tx(que); -err_free_tx_dma: - i40e_free_dma_mem(&pf->hw, &txr->dma); -err_destroy_tx_mtx: - mtx_destroy(&txr->mtx); - - return (error); -} - -int -ixl_vsi_setup_queues(struct ixl_vsi *vsi) -{ - struct ixl_queue *que; - int error = 0; - - for (int i = 0; i < vsi->num_queues; i++) { - que = &vsi->queues[i]; - error = ixl_vsi_setup_queue(vsi, que, i); - if (error) - break; - } - return (error); -} - - -/********************************************************************* - * - * Allocate memory for the VSI (virtual station interface) and their - * associated queues, rings and the descriptors associated with each, - * called only once at attach. - * - **********************************************************************/ -int -ixl_setup_stations(struct ixl_pf *pf) -{ - device_t dev = pf->dev; - struct ixl_vsi *vsi; - int error = 0; - - vsi = &pf->vsi; - vsi->back = (void *)pf; - vsi->hw = &pf->hw; - vsi->id = 0; - vsi->num_vlans = 0; - vsi->back = pf; - - if (pf->msix > 1) - vsi->flags |= IXL_FLAGS_USES_MSIX; - - /* Get memory for the station queues */ - if (!(vsi->queues = - (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * - vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate queue memory\n"); - error = ENOMEM; - goto ixl_setup_stations_err; - } - - /* Then setup each queue */ - error = ixl_vsi_setup_queues(vsi); -ixl_setup_stations_err: - return (error); -} - -/* ** Provide a update to the queue RX ** interrupt moderation value. 
*/ void -ixl_set_queue_rx_itr(struct ixl_queue *que) +ixl_set_queue_rx_itr(struct ixl_rx_queue *que) { struct ixl_vsi *vsi = que->vsi; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; struct rx_ring *rxr = &que->rxr; u16 rx_itr; u16 rx_latency = 0; int rx_bytes; /* Idle, do nothing */ if (rxr->bytes == 0) return; if (pf->dynamic_rx_itr) { rx_bytes = rxr->bytes/rxr->itr; rx_itr = rxr->itr; /* Adjust latency range */ switch (rxr->latency) { case IXL_LOW_LATENCY: if (rx_bytes > 10) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (rx_bytes > 20) { rx_latency = IXL_BULK_LATENCY; rx_itr = IXL_ITR_8K; } else if (rx_bytes <= 10) { rx_latency = IXL_LOW_LATENCY; rx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (rx_bytes <= 20) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; } rxr->latency = rx_latency; if (rx_itr != rxr->itr) { /* do an exponential smoothing */ rx_itr = (10 * rx_itr * rxr->itr) / ((9 * rx_itr) + rxr->itr); rxr->itr = min(rx_itr, IXL_MAX_ITR); wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, - que->me), rxr->itr); + rxr->me), rxr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) vsi->rx_itr_setting = pf->rx_itr; /* Update the hardware if needed */ if (rxr->itr != vsi->rx_itr_setting) { rxr->itr = vsi->rx_itr_setting; wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, - que->me), rxr->itr); + rxr->me), rxr->itr); } } rxr->bytes = 0; rxr->packets = 0; - return; } /* ** Provide a update to the queue TX ** interrupt moderation value. */ void -ixl_set_queue_tx_itr(struct ixl_queue *que) +ixl_set_queue_tx_itr(struct ixl_tx_queue *que) { struct ixl_vsi *vsi = que->vsi; struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; u16 tx_latency = 0; int tx_bytes; /* Idle, do nothing */ if (txr->bytes == 0) return; if (pf->dynamic_tx_itr) { tx_bytes = txr->bytes/txr->itr; tx_itr = txr->itr; switch (txr->latency) { case IXL_LOW_LATENCY: if (tx_bytes > 10) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (tx_bytes > 20) { tx_latency = IXL_BULK_LATENCY; tx_itr = IXL_ITR_8K; } else if (tx_bytes <= 10) { tx_latency = IXL_LOW_LATENCY; tx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (tx_bytes <= 20) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; } txr->latency = tx_latency; if (tx_itr != txr->itr) { /* do an exponential smoothing */ tx_itr = (10 * tx_itr * txr->itr) / ((9 * tx_itr) + txr->itr); txr->itr = min(tx_itr, IXL_MAX_ITR); wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, - que->me), txr->itr); + txr->me), txr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) vsi->tx_itr_setting = pf->tx_itr; /* Update the hardware if needed */ if (txr->itr != vsi->tx_itr_setting) { txr->itr = vsi->tx_itr_setting; wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, - que->me), txr->itr); + txr->me), txr->itr); } } txr->bytes = 0; txr->packets = 0; return; } void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name) { struct sysctl_oid *tree; struct sysctl_oid_list *child; struct sysctl_oid_list *vsi_list; tree = device_get_sysctl_tree(pf->dev); child = SYSCTL_CHILDREN(tree); vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name, CTLFLAG_RD, NULL, "VSI Number"); vsi_list = SYSCTL_CHILDREN(vsi->vsi_node); ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats); } 
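The ixl_set_queue_rx_itr()/ixl_set_queue_tx_itr() handlers above pick a target interval from the ring's latency bucket and then blend it with the previous interval before clamping it and writing it to the ITR register. The fragment below is only a self-contained illustration of that smoothing step, not part of this change; EXAMPLE_MAX_ITR is a stand-in for the driver's IXL_MAX_ITR clamp and the stdint types stand in for the kernel's u16/u32.

#include <stdint.h>

#define EXAMPLE_MAX_ITR	0x0FF8		/* stand-in for IXL_MAX_ITR */

/* Blend the newly selected interval with the previous one, then clamp. */
static uint16_t
example_smooth_itr(uint16_t new_itr, uint16_t old_itr)
{
	uint32_t blended;

	if (new_itr == 0 || old_itr == 0)	/* avoid a zero divisor */
		blended = new_itr;
	else
		/* Same weighting as above: 10*new*old / (9*new + old). */
		blended = (10 * (uint32_t)new_itr * old_itr) /
		    ((9 * (uint32_t)new_itr) + old_itr);

	return (blended < EXAMPLE_MAX_ITR ? (uint16_t)blended : EXAMPLE_MAX_ITR);
}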
#ifdef IXL_DEBUG /** * ixl_sysctl_qtx_tail_handler * Retrieves I40E_QTX_TAIL value from hardware * for a sysctl. */ -static int +int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS) { - struct ixl_queue *que; + struct ixl_tx_queue *tx_que; int error; u32 val; - que = ((struct ixl_queue *)oidp->oid_arg1); - if (!que) return 0; + tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1); + if (!tx_que) return 0; - val = rd32(que->vsi->hw, que->txr.tail); + val = rd32(tx_que->vsi->hw, tx_que->txr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } /** * ixl_sysctl_qrx_tail_handler * Retrieves I40E_QRX_TAIL value from hardware * for a sysctl. */ -static int +int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS) { - struct ixl_queue *que; + struct ixl_rx_queue *rx_que; int error; u32 val; - que = ((struct ixl_queue *)oidp->oid_arg1); - if (!que) return 0; + rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1); + if (!rx_que) return 0; - val = rd32(que->vsi->hw, que->rxr.tail); + val = rd32(rx_que->vsi->hw, rx_que->rxr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } #endif /* * Used to set the Tx ITR value for all of the PF LAN VSI's queues. * Writes to the ITR registers immediately. */ static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int error = 0; int requested_tx_itr; requested_tx_itr = pf->tx_itr; error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (pf->dynamic_tx_itr) { device_printf(dev, "Cannot set TX itr value while dynamic TX itr is enabled\n"); return (EINVAL); } if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid TX itr value; value must be between 0 and %d\n", IXL_MAX_ITR); return (EINVAL); } pf->tx_itr = requested_tx_itr; ixl_configure_tx_itr(pf); return (error); } /* * Used to set the Rx ITR value for all of the PF LAN VSI's queues. * Writes to the ITR registers immediately. 
*/ static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int error = 0; int requested_rx_itr; requested_rx_itr = pf->rx_itr; error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (pf->dynamic_rx_itr) { device_printf(dev, "Cannot set RX itr value while dynamic RX itr is enabled\n"); return (EINVAL); } if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid RX itr value; value must be between 0 and %d\n", IXL_MAX_ITR); return (EINVAL); } pf->rx_itr = requested_rx_itr; ixl_configure_rx_itr(pf); return (error); } void ixl_add_hw_stats(struct ixl_pf *pf) { - device_t dev = pf->dev; struct ixl_vsi *vsi = &pf->vsi; - struct ixl_queue *queues = vsi->queues; + device_t dev = iflib_get_dev(vsi->ctx); struct i40e_hw_port_stats *pf_stats = &pf->stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); - struct sysctl_oid_list *vsi_list; - + struct sysctl_oid_list *vsi_list, *queue_list; struct sysctl_oid *queue_node; - struct sysctl_oid_list *queue_list; + char queue_namebuf[32]; + struct ixl_rx_queue *rx_que; + struct ixl_tx_queue *tx_que; struct tx_ring *txr; struct rx_ring *rxr; - char queue_namebuf[QUEUE_NAME_LEN]; /* Driver statistics */ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &pf->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq", CTLFLAG_RD, &pf->admin_irq, "Admin Queue IRQ Handled"); ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf"); vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node); /* Queue statistics */ - for (int q = 0; q < vsi->num_queues; q++) { - snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); + for (int q = 0; q < vsi->num_rx_queues; q++) { + snprintf(queue_namebuf, QUEUE_NAME_LEN, "rxq%02d", q); queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, - OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #"); + OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "RX Queue #"); queue_list = SYSCTL_CHILDREN(queue_node); - txr = &(queues[q].txr); - rxr = &(queues[q].rxr); + rx_que = &(vsi->rx_queues[q]); + rxr = &(rx_que->rxr); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", - CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), - "m_defrag() failed"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", - CTLFLAG_RD, &(queues[q].irqs), - "irqs on this queue"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", - CTLFLAG_RD, &(queues[q].tso), - "TSO"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed", - CTLFLAG_RD, &(queues[q].tx_dmamap_failed), - "Driver tx dma failure in xmit"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small", - CTLFLAG_RD, &(queues[q].mss_too_small), - "TSO sends with an MSS less than 64"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", - CTLFLAG_RD, &(txr->no_desc), - "Queue No Descriptor Available"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", - CTLFLAG_RD, &(txr->total_packets), - "Queue Packets Transmitted"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes", - CTLFLAG_RD, &(txr->tx_bytes), - "Queue Bytes Transmitted"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", + CTLFLAG_RD, &(rx_que->irqs), + "irqs on this queue (both Tx and Rx)"); + + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets", CTLFLAG_RD, &(rxr->rx_packets), "Queue Packets 
Received"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes", CTLFLAG_RD, &(rxr->rx_bytes), "Queue Bytes Received"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err", + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "desc_err", CTLFLAG_RD, &(rxr->desc_errs), "Queue Rx Descriptor Errors"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr", + SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr", CTLFLAG_RD, &(rxr->itr), 0, "Queue Rx ITR Interval"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr", - CTLFLAG_RD, &(txr->itr), 0, - "Queue Tx ITR Interval"); #ifdef IXL_DEBUG - SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "txr_watchdog", - CTLFLAG_RD, &(txr->watchdog_timer), 0, - "Ticks before watchdog timer causes interface reinit"); - SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_avail", - CTLFLAG_RD, &(txr->next_avail), 0, - "Next TX descriptor to be used"); - SYSCTL_ADD_U16(ctx, queue_list, OID_AUTO, "tx_next_to_clean", - CTLFLAG_RD, &(txr->next_to_clean), 0, - "Next TX descriptor to be cleaned"); - SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done", - CTLFLAG_RD, &(rxr->not_done), - "Queue Rx Descriptors not Done"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh", - CTLFLAG_RD, &(rxr->next_refresh), 0, - "Queue Rx Descriptors not Done"); - SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check", - CTLFLAG_RD, &(rxr->next_check), 0, - "Queue Rx Descriptors not Done"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail", - CTLTYPE_UINT | CTLFLAG_RD, &queues[q], - sizeof(struct ixl_queue), + CTLTYPE_UINT | CTLFLAG_RD, rx_que, + sizeof(struct ixl_rx_queue), ixl_sysctl_qrx_tail_handler, "IU", "Queue Receive Descriptor Tail"); +#endif + } + for (int q = 0; q < vsi->num_tx_queues; q++) { + snprintf(queue_namebuf, QUEUE_NAME_LEN, "txq%02d", q); + queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, + OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "TX Queue #"); + queue_list = SYSCTL_CHILDREN(queue_node); + + tx_que = &(vsi->tx_queues[q]); + txr = &(tx_que->txr); + + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso", + CTLFLAG_RD, &(tx_que->tso), + "TSO"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mss_too_small", + CTLFLAG_RD, &(txr->mss_too_small), + "TSO sends with an MSS less than 64"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "packets", + CTLFLAG_RD, &(txr->tx_packets), + "Queue Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "bytes", + CTLFLAG_RD, &(txr->tx_bytes), + "Queue Bytes Transmitted"); + SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "itr", + CTLFLAG_RD, &(txr->itr), 0, + "Queue Tx ITR Interval"); +#ifdef IXL_DEBUG SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail", - CTLTYPE_UINT | CTLFLAG_RD, &queues[q], - sizeof(struct ixl_queue), + CTLTYPE_UINT | CTLFLAG_RD, tx_que, + sizeof(struct ixl_tx_queue), ixl_sysctl_qtx_tail_handler, "IU", "Queue Transmit Descriptor Tail"); #endif } /* MAC stats */ ixl_add_sysctls_mac_stats(ctx, child, pf_stats); } void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_eth_stats *eth_stats) { struct ixl_sysctl_info ctls[] = { {ð_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"}, {ð_stats->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received"}, {ð_stats->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received"}, {ð_stats->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received"}, {ð_stats->rx_discards, "rx_discards", "Discarded RX packets"}, {ð_stats->tx_bytes, 
"good_octets_txd", "Good Octets Transmitted"}, {ð_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"}, {ð_stats->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted"}, {ð_stats->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted"}, // end {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } } void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct i40e_hw_port_stats *stats) { struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", CTLFLAG_RD, NULL, "Mac Statistics"); struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node); struct i40e_eth_stats *eth_stats = &stats->eth; ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats); struct ixl_sysctl_info ctls[] = { {&stats->crc_errors, "crc_errors", "CRC Errors"}, {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"}, {&stats->mac_local_faults, "local_faults", "MAC Local Faults"}, {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"}, {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"}, /* Packet Reception Stats */ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"}, {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"}, {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"}, {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"}, {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"}, {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"}, {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"}, {&stats->rx_undersize, "rx_undersize", "Undersized packets received"}, {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"}, {&stats->rx_oversize, "rx_oversized", "Oversized packets received"}, {&stats->rx_jabber, "rx_jabber", "Received Jabber"}, {&stats->checksum_error, "checksum_errors", "Checksum Errors"}, /* Packet Transmission Stats */ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"}, {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"}, {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"}, {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"}, {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"}, {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"}, {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"}, /* Flow control */ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"}, {&stats->link_xon_rx, "xon_recvd", "Link XON received"}, {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"}, {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"}, /* End */ {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != 0) { SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } } void ixl_set_rss_key(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; enum i40e_status_code status; #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #else ixl_get_default_rss_key(rss_seed); #endif /* Fill out hash function seed */ if (hw->mac.type == I40E_MAC_X722) { struct 
i40e_aqc_get_set_rss_key_data key_data; bcopy(rss_seed, &key_data, 52); status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data); if (status) device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]); } } /* * Configure enabled PCTYPES for RSS. */ void ixl_set_rss_pctypes(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; u64 set_hena = 0, hena; #ifdef RSS u32 rss_hash_config; rss_hash_config = rss_gethashconfig(); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); #else if (hw->mac.type == I40E_MAC_X722) set_hena = IXL_DEFAULT_RSS_HENA_X722; else set_hena = IXL_DEFAULT_RSS_HENA_XL710; #endif hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); hena |= set_hena; i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); } void ixl_set_rss_hlut(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; - device_t dev = pf->dev; struct ixl_vsi *vsi = &pf->vsi; + device_t dev = iflib_get_dev(vsi->ctx); int i, que_id; int lut_entry_width; u32 lut = 0; enum i40e_status_code status; lut_entry_width = pf->hw.func_caps.rss_table_entry_width; /* Populate the LUT with max no. of queues in round robin fashion */ u8 hlut_buf[512]; for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) { #ifdef RSS /* * Fetch the RSS bucket id for the given indirection entry. * Cap it at the number of configured buckets (which is * num_queues.) */ que_id = rss_get_indirection_to_bucket(i); - que_id = que_id % vsi->num_queues; + que_id = que_id % vsi->num_rx_queues; #else - que_id = i % vsi->num_queues; + que_id = i % vsi->num_rx_queues; #endif lut = (que_id & ((0x1 << lut_entry_width) - 1)); hlut_buf[i] = lut; } if (hw->mac.type == I40E_MAC_X722) { status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf)); if (status) device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++) wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]); ixl_flush(hw); } } /* ** Setup the PF's RSS parameters. */ void ixl_config_rss(struct ixl_pf *pf) { ixl_set_rss_key(pf); ixl_set_rss_pctypes(pf); ixl_set_rss_hlut(pf); } /* -** This routine is run via an vlan config EVENT, -** it enables us to use the HW Filter table since -** we can get the vlan id. This just creates the -** entry in the soft version of the VFTA, init will -** repopulate the real table. 
-*/ -void -ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) -{ - struct ixl_vsi *vsi = ifp->if_softc; - struct i40e_hw *hw = vsi->hw; - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - - if (ifp->if_softc != arg) /* Not our event */ - return; - - if ((vtag == 0) || (vtag > 4095)) /* Invalid */ - return; - - IXL_PF_LOCK(pf); - ++vsi->num_vlans; - ixl_add_filter(vsi, hw->mac.addr, vtag); - IXL_PF_UNLOCK(pf); -} - -/* -** This routine is run via an vlan -** unconfig EVENT, remove our entry -** in the soft vfta. -*/ -void -ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) -{ - struct ixl_vsi *vsi = ifp->if_softc; - struct i40e_hw *hw = vsi->hw; - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - - if (ifp->if_softc != arg) - return; - - if ((vtag == 0) || (vtag > 4095)) /* Invalid */ - return; - - IXL_PF_LOCK(pf); - --vsi->num_vlans; - ixl_del_filter(vsi, hw->mac.addr, vtag); - IXL_PF_UNLOCK(pf); -} - -/* ** This routine updates vlan filters, called by init ** it scans the filter table and then updates the hw ** after a soft reset. */ void ixl_setup_vlan_filters(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; int cnt = 0, flags; if (vsi->num_vlans == 0) return; /* ** Scan the filter list for vlan entries, ** mark them for addition and then call ** for the AQ update. */ SLIST_FOREACH(f, &vsi->ftl, next) { if (f->flags & IXL_FILTER_VLAN) { f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); cnt++; } } if (cnt == 0) { printf("setup vlan: no filters found!\n"); return; } flags = IXL_FILTER_VLAN; flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); ixl_add_hw_filters(vsi, flags, cnt); - return; } /* + * In some firmware versions there is default MAC/VLAN filter + * configured which interferes with filters managed by driver. + * Make sure it's removed. + */ +static void +ixl_del_default_hw_filters(struct ixl_vsi *vsi) +{ + struct i40e_aqc_remove_macvlan_element_data e; + + bzero(&e, sizeof(e)); + bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); + e.vlan_tag = 0; + e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); + + bzero(&e, sizeof(e)); + bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN); + e.vlan_tag = 0; + e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; + i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL); +} + +/* ** Initialize filter list and add filters that the hardware ** needs to know about. ** ** Requires VSI's filter list & seid to be set before calling. */ void ixl_init_filters(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; - /* Add broadcast address */ - ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY); + /* Initialize mac filter list for VSI */ + SLIST_INIT(&vsi->ftl); + /* Receive broadcast Ethernet frames */ + i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL); + + ixl_del_default_hw_filters(vsi); + + ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY); /* * Prevent Tx flow control frames from being sent out by * non-firmware transmitters. * This affects every VSI in the PF. 
*/ if (pf->enable_tx_fc_filter) i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid); } /* ** This routine adds mulicast filters */ void ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr) { struct ixl_mac_filter *f; /* Does one already exist */ f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (f != NULL) return; - f = ixl_get_filter(vsi); - if (f == NULL) { + f = ixl_new_filter(vsi, macaddr, IXL_VLAN_ANY); + if (f != NULL) + f->flags |= IXL_FILTER_MC; + else printf("WARNING: no filter available!!\n"); - return; - } - bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); - f->vlan = IXL_VLAN_ANY; - f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED - | IXL_FILTER_MC); return; } void ixl_reconfigure_filters(struct ixl_vsi *vsi) { ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs); } /* ** This routine adds macvlan filters */ void ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f, *tmp; struct ixl_pf *pf; device_t dev; DEBUGOUT("ixl_add_filter: begin"); pf = vsi->back; dev = pf->dev; /* Does one already exist */ f = ixl_find_filter(vsi, macaddr, vlan); if (f != NULL) return; /* ** Is this the first vlan being registered, if so we ** need to remove the ANY filter that indicates we are ** not in a vlan, and replace that with a 0 filter. */ if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) { tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY); if (tmp != NULL) { ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY); ixl_add_filter(vsi, macaddr, 0); } } - f = ixl_get_filter(vsi); + f = ixl_new_filter(vsi, macaddr, vlan); if (f == NULL) { device_printf(dev, "WARNING: no filter available!!\n"); return; } - bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); - f->vlan = vlan; - f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); if (f->vlan != IXL_VLAN_ANY) f->flags |= IXL_FILTER_VLAN; else vsi->num_macs++; ixl_add_hw_filters(vsi, f->flags, 1); return; } void ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; f = ixl_find_filter(vsi, macaddr, vlan); if (f == NULL) return; f->flags |= IXL_FILTER_DEL; ixl_del_hw_filters(vsi, 1); - vsi->num_macs--; + if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0) + vsi->num_macs--; /* Check if this is the last vlan removal */ if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) { /* Switch back to a non-vlan filter */ ixl_del_filter(vsi, macaddr, 0); ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY); } return; } /* ** Find the filter with both matching mac addr and vlan id */ struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan) { struct ixl_mac_filter *f; - bool match = FALSE; SLIST_FOREACH(f, &vsi->ftl, next) { - if (!cmp_etheraddr(f->macaddr, macaddr)) - continue; - if (f->vlan == vlan) { - match = TRUE; - break; + if ((cmp_etheraddr(f->macaddr, macaddr) != 0) + && (f->vlan == vlan)) { + return (f); } } - if (!match) - f = NULL; - return (f); + return (NULL); } /* ** This routine takes additions to the vsi filter ** table and creates an Admin Queue call to create ** the filters in the hardware. 
*/ void ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt) { struct i40e_aqc_add_macvlan_element_data *a, *b; struct ixl_mac_filter *f; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; - int err, j = 0; + enum i40e_status_code status; + int j = 0; + MPASS(cnt > 0); + pf = vsi->back; - dev = pf->dev; + dev = iflib_get_dev(vsi->ctx); hw = &pf->hw; - IXL_PF_LOCK_ASSERT(pf); a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (a == NULL) { device_printf(dev, "add_hw_filters failed to get memory\n"); return; } /* ** Scan the filter list, each time we find one ** we add it to the admin queue array and turn off ** the add bit. */ SLIST_FOREACH(f, &vsi->ftl, next) { if ((f->flags & flags) == flags) { b = &a[j]; // a pox on fvl long names :) bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN); if (f->vlan == IXL_VLAN_ANY) { b->vlan_tag = 0; b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; } else { b->vlan_tag = f->vlan; b->flags = 0; } b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; f->flags &= ~IXL_FILTER_ADD; j++; } if (j == cnt) break; } if (j > 0) { - err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); - if (err) - device_printf(dev, "aq_add_macvlan err %d, " - "aq_error %d\n", err, hw->aq.asq_last_status); + status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL); + if (status) + device_printf(dev, "i40e_aq_add_macvlan status %s, " + "error %s\n", i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); else vsi->hw_filters_add += j; } free(a, M_DEVBUF); return; } /* ** This routine takes removals in the vsi filter ** table and creates an Admin Queue call to delete ** the filters in the hardware. */ void ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt) { struct i40e_aqc_remove_macvlan_element_data *d, *e; struct ixl_pf *pf; struct i40e_hw *hw; device_t dev; struct ixl_mac_filter *f, *f_temp; - int err, j = 0; + enum i40e_status_code status; + int j = 0; - DEBUGOUT("ixl_del_hw_filters: begin\n"); - pf = vsi->back; hw = &pf->hw; - dev = pf->dev; + dev = iflib_get_dev(vsi->ctx); d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt, M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { - printf("del hw filter failed to get memory\n"); + device_printf(dev, "%s: failed to get memory\n", __func__); return; } SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) { if (f->flags & IXL_FILTER_DEL) { e = &d[j]; // a pox on fvl long names :) bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN); e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; if (f->vlan == IXL_VLAN_ANY) { e->vlan_tag = 0; e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; } else { e->vlan_tag = f->vlan; } /* delete entry from vsi list */ SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next); free(f, M_DEVBUF); j++; } if (j == cnt) break; } if (j > 0) { - err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); - if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) { + status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL); + if (status) { int sc = 0; for (int i = 0; i < j; i++) sc += (!d[i].error_code); vsi->hw_filters_del += sc; device_printf(dev, - "Failed to remove %d/%d filters, aq error %d\n", - j - sc, j, hw->aq.asq_last_status); + "Failed to remove %d/%d filters, error %s\n", + j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status)); } else vsi->hw_filters_del += j; } free(d, M_DEVBUF); - - DEBUGOUT("ixl_del_hw_filters: end\n"); return; } int ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 
pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Enabling PF TX ring %4d / VSI TX ring %4d...\n", pf_qidx, vsi_qidx); i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE); reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); reg |= I40E_QTX_ENA_QENA_REQ_MASK | I40E_QTX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QTX_ENA(pf_qidx), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); if (reg & I40E_QTX_ENA_QENA_STAT_MASK) break; i40e_usec_delay(10); } if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "TX queue %d still disabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); ixl_dbg(pf, IXL_DBG_EN_DIS, "Enabling PF RX ring %4d / VSI RX ring %4d...\n", pf_qidx, vsi_qidx); reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); reg |= I40E_QRX_ENA_QENA_REQ_MASK | I40E_QRX_ENA_QENA_STAT_MASK; wr32(hw, I40E_QRX_ENA(pf_qidx), reg); /* Verify the enable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); if (reg & I40E_QRX_ENA_QENA_STAT_MASK) break; i40e_usec_delay(10); } if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { device_printf(pf->dev, "RX queue %d still disabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { int error = 0; error = ixl_enable_tx_ring(pf, qtag, vsi_qidx); /* Called function already prints error message */ if (error) return (error); error = ixl_enable_rx_ring(pf, qtag, vsi_qidx); return (error); } /* For PF VSI only */ int ixl_enable_rings(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; int error = 0; - for (int i = 0; i < vsi->num_queues; i++) { - error = ixl_enable_ring(pf, &pf->qtag, i); - if (error) - return (error); - } + for (int i = 0; i < vsi->num_tx_queues; i++) + error = ixl_enable_tx_ring(pf, &pf->qtag, i); + for (int i = 0; i < vsi->num_rx_queues; i++) + error = ixl_enable_rx_ring(pf, &pf->qtag, i); + return (error); } /* * Returns error on first ring that is detected hung. */ int ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE); i40e_usec_delay(500); reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QTX_ENA(pf_qidx), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QTX_ENA(pf_qidx)); if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QTX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "TX queue %d still enabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } /* * Returns error on first ring that is detected hung. 
*/ int ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { struct i40e_hw *hw = &pf->hw; int error = 0; u32 reg; u16 pf_qidx; pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_qidx), reg); /* Verify the disable took */ for (int j = 0; j < 10; j++) { reg = rd32(hw, I40E_QRX_ENA(pf_qidx)); if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK)) break; i40e_msec_delay(10); } if (reg & I40E_QRX_ENA_QENA_STAT_MASK) { device_printf(pf->dev, "RX queue %d still enabled!\n", pf_qidx); error = ETIMEDOUT; } return (error); } int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx) { int error = 0; error = ixl_disable_tx_ring(pf, qtag, vsi_qidx); /* Called function already prints error message */ if (error) return (error); error = ixl_disable_rx_ring(pf, qtag, vsi_qidx); return (error); } /* For PF VSI only */ int ixl_disable_rings(struct ixl_vsi *vsi) { struct ixl_pf *pf = vsi->back; int error = 0; - for (int i = 0; i < vsi->num_queues; i++) { - error = ixl_disable_ring(pf, &pf->qtag, i); - if (error) - return (error); - } + for (int i = 0; i < vsi->num_tx_queues; i++) + error = ixl_disable_tx_ring(pf, &pf->qtag, i); + for (int i = 0; i < vsi->num_rx_queues; i++) + error = ixl_disable_rx_ring(pf, &pf->qtag, i); + return (error); } /** * ixl_handle_mdd_event * * Called from interrupt handler to identify possibly malicious vfs * (But also detects events from the PF, as well) **/ void ixl_handle_mdd_event(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; + struct ixl_vf *vf; bool mdd_detected = false; bool pf_mdd_detected = false; + bool vf_mdd_detected = false; u32 reg; /* find what triggered the MDD event */ reg = rd32(hw, I40E_GL_MDET_TX); if (reg & I40E_GL_MDET_TX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >> I40E_GL_MDET_TX_PF_NUM_SHIFT; u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >> I40E_GL_MDET_TX_EVENT_SHIFT; u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> I40E_GL_MDET_TX_QUEUE_SHIFT; device_printf(dev, "Malicious Driver Detection event %d" " on TX queue %d, pf number %d\n", event, queue, pf_num); wr32(hw, I40E_GL_MDET_TX, 0xffffffff); mdd_detected = true; } reg = rd32(hw, I40E_GL_MDET_RX); if (reg & I40E_GL_MDET_RX_VALID_MASK) { u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> I40E_GL_MDET_RX_FUNCTION_SHIFT; u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >> I40E_GL_MDET_RX_EVENT_SHIFT; u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> I40E_GL_MDET_RX_QUEUE_SHIFT; device_printf(dev, "Malicious Driver Detection event %d" " on RX queue %d, pf number %d\n", event, queue, pf_num); wr32(hw, I40E_GL_MDET_RX, 0xffffffff); mdd_detected = true; } if (mdd_detected) { reg = rd32(hw, I40E_PF_MDET_TX); if (reg & I40E_PF_MDET_TX_VALID_MASK) { wr32(hw, I40E_PF_MDET_TX, 0xFFFF); device_printf(dev, "MDD TX event is for this function!\n"); pf_mdd_detected = true; } reg = rd32(hw, I40E_PF_MDET_RX); if (reg & I40E_PF_MDET_RX_VALID_MASK) { wr32(hw, I40E_PF_MDET_RX, 0xFFFF); device_printf(dev, "MDD RX event is for this function!\n"); pf_mdd_detected = true; } } + if (pf_mdd_detected) { + atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ); + goto end; + } + + // Handle VF detection + for (int i = 0; i < pf->num_vfs && mdd_detected; i++) { + vf = &(pf->vfs[i]); + reg = rd32(hw, I40E_VP_MDET_TX(i)); + if (reg & I40E_VP_MDET_TX_VALID_MASK) { + wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF); + vf->num_mdd_events++; + device_printf(dev, "MDD 
TX event is for VF %d\n", i); + vf_mdd_detected = true; + } + + reg = rd32(hw, I40E_VP_MDET_RX(i)); + if (reg & I40E_VP_MDET_RX_VALID_MASK) { + wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF); + vf->num_mdd_events++; + device_printf(dev, "MDD RX event is for VF %d\n", i); + vf_mdd_detected = true; + } + + // TODO: Disable VF if there are too many MDD events from it + } + + if (vf_mdd_detected) + atomic_set_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ); + +end: + atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING); + /* re-enable mdd interrupt cause */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); ixl_flush(hw); } +/* This only enables HW interrupts for the RX queues */ void ixl_enable_intr(struct ixl_vsi *vsi) { - struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = vsi->hw; - struct ixl_queue *que = vsi->queues; + struct ixl_rx_queue *que = vsi->rx_queues; - if (pf->msix > 1) { - for (int i = 0; i < vsi->num_queues; i++, que++) - ixl_enable_queue(hw, que->me); + // TODO: Check iflib interrupt mode instead? + if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { + for (int i = 0; i < vsi->num_rx_queues; i++, que++) + ixl_enable_queue(hw, que->rxr.me); } else ixl_enable_intr0(hw); } void ixl_disable_rings_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; - struct ixl_queue *que = vsi->queues; + struct ixl_rx_queue *que = vsi->rx_queues; - for (int i = 0; i < vsi->num_queues; i++, que++) - ixl_disable_queue(hw, que->me); + for (int i = 0; i < vsi->num_rx_queues; i++, que++) + ixl_disable_queue(hw, que->rxr.me); } void ixl_enable_intr0(struct i40e_hw *hw) { u32 reg; /* Use IXL_ITR_NONE so ITR isn't updated here */ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, reg); } void ixl_disable_intr0(struct i40e_hw *hw) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTL0, reg); ixl_flush(hw); } void ixl_enable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } void ixl_disable_queue(struct i40e_hw *hw, int id) { u32 reg; reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; wr32(hw, I40E_PFINT_DYN_CTLN(id), reg); } void ixl_update_stats_counters(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; struct ixl_vf *vf; struct i40e_hw_port_stats *nsd = &pf->stats; struct i40e_hw_port_stats *osd = &pf->stats_offsets; /* Update hw stats */ ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port), pf->stat_offsets_loaded, &osd->crc_errors, &nsd->crc_errors); ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port), pf->stat_offsets_loaded, &osd->illegal_bytes, &nsd->illegal_bytes); ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port), I40E_GLPRT_GORCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_bytes, &nsd->eth.rx_bytes); ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port), I40E_GLPRT_GOTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_bytes, &nsd->eth.tx_bytes); ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_discards, &nsd->eth.rx_discards); ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port), I40E_GLPRT_UPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_unicast, &nsd->eth.rx_unicast); ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port), 
I40E_GLPRT_UPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_unicast, &nsd->eth.tx_unicast); ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port), I40E_GLPRT_MPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_multicast, &nsd->eth.rx_multicast); ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port), I40E_GLPRT_MPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_multicast, &nsd->eth.tx_multicast); ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port), I40E_GLPRT_BPRCL(hw->port), pf->stat_offsets_loaded, &osd->eth.rx_broadcast, &nsd->eth.rx_broadcast); ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port), I40E_GLPRT_BPTCL(hw->port), pf->stat_offsets_loaded, &osd->eth.tx_broadcast, &nsd->eth.tx_broadcast); ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port), pf->stat_offsets_loaded, &osd->tx_dropped_link_down, &nsd->tx_dropped_link_down); ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port), pf->stat_offsets_loaded, &osd->mac_local_faults, &nsd->mac_local_faults); ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port), pf->stat_offsets_loaded, &osd->mac_remote_faults, &nsd->mac_remote_faults); ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port), pf->stat_offsets_loaded, &osd->rx_length_errors, &nsd->rx_length_errors); /* Flow control (LFC) stats */ ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_rx, &nsd->link_xon_rx); ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xon_tx, &nsd->link_xon_tx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_rx, &nsd->link_xoff_rx); ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port), pf->stat_offsets_loaded, &osd->link_xoff_tx, &nsd->link_xoff_tx); /* Packet size stats rx */ ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port), I40E_GLPRT_PRC64L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_64, &nsd->rx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port), I40E_GLPRT_PRC127L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_127, &nsd->rx_size_127); ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port), I40E_GLPRT_PRC255L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_255, &nsd->rx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port), I40E_GLPRT_PRC511L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_511, &nsd->rx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port), I40E_GLPRT_PRC1023L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1023, &nsd->rx_size_1023); ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port), I40E_GLPRT_PRC1522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_1522, &nsd->rx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port), I40E_GLPRT_PRC9522L(hw->port), pf->stat_offsets_loaded, &osd->rx_size_big, &nsd->rx_size_big); /* Packet size stats tx */ ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port), I40E_GLPRT_PTC64L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_64, &nsd->tx_size_64); ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port), I40E_GLPRT_PTC127L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_127, &nsd->tx_size_127); ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port), I40E_GLPRT_PTC255L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_255, &nsd->tx_size_255); ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port), I40E_GLPRT_PTC511L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_511, &nsd->tx_size_511); ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port), I40E_GLPRT_PTC1023L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1023, &nsd->tx_size_1023); ixl_stat_update48(hw, 
I40E_GLPRT_PTC1522H(hw->port), I40E_GLPRT_PTC1522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_1522, &nsd->tx_size_1522); ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port), I40E_GLPRT_PTC9522L(hw->port), pf->stat_offsets_loaded, &osd->tx_size_big, &nsd->tx_size_big); ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port), pf->stat_offsets_loaded, &osd->rx_undersize, &nsd->rx_undersize); ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port), pf->stat_offsets_loaded, &osd->rx_fragments, &nsd->rx_fragments); ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port), pf->stat_offsets_loaded, &osd->rx_oversize, &nsd->rx_oversize); ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port), pf->stat_offsets_loaded, &osd->rx_jabber, &nsd->rx_jabber); pf->stat_offsets_loaded = true; /* End hw stats */ /* Update vsi stats */ ixl_update_vsi_stats(vsi); for (int i = 0; i < pf->num_vfs; i++) { vf = &pf->vfs[i]; if (vf->vf_flags & VF_FLAG_ENABLED) ixl_update_eth_stats(&pf->vfs[i].vsi); } } int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up) { struct i40e_hw *hw = &pf->hw; - struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; int error = 0; - /* Teardown */ - if (is_up) - ixl_stop(pf); - - ixl_teardown_queue_msix(vsi); - error = i40e_shutdown_lan_hmc(hw); if (error) device_printf(dev, "Shutdown LAN HMC failed with code %d\n", error); ixl_disable_intr0(hw); - ixl_teardown_adminq_msix(pf); error = i40e_shutdown_adminq(hw); if (error) device_printf(dev, "Shutdown Admin queue failed with code %d\n", error); - callout_drain(&pf->timer); - - /* Free ring buffers, locks and filters */ - ixl_vsi_free_queues(vsi); - - /* Free VSI filter list */ - ixl_free_mac_filters(vsi); - ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag); - return (error); } int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up) { struct i40e_hw *hw = &pf->hw; struct ixl_vsi *vsi = &pf->vsi; device_t dev = pf->dev; int error = 0; device_printf(dev, "Rebuilding driver state...\n"); error = i40e_pf_reset(hw); if (error) { device_printf(dev, "PF reset failure %s\n", i40e_stat_str(hw, error)); goto ixl_rebuild_hw_structs_after_reset_err; } /* Setup */ error = i40e_init_adminq(hw); if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "Unable to initialize Admin Queue, error %d\n", error); goto ixl_rebuild_hw_structs_after_reset_err; } i40e_clear_pxe_mode(hw); error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error); goto ixl_rebuild_hw_structs_after_reset_err; } error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (error) { device_printf(dev, "init_lan_hmc failed: %d\n", error); goto ixl_rebuild_hw_structs_after_reset_err; } error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (error) { device_printf(dev, "configure_lan_hmc failed: %d\n", error); goto ixl_rebuild_hw_structs_after_reset_err; } /* reserve a contiguous allocation for the PF's VSI */ - error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag); + error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag); if (error) { device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", error); /* TODO: error handling */ } - device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", - pf->qtag.num_allocated, pf->qtag.num_active); - error = ixl_switch_config(pf); if (error) { device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n", error); goto 
ixl_rebuild_hw_structs_after_reset_err; } - if (ixl_vsi_setup_queues(vsi)) { - device_printf(dev, "setup queues failed!\n"); - error = ENOMEM; - goto ixl_rebuild_hw_structs_after_reset_err; - } + /* Remove default filters reinstalled by FW on reset */ + ixl_del_default_hw_filters(vsi); - if (pf->msix > 1) { - error = ixl_setup_adminq_msix(pf); - if (error) { - device_printf(dev, "ixl_setup_adminq_msix() error: %d\n", - error); - goto ixl_rebuild_hw_structs_after_reset_err; - } - - ixl_configure_intr0_msix(pf); - ixl_enable_intr0(hw); - - error = ixl_setup_queue_msix(vsi); - if (error) { - device_printf(dev, "ixl_setup_queue_msix() error: %d\n", - error); - goto ixl_rebuild_hw_structs_after_reset_err; - } - } else { - error = ixl_setup_legacy(pf); - if (error) { - device_printf(dev, "ixl_setup_legacy() error: %d\n", - error); - goto ixl_rebuild_hw_structs_after_reset_err; - } - } - /* Determine link state */ if (ixl_attach_get_link_status(pf)) { error = EINVAL; /* TODO: error handling */ } i40e_aq_set_dcb_parameters(hw, TRUE, NULL); ixl_get_fw_lldp_status(pf); - if (is_up) - ixl_init(pf); + /* Keep admin queue interrupts active while driver is loaded */ + if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { + ixl_configure_intr0_msix(pf); + ixl_enable_intr0(hw); + } device_printf(dev, "Rebuilding driver state done.\n"); return (0); ixl_rebuild_hw_structs_after_reset_err: device_printf(dev, "Reload the driver to recover\n"); return (error); } void ixl_handle_empr_reset(struct ixl_pf *pf) { struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING); int count = 0; u32 reg; ixl_prepare_for_reset(pf, is_up); /* Typically finishes within 3-4 seconds */ while (count++ < 100) { reg = rd32(hw, I40E_GLGEN_RSTAT) & I40E_GLGEN_RSTAT_DEVSTATE_MASK; if (reg) i40e_msec_delay(100); else break; } ixl_dbg(pf, IXL_DBG_INFO, - "EMPR reset wait count: %d\n", count); + "Reset wait count: %d\n", count); ixl_rebuild_hw_structs_after_reset(pf, is_up); - atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING); + atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING); } -/* -** Tasklet handler for MSIX Adminq interrupts -** - do outside interrupt since it might sleep -*/ -void -ixl_do_adminq(void *context, int pending) -{ - struct ixl_pf *pf = context; - struct i40e_hw *hw = &pf->hw; - struct i40e_arq_event_info event; - i40e_status ret; - device_t dev = pf->dev; - u32 loop = 0; - u16 opcode, result; - - if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { - /* Flag cleared at end of this function */ - ixl_handle_empr_reset(pf); - return; - } - - /* Admin Queue handling */ - event.buf_len = IXL_AQ_BUF_SZ; - event.msg_buf = malloc(event.buf_len, - M_DEVBUF, M_NOWAIT | M_ZERO); - if (!event.msg_buf) { - device_printf(dev, "%s: Unable to allocate memory for Admin" - " Queue event!\n", __func__); - return; - } - - IXL_PF_LOCK(pf); - /* clean and process any events */ - do { - ret = i40e_clean_arq_element(hw, &event, &result); - if (ret) - break; - opcode = LE16_TO_CPU(event.desc.opcode); - ixl_dbg(pf, IXL_DBG_AQ, - "Admin Queue event: %#06x\n", opcode); - switch (opcode) { - case i40e_aqc_opc_get_link_status: - ixl_link_event(pf, &event); - break; - case i40e_aqc_opc_send_msg_to_pf: -#ifdef PCI_IOV - ixl_handle_vf_msg(pf, &event); -#endif - break; - case i40e_aqc_opc_event_lan_overflow: - default: - break; - } - - } while (result && (loop++ < IXL_ADM_LIMIT)); - - free(event.msg_buf, M_DEVBUF); - - /* - * If there are still messages to process, reschedule 
ourselves. - * Otherwise, re-enable our interrupt. - */ - if (result > 0) - taskqueue_enqueue(pf->tq, &pf->adminq); - else - ixl_enable_intr0(hw); - - IXL_PF_UNLOCK(pf); -} - /** * Update VSI-specific ethernet statistics counters. **/ void ixl_update_eth_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf = (struct ixl_pf *)vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_eth_stats *es; struct i40e_eth_stats *oes; struct i40e_hw_port_stats *nsd; u16 stat_idx = vsi->info.stat_counter_idx; es = &vsi->eth_stats; oes = &vsi->eth_stats_offsets; nsd = &pf->stats; /* Gather up the stats that the hw collects */ ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx), vsi->stat_offsets_loaded, &oes->tx_errors, &es->tx_errors); ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx), vsi->stat_offsets_loaded, &oes->rx_discards, &es->rx_discards); ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx), I40E_GLV_GORCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_bytes, &es->rx_bytes); ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx), I40E_GLV_UPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_unicast, &es->rx_unicast); ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx), I40E_GLV_MPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_multicast, &es->rx_multicast); ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx), I40E_GLV_BPRCL(stat_idx), vsi->stat_offsets_loaded, &oes->rx_broadcast, &es->rx_broadcast); ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx), I40E_GLV_GOTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_bytes, &es->tx_bytes); ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx), I40E_GLV_UPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_unicast, &es->tx_unicast); ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx), I40E_GLV_MPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_multicast, &es->tx_multicast); ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx), I40E_GLV_BPTCL(stat_idx), vsi->stat_offsets_loaded, &oes->tx_broadcast, &es->tx_broadcast); vsi->stat_offsets_loaded = true; } void ixl_update_vsi_stats(struct ixl_vsi *vsi) { struct ixl_pf *pf; struct ifnet *ifp; struct i40e_eth_stats *es; u64 tx_discards; struct i40e_hw_port_stats *nsd; pf = vsi->back; ifp = vsi->ifp; es = &vsi->eth_stats; nsd = &pf->stats; ixl_update_eth_stats(vsi); tx_discards = es->tx_discards + nsd->tx_dropped_link_down; - for (int i = 0; i < vsi->num_queues; i++) - tx_discards += vsi->queues[i].txr.br->br_drops; /* Update ifnet stats */ IXL_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast + es->rx_broadcast); IXL_SET_OPACKETS(vsi, es->tx_unicast + es->tx_multicast + es->tx_broadcast); IXL_SET_IBYTES(vsi, es->rx_bytes); IXL_SET_OBYTES(vsi, es->tx_bytes); IXL_SET_IMCASTS(vsi, es->rx_multicast); IXL_SET_OMCASTS(vsi, es->tx_multicast); IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes + nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments + nsd->rx_jabber); IXL_SET_OERRORS(vsi, es->tx_errors); IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards); IXL_SET_OQDROPS(vsi, tx_discards); IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); IXL_SET_COLLISIONS(vsi, 0); } /** * Reset all of the stats for the given pf **/ void ixl_pf_reset_stats(struct ixl_pf *pf) { bzero(&pf->stats, sizeof(struct i40e_hw_port_stats)); bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats)); pf->stat_offsets_loaded = false; } /** * Resets all stats of the given vsi **/ void ixl_vsi_reset_stats(struct ixl_vsi *vsi) { bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats)); bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats)); vsi->stat_offsets_loaded = false; } /** * Read 
and update a 48 bit stat from the hw * * Since the device stats are not reset at PFReset, they likely will not * be zeroed when the driver starts. We'll save the first values read * and use them as offsets to be subtracted from the raw values in order * to report stats that count from zero. **/ void ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg, bool offset_loaded, u64 *offset, u64 *stat) { u64 new_data; #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__) new_data = rd64(hw, loreg); #else /* * Use two rd32's instead of one rd64; FreeBSD versions before * 10 don't support 64-bit bus reads/writes. */ new_data = rd32(hw, loreg); new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32; #endif if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = new_data - *offset; else *stat = (new_data + ((u64)1 << 48)) - *offset; *stat &= 0xFFFFFFFFFFFFULL; } /** * Read and update a 32 bit stat from the hw **/ void ixl_stat_update32(struct i40e_hw *hw, u32 reg, bool offset_loaded, u64 *offset, u64 *stat) { u32 new_data; new_data = rd32(hw, reg); if (!offset_loaded) *offset = new_data; if (new_data >= *offset) *stat = (u32)(new_data - *offset); else *stat = (u32)((new_data + ((u64)1 << 32)) - *offset); } void ixl_add_device_sysctls(struct ixl_pf *pf) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid_list *ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); struct sysctl_oid *debug_node; struct sysctl_oid_list *debug_list; struct sysctl_oid *fec_node; struct sysctl_oid_list *fec_list; /* Set up sysctls */ SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "supported_speeds", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_current_speed, "A", "Current Port Speed"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_show_fw, "A", "Firmware version"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD, pf, 0, ixl_sysctl_unallocated_queues, "I", "Queues not allocated to a PF or VF"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_pf_tx_itr, "I", "Immediately set TX ITR value for all queues"); SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_pf_rx_itr, "I", "Immediately set RX ITR value for all queues"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW, &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR"); SYSCTL_ADD_INT(ctx, ctx_list, OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW, &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR"); - SYSCTL_ADD_INT(ctx, ctx_list, - OID_AUTO, "tx_ring_size", CTLFLAG_RD, - &pf->vsi.num_tx_desc, 0, "TX ring size"); - - SYSCTL_ADD_INT(ctx, ctx_list, - OID_AUTO, "rx_ring_size", CTLFLAG_RD, - &pf->vsi.num_rx_desc, 0, "RX ring size"); - /* Add FEC sysctls for 25G adapters */ if (i40e_is_25G_device(hw->device_id)) { fec_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "fec", CTLFLAG_RD, NULL, "FEC Sysctls"); fec_list = 
SYSCTL_CHILDREN(fec_node); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "fc_ability", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "rs_ability", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "fc_requested", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fec_fc_request, "I", "FC FEC mode requested on link"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "rs_requested", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fec_rs_request, "I", "RS FEC mode requested on link"); SYSCTL_ADD_PROC(ctx, fec_list, OID_AUTO, "auto_fec_enabled", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fec_auto_enable, "I", "Let FW decide FEC ability/request modes"); } SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW, pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP); /* Add sysctls meant to print debug information, but don't list them * in "sysctl -a" output. */ debug_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls"); debug_list = SYSCTL_CHILDREN(debug_node); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "shared_debug_mask", CTLFLAG_RW, &pf->hw.debug_mask, 0, "Shared code debug message level"); SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "core_debug_mask", CTLFLAG_RW, - &pf->dbg_mask, 0, "Non-hared code debug message level"); + &pf->dbg_mask, 0, "Non-shared code debug message level"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hkey, "A", "View RSS key"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "rss_hena", CTLTYPE_ULONG | CTLFLAG_RD, pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "disable_fw_link_management", CTLTYPE_INT | CTLFLAG_WR, pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management"); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "dump_debug_data", CTLTYPE_STRING | CTLFLAG_RD, pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW"); + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "do_pf_reset", CTLTYPE_INT | CTLFLAG_WR, + pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset"); + + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "do_core_reset", CTLTYPE_INT | CTLFLAG_WR, + pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset"); + + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "do_global_reset", CTLTYPE_INT | CTLFLAG_WR, + pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset"); + + SYSCTL_ADD_PROC(ctx, 
debug_list, + OID_AUTO, "do_emp_reset", CTLTYPE_INT | CTLFLAG_WR, + pf, 0, ixl_sysctl_do_emp_reset, "I", + "(This doesn't work) Tell HW to initiate a EMP (entire firmware) reset"); + + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "queue_interrupt_table", CTLTYPE_STRING | CTLFLAG_RD, + pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues"); + if (pf->has_i2c) { SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "read_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, - pf, 0, ixl_sysctl_read_i2c_byte, "I", "Read byte from I2C bus"); + pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C); SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "write_i2c_byte", CTLTYPE_INT | CTLFLAG_RW, - pf, 0, ixl_sysctl_write_i2c_byte, "I", "Write byte to I2C bus"); + pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C); + + SYSCTL_ADD_PROC(ctx, debug_list, + OID_AUTO, "read_i2c_diag_data", CTLTYPE_STRING | CTLFLAG_RD, + pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW"); } #ifdef PCI_IOV SYSCTL_ADD_UINT(ctx, debug_list, OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl, 0, "PF/VF Virtual Channel debug level"); #endif } /* * Primarily for finding out how many queues can be assigned to VFs, * at runtime. */ static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int queues; - IXL_PF_LOCK(pf); + //IXL_PF_LOCK(pf); queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr); - IXL_PF_UNLOCK(pf); + //IXL_PF_UNLOCK(pf); return sysctl_handle_int(oidp, NULL, queues, req); } /* ** Set flow control using sysctl: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int requested_fc, error = 0; enum i40e_status_code aq_error = 0; u8 fc_aq_err = 0; /* Get request */ requested_fc = pf->fc; error = sysctl_handle_int(oidp, &requested_fc, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (requested_fc < 0 || requested_fc > 3) { device_printf(dev, "Invalid fc mode; valid modes are 0 through 3\n"); return (EINVAL); } /* Set fc ability for port */ hw->fc.requested_mode = requested_fc; aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE); if (aq_error) { device_printf(dev, "%s: Error setting new fc mode %d; fc_err %#x\n", __func__, aq_error, fc_aq_err); return (EIO); } pf->fc = requested_fc; - /* Get new link state */ - i40e_msec_delay(250); - hw->phy.get_link_info = TRUE; - i40e_get_link_status(hw, &pf->link_up); - return (0); } char * ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed) { int index; char *speeds[] = { "Unknown", "100 Mbps", "1 Gbps", "10 Gbps", "40 Gbps", "20 Gbps", "25 Gbps", }; switch (link_speed) { case I40E_LINK_SPEED_100MB: index = 1; break; case I40E_LINK_SPEED_1GB: index = 2; break; case I40E_LINK_SPEED_10GB: index = 3; break; case I40E_LINK_SPEED_40GB: index = 4; break; case I40E_LINK_SPEED_20GB: index = 5; break; case I40E_LINK_SPEED_25GB: index = 6; break; case I40E_LINK_SPEED_UNKNOWN: default: index = 0; break; } return speeds[index]; } int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; int error = 0; ixl_update_link_status(pf); error = sysctl_handle_string(oidp, ixl_aq_speed_to_str(hw->phy.link_info.link_speed), 8, req); return (error); } /* * Converts 8-bit speeds value to and from sysctl flags and * Admin Queue flags. 
*/ static u8 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq) { static u16 speedmap[6] = { (I40E_LINK_SPEED_100MB | (0x1 << 8)), (I40E_LINK_SPEED_1GB | (0x2 << 8)), (I40E_LINK_SPEED_10GB | (0x4 << 8)), (I40E_LINK_SPEED_20GB | (0x8 << 8)), (I40E_LINK_SPEED_25GB | (0x10 << 8)), (I40E_LINK_SPEED_40GB | (0x20 << 8)) }; u8 retval = 0; for (int i = 0; i < 6; i++) { if (to_aq) retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0; else retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0; } return (retval); } int ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config; enum i40e_status_code aq_error = 0; /* Get current capability information */ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (aq_error) { device_printf(dev, "%s: Error getting phy capabilities %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EIO); } /* Prepare new config */ bzero(&config, sizeof(config)); if (from_aq) config.link_speed = speeds; else config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true); config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e); /* Do aq command & restart link */ aq_error = i40e_aq_set_phy_config(hw, &config, NULL); if (aq_error) { device_printf(dev, "%s: Error setting new phy config %d," " aq error: %d\n", __func__, aq_error, hw->aq.asq_last_status); return (EIO); } return (0); } /* ** Supported link speedsL ** Flags: ** 0x1 - 100 Mb ** 0x2 - 1G ** 0x4 - 10G ** 0x8 - 20G ** 0x10 - 25G ** 0x20 - 40G */ static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false); return sysctl_handle_int(oidp, NULL, supported, req); } /* ** Control link advertise speed: ** Flags: ** 0x1 - advertise 100 Mb ** 0x2 - advertise 1G ** 0x4 - advertise 10G ** 0x8 - advertise 20G ** 0x10 - advertise 25G ** 0x20 - advertise 40G ** ** Set to 0 to disable link */ int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; u8 converted_speeds; int requested_ls = 0; int error = 0; /* Read in new mode */ requested_ls = pf->advertised_speed; error = sysctl_handle_int(oidp, &requested_ls, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Error out if bits outside of possible flag range are set */ if ((requested_ls & ~((u8)0x3F)) != 0) { device_printf(dev, "Input advertised speed out of range; " "valid flags are: 0x%02x\n", ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); return (EINVAL); } /* Check if adapter supports input value */ converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true); if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) { device_printf(dev, "Invalid advertised speed; " "valid flags are: 0x%02x\n", ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false)); return (EINVAL); } error = ixl_set_advertised_speeds(pf, requested_ls, false); if (error) return (error); pf->advertised_speed = requested_ls; ixl_update_link_status(pf); 
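For readers unfamiliar with the speedmap trick used by ixl_convert_sysctl_aq_link_speed() above, here is a minimal standalone sketch of the same idea: each table entry packs the Admin Queue link-speed flag in its low byte and the user-visible sysctl flag in its high byte, so a single table converts in both directions. The AQ_* values below are illustrative placeholders, not the real I40E_LINK_SPEED_* constants from i40e_type.h.

/* Standalone sketch; compiles as plain C outside the kernel. */
#include <stdint.h>
#include <stdio.h>

#define AQ_100MB 0x02	/* placeholder for I40E_LINK_SPEED_100MB */
#define AQ_1GB   0x04	/* placeholder for I40E_LINK_SPEED_1GB */
#define AQ_10GB  0x08	/* placeholder for I40E_LINK_SPEED_10GB */

static uint8_t
convert_speed_flags(uint8_t speeds, int to_aq)
{
	/* low byte: AQ flag, high byte: sysctl flag */
	static const uint16_t speedmap[] = {
		AQ_100MB | (0x1 << 8),
		AQ_1GB   | (0x2 << 8),
		AQ_10GB  | (0x4 << 8),
	};
	uint8_t ret = 0;

	for (size_t i = 0; i < sizeof(speedmap) / sizeof(speedmap[0]); i++) {
		if (to_aq)	/* sysctl flags -> AQ flags */
			ret |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
		else		/* AQ flags -> sysctl flags */
			ret |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
	}
	return (ret);
}

int
main(void)
{
	/* sysctl value 0x6 (1G + 10G advertised) maps to the two matching AQ bits */
	printf("0x%02x\n", convert_speed_flags(0x06, 1));
	return (0);
}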
return (0); } /* - * Input: bitmap of enum i40e_aq_link_speed - */ -static u64 -ixl_max_aq_speed_to_value(u8 link_speeds) -{ - if (link_speeds & I40E_LINK_SPEED_40GB) - return IF_Gbps(40); - if (link_speeds & I40E_LINK_SPEED_25GB) - return IF_Gbps(25); - if (link_speeds & I40E_LINK_SPEED_20GB) - return IF_Gbps(20); - if (link_speeds & I40E_LINK_SPEED_10GB) - return IF_Gbps(10); - if (link_speeds & I40E_LINK_SPEED_1GB) - return IF_Gbps(1); - if (link_speeds & I40E_LINK_SPEED_100MB) - return IF_Mbps(100); - else - /* Minimum supported link speed */ - return IF_Mbps(100); -} - -/* ** Get the width and transaction speed of ** the bus this adapter is plugged into. */ void ixl_get_bus_info(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; u16 link; u32 offset, num_ports; u64 max_speed; /* Some devices don't use PCIE */ if (hw->mac.type == I40E_MAC_X722) return; /* Read PCI Express Capabilities Link Status Register */ pci_find_cap(dev, PCIY_EXPRESS, &offset); link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); /* Fill out hw struct with PCIE info */ i40e_set_pci_config_data(hw, link); /* Use info to print out bandwidth messages */ device_printf(dev,"PCI Express Bus: Speed %s %s\n", ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s": (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s": (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"), (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" : (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" : (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" : (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" : ("Unknown")); /* * If adapter is in slot with maximum supported speed, * no warning message needs to be printed out. */ if (hw->bus.speed >= i40e_bus_speed_8000 && hw->bus.width >= i40e_bus_width_pcie_x8) return; num_ports = bitcount32(hw->func_caps.valid_functions); max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000; if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) { device_printf(dev, "PCI-Express bandwidth available" " for this device may be insufficient for" " optimal performance.\n"); device_printf(dev, "Please move the device to a different" " PCI-e link with more lanes and/or higher" " transfer rate.\n"); } } static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; struct sbuf *sbuf; sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); ixl_nvm_version_str(hw, sbuf); sbuf_finish(sbuf); sbuf_delete(sbuf); return (0); } void ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma) { if ((nvma->command == I40E_NVM_READ) && ((nvma->config & 0xFF) == 0xF) && (((nvma->config & 0xF00) >> 8) == 0xF) && (nvma->offset == 0) && (nvma->data_size == 1)) { // device_printf(dev, "- Get Driver Status Command\n"); } else if (nvma->command == I40E_NVM_READ) { - + } else { switch (nvma->command) { case 0xB: device_printf(dev, "- command: I40E_NVM_READ\n"); break; case 0xC: device_printf(dev, "- command: I40E_NVM_WRITE\n"); break; default: device_printf(dev, "- command: unknown 0x%08x\n", nvma->command); break; } device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF); device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8); device_printf(dev, "- offset : 0x%08x\n", nvma->offset); device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size); } } int ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd) { struct i40e_hw *hw = &pf->hw; struct i40e_nvm_access *nvma; device_t dev 
= pf->dev; enum i40e_status_code status = 0; int perrno; DEBUGFUNC("ixl_handle_nvmupd_cmd"); /* Sanity checks */ if (ifd->ifd_len < sizeof(struct i40e_nvm_access) || ifd->ifd_data == NULL) { device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__); device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n", __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access)); device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data); return (EINVAL); } nvma = (struct i40e_nvm_access *)ifd->ifd_data; if (pf->dbg_mask & IXL_DBG_NVMUPD) ixl_print_nvm_cmd(dev, nvma); - if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { + if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) { int count = 0; while (count++ < 100) { i40e_msec_delay(100); - if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) + if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING)) break; } } - if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) { - IXL_PF_LOCK(pf); + if (!(pf->state & IXL_PF_STATE_ADAPTER_RESETTING)) { + // TODO: Might need a different lock here + // IXL_PF_LOCK(pf); status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno); - IXL_PF_UNLOCK(pf); + // IXL_PF_UNLOCK(pf); } else { perrno = -EBUSY; } /* Let the nvmupdate report errors, show them only when debug is enabled */ if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0) device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n", i40e_stat_str(hw, status), perrno); /* * -EPERM is actually ERESTART, which the kernel interprets as it needing * to run this ioctl again. So use -EACCES for -EPERM instead. */ if (perrno == -EPERM) return (-EACCES); else return (perrno); } -/********************************************************************* - * - * Media Ioctl callback - * - * This routine is called whenever the user queries the status of - * the interface using ifconfig. - * - * When adding new media types here, make sure to add them to - * ixl_add_ifmedia(), too. 
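To make the bandwidth warning in ixl_get_bus_info() above concrete, here is the same arithmetic as a standalone calculation. The numbers are illustrative, not taken from any real adapter: a hypothetical two-port device whose ports support 40 Gb/s, sitting in a PCIe 8.0 GT/s x4 slot. ixl_max_aq_speed_to_value() returns bits per second, so dividing by 1000000 yields Mb/s, which the driver compares against the bus speed (assumed to encode GT/s as a plain number) times the lane count, as the comparison in the code implies.

/* Standalone back-of-the-envelope version of the check above. */
#include <stdio.h>

int
main(void)
{
	unsigned num_ports = 2;		/* hypothetical dual-port adapter */
	unsigned long long max_speed = 40ULL * 1000000000 / 1000000; /* 40 Gb/s in Mb/s */
	unsigned bus_speed = 8000;	/* PCIe 8.0 GT/s slot */
	unsigned bus_width = 4;		/* x4 link */

	if (num_ports * max_speed > (unsigned long long)(bus_speed * bus_width))
		printf("warn: %llu Mb/s of ports vs ~%u Mb/s of bus\n",
		    num_ports * max_speed, bus_speed * bus_width);
	return (0);
}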
- * - **********************************************************************/ -void -ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) -{ - struct ixl_vsi *vsi = ifp->if_softc; - struct ixl_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - - INIT_DEBUGOUT("ixl_media_status: begin"); - - /* Don't touch PF during reset */ - if (atomic_load_acq_int(&pf->state) & IXL_PF_STATE_EMPR_RESETTING) - return; - - IXL_PF_LOCK(pf); - - i40e_get_link_status(hw, &pf->link_up); - ixl_update_link_status(pf); - - ifmr->ifm_status = IFM_AVALID; - ifmr->ifm_active = IFM_ETHER; - - if (!pf->link_up) { - IXL_PF_UNLOCK(pf); - return; - } - - ifmr->ifm_status |= IFM_ACTIVE; - - /* Hardware always does full-duplex */ - ifmr->ifm_active |= IFM_FDX; - - switch (hw->phy.link_info.phy_type) { - /* 100 M */ - case I40E_PHY_TYPE_100BASE_TX: - ifmr->ifm_active |= IFM_100_TX; - break; - /* 1 G */ - case I40E_PHY_TYPE_1000BASE_T: - ifmr->ifm_active |= IFM_1000_T; - break; - case I40E_PHY_TYPE_1000BASE_SX: - ifmr->ifm_active |= IFM_1000_SX; - break; - case I40E_PHY_TYPE_1000BASE_LX: - ifmr->ifm_active |= IFM_1000_LX; - break; - case I40E_PHY_TYPE_1000BASE_T_OPTICAL: - ifmr->ifm_active |= IFM_1000_T; - break; - /* 10 G */ - case I40E_PHY_TYPE_10GBASE_SFPP_CU: - ifmr->ifm_active |= IFM_10G_TWINAX; - break; - case I40E_PHY_TYPE_10GBASE_SR: - ifmr->ifm_active |= IFM_10G_SR; - break; - case I40E_PHY_TYPE_10GBASE_LR: - ifmr->ifm_active |= IFM_10G_LR; - break; - case I40E_PHY_TYPE_10GBASE_T: - ifmr->ifm_active |= IFM_10G_T; - break; - case I40E_PHY_TYPE_XAUI: - case I40E_PHY_TYPE_XFI: - ifmr->ifm_active |= IFM_10G_TWINAX; - break; - case I40E_PHY_TYPE_10GBASE_AOC: - ifmr->ifm_active |= IFM_10G_AOC; - break; - /* 25 G */ - case I40E_PHY_TYPE_25GBASE_KR: - ifmr->ifm_active |= IFM_25G_KR; - break; - case I40E_PHY_TYPE_25GBASE_CR: - ifmr->ifm_active |= IFM_25G_CR; - break; - case I40E_PHY_TYPE_25GBASE_SR: - ifmr->ifm_active |= IFM_25G_SR; - break; - case I40E_PHY_TYPE_25GBASE_LR: - ifmr->ifm_active |= IFM_25G_LR; - break; - case I40E_PHY_TYPE_25GBASE_AOC: - ifmr->ifm_active |= IFM_25G_AOC; - break; - case I40E_PHY_TYPE_25GBASE_ACC: - ifmr->ifm_active |= IFM_25G_ACC; - break; - /* 40 G */ - case I40E_PHY_TYPE_40GBASE_CR4: - case I40E_PHY_TYPE_40GBASE_CR4_CU: - ifmr->ifm_active |= IFM_40G_CR4; - break; - case I40E_PHY_TYPE_40GBASE_SR4: - ifmr->ifm_active |= IFM_40G_SR4; - break; - case I40E_PHY_TYPE_40GBASE_LR4: - ifmr->ifm_active |= IFM_40G_LR4; - break; - case I40E_PHY_TYPE_XLAUI: - ifmr->ifm_active |= IFM_OTHER; - break; - case I40E_PHY_TYPE_1000BASE_KX: - ifmr->ifm_active |= IFM_1000_KX; - break; - case I40E_PHY_TYPE_SGMII: - ifmr->ifm_active |= IFM_1000_SGMII; - break; - /* ERJ: What's the difference between these? 
*/ - case I40E_PHY_TYPE_10GBASE_CR1_CU: - case I40E_PHY_TYPE_10GBASE_CR1: - ifmr->ifm_active |= IFM_10G_CR1; - break; - case I40E_PHY_TYPE_10GBASE_KX4: - ifmr->ifm_active |= IFM_10G_KX4; - break; - case I40E_PHY_TYPE_10GBASE_KR: - ifmr->ifm_active |= IFM_10G_KR; - break; - case I40E_PHY_TYPE_SFI: - ifmr->ifm_active |= IFM_10G_SFI; - break; - /* Our single 20G media type */ - case I40E_PHY_TYPE_20GBASE_KR2: - ifmr->ifm_active |= IFM_20G_KR2; - break; - case I40E_PHY_TYPE_40GBASE_KR4: - ifmr->ifm_active |= IFM_40G_KR4; - break; - case I40E_PHY_TYPE_XLPPI: - case I40E_PHY_TYPE_40GBASE_AOC: - ifmr->ifm_active |= IFM_40G_XLPPI; - break; - /* Unknown to driver */ - default: - ifmr->ifm_active |= IFM_UNKNOWN; - break; - } - /* Report flow control status as well */ - if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) - ifmr->ifm_active |= IFM_ETH_TXPAUSE; - if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) - ifmr->ifm_active |= IFM_ETH_RXPAUSE; - - IXL_PF_UNLOCK(pf); -} - -void -ixl_init(void *arg) -{ - struct ixl_pf *pf = arg; - - IXL_PF_LOCK(pf); - ixl_init_locked(pf); - IXL_PF_UNLOCK(pf); -} - -/* - * NOTE: Fortville does not support forcing media speeds. Instead, - * use the set_advertise sysctl to set the speeds Fortville - * will advertise or be allowed to operate at. - */ int -ixl_media_change(struct ifnet * ifp) -{ - struct ixl_vsi *vsi = ifp->if_softc; - struct ifmedia *ifm = &vsi->media; - - INIT_DEBUGOUT("ixl_media_change: begin"); - - if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) - return (EINVAL); - - if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n"); - - return (ENODEV); -} - -/********************************************************************* - * Ioctl entry point - * - * ixl_ioctl is called when the user wants to configure the - * interface. - * - * return 0 on success, positive on failure - **********************************************************************/ - -int -ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data) -{ - struct ixl_vsi *vsi = ifp->if_softc; - struct ixl_pf *pf = vsi->back; - struct ifreq *ifr = (struct ifreq *)data; - struct ifdrv *ifd = (struct ifdrv *)data; -#if defined(INET) || defined(INET6) - struct ifaddr *ifa = (struct ifaddr *)data; - bool avoid_reset = FALSE; -#endif - int error = 0; - - switch (command) { - - case SIOCSIFADDR: - IOCTL_DEBUGOUT("ioctl: SIOCSIFADDR (Set Interface Address)"); -#ifdef INET - if (ifa->ifa_addr->sa_family == AF_INET) - avoid_reset = TRUE; -#endif -#ifdef INET6 - if (ifa->ifa_addr->sa_family == AF_INET6) - avoid_reset = TRUE; -#endif -#if defined(INET) || defined(INET6) - /* - ** Calling init results in link renegotiation, - ** so we avoid doing it when possible. 
- */ - if (avoid_reset) { - ifp->if_flags |= IFF_UP; - if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) - ixl_init(pf); -#ifdef INET - if (!(ifp->if_flags & IFF_NOARP)) - arp_ifinit(ifp, ifa); -#endif - } else - error = ether_ioctl(ifp, command, data); - break; -#endif - case SIOCSIFMTU: - IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); - if (ifr->ifr_mtu > IXL_MAX_FRAME - - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { - error = EINVAL; - } else { - IXL_PF_LOCK(pf); - ifp->if_mtu = ifr->ifr_mtu; - vsi->max_frame_size = - ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN - + ETHER_VLAN_ENCAP_LEN; - if (ifp->if_drv_flags & IFF_DRV_RUNNING) - ixl_init_locked(pf); - IXL_PF_UNLOCK(pf); - } - break; - case SIOCSIFFLAGS: - IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); - IXL_PF_LOCK(pf); - if (ifp->if_flags & IFF_UP) { - if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { - if ((ifp->if_flags ^ pf->if_flags) & - (IFF_PROMISC | IFF_ALLMULTI)) { - ixl_set_promisc(vsi); - } - } else { - IXL_PF_UNLOCK(pf); - ixl_init(pf); - IXL_PF_LOCK(pf); - } - } else { - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - ixl_stop_locked(pf); - } - } - pf->if_flags = ifp->if_flags; - IXL_PF_UNLOCK(pf); - break; - case SIOCSDRVSPEC: - case SIOCGDRVSPEC: - IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific " - "Info)\n"); - - /* NVM update command */ - if (ifd->ifd_cmd == I40E_NVM_ACCESS) - error = ixl_handle_nvmupd_cmd(pf, ifd); - else - error = EINVAL; - break; - case SIOCADDMULTI: - IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI"); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - IXL_PF_LOCK(pf); - ixl_disable_rings_intr(vsi); - ixl_add_multi(vsi); - ixl_enable_intr(vsi); - IXL_PF_UNLOCK(pf); - } - break; - case SIOCDELMULTI: - IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI"); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - IXL_PF_LOCK(pf); - ixl_disable_rings_intr(vsi); - ixl_del_multi(vsi); - ixl_enable_intr(vsi); - IXL_PF_UNLOCK(pf); - } - break; - case SIOCSIFMEDIA: - case SIOCGIFMEDIA: - case SIOCGIFXMEDIA: - IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); - error = ifmedia_ioctl(ifp, ifr, &vsi->media, command); - break; - case SIOCSIFCAP: - { - int mask = ifr->ifr_reqcap ^ ifp->if_capenable; - IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); - - ixl_cap_txcsum_tso(vsi, ifp, mask); - - if (mask & IFCAP_RXCSUM) - ifp->if_capenable ^= IFCAP_RXCSUM; - if (mask & IFCAP_RXCSUM_IPV6) - ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; - if (mask & IFCAP_LRO) - ifp->if_capenable ^= IFCAP_LRO; - if (mask & IFCAP_VLAN_HWTAGGING) - ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; - if (mask & IFCAP_VLAN_HWFILTER) - ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; - if (mask & IFCAP_VLAN_HWTSO) - ifp->if_capenable ^= IFCAP_VLAN_HWTSO; - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - IXL_PF_LOCK(pf); - ixl_init_locked(pf); - IXL_PF_UNLOCK(pf); - } - VLAN_CAPABILITIES(ifp); - - break; - } -#if __FreeBSD_version >= 1003000 - case SIOCGI2C: - { - struct ifi2creq i2c; - int i; - - IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)"); - if (!pf->has_i2c) - return (ENOTTY); - - error = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); - if (error != 0) - break; - if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { - error = EINVAL; - break; - } - if (i2c.len > sizeof(i2c.data)) { - error = EINVAL; - break; - } - - for (i = 0; i < i2c.len; i++) - if (ixl_read_i2c_byte(pf, i2c.offset + i, - i2c.dev_addr, &i2c.data[i])) - return (EIO); - - error = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); - break; - } -#endif - default: - 
IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command); - error = ether_ioctl(ifp, command, data); - break; - } - - return (error); -} - -int ixl_find_i2c_interface(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; bool i2c_en, port_matched; u32 reg; for (int i = 0; i < 4; i++) { reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i)); i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK); port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK) >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) & BIT(hw->port); if (i2c_en && port_matched) return (i); } return (-1); } static char * ixl_phy_type_string(u32 bit_pos, bool ext) { static char * phy_types_str[32] = { "SGMII", "1000BASE-KX", "10GBASE-KX4", "10GBASE-KR", "40GBASE-KR4", "XAUI", "XFI", "SFI", "XLAUI", "XLPPI", "40GBASE-CR4", "10GBASE-CR1", "SFP+ Active DA", "QSFP+ Active DA", "Reserved (14)", "Reserved (15)", "Reserved (16)", "100BASE-TX", "1000BASE-T", "10GBASE-T", "10GBASE-SR", "10GBASE-LR", "10GBASE-SFP+Cu", "10GBASE-CR1", "40GBASE-CR4", "40GBASE-SR4", "40GBASE-LR4", "1000BASE-SX", "1000BASE-LX", "1000BASE-T Optical", "20GBASE-KR2", "Reserved (31)" }; static char * ext_phy_types_str[8] = { "25GBASE-KR", "25GBASE-CR", "25GBASE-SR", "25GBASE-LR", "25GBASE-AOC", "25GBASE-ACC", "Reserved (6)", "Reserved (7)" }; if (ext && bit_pos > 7) return "Invalid_Ext"; if (bit_pos > 31) return "Invalid"; return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos]; } +/* TODO: ERJ: I don't this is necessary anymore. */ int ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct i40e_aq_desc desc; enum i40e_status_code status; struct i40e_aqc_get_link_status *aq_link_status = (struct i40e_aqc_get_link_status *)&desc.params.raw; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE); status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); if (status) { device_printf(dev, "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status)); return (0); } static char * ixl_phy_type_string_ls(u8 val) { if (val >= 0x1F) return ixl_phy_type_string(val - 0x1F, true); else return ixl_phy_type_string(val, false); } static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; struct sbuf *buf; int error = 0; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } struct i40e_aqc_get_link_status link_status; error = ixl_aq_get_link_status(pf, &link_status); if (error) { sbuf_delete(buf); return (error); } sbuf_printf(buf, "\n" "PHY Type : 0x%02x<%s>\n" "Speed : 0x%02x\n" "Link info: 0x%02x\n" "AN info : 0x%02x\n" "Ext info : 0x%02x\n" "Loopback : 0x%02x\n" "Max Frame: %d\n" "Config : 0x%02x\n" "Power : 0x%02x", link_status.phy_type, ixl_phy_type_string_ls(link_status.phy_type), - link_status.link_speed, + link_status.link_speed, link_status.link_info, link_status.an_info, link_status.ext_info, link_status.loopback, link_status.max_frame_size, link_status.config, link_status.power_desc); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS) 
{ struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; enum i40e_status_code status; struct i40e_aq_get_phy_abilities_resp abilities; struct sbuf *buf; int error = 0; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities, NULL); if (status) { device_printf(dev, "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return (EIO); } sbuf_printf(buf, "\n" "PHY Type : %08x", abilities.phy_type); if (abilities.phy_type != 0) { sbuf_printf(buf, "<"); for (int i = 0; i < 32; i++) if ((1 << i) & abilities.phy_type) sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false)); sbuf_printf(buf, ">\n"); } sbuf_printf(buf, "PHY Ext : %02x", abilities.phy_type_ext); if (abilities.phy_type_ext != 0) { sbuf_printf(buf, "<"); for (int i = 0; i < 4; i++) if ((1 << i) & abilities.phy_type_ext) sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true)); sbuf_printf(buf, ">"); } sbuf_printf(buf, "\n"); sbuf_printf(buf, "Speed : %02x\n" "Abilities: %02x\n" "EEE cap : %04x\n" "EEER reg : %08x\n" "D3 Lpan : %02x\n" "ID : %02x %02x %02x %02x\n" "ModType : %02x %02x %02x\n" "ModType E: %01x\n" "FEC Cfg : %02x\n" "Ext CC : %02x", - abilities.link_speed, + abilities.link_speed, abilities.abilities, abilities.eee_capability, abilities.eeer_val, abilities.d3_lpan, abilities.phy_id[0], abilities.phy_id[1], abilities.phy_id[2], abilities.phy_id[3], abilities.module_type[0], abilities.module_type[1], abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5, abilities.fec_cfg_curr_mod_ext_info & 0x1F, abilities.ext_comp_code); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct ixl_vsi *vsi = &pf->vsi; struct ixl_mac_filter *f; char *buf, *buf_i; int error = 0; int ftl_len = 0; int ftl_counter = 0; int buf_len = 0; int entry_len = 42; SLIST_FOREACH(f, &vsi->ftl, next) { ftl_len++; } if (ftl_len < 1) { sysctl_handle_string(oidp, "(none)", 6, req); return (0); } buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2; - buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT); + buf = buf_i = malloc(buf_len, M_DEVBUF, M_WAITOK); sprintf(buf_i++, "\n"); SLIST_FOREACH(f, &vsi->ftl, next) { sprintf(buf_i, MAC_FORMAT ", vlan %4d, flags %#06x", MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags); buf_i += entry_len; /* don't print '\n' for last entry */ if (++ftl_counter != ftl_len) { sprintf(buf_i, "\n"); buf_i++; } } error = sysctl_handle_string(oidp, buf, strlen(buf), req); if (error) printf("sysctl error: %d\n", error); free(buf, M_DEVBUF); return error; } #define IXL_SW_RES_SIZE 0x14 int ixl_res_alloc_cmp(const void *a, const void *b) { const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two; one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a; two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b; return ((int)one->resource_type - (int)two->resource_type); } /* - * Longest string length: 25 + * Longest string length: 25 */ char * ixl_switch_res_type_string(u8 type) { - static char * ixl_switch_res_type_strings[0x14] = { + // TODO: This should be changed to static const + char * 
ixl_switch_res_type_strings[0x14] = { "VEB", "VSI", "Perfect Match MAC address", "S-tag", "(Reserved)", "Multicast hash entry", "Unicast hash entry", "VLAN", "VSI List entry", "(Reserved)", "VLAN Statistic Pool", "Mirror Rule", "Queue Set", "Inner VLAN Forward filter", "(Reserved)", "Inner MAC", "IP", "GRE/VN1 Key", "VN2 Key", "Tunneling Port" }; if (type < 0x14) return ixl_switch_res_type_strings[type]; else return "(Reserved)"; } static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; enum i40e_status_code status; int error = 0; u8 num_entries; struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE]; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(resp, sizeof(resp)); status = i40e_aq_get_switch_resource_alloc(hw, &num_entries, resp, IXL_SW_RES_SIZE, NULL); if (status) { device_printf(dev, "%s: get_switch_resource_alloc() error %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return (error); } /* Sort entries by type for display */ qsort(resp, num_entries, sizeof(struct i40e_aqc_switch_resource_alloc_element_resp), &ixl_res_alloc_cmp); sbuf_cat(buf, "\n"); sbuf_printf(buf, "# of entries: %d\n", num_entries); sbuf_printf(buf, " Type | Guaranteed | Total | Used | Un-allocated\n" " | (this) | (all) | (this) | (all) \n"); for (int i = 0; i < num_entries; i++) { sbuf_printf(buf, "%25s | %10d %5d %6d %12d", ixl_switch_res_type_string(resp[i].resource_type), resp[i].guaranteed, resp[i].total, resp[i].used, resp[i].total_unalloced); if (i < num_entries - 1) sbuf_cat(buf, "\n"); } error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } /* ** Caller must init and delete sbuf; this function will clear and ** finish it for caller. -** -** XXX: Cannot use the SEID for this, since there is no longer a -** fixed mapping between SEID and element type. 
*/ char * ixl_switch_element_string(struct sbuf *s, struct i40e_aqc_switch_config_element_resp *element) { sbuf_clear(s); switch (element->element_type) { case I40E_AQ_SW_ELEM_TYPE_MAC: sbuf_printf(s, "MAC %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_PF: sbuf_printf(s, "PF %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_VF: sbuf_printf(s, "VF %3d", element->element_info); break; case I40E_AQ_SW_ELEM_TYPE_EMP: sbuf_cat(s, "EMP"); break; case I40E_AQ_SW_ELEM_TYPE_BMC: sbuf_cat(s, "BMC"); break; case I40E_AQ_SW_ELEM_TYPE_PV: sbuf_cat(s, "PV"); break; case I40E_AQ_SW_ELEM_TYPE_VEB: sbuf_cat(s, "VEB"); break; case I40E_AQ_SW_ELEM_TYPE_PA: sbuf_cat(s, "PA"); break; case I40E_AQ_SW_ELEM_TYPE_VSI: sbuf_printf(s, "VSI %3d", element->element_info); break; default: sbuf_cat(s, "?"); break; } sbuf_finish(s); return sbuf_data(s); } static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; struct sbuf *nmbuf; enum i40e_status_code status; int error = 0; u16 next = 0; u8 aq_buf[I40E_AQ_LARGE_BUF]; struct i40e_aqc_get_switch_config_resp *sw_config; sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for sysctl output.\n"); return (ENOMEM); } status = i40e_aq_get_switch_config(hw, sw_config, sizeof(aq_buf), &next, NULL); if (status) { device_printf(dev, "%s: aq_get_switch_config() error %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); sbuf_delete(buf); return error; } if (next) device_printf(dev, "%s: TODO: get more config with SEID %d\n", __func__, next); nmbuf = sbuf_new_auto(); if (!nmbuf) { device_printf(dev, "Could not allocate sbuf for name output.\n"); sbuf_delete(buf); return (ENOMEM); } sbuf_cat(buf, "\n"); /* Assuming <= 255 elements in switch */ sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported); sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total); /* Exclude: ** Revision -- all elements are revision 1 for now */ sbuf_printf(buf, "SEID ( Name ) | Uplink | Downlink | Conn Type\n" " | | | (uplink)\n"); for (int i = 0; i < sw_config->header.num_reported; i++) { // "%4d (%8s) | %8s %8s %#8x", sbuf_printf(buf, "%4d", sw_config->element[i].seid); sbuf_cat(buf, " "); sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf, &sw_config->element[i])); sbuf_cat(buf, " | "); sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid); sbuf_cat(buf, " "); sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type); if (i < sw_config->header.num_reported - 1) sbuf_cat(buf, "\n"); } sbuf_delete(nmbuf); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; u32 reg; struct i40e_aqc_get_set_rss_key_data key_data; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key)); sbuf_cat(buf, "\n"); if (hw->mac.type == I40E_MAC_X722) { status = 
i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data); if (status) device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) { reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4); } } ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static void ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text) { int i, j, k, width; char c; if (length < 1 || buf == NULL) return; int byte_stride = 16; int lines = length / byte_stride; int rem = length % byte_stride; if (rem > 0) lines++; for (i = 0; i < lines; i++) { width = (rem > 0 && i == lines - 1) ? rem : byte_stride; sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride); for (j = 0; j < width; j++) sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]); if (width < byte_stride) { for (k = 0; k < (byte_stride - width); k++) sbuf_printf(sb, " "); } if (!text) { sbuf_printf(sb, "\n"); continue; } for (j = 0; j < width; j++) { c = (char)buf[i * byte_stride + j]; if (c < 32 || c > 126) sbuf_printf(sb, "."); else sbuf_printf(sb, "%c", c); if (j == width - 1) sbuf_printf(sb, "\n"); } } } static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; u8 hlut[512]; u32 reg; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } bzero(hlut, sizeof(hlut)); sbuf_cat(buf, "\n"); if (hw->mac.type == I40E_MAC_X722) { status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut)); if (status) device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); } else { for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) { reg = rd32(hw, I40E_PFQF_HLUT(i)); bcopy(&reg, &hlut[i << 2], 4); } } ixl_sbuf_print_bytes(buf, hlut, 512, 0, false); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; u64 hena; hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); return sysctl_handle_long(oidp, NULL, hena, req); } /* * Sysctl to disable firmware's link management * * 1 - Disable link management on this port * 0 - Re-enable link management * * On normal NVMs, firmware manages link by default.
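The ixl_sbuf_print_bytes() helper above renders a classic hex dump: 16 bytes per row with an offset label and, optionally, a printable-ASCII column. As a rough, hedged illustration of the intended output format, here is a standalone userland equivalent using plain printf instead of sbuf; it is a sketch, not driver code.

/* Userland sketch of the hex/ASCII dump format produced above. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void
print_bytes(const unsigned char *buf, int length, int label_offset, int text)
{
	const int stride = 16;

	for (int off = 0; off < length; off += stride) {
		int width = (length - off < stride) ? length - off : stride;

		printf("%4d | ", label_offset + off);
		for (int j = 0; j < stride; j++) {
			if (j < width)
				printf("%02x ", buf[off + j]);
			else
				printf("   ");	/* pad a short final row */
		}
		if (text) {
			for (int j = 0; j < width; j++) {
				unsigned char c = buf[off + j];
				putchar(isprint(c) ? c : '.');
			}
		}
		putchar('\n');
	}
}

int
main(void)
{
	const char *sample = "RSS key bytes would go here...";
	print_bytes((const unsigned char *)sample, (int)strlen(sample), 0, 1);
	return (0);
}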
*/ static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int requested_mode = -1; enum i40e_status_code status = 0; int error = 0; /* Read in new mode */ error = sysctl_handle_int(oidp, &requested_mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Check for sane value */ if (requested_mode < 0 || requested_mode > 1) { device_printf(dev, "Valid modes are 0 or 1\n"); return (EINVAL); } /* Set new mode */ status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL); if (status) { device_printf(dev, "%s: Error setting new phy debug mode %s," " aq error: %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } return (0); } /* + * Read some diagnostic data from an SFP module + * Bytes 96-99, 102-105 from device address 0xA2 + */ +static int +ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + device_t dev = pf->dev; + struct sbuf *sbuf; + int error = 0; + u8 output; + + error = pf->read_i2c_byte(pf, 0, 0xA0, &output); + if (error) { + device_printf(dev, "Error reading from i2c\n"); + return (error); + } + if (output != 0x3) { + device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output); + return (EIO); + } + + pf->read_i2c_byte(pf, 92, 0xA0, &output); + if (!(output & 0x60)) { + device_printf(dev, "Module doesn't support diagnostics: %02X\n", output); + return (EIO); + } + + sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); + + for (u8 offset = 96; offset < 100; offset++) { + pf->read_i2c_byte(pf, offset, 0xA2, &output); + sbuf_printf(sbuf, "%02X ", output); + } + for (u8 offset = 102; offset < 106; offset++) { + pf->read_i2c_byte(pf, offset, 0xA2, &output); + sbuf_printf(sbuf, "%02X ", output); + } + + sbuf_finish(sbuf); + sbuf_delete(sbuf); + + return (0); +} + +/* * Sysctl to read a byte from I2C bus. * * Input: 32-bit value: * bits 0-7: device address (0xA0 or 0xA2) * bits 8-15: offset (0-255) * bits 16-31: unused * Output: 8-bit value read */ static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int input = -1, error = 0; - - device_printf(dev, "%s: start\n", __func__); - u8 dev_addr, offset, output; /* Read in I2C read parameters */ error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Validate device address */ dev_addr = input & 0xFF; if (dev_addr != 0xA0 && dev_addr != 0xA2) { return (EINVAL); } offset = (input >> 8) & 0xFF; - error = ixl_read_i2c_byte(pf, offset, dev_addr, &output); + error = pf->read_i2c_byte(pf, offset, dev_addr, &output); if (error) return (error); device_printf(dev, "%02X\n", output); return (0); } /* * Sysctl to write a byte to the I2C bus. 
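The read_i2c_byte handler above packs its parameters into a single integer: device address in bits 0-7 and byte offset in bits 8-15, with the result printed via device_printf() (so it appears on the console/dmesg rather than in the sysctl output). As a worked example with illustrative values, reading offset 0x60 from EEPROM address 0xA2 means writing 0x60A2 to the sysctl, e.g. something like sysctl dev.ixl.0.debug.read_i2c_byte=0x60a2 on unit 0. A small sketch of the encoding:

/* Standalone sketch of the read_i2c_byte input encoding. */
#include <stdint.h>
#include <stdio.h>

/* Pack a request: offset in bits 8-15, device address in bits 0-7. */
static uint32_t
pack_i2c_read(uint8_t dev_addr, uint8_t offset)
{
	return (((uint32_t)offset << 8) | dev_addr);
}

int
main(void)
{
	/* offset 0x60 from address 0xA2 -> 0x60a2 */
	printf("0x%04x\n", (unsigned)pack_i2c_read(0xA2, 0x60));
	return (0);
}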
* * Input: 32-bit value: * bits 0-7: device address (0xA0 or 0xA2) * bits 8-15: offset (0-255) * bits 16-23: value to write * bits 24-31: unused * Output: 8-bit value written */ static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; device_t dev = pf->dev; int input = -1, error = 0; - u8 dev_addr, offset, value; /* Read in I2C write parameters */ error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Validate device address */ dev_addr = input & 0xFF; if (dev_addr != 0xA0 && dev_addr != 0xA2) { return (EINVAL); } offset = (input >> 8) & 0xFF; value = (input >> 16) & 0xFF; - error = ixl_write_i2c_byte(pf, offset, dev_addr, value); + error = pf->write_i2c_byte(pf, offset, dev_addr, value); if (error) return (error); device_printf(dev, "%02X written\n", value); return (0); } static int ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, u8 bit_pos, int *is_set) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; enum i40e_status_code status; status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, abilities, NULL); if (status) { device_printf(dev, "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos); return (0); } static int ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities, u8 bit_pos, int set) { device_t dev = pf->dev; struct i40e_hw *hw = &pf->hw; struct i40e_aq_set_phy_config config; enum i40e_status_code status; /* Set new PHY config */ memset(&config, 0, sizeof(config)); config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos); if (set) config.fec_config |= bit_pos; if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) { config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; config.phy_type = abilities->phy_type; config.phy_type_ext = abilities->phy_type_ext; config.link_speed = abilities->link_speed; config.eee_capability = abilities->eee_capability; config.eeer = abilities->eeer_val; config.low_power_ctrl = abilities->d3_lpan; status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { device_printf(dev, "%s: i40e_aq_set_phy_config() status %s, aq error %s\n", __func__, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); return (EIO); } } return (0); } static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode)); } static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode)); } static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, 
error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode)); } static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode)); } static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; int mode, error = 0; struct i40e_aq_get_phy_abilities_resp abilities; error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode); if (error) return (error); /* Read in new mode */ error = sysctl_handle_int(oidp, &mode, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode)); } static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; struct sbuf *buf; int error = 0; enum i40e_status_code status; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } u8 *final_buff; /* This amount is only necessary if reading the entire cluster into memory */ #define IXL_FINAL_BUFF_SIZE (1280 * 1024) final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK); if (final_buff == NULL) { device_printf(dev, "Could not allocate memory for output.\n"); goto out; } int final_buff_len = 0; u8 cluster_id = 1; bool more = true; u8 dump_buf[4096]; u16 curr_buff_size = 4096; u8 curr_next_table = 0; u32 curr_next_index = 0; u16 ret_buff_size; u8 ret_next_table; u32 ret_next_index; sbuf_cat(buf, "\n"); while (more) { status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size, dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL); if (status) { device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); goto free_out; } /* copy info out of temp buffer */ bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size); final_buff_len += ret_buff_size; if (ret_next_table != curr_next_table) { /* We're done with the current table; we can dump out read data. 
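The five FEC sysctls above, like most writable sysctls in this file, share one read-modify-write shape: load the current value, hand it to sysctl_handle_int(), and only apply a change when the request actually supplied a new value (req->newptr != NULL). A minimal sketch of that shape, with hypothetical names (demo_softc, demo_sysctl_feature) that are not part of the driver:

/* Minimal sketch of the read-modify-write integer sysctl pattern. */
#include <sys/param.h>
#include <sys/sysctl.h>

struct demo_softc {		/* hypothetical per-device state */
	int	feature_enabled;
};

static int
demo_sysctl_feature(SYSCTL_HANDLER_ARGS)
{
	struct demo_softc *sc = (struct demo_softc *)arg1;
	int value, error;

	value = sc->feature_enabled;		/* report the current setting */
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);			/* read-only access or copy error */

	if (value < 0 || value > 1)
		return (EINVAL);		/* sanity-check the new value */

	sc->feature_enabled = value;		/* apply it */
	return (0);
}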
*/ sbuf_printf(buf, "%d:", curr_next_table); int bytes_printed = 0; while (bytes_printed <= final_buff_len) { sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), ""); bytes_printed += 16; } sbuf_cat(buf, "\n"); /* The entire cluster has been read; we're finished */ if (ret_next_table == 0xFF) break; /* Otherwise clear the output buffer and continue reading */ bzero(final_buff, IXL_FINAL_BUFF_SIZE); final_buff_len = 0; } if (ret_next_index == 0xFFFFFFFF) ret_next_index = 0; bzero(dump_buf, sizeof(dump_buf)); curr_next_table = ret_next_table; curr_next_index = ret_next_index; } free_out: free(final_buff, M_DEVBUF); out: error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (error); } static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS) { struct ixl_pf *pf = (struct ixl_pf *)arg1; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int error = 0; int state, new_state; enum i40e_status_code status; state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0); /* Read in new mode */ error = sysctl_handle_int(oidp, &new_state, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Already in requested state */ if (new_state == state) return (error); if (new_state == 0) { if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) { device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n"); return (EINVAL); } if (pf->hw.aq.api_maj_ver < 1 || (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver < 7)) { device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n"); return (EINVAL); } i40e_aq_stop_lldp(&pf->hw, true, NULL); i40e_aq_set_dcb_parameters(&pf->hw, true, NULL); atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); } else { status = i40e_aq_start_lldp(&pf->hw, NULL); if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST) device_printf(dev, "FW LLDP agent is already running\n"); atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); } return (0); } /* * Get FW LLDP Agent status */ int ixl_get_fw_lldp_status(struct ixl_pf *pf) { enum i40e_status_code ret = I40E_SUCCESS; struct i40e_lldp_variables lldp_cfg; struct i40e_hw *hw = &pf->hw; u8 adminstatus = 0; ret = i40e_read_lldp_cfg(hw, &lldp_cfg); if (ret) return ret; /* Get the LLDP AdminStatus for the current port */ adminstatus = lldp_cfg.adminstatus >> (hw->port * 4); adminstatus &= 0xf; /* Check if LLDP agent is disabled */ if (!adminstatus) { device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n"); atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); } else atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED); return (0); } int ixl_attach_get_link_status(struct ixl_pf *pf) { struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; int error = 0; if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || (hw->aq.fw_maj_ver < 4)) { i40e_msec_delay(75); error = i40e_aq_set_link_restart_an(hw, TRUE, NULL); if (error) { device_printf(dev, "link restart failed, aq_err=%d\n", pf->hw.aq.asq_last_status); return error; } } /* Determine link state */ hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &pf->link_up); return (0); +} + +static int +ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + int requested = 0, error = 0; + + /* Read in new mode */ + error = sysctl_handle_int(oidp, &requested, 0, req); + if ((error) || (req->newptr == NULL)) + return 
(error); + + /* Initiate the PF reset later in the admin task */ + atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ); + + return (error); +} + +static int +ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + struct i40e_hw *hw = &pf->hw; + int requested = 0, error = 0; + + /* Read in new mode */ + error = sysctl_handle_int(oidp, &requested, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + + wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK); + + return (error); +} + +static int +ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + struct i40e_hw *hw = &pf->hw; + int requested = 0, error = 0; + + /* Read in new mode */ + error = sysctl_handle_int(oidp, &requested, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + + wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK); + + return (error); +} + +static int +ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + struct i40e_hw *hw = &pf->hw; + int requested = 0, error = 0; + + /* Read in new mode */ + error = sysctl_handle_int(oidp, &requested, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + + /* TODO: Find out how to bypass this */ + if (!(rd32(hw, 0x000B818C) & 0x1)) { + device_printf(pf->dev, "SW not allowed to initiate EMPR\n"); + error = EINVAL; + } else + wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK); + + return (error); +} + +/* + * Print out mapping of TX queue indexes and Rx queue indexes + * to MSI-X vectors. + */ +static int +ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS) +{ + struct ixl_pf *pf = (struct ixl_pf *)arg1; + struct ixl_vsi *vsi = &pf->vsi; + device_t dev = pf->dev; + struct sbuf *buf; + int error = 0; + + struct ixl_rx_queue *rx_que = vsi->rx_queues; + struct ixl_tx_queue *tx_que = vsi->tx_queues; + + buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); + if (!buf) { + device_printf(dev, "Could not allocate sbuf for output.\n"); + return (ENOMEM); + } + + sbuf_cat(buf, "\n"); + for (int i = 0; i < vsi->num_rx_queues; i++) { + rx_que = &vsi->rx_queues[i]; + sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix); + } + for (int i = 0; i < vsi->num_tx_queues; i++) { + tx_que = &vsi->tx_queues[i]; + sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix); + } + + error = sbuf_finish(buf); + if (error) + device_printf(dev, "Error finishing sbuf: %d\n", error); + sbuf_delete(buf); + + return (error); } Index: head/sys/dev/ixl/ixl_pf_qmgr.c =================================================================== --- head/sys/dev/ixl/ixl_pf_qmgr.c (revision 335337) +++ head/sys/dev/ixl/ixl_pf_qmgr.c (revision 335338) @@ -1,308 +1,308 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl_pf_qmgr.h" static int ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num); int ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues) { if (num_queues < 1) return (EINVAL); qmgr->num_queues = num_queues; qmgr->qinfo = malloc(num_queues * sizeof(struct ixl_pf_qmgr_qinfo), M_IXL, M_ZERO | M_WAITOK); if (qmgr->qinfo == NULL) return ENOMEM; return (0); } int ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag) { int i; int avail; int block_start; u16 alloc_size; if (qtag == NULL || num < 1) return (EINVAL); /* We have to allocate in power-of-two chunks, so get next power of two */ alloc_size = (u16)next_power_of_two(num); /* Don't try if there aren't enough queues */ avail = ixl_pf_qmgr_get_num_free(qmgr); if (avail < alloc_size) return (ENOSPC); block_start = ixl_pf_qmgr_find_free_contiguous_block(qmgr, alloc_size); if (block_start < 0) return (ENOSPC); /* Mark queues as allocated */ for (i = block_start; i < block_start + alloc_size; i++) qmgr->qinfo[i].allocated = true; bzero(qtag, sizeof(*qtag)); qtag->qmgr = qmgr; qtag->type = IXL_PF_QALLOC_CONTIGUOUS; qtag->qidx[0] = block_start; - qtag->num_allocated = num; - qtag->num_active = alloc_size; + qtag->num_allocated = alloc_size; + qtag->num_active = num; return (0); } /* * NB: indices is u16 because this is the queue index width used in the Add VSI AQ command */ int ixl_pf_qmgr_alloc_scattered(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag) { int i; int avail, count = 0; u16 alloc_size; if (qtag == NULL || num < 1 || num > 16) return (EINVAL); /* We have to allocate in power-of-two chunks, so get next power of two */ alloc_size = (u16)next_power_of_two(num); avail = ixl_pf_qmgr_get_num_free(qmgr); if (avail < alloc_size) return (ENOSPC); bzero(qtag, sizeof(*qtag)); qtag->qmgr = qmgr; qtag->type = IXL_PF_QALLOC_SCATTERED; qtag->num_active = num; qtag->num_allocated = alloc_size; for (i = 0; i < qmgr->num_queues; i++) { if (!qmgr->qinfo[i].allocated) { qtag->qidx[count] = i; count++; qmgr->qinfo[i].allocated = true; if (count == alloc_size) return (0); } } // Shouldn't get here return (EDOOFUS); } int ixl_pf_qmgr_release(struct ixl_pf_qmgr *qmgr, struct ixl_pf_qtag *qtag) { u16 i, qidx; if (qtag == NULL) return (EINVAL); if (qtag->type == IXL_PF_QALLOC_SCATTERED) { for (i = 0; i < qtag->num_allocated; i++) { qidx = qtag->qidx[i]; bzero(&qmgr->qinfo[qidx], sizeof(qmgr->qinfo[qidx])); } } else { u16 first_index = qtag->qidx[0]; for (i = 
first_index; i < first_index + qtag->num_allocated; i++) bzero(&qmgr->qinfo[i], sizeof(qmgr->qinfo[qidx])); } qtag->qmgr = NULL; return (0); } int ixl_pf_qmgr_get_num_queues(struct ixl_pf_qmgr *qmgr) { return (qmgr->num_queues); } /* * ERJ: This assumes the info array isn't longer than INT_MAX. * This assumption might cause a y3k bug or something, I'm sure. */ int ixl_pf_qmgr_get_num_free(struct ixl_pf_qmgr *qmgr) { int count = 0; for (int i = 0; i < qmgr->num_queues; i++) { if (!qmgr->qinfo[i].allocated) count++; } return (count); } int ixl_pf_qmgr_get_first_free(struct ixl_pf_qmgr *qmgr, u16 start) { int i; if (start > qmgr->num_queues - 1) return (-EINVAL); for (i = start; i < qmgr->num_queues; i++) { if (qmgr->qinfo[i].allocated) continue; else return (i); } // No free queues return (-ENOSPC); } void ixl_pf_qmgr_destroy(struct ixl_pf_qmgr *qmgr) { free(qmgr->qinfo, M_IXL); qmgr->qinfo = NULL; } void ixl_pf_qmgr_mark_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) qmgr->qinfo[pf_qidx].tx_enabled = true; else qmgr->qinfo[pf_qidx].rx_enabled = true; } void ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) qmgr->qinfo[pf_qidx].tx_enabled = false; else qmgr->qinfo[pf_qidx].rx_enabled = false; } void ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) qmgr->qinfo[pf_qidx].tx_configured = true; else qmgr->qinfo[pf_qidx].rx_configured = true; } bool ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) return (qmgr->qinfo[pf_qidx].tx_enabled); else return (qmgr->qinfo[pf_qidx].rx_enabled); } bool ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx) { MPASS(qtag != NULL); struct ixl_pf_qmgr *qmgr = qtag->qmgr; u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx); if (tx) return (qmgr->qinfo[pf_qidx].tx_configured); else return (qmgr->qinfo[pf_qidx].rx_configured); } u16 ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index) { MPASS(index < qtag->num_allocated); if (qtag->type == IXL_PF_QALLOC_CONTIGUOUS) return qtag->qidx[0] + index; else return qtag->qidx[index]; } /* Static Functions */ static int ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num) { int i; int count = 0; bool block_started = false; int possible_start; for (i = 0; i < qmgr->num_queues; i++) { if (!qmgr->qinfo[i].allocated) { if (!block_started) { block_started = true; possible_start = i; } count++; if (count == num) return (possible_start); } else { /* this queue is already allocated */ block_started = false; count = 0; } } /* Can't find a contiguous block of the requested size */ return (-1); } Index: head/sys/dev/ixl/ixl_txrx.c =================================================================== --- head/sys/dev/ixl/ixl_txrx.c (revision 335337) +++ head/sys/dev/ixl/ixl_txrx.c (revision 335338) @@ -1,2139 +1,769 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. 
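/*
 * [Editor's sketch -- not part of the commit.]  ixl_pf_qmgr_alloc_contiguous()
 * above rounds the request up to the next power of two and then does a
 * first-fit scan for that many consecutive free queue slots, which is why
 * the corrected hunk stores the rounded size in num_allocated and the
 * caller's count in num_active.  A stand-alone illustration of that
 * rounding and scan in plain C (not the driver's own helpers):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t
next_pow2(uint16_t n)
{
	uint16_t p = 1;

	while (p < n)
		p <<= 1;
	return (p);
}

/* First-fit search for 'num' consecutive free entries; -1 if none exists. */
static int
find_free_block(const bool *allocated, int total, int num)
{
	int count = 0, start = -1;

	for (int i = 0; i < total; i++) {
		if (allocated[i]) {
			count = 0;
			start = -1;
			continue;
		}
		if (start < 0)
			start = i;
		if (++count == num)
			return (start);
	}
	return (-1);
}

int
main(void)
{
	bool q[16] = { [0] = true, [1] = true, [5] = true };
	int want = 3;			/* caller asks for 3 queues...    */
	int alloc = next_pow2(want);	/* ...manager reserves 4          */

	/* Prints "reserve 4 queues at index 6": the run 2..4 is too short. */
	printf("reserve %d queues at index %d\n", alloc,
	    find_free_block(q, 16, alloc));
	return (0);
}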
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ /* ** IXL driver TX/RX Routines: ** This was seperated to allow usage by ** both the PF and VF drivers. */ #ifndef IXL_STANDALONE_BUILD #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #endif #include "ixl.h" #ifdef RSS #include #endif /* Local Prototypes */ -static void ixl_rx_checksum(struct mbuf *, u32, u32, u8); -static void ixl_refresh_mbufs(struct ixl_queue *, int); -static int ixl_xmit(struct ixl_queue *, struct mbuf **); -static int ixl_tx_setup_offload(struct ixl_queue *, - struct mbuf *, u32 *, u32 *); -static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *); -static void ixl_queue_sw_irq(struct ixl_vsi *, int); +static void ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype); -static inline void ixl_rx_discard(struct rx_ring *, int); -static inline void ixl_rx_input(struct rx_ring *, struct ifnet *, - struct mbuf *, u8); +static int ixl_isc_txd_encap(void *arg, if_pkt_info_t pi); +static void ixl_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx); +static int ixl_isc_txd_credits_update_hwb(void *arg, uint16_t txqid, bool clear); +static int ixl_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear); -static inline bool ixl_tso_detect_sparse(struct mbuf *mp); -static inline u32 ixl_get_tx_head(struct ixl_queue *que); +static void ixl_isc_rxd_refill(void *arg, if_rxd_update_t iru); +static void ixl_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, + qidx_t pidx); +static int ixl_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, + qidx_t budget); +static int ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri); -#ifdef DEV_NETMAP -#include -#if __FreeBSD_version >= 1200000 -int ixl_rx_miss, ixl_rx_miss_bufs, ixl_crcstrip = 1; -#endif -#endif /* DEV_NETMAP */ +extern int ixl_intr(void *arg); +struct if_txrx ixl_txrx_hwb = { + ixl_isc_txd_encap, + ixl_isc_txd_flush, + ixl_isc_txd_credits_update_hwb, + ixl_isc_rxd_available, + ixl_isc_rxd_pkt_get, + ixl_isc_rxd_refill, + ixl_isc_rxd_flush, + ixl_intr +}; + +struct if_txrx 
ixl_txrx_dwb = { + ixl_isc_txd_encap, + ixl_isc_txd_flush, + ixl_isc_txd_credits_update_dwb, + ixl_isc_rxd_available, + ixl_isc_rxd_pkt_get, + ixl_isc_rxd_refill, + ixl_isc_rxd_flush, + ixl_intr +}; + /* * @key key is saved into this parameter */ void ixl_get_default_rss_key(u32 *key) { MPASS(key != NULL); u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687, 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377, 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1, 0x0, 0x0, 0x0}; bcopy(rss_seed, key, IXL_RSS_KEY_SIZE); } /** * i40e_vc_stat_str - convert virtchnl status err code to a string * @hw: pointer to the HW structure * @stat_err: the status error code to convert **/ const char * i40e_vc_stat_str(struct i40e_hw *hw, enum virtchnl_status_code stat_err) { switch (stat_err) { case VIRTCHNL_STATUS_SUCCESS: return "OK"; case VIRTCHNL_ERR_PARAM: return "VIRTCHNL_ERR_PARAM"; case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH"; case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR"; case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID"; case VIRTCHNL_STATUS_NOT_SUPPORTED: return "VIRTCHNL_STATUS_NOT_SUPPORTED"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); return hw->err_str; } -/* - * PCI BUSMASTER needs to be set for proper operation. - */ -void -ixl_set_busmaster(device_t dev) +static bool +ixl_is_tx_desc_done(struct tx_ring *txr, int idx) { - u16 pci_cmd_word; - - pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); - pci_cmd_word |= PCIM_CMD_BUSMASTEREN; - pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); + return (((txr->tx_base[idx].cmd_type_offset_bsz >> I40E_TXD_QW1_DTYPE_SHIFT) + & I40E_TXD_QW1_DTYPE_MASK) == I40E_TX_DESC_DTYPE_DESC_DONE); } -/* - * Rewrite the ENABLE bit in the MSIX control register - */ -void -ixl_set_msix_enable(device_t dev) +static int +ixl_tso_detect_sparse(bus_dma_segment_t *segs, int nsegs, if_pkt_info_t pi) { - int msix_ctrl, rid; + int count, curseg, i, hlen, segsz, seglen, tsolen; - pci_find_cap(dev, PCIY_MSIX, &rid); - rid += PCIR_MSIX_CTRL; - msix_ctrl = pci_read_config(dev, rid, 2); - msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; - pci_write_config(dev, rid, msix_ctrl, 2); -} + if (nsegs <= IXL_MAX_TX_SEGS-2) + return (0); + segsz = pi->ipi_tso_segsz; + curseg = count = 0; + hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen; + tsolen = pi->ipi_len - hlen; -/* -** Multiqueue Transmit driver -*/ -int -ixl_mq_start(struct ifnet *ifp, struct mbuf *m) -{ - struct ixl_vsi *vsi = ifp->if_softc; - struct ixl_queue *que; - struct tx_ring *txr; - int err, i; -#ifdef RSS - u32 bucket_id; -#endif + i = 0; + curseg = segs[0].ds_len; + while (hlen > 0) { + count++; + if (count > IXL_MAX_TX_SEGS - 2) + return (1); + if (curseg == 0) { + i++; + if (__predict_false(i == nsegs)) + return (1); - /* - * Which queue to use: - * - * When doing RSS, map it to the same outbound - * queue as the incoming flow would be mapped to. - * If everything is setup correctly, it should be - * the same bucket that the current CPU we're on is. 
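/*
 * [Editor's sketch -- not part of the commit.]  The two if_txrx tables
 * above differ only in their credits-update callback: iflib is handed
 * ixl_txrx_hwb when the ring reports completions through head write-back
 * and ixl_txrx_dwb when they are read back from the descriptors
 * themselves.  Selecting an ops table once, at attach time, by capability
 * is the usual pattern; a stand-alone illustration with made-up names
 * (not iflib's own types):
 */
#include <stdio.h>

struct txrx_ops {
	int	(*credits_update)(void *sc, int qid, int clear);
};

static int
hwb_credits(void *sc, int qid, int clear)
{
	return (8);	/* pretend 8 descriptors completed */
}

static int
dwb_credits(void *sc, int qid, int clear)
{
	return (4);	/* pretend 4 descriptors completed */
}

static const struct txrx_ops ops_hwb = { .credits_update = hwb_credits };
static const struct txrx_ops ops_dwb = { .credits_update = dwb_credits };

int
main(void)
{
	int head_writeback = 1;		/* cf. vsi->enable_head_writeback */
	const struct txrx_ops *ops = head_writeback ? &ops_hwb : &ops_dwb;

	printf("credits: %d\n", ops->credits_update(NULL, 0, 1));
	return (0);
}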
- */ - if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { -#ifdef RSS - if (rss_hash2bucket(m->m_pkthdr.flowid, - M_HASHTYPE_GET(m), &bucket_id) == 0) { - i = bucket_id % vsi->num_queues; - } else -#endif - i = m->m_pkthdr.flowid % vsi->num_queues; - } else - i = curcpu % vsi->num_queues; - - que = &vsi->queues[i]; - txr = &que->txr; - - err = drbr_enqueue(ifp, txr->br, m); - if (err) - return (err); - if (IXL_TX_TRYLOCK(txr)) { - ixl_mq_start_locked(ifp, txr); - IXL_TX_UNLOCK(txr); - } else - taskqueue_enqueue(que->tq, &que->tx_task); - - return (0); -} - -int -ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) -{ - struct ixl_queue *que = txr->que; - struct ixl_vsi *vsi = que->vsi; - struct mbuf *next; - int err = 0; - - - if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || - vsi->link_active == 0) - return (ENETDOWN); - - /* Process the transmit queue */ - while ((next = drbr_peek(ifp, txr->br)) != NULL) { - if ((err = ixl_xmit(que, &next)) != 0) { - if (next == NULL) - drbr_advance(ifp, txr->br); - else - drbr_putback(ifp, txr->br, next); - break; + curseg = segs[i].ds_len; } - drbr_advance(ifp, txr->br); - /* Send a copy of the frame to the BPF listener */ - ETHER_BPF_MTAP(ifp, next); - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - break; + seglen = min(curseg, hlen); + curseg -= seglen; + hlen -= seglen; + // printf("H:seglen = %d, count=%d\n", seglen, count); } - - if (txr->avail < IXL_TX_CLEANUP_THRESHOLD) - ixl_txeof(que); - - return (err); -} - -/* - * Called from a taskqueue to drain queued transmit packets. - */ -void -ixl_deferred_mq_start(void *arg, int pending) -{ - struct ixl_queue *que = arg; - struct tx_ring *txr = &que->txr; - struct ixl_vsi *vsi = que->vsi; - struct ifnet *ifp = vsi->ifp; - - IXL_TX_LOCK(txr); - if (!drbr_empty(ifp, txr->br)) - ixl_mq_start_locked(ifp, txr); - IXL_TX_UNLOCK(txr); -} - -/* -** Flush all queue ring buffers -*/ -void -ixl_qflush(struct ifnet *ifp) -{ - struct ixl_vsi *vsi = ifp->if_softc; - - for (int i = 0; i < vsi->num_queues; i++) { - struct ixl_queue *que = &vsi->queues[i]; - struct tx_ring *txr = &que->txr; - struct mbuf *m; - IXL_TX_LOCK(txr); - while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) - m_freem(m); - IXL_TX_UNLOCK(txr); - } - if_qflush(ifp); -} - -static inline bool -ixl_tso_detect_sparse(struct mbuf *mp) -{ - struct mbuf *m; - int num, mss; - - num = 0; - mss = mp->m_pkthdr.tso_segsz; - - /* Exclude first mbuf; assume it contains all headers */ - for (m = mp->m_next; m != NULL; m = m->m_next) { - if (m == NULL) - break; - num++; - mss -= m->m_len % mp->m_pkthdr.tso_segsz; - - if (num > IXL_SPARSE_CHAIN) - return (true); - if (mss < 1) { - num = (mss == 0) ? 0 : 1; - mss += mp->m_pkthdr.tso_segsz; - } - } - - return (false); -} - - -/********************************************************************* - * - * This routine maps the mbufs to tx descriptors, allowing the - * TX engine to transmit the packets. 
- * - return 0 on success, positive on failure - * - **********************************************************************/ -#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) - -static int -ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp) -{ - struct ixl_vsi *vsi = que->vsi; - struct i40e_hw *hw = vsi->hw; - struct tx_ring *txr = &que->txr; - struct ixl_tx_buf *buf; - struct i40e_tx_desc *txd = NULL; - struct mbuf *m_head, *m; - int i, j, error, nsegs; - int first, last = 0; - u16 vtag = 0; - u32 cmd, off; - bus_dmamap_t map; - bus_dma_tag_t tag; - bus_dma_segment_t segs[IXL_MAX_TSO_SEGS]; - - cmd = off = 0; - m_head = *m_headp; - - /* - * Important to capture the first descriptor - * used because it will contain the index of - * the one we tell the hardware to report back - */ - first = txr->next_avail; - buf = &txr->buffers[first]; - map = buf->map; - tag = txr->tx_tag; - - if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { - /* Use larger mapping for TSO */ - tag = txr->tso_tag; - if (ixl_tso_detect_sparse(m_head)) { - m = m_defrag(m_head, M_NOWAIT); - if (m == NULL) { - m_freem(*m_headp); - *m_headp = NULL; - return (ENOBUFS); + while (tsolen > 0) { + segsz = pi->ipi_tso_segsz; + while (segsz > 0 && tsolen != 0) { + count++; + if (count > IXL_MAX_TX_SEGS - 2) { + // printf("bad: count = %d\n", count); + return (1); } - *m_headp = m; + if (curseg == 0) { + i++; + if (__predict_false(i == nsegs)) { + // printf("bad: tsolen = %d", tsolen); + return (1); + } + curseg = segs[i].ds_len; + } + seglen = min(curseg, segsz); + segsz -= seglen; + curseg -= seglen; + tsolen -= seglen; + // printf("D:seglen = %d, count=%d\n", seglen, count); } + count = 0; } - /* - * Map the packet for DMA. - */ - error = bus_dmamap_load_mbuf_sg(tag, map, - *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); - - if (error == EFBIG) { - struct mbuf *m; - - m = m_defrag(*m_headp, M_NOWAIT); - if (m == NULL) { - que->mbuf_defrag_failed++; - m_freem(*m_headp); - *m_headp = NULL; - return (ENOBUFS); - } - *m_headp = m; - - /* Try it again */ - error = bus_dmamap_load_mbuf_sg(tag, map, - *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); - - if (error != 0) { - que->tx_dmamap_failed++; - m_freem(*m_headp); - *m_headp = NULL; - return (error); - } - } else if (error != 0) { - que->tx_dmamap_failed++; - m_freem(*m_headp); - *m_headp = NULL; - return (error); - } - - /* Make certain there are enough descriptors */ - if (nsegs > txr->avail - 2) { - txr->no_desc++; - error = ENOBUFS; - goto xmit_fail; - } - m_head = *m_headp; - - /* Set up the TSO/CSUM offload */ - if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { - error = ixl_tx_setup_offload(que, m_head, &cmd, &off); - if (error) - goto xmit_fail; - } - - cmd |= I40E_TX_DESC_CMD_ICRC; - /* Grab the VLAN tag */ - if (m_head->m_flags & M_VLANTAG) { - cmd |= I40E_TX_DESC_CMD_IL2TAG1; - vtag = htole16(m_head->m_pkthdr.ether_vtag); - } - - i = txr->next_avail; - for (j = 0; j < nsegs; j++) { - bus_size_t seglen; - - buf = &txr->buffers[i]; - buf->tag = tag; /* Keep track of the type tag */ - txd = &txr->base[i]; - seglen = segs[j].ds_len; - - txd->buffer_addr = htole64(segs[j].ds_addr); - txd->cmd_type_offset_bsz = - htole64(I40E_TX_DESC_DTYPE_DATA - | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT) - | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT) - | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) - | ((u64)vtag << I40E_TXD_QW1_L2TAG1_SHIFT)); - - last = i; /* descriptor that will get completion IRQ */ - - if (++i == que->num_tx_desc) - i = 0; - - buf->m_head = NULL; - buf->eop_index = -1; - } - /* 
Set the last descriptor for report */ - txd->cmd_type_offset_bsz |= - htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT)); - txr->avail -= nsegs; - txr->next_avail = i; - - buf->m_head = m_head; - /* Swap the dma map between the first and last descriptor. - * The descriptor that gets checked on completion will now - * have the real map from the first descriptor. - */ - txr->buffers[first].map = buf->map; - buf->map = map; - bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE); - - /* Set the index of the descriptor that will be marked done */ - buf = &txr->buffers[first]; - buf->eop_index = last; - - bus_dmamap_sync(txr->dma.tag, txr->dma.map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - /* - * Advance the Transmit Descriptor Tail (Tdt), this tells the - * hardware that this frame is available to transmit. - */ - ++txr->total_packets; - wr32(hw, txr->tail, i); - - /* Mark outstanding work */ - atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG); - return (0); - -xmit_fail: - bus_dmamap_unload(tag, buf->map); - return (error); + return (0); } - /********************************************************************* * - * Allocate memory for tx_buffer structures. The tx_buffer stores all - * the information needed to transmit a packet on the wire. This is - * called only once at attach, setup is done every reset. - * - **********************************************************************/ -int -ixl_allocate_tx_data(struct ixl_queue *que) -{ - struct tx_ring *txr = &que->txr; - struct ixl_vsi *vsi = que->vsi; - device_t dev = vsi->dev; - struct ixl_tx_buf *buf; - int i, error = 0; - - /* - * Setup DMA descriptor areas. - */ - if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ - 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - IXL_TSO_SIZE, /* maxsize */ - IXL_MAX_TX_SEGS, /* nsegments */ - IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */ - 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &txr->tx_tag))) { - device_printf(dev,"Unable to allocate TX DMA tag\n"); - return (error); - } - - /* Make a special tag for TSO */ - if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ - 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - IXL_TSO_SIZE, /* maxsize */ - IXL_MAX_TSO_SEGS, /* nsegments */ - IXL_MAX_DMA_SEG_SIZE, /* maxsegsize */ - 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &txr->tso_tag))) { - device_printf(dev,"Unable to allocate TX TSO DMA tag\n"); - goto free_tx_dma; - } - - if (!(txr->buffers = - (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) * - que->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate tx_buffer memory\n"); - error = ENOMEM; - goto free_tx_tso_dma; - } - - /* Create the descriptor buffer default dma maps */ - buf = txr->buffers; - for (i = 0; i < que->num_tx_desc; i++, buf++) { - buf->tag = txr->tx_tag; - error = bus_dmamap_create(buf->tag, 0, &buf->map); - if (error != 0) { - device_printf(dev, "Unable to create TX DMA map\n"); - goto free_buffers; - } - } - - return 0; - -free_buffers: - while (i--) { - buf--; - bus_dmamap_destroy(buf->tag, buf->map); - } - - free(txr->buffers, M_DEVBUF); - txr->buffers = NULL; -free_tx_tso_dma: - bus_dma_tag_destroy(txr->tso_tag); - txr->tso_tag = NULL; -free_tx_dma: - bus_dma_tag_destroy(txr->tx_tag); - txr->tx_tag = NULL; - - return (error); -} - - 
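/*
 * [Editor's sketch -- not part of the commit.]  The new
 * ixl_tso_detect_sparse() earlier in this hunk walks the DMA segment list
 * and asks, for the header region and then for each MSS-sized chunk of
 * TSO payload, how many descriptors that chunk would straddle; the
 * hardware tolerates at most IXL_MAX_TX_SEGS - 2 data descriptors per
 * chunk, so ixl_isc_txd_encap() returns EFBIG and lets iflib defragment
 * the chain when the limit would be exceeded.  A compact stand-alone
 * model of that per-chunk walk (segment sizes and the limit are made up):
 */
#include <stdio.h>

#define	MAX_DESC_PER_CHUNK	6	/* stand-in for IXL_MAX_TX_SEGS - 2 */

/* Return 1 if any 'chunk'-byte run drawn in order from segs[] is too sparse. */
static int
chunk_is_sparse(const int *segs, int nsegs, int total, int chunk)
{
	int i = 0, left = segs[0];

	while (total > 0) {
		int need = chunk < total ? chunk : total;
		int count = 0;

		while (need > 0) {
			if (left == 0) {
				if (++i == nsegs)
					return (1);	/* ran out of data   */
				left = segs[i];
			}
			int take = left < need ? left : need;
			left -= take;
			need -= take;
			total -= take;
			if (++count > MAX_DESC_PER_CHUNK)
				return (1);		/* too many segments */
		}
	}
	return (0);
}

int
main(void)
{
	int segs[8] = { 128, 128, 128, 128, 128, 128, 128, 128 };

	/* A single 1024-byte chunk touches 8 segments: sparse (prints 1). */
	printf("sparse: %d\n", chunk_is_sparse(segs, 8, 1024, 1024));
	return (0);
}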
-/********************************************************************* - * - * (Re)Initialize a queue transmit ring. - * - called by init, it clears the descriptor ring, - * and frees any stale mbufs - * - **********************************************************************/ -void -ixl_init_tx_ring(struct ixl_queue *que) -{ -#ifdef DEV_NETMAP - struct netmap_adapter *na = NA(que->vsi->ifp); - struct netmap_slot *slot; -#endif /* DEV_NETMAP */ - struct tx_ring *txr = &que->txr; - struct ixl_tx_buf *buf; - - /* Clear the old ring contents */ - IXL_TX_LOCK(txr); - -#ifdef DEV_NETMAP - /* - * (under lock): if in netmap mode, do some consistency - * checks and set slot to entry 0 of the netmap ring. - */ - slot = netmap_reset(na, NR_TX, que->me, 0); -#endif /* DEV_NETMAP */ - - bzero((void *)txr->base, - (sizeof(struct i40e_tx_desc)) * que->num_tx_desc); - - /* Reset indices */ - txr->next_avail = 0; - txr->next_to_clean = 0; - - /* Reset watchdog status */ - txr->watchdog_timer = 0; - - /* Free any existing tx mbufs. */ - buf = txr->buffers; - for (int i = 0; i < que->num_tx_desc; i++, buf++) { - if (buf->m_head != NULL) { - bus_dmamap_sync(buf->tag, buf->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(buf->tag, buf->map); - m_freem(buf->m_head); - buf->m_head = NULL; - } -#ifdef DEV_NETMAP - /* - * In netmap mode, set the map for the packet buffer. - * NOTE: Some drivers (not this one) also need to set - * the physical buffer address in the NIC ring. - * netmap_idx_n2k() maps a nic index, i, into the corresponding - * netmap slot index, si - */ - if (slot) { - int si = netmap_idx_n2k(na->tx_rings[que->me], i); - netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si)); - } -#endif /* DEV_NETMAP */ - /* Clear the EOP index */ - buf->eop_index = -1; - } - - /* Set number of descriptors available */ - txr->avail = que->num_tx_desc; - - bus_dmamap_sync(txr->dma.tag, txr->dma.map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - IXL_TX_UNLOCK(txr); -} - - -/********************************************************************* - * - * Free transmit ring related data structures. 
- * - **********************************************************************/ -void -ixl_free_que_tx(struct ixl_queue *que) -{ - struct tx_ring *txr = &que->txr; - struct ixl_tx_buf *buf; - - INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me); - - for (int i = 0; i < que->num_tx_desc; i++) { - buf = &txr->buffers[i]; - if (buf->m_head != NULL) { - bus_dmamap_sync(buf->tag, buf->map, - BUS_DMASYNC_POSTWRITE); - m_freem(buf->m_head); - buf->m_head = NULL; - } - bus_dmamap_unload(buf->tag, buf->map); - bus_dmamap_destroy(buf->tag, buf->map); - } - if (txr->buffers != NULL) { - free(txr->buffers, M_DEVBUF); - txr->buffers = NULL; - } - if (txr->tx_tag != NULL) { - bus_dma_tag_destroy(txr->tx_tag); - txr->tx_tag = NULL; - } - if (txr->tso_tag != NULL) { - bus_dma_tag_destroy(txr->tso_tag); - txr->tso_tag = NULL; - } - - INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me); - return; -} - -/********************************************************************* - * * Setup descriptor for hw offloads * **********************************************************************/ -static int -ixl_tx_setup_offload(struct ixl_queue *que, - struct mbuf *mp, u32 *cmd, u32 *off) +static void +ixl_tx_setup_offload(struct ixl_tx_queue *que, + if_pkt_info_t pi, u32 *cmd, u32 *off) { - struct ether_vlan_header *eh; + switch (pi->ipi_etype) { #ifdef INET - struct ip *ip = NULL; -#endif - struct tcphdr *th = NULL; -#ifdef INET6 - struct ip6_hdr *ip6; -#endif - int elen, ip_hlen = 0, tcp_hlen; - u16 etype; - u8 ipproto = 0; - bool tso = FALSE; - - /* Set up the TSO context descriptor if required */ - if (mp->m_pkthdr.csum_flags & CSUM_TSO) { - tso = ixl_tso_setup(que, mp); - if (tso) - ++que->tso; - else - return (ENXIO); - } - - /* - * Determine where frame payload starts. - * Jump over vlan headers if already present, - * helpful for QinQ too. 
- */ - eh = mtod(mp, struct ether_vlan_header *); - if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { - etype = ntohs(eh->evl_proto); - elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - } else { - etype = ntohs(eh->evl_encap_proto); - elen = ETHER_HDR_LEN; - } - - switch (etype) { -#ifdef INET case ETHERTYPE_IP: - ip = (struct ip *)(mp->m_data + elen); - ip_hlen = ip->ip_hl << 2; - ipproto = ip->ip_p; - th = (struct tcphdr *)((caddr_t)ip + ip_hlen); - /* The IP checksum must be recalculated with TSO */ - if (tso) + if (pi->ipi_csum_flags & CSUM_IP) *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; else *cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; break; #endif #ifdef INET6 case ETHERTYPE_IPV6: - ip6 = (struct ip6_hdr *)(mp->m_data + elen); - ip_hlen = sizeof(struct ip6_hdr); - ipproto = ip6->ip6_nxt; - th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); *cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; break; #endif default: break; } - *off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; - *off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; + *off |= (pi->ipi_ehdrlen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + *off |= (pi->ipi_ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; - switch (ipproto) { + switch (pi->ipi_ipproto) { case IPPROTO_TCP: - tcp_hlen = th->th_off << 2; - if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) { + if (pi->ipi_csum_flags & IXL_CSUM_TCP) { *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *off |= (tcp_hlen >> 2) << + *off |= (pi->ipi_tcp_hlen >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } break; case IPPROTO_UDP: - if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) { + if (pi->ipi_csum_flags & IXL_CSUM_UDP) { *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; *off |= (sizeof(struct udphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } break; case IPPROTO_SCTP: - if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) { + if (pi->ipi_csum_flags & IXL_CSUM_SCTP) { *cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; *off |= (sizeof(struct sctphdr) >> 2) << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; } /* Fall Thru */ default: break; } - - return (0); } - /********************************************************************** * * Setup context for hardware segmentation offload (TSO) * **********************************************************************/ -static bool -ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp) +static int +ixl_tso_setup(struct tx_ring *txr, if_pkt_info_t pi) { - struct tx_ring *txr = &que->txr; + if_softc_ctx_t scctx; struct i40e_tx_context_desc *TXD; - struct ixl_tx_buf *buf; u32 cmd, mss, type, tsolen; - u16 etype; - int idx, elen, ip_hlen, tcp_hlen; - struct ether_vlan_header *eh; -#ifdef INET - struct ip *ip; -#endif -#ifdef INET6 - struct ip6_hdr *ip6; -#endif -#if defined(INET6) || defined(INET) - struct tcphdr *th; -#endif + int idx; u64 type_cmd_tso_mss; - /* - * Determine where frame payload starts. 
- * Jump over vlan headers if already present - */ - eh = mtod(mp, struct ether_vlan_header *); - if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { - elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - etype = eh->evl_proto; - } else { - elen = ETHER_HDR_LEN; - etype = eh->evl_encap_proto; - } + idx = pi->ipi_pidx; + TXD = (struct i40e_tx_context_desc *) &txr->tx_base[idx]; + tsolen = pi->ipi_len - (pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen); + scctx = txr->que->vsi->shared; - switch (ntohs(etype)) { -#ifdef INET6 - case ETHERTYPE_IPV6: - ip6 = (struct ip6_hdr *)(mp->m_data + elen); - if (ip6->ip6_nxt != IPPROTO_TCP) - return (ENXIO); - ip_hlen = sizeof(struct ip6_hdr); - th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); - th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); - tcp_hlen = th->th_off << 2; - /* - * The corresponding flag is set by the stack in the IPv4 - * TSO case, but not in IPv6 (at least in FreeBSD 10.2). - * So, set it here because the rest of the flow requires it. - */ - mp->m_pkthdr.csum_flags |= CSUM_TCP_IPV6; - break; -#endif -#ifdef INET - case ETHERTYPE_IP: - ip = (struct ip *)(mp->m_data + elen); - if (ip->ip_p != IPPROTO_TCP) - return (ENXIO); - ip->ip_sum = 0; - ip_hlen = ip->ip_hl << 2; - th = (struct tcphdr *)((caddr_t)ip + ip_hlen); - th->th_sum = in_pseudo(ip->ip_src.s_addr, - ip->ip_dst.s_addr, htons(IPPROTO_TCP)); - tcp_hlen = th->th_off << 2; - break; -#endif - default: - printf("%s: CSUM_TSO but no supported IP version (0x%04x)", - __func__, ntohs(etype)); - return FALSE; - } - - /* Ensure we have at least the IP+TCP header in the first mbuf. */ - if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr)) - return FALSE; - - idx = txr->next_avail; - buf = &txr->buffers[idx]; - TXD = (struct i40e_tx_context_desc *) &txr->base[idx]; - tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen); - type = I40E_TX_DESC_DTYPE_CONTEXT; cmd = I40E_TX_CTX_DESC_TSO; /* TSO MSS must not be less than 64 */ - if (mp->m_pkthdr.tso_segsz < IXL_MIN_TSO_MSS) { - que->mss_too_small++; - mp->m_pkthdr.tso_segsz = IXL_MIN_TSO_MSS; + if (pi->ipi_tso_segsz < IXL_MIN_TSO_MSS) { + txr->mss_too_small++; + pi->ipi_tso_segsz = IXL_MIN_TSO_MSS; } - mss = mp->m_pkthdr.tso_segsz; + mss = pi->ipi_tso_segsz; type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) | ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT); TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss); TXD->tunneling_params = htole32(0); - buf->m_head = NULL; - buf->eop_index = -1; + txr->que->tso++; - if (++idx == que->num_tx_desc) - idx = 0; - - txr->avail--; - txr->next_avail = idx; - - return TRUE; + return ((idx + 1) & (scctx->isc_ntxd[0]-1)); } -/* - * ixl_get_tx_head - Retrieve the value from the - * location the HW records its HEAD index - */ -static inline u32 -ixl_get_tx_head(struct ixl_queue *que) -{ - struct tx_ring *txr = &que->txr; - void *head = &txr->base[que->num_tx_desc]; - return LE32_TO_CPU(*(volatile __le32 *)head); -} +/********************************************************************* + * + * This routine maps the mbufs to tx descriptors, allowing the + * TX engine to transmit the packets. 
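/*
 * [Editor's sketch -- not part of the commit.]  The new ixl_tso_setup()
 * above consumes one ring slot for a context descriptor whose single
 * 64-bit word packs the descriptor type, the TSO command, the TSO payload
 * length and the MSS at fixed bit offsets, after clamping the MSS to the
 * 64-byte hardware minimum.  A stand-alone model of that packing; the
 * shift values below are illustrative stand-ins, not the driver's
 * I40E_TXD_CTX_QW1_* constants:
 */
#include <stdint.h>
#include <stdio.h>

#define	CTX_DTYPE_SHIFT		0	/* descriptor type field        */
#define	CTX_CMD_SHIFT		4	/* command bits (TSO enable)    */
#define	CTX_TSOLEN_SHIFT	30	/* TSO payload length           */
#define	CTX_MSS_SHIFT		50	/* maximum segment size         */
#define	MIN_TSO_MSS		64	/* hardware rejects smaller MSS */

static uint64_t
pack_tso_ctx(uint64_t dtype, uint64_t cmd, uint32_t tsolen, uint32_t mss)
{
	if (mss < MIN_TSO_MSS)
		mss = MIN_TSO_MSS;
	return ((dtype << CTX_DTYPE_SHIFT) |
	    (cmd << CTX_CMD_SHIFT) |
	    ((uint64_t)tsolen << CTX_TSOLEN_SHIFT) |
	    ((uint64_t)mss << CTX_MSS_SHIFT));
}

int
main(void)
{
	/* 64 kB of TSO payload minus headers, 1448-byte MSS */
	printf("ctx qword = 0x%016llx\n",
	    (unsigned long long)pack_tso_ctx(1, 1, 65536 - 54, 1448));
	return (0);
}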
+ * - return 0 on success, positive on failure + * + **********************************************************************/ +#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) -/********************************************************************** - * - * Get index of last used descriptor/buffer from hardware, and clean - * the descriptors/buffers up to that index. - * - **********************************************************************/ -static bool -ixl_txeof_hwb(struct ixl_queue *que) +static int +ixl_isc_txd_encap(void *arg, if_pkt_info_t pi) { - struct tx_ring *txr = &que->txr; - u32 first, last, head, done; - struct ixl_tx_buf *buf; - struct i40e_tx_desc *tx_desc, *eop_desc; + struct ixl_vsi *vsi = arg; + if_softc_ctx_t scctx = vsi->shared; + struct ixl_tx_queue *que = &vsi->tx_queues[pi->ipi_qsidx]; + struct tx_ring *txr = &que->txr; + int nsegs = pi->ipi_nsegs; + bus_dma_segment_t *segs = pi->ipi_segs; + struct i40e_tx_desc *txd = NULL; + int i, j, mask, pidx_last; + u32 cmd, off, tx_intr; - mtx_assert(&txr->mtx, MA_OWNED); + // device_printf(iflib_get_dev(vsi->ctx), "%s: begin\n", __func__); -#ifdef DEV_NETMAP - // XXX todo: implement moderation - if (netmap_tx_irq(que->vsi->ifp, que->me)) - return FALSE; -#endif /* DEF_NETMAP */ + cmd = off = 0; + i = pi->ipi_pidx; - /* These are not the descriptors you seek, move along :) */ - if (txr->avail == que->num_tx_desc) { - atomic_store_rel_32(&txr->watchdog_timer, 0); - return FALSE; - } + tx_intr = (pi->ipi_flags & IPI_TX_INTR); +#if 0 + device_printf(iflib_get_dev(vsi->ctx), "%s: tx_intr %d\n", __func__, tx_intr); +#endif - first = txr->next_to_clean; - buf = &txr->buffers[first]; - tx_desc = (struct i40e_tx_desc *)&txr->base[first]; - last = buf->eop_index; - if (last == -1) - return FALSE; - eop_desc = (struct i40e_tx_desc *)&txr->base[last]; - - /* Sync DMA before reading head index from ring */ - bus_dmamap_sync(txr->dma.tag, txr->dma.map, - BUS_DMASYNC_POSTREAD); - - /* Get the Head WB value */ - head = ixl_get_tx_head(que); - - /* - ** Get the index of the first descriptor - ** BEYOND the EOP and call that 'done'. - ** I do this so the comparison in the - ** inner while loop below can be simple - */ - if (++last == que->num_tx_desc) last = 0; - done = last; - - /* - ** The HEAD index of the ring is written in a - ** defined location, this rather than a done bit - ** is what is used to keep track of what must be - ** 'cleaned'. 
- */ - while (first != head) { - /* We clean the range of the packet */ - while (first != done) { - ++txr->avail; - - if (buf->m_head) { - txr->bytes += /* for ITR adjustment */ - buf->m_head->m_pkthdr.len; - txr->tx_bytes += /* for TX stats */ - buf->m_head->m_pkthdr.len; - bus_dmamap_sync(buf->tag, - buf->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(buf->tag, - buf->map); - m_freem(buf->m_head); - buf->m_head = NULL; - } - buf->eop_index = -1; - - if (++first == que->num_tx_desc) - first = 0; - - buf = &txr->buffers[first]; - tx_desc = &txr->base[first]; + /* Set up the TSO/CSUM offload */ + if (pi->ipi_csum_flags & CSUM_OFFLOAD) { + /* Set up the TSO context descriptor if required */ + if (pi->ipi_csum_flags & CSUM_TSO) { + if (ixl_tso_detect_sparse(segs, nsegs, pi)) + return (EFBIG); + i = ixl_tso_setup(txr, pi); } - ++txr->packets; - /* If a packet was successfully cleaned, reset the watchdog timer */ - atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG); - /* See if there is more work now */ - last = buf->eop_index; - if (last != -1) { - eop_desc = &txr->base[last]; - /* Get next done point */ - if (++last == que->num_tx_desc) last = 0; - done = last; - } else - break; + ixl_tx_setup_offload(que, pi, &cmd, &off); } - bus_dmamap_sync(txr->dma.tag, txr->dma.map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + if (pi->ipi_mflags & M_VLANTAG) + cmd |= I40E_TX_DESC_CMD_IL2TAG1; - txr->next_to_clean = first; + cmd |= I40E_TX_DESC_CMD_ICRC; + mask = scctx->isc_ntxd[0] - 1; + for (j = 0; j < nsegs; j++) { + bus_size_t seglen; - /* - * If there are no pending descriptors, clear the timeout. - */ - if (txr->avail == que->num_tx_desc) { - atomic_store_rel_32(&txr->watchdog_timer, 0); - return FALSE; - } + txd = &txr->tx_base[i]; + seglen = segs[j].ds_len; - return TRUE; -} + txd->buffer_addr = htole64(segs[j].ds_addr); + txd->cmd_type_offset_bsz = + htole64(I40E_TX_DESC_DTYPE_DATA + | ((u64)cmd << I40E_TXD_QW1_CMD_SHIFT) + | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT) + | ((u64)seglen << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) + | ((u64)htole16(pi->ipi_vtag) << I40E_TXD_QW1_L2TAG1_SHIFT)); -/********************************************************************** - * - * Use index kept by driver and the flag on each descriptor to find used - * descriptor/buffers and clean them up for re-use. - * - * This method of reclaiming descriptors is current incompatible with - * DEV_NETMAP. - * - * Returns TRUE if there are more descriptors to be cleaned after this - * function exits. 
- * - **********************************************************************/ -static bool -ixl_txeof_dwb(struct ixl_queue *que) -{ - struct tx_ring *txr = &que->txr; - u32 first, last, done; - u32 limit = 256; - struct ixl_tx_buf *buf; - struct i40e_tx_desc *tx_desc, *eop_desc; - - mtx_assert(&txr->mtx, MA_OWNED); - - /* There are no descriptors to clean */ - if (txr->avail == que->num_tx_desc) { - atomic_store_rel_32(&txr->watchdog_timer, 0); - return FALSE; + txr->tx_bytes += seglen; + pidx_last = i; + i = (i+1) & mask; } + /* Set the last descriptor for report */ + txd->cmd_type_offset_bsz |= + htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT)); + /* Add to report status array (if using TX interrupts) */ + if (!vsi->enable_head_writeback && tx_intr) { + txr->tx_rsq[txr->tx_rs_pidx] = pidx_last; + txr->tx_rs_pidx = (txr->tx_rs_pidx+1) & mask; + MPASS(txr->tx_rs_pidx != txr->tx_rs_cidx); + } + pi->ipi_new_pidx = i; - /* Set starting index/descriptor/buffer */ - first = txr->next_to_clean; - buf = &txr->buffers[first]; - tx_desc = &txr->base[first]; - - /* - * This function operates per-packet -- identifies the start of the - * packet and gets the index of the last descriptor of the packet from - * it, from eop_index. - * - * If the last descriptor is marked "done" by the hardware, then all - * of the descriptors for the packet are cleaned. - */ - last = buf->eop_index; - if (last == -1) - return FALSE; - eop_desc = &txr->base[last]; - - /* Sync DMA before reading from ring */ - bus_dmamap_sync(txr->dma.tag, txr->dma.map, BUS_DMASYNC_POSTREAD); - - /* - * Get the index of the first descriptor beyond the EOP and call that - * 'done'. Simplifies the comparison for the inner loop below. - */ - if (++last == que->num_tx_desc) - last = 0; - done = last; - - /* - * We find the last completed descriptor by examining each - * descriptor's status bits to see if it's done. - */ - do { - /* Break if last descriptor in packet isn't marked done */ - if ((eop_desc->cmd_type_offset_bsz & I40E_TXD_QW1_DTYPE_MASK) - != I40E_TX_DESC_DTYPE_DESC_DONE) - break; - - /* Clean the descriptors that make up the processed packet */ - while (first != done) { - /* - * If there was a buffer attached to this descriptor, - * prevent the adapter from accessing it, and add its - * length to the queue's TX stats. - */ - if (buf->m_head) { - txr->bytes += buf->m_head->m_pkthdr.len; - txr->tx_bytes += buf->m_head->m_pkthdr.len; - bus_dmamap_sync(buf->tag, buf->map, - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(buf->tag, buf->map); - m_freem(buf->m_head); - buf->m_head = NULL; - } - buf->eop_index = -1; - ++txr->avail; - - if (++first == que->num_tx_desc) - first = 0; - buf = &txr->buffers[first]; - tx_desc = &txr->base[first]; - } - ++txr->packets; - /* If a packet was successfully cleaned, reset the watchdog timer */ - atomic_store_rel_32(&txr->watchdog_timer, IXL_WATCHDOG); - - /* - * Since buf is the first buffer after the one that was just - * cleaned, check if the packet it starts is done, too. - */ - last = buf->eop_index; - if (last != -1) { - eop_desc = &txr->base[last]; - /* Get next done point */ - if (++last == que->num_tx_desc) last = 0; - done = last; - } else - break; - } while (--limit); - - bus_dmamap_sync(txr->dma.tag, txr->dma.map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - - txr->next_to_clean = first; - - /* - * If there are no pending descriptors, clear the watchdog timer. 
- */ - if (txr->avail == que->num_tx_desc) { - atomic_store_rel_32(&txr->watchdog_timer, 0); - return FALSE; - } - - return TRUE; + ++txr->tx_packets; + return (0); } -bool -ixl_txeof(struct ixl_queue *que) +static void +ixl_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx) { - struct ixl_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = arg; + struct tx_ring *txr = &vsi->tx_queues[txqid].txr; - return (vsi->enable_head_writeback) ? ixl_txeof_hwb(que) - : ixl_txeof_dwb(que); + /* + * Advance the Transmit Descriptor Tail (Tdt), this tells the + * hardware that this frame is available to transmit. + */ + wr32(vsi->hw, txr->tail, pidx); } /********************************************************************* * - * Refresh mbuf buffers for RX descriptor rings - * - now keeps its own state so discards due to resource - * exhaustion are unnecessary, if an mbuf cannot be obtained - * it just returns, keeping its placeholder, thus it can simply - * be recalled to try again. + * (Re)Initialize a queue transmit ring by clearing its memory. * **********************************************************************/ -static void -ixl_refresh_mbufs(struct ixl_queue *que, int limit) +void +ixl_init_tx_ring(struct ixl_vsi *vsi, struct ixl_tx_queue *que) { - struct ixl_vsi *vsi = que->vsi; - struct rx_ring *rxr = &que->rxr; - bus_dma_segment_t hseg[1]; - bus_dma_segment_t pseg[1]; - struct ixl_rx_buf *buf; - struct mbuf *mh, *mp; - int i, j, nsegs, error; - bool refreshed = FALSE; + struct tx_ring *txr = &que->txr; - i = j = rxr->next_refresh; - /* Control the loop with one beyond */ - if (++j == que->num_rx_desc) - j = 0; + /* Clear the old ring contents */ + bzero((void *)txr->tx_base, + (sizeof(struct i40e_tx_desc)) * + (vsi->shared->isc_ntxd[0] + (vsi->enable_head_writeback ? 1 : 0))); - while (j != limit) { - buf = &rxr->buffers[i]; - if (rxr->hdr_split == FALSE) - goto no_split; + // TODO: Write max descriptor index instead of 0? 
+ wr32(vsi->hw, txr->tail, 0); + wr32(vsi->hw, I40E_QTX_HEAD(txr->me), 0); +} - if (buf->m_head == NULL) { - mh = m_gethdr(M_NOWAIT, MT_DATA); - if (mh == NULL) - goto update; - } else - mh = buf->m_head; +/* + * ixl_get_tx_head - Retrieve the value from the + * location the HW records its HEAD index + */ +static inline u32 +ixl_get_tx_head(struct ixl_tx_queue *que) +{ + if_softc_ctx_t scctx = que->vsi->shared; + struct tx_ring *txr = &que->txr; + void *head = &txr->tx_base[scctx->isc_ntxd[0]]; - mh->m_pkthdr.len = mh->m_len = MHLEN; - mh->m_len = MHLEN; - mh->m_flags |= M_PKTHDR; - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->htag, - buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); - if (error != 0) { - printf("Refresh mbufs: hdr dmamap load" - " failure - %d\n", error); - m_free(mh); - buf->m_head = NULL; - goto update; - } - buf->m_head = mh; - bus_dmamap_sync(rxr->htag, buf->hmap, - BUS_DMASYNC_PREREAD); - rxr->base[i].read.hdr_addr = - htole64(hseg[0].ds_addr); - -no_split: - if (buf->m_pack == NULL) { - mp = m_getjcl(M_NOWAIT, MT_DATA, - M_PKTHDR, rxr->mbuf_sz); - if (mp == NULL) - goto update; - } else - mp = buf->m_pack; - - mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->ptag, - buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); - if (error != 0) { - printf("Refresh mbufs: payload dmamap load" - " failure - %d\n", error); - m_free(mp); - buf->m_pack = NULL; - goto update; - } - buf->m_pack = mp; - bus_dmamap_sync(rxr->ptag, buf->pmap, - BUS_DMASYNC_PREREAD); - rxr->base[i].read.pkt_addr = - htole64(pseg[0].ds_addr); - /* Used only when doing header split */ - rxr->base[i].read.hdr_addr = 0; - - refreshed = TRUE; - /* Next is precalculated */ - i = j; - rxr->next_refresh = i; - if (++j == que->num_rx_desc) - j = 0; - } -update: - if (refreshed) /* Update hardware tail index */ - wr32(vsi->hw, rxr->tail, rxr->next_refresh); - return; + return LE32_TO_CPU(*(volatile __le32 *)head); } - -/********************************************************************* - * - * Allocate memory for rx_buffer structures. Since we use one - * rx_buffer per descriptor, the maximum number of rx_buffer's - * that we'll need is equal to the number of receive descriptors - * that we've defined. 
- * - **********************************************************************/ -int -ixl_allocate_rx_data(struct ixl_queue *que) +static int +ixl_isc_txd_credits_update_hwb(void *arg, uint16_t qid, bool clear) { - struct rx_ring *rxr = &que->rxr; - struct ixl_vsi *vsi = que->vsi; - device_t dev = vsi->dev; - struct ixl_rx_buf *buf; - int i, bsize, error; + struct ixl_vsi *vsi = arg; + if_softc_ctx_t scctx = vsi->shared; + struct ixl_tx_queue *que = &vsi->tx_queues[qid]; + struct tx_ring *txr = &que->txr; + int head, credits; - if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ - 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MSIZE, /* maxsize */ - 1, /* nsegments */ - MSIZE, /* maxsegsize */ - 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &rxr->htag))) { - device_printf(dev, "Unable to create RX DMA htag\n"); - return (error); - } + /* Get the Head WB value */ + head = ixl_get_tx_head(que); - if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ - 1, 0, /* alignment, bounds */ - BUS_SPACE_MAXADDR, /* lowaddr */ - BUS_SPACE_MAXADDR, /* highaddr */ - NULL, NULL, /* filter, filterarg */ - MJUM16BYTES, /* maxsize */ - 1, /* nsegments */ - MJUM16BYTES, /* maxsegsize */ - 0, /* flags */ - NULL, /* lockfunc */ - NULL, /* lockfuncarg */ - &rxr->ptag))) { - device_printf(dev, "Unable to create RX DMA ptag\n"); - goto free_rx_htag; - } + credits = head - txr->tx_cidx_processed; + if (credits < 0) + credits += scctx->isc_ntxd[0]; + if (clear) + txr->tx_cidx_processed = head; - bsize = sizeof(struct ixl_rx_buf) * que->num_rx_desc; - if (!(rxr->buffers = - (struct ixl_rx_buf *) malloc(bsize, - M_DEVBUF, M_NOWAIT | M_ZERO))) { - device_printf(dev, "Unable to allocate rx_buffer memory\n"); - error = ENOMEM; - goto free_rx_ptag; - } - - for (i = 0; i < que->num_rx_desc; i++) { - buf = &rxr->buffers[i]; - error = bus_dmamap_create(rxr->htag, - BUS_DMA_NOWAIT, &buf->hmap); - if (error) { - device_printf(dev, "Unable to create RX head map\n"); - goto free_buffers; - } - error = bus_dmamap_create(rxr->ptag, - BUS_DMA_NOWAIT, &buf->pmap); - if (error) { - bus_dmamap_destroy(rxr->htag, buf->hmap); - device_printf(dev, "Unable to create RX pkt map\n"); - goto free_buffers; - } - } - - return 0; -free_buffers: - while (i--) { - buf = &rxr->buffers[i]; - bus_dmamap_destroy(rxr->ptag, buf->pmap); - bus_dmamap_destroy(rxr->htag, buf->hmap); - } - free(rxr->buffers, M_DEVBUF); - rxr->buffers = NULL; -free_rx_ptag: - bus_dma_tag_destroy(rxr->ptag); - rxr->ptag = NULL; -free_rx_htag: - bus_dma_tag_destroy(rxr->htag); - rxr->htag = NULL; - return (error); + return (credits); } - -/********************************************************************* - * - * (Re)Initialize the queue receive ring and its buffers. 
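/*
 * [Editor's sketch -- not part of the commit.]  With head write-back the
 * hardware stores its consumer index in the extra slot just past the ring
 * (tx_base[isc_ntxd[0]], read by ixl_get_tx_head() above), and
 * ixl_isc_txd_credits_update_hwb() turns that into a completion count by
 * subtracting the previously processed index, adding the ring size when
 * the head has wrapped.  The arithmetic in isolation:
 */
#include <stdio.h>

static int
tx_credits(int head, int prev_processed, int ring_size)
{
	int credits = head - prev_processed;

	if (credits < 0)	/* head wrapped past the end of the ring */
		credits += ring_size;
	return (credits);
}

int
main(void)
{
	printf("%d\n", tx_credits(10, 1000, 1024));	/* wrapped: 34 done */
	printf("%d\n", tx_credits(500, 490, 1024));	/* linear: 10 done  */
	return (0);
}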
- * - **********************************************************************/ -int -ixl_init_rx_ring(struct ixl_queue *que) +static int +ixl_isc_txd_credits_update_dwb(void *arg, uint16_t txqid, bool clear) { - struct rx_ring *rxr = &que->rxr; - struct ixl_vsi *vsi = que->vsi; -#if defined(INET6) || defined(INET) - struct ifnet *ifp = vsi->ifp; - struct lro_ctrl *lro = &rxr->lro; -#endif - struct ixl_rx_buf *buf; - bus_dma_segment_t pseg[1], hseg[1]; - int rsize, nsegs, error = 0; -#ifdef DEV_NETMAP - struct netmap_adapter *na = NA(que->vsi->ifp); - struct netmap_slot *slot; -#endif /* DEV_NETMAP */ + struct ixl_vsi *vsi = arg; + struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid]; + if_softc_ctx_t scctx = vsi->shared; + struct tx_ring *txr = &tx_que->txr; - IXL_RX_LOCK(rxr); -#ifdef DEV_NETMAP - /* same as in ixl_init_tx_ring() */ - slot = netmap_reset(na, NR_RX, que->me, 0); -#endif /* DEV_NETMAP */ - /* Clear the ring contents */ - rsize = roundup2(que->num_rx_desc * - sizeof(union i40e_rx_desc), DBA_ALIGN); - bzero((void *)rxr->base, rsize); - /* Cleanup any existing buffers */ - for (int i = 0; i < que->num_rx_desc; i++) { - buf = &rxr->buffers[i]; - if (buf->m_head != NULL) { - bus_dmamap_sync(rxr->htag, buf->hmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->htag, buf->hmap); - buf->m_head->m_flags |= M_PKTHDR; - m_freem(buf->m_head); - } - if (buf->m_pack != NULL) { - bus_dmamap_sync(rxr->ptag, buf->pmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->ptag, buf->pmap); - buf->m_pack->m_flags |= M_PKTHDR; - m_freem(buf->m_pack); - } - buf->m_head = NULL; - buf->m_pack = NULL; - } + qidx_t processed = 0; + qidx_t cur, prev, ntxd, rs_cidx; + int32_t delta; + bool is_done; - /* header split is off */ - rxr->hdr_split = FALSE; + rs_cidx = txr->tx_rs_cidx; +#if 0 + device_printf(iflib_get_dev(vsi->ctx), "%s: (q%d) rs_cidx %d, txr->tx_rs_pidx %d\n", __func__, + txr->me, rs_cidx, txr->tx_rs_pidx); +#endif + if (rs_cidx == txr->tx_rs_pidx) + return (0); + cur = txr->tx_rsq[rs_cidx]; + MPASS(cur != QIDX_INVALID); + is_done = ixl_is_tx_desc_done(txr, cur); - /* Now replenish the mbufs */ - for (int j = 0; j != que->num_rx_desc; ++j) { - struct mbuf *mh, *mp; + if (clear == false || !is_done) + return (0); - buf = &rxr->buffers[j]; -#ifdef DEV_NETMAP - /* - * In netmap mode, fill the map and set the buffer - * address in the NIC ring, considering the offset - * between the netmap and NIC rings (see comment in - * ixgbe_setup_transmit_ring() ). 
No need to allocate - * an mbuf, so end the block with a continue; - */ - if (slot) { - int sj = netmap_idx_n2k(na->rx_rings[que->me], j); - uint64_t paddr; - void *addr; - - addr = PNMB(na, slot + sj, &paddr); - netmap_load_map(na, rxr->dma.tag, buf->pmap, addr); - /* Update descriptor and the cached value */ - rxr->base[j].read.pkt_addr = htole64(paddr); - rxr->base[j].read.hdr_addr = 0; - continue; - } -#endif /* DEV_NETMAP */ - /* - ** Don't allocate mbufs if not - ** doing header split, its wasteful - */ - if (rxr->hdr_split == FALSE) - goto skip_head; - - /* First the header */ - buf->m_head = m_gethdr(M_NOWAIT, MT_DATA); - if (buf->m_head == NULL) { - error = ENOBUFS; - goto fail; - } - m_adj(buf->m_head, ETHER_ALIGN); - mh = buf->m_head; - mh->m_len = mh->m_pkthdr.len = MHLEN; - mh->m_flags |= M_PKTHDR; - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->htag, - buf->hmap, buf->m_head, hseg, - &nsegs, BUS_DMA_NOWAIT); - if (error != 0) /* Nothing elegant to do here */ - goto fail; - bus_dmamap_sync(rxr->htag, - buf->hmap, BUS_DMASYNC_PREREAD); - /* Update descriptor */ - rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr); - -skip_head: - /* Now the payload cluster */ - buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA, - M_PKTHDR, rxr->mbuf_sz); - if (buf->m_pack == NULL) { - error = ENOBUFS; - goto fail; - } - mp = buf->m_pack; - mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz; - /* Get the memory mapping */ - error = bus_dmamap_load_mbuf_sg(rxr->ptag, - buf->pmap, mp, pseg, - &nsegs, BUS_DMA_NOWAIT); - if (error != 0) - goto fail; - bus_dmamap_sync(rxr->ptag, - buf->pmap, BUS_DMASYNC_PREREAD); - /* Update descriptor */ - rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr); - rxr->base[j].read.hdr_addr = 0; - } - - - /* Setup our descriptor indices */ - rxr->next_check = 0; - rxr->next_refresh = 0; - rxr->lro_enabled = FALSE; - rxr->split = 0; - rxr->bytes = 0; - rxr->discard = FALSE; - - wr32(vsi->hw, rxr->tail, que->num_rx_desc - 1); - ixl_flush(vsi->hw); - -#if defined(INET6) || defined(INET) - /* - ** Now set up the LRO interface: - */ - if (ifp->if_capenable & IFCAP_LRO) { - int err = tcp_lro_init(lro); - if (err) { - if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me); - goto fail; - } - INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me); - rxr->lro_enabled = TRUE; - lro->ifp = vsi->ifp; - } + prev = txr->tx_cidx_processed; + ntxd = scctx->isc_ntxd[0]; + do { + delta = (int32_t)cur - (int32_t)prev; + MPASS(prev == 0 || delta != 0); + if (delta < 0) + delta += ntxd; +#if 0 + device_printf(iflib_get_dev(vsi->ctx), + "%s: (q%d) cidx_processed=%u cur=%u clear=%d delta=%d\n", + __func__, txr->me, prev, cur, clear, delta); #endif + processed += delta; + prev = cur; + rs_cidx = (rs_cidx + 1) & (ntxd-1); + if (rs_cidx == txr->tx_rs_pidx) + break; + cur = txr->tx_rsq[rs_cidx]; + MPASS(cur != QIDX_INVALID); + is_done = ixl_is_tx_desc_done(txr, cur); + } while (is_done); - bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + txr->tx_rs_cidx = rs_cidx; + txr->tx_cidx_processed = prev; -fail: - IXL_RX_UNLOCK(rxr); - return (error); +#if 0 + device_printf(iflib_get_dev(vsi->ctx), "%s: (q%d) processed %d\n", __func__, txr->me, processed); +#endif + return (processed); } - -/********************************************************************* - * - * Free station receive ring data structures - * - **********************************************************************/ -void -ixl_free_que_rx(struct ixl_queue *que) 
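/*
 * [Editor's sketch -- not part of the commit.]  Without head write-back,
 * ixl_isc_txd_encap() records the last descriptor index of every packet
 * that requests a completion interrupt in the small tx_rsq ring, and
 * ixl_isc_txd_credits_update_dwb() above walks that ring, stopping at the
 * first recorded descriptor the hardware has not yet marked done and
 * summing the distance travelled (with wrap).  A stand-alone model of
 * that producer/consumer walk; done[] stands in for the DTYPE ==
 * DESC_DONE test in ixl_is_tx_desc_done():
 */
#include <stdbool.h>
#include <stdio.h>

#define	NTXD	16	/* ring size, power of two */

static int
credits_from_rsq(const int *rsq, int rs_pidx, int *rs_cidx_p,
    const bool *done, int *cidx_processed_p)
{
	int processed = 0, prev = *cidx_processed_p, rs_cidx = *rs_cidx_p;

	while (rs_cidx != rs_pidx && done[rsq[rs_cidx]]) {
		int cur = rsq[rs_cidx];
		int delta = cur - prev;

		if (delta < 0)
			delta += NTXD;		/* descriptor index wrapped */
		processed += delta;
		prev = cur;
		rs_cidx = (rs_cidx + 1) & (NTXD - 1);
	}
	*rs_cidx_p = rs_cidx;
	*cidx_processed_p = prev;
	return (processed);
}

int
main(void)
{
	int rsq[NTXD] = { 3, 7, 12 };	/* last descriptor of three packets */
	bool done[NTXD] = { false };
	int rs_cidx = 0, cidx_processed = 0;

	done[3] = done[7] = true;	/* first two packets have completed */
	/* Prints "credits: 7": seven descriptors up to index 7 are reclaimable. */
	printf("credits: %d\n",
	    credits_from_rsq(rsq, 3, &rs_cidx, done, &cidx_processed));
	return (0);
}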
+static void +ixl_isc_rxd_refill(void *arg, if_rxd_update_t iru) { - struct rx_ring *rxr = &que->rxr; - struct ixl_rx_buf *buf; + struct ixl_vsi *vsi = arg; + if_softc_ctx_t scctx = vsi->shared; + struct rx_ring *rxr = &((vsi->rx_queues[iru->iru_qsidx]).rxr); + uint64_t *paddrs; + uint32_t next_pidx, pidx; + uint16_t count; + int i; - /* Cleanup any existing buffers */ - if (rxr->buffers != NULL) { - for (int i = 0; i < que->num_rx_desc; i++) { - buf = &rxr->buffers[i]; + paddrs = iru->iru_paddrs; + pidx = iru->iru_pidx; + count = iru->iru_count; - /* Free buffers and unload dma maps */ - ixl_rx_discard(rxr, i); - - bus_dmamap_destroy(rxr->htag, buf->hmap); - bus_dmamap_destroy(rxr->ptag, buf->pmap); - } - free(rxr->buffers, M_DEVBUF); - rxr->buffers = NULL; - } - - if (rxr->htag != NULL) { - bus_dma_tag_destroy(rxr->htag); - rxr->htag = NULL; - } - if (rxr->ptag != NULL) { - bus_dma_tag_destroy(rxr->ptag); - rxr->ptag = NULL; - } + for (i = 0, next_pidx = pidx; i < count; i++) { + rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]); + if (++next_pidx == scctx->isc_nrxd[0]) + next_pidx = 0; + } } -static inline void -ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype) +static void +ixl_isc_rxd_flush(void * arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx) { + struct ixl_vsi *vsi = arg; + struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr; -#if defined(INET6) || defined(INET) - /* - * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet - * should be computed by hardware. Also it should not have VLAN tag in - * ethernet header. - */ - if (rxr->lro_enabled && - (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && - (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == - (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { - /* - * Send to the stack if: - ** - LRO not enabled, or - ** - no LRO resources, or - ** - lro enqueue fails - */ - if (rxr->lro.lro_cnt != 0) - if (tcp_lro_rx(&rxr->lro, m, 0) == 0) - return; - } -#endif - (*ifp->if_input)(ifp, m); + wr32(vsi->hw, rxr->tail, pidx); } - -static inline void -ixl_rx_discard(struct rx_ring *rxr, int i) +static int +ixl_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget) { - struct ixl_rx_buf *rbuf; + struct ixl_vsi *vsi = arg; + struct rx_ring *rxr = &vsi->rx_queues[rxqid].rxr; + union i40e_rx_desc *rxd; + u64 qword; + uint32_t status; + int cnt, i, nrxd; - KASSERT(rxr != NULL, ("Receive ring pointer cannot be null")); - KASSERT(i < rxr->que->num_rx_desc, ("Descriptor index must be less than que->num_desc")); + nrxd = vsi->shared->isc_nrxd[0]; - rbuf = &rxr->buffers[i]; + if (budget == 1) { + rxd = &rxr->rx_base[idx]; + qword = le64toh(rxd->wb.qword1.status_error_len); + status = (qword & I40E_RXD_QW1_STATUS_MASK) + >> I40E_RXD_QW1_STATUS_SHIFT; + return !!(status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)); + } - /* Free the mbufs in the current chain for the packet */ - if (rbuf->fmp != NULL) { - bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD); - m_freem(rbuf->fmp); - rbuf->fmp = NULL; - } + for (cnt = 0, i = idx; cnt < nrxd - 1 && cnt <= budget;) { + rxd = &rxr->rx_base[i]; + qword = le64toh(rxd->wb.qword1.status_error_len); + status = (qword & I40E_RXD_QW1_STATUS_MASK) + >> I40E_RXD_QW1_STATUS_SHIFT; - /* - * Free the mbufs for the current descriptor; and let ixl_refresh_mbufs() - * assign new mbufs to these. 
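/*
 * [Editor's sketch -- not part of the commit.]  Under iflib the driver no
 * longer allocates RX mbufs itself: ixl_isc_rxd_refill() above is handed a
 * batch of already-mapped buffer physical addresses and only stores each
 * one in a descriptor's read.pkt_addr, wrapping the producer index at the
 * ring size, and ixl_isc_rxd_flush() publishes that index through the RX
 * tail register.  The index arithmetic in isolation:
 */
#include <stdint.h>
#include <stdio.h>

/* Store 'count' buffer addresses starting at 'pidx'; return the next index. */
static uint32_t
refill_ring(uint64_t *ring, uint32_t nrxd, uint32_t pidx,
    const uint64_t *paddrs, uint16_t count)
{
	for (uint16_t i = 0; i < count; i++) {
		ring[pidx] = paddrs[i];		/* descriptor's pkt_addr */
		if (++pidx == nrxd)		/* wrap at the ring size */
			pidx = 0;
	}
	return (pidx);
}

int
main(void)
{
	uint64_t ring[8] = { 0 };
	uint64_t bufs[3] = { 0x1000, 0x2000, 0x3000 };

	/* Fills slots 6, 7 and 0; prints "new producer index = 1". */
	printf("new producer index = %u\n",
	    (unsigned)refill_ring(ring, 8, 6, bufs, 3));
	return (0);
}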
- */ - if (rbuf->m_head) { - bus_dmamap_sync(rxr->htag, rbuf->hmap, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->htag, rbuf->hmap); - m_free(rbuf->m_head); - rbuf->m_head = NULL; + if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) + break; + if (++i == nrxd) + i = 0; + if (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)) + cnt++; } - - if (rbuf->m_pack) { - bus_dmamap_sync(rxr->ptag, rbuf->pmap, BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->ptag, rbuf->pmap); - m_free(rbuf->m_pack); - rbuf->m_pack = NULL; - } + + return (cnt); } -#ifdef RSS /* ** i40e_ptype_to_hash: parse the packet type ** to determine the appropriate hash. */ static inline int ixl_ptype_to_hash(u8 ptype) { struct i40e_rx_ptype_decoded decoded; decoded = decode_rx_desc_ptype(ptype); if (!decoded.known) - return M_HASHTYPE_OPAQUE_HASH; + return M_HASHTYPE_OPAQUE; - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2) - return M_HASHTYPE_OPAQUE_HASH; + if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2) + return M_HASHTYPE_OPAQUE; /* Note: anything that gets to this point is IP */ - if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) { + if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) { switch (decoded.inner_prot) { case I40E_RX_PTYPE_INNER_PROT_TCP: return M_HASHTYPE_RSS_TCP_IPV6; case I40E_RX_PTYPE_INNER_PROT_UDP: return M_HASHTYPE_RSS_UDP_IPV6; default: return M_HASHTYPE_RSS_IPV6; } } - if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) { + if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) { switch (decoded.inner_prot) { case I40E_RX_PTYPE_INNER_PROT_TCP: return M_HASHTYPE_RSS_TCP_IPV4; case I40E_RX_PTYPE_INNER_PROT_UDP: return M_HASHTYPE_RSS_UDP_IPV4; default: return M_HASHTYPE_RSS_IPV4; } } /* We should never get here!! */ - return M_HASHTYPE_OPAQUE_HASH; + return M_HASHTYPE_OPAQUE; } -#endif /* RSS */ /********************************************************************* * - * This routine executes in interrupt context. It replenishes - * the mbufs in the descriptor and sends data which has been + * This routine executes in ithread context. It sends data which has been * dma'ed into host memory to upper layer. * - * We loop at most count times if count is > 0, or until done if - * count < 0. + * Returns 0 upon success, errno on failure * - * Return TRUE for more work, FALSE for all clean. *********************************************************************/ -bool -ixl_rxeof(struct ixl_queue *que, int count) +static int +ixl_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) { - struct ixl_vsi *vsi = que->vsi; + struct ixl_vsi *vsi = arg; + struct ixl_rx_queue *que = &vsi->rx_queues[ri->iri_qsidx]; struct rx_ring *rxr = &que->rxr; - struct ifnet *ifp = vsi->ifp; -#if defined(INET6) || defined(INET) - struct lro_ctrl *lro = &rxr->lro; -#endif - int i, nextp, processed = 0; union i40e_rx_desc *cur; - struct ixl_rx_buf *rbuf, *nbuf; + u32 status, error; + u16 plen, vtag; + u64 qword; + u8 ptype; + bool eop; + int i, cidx; - IXL_RX_LOCK(rxr); + cidx = ri->iri_cidx; + i = 0; + do { + /* 5 descriptor receive limit */ + MPASS(i < IXL_MAX_RX_SEGS); -#ifdef DEV_NETMAP - if (netmap_rx_irq(ifp, que->me, &count)) { - IXL_RX_UNLOCK(rxr); - return (FALSE); - } -#endif /* DEV_NETMAP */ - - for (i = rxr->next_check; count != 0;) { - struct mbuf *sendmp, *mh, *mp; - u32 status, error; - u16 hlen, plen, vtag; - u64 qword; - u8 ptype; - bool eop; - - /* Sync the ring. 
*/ - bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - - cur = &rxr->base[i]; + cur = &rxr->rx_base[cidx]; qword = le64toh(cur->wb.qword1.status_error_len); status = (qword & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT; error = (qword & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT; plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) - >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT; ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) { - ++rxr->not_done; - break; - } - if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) - break; + /* we should never be called without a valid descriptor */ + MPASS((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) != 0); - count--; - sendmp = NULL; - nbuf = NULL; + ri->iri_len += plen; + rxr->bytes += plen; + cur->wb.qword1.status_error_len = 0; - rbuf = &rxr->buffers[i]; - mh = rbuf->m_head; - mp = rbuf->m_pack; eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)); if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1); else vtag = 0; - /* Remove device access to the rx buffers. */ - if (rbuf->m_head != NULL) { - bus_dmamap_sync(rxr->htag, rbuf->hmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->htag, rbuf->hmap); - } - if (rbuf->m_pack != NULL) { - bus_dmamap_sync(rxr->ptag, rbuf->pmap, - BUS_DMASYNC_POSTREAD); - bus_dmamap_unload(rxr->ptag, rbuf->pmap); - } - /* ** Make sure bad packets are discarded, ** note that only EOP descriptor has valid ** error results. */ - if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { + if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { rxr->desc_errs++; - ixl_rx_discard(rxr, i); - goto next_desc; + return (EBADMSG); } + ri->iri_frags[i].irf_flid = 0; + ri->iri_frags[i].irf_idx = cidx; + ri->iri_frags[i].irf_len = plen; + if (++cidx == vsi->shared->isc_nrxd[0]) + cidx = 0; + i++; + } while (!eop); - /* Prefetch the next buffer */ - if (!eop) { - nextp = i + 1; - if (nextp == que->num_rx_desc) - nextp = 0; - nbuf = &rxr->buffers[nextp]; - prefetch(nbuf); - } + /* capture data for dynamic ITR adjustment */ + rxr->packets++; + rxr->rx_packets++; - /* - ** The header mbuf is ONLY used when header - ** split is enabled, otherwise we get normal - ** behavior, ie, both header and payload - ** are DMA'd into the payload buffer. - ** - ** Rather than using the fmp/lmp global pointers - ** we now keep the head of a packet chain in the - ** buffer struct and pass this along from one - ** descriptor to the next, until we get EOP. - */ - if (rxr->hdr_split && (rbuf->fmp == NULL)) { - if (hlen > IXL_RX_HDR) - hlen = IXL_RX_HDR; - mh->m_len = hlen; - mh->m_flags |= M_PKTHDR; - mh->m_next = NULL; - mh->m_pkthdr.len = mh->m_len; - /* Null buf pointer so it is refreshed */ - rbuf->m_head = NULL; - /* - ** Check the payload length, this - ** could be zero if its a small - ** packet. - */ - if (plen > 0) { - mp->m_len = plen; - mp->m_next = NULL; - mp->m_flags &= ~M_PKTHDR; - mh->m_next = mp; - mh->m_pkthdr.len += mp->m_len; - /* Null buf pointer so it is refreshed */ - rbuf->m_pack = NULL; - rxr->split++; - } - /* - ** Now create the forward - ** chain so when complete - ** we wont have to. 
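Each receive write-back descriptor packs status, error, buffer length, and packet type into one 64-bit word, which the loop above unpacks with the I40E_RXD_QW1_*_MASK and _SHIFT pairs. A short sketch of the same mask-and-shift extraction; the field layout below is made up for the example, the real definitions live in the shared i40e headers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: bits 0-18 status, 19-26 error, 38-51 buffer length. */
#define QW_STATUS_SHIFT	0
#define QW_STATUS_MASK	(0x7FFFFULL << QW_STATUS_SHIFT)
#define QW_ERROR_SHIFT	19
#define QW_ERROR_MASK	(0xFFULL << QW_ERROR_SHIFT)
#define QW_PLEN_SHIFT	38
#define QW_PLEN_MASK	(0x3FFFULL << QW_PLEN_SHIFT)

int
main(void)
{
	uint64_t qword;
	uint32_t status, error, plen;

	/* Compose a descriptor word: status 0x3, no errors, a 1514-byte frame. */
	qword = (0x3ULL << QW_STATUS_SHIFT) | ((uint64_t)1514 << QW_PLEN_SHIFT);

	status = (qword & QW_STATUS_MASK) >> QW_STATUS_SHIFT;
	error = (qword & QW_ERROR_MASK) >> QW_ERROR_SHIFT;
	plen = (qword & QW_PLEN_MASK) >> QW_PLEN_SHIFT;

	printf("status 0x%" PRIx32 " error 0x%" PRIx32 " length %" PRIu32 "\n",
	    status, error, plen);
	return (0);
}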
- */ - if (eop == 0) { - /* stash the chain head */ - nbuf->fmp = mh; - /* Make forward chain */ - if (plen) - mp->m_next = nbuf->m_pack; - else - mh->m_next = nbuf->m_pack; - } else { - /* Singlet, prepare to send */ - sendmp = mh; - if (vtag) { - sendmp->m_pkthdr.ether_vtag = vtag; - sendmp->m_flags |= M_VLANTAG; - } - } - } else { - /* - ** Either no header split, or a - ** secondary piece of a fragmented - ** split packet. - */ - mp->m_len = plen; - /* - ** See if there is a stored head - ** that determines what we are - */ - sendmp = rbuf->fmp; - rbuf->m_pack = rbuf->fmp = NULL; - - if (sendmp != NULL) /* secondary frag */ - sendmp->m_pkthdr.len += mp->m_len; - else { - /* first desc of a non-ps chain */ - sendmp = mp; - sendmp->m_flags |= M_PKTHDR; - sendmp->m_pkthdr.len = mp->m_len; - } - /* Pass the head pointer on */ - if (eop == 0) { - nbuf->fmp = sendmp; - sendmp = NULL; - mp->m_next = nbuf->m_pack; - } - } - ++processed; - /* Sending this frame? */ - if (eop) { - sendmp->m_pkthdr.rcvif = ifp; - /* gather stats */ - rxr->rx_packets++; - rxr->rx_bytes += sendmp->m_pkthdr.len; - /* capture data for dynamic ITR adjustment */ - rxr->packets++; - rxr->bytes += sendmp->m_pkthdr.len; - /* Set VLAN tag (field only valid in eop desc) */ - if (vtag) { - sendmp->m_pkthdr.ether_vtag = vtag; - sendmp->m_flags |= M_VLANTAG; - } - if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) - ixl_rx_checksum(sendmp, status, error, ptype); -#ifdef RSS - sendmp->m_pkthdr.flowid = - le32toh(cur->wb.qword0.hi_dword.rss); - M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype)); -#else - sendmp->m_pkthdr.flowid = que->msix; - M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE); -#endif - } -next_desc: - bus_dmamap_sync(rxr->dma.tag, rxr->dma.map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - - /* Advance our pointers to the next descriptor. */ - if (++i == que->num_rx_desc) - i = 0; - - /* Now send to the stack or do LRO */ - if (sendmp != NULL) { - rxr->next_check = i; - IXL_RX_UNLOCK(rxr); - ixl_rx_input(rxr, ifp, sendmp, ptype); - IXL_RX_LOCK(rxr); - /* - * Update index used in loop in case another - * ixl_rxeof() call executes when lock is released - */ - i = rxr->next_check; - } - - /* Every 8 descriptors we go to refresh mbufs */ - if (processed == 8) { - ixl_refresh_mbufs(que, i); - processed = 0; - } - } - - /* Refresh any remaining buf structs */ - if (ixl_rx_unrefreshed(que)) - ixl_refresh_mbufs(que, i); - - rxr->next_check = i; - - IXL_RX_UNLOCK(rxr); - -#if defined(INET6) || defined(INET) - /* - * Flush any outstanding LRO work - */ -#if __FreeBSD_version >= 1100105 - tcp_lro_flush_all(lro); -#else - struct lro_entry *queued; - while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { - SLIST_REMOVE_HEAD(&lro->lro_active, next); - tcp_lro_flush(lro, queued); - } -#endif -#endif /* defined(INET6) || defined(INET) */ - - return (FALSE); + if ((vsi->ifp->if_capenable & IFCAP_RXCSUM) != 0) + ixl_rx_checksum(ri, status, error, ptype); + ri->iri_flowid = le32toh(cur->wb.qword0.hi_dword.rss); + ri->iri_rsstype = ixl_ptype_to_hash(ptype); + ri->iri_vtag = vtag; + ri->iri_nfrags = i; + if (vtag) + ri->iri_flags |= M_VLANTAG; + return (0); } - /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. 
* *********************************************************************/ static void -ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype) +ixl_rx_checksum(if_rxd_info_t ri, u32 status, u32 error, u8 ptype) { struct i40e_rx_ptype_decoded decoded; - decoded = decode_rx_desc_ptype(ptype); + ri->iri_csum_flags = 0; - /* Errors? */ - if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) | - (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) { - mp->m_pkthdr.csum_flags = 0; + /* No L3 or L4 checksum was calculated */ + if (!(status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; - } + decoded = decode_rx_desc_ptype(ptype); + /* IPv6 with extension headers likely have bad csum */ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) + decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) { if (status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) { - mp->m_pkthdr.csum_flags = 0; + ri->iri_csum_flags = 0; return; } + } - - /* IP Checksum Good */ - mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; - mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; + ri->iri_csum_flags |= CSUM_L3_CALC; - if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) { - mp->m_pkthdr.csum_flags |= - (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); - mp->m_pkthdr.csum_data |= htons(0xffff); - } - return; -} + /* IPv4 checksum error */ + if (error & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)) + return; -#if __FreeBSD_version >= 1100000 -uint64_t -ixl_get_counter(if_t ifp, ift_counter cnt) -{ - struct ixl_vsi *vsi; + ri->iri_csum_flags |= CSUM_L3_VALID; + ri->iri_csum_flags |= CSUM_L4_CALC; - vsi = if_getsoftc(ifp); + /* L4 checksum error */ + if (error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)) + return; - switch (cnt) { - case IFCOUNTER_IPACKETS: - return (vsi->ipackets); - case IFCOUNTER_IERRORS: - return (vsi->ierrors); - case IFCOUNTER_OPACKETS: - return (vsi->opackets); - case IFCOUNTER_OERRORS: - return (vsi->oerrors); - case IFCOUNTER_COLLISIONS: - /* Collisions are by standard impossible in 40G/10G Ethernet */ - return (0); - case IFCOUNTER_IBYTES: - return (vsi->ibytes); - case IFCOUNTER_OBYTES: - return (vsi->obytes); - case IFCOUNTER_IMCASTS: - return (vsi->imcasts); - case IFCOUNTER_OMCASTS: - return (vsi->omcasts); - case IFCOUNTER_IQDROPS: - return (vsi->iqdrops); - case IFCOUNTER_OQDROPS: - return (vsi->oqdrops); - case IFCOUNTER_NOPROTO: - return (vsi->noproto); - default: - return (if_get_counter_default(ifp, cnt)); - } + ri->iri_csum_flags |= CSUM_L4_VALID; + ri->iri_csum_data |= htons(0xffff); } -#endif /* - * Set TX and RX ring size adjusting value to supported range + * Input: bitmap of enum i40e_aq_link_speed */ -void -ixl_vsi_setup_rings_size(struct ixl_vsi * vsi, int tx_ring_size, int rx_ring_size) +u64 +ixl_max_aq_speed_to_value(u8 link_speeds) { - struct device * dev = vsi->dev; - - if (tx_ring_size < IXL_MIN_RING - || tx_ring_size > IXL_MAX_RING - || tx_ring_size % IXL_RING_INCREMENT != 0) { - device_printf(dev, "Invalid tx_ring_size value of %d set!\n", - tx_ring_size); - device_printf(dev, "tx_ring_size must be between %d and %d, " - "inclusive, and must be a multiple of %d\n", - IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT); - device_printf(dev, "Using default value of %d instead\n", - IXL_DEFAULT_RING); - vsi->num_tx_desc = IXL_DEFAULT_RING; - } else - vsi->num_tx_desc = tx_ring_size; - - if (rx_ring_size < IXL_MIN_RING - || rx_ring_size > IXL_MAX_RING - || rx_ring_size % IXL_RING_INCREMENT != 0) { - device_printf(dev, "Invalid rx_ring_size value of %d set!\n", - rx_ring_size); - 
device_printf(dev, "rx_ring_size must be between %d and %d, " - "inclusive, and must be a multiple of %d\n", - IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT); - device_printf(dev, "Using default value of %d instead\n", - IXL_DEFAULT_RING); - vsi->num_rx_desc = IXL_DEFAULT_RING; - } else - vsi->num_rx_desc = rx_ring_size; - - device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", - vsi->num_tx_desc, vsi->num_rx_desc); - + if (link_speeds & I40E_LINK_SPEED_40GB) + return IF_Gbps(40); + if (link_speeds & I40E_LINK_SPEED_25GB) + return IF_Gbps(25); + if (link_speeds & I40E_LINK_SPEED_20GB) + return IF_Gbps(20); + if (link_speeds & I40E_LINK_SPEED_10GB) + return IF_Gbps(10); + if (link_speeds & I40E_LINK_SPEED_1GB) + return IF_Gbps(1); + if (link_speeds & I40E_LINK_SPEED_100MB) + return IF_Mbps(100); + else + /* Minimum supported link speed */ + return IF_Mbps(100); } - -static void -ixl_queue_sw_irq(struct ixl_vsi *vsi, int qidx) -{ - struct i40e_hw *hw = vsi->hw; - u32 reg, mask; - - if ((vsi->flags & IXL_FLAGS_IS_VF) != 0) { - mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); - - reg = I40E_VFINT_DYN_CTLN1(qidx); - } else { - mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | - I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); - - reg = ((vsi->flags & IXL_FLAGS_USES_MSIX) != 0) ? - I40E_PFINT_DYN_CTLN(qidx) : I40E_PFINT_DYN_CTL0; - } - - wr32(hw, reg, mask); -} - -int -ixl_queue_hang_check(struct ixl_vsi *vsi) -{ - struct ixl_queue *que = vsi->queues; - device_t dev = vsi->dev; - struct tx_ring *txr; - s32 timer, new_timer; - int hung = 0; - - for (int i = 0; i < vsi->num_queues; i++, que++) { - txr = &que->txr; - /* - * If watchdog_timer is equal to defualt value set by ixl_txeof - * just substract hz and move on - the queue is most probably - * running. Otherwise check the value. - */ - if (atomic_cmpset_rel_32(&txr->watchdog_timer, - IXL_WATCHDOG, (IXL_WATCHDOG) - hz) == 0) { - timer = atomic_load_acq_32(&txr->watchdog_timer); - /* - * Again - if the timer was reset to default value - * then queue is running. Otherwise check if watchdog - * expired and act accrdingly. - */ - - if (timer > 0 && timer != IXL_WATCHDOG) { - new_timer = timer - hz; - if (new_timer <= 0) { - atomic_store_rel_32(&txr->watchdog_timer, -1); - device_printf(dev, "WARNING: queue %d " - "appears to be hung!\n", que->me); - ++hung; - /* Try to unblock the queue with SW IRQ */ - ixl_queue_sw_irq(vsi, i); - } else { - /* - * If this fails, that means something in the TX path - * has updated the watchdog, so it means the TX path - * is still working and the watchdog doesn't need - * to countdown. - */ - atomic_cmpset_rel_32(&txr->watchdog_timer, - timer, new_timer); - } - } - } - } - - return (hung); -} - Index: head/sys/dev/ixl/ixlv.h =================================================================== --- head/sys/dev/ixl/ixlv.h (revision 335337) +++ head/sys/dev/ixl/ixlv.h (revision 335338) @@ -1,238 +1,241 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXLV_H_ #define _IXLV_H_ #include "ixlv_vc_mgr.h" -#define IXLV_AQ_MAX_ERR 30 -#define IXLV_MAX_INIT_WAIT 120 +#define IXLV_AQ_MAX_ERR 200 #define IXLV_MAX_FILTERS 128 #define IXLV_MAX_QUEUES 16 #define IXLV_AQ_TIMEOUT (1 * hz) #define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */ #define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0) #define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) #define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) #define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) #define IXLV_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) #define IXLV_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) #define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) #define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) #define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) #define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9) #define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10) #define IXLV_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11) #define IXLV_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12) #define IXLV_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13) #define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14) /* printf %b arg */ #define IXLV_FLAGS \ "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \ "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \ "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \ "\12CONFIGURE_PROMISC\13GET_STATS" #define IXLV_PRINTF_VF_OFFLOAD_FLAGS \ "\20\1I40E_VIRTCHNL_VF_OFFLOAD_L2" \ "\2I40E_VIRTCHNL_VF_OFFLOAD_IWARP" \ "\3I40E_VIRTCHNL_VF_OFFLOAD_FCOE" \ "\4I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ" \ "\5I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG" \ "\6I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR" \ "\21I40E_VIRTCHNL_VF_OFFLOAD_VLAN" \ "\22I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING" \ "\23I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2" \ "\24I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF" +static MALLOC_DEFINE(M_IXLV, "ixlv", "ixlv driver allocations"); + /* Driver state */ enum ixlv_state_t { IXLV_START, IXLV_FAILED, IXLV_RESET_REQUIRED, IXLV_RESET_PENDING, IXLV_VERSION_CHECK, IXLV_GET_RESOURCES, IXLV_INIT_READY, IXLV_INIT_START, IXLV_INIT_CONFIG, IXLV_INIT_MAPPING, IXLV_INIT_ENABLE, IXLV_INIT_COMPLETE, IXLV_RUNNING, }; /* Structs */ struct ixlv_mac_filter { SLIST_ENTRY(ixlv_mac_filter) next; u8 macaddr[ETHER_ADDR_LEN]; u16 flags; }; SLIST_HEAD(mac_list, ixlv_mac_filter); struct ixlv_vlan_filter { SLIST_ENTRY(ixlv_vlan_filter) next; u16 vlan; u16 flags; }; SLIST_HEAD(vlan_list, 
ixlv_vlan_filter); /* Software controller structure */ struct ixlv_sc { struct i40e_hw hw; struct i40e_osdep osdep; device_t dev; struct resource *pci_mem; struct resource *msix_mem; enum ixlv_state_t init_state; int init_in_progress; /* * Interrupt resources */ void *tag; struct resource *res; /* For the AQ */ struct ifmedia media; struct callout timer; int msix; int pf_version; int if_flags; bool link_up; enum virtchnl_link_speed link_speed; struct mtx mtx; u32 qbase; u32 admvec; struct timeout_task timeout; +#ifdef notyet struct task aq_irq; struct task aq_sched; - struct taskqueue *tq; +#endif struct ixl_vsi vsi; /* Filter lists */ struct mac_list *mac_filters; struct vlan_list *vlan_filters; /* Promiscuous mode */ u32 promiscuous_flags; /* Admin queue task flags */ u32 aq_wait_count; struct ixl_vc_mgr vc_mgr; struct ixl_vc_cmd add_mac_cmd; struct ixl_vc_cmd del_mac_cmd; struct ixl_vc_cmd config_queues_cmd; struct ixl_vc_cmd map_vectors_cmd; struct ixl_vc_cmd enable_queues_cmd; struct ixl_vc_cmd add_vlan_cmd; struct ixl_vc_cmd del_vlan_cmd; struct ixl_vc_cmd add_multi_cmd; struct ixl_vc_cmd del_multi_cmd; struct ixl_vc_cmd config_rss_key_cmd; struct ixl_vc_cmd get_rss_hena_caps_cmd; struct ixl_vc_cmd set_rss_hena_cmd; struct ixl_vc_cmd config_rss_lut_cmd; /* Virtual comm channel */ struct virtchnl_vf_resource *vf_res; struct virtchnl_vsi_resource *vsi_res; /* Misc stats maintained by the driver */ u64 watchdog_events; u64 admin_irq; u8 aq_buffer[IXL_AQ_BUF_SZ]; }; -#define IXLV_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED) /* ** This checks for a zero mac addr, something that will be likely ** unless the Admin on the Host has created one. */ static inline bool ixlv_check_ether_addr(u8 *addr) { bool status = TRUE; if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 && addr[3] == 0 && addr[4]== 0 && addr[5] == 0)) status = FALSE; return (status); } /* ** VF Common function prototypes */ +void ixlv_if_init(if_ctx_t ctx); + int ixlv_send_api_ver(struct ixlv_sc *); int ixlv_verify_api_ver(struct ixlv_sc *); int ixlv_send_vf_config_msg(struct ixlv_sc *); int ixlv_get_vf_config(struct ixlv_sc *); void ixlv_init(void *); int ixlv_reinit_locked(struct ixlv_sc *); void ixlv_configure_queues(struct ixlv_sc *); void ixlv_enable_queues(struct ixlv_sc *); void ixlv_disable_queues(struct ixlv_sc *); void ixlv_map_queues(struct ixlv_sc *); void ixlv_enable_intr(struct ixl_vsi *); void ixlv_disable_intr(struct ixl_vsi *); void ixlv_add_ether_filters(struct ixlv_sc *); void ixlv_del_ether_filters(struct ixlv_sc *); void ixlv_request_stats(struct ixlv_sc *); void ixlv_request_reset(struct ixlv_sc *); void ixlv_vc_completion(struct ixlv_sc *, enum virtchnl_ops, enum virtchnl_status_code, u8 *, u16); void ixlv_add_ether_filter(struct ixlv_sc *); void ixlv_add_vlans(struct ixlv_sc *); void ixlv_del_vlans(struct ixlv_sc *); void ixlv_update_stats_counters(struct ixlv_sc *, struct i40e_eth_stats *); void ixlv_update_link_status(struct ixlv_sc *); void ixlv_get_default_rss_key(u32 *, bool); void ixlv_config_rss_key(struct ixlv_sc *); void ixlv_set_rss_hena(struct ixlv_sc *); void ixlv_config_rss_lut(struct ixlv_sc *); #endif /* _IXLV_H_ */ Index: head/sys/dev/ixl/ixlvc.c =================================================================== --- head/sys/dev/ixl/ixlvc.c (revision 335337) +++ head/sys/dev/ixl/ixlvc.c (revision 335338) @@ -1,1258 +1,1272 @@ /****************************************************************************** Copyright (c) 2013-2017, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ /* ** Virtual Channel support ** These are support functions to communication ** between the VF and PF drivers. */ #include "ixl.h" #include "ixlv.h" #include "i40e_prototype.h" /* busy wait delay in msec */ #define IXLV_BUSY_WAIT_DELAY 10 #define IXLV_BUSY_WAIT_COUNT 50 static void ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t, enum virtchnl_status_code); static void ixl_vc_process_next(struct ixl_vc_mgr *mgr); static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr); static void ixl_vc_send_current(struct ixl_vc_mgr *mgr); #ifdef IXL_DEBUG /* ** Validate VF messages */ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode, u8 *msg, u16 msglen) { bool err_msg_format = false; int valid_len; /* Validate message length. 
*/ switch (v_opcode) { case VIRTCHNL_OP_VERSION: valid_len = sizeof(struct virtchnl_version_info); break; case VIRTCHNL_OP_RESET_VF: valid_len = 0; break; case VIRTCHNL_OP_GET_VF_RESOURCES: /* Valid length in api v1.0 is 0, v1.1 is 4 */ valid_len = 4; break; case VIRTCHNL_OP_CONFIG_TX_QUEUE: valid_len = sizeof(struct virtchnl_txq_info); break; case VIRTCHNL_OP_CONFIG_RX_QUEUE: valid_len = sizeof(struct virtchnl_rxq_info); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: valid_len = sizeof(struct virtchnl_vsi_queue_config_info); if (msglen >= valid_len) { struct virtchnl_vsi_queue_config_info *vqc = (struct virtchnl_vsi_queue_config_info *)msg; valid_len += (vqc->num_queue_pairs * sizeof(struct virtchnl_queue_pair_info)); if (vqc->num_queue_pairs == 0) err_msg_format = true; } break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: valid_len = sizeof(struct virtchnl_irq_map_info); if (msglen >= valid_len) { struct virtchnl_irq_map_info *vimi = (struct virtchnl_irq_map_info *)msg; valid_len += (vimi->num_vectors * sizeof(struct virtchnl_vector_map)); if (vimi->num_vectors == 0) err_msg_format = true; } break; case VIRTCHNL_OP_ENABLE_QUEUES: case VIRTCHNL_OP_DISABLE_QUEUES: valid_len = sizeof(struct virtchnl_queue_select); break; case VIRTCHNL_OP_ADD_ETH_ADDR: case VIRTCHNL_OP_DEL_ETH_ADDR: valid_len = sizeof(struct virtchnl_ether_addr_list); if (msglen >= valid_len) { struct virtchnl_ether_addr_list *veal = (struct virtchnl_ether_addr_list *)msg; valid_len += veal->num_elements * sizeof(struct virtchnl_ether_addr); if (veal->num_elements == 0) err_msg_format = true; } break; case VIRTCHNL_OP_ADD_VLAN: case VIRTCHNL_OP_DEL_VLAN: valid_len = sizeof(struct virtchnl_vlan_filter_list); if (msglen >= valid_len) { struct virtchnl_vlan_filter_list *vfl = (struct virtchnl_vlan_filter_list *)msg; valid_len += vfl->num_elements * sizeof(u16); if (vfl->num_elements == 0) err_msg_format = true; } break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: valid_len = sizeof(struct virtchnl_promisc_info); break; case VIRTCHNL_OP_GET_STATS: valid_len = sizeof(struct virtchnl_queue_select); break; /* These are always errors coming from the VF. */ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: default: return EPERM; break; } /* few more checks */ if ((valid_len != msglen) || (err_msg_format)) return EINVAL; else return 0; } #endif /* ** ixlv_send_pf_msg ** ** Send message to PF and print status if failure. */ static int ixlv_send_pf_msg(struct ixlv_sc *sc, enum virtchnl_ops op, u8 *msg, u16 len) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; i40e_status err; #ifdef IXL_DEBUG /* ** Pre-validating messages to the PF */ int val_err; val_err = ixl_vc_validate_vf_msg(sc, op, msg, len); if (val_err) device_printf(dev, "Error validating msg to PF for op %d," " msglen %d: error %d\n", op, len, val_err); #endif err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL); if (err) device_printf(dev, "Unable to send opcode %s to PF, " "status %s, aq error %s\n", ixl_vc_opcode_str(op), i40e_stat_str(hw, err), i40e_aq_str(hw, hw->aq.asq_last_status)); return err; } /* ** ixlv_send_api_ver ** ** Send API version admin queue message to the PF. The reply is not checked ** in this function. Returns 0 if the message was successfully ** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. 
*/ int ixlv_send_api_ver(struct ixlv_sc *sc) { struct virtchnl_version_info vvi; vvi.major = VIRTCHNL_VERSION_MAJOR; vvi.minor = VIRTCHNL_VERSION_MINOR; return ixlv_send_pf_msg(sc, VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi)); } /* ** ixlv_verify_api_ver ** ** Compare API versions with the PF. Must be called after admin queue is ** initialized. Returns 0 if API versions match, EIO if ** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty. */ int ixlv_verify_api_ver(struct ixlv_sc *sc) { struct virtchnl_version_info *pf_vvi; struct i40e_hw *hw = &sc->hw; struct i40e_arq_event_info event; device_t dev = sc->dev; i40e_status err; int retries = 0; event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); if (!event.msg_buf) { err = ENOMEM; goto out; } for (;;) { if (++retries > IXLV_AQ_MAX_ERR) goto out_alloc; /* Initial delay here is necessary */ i40e_msec_pause(100); err = i40e_clean_arq_element(hw, &event, NULL); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) continue; else if (err) { err = EIO; goto out_alloc; } if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != VIRTCHNL_OP_VERSION) { DDPRINTF(dev, "Received unexpected op response: %d\n", le32toh(event.desc.cookie_high)); /* Don't stop looking for expected response */ continue; } err = (i40e_status)le32toh(event.desc.cookie_low); if (err) { err = EIO; goto out_alloc; } else break; } pf_vvi = (struct virtchnl_version_info *)event.msg_buf; if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) || ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) && (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) { device_printf(dev, "Critical PF/VF API version mismatch!\n"); err = EIO; } else sc->pf_version = pf_vvi->minor; /* Log PF/VF api versions */ device_printf(dev, "PF API %d.%d / VF API %d.%d\n", pf_vvi->major, pf_vvi->minor, VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR); out_alloc: free(event.msg_buf, M_DEVBUF); out: return (err); } /* ** ixlv_send_vf_config_msg ** ** Send VF configuration request admin queue message to the PF. The reply ** is not checked in this function. Returns 0 if the message was ** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not. */ int ixlv_send_vf_config_msg(struct ixlv_sc *sc) { u32 caps; caps = VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF | VIRTCHNL_VF_OFFLOAD_VLAN; if (sc->pf_version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS) return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, NULL, 0); else return ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES, (u8 *)&caps, sizeof(caps)); } /* ** ixlv_get_vf_config ** ** Get VF configuration from PF and populate hw structure. Must be called after ** admin queue is initialized. Busy waits until response is received from PF, ** with maximum timeout. Response from PF is returned in the buffer for further ** processing by the caller. 
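ixlv_verify_api_ver above waits for the PF's VERSION reply by repeatedly cleaning the admin receive queue, pausing 100 ms between attempts, skipping replies for unrelated opcodes, and giving up after IXLV_AQ_MAX_ERR tries. A userspace sketch of that bounded poll loop, with a stubbed-out queue standing in for i40e_clean_arq_element():

#include <errno.h>
#include <stdio.h>

#define MAX_TRIES	200	/* mirrors the IXLV_AQ_MAX_ERR bound */
#define NO_WORK		1	/* stand-in for "admin queue empty" */
#define WANTED_OP	42	/* opcode we are waiting for */

/* Stub: pretend the reply shows up on the fifth poll. */
static int
poll_reply(int attempt, int *opcode)
{
	if (attempt < 5)
		return (NO_WORK);
	*opcode = WANTED_OP;
	return (0);
}

static int
wait_for_reply(void)
{
	int tries, op, status;

	for (tries = 0; tries < MAX_TRIES; tries++) {
		/* A real driver sleeps here (i40e_msec_pause(100)). */
		status = poll_reply(tries, &op);
		if (status == NO_WORK)
			continue;	/* nothing arrived yet, try again */
		if (op != WANTED_OP)
			continue;	/* unrelated message, keep waiting */
		return (0);		/* got the reply we wanted */
	}
	return (ETIMEDOUT);
}

int
main(void)
{
	printf("wait_for_reply() = %d\n", wait_for_reply());
	return (0);
}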
*/ int ixlv_get_vf_config(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; struct i40e_arq_event_info event; u16 len; i40e_status err = 0; u32 retries = 0; /* Note this assumes a single VSI */ len = sizeof(struct virtchnl_vf_resource) + sizeof(struct virtchnl_vsi_resource); event.buf_len = len; event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT); if (!event.msg_buf) { err = ENOMEM; goto out; } for (;;) { err = i40e_clean_arq_element(hw, &event, NULL); if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) { if (++retries <= IXLV_AQ_MAX_ERR) i40e_msec_pause(10); } else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) != VIRTCHNL_OP_GET_VF_RESOURCES) { DDPRINTF(dev, "Received a response from PF," " opcode %d, error %d", le32toh(event.desc.cookie_high), le32toh(event.desc.cookie_low)); retries++; continue; } else { err = (i40e_status)le32toh(event.desc.cookie_low); if (err) { device_printf(dev, "%s: Error returned from PF," " opcode %d, error %d\n", __func__, le32toh(event.desc.cookie_high), le32toh(event.desc.cookie_low)); err = EIO; goto out_alloc; } /* We retrieved the config message, with no errors */ break; } if (retries > IXLV_AQ_MAX_ERR) { INIT_DBG_DEV(dev, "Did not receive response after %d tries.", retries); err = ETIMEDOUT; goto out_alloc; } } memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len)); i40e_vf_parse_hw_config(hw, sc->vf_res); out_alloc: free(event.msg_buf, M_DEVBUF); out: return err; } /* ** ixlv_configure_queues ** ** Request that the PF set up our queues. */ void ixlv_configure_queues(struct ixlv_sc *sc) { device_t dev = sc->dev; struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; ++ if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx); ++ struct ixl_tx_queue *tx_que = vsi->tx_queues; ++ struct ixl_rx_queue *rx_que = vsi->rx_queues; struct tx_ring *txr; struct rx_ring *rxr; int len, pairs; struct virtchnl_vsi_queue_config_info *vqci; struct virtchnl_queue_pair_info *vqpi; - pairs = vsi->num_queues; ++ /* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX ++ * queues of a pair need to be configured */ ++ pairs = max(vsi->num_tx_queues, vsi->num_rx_queues); len = sizeof(struct virtchnl_vsi_queue_config_info) + (sizeof(struct virtchnl_queue_pair_info) * pairs); vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (!vqci) { device_printf(dev, "%s: unable to allocate memory\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } vqci->vsi_id = sc->vsi_res->vsi_id; vqci->num_queue_pairs = pairs; vqpi = vqci->qpair; /* Size check is not needed here - HW max is 16 queue pairs, and we * can fit info for 31 of them into the AQ buffer before it overflows. 
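ixlv_configure_queues sizes its admin-queue message as a fixed header plus one virtchnl_queue_pair_info per queue pair, and the comment above notes that with the 16-pair hardware limit the result always fits in the AQ buffer. A quick sketch of that header-plus-array sizing check; the structure sizes and the 4096-byte limit are assumptions for the example, not the real virtchnl definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the virtchnl structures; sizes are illustrative. */
struct qpair_info {
	uint8_t bytes[128];
};

struct vsi_queue_config_info {
	uint16_t vsi_id;
	uint16_t num_queue_pairs;
	struct qpair_info qpair[1];	/* variable-length tail */
};

#define AQ_BUF_SZ	4096	/* assumed buffer limit for the sketch */

int
main(void)
{
	size_t len;
	int pairs = 16;		/* "HW max is 16 queue pairs" */

	len = sizeof(struct vsi_queue_config_info) +
	    pairs * sizeof(struct qpair_info);
	printf("%d pairs -> %zu byte message (limit %d): %s\n",
	    pairs, len, AQ_BUF_SZ, len <= AQ_BUF_SZ ? "fits" : "too big");
	return (0);
}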
*/ - for (int i = 0; i < pairs; i++, que++, vqpi++) { - txr = &que->txr; - rxr = &que->rxr; ++ for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) { ++ txr = &tx_que->txr; ++ rxr = &rx_que->rxr; ++ vqpi->txq.vsi_id = vqci->vsi_id; vqpi->txq.queue_id = i; - vqpi->txq.ring_len = que->num_tx_desc; - vqpi->txq.dma_ring_addr = txr->dma.pa; ++ vqpi->txq.ring_len = scctx->isc_ntxd[0]; ++ vqpi->txq.dma_ring_addr = txr->tx_paddr; /* Enable Head writeback */ - if (vsi->enable_head_writeback) { - vqpi->txq.headwb_enabled = 1; - vqpi->txq.dma_headwb_addr = txr->dma.pa + - (que->num_tx_desc * sizeof(struct i40e_tx_desc)); - } + vqpi->txq.headwb_enabled = 0; + vqpi->txq.dma_headwb_addr = 0; vqpi->rxq.vsi_id = vqci->vsi_id; vqpi->rxq.queue_id = i; - vqpi->rxq.ring_len = que->num_rx_desc; - vqpi->rxq.dma_ring_addr = rxr->dma.pa; - vqpi->rxq.max_pkt_size = vsi->max_frame_size; ++ vqpi->rxq.ring_len = scctx->isc_nrxd[0]; ++ vqpi->rxq.dma_ring_addr = rxr->rx_paddr; ++ vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size; ++ // TODO: Get this value from iflib, somehow vqpi->rxq.databuffer_size = rxr->mbuf_sz; vqpi->rxq.splithdr_enabled = 0; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_VSI_QUEUES, (u8 *)vqci, len); free(vqci, M_DEVBUF); } /* ** ixlv_enable_queues ** ** Request that the PF enable all of our queues. */ void ixlv_enable_queues(struct ixlv_sc *sc) { struct virtchnl_queue_select vqs; vqs.vsi_id = sc->vsi_res->vsi_id; ++ /* XXX: In Linux PF, as long as neither of these is 0, ++ * every queue in VF VSI is enabled. */ vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; vqs.rx_queues = vqs.tx_queues; ixlv_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); } /* ** ixlv_disable_queues ** ** Request that the PF disable all of our queues. */ void ixlv_disable_queues(struct ixlv_sc *sc) { struct virtchnl_queue_select vqs; vqs.vsi_id = sc->vsi_res->vsi_id; ++ /* XXX: In Linux PF, as long as neither of these is 0, ++ * every queue in VF VSI is disabled. */ vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1; vqs.rx_queues = vqs.tx_queues; ixlv_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES, (u8 *)&vqs, sizeof(vqs)); } /* ** ixlv_map_queues ** ** Request that the PF map queues to interrupt vectors. Misc causes, including ** admin queue, are always mapped to vector 0. */ void ixlv_map_queues(struct ixlv_sc *sc) { struct virtchnl_irq_map_info *vm; int i, q, len; struct ixl_vsi *vsi = &sc->vsi; - struct ixl_queue *que = vsi->queues; ++ struct ixl_rx_queue *rx_que = vsi->rx_queues; ++ if_softc_ctx_t scctx = vsi->shared; ++ device_t dev = sc->dev; ++ ++ // XXX: What happens if we only get 1 MSI-X vector? ++ MPASS(scctx->isc_vectors > 1); /* How many queue vectors, adminq uses one */ - q = sc->msix - 1; ++ // XXX: How do we know how many interrupt vectors we have? 
++ q = scctx->isc_vectors - 1; len = sizeof(struct virtchnl_irq_map_info) + - (sc->msix * sizeof(struct virtchnl_vector_map)); ++ (scctx->isc_vectors * sizeof(struct i40e_virtchnl_vector_map)); vm = malloc(len, M_DEVBUF, M_NOWAIT); if (!vm) { - printf("%s: unable to allocate memory\n", __func__); ++ device_printf(dev, "%s: unable to allocate memory\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } - vm->num_vectors = sc->msix; ++ vm->num_vectors = scctx->isc_vectors; /* Queue vectors first */ - for (i = 0; i < q; i++, que++) { ++ for (i = 0; i < q; i++, rx_que++) { vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; vm->vecmap[i].vector_id = i + 1; /* first is adminq */ - vm->vecmap[i].txq_map = (1 << que->me); - vm->vecmap[i].rxq_map = (1 << que->me); + // vm->vecmap[i].txq_map = (1 << que->me); + vm->vecmap[i].rxq_map = (1 << rx_que->rxr.me); vm->vecmap[i].rxitr_idx = 0; vm->vecmap[i].txitr_idx = 1; } /* Misc vector last - this is only for AdminQ messages */ vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id; vm->vecmap[i].vector_id = 0; vm->vecmap[i].txq_map = 0; vm->vecmap[i].rxq_map = 0; vm->vecmap[i].rxitr_idx = 0; vm->vecmap[i].txitr_idx = 0; ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_IRQ_MAP, (u8 *)vm, len); free(vm, M_DEVBUF); } /* ** Scan the Filter List looking for vlans that need ** to be added, then create the data to hand to the AQ ** for handling. */ void ixlv_add_vlans(struct ixlv_sc *sc) { struct virtchnl_vlan_filter_list *v; struct ixlv_vlan_filter *f, *ftmp; device_t dev = sc->dev; int len, i = 0, cnt = 0; /* Get count of VLAN filters to add */ SLIST_FOREACH(f, sc->vlan_filters, next) { if (f->flags & IXL_FILTER_ADD) cnt++; } if (!cnt) { /* no work... */ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, VIRTCHNL_STATUS_SUCCESS); return; } len = sizeof(struct virtchnl_vlan_filter_list) + (cnt * sizeof(u16)); if (len > IXL_AQ_BUF_SZ) { device_printf(dev, "%s: Exceeded Max AQ Buf size\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } v = malloc(len, M_DEVBUF, M_NOWAIT); if (!v) { device_printf(dev, "%s: unable to allocate memory\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } v->vsi_id = sc->vsi_res->vsi_id; v->num_elements = cnt; /* Scan the filter array */ SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { if (f->flags & IXL_FILTER_ADD) { bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); f->flags = IXL_FILTER_USED; i++; } if (i == cnt) break; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len); free(v, M_DEVBUF); /* add stats? */ } /* ** Scan the Filter Table looking for vlans that need ** to be removed, then create the data to hand to the AQ ** for handling. */ void ixlv_del_vlans(struct ixlv_sc *sc) { device_t dev = sc->dev; struct virtchnl_vlan_filter_list *v; struct ixlv_vlan_filter *f, *ftmp; int len, i = 0, cnt = 0; /* Get count of VLAN filters to delete */ SLIST_FOREACH(f, sc->vlan_filters, next) { if (f->flags & IXL_FILTER_DEL) cnt++; } if (!cnt) { /* no work... 
*/ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, VIRTCHNL_STATUS_SUCCESS); return; } len = sizeof(struct virtchnl_vlan_filter_list) + (cnt * sizeof(u16)); if (len > IXL_AQ_BUF_SZ) { device_printf(dev, "%s: Exceeded Max AQ Buf size\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (!v) { device_printf(dev, "%s: unable to allocate memory\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } v->vsi_id = sc->vsi_res->vsi_id; v->num_elements = cnt; /* Scan the filter array */ SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) { if (f->flags & IXL_FILTER_DEL) { bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16)); i++; SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next); free(f, M_DEVBUF); } if (i == cnt) break; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len); free(v, M_DEVBUF); /* add stats? */ } /* ** This routine takes additions to the vsi filter ** table and creates an Admin Queue call to create ** the filters in the hardware. */ void ixlv_add_ether_filters(struct ixlv_sc *sc) { struct virtchnl_ether_addr_list *a; struct ixlv_mac_filter *f; device_t dev = sc->dev; int len, j = 0, cnt = 0; /* Get count of MAC addresses to add */ SLIST_FOREACH(f, sc->mac_filters, next) { if (f->flags & IXL_FILTER_ADD) cnt++; } if (cnt == 0) { /* Should not happen... */ DDPRINTF(dev, "cnt == 0, exiting..."); ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, VIRTCHNL_STATUS_SUCCESS); return; } len = sizeof(struct virtchnl_ether_addr_list) + (cnt * sizeof(struct virtchnl_ether_addr)); a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (a == NULL) { device_printf(dev, "%s: Failed to get memory for " "virtchnl_ether_addr_list\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } a->vsi_id = sc->vsi.id; a->num_elements = cnt; /* Scan the filter array */ SLIST_FOREACH(f, sc->mac_filters, next) { if (f->flags & IXL_FILTER_ADD) { bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN); f->flags &= ~IXL_FILTER_ADD; j++; DDPRINTF(dev, "ADD: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); } if (j == cnt) break; } DDPRINTF(dev, "len %d, j %d, cnt %d", len, j, cnt); ixlv_send_pf_msg(sc, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len); /* add stats? */ free(a, M_DEVBUF); return; } /* ** This routine takes filters flagged for deletion in the ** sc MAC filter list and creates an Admin Queue call ** to delete those filters in the hardware. 
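The VLAN and MAC filter routines above share one shape: scan the filter list to count the entries flagged for the operation, allocate a virtchnl list sized for exactly that count, then scan again copying the flagged entries and clearing or freeing them as they are packed. A compact sketch of that count-then-pack pattern over a plain array; the flag name and types are placeholders:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FILTER_ADD	0x01	/* placeholder flag: entry needs to be sent */

struct vlan_filter {
	uint16_t vlan;
	uint16_t flags;
};

int
main(void)
{
	struct vlan_filter filters[] = {
		{ 100, FILTER_ADD }, { 200, 0 }, { 300, FILTER_ADD },
	};
	int nfilters = 3, cnt = 0, i, j;
	uint16_t *msg;

	/* Pass 1: count the entries that need to go into the message. */
	for (i = 0; i < nfilters; i++)
		if (filters[i].flags & FILTER_ADD)
			cnt++;
	if (cnt == 0)
		return (0);		/* no work... */

	/* Pass 2: allocate the exact-sized list and pack it. */
	msg = malloc(cnt * sizeof(uint16_t));
	if (msg == NULL)
		return (1);
	for (i = j = 0; i < nfilters && j < cnt; i++) {
		if (filters[i].flags & FILTER_ADD) {
			msg[j++] = filters[i].vlan;
			filters[i].flags &= ~FILTER_ADD;	/* mark as handled */
		}
	}

	for (i = 0; i < j; i++)
		printf("send vlan %u\n", (unsigned int)msg[i]);
	free(msg);
	return (0);
}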
*/ void ixlv_del_ether_filters(struct ixlv_sc *sc) { struct virtchnl_ether_addr_list *d; device_t dev = sc->dev; struct ixlv_mac_filter *f, *f_temp; int len, j = 0, cnt = 0; /* Get count of MAC addresses to delete */ SLIST_FOREACH(f, sc->mac_filters, next) { if (f->flags & IXL_FILTER_DEL) cnt++; } if (cnt == 0) { DDPRINTF(dev, "cnt == 0, exiting..."); ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, VIRTCHNL_STATUS_SUCCESS); return; } len = sizeof(struct virtchnl_ether_addr_list) + (cnt * sizeof(struct virtchnl_ether_addr)); d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { device_printf(dev, "%s: Failed to get memory for " "virtchnl_ether_addr_list\n", __func__); ixl_vc_schedule_retry(&sc->vc_mgr); return; } d->vsi_id = sc->vsi.id; d->num_elements = cnt; /* Scan the filter array */ SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) { if (f->flags & IXL_FILTER_DEL) { bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN); DDPRINTF(dev, "DEL: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); j++; SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next); free(f, M_DEVBUF); } if (j == cnt) break; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len); /* add stats? */ free(d, M_DEVBUF); return; } /* ** ixlv_request_reset ** Request that the PF reset this VF. No response is expected. */ void ixlv_request_reset(struct ixlv_sc *sc) { /* ** Set the reset status to "in progress" before ** the request, this avoids any possibility of ** a mistaken early detection of completion. */ wr32(&sc->hw, I40E_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS); ixlv_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0); } /* ** ixlv_request_stats ** Request the statistics for this VF's VSI from PF. */ void ixlv_request_stats(struct ixlv_sc *sc) { struct virtchnl_queue_select vqs; int error = 0; vqs.vsi_id = sc->vsi_res->vsi_id; /* Low priority, we don't need to error check */ error = ixlv_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS, (u8 *)&vqs, sizeof(vqs)); #ifdef IXL_DEBUG if (error) device_printf(sc->dev, "Error sending stats request to PF: %d\n", error); #endif } /* ** Updates driver's stats counters with VSI stats returned from PF. 
*/ void ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es) { struct ixl_vsi *vsi = &sc->vsi; uint64_t tx_discards; tx_discards = es->tx_discards; +#if 0 for (int i = 0; i < vsi->num_queues; i++) tx_discards += sc->vsi.queues[i].txr.br->br_drops; +#endif /* Update ifnet stats */ IXL_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast + es->rx_broadcast); IXL_SET_OPACKETS(vsi, es->tx_unicast + es->tx_multicast + es->tx_broadcast); IXL_SET_IBYTES(vsi, es->rx_bytes); IXL_SET_OBYTES(vsi, es->tx_bytes); IXL_SET_IMCASTS(vsi, es->rx_multicast); IXL_SET_OMCASTS(vsi, es->tx_multicast); IXL_SET_OERRORS(vsi, es->tx_errors); IXL_SET_IQDROPS(vsi, es->rx_discards); IXL_SET_OQDROPS(vsi, tx_discards); IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol); IXL_SET_COLLISIONS(vsi, 0); vsi->eth_stats = *es; } void ixlv_config_rss_key(struct ixlv_sc *sc) { struct virtchnl_rss_key *rss_key_msg; int msg_len, key_length; u8 rss_seed[IXL_RSS_KEY_SIZE]; #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #else ixl_get_default_rss_key((u32 *)rss_seed); #endif /* Send the fetched key */ key_length = IXL_RSS_KEY_SIZE; msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1; rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO); if (rss_key_msg == NULL) { device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n"); return; } rss_key_msg->vsi_id = sc->vsi_res->vsi_id; rss_key_msg->key_len = key_length; bcopy(rss_seed, &rss_key_msg->key[0], key_length); DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d", rss_key_msg->vsi_id, rss_key_msg->key_len); ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY, (u8 *)rss_key_msg, msg_len); free(rss_key_msg, M_DEVBUF); } void ixlv_set_rss_hena(struct ixlv_sc *sc) { struct virtchnl_rss_hena hena; ++ struct i40e_hw *hw = &sc->hw; - hena.hena = IXL_DEFAULT_RSS_HENA_X722; ++ if (hw->mac.type == I40E_MAC_X722_VF) ++ hena.hena = IXL_DEFAULT_RSS_HENA_X722; ++ else ++ hena.hena = IXL_DEFAULT_RSS_HENA_XL710; ixlv_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA, (u8 *)&hena, sizeof(hena)); } void ixlv_config_rss_lut(struct ixlv_sc *sc) { struct virtchnl_rss_lut *rss_lut_msg; int msg_len; u16 lut_length; u32 lut; int i, que_id; lut_length = IXL_RSS_VSI_LUT_SIZE; msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1; rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO); if (rss_lut_msg == NULL) { device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n"); return; } rss_lut_msg->vsi_id = sc->vsi_res->vsi_id; /* Each LUT entry is a max of 1 byte, so this is easy */ rss_lut_msg->lut_entries = lut_length; /* Populate the LUT with max no. of queues in round robin fashion */ for (i = 0; i < lut_length; i++) { #ifdef RSS /* * Fetch the RSS bucket id for the given indirection entry. * Cap it at the number of configured buckets (which is * num_queues.) */ que_id = rss_get_indirection_to_bucket(i); - que_id = que_id % sc->vsi.num_queues; ++ que_id = que_id % sc->vsi.num_rx_queues; #else - que_id = i % sc->vsi.num_queues; ++ que_id = i % sc->vsi.num_rx_queues; #endif lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK; rss_lut_msg->lut[i] = lut; } ixlv_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT, (u8 *)rss_lut_msg, msg_len); free(rss_lut_msg, M_DEVBUF); } /* ** ixlv_vc_completion ** ** Asynchronous completion function for admin queue messages. Rather than busy ** wait, we fire off our requests and assume that no errors will be returned. ** This function handles the reply messages. 
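ixlv_config_rss_lut above fills the VSI lookup table by assigning each entry a receive-queue index, either round-robin or taken from the kernel RSS indirection table when RSS is compiled in, and masks the result to the width the hardware accepts. A short sketch of the round-robin fill; the table size and entry mask here are assumed values, not the IXL_RSS_VSI_LUT_* constants:

#include <stdint.h>
#include <stdio.h>

#define LUT_SIZE	64	/* assumed LUT size for the sketch */
#define LUT_ENTRY_MASK	0x3F	/* assumed per-entry width */

int
main(void)
{
	uint8_t lut[LUT_SIZE];
	int num_rx_queues = 4;
	int i;

	/* Spread the receive queues evenly over the indirection table. */
	for (i = 0; i < LUT_SIZE; i++)
		lut[i] = (i % num_rx_queues) & LUT_ENTRY_MASK;

	for (i = 0; i < 8; i++)
		printf("lut[%d] = %u\n", i, (unsigned int)lut[i]);
	return (0);
}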
*/ void ixlv_vc_completion(struct ixlv_sc *sc, enum virtchnl_ops v_opcode, enum virtchnl_status_code v_retval, u8 *msg, u16 msglen) { device_t dev = sc->dev; struct ixl_vsi *vsi = &sc->vsi; if (v_opcode == VIRTCHNL_OP_EVENT) { struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; switch (vpe->event) { case VIRTCHNL_EVENT_LINK_CHANGE: #ifdef IXL_DEBUG device_printf(dev, "Link change: status %d, speed %d\n", vpe->event_data.link_event.link_status, vpe->event_data.link_event.link_speed); #endif sc->link_up = vpe->event_data.link_event.link_status; sc->link_speed = vpe->event_data.link_event.link_speed; ixlv_update_link_status(sc); break; case VIRTCHNL_EVENT_RESET_IMPENDING: device_printf(dev, "PF initiated reset!\n"); sc->init_state = IXLV_RESET_PENDING; - mtx_unlock(&sc->mtx); - ixlv_init(vsi); - mtx_lock(&sc->mtx); ++ // mtx_unlock(&sc->mtx); ++ ixlv_if_init(sc->vsi.ctx); ++ // mtx_lock(&sc->mtx); break; default: device_printf(dev, "%s: Unknown event %d from AQ\n", __func__, vpe->event); break; } return; } /* Catch-all error response */ if (v_retval) { device_printf(dev, "%s: AQ returned error %s to our request %s!\n", __func__, i40e_vc_stat_str(&sc->hw, v_retval), ixl_vc_opcode_str(v_opcode)); } #ifdef IXL_DEBUG if (v_opcode != VIRTCHNL_OP_GET_STATS) DDPRINTF(dev, "opcode %d", v_opcode); #endif switch (v_opcode) { case VIRTCHNL_OP_GET_STATS: ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg); break; case VIRTCHNL_OP_ADD_ETH_ADDR: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER, v_retval); if (v_retval) { device_printf(dev, "WARNING: Error adding VF mac filter!\n"); device_printf(dev, "WARNING: Device may not receive traffic!\n"); } break; case VIRTCHNL_OP_DEL_ETH_ADDR: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER, v_retval); break; case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC, v_retval); break; case VIRTCHNL_OP_ADD_VLAN: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER, v_retval); break; case VIRTCHNL_OP_DEL_VLAN: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER, v_retval); break; case VIRTCHNL_OP_ENABLE_QUEUES: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES, v_retval); if (v_retval == 0) { /* Update link status */ ixlv_update_link_status(sc); /* Turn on all interrupts */ ixlv_enable_intr(vsi); /* And inform the stack we're ready */ - vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING; + // vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING; /* TODO: Clear a state flag, so we know we're ready to run init again */ } break; case VIRTCHNL_OP_DISABLE_QUEUES: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES, v_retval); if (v_retval == 0) { /* Turn off all interrupts */ ixlv_disable_intr(vsi); /* Tell the stack that the interface is no longer active */ vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING); } break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES, v_retval); break; case VIRTCHNL_OP_CONFIG_IRQ_MAP: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS, v_retval); break; case VIRTCHNL_OP_CONFIG_RSS_KEY: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY, v_retval); break; case VIRTCHNL_OP_SET_RSS_HENA: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA, v_retval); break; case VIRTCHNL_OP_CONFIG_RSS_LUT: ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT, v_retval); break; default: #ifdef IXL_DEBUG device_printf(dev, "%s: Received unexpected message %s from PF.\n", 
__func__, ixl_vc_opcode_str(v_opcode)); #endif break; } return; } static void ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request) { switch (request) { case IXLV_FLAG_AQ_MAP_VECTORS: ixlv_map_queues(sc); break; case IXLV_FLAG_AQ_ADD_MAC_FILTER: ixlv_add_ether_filters(sc); break; case IXLV_FLAG_AQ_ADD_VLAN_FILTER: ixlv_add_vlans(sc); break; case IXLV_FLAG_AQ_DEL_MAC_FILTER: ixlv_del_ether_filters(sc); break; case IXLV_FLAG_AQ_DEL_VLAN_FILTER: ixlv_del_vlans(sc); break; case IXLV_FLAG_AQ_CONFIGURE_QUEUES: ixlv_configure_queues(sc); break; case IXLV_FLAG_AQ_DISABLE_QUEUES: ixlv_disable_queues(sc); break; case IXLV_FLAG_AQ_ENABLE_QUEUES: ixlv_enable_queues(sc); break; case IXLV_FLAG_AQ_CONFIG_RSS_KEY: ixlv_config_rss_key(sc); break; case IXLV_FLAG_AQ_SET_RSS_HENA: ixlv_set_rss_hena(sc); break; case IXLV_FLAG_AQ_CONFIG_RSS_LUT: ixlv_config_rss_lut(sc); break; } } void ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr) { mgr->sc = sc; mgr->current = NULL; TAILQ_INIT(&mgr->pending); callout_init_mtx(&mgr->callout, &sc->mtx, 0); } static void ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err) { struct ixl_vc_cmd *cmd; cmd = mgr->current; mgr->current = NULL; cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; cmd->callback(cmd, cmd->arg, err); ixl_vc_process_next(mgr); } static void ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request, enum virtchnl_status_code err) { struct ixl_vc_cmd *cmd; cmd = mgr->current; if (cmd == NULL || cmd->request != request) return; callout_stop(&mgr->callout); /* ATM, the virtchnl codes map to i40e ones directly */ ixl_vc_process_completion(mgr, (enum i40e_status_code)err); } static void ixl_vc_cmd_timeout(void *arg) { struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; - IXLV_CORE_LOCK_ASSERT(mgr->sc); ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT); } static void ixl_vc_cmd_retry(void *arg) { struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg; - IXLV_CORE_LOCK_ASSERT(mgr->sc); ixl_vc_send_current(mgr); } static void ixl_vc_send_current(struct ixl_vc_mgr *mgr) { struct ixl_vc_cmd *cmd; cmd = mgr->current; ixl_vc_send_cmd(mgr->sc, cmd->request); callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr); } static void ixl_vc_process_next(struct ixl_vc_mgr *mgr) { struct ixl_vc_cmd *cmd; if (mgr->current != NULL) return; if (TAILQ_EMPTY(&mgr->pending)) return; cmd = TAILQ_FIRST(&mgr->pending); TAILQ_REMOVE(&mgr->pending, cmd, next); mgr->current = cmd; ixl_vc_send_current(mgr); } static void ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr) { callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr); } void ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd, uint32_t req, ixl_vc_callback_t *callback, void *arg) { - IXLV_CORE_LOCK_ASSERT(mgr->sc); - if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) { if (mgr->current == cmd) mgr->current = NULL; else TAILQ_REMOVE(&mgr->pending, cmd, next); } cmd->request = req; cmd->callback = callback; cmd->arg = arg; cmd->flags |= IXLV_VC_CMD_FLAG_BUSY; TAILQ_INSERT_TAIL(&mgr->pending, cmd, next); ixl_vc_process_next(mgr); } void ixl_vc_flush(struct ixl_vc_mgr *mgr) { struct ixl_vc_cmd *cmd; - IXLV_CORE_LOCK_ASSERT(mgr->sc); KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL, ("ixlv: pending commands waiting but no command in progress")); cmd = mgr->current; if (cmd != NULL) { mgr->current = NULL; cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); } while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) { TAILQ_REMOVE(&mgr->pending, cmd, 
next); cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY; cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED); } callout_stop(&mgr->callout); } Index: head/sys/modules/Makefile =================================================================== --- head/sys/modules/Makefile (revision 335337) +++ head/sys/modules/Makefile (revision 335338) @@ -1,843 +1,842 @@ # $FreeBSD$ SYSDIR?=${SRCTOP}/sys .include "${SYSDIR}/conf/kern.opts.mk" SUBDIR_PARALLEL= # Modules that include binary-only blobs of microcode should be selectable by # MK_SOURCELESS_UCODE option (see below). .if defined(MODULES_OVERRIDE) && !defined(ALL_MODULES) SUBDIR=${MODULES_OVERRIDE} .else SUBDIR= \ ${_3dfx} \ ${_3dfx_linux} \ ${_aac} \ ${_aacraid} \ accf_data \ accf_dns \ accf_http \ acl_nfs4 \ acl_posix1e \ ${_acpi} \ ae \ ${_aesni} \ age \ ${_agp} \ aha \ ahci \ ${_aic} \ aic7xxx \ alc \ ale \ alq \ ${_amd_ecc_inject} \ ${_amdsbwd} \ ${_amdsmn} \ ${_amdtemp} \ amr \ ${_an} \ ${_aout} \ ${_apm} \ ${_arcmsr} \ ${_allwinner} \ ${_armv8crypto} \ ${_asmc} \ ata \ ath \ ath_dfs \ ath_hal \ ath_hal_ar5210 \ ath_hal_ar5211 \ ath_hal_ar5212 \ ath_hal_ar5416 \ ath_hal_ar9300 \ ath_main \ ath_rate \ ath_pci \ ${_autofs} \ ${_auxio} \ ${_bce} \ ${_bcm283x_clkman} \ ${_bcm283x_pwm} \ bfe \ bge \ bhnd \ ${_bxe} \ ${_bios} \ ${_bktr} \ ${_blake2} \ ${_bm} \ bnxt \ bridgestp \ bwi \ bwn \ ${_bytgpio} \ ${_chvgpio} \ cam \ ${_cardbus} \ ${_carp} \ cas \ ${_cbb} \ cc \ ${_ccp} \ cd9660 \ cd9660_iconv \ ${_ce} \ ${_cfi} \ ${_chromebook_platform} \ ${_ciss} \ cloudabi \ ${_cloudabi32} \ ${_cloudabi64} \ ${_cmx} \ ${_coff} \ ${_coretemp} \ ${_cp} \ ${_cpsw} \ ${_cpuctl} \ ${_cpufreq} \ ${_crypto} \ ${_cryptodev} \ ${_cs} \ ${_ctau} \ ctl \ ${_cxgb} \ ${_cxgbe} \ dc \ dcons \ dcons_crom \ de \ ${_dpms} \ ${_dpt} \ ${_drm} \ ${_drm2} \ dummynet \ ${_ed} \ ${_efirt} \ ${_em} \ ${_ena} \ ${_ep} \ ${_epic} \ epoch_test \ esp \ ${_et} \ evdev \ ${_ex} \ ${_exca} \ ext2fs \ fdc \ fdescfs \ ${_fe} \ ${_ffec} \ filemon \ firewire \ firmware \ fuse \ ${_fxp} \ gem \ geom \ ${_glxiic} \ ${_glxsb} \ gpio \ hifn \ hme \ ${_hpt27xx} \ ${_hptiop} \ ${_hptmv} \ ${_hptnr} \ ${_hptrr} \ hwpmc \ ${_hwpmc_mips24k} \ ${_hwpmc_mips74k} \ ${_hyperv} \ i2c \ ${_ibcore} \ ${_ibcs2} \ ${_ichwd} \ ${_ida} \ if_bridge \ if_disc \ if_edsc \ ${_if_enc} \ if_epair \ ${_if_gif} \ ${_if_gre} \ ${_if_me} \ if_lagg \ ${_if_ndis} \ ${_if_stf} \ if_tap \ if_tun \ if_vlan \ if_vxlan \ ${_iir} \ imgact_binmisc \ ${_intelspi} \ ${_io} \ ${_ioat} \ ${_ipoib} \ ${_ipdivert} \ ${_ipfilter} \ ${_ipfw} \ ipfw_nat \ ${_ipfw_nat64} \ ${_ipfw_nptv6} \ ${_ipfw_pmod} \ ${_ipmi} \ ip6_mroute_mod \ ip_mroute_mod \ ${_ips} \ ${_ipsec} \ ${_ipw} \ ${_ipwfw} \ ${_isci} \ ${_iser} \ isp \ ${_ispfw} \ ${_iwi} \ ${_iwifw} \ ${_iwm} \ ${_iwmfw} \ ${_iwn} \ ${_iwnfw} \ ${_ix} \ ${_ixv} \ ${_ixl} \ - ${_ixlv} \ jme \ joy \ kbdmux \ kgssapi \ kgssapi_krb5 \ khelp \ krpc \ ksyms \ le \ lge \ libalias \ libiconv \ libmchain \ ${_linprocfs} \ ${_linsysfs} \ ${_linux} \ ${_linux_common} \ ${_linux64} \ linuxkpi \ ${_lio} \ lpt \ mac_biba \ mac_bsdextended \ mac_ifoff \ mac_lomac \ mac_mls \ mac_none \ mac_partition \ mac_portacl \ mac_seeotheruids \ mac_stub \ mac_test \ malo \ md \ mdio \ mem \ mfi \ mii \ mlx \ ${_mlx4} \ ${_mlx4ib} \ ${_mlx4en} \ ${_mlx5} \ ${_mlx5en} \ ${_mlx5ib} \ ${_mly} \ mmc \ mmcsd \ mpr \ mps \ mpt \ mqueue \ mrsas \ msdosfs \ msdosfs_iconv \ ${_mse} \ msk \ ${_mthca} \ mvs \ mwl \ ${_mwlfw} \ mxge \ my \ ${_nandfs} \ ${_nandsim} \ ${_ncr} \ ${_nctgpio} \ ${_ncv} \ ${_ndis} \ ${_netgraph} \ 
	${_nfe} \
	nfscl \
	nfscommon \
	nfsd \
	nfslock \
	nfslockd \
	nfssvc \
	nge \
	nmdm \
	${_nsp} \
	nullfs \
	${_ntb} \
	${_nvd} \
	${_nvme} \
	${_nvram} \
	oce \
	${_ocs_fc} \
	otus \
	${_otusfw} \
	ow \
	${_padlock} \
	${_padlock_rng} \
	${_pccard} \
	${_pcfclock} \
	pcn \
	${_pf} \
	${_pflog} \
	${_pfsync} \
	plip \
	${_pms} \
	ppbus \
	ppc \
	ppi \
	pps \
	procfs \
	proto \
	pseudofs \
	${_pst} \
	pty \
	puc \
	${_qlxge} \
	${_qlxgb} \
	${_qlxgbe} \
	${_qlnx} \
	ral \
	${_ralfw} \
	${_random_fortuna} \
	${_random_yarrow} \
	${_random_other} \
	rc4 \
	${_rdma} \
	${_rdrand_rng} \
	re \
	rl \
	${_rockchip} \
	rtwn \
	rtwn_pci \
	rtwn_usb \
	${_rtwnfw} \
	${_s3} \
	${_safe} \
	${_sbni} \
	scc \
	${_scsi_low} \
	sdhci \
	${_sdhci_acpi} \
	sdhci_pci \
	sem \
	send \
	${_sf} \
	${_sfxge} \
	sge \
	${_sgx} \
	${_sgx_linux} \
	siftr \
	siis \
	sis \
	sk \
	${_smartpqi} \
	smbfs \
	sn \
	snp \
	sound \
	${_speaker} \
	spi \
	${_splash} \
	${_sppp} \
	ste \
	${_stg} \
	stge \
	${_sym} \
	${_syscons} \
	sysvipc \
	tcp \
	${_ti} \
	tl \
	tmpfs \
	${_toecore} \
	${_tpm} \
	trm \
	${_twa} \
	twe \
	tws \
	tx \
	${_txp} \
	uart \
	ubsec \
	udf \
	udf_iconv \
	ufs \
	uinput \
	unionfs \
	usb \
	${_vesa} \
	${_virtio} \
	vge \
	${_viawd} \
	videomode \
	vkbd \
	${_vmm} \
	${_vmware} \
	${_vpo} \
	vr \
	vte \
	vx \
	wb \
	${_wbwd} \
	${_wi} \
	wlan \
	wlan_acl \
	wlan_amrr \
	wlan_ccmp \
	wlan_rssadapt \
	wlan_tkip \
	wlan_wep \
	wlan_xauth \
	${_wpi} \
	${_wpifw} \
	${_x86bios} \
	${_xe} \
	xl \
	zlib

.if ${MK_AUTOFS} != "no" || defined(ALL_MODULES)
_autofs=	autofs
.endif

.if ${MK_CDDL} != "no" || defined(ALL_MODULES)
.if (${MACHINE_CPUARCH} != "arm" || ${MACHINE_ARCH:Marmv[67]*} != "") && \
	${MACHINE_CPUARCH} != "mips" && \
	${MACHINE_CPUARCH} != "sparc64"
SUBDIR+=	dtrace
.endif
SUBDIR+=	opensolaris
.endif

.if ${MK_CRYPT} != "no" || defined(ALL_MODULES)
.if exists(${SRCTOP}/sys/opencrypto)
_crypto=	crypto
_cryptodev=	cryptodev
_random_fortuna=random_fortuna
_random_yarrow=	random_yarrow
_random_other=	random_other
.endif
.endif

.if ${MK_CUSE} != "no" || defined(ALL_MODULES)
SUBDIR+=	cuse
.endif

.if (${MK_INET_SUPPORT} != "no" || ${MK_INET6_SUPPORT} != "no") || \
	defined(ALL_MODULES)
_carp=		carp
_toecore=	toecore
_if_enc=	if_enc
_if_gif=	if_gif
_if_gre=	if_gre
_ipfw_pmod=	ipfw_pmod
.if ${MK_IPSEC_SUPPORT} != "no"
_ipsec=		ipsec
.endif
.endif

.if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \
	defined(ALL_MODULES)
_if_stf=	if_stf
.endif

.if ${MK_INET_SUPPORT} != "no" || defined(ALL_MODULES)
_if_me=		if_me
_ipdivert=	ipdivert
_ipfw=		ipfw
.if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES)
_ipfw_nat64=	ipfw_nat64
.endif
.endif

.if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES)
_ipfw_nptv6=	ipfw_nptv6
.endif

.if ${MK_IPFILTER} != "no" || defined(ALL_MODULES)
_ipfilter=	ipfilter
.endif

.if ${MK_ISCSI} != "no" || defined(ALL_MODULES)
SUBDIR+=	cfiscsi
SUBDIR+=	iscsi
SUBDIR+=	iscsi_initiator
.endif

.if ${MK_NAND} != "no" || defined(ALL_MODULES)
_nandfs=	nandfs
_nandsim=	nandsim
.endif

.if ${MK_NETGRAPH} != "no" || defined(ALL_MODULES)
_netgraph=	netgraph
.endif

.if (${MK_PF} != "no" && (${MK_INET_SUPPORT} != "no" || \
	${MK_INET6_SUPPORT} != "no")) || defined(ALL_MODULES)
_pf=		pf
_pflog=		pflog
.if ${MK_INET_SUPPORT} != "no"
_pfsync=	pfsync
.endif
.endif

.if ${MK_SOURCELESS_UCODE} != "no"
_bce=		bce
_fxp=		fxp
_ispfw=		ispfw
_sf=		sf
_ti=		ti
_txp=		txp
.if ${MACHINE_CPUARCH} != "mips"
_mwlfw=		mwlfw
_otusfw=	otusfw
_ralfw=		ralfw
_rtwnfw=	rtwnfw
.endif
.endif

.if ${MK_SOURCELESS_UCODE} != "no" && ${MACHINE_CPUARCH} != "arm" && \
	${MACHINE_CPUARCH} != "mips" && \
	${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \
	${MACHINE_CPUARCH} != "riscv"
_cxgbe=		cxgbe
.endif

.if ${MK_TESTS} != "no" || defined(ALL_MODULES)
SUBDIR+=	tests
.endif

.if ${MK_ZFS} != "no" || defined(ALL_MODULES)
SUBDIR+=	zfs
.endif

.if (${MACHINE_CPUARCH} == "mips" && ${MACHINE_ARCH:Mmips64} == "")
_hwpmc_mips24k=	hwpmc_mips24k
_hwpmc_mips74k=	hwpmc_mips74k
.endif

.if ${MACHINE_CPUARCH} != "aarch64" && ${MACHINE_CPUARCH} != "arm" && \
	${MACHINE_CPUARCH} != "mips" && ${MACHINE_CPUARCH} != "powerpc" && \
	${MACHINE_CPUARCH} != "riscv"
_syscons=	syscons
_vpo=		vpo
.endif

.if ${MACHINE_CPUARCH} != "mips"
# no BUS_SPACE_UNSPECIFIED
# No barrier instruction support (specific to this driver)
_sym=		sym
# intr_disable() is a macro, causes problems
.if ${MK_SOURCELESS_UCODE} != "no"
_cxgb=		cxgb
.endif
.endif

.if ${MACHINE_CPUARCH} == "aarch64"
_allwinner=	allwinner
_armv8crypto=	armv8crypto
_efirt=		efirt
_em=		em
_rockchip=	rockchip
.endif

.if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64"
_agp=		agp
_an=		an
_aout=		aout
_bios=		bios
_bktr=		bktr
.if ${MK_SOURCELESS_UCODE} != "no"
_bxe=		bxe
.endif
_cardbus=	cardbus
_cbb=		cbb
_cpuctl=	cpuctl
_cpufreq=	cpufreq
_cs=		cs
_dpms=		dpms
_drm=		drm
_drm2=		drm2
_ed=		ed
_em=		em
_ena=		ena
_ep=		ep
_et=		et
_exca=		exca
_fe=		fe
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_ibcore=	ibcore
.endif
_if_ndis=	if_ndis
_io=		io
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_ipoib=		ipoib
_iser=		iser
.endif
_ix=		ix
_ixv=		ixv
_linprocfs=	linprocfs
_linsysfs=	linsysfs
_linux=		linux
.if ${MK_SOURCELESS_UCODE} != "no"
_lio=		lio
.endif
_nctgpio=	nctgpio
_ndis=		ndis
_ocs_fc=	ocs_fc
_pccard=	pccard
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_rdma=		rdma
.endif
_safe=		safe
_scsi_low=	scsi_low
_speaker=	speaker
_splash=	splash
_sppp=		sppp
_vmware=	vmware
_wbwd=		wbwd
_wi=		wi
_xe=		xe
_aac=		aac
_aacraid=	aacraid
_acpi=		acpi
.if ${MK_CRYPT} != "no" || defined(ALL_MODULES)
.if ${COMPILER_TYPE} != "gcc" || ${COMPILER_VERSION} > 40201
_aesni=		aesni
.endif
.endif
_amd_ecc_inject=amd_ecc_inject
_amdsbwd=	amdsbwd
_amdsmn=	amdsmn
_amdtemp=	amdtemp
_arcmsr=	arcmsr
_asmc=		asmc
.if ${MK_CRYPT} != "no" || defined(ALL_MODULES)
_blake2=	blake2
.endif
_bytgpio=	bytgpio
_chvgpio=	chvgpio
_ciss=		ciss
_chromebook_platform=	chromebook_platform
_cmx=		cmx
_coretemp=	coretemp
.if ${MK_SOURCELESS_HOST} != "no"
_hpt27xx=	hpt27xx
.endif
_hptiop=	hptiop
.if ${MK_SOURCELESS_HOST} != "no"
_hptmv=		hptmv
_hptnr=		hptnr
_hptrr=		hptrr
.endif
_hyperv=	hyperv
_ichwd=		ichwd
_ida=		ida
_iir=		iir
_intelspi=	intelspi
_ipmi=		ipmi
_ips=		ips
_isci=		isci
_ipw=		ipw
_iwi=		iwi
_iwm=		iwm
_iwn=		iwn
.if ${MK_SOURCELESS_UCODE} != "no"
_ipwfw=		ipwfw
_iwifw=		iwifw
_iwmfw=		iwmfw
_iwnfw=		iwnfw
.endif
_mlx4=		mlx4
_mlx5=		mlx5
.if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \
	defined(ALL_MODULES)
_mlx4en=	mlx4en
_mlx5en=	mlx5en
.endif
.if ${MK_OFED} != "no" || defined(ALL_MODULES)
_mthca=		mthca
_mlx4ib=	mlx4ib
_mlx5ib=	mlx5ib
.endif
_mly=		mly
_nfe=		nfe
_nvd=		nvd
_nvme=		nvme
_nvram=		nvram
.if ${MK_CRYPT} != "no" || defined(ALL_MODULES)
_padlock=	padlock
_padlock_rng=	padlock_rng
_rdrand_rng=	rdrand_rng
.endif
_s3=		s3
_sdhci_acpi=	sdhci_acpi
_tpm=		tpm
_twa=		twa
_vesa=		vesa
_viawd=		viawd
_virtio=	virtio
_wpi=		wpi
.if ${MK_SOURCELESS_UCODE} != "no"
_wpifw=		wpifw
.endif
_x86bios=	x86bios
.endif

.if ${MACHINE_CPUARCH} == "amd64"
_ccp=		ccp
_efirt=		efirt
_ioat=		ioat
_ixl=		ixl
_ixlv=		ixlv
_linux64=	linux64
_linux_common=	linux_common
_ntb=		ntb
_pms=		pms
_qlxge=		qlxge
_qlxgb=		qlxgb
.if ${MK_SOURCELESS_UCODE} != "no"
_qlxgbe=	qlxgbe
_qlnx=		qlnx
.endif
_sfxge=		sfxge
_sgx=		sgx
_sgx_linux=	sgx_linux
_smartpqi=	smartpqi

.if ${MK_BHYVE} != "no" || defined(ALL_MODULES)
_vmm=		vmm
.endif
.endif

.if ${MACHINE_CPUARCH} == "i386"
# XXX some of these can move to the general case when de-i386'ed
# XXX some of these can move now, but are untested on other architectures.
_3dfx=		3dfx
_3dfx_linux=	3dfx_linux
_aic=		aic
_apm=		apm
.if ${MK_SOURCELESS_UCODE} != "no"
_ce=		ce
.endif
_coff=		coff
.if ${MK_SOURCELESS_UCODE} != "no"
_cp=		cp
.endif
_glxiic=	glxiic
_glxsb=		glxsb
#_ibcs2=	ibcs2
_mse=		mse
_ncr=		ncr
_ncv=		ncv
_nsp=		nsp
_pcfclock=	pcfclock
_pst=		pst
_sbni=		sbni
_stg=		stg
.if ${MK_SOURCELESS_UCODE} != "no"
_ctau=		ctau
.endif
_dpt=		dpt
_ex=		ex
.endif

.if ${MACHINE_CPUARCH} == "arm"
_cfi=		cfi
_cpsw=		cpsw
.endif

.if ${MACHINE_CPUARCH} == "powerpc"
_agp=		agp
_an=		an
_bm=		bm
_cardbus=	cardbus
_cbb=		cbb
_cfi=		cfi
_cpufreq=	cpufreq
_drm=		drm
_exca=		exca
_ffec=		ffec
_nvd=		nvd
_nvme=		nvme
_pccard=	pccard
_wi=		wi
.endif

.if ${MACHINE_ARCH} == "powerpc64"
_drm2=		drm2
.endif
.if ${MACHINE_ARCH} == "powerpc64" || ${MACHINE_ARCH} == "powerpc"
# Don't build powermac_nvram for powerpcspe, it's never supported.
_nvram=		powermac_nvram
.endif

.if ${MACHINE_CPUARCH} == "sparc64"
_auxio=		auxio
_em=		em
_epic=		epic
.endif

.if (${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \
	${MACHINE_ARCH:Marmv[67]*} != "" || ${MACHINE_CPUARCH} == "i386")
_cloudabi32=	cloudabi32
.endif
.if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64"
_cloudabi64=	cloudabi64
.endif

.endif

.if ${MACHINE_ARCH:Marmv[67]*} != "" || ${MACHINE_CPUARCH} == "aarch64"
_bcm283x_clkman=	bcm283x_clkman
_bcm283x_pwm=	bcm283x_pwm
.endif

SUBDIR+=${MODULES_EXTRA}

.for reject in ${WITHOUT_MODULES}
SUBDIR:= ${SUBDIR:N${reject}}
.endfor

# Calling kldxref(8) for each module is expensive.
.if !defined(NO_XREF)
.MAKEFLAGS+=	-DNO_XREF
afterinstall: .PHONY
	@if type kldxref >/dev/null 2>&1; then \
		${ECHO} kldxref ${DESTDIR}${KMODDIR}; \
		kldxref ${DESTDIR}${KMODDIR}; \
	fi
.endif

.include "${SYSDIR}/conf/config.mk"

SUBDIR:= ${SUBDIR:u:O}

.include <bsd.subdir.mk>
Index: head/sys/modules/ixl/Makefile
===================================================================
--- head/sys/modules/ixl/Makefile	(revision 335337)
+++ head/sys/modules/ixl/Makefile	(revision 335338)
@@ -1,18 +1,20 @@
#$FreeBSD$

.PATH: ${SRCTOP}/sys/dev/ixl

KMOD	= if_ixl
-SRCS	= device_if.h bus_if.h pci_if.h
-SRCS	+= opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
+SRCS	= device_if.h bus_if.h pci_if.h ifdi_if.h
+SRCS	+= opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h
SRCS	+= if_ixl.c ixl_pf_main.c ixl_pf_qmgr.c ixl_txrx.c ixl_pf_i2c.c i40e_osdep.c
-SRCS	+= ixl_iw.c
-SRCS.PCI_IOV=	pci_iov_if.h ixl_pf_iov.c
+SRCS.PCI_IOV =	pci_iov_if.h ixl_pf_iov.c

# Shared source
SRCS	+= i40e_common.c i40e_nvm.c i40e_adminq.c i40e_lan_hmc.c i40e_hmc.c i40e_dcb.c

# Debug messages / sysctls
# CFLAGS += -DIXL_DEBUG
+
+#CFLAGS += -DIXL_IW
+#SRCS += ixl_iw.c

.include <bsd.kmod.mk>
Index: head/sys/modules/ixlv/Makefile
===================================================================
--- head/sys/modules/ixlv/Makefile	(revision 335337)
+++ head/sys/modules/ixlv/Makefile	(revision 335338)
@@ -1,16 +1,16 @@
#$FreeBSD$

.PATH: ${SRCTOP}/sys/dev/ixl

KMOD	= if_ixlv
-SRCS	= device_if.h bus_if.h pci_if.h
-SRCS	+= opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h
+SRCS	= device_if.h bus_if.h pci_if.h ifdi_if.h
+SRCS	+= opt_inet.h opt_inet6.h opt_rss.h opt_ixl.h opt_iflib.h
SRCS	+= if_ixlv.c ixlvc.c ixl_txrx.c i40e_osdep.c

# Shared source
SRCS	+= i40e_common.c i40e_nvm.c i40e_adminq.c

# Debug messages / sysctls
# CFLAGS += -DIXL_DEBUG

.include <bsd.kmod.mk>