diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC index 0400b2208b95..78a9b5f88eb1 100644 --- a/sys/amd64/conf/GENERIC +++ b/sys/amd64/conf/GENERIC @@ -1,379 +1,380 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/amd64 # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (https://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu HAMMER ident GENERIC makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options NUMA # Non-Uniform Memory Architecture support options PREEMPTION # Enable kernel thread preemption options VIMAGE # Subsystem virtualization, e.g. 
VNET options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 options TCP_OFFLOAD # TCP offload options TCP_BLACKBOX # Enhanced TCP event logging options TCP_HHOOK # hhook(9) framework for TCP options TCP_RFC7413 # TCP Fast Open options SCTP_SUPPORT # Allow kldload of SCTP options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options QUOTA # Enable disk quotas for UFS options MD_ROOT # MD is a potential root device options NFSCL # Network Filesystem Client options NFSD # Network Filesystem Server options NFSLOCKD # Network Lock Manager options NFS_ROOT # NFS usable as /, requires NFSCL options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options TMPFS # Efficient memory filesystem options GEOM_RAID # Soft RAID functionality. 
options GEOM_LABEL # Provides labelization options EFIRT # EFI Runtime Services support options COMPAT_FREEBSD32 # Compatible with i386 binaries options COMPAT_FREEBSD4 # Compatible with FreeBSD4 options COMPAT_FREEBSD5 # Compatible with FreeBSD5 options COMPAT_FREEBSD6 # Compatible with FreeBSD6 options COMPAT_FREEBSD7 # Compatible with FreeBSD7 options COMPAT_FREEBSD9 # Compatible with FreeBSD9 options COMPAT_FREEBSD10 # Compatible with FreeBSD10 options COMPAT_FREEBSD11 # Compatible with FreeBSD11 options COMPAT_FREEBSD12 # Compatible with FreeBSD12 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_FRAME # Ensure frames are compiled in options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel options RACCT # Resource accounting framework options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default options RCTL # Resource limits # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options BUF_TRACKING # Track buffer history options DDB # Support DDB. options FULL_BUF_TRACKING # Track more buffer history options GDB # Support remote GDB. 
options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options QUEUE_MACRO_DEBUG_TRASH # Trash queue(2) internal pointers on invalidation options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default options IOMMU # Kernel Sanitizers #options COVERAGE # Generic kernel coverage. Used by KCOV #options KCOV # Kernel Coverage Sanitizer # Warning: KUBSAN can result in a kernel too large for loader to load #options KUBSAN # Kernel Undefined Behavior Sanitizer #options KCSAN # Kernel Concurrency Sanitizer # Kernel dump features. options EKCD # Support for encrypted kernel dumps options GZIO # gzip-compressed kernel and user dumps options ZSTDIO # zstd-compressed kernel and user dumps options DEBUGNET # debugnet networking options NETDUMP # netdump(4) client support options NETGDB # netgdb(4) client support # Make an SMP-capable kernel by default options SMP # Symmetric MultiProcessor Kernel options EARLY_AP_STARTUP # CPU frequency control device cpufreq # Bus support. 
device acpi device pci options PCI_HP # PCI-Express native HotPlug options PCI_IOV # PCI SR-IOV support # Floppy drives device fdc # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA # SCSI Controllers device ahc # AHA2940 and onboard AIC7xxx devices device ahd # AHA39320/29320 and onboard AIC79xx devices device esp # AMD Am53C974 (Tekram DC-390(T)) device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic family #device ispfw # Firmware for QLogic HBAs- normally a module device mpt # LSI-Logic MPT-Fusion device mps # LSI-Logic MPT-Fusion 2 device mpr # LSI-Logic MPT-Fusion 3 device sym # NCR/Symbios Logic device isci # Intel C600 SAS controller device ocs_fc # Emulex FC adapters device pvscsi # VMware PVSCSI # ATA/SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) device ch # SCSI media changers device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) device ses # Enclosure Services (SES and SAF-TE) #device ctl # CAM Target Layer # RAID controllers interfaced to the SCSI subsystem device amr # AMI MegaRAID device arcmsr # Areca SATA II RAID device ciss # Compaq Smart RAID 5* device iir # Intel Integrated RAID device ips # IBM (Adaptec) ServeRAID device mly # Mylex AcceleRAID/eXtremeRAID device twa # 3ware 9000 series PATA/SATA RAID device smartpqi # Microsemi smartpqi driver device tws # LSI 3ware 9750 SATA+SAS 6Gb/s RAID controller # RAID controllers device aac # Adaptec FSA RAID device aacp # SCSI passthrough for aac (requires CAM) device aacraid # Adaptec by PMC RAID device ida # Compaq Smart RAID device mfi # LSI MegaRAID SAS device mlx # Mylex DAC960 family device mrsas # LSI/Avago MegaRAID SAS/SATA, 6Gb/s and 12Gb/s device pmspcv # PMC-Sierra SAS/SATA Controller driver #XXX 
pointer/int warnings #device pst # Promise Supertrak SX6000 device twe # 3ware ATA RAID # NVM Express (NVMe) support device nvme # base NVMe driver device nvd # expose NVMe namespaces as disks, depends on nvme # Intel Volume Management Device (VMD) support device vmd # base VMD device device vmd_bus # bus for VMD children # atkbdc0 controls both the keyboard and the PS/2 mouse device atkbdc # AT keyboard controller device atkbd # AT keyboard device psm # PS/2 mouse device kbdmux # keyboard multiplexer device vga # VGA video card driver options VESA # Add support for VESA BIOS Extensions (VBE) device splash # Splash screen and screen saver support # syscons is the default console driver, resembling an SCO console device sc options SC_PIXEL_MODE # add support for the raster text mode # vt is the new video console driver device vt device vt_vga device vt_efifb device agp # support several AGP chipsets # PCCARD (PCMCIA) support # PCMCIA and cardbus bridge support device cbb # cardbus (yenta) bridge device pccard # PC Card (16-bit) bus device cardbus # CardBus (32-bit) bus # Serial (COM) ports device uart # Generic UART driver # Parallel port device ppc device ppbus # Parallel port bus (required) device lpt # Printer device ppi # Parallel port interface device #device vpo # Requires scbus and da device puc # Multi I/O cards and multi-channel UARTs # PCI/PCI-X/PCIe Ethernet NICs that use iflib infrastructure device iflib device em # Intel PRO/1000 Gigabit Ethernet Family device ix # Intel PRO/10GbE PCIE PF Ethernet device ixv # Intel PRO/10GbE PCIE VF Ethernet device ixl # Intel 700 Series Physical Function device iavf # Intel Adaptive Virtual Function device ice # Intel 800 Series Physical Function device vmx # VMware VMXNET3 Ethernet +device axp # AMD EPYC integrated NIC # PCI Ethernet NICs. 
device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE device le # AMD Am7900 LANCE and Am79C9xx PCnet device ti # Alteon Networks Tigon I/II gigabit Ethernet # PCI Ethernet NICs that use the common MII bus controller code. # NOTE: Be sure to keep the 'device miibus' line in order to use these NICs! device miibus # MII bus support device ae # Attansic/Atheros L2 FastEthernet device age # Attansic/Atheros L1 Gigabit Ethernet device alc # Atheros AR8131/AR8132 Ethernet device ale # Atheros AR8121/AR8113/AR8114 Ethernet device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet device bfe # Broadcom BCM440x 10/100 Ethernet device bge # Broadcom BCM570xx Gigabit Ethernet device cas # Sun Cassini/Cassini+ and NS DP83065 Saturn device dc # DEC/Intel 21143 and various workalikes device et # Agere ET1310 10/100/Gigabit Ethernet device fxp # Intel EtherExpress PRO/100B (82557, 82558) device gem # Sun GEM/Sun ERI/Apple GMAC device hme # Sun HME (Happy Meal Ethernet) device jme # JMicron JMC250 Gigabit/JMC260 Fast Ethernet device lge # Level 1 LXT1001 gigabit Ethernet device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet device nfe # nVidia nForce MCP on-board Ethernet device nge # NatSemi DP83820 gigabit Ethernet device re # RealTek 8139C+/8169/8169S/8110S device rl # RealTek 8129/8139 device sge # Silicon Integrated Systems SiS190/191 device sis # Silicon Integrated Systems SiS 900/SiS 7016 device sk # SysKonnect SK-984x & SK-982x gigabit Ethernet device ste # Sundance ST201 (D-Link DFE-550TX) device stge # Sundance/Tamarack TC9021 gigabit Ethernet device vge # VIA VT612x gigabit Ethernet device vr # VIA Rhine, Rhine II device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'') # Wireless NIC cards device wlan # 802.11 support options IEEE80211_DEBUG # enable debug msgs options IEEE80211_SUPPORT_MESH # enable 802.11s draft support device wlan_wep # 802.11 WEP support device wlan_ccmp # 802.11 CCMP support device wlan_tkip # 802.11 TKIP support device wlan_amrr # AMRR transmit 
rate control algorithm device an # Aironet 4500/4800 802.11 wireless NICs. device ath # Atheros NICs device ath_pci # Atheros pci/cardbus glue device ath_hal # pci/cardbus chip support options AH_AR5416_INTERRUPT_MITIGATION # AR5416 interrupt mitigation options ATH_ENABLE_11N # Enable 802.11n support for AR5416 and later device ath_rate_sample # SampleRate tx rate control for ath #device bwi # Broadcom BCM430x/BCM431x wireless NICs. #device bwn # Broadcom BCM43xx wireless NICs. device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. device iwn # Intel 4965/1000/5000/6000 wireless NICs. device malo # Marvell Libertas wireless NICs. device mwl # Marvell 88W8363 802.11n wireless NICs. device ral # Ralink Technology RT2500 wireless NICs. device wi # WaveLAN/Intersil/Symbol 802.11 wireless NICs. device wpi # Intel 3945ABG wireless NICs. # Pseudo devices. device crypto # core crypto support device loop # Network loopback device padlock_rng # VIA Padlock RNG device rdrand_rng # Intel Bull Mountain RNG device ether # Ethernet support device vlan # 802.1Q VLAN support device tuntap # Packet tunnel. device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device firmware # firmware assist module # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter # USB support options USB_DEBUG # enable debug msgs device uhci # UHCI PCI->USB interface device ohci # OHCI PCI->USB interface device ehci # EHCI PCI->USB interface (USB 2.0) device xhci # XHCI PCI->USB interface (USB 3.0) device usb # USB Bus (required) device ukbd # Keyboard device umass # Disks/Mass storage - Requires scbus and da # Sound support device sound # Generic sound driver (required) device snd_cmi # CMedia CMI8338/CMI8738 device snd_csa # Crystal Semiconductor CS461x/428x device snd_emu10kx # Creative SoundBlaster Live! 
and Audigy device snd_es137x # Ensoniq AudioPCI ES137x device snd_hda # Intel High Definition Audio device snd_ich # Intel, NVidia and other ICH AC'97 Audio device snd_via8233 # VIA VT8233x Audio # MMC/SD device mmc # MMC/SD bus device mmcsd # MMC/SD memory card device sdhci # Generic PCI SD Host Controller # VirtIO support device virtio # Generic VirtIO bus (required) device virtio_pci # VirtIO PCI device device vtnet # VirtIO Ethernet device device virtio_blk # VirtIO Block device device virtio_scsi # VirtIO SCSI device device virtio_balloon # VirtIO Memory Balloon device # HyperV drivers and enhancement support device hyperv # HyperV drivers # Xen HVM Guest Optimizations # NOTE: XENHVM depends on xenpci. They must be added or removed together. options XENHVM # Xen HVM kernel infrastructure device xenpci # Xen HVM Hypervisor services driver # Netmap provides direct access to TX/RX rings on supported NICs device netmap # netmap(4) support # evdev interface options EVDEV_SUPPORT # evdev support in legacy drivers device evdev # input event device support device uinput # install /dev/uinput cdev diff --git a/sys/amd64/conf/NOTES b/sys/amd64/conf/NOTES index 7cdded7007c8..9bfb0cfb15e6 100644 --- a/sys/amd64/conf/NOTES +++ b/sys/amd64/conf/NOTES @@ -1,682 +1,683 @@ # # NOTES -- Lines that can be cut/pasted into kernel and hints configs. # # This file contains machine dependent kernel configuration notes. For # machine independent notes, look in /sys/conf/NOTES. # # $FreeBSD$ # # # We want LINT to cover profiling as well. profile 2 # # Enable the kernel DTrace hooks which are required to load the DTrace # kernel modules. 
# options KDTRACE_HOOKS # DTrace core # NOTE: introduces CDDL-licensed components into the kernel #device dtrace # DTrace modules #device dtrace_profile #device dtrace_sdt #device dtrace_fbt #device dtrace_systrace #device dtrace_prototype #device dtnfscl #device dtmalloc # Alternatively include all the DTrace modules #device dtraceall ##################################################################### # SMP OPTIONS: # # Notes: # # IPI_PREEMPTION instructs the kernel to preempt threads running on other # CPUS if needed. Relies on the PREEMPTION option # Optional: options IPI_PREEMPTION device atpic # Optional legacy pic support device mptable # Optional MPSPEC mptable support # # Watchdog routines. # options MP_WATCHDOG # Debugging options. # options COUNT_XINVLTLB_HITS # Counters for TLB events options COUNT_IPIS # Per-CPU IPI interrupt counters ##################################################################### # CPU OPTIONS # # You must specify at least one CPU (the one you intend to run on); # deleting the specification for CPUs you don't need to use may make # parts of the system run faster. # cpu HAMMER # aka K8, aka Opteron & Athlon64 # # Options for CPU features. # ##################################################################### # NETWORKING OPTIONS # # DEVICE_POLLING adds support for mixed interrupt-polling handling # of network device drivers, which has significant benefits in terms # of robustness to overloads and responsivity, as well as permitting # accurate scheduling of the CPU time between kernel network processing # and other activities. The drawback is a moderate (up to 1/HZ seconds) # potential increase in response times. # It is strongly recommended to use HZ=1000 or 2000 with DEVICE_POLLING # to achieve smoother behaviour. 
# Additionally, you can enable/disable polling at runtime with help of # the ifconfig(8) utility, and select the CPU fraction reserved to # userland with the sysctl variable kern.polling.user_frac # (default 50, range 0..100). # # Not all device drivers support this mode of operation at the time of # this writing. See polling(4) for more details. options DEVICE_POLLING # BPF_JITTER adds support for BPF just-in-time compiler. options BPF_JITTER # OpenFabrics Enterprise Distribution (Infiniband). options OFED options OFED_DEBUG_INIT # Sockets Direct Protocol options SDP options SDP_DEBUG # IP over Infiniband options IPOIB options IPOIB_DEBUG options IPOIB_CM ##################################################################### # CLOCK OPTIONS # Provide read/write access to the memory in the clock chip. device nvram # Access to rtc cmos via /dev/nvram ##################################################################### # MISCELLANEOUS DEVICES AND OPTIONS device speaker #Play IBM BASIC-style noises out your speaker envvar hint.speaker.0.at="isa" envvar hint.speaker.0.port="0x61" ##################################################################### # HARDWARE BUS CONFIGURATION # # ISA bus # device isa # # Options for `isa': # # AUTO_EOI_1 enables the `automatic EOI' feature for the master 8259A # interrupt controller. This saves about 0.7-1.25 usec for each interrupt. # This option breaks suspend/resume on some portables. # # AUTO_EOI_2 enables the `automatic EOI' feature for the slave 8259A # interrupt controller. This saves about 0.7-1.25 usec for each interrupt. # Automatic EOI is documented not to work for for the slave with the # original i8259A, but it works for some clones and some integrated # versions. # # MAXMEM specifies the amount of RAM on the machine; if this is not # specified, FreeBSD will first read the amount of memory from the CMOS # RAM, so the amount of memory will initially be limited to 64MB or 16MB # depending on the BIOS. 
If the BIOS reports 64MB, a memory probe will # then attempt to detect the installed amount of RAM. If this probe # fails to detect >64MB RAM you will have to use the MAXMEM option. # The amount is in kilobytes, so for a machine with 128MB of RAM, it would # be 131072 (128 * 1024). # # BROKEN_KEYBOARD_RESET disables the use of the keyboard controller to # reset the CPU for reboot. This is needed on some systems with broken # keyboard controllers. options AUTO_EOI_1 #options AUTO_EOI_2 options MAXMEM=(128*1024) #options BROKEN_KEYBOARD_RESET # # AGP GART support device agp # # AGP debugging. # options AGP_DEBUG ##################################################################### # HARDWARE DEVICE CONFIGURATION # To include support for VGA VESA video modes options VESA # Turn on extra debugging checks and output for VESA support. options VESA_DEBUG device dpms # DPMS suspend & resume via VESA BIOS # x86 real mode BIOS emulator, required by atkbdc/dpms/vesa options X86BIOS # # Optional devices: # # PS/2 mouse device psm envvar hint.psm.0.at="atkbdc" envvar hint.psm.0.irq="12" # Options for psm: options PSM_HOOKRESUME #hook the system resume event, useful #for some laptops options PSM_RESETAFTERSUSPEND #reset the device at the resume event # The keyboard controller; it controls the keyboard and the PS/2 mouse. device atkbdc envvar hint.atkbdc.0.at="isa" envvar hint.atkbdc.0.port="0x060" # The AT keyboard device atkbd envvar hint.atkbd.0.at="atkbdc" envvar hint.atkbd.0.irq="1" # Options for atkbd: options ATKBD_DFLT_KEYMAP # specify the built-in keymap makeoptions ATKBD_DFLT_KEYMAP=fr.dvorak # `flags' for atkbd: # 0x01 Force detection of keyboard, else we always assume a keyboard # 0x02 Don't reset keyboard, useful for some newer ThinkPads # 0x03 Force detection and avoid reset, might help with certain # dockingstations # 0x04 Old-style (XT) keyboard support, useful for older ThinkPads # Video card driver for VGA adapters. 
device vga envvar hint.vga.0.at="isa" # Options for vga: # Try the following option if the mouse pointer is not drawn correctly # or font does not seem to be loaded properly. May cause flicker on # some systems. options VGA_ALT_SEQACCESS # If you can dispense with some vga driver features, you may want to # use the following options to save some memory. #options VGA_NO_FONT_LOADING # don't save/load font #options VGA_NO_MODE_CHANGE # don't change video modes # Older video cards may require this option for proper operation. options VGA_SLOW_IOACCESS # do byte-wide i/o's to TS and GDC regs # The following option probably won't work with the LCD displays. options VGA_WIDTH90 # support 90 column modes # Debugging. options VGA_DEBUG # vt(4) drivers. device vt_vga # VGA device vt_efifb # EFI framebuffer # Linear framebuffer driver for S3 VESA 1.2 cards. Works on top of VESA. device s3pci # 3Dfx Voodoo Graphics, Voodoo II /dev/3dfx CDEV support. This will create # the /dev/3dfx0 device to work with glide implementations. This should get # linked to /dev/3dfx and /dev/voodoo. Note that this is not the same as # the tdfx DRI module from XFree86 and is completely unrelated. # # To enable Linuxulator support, one must also include COMPAT_LINUX in the # config as well. The other option is to load both as modules. device tdfx # Enable 3Dfx Voodoo support #XXX#device tdfx_linux # Enable Linuxulator support # # ACPI support using the Intel ACPI Component Architecture reference # implementation. # # ACPI_DEBUG enables the use of the debug.acpi.level and debug.acpi.layer # kernel environment variables to select initial debugging levels for the # Intel ACPICA code. (Note that the Intel code must also have USE_DEBUGGER # defined when it is built). device acpi options ACPI_DEBUG # The cpufreq(4) driver provides support for non-ACPI CPU frequency control device cpufreq # # Network interfaces: # # bxe: Broadcom NetXtreme II (BCM5771X/BCM578XX) PCIe 10Gb Ethernet # adapters. 
# ice: Intel 800 Series Physical Function # Requires the ice_ddp module for full functionality # ipw: Intel PRO/Wireless 2100 IEEE 802.11 adapter # Requires the ipw firmware module # iwi: Intel PRO/Wireless 2200BG/2225BG/2915ABG IEEE 802.11 adapters # Requires the iwi firmware module # iwn: Intel Wireless WiFi Link 1000/105/135/2000/4965/5000/6000/6050 abgn # 802.11 network adapters # Requires the iwn firmware module # mthca: Mellanox HCA InfiniBand # mlx4ib: Mellanox ConnectX HCA InfiniBand # mlx4en: Mellanox ConnectX HCA Ethernet # nfe: nVidia nForce MCP on-board Ethernet Networking (BSD open source) # sfxge: Solarflare SFC9000 family 10Gb Ethernet adapters # vmx: VMware VMXNET3 Ethernet (BSD open source) # wpi: Intel 3945ABG Wireless LAN controller # Requires the wpi firmware module device bxe # Broadcom NetXtreme II BCM5771X/BCM578XX 10GbE options ED_3C503 options ED_HPP options ED_SIC device ipw # Intel 2100 wireless NICs. device iwi # Intel 2200BG/2225BG/2915ABG wireless NICs. device iwn # Intel 4965/1000/5000/6000 wireless NICs. device ixl # Intel 700 Series Physical Function device iavf # Intel Adaptive Virtual Function device ice # Intel 800 Series Physical Function device ice_ddp # Intel 800 Series DDP Package device mthca # Mellanox HCA InfiniBand device mlx4 # Shared code module between IB and Ethernet device mlx4ib # Mellanox ConnectX HCA InfiniBand device mlx4en # Mellanox ConnectX HCA Ethernet device nfe # nVidia nForce MCP on-board Ethernet device sfxge # Solarflare SFC9000 10Gb Ethernet device vmx # VMware VMXNET3 Ethernet device wpi # Intel 3945ABG wireless NICs. 
+device axp # AMD EPYC integrated NIC # IEEE 802.11 adapter firmware modules # Intel PRO/Wireless 2100 firmware: # ipwfw: BSS/IBSS/monitor mode firmware # ipwbssfw: BSS mode firmware # ipwibssfw: IBSS mode firmware # ipwmonitorfw: Monitor mode firmware # Intel PRO/Wireless 2200BG/2225BG/2915ABG firmware: # iwifw: BSS/IBSS/monitor mode firmware # iwibssfw: BSS mode firmware # iwiibssfw: IBSS mode firmware # iwimonitorfw: Monitor mode firmware # Intel Wireless WiFi Link 4965/1000/5000/6000 series firmware: # iwnfw: Single module to support all devices # iwn1000fw: Specific module for the 1000 only # iwn105fw: Specific module for the 105 only # iwn135fw: Specific module for the 135 only # iwn2000fw: Specific module for the 2000 only # iwn2030fw: Specific module for the 2030 only # iwn4965fw: Specific module for the 4965 only # iwn5000fw: Specific module for the 5000 only # iwn5150fw: Specific module for the 5150 only # iwn6000fw: Specific module for the 6000 only # iwn6000g2afw: Specific module for the 6000g2a only # iwn6000g2bfw: Specific module for the 6000g2b only # iwn6050fw: Specific module for the 6050 only # wpifw: Intel 3945ABG Wireless LAN Controller firmware device iwifw device iwibssfw device iwiibssfw device iwimonitorfw device ipwfw device ipwbssfw device ipwibssfw device ipwmonitorfw device iwnfw device iwn1000fw device iwn105fw device iwn135fw device iwn2000fw device iwn2030fw device iwn4965fw device iwn5000fw device iwn5150fw device iwn6000fw device iwn6000g2afw device iwn6000g2bfw device iwn6050fw device wpifw # # Non-Transparent Bridge (NTB) drivers # device if_ntb # Virtual NTB network interface device ntb_transport # NTB packet transport driver device ntb # NTB hardware interface device ntb_hw_amd # AMD NTB hardware driver device ntb_hw_intel # Intel NTB hardware driver device ntb_hw_plx # PLX NTB hardware driver # #XXX this stores pointers in a 32bit field that is defined by the hardware #device pst # # Areca 11xx and 12xx series of SATA II RAID 
controllers. # CAM is required. # device arcmsr # Areca SATA II RAID # # Microsemi smartpqi controllers. # These controllers have a SCSI-like interface, and require the # CAM infrastructure. # device smartpqi # # 3ware 9000 series PATA/SATA RAID controller driver and options. # The driver is implemented as a SIM, and so, needs the CAM infrastructure. # options TWA_DEBUG # 0-10; 10 prints the most messages. device twa # 3ware 9000 series PATA/SATA RAID # # Adaptec FSA RAID controllers, including integrated DELL controllers, # the Dell PERC 2/QC and the HP NetRAID-4M device aac device aacp # SCSI Passthrough interface (optional, CAM required) # # Highpoint RocketRAID 27xx. device hpt27xx # # Highpoint RocketRAID 182x. device hptmv # # Highpoint DC7280 and R750. device hptnr # # Highpoint RocketRAID. Supports RR172x, RR222x, RR2240, RR232x, RR2340, # RR2210, RR174x, RR2522, RR231x, RR230x. device hptrr # # Highpoint RocketRaid 3xxx series SATA RAID device hptiop # # IBM (now Adaptec) ServeRAID controllers device ips # # Intel integrated Memory Controller (iMC) SMBus controller # Sandybridge-Xeon, Ivybridge-Xeon, Haswell-Xeon, Broadwell-Xeon device imcsmb # # Intel C600 (Patsburg) integrated SAS controller device isci options ISCI_LOGGING # enable debugging in isci HAL # # NVM Express (NVMe) support device nvme # base NVMe driver device nvd # expose NVMe namespaces as disks, depends on nvme # # Intel Volume Management Device (VMD) support device vmd # base VMD device device vmd_bus # bus for VMD children # # PMC-Sierra SAS/SATA controller device pmspcv # # SafeNet crypto driver: can be moved to the MI NOTES as soon as # it's tested on a big-endian machine # device safe # SafeNet 1141 options SAFE_DEBUG # enable debugging support: hw.safe.debug options SAFE_RNDTEST # enable rndtest support # # VirtIO support # # The virtio entry provides a generic bus for use by the device drivers. # It must be combined with an interface that communicates with the host. 
# Multiple such interfaces are defined by the VirtIO specification. FreeBSD # only has support for PCI. Therefore, virtio_pci must be statically # compiled in or loaded as a module for the device drivers to function. # device virtio # Generic VirtIO bus (required) device virtio_pci # VirtIO PCI Interface device vtnet # VirtIO Ethernet device device virtio_blk # VirtIO Block device device virtio_scsi # VirtIO SCSI device device virtio_balloon # VirtIO Memory Balloon device device virtio_random # VirtIO Entropy device device virtio_console # VirtIO Console device # Microsoft Hyper-V enhancement support device hyperv # HyperV drivers # Xen HVM Guest Optimizations options XENHVM # Xen HVM kernel infrastructure device xenpci # Xen HVM Hypervisor services driver ##################################################################### # # Miscellaneous hardware: # # ipmi: Intelligent Platform Management Interface # pbio: Parallel (8255 PPI) basic I/O (mode 0) port (e.g. Advantech PCL-724) # smbios: DMI/SMBIOS entry point # vpd: Vital Product Data kernel interface # asmc: Apple System Management Controller # si: Specialix International SI/XIO or SX intelligent serial card # tpm: Trusted Platform Module # Notes on the Specialix SI/XIO driver: # The host card is memory, not IO mapped. # The Rev 1 host cards use a 64K chunk, on a 32K boundary. # The Rev 2 host cards use a 32K chunk, on a 32K boundary. # The cards can use an IRQ of 11, 12 or 15. 
device ipmi device pbio envvar hint.pbio.0.at="isa" envvar hint.pbio.0.port="0x360" device smbios device vpd device asmc device tpm device padlock_rng # VIA Padlock RNG device rdrand_rng # Intel Bull Mountain RNG device aesni # AES-NI OpenCrypto module device ioat # Intel I/OAT DMA engine # # Laptop/Notebook options: # device backlight # # I2C Bus # # # Hardware watchdog timers: # # ichwd: Intel ICH watchdog timer # amdsbwd: AMD SB7xx watchdog timer # viawd: VIA south bridge watchdog timer # wbwd: Winbond watchdog timer # itwd: ITE Super I/O watchdog timer # device ichwd device amdsbwd device viawd device wbwd device itwd # # Temperature sensors: # # coretemp: on-die sensor on Intel Core and newer CPUs # amdtemp: on-die sensor on AMD K8/K10/K11 CPUs # device coretemp device amdtemp # # CPU control pseudo-device. Provides access to MSRs, CPUID info and # microcode update feature. # device cpuctl # # SuperIO driver. # device superio # # System Management Bus (SMB) # options ENABLE_ALART # Control alarm on Intel intpm driver # # AMD System Management Network (SMN) # device amdsmn # # Number of initial kernel page table pages used for early bootstrap. # This number should include enough pages to map the kernel and any # modules or other data loaded with the kernel by the loader. Each # page table page maps 2MB. # options NKPT=31 # EFI Runtime Services support options EFIRT ##################################################################### # ABI Emulation #XXX keep these here for now and reactivate when support for emulating #XXX these 32 bit binaries is added. # Enable 32-bit runtime support for FreeBSD/i386 binaries. options COMPAT_FREEBSD32 # Enable (32-bit) a.out binary support options COMPAT_AOUT # Enable 32-bit runtime support for CloudABI binaries. options COMPAT_CLOUDABI32 # Enable 64-bit runtime support for CloudABI binaries. 
options COMPAT_CLOUDABI64 # Enable Linux ABI emulation #XXX#options COMPAT_LINUX # Enable 32-bit Linux ABI emulation (requires COMPAT_FREEBSD32). options COMPAT_LINUX32 # Enable the linux-like proc filesystem support (requires COMPAT_LINUX32 # and PSEUDOFS) options LINPROCFS #Enable the linux-like sys filesystem support (requires COMPAT_LINUX32 # and PSEUDOFS) options LINSYSFS ##################################################################### # ZFS support # NB: This depends on crypto, cryptodev and ZSTDIO options ZFS ##################################################################### # VM OPTIONS # KSTACK_PAGES is the number of memory pages to assign to the kernel # stack of each thread. options KSTACK_PAGES=5 # Enable detailed accounting by the PV entry allocator. options PV_STATS ##################################################################### # More undocumented options for linting. # Note that documenting these are not considered an affront. options FB_INSTALL_CDEV # install a CDEV entry in /dev options KBDIO_DEBUG=2 options KBD_MAXRETRY=4 options KBD_MAXWAIT=6 options KBD_RESETDELAY=201 options PSM_DEBUG=1 options TIMER_FREQ=((14318182+6)/12) options VM_KMEM_SIZE options VM_KMEM_SIZE_MAX options VM_KMEM_SIZE_SCALE # Enable NDIS binary driver support options NDISAPI device ndis # GCOV (code coverage) support options LINDEBUGFS options GCOV diff --git a/sys/arm64/conf/GENERIC b/sys/arm64/conf/GENERIC index 2807284b01f8..fcaf54129c94 100644 --- a/sys/arm64/conf/GENERIC +++ b/sys/arm64/conf/GENERIC @@ -1,369 +1,369 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/arm64 # # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # https://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD 
World Wide Web server (https://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu ARM64 ident GENERIC makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options SCHED_ULE # ULE scheduler options NUMA # Non-Uniform Memory Architecture support options PREEMPTION # Enable kernel thread preemption options VIMAGE # Subsystem virtualization, e.g. VNET options INET # InterNETworking options INET6 # IPv6 communications protocols options IPSEC_SUPPORT # Allow kldload of ipsec and tcpmd5 options TCP_HHOOK # hhook(9) framework for TCP options TCP_OFFLOAD # TCP offload options TCP_RFC7413 # TCP Fast Open options SCTP_SUPPORT # Allow kldload of SCTP options FFS # Berkeley Fast Filesystem options SOFTUPDATES # Enable FFS soft updates support options UFS_ACL # Support for access control lists options UFS_DIRHASH # Improve performance on big directories options UFS_GJOURNAL # Enable gjournal-based UFS journaling options QUOTA # Enable disk quotas for UFS options MD_ROOT # MD is a potential root device options NFSCL # Network Filesystem Client options NFSD # Network Filesystem Server options NFSLOCKD # Network Lock Manager options NFS_ROOT # NFS usable as /, requires NFSCL options MSDOSFS # MSDOS Filesystem options CD9660 # ISO 9660 Filesystem options PROCFS # Process filesystem (requires PSEUDOFS) options PSEUDOFS # Pseudo-filesystem framework options TMPFS # Efficient memory filesystem options GEOM_RAID # Soft RAID functionality. 
options GEOM_LABEL # Provides labelization options COMPAT_FREEBSD32 # Compatible with FreeBSD/arm options COMPAT_FREEBSD11 # Compatible with FreeBSD11 options COMPAT_FREEBSD12 # Compatible with FreeBSD12 options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI options KTRACE # ktrace(1) support options STACK # stack(9) support options SYSVSHM # SYSV-style shared memory options SYSVMSG # SYSV-style message queues options SYSVSEM # SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. options KBD_INSTALL_CDEV # install a CDEV entry in /dev options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_FRAME # Ensure frames are compiled in options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options VFP # Floating-point support options RACCT # Resource accounting framework options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default options RCTL # Resource limits options SMP options INTRNG options LINUX_BOOT_ABI # Boot using booti command from U-Boot # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. # For full debugger support use (turn off in stable branch): options DDB # Support DDB. #options GDB # Support remote GDB. 
options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS options WITNESS # Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones options ALT_BREAK_TO_DEBUGGER # Enter debugger on keyboard escape sequence options USB_DEBUG # enable debug msgs options VERBOSE_SYSINIT=0 # Support debug.verbose_sysinit, off by default # Kernel Sanitizers #options COVERAGE # Generic kernel coverage. Used by KCOV #options KCOV # Kernel Coverage Sanitizer # Warning: KUBSAN can result in a kernel too large for loader to load #options KUBSAN # Kernel Undefined Behavior Sanitizer #options KCSAN # Kernel Concurrency Sanitizer # Kernel dump features. options EKCD # Support for encrypted kernel dumps options GZIO # gzip-compressed kernel and user dumps options ZSTDIO # zstd-compressed kernel and user dumps options DEBUGNET # debugnet networking options NETDUMP # netdump(4) client support # SoC support options SOC_ALLWINNER_A64 options SOC_ALLWINNER_H5 options SOC_ALLWINNER_H6 options SOC_CAVM_THUNDERX options SOC_FREESCALE_IMX8 options SOC_HISI_HI6220 options SOC_INTEL_STRATIX10 options SOC_BRCM_BCM2837 options SOC_BRCM_BCM2838 options SOC_MARVELL_8K options SOC_NXP_LS options SOC_ROCKCHIP_RK3328 options SOC_ROCKCHIP_RK3399 options SOC_XILINX_ZYNQ # Timer drivers device a10_timer # Annapurna Alpine drivers device al_ccu # Alpine Cache Coherency Unit device al_nb_service # Alpine North Bridge Service device al_iofic # I/O Fabric Interrupt Controller device al_serdes # Serializer/Deserializer device al_udma # Universal DMA # Qualcomm Snapdragon drivers device qcom_gcc # Global Clock Controller # VirtIO support device virtio device virtio_pci device virtio_mmio device virtio_blk device vtnet # CPU frequency control device cpufreq # Bus 
drivers device pci device pci_n1sdp # ARM Neoverse N1 SDP PCI device al_pci # Annapurna Alpine PCI-E options PCI_HP # PCI-Express native HotPlug options PCI_IOV # PCI SR-IOV support # PCI/PCI-X/PCIe Ethernet NICs that use iflib infrastructure device iflib device em # Intel PRO/1000 Gigabit Ethernet Family device ix # Intel 10Gb Ethernet Family # Ethernet NICs device mdio device mii device miibus # MII bus support device awg # Allwinner EMAC Gigabit Ethernet -device axgbe # AMD Opteron A1100 integrated NIC +device axa # AMD Opteron A1100 integrated NIC device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet device neta # Marvell Armada 370/38x/XP/3700 NIC device smc # SMSC LAN91C111 device vnic # Cavium ThunderX NIC device al_eth # Annapurna Alpine Ethernet NIC device dwc_rk # Rockchip Designware device dwc_socfpga # Altera SOCFPGA Ethernet MAC device genet # Broadcom on RPi4 device ffec # iMX FFEC # Etherswitch devices device etherswitch # Enable etherswitch support device miiproxy # Required for etherswitch device e6000sw # Marvell mv88e6085 based switches # Block devices device ahci device scbus device da # ATA/SCSI peripherals device cd # CD device pass # Passthrough device (direct ATA/SCSI access) # NVM Express (NVMe) support device nvme # base NVMe driver options NVME_USE_NVD=0 # prefer the cam(4) based nda(4) driver device nvd # expose NVMe namespaces as disks, depends on nvme # MMC/SD/SDIO Card slot support device sdhci device sdhci_xenon # Marvell Xenon SD/MMC controller device aw_mmc # Allwinner SD/MMC controller device mmc # mmc/sd bus device mmcsd # mmc/sd flash cards device dwmmc device dwmmc_altera device dwmmc_hisi device rk_dwmmc device rk_emmcphy # Serial (COM) ports device uart # Generic UART driver device uart_imx # iMX8 UART device uart_msm # Qualcomm MSM UART driver device uart_mu # RPI3 aux port device uart_mvebu # Armada 3700 UART driver device uart_ns8250 # ns8250-type UART driver device uart_snps device pl011 # USB support options 
USB_HOST_ALIGN=64 # Align usb buffers to cache line size. device aw_usbphy # Allwinner USB PHY device rk_usb2phy # Rockchip USB2PHY device rk_typec_phy # Rockchip TypeC PHY device dwcotg # DWC OTG controller device musb # Mentor Graphics USB OTG controller device ohci # OHCI USB interface device ehci # EHCI USB interface (USB 2.0) device ehci_mv # Marvell EHCI USB interface device xhci # XHCI USB interface (USB 3.0) device dwc3 # Synopsys DWC controller device aw_dwc3 # Allwinner DWC3 controller device rk_dwc3 # Rockchip DWC3 controller device usb # USB Bus (required) device ukbd # Keyboard device umass # Disks/Mass storage - Requires scbus and da # USB ethernet support device muge device smcphy device smsc # Sound support device sound device a10_codec # DMA controller device a31_dmac # GPIO / PINCTRL device a37x0_gpio # Marvell Armada 37x0 GPIO controller device aw_gpio # Allwinner GPIO controller device dwgpio # Synopsys DesignWare APB GPIO Controller device gpio device gpioled device fdt_pinctrl device gpioregulator device ls1046_gpio # LS1046A GPIO controller device mv_gpio # Marvell GPIO controller device mvebu_pinctrl # Marvell Pinmux Controller device pl061 # Arm PL061 GPIO controller device rk_gpio # RockChip GPIO Controller device rk_pinctrl # RockChip Pinmux Controller # I2C device a37x0_iic # Armada 37x0 I2C controller device aw_rsb # Allwinner Reduced Serial Bus device bcm2835_bsc # Broadcom BCM283x I2C bus device iicbus device iic device twsi # Allwinner I2C controller device rk_i2c # RockChip I2C controller device syr827 # Silergy SYR827 PMIC device sy8106a # SY8106A Buck Regulator device vf_i2c # Freescale Vybrid I2C controller device fsliic # Freescale iMX I2C controller # Clock and reset controllers device aw_ccu # Allwinner clock controller # Interrupt controllers device aw_nmi # Allwinner NMI support device mv_cp110_icu # Marvell CP110 ICU device mv_ap806_gicp # Marvell AP806 GICP device mv_ap806_sei # Marvell AP806 SEI # Real-time clock support 
device aw_rtc # Allwinner Real-time Clock device mv_rtc # Marvell Real-time Clock # Crypto accelerators device safexcel # Inside Secure EIP-97 # Watchdog controllers device aw_wdog # Allwinner Watchdog # Power management controllers device axp81x # X-Powers AXP81x PMIC device rk805 # RockChip RK805 PMIC # EFUSE device aw_sid # Allwinner Secure ID EFUSE # Thermal sensors device aw_thermal # Allwinner Thermal Sensor Controller device mv_thermal # Marvell Thermal Sensor Controller # SPI device spibus device a37x0_spi # Marvell Armada 37x0 SPI Controller device bcm2835_spi # Broadcom BCM283x SPI bus device rk_spi # RockChip SPI controller # PWM device pwm device aw_pwm device rk_pwm # Console device vt device kbdmux device vt_efifb # EVDEV support device evdev # input event device support options EVDEV_SUPPORT # evdev support in legacy drivers device uinput # install /dev/uinput cdev device aw_cir # Pseudo devices. device crypto # core crypto support device loop # Network loopback device ether # Ethernet support device vlan # 802.1Q VLAN support device tuntap # Packet tunnel. device md # Memory "disks" device gif # IPv6 and IPv4 tunneling device firmware # firmware assist module options EFIRT # EFI Runtime Services # EXT_RESOURCES pseudo devices options EXT_RESOURCES device clk device phy device hwreset device nvmem device regulator device syscon device aw_syscon # IO Domains device rk_iodomain # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. 
device bpf # Berkeley packet filter # Chip-specific errata options THUNDERX_PASS_1_1_ERRATA options FDT device acpi # DTBs makeoptions MODULES_EXTRA="dtb/allwinner dtb/imx8 dtb/mv dtb/rockchip dtb/rpi" diff --git a/sys/arm64/conf/NOTES b/sys/arm64/conf/NOTES index 2bf2337d610f..c966ab436221 100644 --- a/sys/arm64/conf/NOTES +++ b/sys/arm64/conf/NOTES @@ -1,241 +1,241 @@ # # NOTES -- Lines that can be cut/pasted into kernel and hints configs. # # This file contains machine dependent kernel configuration notes. For # machine independent notes, look in /sys/conf/NOTES. # # $FreeBSD$ # # # We want LINT to cover profiling as well. # Except it's broken. #profile 2 # # Enable the kernel DTrace hooks which are required to load the DTrace # kernel modules. # options KDTRACE_HOOKS # # Most of the following is copied from ARM64 GENERIC. cpu ARM64 makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. 
options KDTRACE_FRAME # Ensure frames are compiled in options VFP # Floating-point support options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default options INTRNG nooptions GDB # Support remote GDB -- not supported # SoC support options SOC_ALLWINNER_A64 options SOC_ALLWINNER_H5 options SOC_CAVM_THUNDERX options SOC_HISI_HI6220 options SOC_BRCM_BCM2837 options SOC_BRCM_BCM2838 options SOC_MARVELL_8K options SOC_ROCKCHIP_RK3328 options SOC_ROCKCHIP_RK3399 options SOC_XILINX_ZYNQ # Timer drivers device a10_timer # Annapurna Alpine drivers device al_ccu # Alpine Cache Coherency Unit device al_nb_service # Alpine North Bridge Service device al_iofic # I/O Fabric Interrupt Controller device al_serdes # Serializer/Deserializer device al_udma # Universal DMA # Qualcomm Snapdragon drivers device qcom_gcc # Global Clock Controller # VirtIO support device virtio device virtio_pci device virtio_mmio device virtio_blk device vtnet # CPU frequency control device cpufreq # Bus drivers device al_pci # Annapurna Alpine PCI-E options PCI_HP # PCI-Express native HotPlug options PCI_IOV # PCI SR-IOV support # Ethernet NICs device mdio device awg # Allwinner EMAC Gigabit Ethernet -device axgbe # AMD Opteron A1100 integrated NIC +device axa # AMD Opteron A1100 integrated NIC device neta # Marvell Armada 370/38x/XP/3700 NIC device smc # SMSC LAN91C111 device vnic # Cavium ThunderX NIC device al_eth # Annapurna Alpine Ethernet NIC device dwc_rk # Rockchip Designware device dwc_socfpga # Altera SOCFPGA Ethernet MAC device ice # Intel 800 Series Physical Function device ice_ddp # Intel 800 Series DDP Package # Etherswitch devices device e6000sw # Marvell mv88e6085 based switches # NVM Express (NVMe) support device nvme # base NVMe driver options NVME_USE_NVD=0 # prefer the cam(4) based nda(4) driver device nvd # expose NVMe namespaces as disks, depends on nvme # MMC/SD/SDIO Card slot support device sdhci_xenon # Marvell Xenon SD/MMC controller device aw_mmc # Allwinner SD/MMC 
controller device dwmmc device dwmmc_altera device rk_emmcphy # Serial (COM) ports device uart_msm # Qualcomm MSM UART driver device uart_mu # RPI3 aux port device uart_mvebu # Armada 3700 UART driver device uart_ns8250 # ns8250-type UART driver device uart_snps device pl011 # USB support device aw_usbphy # Allwinner USB PHY device dwcotg # DWC OTG controller device ehci_mv # Marvell EHCI USB interface # USB ethernet support device muge device smsc # Sound support device a10_codec # DMA controller device a31_dmac # GPIO / PINCTRL device a37x0_gpio # Marvell Armada 37x0 GPIO controller device aw_gpio # Allwinner GPIO controller device fdt_pinctrl device mv_gpio # Marvell GPIO controller device mvebu_pinctrl # Marvell Pinmux Controller device rk_gpio # RockChip GPIO Controller device rk_pinctrl # RockChip Pinmux Controller # I2C device aw_rsb # Allwinner Reduced Serial Bus device bcm2835_bsc # Broadcom BCM283x I2C bus device twsi # Allwinner I2C controller device rk_i2c # RockChip I2C controller # Clock and reset controllers device aw_ccu # Allwinner clock controller # Interrupt controllers device aw_nmi # Allwinner NMI support device mv_cp110_icu # Marvell CP110 ICU device mv_ap806_gicp # Marvell AP806 GICP # Real-time clock support device aw_rtc # Allwinner Real-time Clock device mv_rtc # Marvell Real-time Clock # Watchdog controllers device aw_wdog # Allwinner Watchdog # Power management controllers device axp81x # X-Powers AXP81x PMIC device rk805 # RockChip RK805 PMIC # EFUSE device aw_sid # Allwinner Secure ID EFUSE # Thermal sensors device aw_thermal # Allwinner Thermal Sensor Controller device mv_thermal # Marvell Thermal Sensor Controller # SPI device bcm2835_spi # Broadcom BCM283x SPI bus # PWM device pwm device aw_pwm device vt_efifb # EVDEV support options EVDEV_SUPPORT # evdev support in legacy drivers device aw_cir # Pseudo devices. 
options EFIRT # EFI Runtime Services # EXT_RESOURCES pseudo devices options EXT_RESOURCES device clk device phy device hwreset device nvmem device regulator device syscon device aw_syscon # Backlight subsystem device backlight # Misc devices. device pl330 # ARM PL330 dma controller device xdma # xDMA framework for SoC on-chip dma controllers # Chip-specific errata options THUNDERX_PASS_1_1_ERRATA options FDT device acpi # DTBs makeoptions MODULES_EXTRA="dtb/allwinner dtb/rockchip dtb/rpi" # Add CAMDEBUG stuff options CAMDEBUG options CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_PROBE|CAM_DEBUG_PERIPH) # bring in camified MMC too options MMCCAM # arm64 doesn't support inb/outb, so disable chipset probing which needs it nooptions PPC_PROBE_CHIPSET # These cause weird issues, not sure why nooptions DEBUG # Makes assumptions about bus tags that aren't true on arm64 nodevice snd_cmi # arm64 didn't exist for these releases, so doesn't have the required compat # support. Just disable them because they are meaningless. nooptions COMPAT_FREEBSD4 nooptions COMPAT_FREEBSD5 nooptions COMPAT_FREEBSD6 nooptions COMPAT_FREEBSD7 nooptions COMPAT_FREEBSD9 nooptions COMPAT_FREEBSD10 # arm64 supports 32-bit FreeBSD/arm binaries (armv[67] ABIs) options COMPAT_FREEBSD32 # Compatible with FreeBSD/arm ##################################################################### # ZFS support options ZFS diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64 index 8815e9ad3355..79ad9dfd5a32 100644 --- a/sys/conf/files.amd64 +++ b/sys/conf/files.amd64 @@ -1,476 +1,486 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # common files stuff between i386 and amd64 include "conf/files.x86" # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. 
# # cloudabi32_vdso.o optional compat_cloudabi32 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S" \ compile-with "${CC} -x assembler-with-cpp -m32 -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_i686_on_64bit.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi32_vdso.o" # cloudabi32_vdso_blob.o optional compat_cloudabi32 \ dependency "cloudabi32_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi32_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi32_vdso_blob.o" # cloudabi64_vdso.o optional compat_cloudabi64 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_x86_64.S" \ compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_x86_64.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi64_vdso.o" # cloudabi64_vdso_blob.o optional compat_cloudabi64 \ dependency "cloudabi64_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi64_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi64_vdso_blob.o" # linux32_genassym.o optional compat_linux32 \ dependency "$S/amd64/linux32/linux32_genassym.c offset.inc" \ compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -fcommon -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "linux32_genassym.o" # linux32_assym.h optional compat_linux32 \ dependency "$S/kern/genassym.sh linux32_genassym.o" \ compile-with "sh $S/kern/genassym.sh linux32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "linux32_assym.h" # linux32_locore.o optional compat_linux32 \ dependency "linux32_assym.h $S/amd64/linux32/linux32_locore.asm" \ compile-with "${CC} -x assembler-with-cpp -DLOCORE -m32 -shared -s -pipe -I. 
-I$S ${WERROR} -Wall -fPIC -fno-common -nostdinc -nostdlib -Wl,-T$S/amd64/linux32/linux32_vdso.lds.s -Wl,-soname=linux32_vdso.so,--eh-frame-hdr,-warn-common ${.IMPSRC} -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "linux32_locore.o" # linux32_vdso.so optional compat_linux32 \ dependency "linux32_locore.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 linux32_locore.o ${.TARGET}" \ no-implicit-rule \ clean "linux32_vdso.so" # ia32_genassym.o standard \ dependency "$S/compat/ia32/ia32_genassym.c offset.inc" \ compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -fcommon -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "ia32_genassym.o" # ia32_assym.h standard \ dependency "$S/kern/genassym.sh ia32_genassym.o" \ compile-with "env NM='${NM}' NMFLAGS='${NMFLAGS}' sh $S/kern/genassym.sh ia32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "ia32_assym.h" # amd64/acpica/acpi_machdep.c optional acpi acpi_wakecode.o optional acpi \ dependency "$S/amd64/acpica/acpi_wakecode.S assym.inc" \ compile-with "${NORMAL_S}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.o" acpi_wakecode.bin optional acpi \ dependency "acpi_wakecode.o" \ compile-with "${OBJCOPY} -S -O binary acpi_wakecode.o ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.bin" acpi_wakecode.h optional acpi \ dependency "acpi_wakecode.bin" \ compile-with "file2c -sx 'static char wakecode[] = {' '};' < acpi_wakecode.bin > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.h" acpi_wakedata.h optional acpi \ dependency "acpi_wakecode.o" \ compile-with '${NM} -n --defined-only acpi_wakecode.o | while read offset dummy what; do echo "#define $${what} 0x$${offset}"; done > ${.TARGET}' \ no-obj no-implicit-rule before-depend \ clean "acpi_wakedata.h" # #amd64/amd64/apic_vector.S standard amd64/amd64/bios.c standard amd64/amd64/bpf_jit_machdep.c optional bpf_jitter 
amd64/amd64/copyout.c standard amd64/amd64/cpu_switch.S standard amd64/amd64/db_disasm.c optional ddb amd64/amd64/db_interface.c optional ddb amd64/amd64/db_trace.c optional ddb amd64/amd64/efirt_machdep.c optional efirt amd64/amd64/efirt_support.S optional efirt amd64/amd64/elf_machdep.c standard amd64/amd64/exception.S standard amd64/amd64/fpu.c standard amd64/amd64/gdb_machdep.c optional gdb amd64/amd64/in_cksum.c optional inet | inet6 amd64/amd64/initcpu.c standard amd64/amd64/io.c optional io amd64/amd64/locore.S standard no-obj amd64/amd64/xen-locore.S optional xenhvm \ compile-with "${NORMAL_S} -g0" \ no-ctfconvert amd64/amd64/machdep.c standard amd64/amd64/mem.c optional mem amd64/amd64/minidump_machdep.c standard amd64/amd64/mp_machdep.c optional smp amd64/amd64/mpboot.S optional smp amd64/amd64/pmap.c standard amd64/amd64/prof_machdep.c optional profiling-routine amd64/amd64/ptrace_machdep.c standard amd64/amd64/sigtramp.S standard amd64/amd64/support.S standard amd64/amd64/sys_machdep.c standard amd64/amd64/trap.c standard amd64/amd64/uio_machdep.c standard amd64/amd64/uma_machdep.c standard amd64/amd64/vm_machdep.c standard amd64/cloudabi32/cloudabi32_sysvec.c optional compat_cloudabi32 amd64/cloudabi64/cloudabi64_sysvec.c optional compat_cloudabi64 amd64/pci/pci_cfgreg.c optional pci cddl/dev/dtrace/amd64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/amd64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" crypto/aesni/aeskeys_amd64.S optional aesni crypto/des/des_enc.c optional netsmb dev/acpi_support/acpi_wmi_if.m standard dev/agp/agp_amd64.c optional agp dev/agp/agp_i810.c optional agp dev/agp/agp_via.c optional agp dev/amdgpio/amdgpio.c optional amdgpio +dev/axgbe/if_axgbe_pci.c optional axp +dev/axgbe/xgbe-desc.c optional axp +dev/axgbe/xgbe-dev.c optional axp +dev/axgbe/xgbe-drv.c optional axp +dev/axgbe/xgbe-mdio.c optional axp +dev/axgbe/xgbe-sysctl.c optional axp +dev/axgbe/xgbe-txrx.c optional axp 
+dev/axgbe/xgbe_osdep.c optional axp +dev/axgbe/xgbe-i2c.c optional axp +dev/axgbe/xgbe-phy-v2.c optional axp dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv dev/ice/if_ice_iflib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_lib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_osdep.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_resmgr.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_strings.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_recovery_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_controlq.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_dcb.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flex_pipe.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flow.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_nvm.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_sched.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_sriov.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_switch.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" ice_ddp.c optional ice_ddp \ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031000 -mice_ddp -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ice_ddp.c" ice_ddp.fwo optional ice_ddp \ dependency "ice_ddp.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ice_ddp.fwo" ice_ddp.fw optional ice_ddp \ dependency "$S/contrib/dev/ice/ice-1.3.16.0.pkg" \ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.16.0.pkg ice_ddp.fw" \ no-obj no-implicit-rule 
\ clean "ice_ddp.fw" dev/ioat/ioat.c optional ioat pci dev/ioat/ioat_test.c optional ioat pci dev/ixl/if_ixl.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_main.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_iflib.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_qmgr.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_i2c.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/if_iavf.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/iavf_vc.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_txrx.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_osdep.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_lan_hmc.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_hmc.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_common.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_nvm.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_adminq.c optional ixl pci | iavf pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_dcb.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/nctgpio/nctgpio.c optional nctgpio dev/nfe/if_nfe.c optional nfe pci dev/ntb/if_ntb/if_ntb.c optional if_ntb dev/ntb/ntb_transport.c optional ntb_transport | if_ntb dev/ntb/ntb.c optional ntb | ntb_transport | if_ntb | ntb_hw_amd | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_if.m optional ntb | ntb_transport | if_ntb | ntb_hw_amd | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_hw/ntb_hw_amd.c optional ntb_hw_amd | ntb_hw dev/ntb/ntb_hw/ntb_hw_intel.c optional ntb_hw_intel | ntb_hw dev/ntb/ntb_hw/ntb_hw_plx.c optional ntb_hw_plx | 
ntb_hw dev/ntb/test/ntb_tool.c optional ntb_tool dev/nvram/nvram.c optional nvram isa dev/random/ivy.c optional rdrand_rng !random_loadable dev/random/nehemiah.c optional padlock_rng !random_loadable dev/qlxge/qls_dbg.c optional qlxge pci dev/qlxge/qls_dump.c optional qlxge pci dev/qlxge/qls_hw.c optional qlxge pci dev/qlxge/qls_ioctl.c optional qlxge pci dev/qlxge/qls_isr.c optional qlxge pci dev/qlxge/qls_os.c optional qlxge pci dev/qlxgb/qla_dbg.c optional qlxgb pci dev/qlxgb/qla_hw.c optional qlxgb pci dev/qlxgb/qla_ioctl.c optional qlxgb pci dev/qlxgb/qla_isr.c optional qlxgb pci dev/qlxgb/qla_misc.c optional qlxgb pci dev/qlxgb/qla_os.c optional qlxgb pci dev/qlxgbe/ql_dbg.c optional qlxgbe pci dev/qlxgbe/ql_hw.c optional qlxgbe pci dev/qlxgbe/ql_ioctl.c optional qlxgbe pci dev/qlxgbe/ql_isr.c optional qlxgbe pci dev/qlxgbe/ql_misc.c optional qlxgbe pci dev/qlxgbe/ql_os.c optional qlxgbe pci dev/qlxgbe/ql_reset.c optional qlxgbe pci dev/qlxgbe/ql_fw.c optional qlxgbe pci dev/qlxgbe/ql_boot.c optional qlxgbe pci dev/qlxgbe/ql_minidump.c optional qlxgbe pci dev/qlnx/qlnxe/ecore_cxt.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dbg_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dcbx.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dev.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_hw.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_ops.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_int.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_l2.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_mcp.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_sp_commands.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_spq.c optional qlnxe 
pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_ioctl.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_os.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/sfxge/common/ef10_ev.c optional sfxge pci dev/sfxge/common/ef10_filter.c optional sfxge pci dev/sfxge/common/ef10_image.c optional sfxge pci dev/sfxge/common/ef10_intr.c optional sfxge pci dev/sfxge/common/ef10_mac.c optional sfxge pci dev/sfxge/common/ef10_mcdi.c optional sfxge pci dev/sfxge/common/ef10_nic.c optional sfxge pci dev/sfxge/common/ef10_nvram.c optional sfxge pci dev/sfxge/common/ef10_phy.c optional sfxge pci dev/sfxge/common/ef10_rx.c optional sfxge pci dev/sfxge/common/ef10_tx.c optional sfxge pci dev/sfxge/common/ef10_vpd.c optional sfxge pci dev/sfxge/common/efx_bootcfg.c optional sfxge pci dev/sfxge/common/efx_crc32.c optional sfxge pci dev/sfxge/common/efx_ev.c optional sfxge pci dev/sfxge/common/efx_filter.c optional sfxge pci dev/sfxge/common/efx_hash.c optional sfxge pci dev/sfxge/common/efx_intr.c optional sfxge pci dev/sfxge/common/efx_lic.c optional sfxge pci dev/sfxge/common/efx_mac.c optional sfxge pci dev/sfxge/common/efx_mcdi.c optional sfxge pci dev/sfxge/common/efx_mon.c optional sfxge pci dev/sfxge/common/efx_nic.c optional sfxge pci dev/sfxge/common/efx_nvram.c optional sfxge pci dev/sfxge/common/efx_phy.c optional sfxge pci dev/sfxge/common/efx_port.c optional sfxge pci dev/sfxge/common/efx_rx.c optional sfxge pci dev/sfxge/common/efx_sram.c optional sfxge pci dev/sfxge/common/efx_tunnel.c optional sfxge pci dev/sfxge/common/efx_tx.c optional sfxge pci dev/sfxge/common/efx_vpd.c optional sfxge pci dev/sfxge/common/hunt_nic.c optional sfxge pci dev/sfxge/common/mcdi_mon.c optional sfxge pci dev/sfxge/common/medford_nic.c optional sfxge pci dev/sfxge/common/medford2_nic.c optional sfxge pci dev/sfxge/common/siena_mac.c optional sfxge pci dev/sfxge/common/siena_mcdi.c optional sfxge pci dev/sfxge/common/siena_nic.c optional sfxge pci 
dev/sfxge/common/siena_nvram.c optional sfxge pci dev/sfxge/common/siena_phy.c optional sfxge pci dev/sfxge/common/siena_sram.c optional sfxge pci dev/sfxge/common/siena_vpd.c optional sfxge pci dev/sfxge/sfxge.c optional sfxge pci dev/sfxge/sfxge_dma.c optional sfxge pci dev/sfxge/sfxge_ev.c optional sfxge pci dev/sfxge/sfxge_intr.c optional sfxge pci dev/sfxge/sfxge_mcdi.c optional sfxge pci dev/sfxge/sfxge_nvram.c optional sfxge pci dev/sfxge/sfxge_port.c optional sfxge pci dev/sfxge/sfxge_rx.c optional sfxge pci dev/sfxge/sfxge_tx.c optional sfxge pci dev/smartpqi/smartpqi_cam.c optional smartpqi dev/smartpqi/smartpqi_cmd.c optional smartpqi dev/smartpqi/smartpqi_discovery.c optional smartpqi dev/smartpqi/smartpqi_event.c optional smartpqi dev/smartpqi/smartpqi_helper.c optional smartpqi dev/smartpqi/smartpqi_init.c optional smartpqi dev/smartpqi/smartpqi_intr.c optional smartpqi dev/smartpqi/smartpqi_ioctl.c optional smartpqi dev/smartpqi/smartpqi_main.c optional smartpqi dev/smartpqi/smartpqi_mem.c optional smartpqi dev/smartpqi/smartpqi_misc.c optional smartpqi dev/smartpqi/smartpqi_queue.c optional smartpqi dev/smartpqi/smartpqi_request.c optional smartpqi dev/smartpqi/smartpqi_response.c optional smartpqi dev/smartpqi/smartpqi_sis.c optional smartpqi dev/smartpqi/smartpqi_tag.c optional smartpqi dev/speaker/spkr.c optional speaker dev/sume/if_sume.c optional sume dev/superio/superio.c optional superio isa dev/syscons/apm/apm_saver.c optional apm_saver apm dev/syscons/scvesactl.c optional sc vga vesa dev/syscons/scvgarndr.c optional sc vga dev/tpm/tpm.c optional tpm dev/tpm/tpm20.c optional tpm dev/tpm/tpm_crb.c optional tpm acpi dev/tpm/tpm_tis.c optional tpm acpi dev/tpm/tpm_acpi.c optional tpm acpi dev/tpm/tpm_isa.c optional tpm isa dev/uart/uart_cpu_x86.c optional uart dev/viawd/viawd.c optional viawd dev/vmware/vmxnet3/if_vmx.c optional vmx dev/vmware/vmci/vmci.c optional vmci dev/vmware/vmci/vmci_datagram.c optional vmci 
dev/vmware/vmci/vmci_doorbell.c optional vmci dev/vmware/vmci/vmci_driver.c optional vmci dev/vmware/vmci/vmci_event.c optional vmci dev/vmware/vmci/vmci_hashtable.c optional vmci dev/vmware/vmci/vmci_kernel_if.c optional vmci dev/vmware/vmci/vmci_qpair.c optional vmci dev/vmware/vmci/vmci_queue_pair.c optional vmci dev/vmware/vmci/vmci_resource.c optional vmci dev/vmware/pvscsi/pvscsi.c optional pvscsi dev/vmd/vmd.c optional vmd dev/vmd/vmd_bus.c optional vmd_bus dev/wbwd/wbwd.c optional wbwd dev/xen/pci/xen_acpi_pci.c optional xenhvm dev/xen/pci/xen_pci.c optional xenhvm isa/syscons_isa.c optional sc isa/vga_isa.c optional vga kern/kern_clocksource.c standard kern/imgact_aout.c optional compat_aout kern/link_elf_obj.c standard # # IA32 binary support # #amd64/ia32/ia32_exception.S optional compat_freebsd32 amd64/ia32/ia32_reg.c optional compat_freebsd32 amd64/ia32/ia32_signal.c optional compat_freebsd32 amd64/ia32/ia32_sigtramp.S optional compat_freebsd32 amd64/ia32/ia32_syscall.c optional compat_freebsd32 amd64/ia32/ia32_misc.c optional compat_freebsd32 compat/ia32/ia32_sysvec.c optional compat_freebsd32 compat/linprocfs/linprocfs.c optional linprocfs compat/linsysfs/linsysfs.c optional linsysfs # # Linux/i386 binary support # amd64/linux32/linux32_dummy.c optional compat_linux32 amd64/linux32/linux32_machdep.c optional compat_linux32 amd64/linux32/linux32_support.s optional compat_linux32 \ dependency "linux32_assym.h" amd64/linux32/linux32_sysent.c optional compat_linux32 amd64/linux32/linux32_sysvec.c optional compat_linux32 compat/linux/linux_emul.c optional compat_linux32 compat/linux/linux_errno.c optional compat_linux32 compat/linux/linux_file.c optional compat_linux32 compat/linux/linux_fork.c optional compat_linux32 compat/linux/linux_futex.c optional compat_linux32 compat/linux/linux_getcwd.c optional compat_linux32 compat/linux/linux_ioctl.c optional compat_linux32 compat/linux/linux_ipc.c optional compat_linux32 compat/linux/linux_mib.c optional 
compat_linux32 compat/linux/linux_misc.c optional compat_linux32 compat/linux/linux_mmap.c optional compat_linux32 compat/linux/linux_signal.c optional compat_linux32 compat/linux/linux_socket.c optional compat_linux32 compat/linux/linux_stats.c optional compat_linux32 compat/linux/linux_sysctl.c optional compat_linux32 compat/linux/linux_time.c optional compat_linux32 compat/linux/linux_timer.c optional compat_linux32 compat/linux/linux_uid16.c optional compat_linux32 compat/linux/linux_util.c optional compat_linux32 compat/linux/linux_vdso.c optional compat_linux32 compat/linux/linux_common.c optional compat_linux32 compat/linux/linux_event.c optional compat_linux32 compat/linux/linux.c optional compat_linux32 dev/amr/amr_linux.c optional compat_linux32 amr dev/mfi/mfi_linux.c optional compat_linux32 mfi compat/ndis/winx64_wrap.S optional ndisapi pci # # x86 real mode BIOS emulator, required by dpms/pci/vesa # compat/x86bios/x86bios.c optional x86bios | dpms | pci | vesa contrib/x86emu/x86emu.c optional x86bios | dpms | pci | vesa # # bvm console # dev/bvm/bvm_console.c optional bvmconsole dev/bvm/bvm_dbg.c optional bvmdebug # Common files where we currently configure the system differently, but perhaps shouldn't # config(8) doesn't have a way to force standard options, so we've been inconsistent # about marking non-optional things 'standard'. 
x86/acpica/madt.c optional acpi x86/isa/atpic.c optional atpic isa x86/isa/elcr.c optional atpic isa | mptable x86/isa/isa.c standard x86/isa/isa_dma.c standard x86/pci/pci_early_quirks.c optional pci x86/x86/io_apic.c standard x86/x86/local_apic.c standard x86/x86/mptable.c optional mptable x86/x86/mptable_pci.c optional mptable pci x86/x86/msi.c optional pci x86/xen/pv.c optional xenhvm x86/xen/pvcpu_enum.c optional xenhvm x86/xen/xen_pci_bus.c optional xenhvm contrib/openzfs/module/zcommon/zfs_fletcher_avx512.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_fletcher_intel.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_fletcher_sse.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_avx2.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_avx512bw.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_avx512f.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_sse2.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_ssse3.c optional zfs compile-with "${ZFS_C}" diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64 index ab35a133f85c..6a112168533e 100644 --- a/sys/conf/files.arm64 +++ b/sys/conf/files.arm64 @@ -1,421 +1,425 @@ # $FreeBSD$ cloudabi32_vdso.o optional compat_cloudabi32 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_armv6_on_64bit.S" \ compile-with "${CC} -x assembler-with-cpp -m32 -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_armv6_on_64bit.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi32_vdso.o" # cloudabi32_vdso_blob.o optional compat_cloudabi32 \ dependency "cloudabi32_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-littleaarch64 --binary-architecture aarch64 cloudabi32_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi32_vdso_blob.o" # 
cloudabi64_vdso.o optional compat_cloudabi64 \ dependency "$S/contrib/cloudabi/cloudabi_vdso_aarch64.S" \ compile-with "${CC} -x assembler-with-cpp -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi/cloudabi_vdso.lds $S/contrib/cloudabi/cloudabi_vdso_aarch64.S -o ${.TARGET}" \ no-obj no-implicit-rule \ clean "cloudabi64_vdso.o" # cloudabi64_vdso_blob.o optional compat_cloudabi64 \ dependency "cloudabi64_vdso.o" \ compile-with "${OBJCOPY} --input-target binary --output-target elf64-littleaarch64 --binary-architecture aarch64 cloudabi64_vdso.o ${.TARGET}" \ no-implicit-rule \ clean "cloudabi64_vdso_blob.o" # # Allwinner common files arm/allwinner/a10_timer.c optional a10_timer fdt arm/allwinner/a10_codec.c optional sound a10_codec arm/allwinner/a31_dmac.c optional a31_dmac arm/allwinner/sunxi_dma_if.m optional a31_dmac arm/allwinner/aw_cir.c optional evdev aw_cir fdt arm/allwinner/aw_dwc3.c optional aw_dwc3 fdt arm/allwinner/aw_gpio.c optional gpio aw_gpio fdt arm/allwinner/aw_mmc.c optional mmc aw_mmc fdt | mmccam aw_mmc fdt arm/allwinner/aw_nmi.c optional aw_nmi fdt \ compile-with "${NORMAL_C} -I$S/gnu/dts/include" arm/allwinner/aw_pwm.c optional aw_pwm fdt arm/allwinner/aw_rsb.c optional aw_rsb fdt arm/allwinner/aw_rtc.c optional aw_rtc fdt arm/allwinner/aw_sid.c optional aw_sid nvmem fdt arm/allwinner/aw_spi.c optional aw_spi fdt arm/allwinner/aw_syscon.c optional aw_syscon ext_resources syscon fdt arm/allwinner/aw_thermal.c optional aw_thermal nvmem fdt arm/allwinner/aw_usbphy.c optional ehci aw_usbphy fdt arm/allwinner/aw_usb3phy.c optional xhci aw_usbphy fdt arm/allwinner/aw_wdog.c optional aw_wdog fdt arm/allwinner/axp81x.c optional axp81x fdt arm/allwinner/if_awg.c optional awg ext_resources syscon aw_sid nvmem fdt # Allwinner clock driver arm/allwinner/clkng/aw_ccung.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_frac.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_m.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_mipi.c optional aw_ccu fdt 
arm/allwinner/clkng/aw_clk_nkmp.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_nm.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_nmm.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_np.c optional aw_ccu fdt arm/allwinner/clkng/aw_clk_prediv_mux.c optional aw_ccu fdt arm/allwinner/clkng/ccu_a64.c optional soc_allwinner_a64 aw_ccu fdt arm/allwinner/clkng/ccu_h3.c optional soc_allwinner_h5 aw_ccu fdt arm/allwinner/clkng/ccu_h6.c optional soc_allwinner_h6 aw_ccu fdt arm/allwinner/clkng/ccu_h6_r.c optional soc_allwinner_h6 aw_ccu fdt arm/allwinner/clkng/ccu_sun8i_r.c optional aw_ccu fdt arm/allwinner/clkng/ccu_de2.c optional aw_ccu fdt # Allwinner padconf files arm/allwinner/a64/a64_padconf.c optional soc_allwinner_a64 fdt arm/allwinner/a64/a64_r_padconf.c optional soc_allwinner_a64 fdt arm/allwinner/h3/h3_padconf.c optional soc_allwinner_h5 fdt arm/allwinner/h3/h3_r_padconf.c optional soc_allwinner_h5 fdt arm/allwinner/h6/h6_padconf.c optional soc_allwinner_h6 fdt arm/allwinner/h6/h6_r_padconf.c optional soc_allwinner_h6 fdt arm/annapurna/alpine/alpine_ccu.c optional al_ccu fdt arm/annapurna/alpine/alpine_nb_service.c optional al_nb_service fdt arm/annapurna/alpine/alpine_pci.c optional al_pci fdt arm/annapurna/alpine/alpine_pci_msix.c optional al_pci fdt arm/annapurna/alpine/alpine_serdes.c optional al_serdes fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${PROF} ${.IMPSRC}" arm/arm/generic_timer.c standard arm/arm/gic.c standard arm/arm/gic_acpi.c optional acpi arm/arm/gic_fdt.c optional fdt arm/arm/pmu.c standard arm/broadcom/bcm2835/bcm2835_audio.c optional sound vchiq fdt \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" arm/broadcom/bcm2835/bcm2835_bsc.c optional bcm2835_bsc fdt arm/broadcom/bcm2835/bcm2835_clkman.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_cpufreq.c optional soc_brcm_bcm2837 fdt | 
soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_dma.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_fbd.c optional vt soc_brcm_bcm2837 fdt | vt soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_firmware.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_ft5406.c optional evdev bcm2835_ft5406 fdt arm/broadcom/bcm2835/bcm2835_gpio.c optional gpio soc_brcm_bcm2837 fdt | gpio soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_intr.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_mbox.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_rng.c optional !random_loadable soc_brcm_bcm2837 fdt | !random_loadable soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_sdhci.c optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_sdhost.c optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_spi.c optional bcm2835_spi fdt arm/broadcom/bcm2835/bcm2835_vcbus.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_vcio.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_wdog.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2836.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm283x_dwc_fdt.c optional dwcotg fdt soc_brcm_bcm2837 | dwcotg fdt soc_brcm_bcm2838 arm/broadcom/bcm2835/bcm2838_pci.c optional soc_brcm_bcm2838 fdt pci arm/broadcom/bcm2835/bcm2838_xhci.c optional soc_brcm_bcm2838 fdt pci xhci arm/broadcom/bcm2835/raspberrypi_gpio.c optional soc_brcm_bcm2837 gpio | soc_brcm_bcm2838 gpio arm/freescale/vybrid/vf_i2c.c optional vf_i2c iicbus SOC_NXP_LS arm/mv/a37x0_gpio.c optional a37x0_gpio gpio fdt arm/mv/a37x0_iic.c optional a37x0_iic iicbus fdt arm/mv/a37x0_spi.c optional a37x0_spi spibus fdt arm/mv/armada38x/armada38x_rtc.c optional mv_rtc fdt 
arm/mv/gpio.c optional mv_gpio fdt arm/mv/mvebu_gpio.c optional mv_gpio fdt arm/mv/mvebu_pinctrl.c optional mvebu_pinctrl fdt arm/mv/mv_ap806_clock.c optional SOC_MARVELL_8K fdt arm/mv/mv_ap806_gicp.c optional mv_ap806_gicp fdt arm/mv/mv_ap806_sei.c optional mv_ap806_sei fdt arm/mv/mv_cp110_clock.c optional SOC_MARVELL_8K fdt arm/mv/mv_cp110_icu.c optional mv_cp110_icu fdt arm/mv/mv_cp110_icu_bus.c optional mv_cp110_icu fdt arm/mv/mv_thermal.c optional SOC_MARVELL_8K mv_thermal fdt arm/mv/armada38x/armada38x_rtc.c optional mv_rtc fdt arm/xilinx/uart_dev_cdnc.c optional uart soc_xilinx_zynq arm64/acpica/acpi_iort.c optional acpi arm64/acpica/acpi_machdep.c optional acpi arm64/acpica/OsdEnvironment.c optional acpi arm64/acpica/acpi_wakeup.c optional acpi arm64/acpica/pci_cfgreg.c optional acpi pci arm64/arm64/autoconf.c standard arm64/arm64/bus_machdep.c standard arm64/arm64/bus_space_asm.S standard arm64/arm64/busdma_bounce.c standard arm64/arm64/busdma_machdep.c standard arm64/arm64/bzero.S standard arm64/arm64/clock.c standard arm64/arm64/copyinout.S standard arm64/arm64/cpu_errata.c standard arm64/arm64/cpufunc_asm.S standard arm64/arm64/db_disasm.c optional ddb arm64/arm64/db_interface.c optional ddb arm64/arm64/db_trace.c optional ddb arm64/arm64/debug_monitor.c standard arm64/arm64/disassem.c optional ddb arm64/arm64/dump_machdep.c standard arm64/arm64/efirt_machdep.c optional efirt arm64/arm64/elf32_machdep.c optional compat_freebsd32 arm64/arm64/elf_machdep.c standard arm64/arm64/exception.S standard arm64/arm64/freebsd32_machdep.c optional compat_freebsd32 arm64/arm64/gicv3_its.c optional intrng fdt arm64/arm64/gic_v3.c standard arm64/arm64/gic_v3_acpi.c optional acpi arm64/arm64/gic_v3_fdt.c optional fdt arm64/arm64/identcpu.c standard arm64/arm64/in_cksum.c optional inet | inet6 arm64/arm64/locore.S standard no-obj arm64/arm64/machdep.c standard arm64/arm64/machdep_boot.c standard arm64/arm64/mem.c standard arm64/arm64/memcpy.S standard 
arm64/arm64/memmove.S standard arm64/arm64/minidump_machdep.c standard arm64/arm64/mp_machdep.c optional smp arm64/arm64/nexus.c standard arm64/arm64/ofw_machdep.c optional fdt arm64/arm64/pmap.c standard arm64/arm64/stack_machdep.c optional ddb | stack arm64/arm64/support.S standard arm64/arm64/swtch.S standard arm64/arm64/sys_machdep.c standard arm64/arm64/trap.c standard arm64/arm64/uio_machdep.c standard arm64/arm64/uma_machdep.c standard arm64/arm64/undefined.c standard arm64/arm64/unwind.c optional ddb | kdtrace_hooks | stack arm64/arm64/vfp.c standard arm64/arm64/vm_machdep.c standard arm64/broadcom/brcmmdio/mdio_mux_iproc.c optional fdt arm64/broadcom/brcmmdio/mdio_nexus_iproc.c optional fdt arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c optional fdt pci arm64/broadcom/genet/if_genet.c optional SOC_BRCM_BCM2838 fdt genet arm64/cavium/thunder_pcie_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_pem.c optional soc_cavm_thunderx pci arm64/cavium/thunder_pcie_pem_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_common.c optional soc_cavm_thunderx pci arm64/cloudabi32/cloudabi32_sysvec.c optional compat_cloudabi32 arm64/cloudabi64/cloudabi64_sysvec.c optional compat_cloudabi64 arm64/coresight/coresight.c standard arm64/coresight/coresight_acpi.c optional acpi arm64/coresight/coresight_fdt.c optional fdt arm64/coresight/coresight_if.m standard arm64/coresight/coresight_cmd.c standard arm64/coresight/coresight_cpu_debug.c standard arm64/coresight/coresight_etm4x.c standard arm64/coresight/coresight_etm4x_acpi.c optional acpi arm64/coresight/coresight_etm4x_fdt.c optional fdt arm64/coresight/coresight_funnel.c standard arm64/coresight/coresight_funnel_acpi.c optional acpi arm64/coresight/coresight_funnel_fdt.c optional fdt arm64/coresight/coresight_replicator.c standard arm64/coresight/coresight_replicator_acpi.c optional acpi arm64/coresight/coresight_replicator_fdt.c optional fdt arm64/coresight/coresight_tmc.c standard 
arm64/coresight/coresight_tmc_acpi.c optional acpi arm64/coresight/coresight_tmc_fdt.c optional fdt arm64/intel/firmware.c optional soc_intel_stratix10 arm64/intel/stratix10-soc-fpga-mgr.c optional soc_intel_stratix10 arm64/intel/stratix10-svc.c optional soc_intel_stratix10 arm64/qoriq/ls1046_gpio.c optional ls1046_gpio gpio fdt SOC_NXP_LS arm64/qoriq/clk/ls1046a_clkgen.c optional clk SOC_NXP_LS arm64/qoriq/clk/qoriq_clk_pll.c optional clk SOC_NXP_LS arm64/qoriq/clk/qoriq_clkgen.c optional clk SOC_NXP_LS arm64/qualcomm/qcom_gcc.c optional qcom_gcc fdt contrib/vchiq/interface/compat/vchi_bsd.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_arm.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_connected.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_core.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_shim.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_util.c 
optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" crypto/armv8/armv8_crypto.c optional armv8crypto armv8_crypto_wrap.o optional armv8crypto \ dependency "$S/crypto/armv8/armv8_crypto_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8/ ${WERROR} ${NO_WCAST_QUAL} ${PROF} -march=armv8-a+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "armv8_crypto_wrap.o" crypto/des/des_enc.c optional netsmb dev/acpica/acpi_bus_if.m optional acpi dev/acpica/acpi_if.m optional acpi dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pxm.c optional acpi dev/ahci/ahci_fsl_fdt.c optional SOC_NXP_LS ahci fdt dev/ahci/ahci_generic.c optional ahci dev/altera/dwc/if_dwc_socfpga.c optional fdt dwc_socfpga -dev/axgbe/if_axgbe.c optional axgbe -dev/axgbe/xgbe-desc.c optional axgbe -dev/axgbe/xgbe-dev.c optional axgbe -dev/axgbe/xgbe-drv.c optional axgbe -dev/axgbe/xgbe-mdio.c optional axgbe +dev/axgbe/if_axgbe.c optional axa +dev/axgbe/xgbe-desc.c optional axa +dev/axgbe/xgbe-dev.c optional axa +dev/axgbe/xgbe-drv.c optional axa +dev/axgbe/xgbe-mdio.c optional axa +dev/axgbe/xgbe-sysctl.c optional axa +dev/axgbe/xgbe-txrx.c optional axa +dev/axgbe/xgbe_osdep.c optional axa +dev/axgbe/xgbe-phy-v1.c optional axa dev/cpufreq/cpufreq_dt.c optional cpufreq fdt dev/gpio/pl061.c optional pl061 gpio dev/gpio/pl061_acpi.c optional pl061 gpio acpi dev/gpio/pl061_fdt.c optional pl061 gpio fdt dev/hwpmc/hwpmc_arm64.c optional hwpmc dev/hwpmc/hwpmc_arm64_md.c optional hwpmc dev/ice/if_ice_iflib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_lib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_osdep.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_resmgr.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_strings.c optional ice pci \ 
compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_recovery_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_controlq.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_dcb.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flex_pipe.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flow.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_nvm.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_sched.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_sriov.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_switch.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" ice_ddp.c optional ice_ddp \ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031000 -mice_ddp -c${.TARGET}" \ no-implicit-rule before-depend local \ clean "ice_ddp.c" ice_ddp.fwo optional ice_ddp \ dependency "ice_ddp.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ice_ddp.fwo" ice_ddp.fw optional ice_ddp \ dependency "$S/contrib/dev/ice/ice-1.3.16.0.pkg" \ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.16.0.pkg ice_ddp.fw" \ no-obj no-implicit-rule \ clean "ice_ddp.fw" dev/iicbus/sy8106a.c optional sy8106a fdt dev/iicbus/twsi/mv_twsi.c optional twsi fdt dev/iicbus/twsi/a10_twsi.c optional twsi fdt dev/iicbus/twsi/twsi.c optional twsi fdt dev/mbox/mbox_if.m optional soc_brcm_bcm2837 dev/mmc/host/dwmmc.c optional dwmmc fdt dev/mmc/host/dwmmc_altera.c optional dwmmc dwmmc_altera fdt dev/mmc/host/dwmmc_hisi.c optional dwmmc dwmmc_hisi fdt dev/mmc/host/dwmmc_rockchip.c optional dwmmc rk_dwmmc fdt dev/neta/if_mvneta_fdt.c optional neta fdt dev/neta/if_mvneta.c optional neta mdio mii dev/ofw/ofw_cpu.c optional fdt 
dev/ofw/ofwpci.c optional fdt pci dev/pci/controller/pci_n1sdp.c optional pci_n1sdp acpi dev/pci/pci_host_generic.c optional pci dev/pci/pci_host_generic_acpi.c optional pci acpi dev/pci/pci_host_generic_fdt.c optional pci fdt dev/pci/pci_dw_mv.c optional pci fdt dev/pci/pci_dw.c optional pci fdt dev/pci/pci_dw_if.m optional pci fdt dev/psci/psci.c standard dev/psci/smccc_arm64.S standard dev/psci/smccc.c standard dev/safexcel/safexcel.c optional safexcel fdt dev/sdhci/sdhci_xenon.c optional sdhci_xenon sdhci fdt dev/uart/uart_cpu_arm64.c optional uart dev/uart/uart_dev_mu.c optional uart uart_mu dev/uart/uart_dev_pl011.c optional uart pl011 dev/usb/controller/dwc_otg_hisi.c optional dwcotg fdt soc_hisi_hi6220 dev/usb/controller/dwc3.c optional fdt dwc3 dev/usb/controller/ehci_mv.c optional ehci_mv fdt dev/usb/controller/generic_ehci.c optional ehci dev/usb/controller/generic_ehci_acpi.c optional ehci acpi dev/usb/controller/generic_ehci_fdt.c optional ehci fdt dev/usb/controller/generic_ohci.c optional ohci fdt dev/usb/controller/generic_usb_if.m optional ohci fdt dev/usb/controller/musb_otg_allwinner.c optional musb fdt soc_allwinner_a64 dev/usb/controller/usb_nop_xceiv.c optional fdt ext_resources dev/usb/controller/generic_xhci.c optional xhci dev/usb/controller/generic_xhci_acpi.c optional xhci acpi dev/usb/controller/generic_xhci_fdt.c optional xhci fdt dev/vnic/mrml_bridge.c optional vnic fdt dev/vnic/nic_main.c optional vnic pci dev/vnic/nicvf_main.c optional vnic pci pci_iov dev/vnic/nicvf_queues.c optional vnic pci pci_iov dev/vnic/thunder_bgx_fdt.c optional vnic fdt dev/vnic/thunder_bgx.c optional vnic pci dev/vnic/thunder_mdio_fdt.c optional vnic fdt dev/vnic/thunder_mdio.c optional vnic dev/vnic/lmac_if.m optional inet | inet6 | vnic kern/kern_clocksource.c standard kern/msi_if.m optional intrng kern/pic_if.m optional intrng kern/subr_devmap.c standard kern/subr_intr.c optional intrng kern/subr_physmem.c standard libkern/bcmp.c standard 
libkern/memcmp.c standard \ compile-with "${NORMAL_C:N-fsanitize*}" libkern/memset.c standard \ compile-with "${NORMAL_C:N-fsanitize*}" libkern/arm64/crc32c_armv8.S standard cddl/dev/dtrace/aarch64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/aarch64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/aarch64/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" # RockChip Drivers arm64/rockchip/rk3399_emmcphy.c optional fdt rk_emmcphy soc_rockchip_rk3399 arm64/rockchip/rk_dwc3.c optional fdt rk_dwc3 soc_rockchip_rk3399 arm64/rockchip/rk_i2c.c optional fdt rk_i2c soc_rockchip_rk3328 | fdt rk_i2c soc_rockchip_rk3399 arm64/rockchip/rk805.c optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399 arm64/rockchip/rk_grf.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 arm64/rockchip/rk_pinctrl.c optional fdt rk_pinctrl soc_rockchip_rk3328 | fdt rk_pinctrl soc_rockchip_rk3399 arm64/rockchip/rk_gpio.c optional fdt rk_gpio soc_rockchip_rk3328 | fdt rk_gpio soc_rockchip_rk3399 arm64/rockchip/rk_iodomain.c optional fdt rk_iodomain arm64/rockchip/rk_spi.c optional fdt rk_spi arm64/rockchip/rk_usb2phy.c optional fdt rk_usb2phy soc_rockchip_rk3328 | soc_rockchip_rk3399 arm64/rockchip/rk_typec_phy.c optional fdt rk_typec_phy soc_rockchip_rk3399 arm64/rockchip/if_dwc_rk.c optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 arm64/rockchip/rk_tsadc_if.m optional fdt soc_rockchip_rk3399 arm64/rockchip/rk_tsadc.c optional fdt soc_rockchip_rk3399 arm64/rockchip/rk_pwm.c optional fdt rk_pwm arm64/rockchip/rk_pcie.c optional fdt pci soc_rockchip_rk3399 arm64/rockchip/rk_pcie_phy.c optional fdt pci soc_rockchip_rk3399 dev/dwc/if_dwc.c optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 dev/dwc/if_dwc_if.m optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 # RockChip Clock support arm64/rockchip/clk/rk_cru.c optional fdt soc_rockchip_rk3328 | 
fdt soc_rockchip_rk3399 arm64/rockchip/clk/rk_clk_armclk.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 arm64/rockchip/clk/rk_clk_composite.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 arm64/rockchip/clk/rk_clk_fract.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 arm64/rockchip/clk/rk_clk_gate.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 arm64/rockchip/clk/rk_clk_mux.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 arm64/rockchip/clk/rk_clk_pll.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 arm64/rockchip/clk/rk3328_cru.c optional fdt soc_rockchip_rk3328 arm64/rockchip/clk/rk3399_cru.c optional fdt soc_rockchip_rk3399 arm64/rockchip/clk/rk3399_pmucru.c optional fdt soc_rockchip_rk3399 # i.MX8 Clock support arm64/freescale/imx/imx8mq_ccm.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_gate.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_mux.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_composite.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_sscg_pll.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_frac_pll.c optional fdt soc_freescale_imx8 # iMX drivers arm/freescale/imx/imx_gpio.c optional gpio soc_freescale_imx8 arm/freescale/imx/imx_i2c.c optional fsliic arm/freescale/imx/imx_machdep.c optional fdt soc_freescale_imx8 arm64/freescale/imx/imx7gpc.c optional fdt soc_freescale_imx8 dev/ffec/if_ffec.c optional ffec diff --git a/sys/dev/axgbe/if_axgbe.c b/sys/dev/axgbe/if_axgbe.c index c76cd316a7ff..415c4016e3a9 100644 --- a/sys/dev/axgbe/if_axgbe.c +++ b/sys/dev/axgbe/if_axgbe.c @@ -1,615 +1,622 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2016,2017 SoftIron Inc. - * All rights reserved. + * Copyright (c) 2020 Advanced Micro Devices, Inc. * * This software was developed by Andrew Turner under * the sponsorship of SoftIron Inc. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miibus_if.h" #include "xgbe.h" #include "xgbe-common.h" static device_probe_t axgbe_probe; static device_attach_t axgbe_attach; struct axgbe_softc { /* Must be first */ struct xgbe_prv_data prv; uint8_t mac_addr[ETHER_ADDR_LEN]; struct ifmedia media; }; static struct ofw_compat_data compat_data[] = { { "amd,xgbe-seattle-v1a", true }, { NULL, false } }; static struct resource_spec old_phy_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Rx/Tx regs */ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* Integration regs */ { SYS_RES_MEMORY, 2, RF_ACTIVE }, /* Integration regs */ { SYS_RES_IRQ, 0, RF_ACTIVE }, /* Interrupt */ { -1, 0 } }; static struct resource_spec old_mac_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* MAC regs */ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* PCS regs */ { SYS_RES_IRQ, 0, RF_ACTIVE }, /* Device interrupt */ /* Per-channel interrupts */ { SYS_RES_IRQ, 1, RF_ACTIVE | RF_OPTIONAL }, { SYS_RES_IRQ, 2, RF_ACTIVE | RF_OPTIONAL }, { SYS_RES_IRQ, 3, RF_ACTIVE | RF_OPTIONAL }, { SYS_RES_IRQ, 4, RF_ACTIVE | RF_OPTIONAL }, { -1, 0 } }; static struct resource_spec mac_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* MAC regs */ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* PCS regs */ { SYS_RES_MEMORY, 2, RF_ACTIVE }, /* Rx/Tx regs */ { SYS_RES_MEMORY, 3, RF_ACTIVE }, /* Integration regs */ { SYS_RES_MEMORY, 4, RF_ACTIVE }, /* Integration regs */ { SYS_RES_IRQ, 0, RF_ACTIVE }, /* Device interrupt */ /* Per-channel and auto-negotiation interrupts */ { SYS_RES_IRQ, 1, RF_ACTIVE }, { SYS_RES_IRQ, 2, RF_ACTIVE | RF_OPTIONAL }, { SYS_RES_IRQ, 3, RF_ACTIVE | RF_OPTIONAL }, { SYS_RES_IRQ, 4, RF_ACTIVE | RF_OPTIONAL }, { SYS_RES_IRQ, 5, RF_ACTIVE | RF_OPTIONAL }, { -1, 0 } }; +static struct xgbe_version_data xgbe_v1 = { + 
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1, + .xpcs_access = XGBE_XPCS_ACCESS_V1, + .tx_max_fifo_size = 81920, + .rx_max_fifo_size = 81920, + .tx_tstamp_workaround = 1, +}; + MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data"); static void axgbe_init(void *p) { struct axgbe_softc *sc; struct ifnet *ifp; sc = p; ifp = sc->prv.netdev; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; ifp->if_drv_flags |= IFF_DRV_RUNNING; } static int axgbe_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data) { struct axgbe_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; - int error; + int error = 0; switch(command) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO) error = EINVAL; - else - error = xgbe_change_mtu(ifp, ifr->ifr_mtu); + /* TODO - change it to iflib way */ break; case SIOCSIFFLAGS: error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void axgbe_qflush(struct ifnet *ifp) { if_qflush(ifp); } static int axgbe_media_change(struct ifnet *ifp) { struct axgbe_softc *sc; int cur_media; sc = ifp->if_softc; sx_xlock(&sc->prv.an_mutex); cur_media = sc->media.ifm_cur->ifm_media; switch (IFM_SUBTYPE(cur_media)) { case IFM_10G_KR: sc->prv.phy.speed = SPEED_10000; sc->prv.phy.autoneg = AUTONEG_DISABLE; break; case IFM_2500_KX: sc->prv.phy.speed = SPEED_2500; sc->prv.phy.autoneg = AUTONEG_DISABLE; break; case IFM_1000_KX: sc->prv.phy.speed = SPEED_1000; sc->prv.phy.autoneg = AUTONEG_DISABLE; break; case IFM_AUTO: sc->prv.phy.autoneg = AUTONEG_ENABLE; break; } sx_xunlock(&sc->prv.an_mutex); return (-sc->prv.phy_if.phy_config_aneg(&sc->prv)); } static void axgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct axgbe_softc *sc; sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID; if (!sc->prv.phy.link) return; ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active 
= IFM_ETHER; if (sc->prv.phy.duplex == DUPLEX_FULL) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; switch (sc->prv.phy.speed) { case SPEED_10000: ifmr->ifm_active |= IFM_10G_KR; break; case SPEED_2500: ifmr->ifm_active |= IFM_2500_KX; break; case SPEED_1000: ifmr->ifm_active |= IFM_1000_KX; break; } } static uint64_t axgbe_get_counter(struct ifnet *ifp, ift_counter c) { struct xgbe_prv_data *pdata = ifp->if_softc; struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; DBGPR("-->%s\n", __func__); pdata->hw_if.read_mmc_stats(pdata); switch(c) { case IFCOUNTER_IPACKETS: return (pstats->rxframecount_gb); case IFCOUNTER_IERRORS: return (pstats->rxframecount_gb - pstats->rxbroadcastframes_g - pstats->rxmulticastframes_g - pstats->rxunicastframes_g); case IFCOUNTER_OPACKETS: return (pstats->txframecount_gb); case IFCOUNTER_OERRORS: return (pstats->txframecount_gb - pstats->txframecount_g); case IFCOUNTER_IBYTES: return (pstats->rxoctetcount_gb); case IFCOUNTER_OBYTES: return (pstats->txoctetcount_gb); default: return (if_get_counter_default(ifp, c)); } } static int axgbe_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "AMD 10 Gigabit Ethernet"); return (BUS_PROBE_DEFAULT); } static int axgbe_get_optional_prop(device_t dev, phandle_t node, const char *name, int *data, size_t len) { if (!OF_hasprop(node, name)) return (-1); if (OF_getencprop(node, name, data, len) <= 0) { device_printf(dev,"%s property is invalid\n", name); return (ENXIO); } return (0); } static int axgbe_attach(device_t dev) { struct axgbe_softc *sc; struct ifnet *ifp; pcell_t phy_handle; device_t phydev; phandle_t node, phy_node; struct resource *mac_res[11]; struct resource *phy_res[4]; ssize_t len; int error, i, j; sc = device_get_softc(dev); + sc->prv.vdata = &xgbe_v1; node = ofw_bus_get_node(dev); if (OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle)) <= 0) 
{ phy_node = node; if (bus_alloc_resources(dev, mac_spec, mac_res)) { device_printf(dev, "could not allocate phy resources\n"); return (ENXIO); } sc->prv.xgmac_res = mac_res[0]; sc->prv.xpcs_res = mac_res[1]; sc->prv.rxtx_res = mac_res[2]; sc->prv.sir0_res = mac_res[3]; sc->prv.sir1_res = mac_res[4]; sc->prv.dev_irq_res = mac_res[5]; sc->prv.per_channel_irq = OF_hasprop(node, XGBE_DMA_IRQS_PROPERTY); for (i = 0, j = 6; j < nitems(mac_res) - 1 && mac_res[j + 1] != NULL; i++, j++) { if (sc->prv.per_channel_irq) { sc->prv.chan_irq_res[i] = mac_res[j]; } } /* The last entry is the auto-negotiation interrupt */ sc->prv.an_irq_res = mac_res[j]; } else { phydev = OF_device_from_xref(phy_handle); phy_node = ofw_bus_get_node(phydev); if (bus_alloc_resources(phydev, old_phy_spec, phy_res)) { device_printf(dev, "could not allocate phy resources\n"); return (ENXIO); } if (bus_alloc_resources(dev, old_mac_spec, mac_res)) { device_printf(dev, "could not allocate mac resources\n"); return (ENXIO); } sc->prv.rxtx_res = phy_res[0]; sc->prv.sir0_res = phy_res[1]; sc->prv.sir1_res = phy_res[2]; sc->prv.an_irq_res = phy_res[3]; sc->prv.xgmac_res = mac_res[0]; sc->prv.xpcs_res = mac_res[1]; sc->prv.dev_irq_res = mac_res[2]; sc->prv.per_channel_irq = OF_hasprop(node, XGBE_DMA_IRQS_PROPERTY); if (sc->prv.per_channel_irq) { for (i = 0, j = 3; i < nitems(sc->prv.chan_irq_res) && mac_res[j] != NULL; i++, j++) { sc->prv.chan_irq_res[i] = mac_res[j]; } } } if ((len = OF_getproplen(node, "mac-address")) < 0) { device_printf(dev, "No mac-address property\n"); return (EINVAL); } if (len != ETHER_ADDR_LEN) return (EINVAL); OF_getprop(node, "mac-address", sc->mac_addr, ETHER_ADDR_LEN); sc->prv.netdev = ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "Cannot alloc ifnet\n"); return (ENXIO); } sc->prv.dev = dev; sc->prv.dmat = bus_get_dma_tag(dev); sc->prv.phy.advertising = ADVERTISED_10000baseKR_Full | ADVERTISED_1000baseKX_Full; + /* * Read the needed properties from the phy node. 
*/ /* This is documented as optional, but Linux requires it */ if (OF_getencprop(phy_node, XGBE_SPEEDSET_PROPERTY, &sc->prv.speed_set, sizeof(sc->prv.speed_set)) <= 0) { device_printf(dev, "%s property is missing\n", XGBE_SPEEDSET_PROPERTY); return (EINVAL); } error = axgbe_get_optional_prop(dev, phy_node, XGBE_BLWC_PROPERTY, sc->prv.serdes_blwc, sizeof(sc->prv.serdes_blwc)); if (error > 0) { return (error); } else if (error < 0) { sc->prv.serdes_blwc[0] = XGBE_SPEED_1000_BLWC; sc->prv.serdes_blwc[1] = XGBE_SPEED_2500_BLWC; sc->prv.serdes_blwc[2] = XGBE_SPEED_10000_BLWC; } error = axgbe_get_optional_prop(dev, phy_node, XGBE_CDR_RATE_PROPERTY, sc->prv.serdes_cdr_rate, sizeof(sc->prv.serdes_cdr_rate)); if (error > 0) { return (error); } else if (error < 0) { sc->prv.serdes_cdr_rate[0] = XGBE_SPEED_1000_CDR; sc->prv.serdes_cdr_rate[1] = XGBE_SPEED_2500_CDR; sc->prv.serdes_cdr_rate[2] = XGBE_SPEED_10000_CDR; } error = axgbe_get_optional_prop(dev, phy_node, XGBE_PQ_SKEW_PROPERTY, sc->prv.serdes_pq_skew, sizeof(sc->prv.serdes_pq_skew)); if (error > 0) { return (error); } else if (error < 0) { sc->prv.serdes_pq_skew[0] = XGBE_SPEED_1000_PQ; sc->prv.serdes_pq_skew[1] = XGBE_SPEED_2500_PQ; sc->prv.serdes_pq_skew[2] = XGBE_SPEED_10000_PQ; } error = axgbe_get_optional_prop(dev, phy_node, XGBE_TX_AMP_PROPERTY, sc->prv.serdes_tx_amp, sizeof(sc->prv.serdes_tx_amp)); if (error > 0) { return (error); } else if (error < 0) { sc->prv.serdes_tx_amp[0] = XGBE_SPEED_1000_TXAMP; sc->prv.serdes_tx_amp[1] = XGBE_SPEED_2500_TXAMP; sc->prv.serdes_tx_amp[2] = XGBE_SPEED_10000_TXAMP; } error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_CFG_PROPERTY, sc->prv.serdes_dfe_tap_cfg, sizeof(sc->prv.serdes_dfe_tap_cfg)); if (error > 0) { return (error); } else if (error < 0) { sc->prv.serdes_dfe_tap_cfg[0] = XGBE_SPEED_1000_DFE_TAP_CONFIG; sc->prv.serdes_dfe_tap_cfg[1] = XGBE_SPEED_2500_DFE_TAP_CONFIG; sc->prv.serdes_dfe_tap_cfg[2] = XGBE_SPEED_10000_DFE_TAP_CONFIG; } error = 
axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_ENA_PROPERTY, sc->prv.serdes_dfe_tap_ena, sizeof(sc->prv.serdes_dfe_tap_ena)); if (error > 0) { return (error); } else if (error < 0) { sc->prv.serdes_dfe_tap_ena[0] = XGBE_SPEED_1000_DFE_TAP_ENABLE; sc->prv.serdes_dfe_tap_ena[1] = XGBE_SPEED_2500_DFE_TAP_ENABLE; sc->prv.serdes_dfe_tap_ena[2] = XGBE_SPEED_10000_DFE_TAP_ENABLE; } /* Check if the NIC is DMA coherent */ sc->prv.coherent = OF_hasprop(node, "dma-coherent"); if (sc->prv.coherent) { - sc->prv.axdomain = XGBE_DMA_OS_AXDOMAIN; - sc->prv.arcache = XGBE_DMA_OS_ARCACHE; - sc->prv.awcache = XGBE_DMA_OS_AWCACHE; + sc->prv.arcr = XGBE_DMA_OS_ARCR; + sc->prv.awcr = XGBE_DMA_OS_AWCR; } else { - sc->prv.axdomain = XGBE_DMA_SYS_AXDOMAIN; - sc->prv.arcache = XGBE_DMA_SYS_ARCACHE; - sc->prv.awcache = XGBE_DMA_SYS_AWCACHE; + sc->prv.arcr = XGBE_DMA_SYS_ARCR; + sc->prv.awcr = XGBE_DMA_SYS_AWCR; } /* Create the lock & workqueues */ spin_lock_init(&sc->prv.xpcs_lock); sc->prv.dev_workqueue = taskqueue_create("axgbe", M_WAITOK, taskqueue_thread_enqueue, &sc->prv.dev_workqueue); taskqueue_start_threads(&sc->prv.dev_workqueue, 1, PI_NET, "axgbe taskq"); /* Set the needed pointers */ xgbe_init_function_ptrs_phy(&sc->prv.phy_if); xgbe_init_function_ptrs_dev(&sc->prv.hw_if); xgbe_init_function_ptrs_desc(&sc->prv.desc_if); + sc->prv.vdata->init_function_ptrs_phy_impl(&sc->prv.phy_if); /* Reset the hardware */ sc->prv.hw_if.exit(&sc->prv); /* Read the hardware features */ xgbe_get_all_hw_features(&sc->prv); /* Set default values */ - sc->prv.pblx8 = DMA_PBL_X8_ENABLE; sc->prv.tx_desc_count = XGBE_TX_DESC_CNT; sc->prv.tx_sf_mode = MTL_TSF_ENABLE; sc->prv.tx_threshold = MTL_TX_THRESHOLD_64; - sc->prv.tx_pbl = DMA_PBL_16; sc->prv.tx_osp_mode = DMA_OSP_ENABLE; sc->prv.rx_desc_count = XGBE_RX_DESC_CNT; sc->prv.rx_sf_mode = MTL_RSF_DISABLE; sc->prv.rx_threshold = MTL_RX_THRESHOLD_64; - sc->prv.rx_pbl = DMA_PBL_16; + sc->prv.pbl = DMA_PBL_128; sc->prv.pause_autoneg = 1; sc->prv.tx_pause = 
1; sc->prv.rx_pause = 1; sc->prv.phy_speed = SPEED_UNKNOWN; sc->prv.power_down = 0; /* TODO: Limit to min(ncpus, hw rings) */ sc->prv.tx_ring_count = 1; sc->prv.tx_q_count = 1; sc->prv.rx_ring_count = 1; sc->prv.rx_q_count = sc->prv.hw_feat.rx_q_cnt; /* Init the PHY */ sc->prv.phy_if.phy_init(&sc->prv); /* Set the coalescing */ xgbe_init_rx_coalesce(&sc->prv); xgbe_init_tx_coalesce(&sc->prv); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = axgbe_init; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = axgbe_ioctl; - ifp->if_transmit = xgbe_xmit; + /* TODO - change it to iflib way */ ifp->if_qflush = axgbe_qflush; ifp->if_get_counter = axgbe_get_counter; /* TODO: Support HW offload */ ifp->if_capabilities = 0; ifp->if_capenable = 0; ifp->if_hwassist = 0; ether_ifattach(ifp, sc->mac_addr); ifmedia_init(&sc->media, IFM_IMASK, axgbe_media_change, axgbe_media_status); #ifdef notyet ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL); #endif ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); set_bit(XGBE_DOWN, &sc->prv.dev_state); - if (xgbe_open(ifp) < 0) { - device_printf(dev, "ndo_open failed\n"); - return (ENXIO); - } - + /* TODO - change it to iflib way */ return (0); } static device_method_t axgbe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, axgbe_probe), DEVMETHOD(device_attach, axgbe_attach), + { 0, 0 } }; static devclass_t axgbe_devclass; DEFINE_CLASS_0(axgbe, axgbe_driver, axgbe_methods, sizeof(struct axgbe_softc)); -DRIVER_MODULE(axgbe, simplebus, axgbe_driver, axgbe_devclass, 0, 0); +DRIVER_MODULE(axa, simplebus, axgbe_driver, axgbe_devclass, 0, 0); + static struct ofw_compat_data phy_compat_data[] = { { "amd,xgbe-phy-seattle-v1a", true }, { NULL, false } }; static int axgbephy_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if 
(!ofw_bus_search_compatible(dev, phy_compat_data)->ocd_data) return (ENXIO); device_set_desc(dev, "AMD 10 Gigabit Ethernet"); return (BUS_PROBE_DEFAULT); } static int axgbephy_attach(device_t dev) { phandle_t node; node = ofw_bus_get_node(dev); OF_device_register_xref(OF_xref_from_node(node), dev); return (0); } static device_method_t axgbephy_methods[] = { /* Device interface */ DEVMETHOD(device_probe, axgbephy_probe), DEVMETHOD(device_attach, axgbephy_attach), + { 0, 0 } }; static devclass_t axgbephy_devclass; DEFINE_CLASS_0(axgbephy, axgbephy_driver, axgbephy_methods, 0); EARLY_DRIVER_MODULE(axgbephy, simplebus, axgbephy_driver, axgbephy_devclass, 0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE); diff --git a/sys/dev/axgbe/if_axgbe_pci.c b/sys/dev/axgbe/if_axgbe_pci.c new file mode 100644 index 000000000000..4c4883e1cb4f --- /dev/null +++ b/sys/dev/axgbe/if_axgbe_pci.c @@ -0,0 +1,2339 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Advanced Micro Devices, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Contact Information : + * Rajesh Kumar + * Shreyank Amartya + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include "xgbe.h" +#include "xgbe-common.h" + +#include "miibus_if.h" +#include "ifdi_if.h" +#include "opt_inet.h" +#include "opt_inet6.h" + +MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data"); + +extern struct if_txrx axgbe_txrx; + +/* Function prototypes */ +static void *axgbe_register(device_t); +static int axgbe_if_attach_pre(if_ctx_t); +static int axgbe_if_attach_post(if_ctx_t); +static int axgbe_if_detach(if_ctx_t); +static void axgbe_if_stop(if_ctx_t); +static void axgbe_if_init(if_ctx_t); + +/* Queue related routines */ +static int axgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); +static int axgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); +static int axgbe_alloc_channels(if_ctx_t); +static void axgbe_if_queues_free(if_ctx_t); +static int axgbe_if_tx_queue_intr_enable(if_ctx_t, uint16_t); +static int axgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t); + +/* Interrupt related routines */ +static void axgbe_if_disable_intr(if_ctx_t); +static void axgbe_if_enable_intr(if_ctx_t); +static int axgbe_if_msix_intr_assign(if_ctx_t, int); +static void xgbe_free_intr(struct xgbe_prv_data *, struct resource *, void *, int); + +/* Init and Iflib 
routines */ +static void axgbe_pci_init(struct xgbe_prv_data *); +static void axgbe_pci_stop(if_ctx_t); +static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *, struct xgbe_channel *); +static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *); +static int axgbe_if_mtu_set(if_ctx_t, uint32_t); +static void axgbe_if_update_admin_status(if_ctx_t); +static void axgbe_if_media_status(if_ctx_t, struct ifmediareq *); +static int axgbe_if_media_change(if_ctx_t); +static int axgbe_if_promisc_set(if_ctx_t, int); +static uint64_t axgbe_if_get_counter(if_ctx_t, ift_counter); +static void axgbe_if_vlan_register(if_ctx_t, uint16_t); +static void axgbe_if_vlan_unregister(if_ctx_t, uint16_t); +#if __FreeBSD_version >= 1300000 +static bool axgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event); +#endif +static void axgbe_set_counts(if_ctx_t); +static void axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *); + +/* MII interface registered functions */ +static int axgbe_miibus_readreg(device_t, int, int); +static int axgbe_miibus_writereg(device_t, int, int, int); +static void axgbe_miibus_statchg(device_t); + +/* ISR routines */ +static int axgbe_dev_isr(void *); +static void axgbe_ecc_isr(void *); +static void axgbe_i2c_isr(void *); +static void axgbe_an_isr(void *); +static int axgbe_msix_que(void *); + +/* Timer routines */ +static void xgbe_service(void *, int); +static void xgbe_service_timer(void *); +static void xgbe_init_timers(struct xgbe_prv_data *); +static void xgbe_stop_timers(struct xgbe_prv_data *); + +/* Dump routines */ +static void xgbe_dump_prop_registers(struct xgbe_prv_data *); + +/* + * Allocate only for MAC (BAR0) and PCS (BAR1) registers, and just point the + * MSI-X table bar (BAR5) to iflib. iflib will do the allocation for MSI-X + * table. 
+ */ +static struct resource_spec axgbe_pci_mac_spec[] = { + { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* MAC regs */ + { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE }, /* PCS regs */ + { -1, 0 } +}; + +static pci_vendor_info_t axgbe_vendor_info_array[] = +{ + PVID(0x1022, 0x1458, "AMD 10 Gigabit Ethernet Driver"), + PVID(0x1022, 0x1459, "AMD 10 Gigabit Ethernet Driver"), + PVID_END +}; + +static struct xgbe_version_data xgbe_v2a = { + .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, + .xpcs_access = XGBE_XPCS_ACCESS_V2, + .mmc_64bit = 1, + .tx_max_fifo_size = 229376, + .rx_max_fifo_size = 229376, + .tx_tstamp_workaround = 1, + .ecc_support = 1, + .i2c_support = 1, + .irq_reissue_support = 1, + .tx_desc_prefetch = 5, + .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, +}; + +static struct xgbe_version_data xgbe_v2b = { + .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, + .xpcs_access = XGBE_XPCS_ACCESS_V2, + .mmc_64bit = 1, + .tx_max_fifo_size = 65536, + .rx_max_fifo_size = 65536, + .tx_tstamp_workaround = 1, + .ecc_support = 1, + .i2c_support = 1, + .irq_reissue_support = 1, + .tx_desc_prefetch = 5, + .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, +}; + +/* Device Interface */ +static device_method_t ax_methods[] = { + DEVMETHOD(device_register, axgbe_register), + DEVMETHOD(device_probe, iflib_device_probe), + DEVMETHOD(device_attach, iflib_device_attach), + DEVMETHOD(device_detach, iflib_device_detach), + + /* MII interface */ + DEVMETHOD(miibus_readreg, axgbe_miibus_readreg), + DEVMETHOD(miibus_writereg, axgbe_miibus_writereg), + DEVMETHOD(miibus_statchg, axgbe_miibus_statchg), + + DEVMETHOD_END +}; + +static driver_t ax_driver = { + "ax", ax_methods, sizeof(struct axgbe_if_softc), +}; + +devclass_t ax_devclass; +DRIVER_MODULE(axp, pci, ax_driver, ax_devclass, 0, 0); +DRIVER_MODULE(miibus, ax, miibus_driver, miibus_devclass, 0, 0); +IFLIB_PNP_INFO(pci, ax_driver, axgbe_vendor_info_array); + +MODULE_DEPEND(ax, pci, 1, 1, 1); 
+MODULE_DEPEND(ax, ether, 1, 1, 1); +MODULE_DEPEND(ax, iflib, 1, 1, 1); +MODULE_DEPEND(ax, miibus, 1, 1, 1); + +/* Iflib Interface */ +static device_method_t axgbe_if_methods[] = { + DEVMETHOD(ifdi_attach_pre, axgbe_if_attach_pre), + DEVMETHOD(ifdi_attach_post, axgbe_if_attach_post), + DEVMETHOD(ifdi_detach, axgbe_if_detach), + DEVMETHOD(ifdi_init, axgbe_if_init), + DEVMETHOD(ifdi_stop, axgbe_if_stop), + DEVMETHOD(ifdi_msix_intr_assign, axgbe_if_msix_intr_assign), + DEVMETHOD(ifdi_intr_enable, axgbe_if_enable_intr), + DEVMETHOD(ifdi_intr_disable, axgbe_if_disable_intr), + DEVMETHOD(ifdi_tx_queue_intr_enable, axgbe_if_tx_queue_intr_enable), + DEVMETHOD(ifdi_rx_queue_intr_enable, axgbe_if_rx_queue_intr_enable), + DEVMETHOD(ifdi_tx_queues_alloc, axgbe_if_tx_queues_alloc), + DEVMETHOD(ifdi_rx_queues_alloc, axgbe_if_rx_queues_alloc), + DEVMETHOD(ifdi_queues_free, axgbe_if_queues_free), + DEVMETHOD(ifdi_update_admin_status, axgbe_if_update_admin_status), + DEVMETHOD(ifdi_mtu_set, axgbe_if_mtu_set), + DEVMETHOD(ifdi_media_status, axgbe_if_media_status), + DEVMETHOD(ifdi_media_change, axgbe_if_media_change), + DEVMETHOD(ifdi_promisc_set, axgbe_if_promisc_set), + DEVMETHOD(ifdi_get_counter, axgbe_if_get_counter), + DEVMETHOD(ifdi_vlan_register, axgbe_if_vlan_register), + DEVMETHOD(ifdi_vlan_unregister, axgbe_if_vlan_unregister), +#if __FreeBSD_version >= 1300000 + DEVMETHOD(ifdi_needs_restart, axgbe_if_needs_restart), +#endif + DEVMETHOD_END +}; + +static driver_t axgbe_if_driver = { + "axgbe_if", axgbe_if_methods, sizeof(struct axgbe_if_softc) +}; + +/* Iflib Shared Context */ +static struct if_shared_ctx axgbe_sctx_init = { + .isc_magic = IFLIB_MAGIC, + .isc_driver = &axgbe_if_driver, + .isc_q_align = PAGE_SIZE, + .isc_tx_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header), + .isc_tx_maxsegsize = PAGE_SIZE, + .isc_tso_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header), + .isc_tso_maxsegsize = PAGE_SIZE, + .isc_rx_maxsize = MJUM9BYTES, + 
.isc_rx_maxsegsize = MJUM9BYTES, + .isc_rx_nsegments = 1, + .isc_admin_intrcnt = 4, + + .isc_vendor_info = axgbe_vendor_info_array, + .isc_driver_version = XGBE_DRV_VERSION, + + .isc_nrxd_min = {XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MIN}, + .isc_nrxd_default = {XGBE_RX_DESC_CNT_DEFAULT, XGBE_RX_DESC_CNT_DEFAULT}, + .isc_nrxd_max = {XGBE_RX_DESC_CNT_MAX, XGBE_RX_DESC_CNT_MAX}, + .isc_ntxd_min = {XGBE_TX_DESC_CNT_MIN}, + .isc_ntxd_default = {XGBE_TX_DESC_CNT_DEFAULT}, + .isc_ntxd_max = {XGBE_TX_DESC_CNT_MAX}, + + .isc_nfl = 2, + .isc_ntxqs = 1, + .isc_nrxqs = 2, + .isc_flags = IFLIB_TSO_INIT_IP | IFLIB_NEED_SCRATCH | + IFLIB_NEED_ZERO_CSUM | IFLIB_NEED_ETHER_PAD, +}; + +static void * +axgbe_register(device_t dev) +{ + return (&axgbe_sctx_init); +} + +/* MII Interface Functions */ +static int +axgbe_miibus_readreg(device_t dev, int phy, int reg) +{ + struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev)); + struct xgbe_prv_data *pdata = &sc->pdata; + int val; + + axgbe_printf(3, "%s: phy %d reg %d\n", __func__, phy, reg); + + val = xgbe_phy_mii_read(pdata, phy, reg); + + axgbe_printf(2, "%s: val 0x%x\n", __func__, val); + return (val & 0xFFFF); +} + +static int +axgbe_miibus_writereg(device_t dev, int phy, int reg, int val) +{ + struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev)); + struct xgbe_prv_data *pdata = &sc->pdata; + + axgbe_printf(3, "%s: phy %d reg %d val 0x%x\n", __func__, phy, reg, val); + + xgbe_phy_mii_write(pdata, phy, reg, val); + + return(0); +} + +static void +axgbe_miibus_statchg(device_t dev) +{ + struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev)); + struct xgbe_prv_data *pdata = &sc->pdata; + struct mii_data *mii = device_get_softc(pdata->axgbe_miibus); + struct ifnet *ifp = pdata->netdev; + int bmsr; + + axgbe_printf(2, "%s: Link %d/%d\n", __func__, pdata->phy.link, + pdata->phy_link); + + if (mii == NULL || ifp == NULL || + (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + return; + + if 
((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == + (IFM_ACTIVE | IFM_AVALID)) { + + switch (IFM_SUBTYPE(mii->mii_media_active)) { + case IFM_10_T: + case IFM_100_TX: + pdata->phy.link = 1; + break; + case IFM_1000_T: + case IFM_1000_SX: + case IFM_2500_SX: + pdata->phy.link = 1; + break; + default: + pdata->phy.link = 0; + break; + } + } else + pdata->phy_link = 0; + + bmsr = axgbe_miibus_readreg(pdata->dev, pdata->mdio_addr, MII_BMSR); + if (bmsr & BMSR_ANEG) { + + axgbe_printf(2, "%s: Autoneg Done\n", __func__); + + /* Raise AN Interrupt */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, + XGBE_AN_CL73_INT_MASK); + } +} + +static int +axgbe_if_attach_pre(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc; + struct xgbe_prv_data *pdata; + struct resource *mac_res[2]; + if_softc_ctx_t scctx; + if_shared_ctx_t sctx; + device_t dev; + unsigned int ma_lo, ma_hi; + unsigned int reg; + + sc = iflib_get_softc(ctx); + sc->pdata.dev = dev = iflib_get_dev(ctx); + sc->sctx = sctx = iflib_get_sctx(ctx); + sc->scctx = scctx = iflib_get_softc_ctx(ctx); + sc->media = iflib_get_media(ctx); + sc->ctx = ctx; + sc->link_status = LINK_STATE_DOWN; + pdata = &sc->pdata; + pdata->netdev = iflib_get_ifp(ctx); + + spin_lock_init(&pdata->xpcs_lock); + + /* Initialize locks */ + mtx_init(&pdata->rss_mutex, "xgbe rss mutex lock", NULL, MTX_DEF); + mtx_init(&pdata->mdio_mutex, "xgbe MDIO mutex lock", NULL, MTX_SPIN); + + /* Allocate VLAN bitmap */ + pdata->active_vlans = bit_alloc(VLAN_NVID, M_AXGBE, M_WAITOK|M_ZERO); + pdata->num_active_vlans = 0; + + /* Get the version data */ + DBGPR("%s: Device ID: 0x%x\n", __func__, pci_get_device(dev)); + if (pci_get_device(dev) == 0x1458) + sc->pdata.vdata = &xgbe_v2a; + else if (pci_get_device(dev) == 0x1459) + sc->pdata.vdata = &xgbe_v2b; + + /* PCI setup */ + if (bus_alloc_resources(dev, axgbe_pci_mac_spec, mac_res)) + return (ENXIO); + + sc->pdata.xgmac_res = mac_res[0]; + sc->pdata.xpcs_res = mac_res[1]; + + /* Set the PCS indirect addressing 
definition registers*/ + pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; + pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; + + /* Configure the PCS indirect addressing support */ + reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg); + pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); + pdata->xpcs_window <<= 6; + pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); + pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7); + pdata->xpcs_window_mask = pdata->xpcs_window_size - 1; + DBGPR("xpcs window def : %#010x\n", + pdata->xpcs_window_def_reg); + DBGPR("xpcs window sel : %#010x\n", + pdata->xpcs_window_sel_reg); + DBGPR("xpcs window : %#010x\n", + pdata->xpcs_window); + DBGPR("xpcs window size : %#010x\n", + pdata->xpcs_window_size); + DBGPR("xpcs window mask : %#010x\n", + pdata->xpcs_window_mask); + + /* Enable all interrupts in the hardware */ + XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); + + /* Retrieve the MAC address */ + ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO); + ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI); + pdata->mac_addr[0] = ma_lo & 0xff; + pdata->mac_addr[1] = (ma_lo >> 8) & 0xff; + pdata->mac_addr[2] = (ma_lo >>16) & 0xff; + pdata->mac_addr[3] = (ma_lo >> 24) & 0xff; + pdata->mac_addr[4] = ma_hi & 0xff; + pdata->mac_addr[5] = (ma_hi >> 8) & 0xff; + if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID)) { + axgbe_error("Invalid mac address\n"); + return (EINVAL); + } + iflib_set_mac(ctx, pdata->mac_addr); + + /* Clock settings */ + pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ; + pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ; + + /* Set the DMA coherency values */ + pdata->coherent = 1; + pdata->arcr = XGBE_DMA_PCI_ARCR; + pdata->awcr = XGBE_DMA_PCI_AWCR; + pdata->awarcr = XGBE_DMA_PCI_AWARCR; + + /* Read the port property registers */ + pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0); + pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1); + pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2); + pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3); + 
pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4); + DBGPR("port property 0 = %#010x\n", pdata->pp0); + DBGPR("port property 1 = %#010x\n", pdata->pp1); + DBGPR("port property 2 = %#010x\n", pdata->pp2); + DBGPR("port property 3 = %#010x\n", pdata->pp3); + DBGPR("port property 4 = %#010x\n", pdata->pp4); + + /* Set the maximum channels and queues */ + pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_TX_DMA); + pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_RX_DMA); + pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_TX_QUEUES); + pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_RX_QUEUES); + DBGPR("max tx/rx channel count = %u/%u\n", + pdata->tx_max_channel_count, pdata->rx_max_channel_count); + DBGPR("max tx/rx hw queue count = %u/%u\n", + pdata->tx_max_q_count, pdata->rx_max_q_count); + + axgbe_set_counts(ctx); + + /* Set the maximum fifo amounts */ + pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, + TX_FIFO_SIZE); + pdata->tx_max_fifo_size *= 16384; + pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size, + pdata->vdata->tx_max_fifo_size); + pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, + RX_FIFO_SIZE); + pdata->rx_max_fifo_size *= 16384; + pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size, + pdata->vdata->rx_max_fifo_size); + DBGPR("max tx/rx max fifo size = %u/%u\n", + pdata->tx_max_fifo_size, pdata->rx_max_fifo_size); + + /* Initialize IFLIB if_softc_ctx_t */ + axgbe_init_iflib_softc_ctx(sc); + + /* Alloc channels */ + if (axgbe_alloc_channels(ctx)) { + axgbe_error("Unable to allocate channel memory\n"); + return (ENOMEM); + } + + TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata); + + /* create the workqueue */ + pdata->dev_workqueue = taskqueue_create("axgbe", M_WAITOK, + taskqueue_thread_enqueue, &pdata->dev_workqueue); + taskqueue_start_threads(&pdata->dev_workqueue, 1, PI_NET, + "axgbe dev taskq"); + + /* Init timers */ + xgbe_init_timers(pdata); + + 
return (0); +} /* axgbe_if_attach_pre */ + +static void +xgbe_init_all_fptrs(struct xgbe_prv_data *pdata) +{ + xgbe_init_function_ptrs_dev(&pdata->hw_if); + xgbe_init_function_ptrs_phy(&pdata->phy_if); + xgbe_init_function_ptrs_i2c(&pdata->i2c_if); + xgbe_init_function_ptrs_desc(&pdata->desc_if); + + pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if); +} + +static void +axgbe_set_counts(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx);; + struct xgbe_prv_data *pdata = &sc->pdata; + cpuset_t lcpus; + int cpu_count, err; + size_t len; + + /* Set all function pointers */ + xgbe_init_all_fptrs(pdata); + + /* Populate the hardware features */ + xgbe_get_all_hw_features(pdata); + + if (!pdata->tx_max_channel_count) + pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt; + if (!pdata->rx_max_channel_count) + pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt; + + if (!pdata->tx_max_q_count) + pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt; + if (!pdata->rx_max_q_count) + pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt; + + /* + * Calculate the number of Tx and Rx rings to be created + * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set + * the number of Tx queues to the number of Tx channels + * enabled + * -Rx (DMA) Channels do not map 1-to-1 so use the actual + * number of Rx queues or maximum allowed + */ + + /* Get cpu count from sysctl */ + len = sizeof(cpu_count); + err = kernel_sysctlbyname(curthread, "hw.ncpu", &cpu_count, &len, NULL, + 0, NULL, 0); + if (err) { + axgbe_error("Unable to fetch number of cpus\n"); + cpu_count = 1; + } + + if (bus_get_cpus(pdata->dev, INTR_CPUS, sizeof(lcpus), &lcpus) != 0) { + axgbe_error("Unable to fetch CPU list\n"); + /* TODO - handle CPU_COPY(&all_cpus, &lcpus); */ + } + + DBGPR("ncpu %d intrcpu %d\n", cpu_count, CPU_COUNT(&lcpus)); + + pdata->tx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.tx_ch_cnt); + pdata->tx_ring_count = min(pdata->tx_ring_count, + pdata->tx_max_channel_count); + 
pdata->tx_ring_count = min(pdata->tx_ring_count, pdata->tx_max_q_count); + + pdata->tx_q_count = pdata->tx_ring_count; + + pdata->rx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.rx_ch_cnt); + pdata->rx_ring_count = min(pdata->rx_ring_count, + pdata->rx_max_channel_count); + + pdata->rx_q_count = min(pdata->hw_feat.rx_q_cnt, pdata->rx_max_q_count); + + DBGPR("TX/RX max channel count = %u/%u\n", + pdata->tx_max_channel_count, pdata->rx_max_channel_count); + DBGPR("TX/RX max queue count = %u/%u\n", + pdata->tx_max_q_count, pdata->rx_max_q_count); + DBGPR("TX/RX DMA ring count = %u/%u\n", + pdata->tx_ring_count, pdata->rx_ring_count); + DBGPR("TX/RX hardware queue count = %u/%u\n", + pdata->tx_q_count, pdata->rx_q_count); +} /* axgbe_set_counts */ + +static void +axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *sc) +{ + struct xgbe_prv_data *pdata = &sc->pdata; + if_softc_ctx_t scctx = sc->scctx; + if_shared_ctx_t sctx = sc->sctx; + int i; + + scctx->isc_nrxqsets = pdata->rx_q_count; + scctx->isc_ntxqsets = pdata->tx_q_count; + scctx->isc_msix_bar = pci_msix_table_bar(pdata->dev); + scctx->isc_tx_nsegments = 32; + + for (i = 0; i < sctx->isc_ntxqs; i++) { + scctx->isc_txqsizes[i] = + roundup2(scctx->isc_ntxd[i] * sizeof(struct xgbe_ring_desc), + 128); + scctx->isc_txd_size[i] = sizeof(struct xgbe_ring_desc); + } + + for (i = 0; i < sctx->isc_nrxqs; i++) { + scctx->isc_rxqsizes[i] = + roundup2(scctx->isc_nrxd[i] * sizeof(struct xgbe_ring_desc), + 128); + scctx->isc_rxd_size[i] = sizeof(struct xgbe_ring_desc); + } + + scctx->isc_tx_tso_segments_max = 32; + scctx->isc_tx_tso_size_max = XGBE_TSO_MAX_SIZE; + scctx->isc_tx_tso_segsize_max = PAGE_SIZE; + + /* + * Set capabilities + * 1) IFLIB automatically adds IFCAP_HWSTATS, so need to set explicitly + * 2) isc_tx_csum_flags is mandatory if IFCAP_TXCSUM (included in + * IFCAP_HWCSUM) is set + */ + scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP | + CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6 | 
+ CSUM_TSO); + scctx->isc_capenable = (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | + IFCAP_JUMBO_MTU | + IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER | + IFCAP_VLAN_HWCSUM | + IFCAP_TSO | IFCAP_VLAN_HWTSO); + scctx->isc_capabilities = scctx->isc_capenable; + + /* + * Set rss_table_size alone when adding RSS support. rss_table_mask + * will be set by IFLIB based on rss_table_size + */ + scctx->isc_rss_table_size = XGBE_RSS_MAX_TABLE_SIZE; + + scctx->isc_ntxqsets_max = XGBE_MAX_QUEUES; + scctx->isc_nrxqsets_max = XGBE_MAX_QUEUES; + + scctx->isc_txrx = &axgbe_txrx; +} + +static int +axgbe_alloc_channels(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_channel *channel; + int i, j, count; + + DBGPR("%s: txqs %d rxqs %d\n", __func__, pdata->tx_ring_count, + pdata->rx_ring_count); + + /* Iflibe sets based on isc_ntxqsets/nrxqsets */ + count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); + + /* Allocate channel memory */ + for (i = 0; i < count ; i++) { + channel = (struct xgbe_channel*)malloc(sizeof(struct xgbe_channel), + M_AXGBE, M_NOWAIT | M_ZERO); + + if (channel == NULL) { + for (j = 0; j < i; j++) { + free(pdata->channel[j], M_AXGBE); + pdata->channel[j] = NULL; + } + return (ENOMEM); + } + + pdata->channel[i] = channel; + } + + pdata->total_channel_count = count; + DBGPR("Channel count set to: %u\n", pdata->total_channel_count); + + for (i = 0; i < count; i++) { + + channel = pdata->channel[i]; + snprintf(channel->name, sizeof(channel->name), "channel-%d",i); + + channel->pdata = pdata; + channel->queue_index = i; + channel->dma_tag = rman_get_bustag(pdata->xgmac_res); + bus_space_subregion(channel->dma_tag, + rman_get_bushandle(pdata->xgmac_res), + DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC, + &channel->dma_handle); + channel->tx_ring = NULL; + channel->rx_ring = NULL; + } + + return (0); +} /* axgbe_alloc_channels */ + +static void +xgbe_service(void *ctx, int 
pending) +{ + struct xgbe_prv_data *pdata = ctx; + struct axgbe_if_softc *sc = (struct axgbe_if_softc *)pdata; + bool prev_state = false; + + /* Get previous link status */ + prev_state = pdata->phy.link; + + pdata->phy_if.phy_status(pdata); + + if (prev_state != pdata->phy.link) { + pdata->phy_link = pdata->phy.link; + axgbe_if_update_admin_status(sc->ctx); + } + + callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata); +} + +static void +xgbe_service_timer(void *data) +{ + struct xgbe_prv_data *pdata = data; + + taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work); +} + +static void +xgbe_init_timers(struct xgbe_prv_data *pdata) +{ + callout_init(&pdata->service_timer, 1*hz); +} + +static void +xgbe_start_timers(struct xgbe_prv_data *pdata) +{ + callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata); +} + +static void +xgbe_stop_timers(struct xgbe_prv_data *pdata) +{ + callout_drain(&pdata->service_timer); + callout_stop(&pdata->service_timer); +} + +static void +xgbe_dump_phy_registers(struct xgbe_prv_data *pdata) +{ + axgbe_printf(1, "\n************* PHY Reg dump *********************\n"); + + axgbe_printf(1, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1)); + axgbe_printf(1, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1)); + axgbe_printf(1, "Phy Id (PHYS ID 1 %#06x)= %#06x\n", MDIO_DEVID1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1)); + axgbe_printf(1, "Phy Id (PHYS ID 2 %#06x)= %#06x\n", MDIO_DEVID2, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2)); + axgbe_printf(1, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS1, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1)); + axgbe_printf(1, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS2, + XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2)); + axgbe_printf(1, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1)); + axgbe_printf(1, "Auto-Neg 
Status Reg (%#06x) = %#06x\n", MDIO_STAT1, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1)); + axgbe_printf(1, "Auto-Neg Ad Reg 1 (%#06x) = %#06x\n", + MDIO_AN_ADVERTISE, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE)); + axgbe_printf(1, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n", + MDIO_AN_ADVERTISE + 1, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1)); + axgbe_printf(1, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n", + MDIO_AN_ADVERTISE + 2, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2)); + axgbe_printf(1, "Auto-Neg Completion Reg (%#06x) = %#06x\n", + MDIO_AN_COMP_STAT, + XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT)); + + axgbe_printf(1, "\n************************************************\n"); +} + +static void +xgbe_dump_prop_registers(struct xgbe_prv_data *pdata) +{ + int i; + + axgbe_printf(1, "\n************* PROP Reg dump ********************\n"); + + for (i = 0 ; i < 38 ; i++) { + axgbe_printf(1, "PROP Offset 0x%08x = %08x\n", + (XP_PROP_0 + (i * 4)), XP_IOREAD(pdata, + (XP_PROP_0 + (i * 4)))); + } +} + +static void +xgbe_dump_dma_registers(struct xgbe_prv_data *pdata, int ch) +{ + struct xgbe_channel *channel; + int i; + + axgbe_printf(1, "\n************* DMA Reg dump *********************\n"); + + axgbe_printf(1, "DMA MR Reg (%08x) = %08x\n", DMA_MR, + XGMAC_IOREAD(pdata, DMA_MR)); + axgbe_printf(1, "DMA SBMR Reg (%08x) = %08x\n", DMA_SBMR, + XGMAC_IOREAD(pdata, DMA_SBMR)); + axgbe_printf(1, "DMA ISR Reg (%08x) = %08x\n", DMA_ISR, + XGMAC_IOREAD(pdata, DMA_ISR)); + axgbe_printf(1, "DMA AXIARCR Reg (%08x) = %08x\n", DMA_AXIARCR, + XGMAC_IOREAD(pdata, DMA_AXIARCR)); + axgbe_printf(1, "DMA AXIAWCR Reg (%08x) = %08x\n", DMA_AXIAWCR, + XGMAC_IOREAD(pdata, DMA_AXIAWCR)); + axgbe_printf(1, "DMA AXIAWARCR Reg (%08x) = %08x\n", DMA_AXIAWARCR, + XGMAC_IOREAD(pdata, DMA_AXIAWARCR)); + axgbe_printf(1, "DMA DSR0 Reg (%08x) = %08x\n", DMA_DSR0, + XGMAC_IOREAD(pdata, DMA_DSR0)); + axgbe_printf(1, "DMA DSR1 Reg (%08x) = %08x\n", DMA_DSR1, + 
XGMAC_IOREAD(pdata, DMA_DSR1)); + axgbe_printf(1, "DMA DSR2 Reg (%08x) = %08x\n", DMA_DSR2, + XGMAC_IOREAD(pdata, DMA_DSR2)); + axgbe_printf(1, "DMA DSR3 Reg (%08x) = %08x\n", DMA_DSR3, + XGMAC_IOREAD(pdata, DMA_DSR3)); + axgbe_printf(1, "DMA DSR4 Reg (%08x) = %08x\n", DMA_DSR4, + XGMAC_IOREAD(pdata, DMA_DSR4)); + axgbe_printf(1, "DMA TXEDMACR Reg (%08x) = %08x\n", DMA_TXEDMACR, + XGMAC_IOREAD(pdata, DMA_TXEDMACR)); + axgbe_printf(1, "DMA RXEDMACR Reg (%08x) = %08x\n", DMA_RXEDMACR, + XGMAC_IOREAD(pdata, DMA_RXEDMACR)); + + for (i = 0 ; i < 8 ; i++ ) { + + if (ch >= 0) { + if (i != ch) + continue; + } + + channel = pdata->channel[i]; + + axgbe_printf(1, "\n************* DMA CH %d dump ****************\n", i); + + axgbe_printf(1, "DMA_CH_CR Reg (%08x) = %08x\n", + DMA_CH_CR, XGMAC_DMA_IOREAD(channel, DMA_CH_CR)); + axgbe_printf(1, "DMA_CH_TCR Reg (%08x) = %08x\n", + DMA_CH_TCR, XGMAC_DMA_IOREAD(channel, DMA_CH_TCR)); + axgbe_printf(1, "DMA_CH_RCR Reg (%08x) = %08x\n", + DMA_CH_RCR, XGMAC_DMA_IOREAD(channel, DMA_CH_RCR)); + axgbe_printf(1, "DMA_CH_TDLR_HI Reg (%08x) = %08x\n", + DMA_CH_TDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_HI)); + axgbe_printf(1, "DMA_CH_TDLR_LO Reg (%08x) = %08x\n", + DMA_CH_TDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_LO)); + axgbe_printf(1, "DMA_CH_RDLR_HI Reg (%08x) = %08x\n", + DMA_CH_RDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_HI)); + axgbe_printf(1, "DMA_CH_RDLR_LO Reg (%08x) = %08x\n", + DMA_CH_RDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_LO)); + axgbe_printf(1, "DMA_CH_TDTR_LO Reg (%08x) = %08x\n", + DMA_CH_TDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO)); + axgbe_printf(1, "DMA_CH_RDTR_LO Reg (%08x) = %08x\n", + DMA_CH_RDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTR_LO)); + axgbe_printf(1, "DMA_CH_TDRLR Reg (%08x) = %08x\n", + DMA_CH_TDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_TDRLR)); + axgbe_printf(1, "DMA_CH_RDRLR Reg (%08x) = %08x\n", + DMA_CH_RDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_RDRLR)); + axgbe_printf(1, "DMA_CH_IER 
Reg (%08x) = %08x\n", + DMA_CH_IER, XGMAC_DMA_IOREAD(channel, DMA_CH_IER)); + axgbe_printf(1, "DMA_CH_RIWT Reg (%08x) = %08x\n", + DMA_CH_RIWT, XGMAC_DMA_IOREAD(channel, DMA_CH_RIWT)); + axgbe_printf(1, "DMA_CH_CATDR_LO Reg (%08x) = %08x\n", + DMA_CH_CATDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATDR_LO)); + axgbe_printf(1, "DMA_CH_CARDR_LO Reg (%08x) = %08x\n", + DMA_CH_CARDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARDR_LO)); + axgbe_printf(1, "DMA_CH_CATBR_HI Reg (%08x) = %08x\n", + DMA_CH_CATBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_HI)); + axgbe_printf(1, "DMA_CH_CATBR_LO Reg (%08x) = %08x\n", + DMA_CH_CATBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_LO)); + axgbe_printf(1, "DMA_CH_CARBR_HI Reg (%08x) = %08x\n", + DMA_CH_CARBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_HI)); + axgbe_printf(1, "DMA_CH_CARBR_LO Reg (%08x) = %08x\n", + DMA_CH_CARBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_LO)); + axgbe_printf(1, "DMA_CH_SR Reg (%08x) = %08x\n", + DMA_CH_SR, XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); + axgbe_printf(1, "DMA_CH_DSR Reg (%08x) = %08x\n", + DMA_CH_DSR, XGMAC_DMA_IOREAD(channel, DMA_CH_DSR)); + axgbe_printf(1, "DMA_CH_DCFL Reg (%08x) = %08x\n", + DMA_CH_DCFL, XGMAC_DMA_IOREAD(channel, DMA_CH_DCFL)); + axgbe_printf(1, "DMA_CH_MFC Reg (%08x) = %08x\n", + DMA_CH_MFC, XGMAC_DMA_IOREAD(channel, DMA_CH_MFC)); + axgbe_printf(1, "DMA_CH_TDTRO Reg (%08x) = %08x\n", + DMA_CH_TDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTRO)); + axgbe_printf(1, "DMA_CH_RDTRO Reg (%08x) = %08x\n", + DMA_CH_RDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTRO)); + axgbe_printf(1, "DMA_CH_TDWRO Reg (%08x) = %08x\n", + DMA_CH_TDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDWRO)); + axgbe_printf(1, "DMA_CH_RDWRO Reg (%08x) = %08x\n", + DMA_CH_RDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDWRO)); + } +} + +static void +xgbe_dump_mtl_registers(struct xgbe_prv_data *pdata) +{ + int i; + + axgbe_printf(1, "\n************* MTL Reg dump *********************\n"); + + axgbe_printf(1, "MTL OMR Reg (%08x) = %08x\n", 
MTL_OMR, + XGMAC_IOREAD(pdata, MTL_OMR)); + axgbe_printf(1, "MTL FDCR Reg (%08x) = %08x\n", MTL_FDCR, + XGMAC_IOREAD(pdata, MTL_FDCR)); + axgbe_printf(1, "MTL FDSR Reg (%08x) = %08x\n", MTL_FDSR, + XGMAC_IOREAD(pdata, MTL_FDSR)); + axgbe_printf(1, "MTL FDDR Reg (%08x) = %08x\n", MTL_FDDR, + XGMAC_IOREAD(pdata, MTL_FDDR)); + axgbe_printf(1, "MTL ISR Reg (%08x) = %08x\n", MTL_ISR, + XGMAC_IOREAD(pdata, MTL_ISR)); + axgbe_printf(1, "MTL RQDCM0R Reg (%08x) = %08x\n", MTL_RQDCM0R, + XGMAC_IOREAD(pdata, MTL_RQDCM0R)); + axgbe_printf(1, "MTL RQDCM1R Reg (%08x) = %08x\n", MTL_RQDCM1R, + XGMAC_IOREAD(pdata, MTL_RQDCM1R)); + axgbe_printf(1, "MTL RQDCM2R Reg (%08x) = %08x\n", MTL_RQDCM2R, + XGMAC_IOREAD(pdata, MTL_RQDCM2R)); + axgbe_printf(1, "MTL TCPM0R Reg (%08x) = %08x\n", MTL_TCPM0R, + XGMAC_IOREAD(pdata, MTL_TCPM0R)); + axgbe_printf(1, "MTL TCPM1R Reg (%08x) = %08x\n", MTL_TCPM1R, + XGMAC_IOREAD(pdata, MTL_TCPM1R)); + + for (i = 0 ; i < 8 ; i++ ) { + + axgbe_printf(1, "\n************* MTL CH %d dump ****************\n", i); + + axgbe_printf(1, "MTL_Q_TQOMR Reg (%08x) = %08x\n", + MTL_Q_TQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR)); + axgbe_printf(1, "MTL_Q_TQUR Reg (%08x) = %08x\n", + MTL_Q_TQUR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQUR)); + axgbe_printf(1, "MTL_Q_TQDR Reg (%08x) = %08x\n", + MTL_Q_TQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQDR)); + axgbe_printf(1, "MTL_Q_TC0ETSCR Reg (%08x) = %08x\n", + MTL_Q_TC0ETSCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSCR)); + axgbe_printf(1, "MTL_Q_TC0ETSSR Reg (%08x) = %08x\n", + MTL_Q_TC0ETSSR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSSR)); + axgbe_printf(1, "MTL_Q_TC0QWR Reg (%08x) = %08x\n", + MTL_Q_TC0QWR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0QWR)); + + axgbe_printf(1, "MTL_Q_RQOMR Reg (%08x) = %08x\n", + MTL_Q_RQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR)); + axgbe_printf(1, "MTL_Q_RQMPOCR Reg (%08x) = %08x\n", + MTL_Q_RQMPOCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQMPOCR)); + axgbe_printf(1, "MTL_Q_RQDR Reg (%08x) = %08x\n", + 
MTL_Q_RQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQDR)); + axgbe_printf(1, "MTL_Q_RQCR Reg (%08x) = %08x\n", + MTL_Q_RQCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQCR)); + axgbe_printf(1, "MTL_Q_RQFCR Reg (%08x) = %08x\n", + MTL_Q_RQFCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR)); + axgbe_printf(1, "MTL_Q_IER Reg (%08x) = %08x\n", + MTL_Q_IER, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_IER)); + axgbe_printf(1, "MTL_Q_ISR Reg (%08x) = %08x\n", + MTL_Q_ISR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR)); + } +} + +static void +xgbe_dump_mac_registers(struct xgbe_prv_data *pdata) +{ + axgbe_printf(1, "\n************* MAC Reg dump **********************\n"); + + axgbe_printf(1, "MAC TCR Reg (%08x) = %08x\n", MAC_TCR, + XGMAC_IOREAD(pdata, MAC_TCR)); + axgbe_printf(1, "MAC RCR Reg (%08x) = %08x\n", MAC_RCR, + XGMAC_IOREAD(pdata, MAC_RCR)); + axgbe_printf(1, "MAC PFR Reg (%08x) = %08x\n", MAC_PFR, + XGMAC_IOREAD(pdata, MAC_PFR)); + axgbe_printf(1, "MAC WTR Reg (%08x) = %08x\n", MAC_WTR, + XGMAC_IOREAD(pdata, MAC_WTR)); + axgbe_printf(1, "MAC HTR0 Reg (%08x) = %08x\n", MAC_HTR0, + XGMAC_IOREAD(pdata, MAC_HTR0)); + axgbe_printf(1, "MAC HTR1 Reg (%08x) = %08x\n", MAC_HTR1, + XGMAC_IOREAD(pdata, MAC_HTR1)); + axgbe_printf(1, "MAC HTR2 Reg (%08x) = %08x\n", MAC_HTR2, + XGMAC_IOREAD(pdata, MAC_HTR2)); + axgbe_printf(1, "MAC HTR3 Reg (%08x) = %08x\n", MAC_HTR3, + XGMAC_IOREAD(pdata, MAC_HTR3)); + axgbe_printf(1, "MAC HTR4 Reg (%08x) = %08x\n", MAC_HTR4, + XGMAC_IOREAD(pdata, MAC_HTR4)); + axgbe_printf(1, "MAC HTR5 Reg (%08x) = %08x\n", MAC_HTR5, + XGMAC_IOREAD(pdata, MAC_HTR5)); + axgbe_printf(1, "MAC HTR6 Reg (%08x) = %08x\n", MAC_HTR6, + XGMAC_IOREAD(pdata, MAC_HTR6)); + axgbe_printf(1, "MAC HTR7 Reg (%08x) = %08x\n", MAC_HTR7, + XGMAC_IOREAD(pdata, MAC_HTR7)); + axgbe_printf(1, "MAC VLANTR Reg (%08x) = %08x\n", MAC_VLANTR, + XGMAC_IOREAD(pdata, MAC_VLANTR)); + axgbe_printf(1, "MAC VLANHTR Reg (%08x) = %08x\n", MAC_VLANHTR, + XGMAC_IOREAD(pdata, MAC_VLANHTR)); + axgbe_printf(1, "MAC VLANIR Reg (%08x) = 
%08x\n", MAC_VLANIR, + XGMAC_IOREAD(pdata, MAC_VLANIR)); + axgbe_printf(1, "MAC IVLANIR Reg (%08x) = %08x\n", MAC_IVLANIR, + XGMAC_IOREAD(pdata, MAC_IVLANIR)); + axgbe_printf(1, "MAC RETMR Reg (%08x) = %08x\n", MAC_RETMR, + XGMAC_IOREAD(pdata, MAC_RETMR)); + axgbe_printf(1, "MAC Q0TFCR Reg (%08x) = %08x\n", MAC_Q0TFCR, + XGMAC_IOREAD(pdata, MAC_Q0TFCR)); + axgbe_printf(1, "MAC Q1TFCR Reg (%08x) = %08x\n", MAC_Q1TFCR, + XGMAC_IOREAD(pdata, MAC_Q1TFCR)); + axgbe_printf(1, "MAC Q2TFCR Reg (%08x) = %08x\n", MAC_Q2TFCR, + XGMAC_IOREAD(pdata, MAC_Q2TFCR)); + axgbe_printf(1, "MAC Q3TFCR Reg (%08x) = %08x\n", MAC_Q3TFCR, + XGMAC_IOREAD(pdata, MAC_Q3TFCR)); + axgbe_printf(1, "MAC Q4TFCR Reg (%08x) = %08x\n", MAC_Q4TFCR, + XGMAC_IOREAD(pdata, MAC_Q4TFCR)); + axgbe_printf(1, "MAC Q5TFCR Reg (%08x) = %08x\n", MAC_Q5TFCR, + XGMAC_IOREAD(pdata, MAC_Q5TFCR)); + axgbe_printf(1, "MAC Q6TFCR Reg (%08x) = %08x\n", MAC_Q6TFCR, + XGMAC_IOREAD(pdata, MAC_Q6TFCR)); + axgbe_printf(1, "MAC Q7TFCR Reg (%08x) = %08x\n", MAC_Q7TFCR, + XGMAC_IOREAD(pdata, MAC_Q7TFCR)); + axgbe_printf(1, "MAC RFCR Reg (%08x) = %08x\n", MAC_RFCR, + XGMAC_IOREAD(pdata, MAC_RFCR)); + axgbe_printf(1, "MAC RQC0R Reg (%08x) = %08x\n", MAC_RQC0R, + XGMAC_IOREAD(pdata, MAC_RQC0R)); + axgbe_printf(1, "MAC RQC1R Reg (%08x) = %08x\n", MAC_RQC1R, + XGMAC_IOREAD(pdata, MAC_RQC1R)); + axgbe_printf(1, "MAC RQC2R Reg (%08x) = %08x\n", MAC_RQC2R, + XGMAC_IOREAD(pdata, MAC_RQC2R)); + axgbe_printf(1, "MAC RQC3R Reg (%08x) = %08x\n", MAC_RQC3R, + XGMAC_IOREAD(pdata, MAC_RQC3R)); + axgbe_printf(1, "MAC ISR Reg (%08x) = %08x\n", MAC_ISR, + XGMAC_IOREAD(pdata, MAC_ISR)); + axgbe_printf(1, "MAC IER Reg (%08x) = %08x\n", MAC_IER, + XGMAC_IOREAD(pdata, MAC_IER)); + axgbe_printf(1, "MAC RTSR Reg (%08x) = %08x\n", MAC_RTSR, + XGMAC_IOREAD(pdata, MAC_RTSR)); + axgbe_printf(1, "MAC PMTCSR Reg (%08x) = %08x\n", MAC_PMTCSR, + XGMAC_IOREAD(pdata, MAC_PMTCSR)); + axgbe_printf(1, "MAC RWKPFR Reg (%08x) = %08x\n", MAC_RWKPFR, + 
XGMAC_IOREAD(pdata, MAC_RWKPFR));
+	axgbe_printf(1, "MAC LPICSR Reg (%08x) = %08x\n", MAC_LPICSR,
+	    XGMAC_IOREAD(pdata, MAC_LPICSR));
+	axgbe_printf(1, "MAC LPITCR Reg (%08x) = %08x\n", MAC_LPITCR,
+	    XGMAC_IOREAD(pdata, MAC_LPITCR));
+	axgbe_printf(1, "MAC TIR Reg (%08x) = %08x\n", MAC_TIR,
+	    XGMAC_IOREAD(pdata, MAC_TIR));
+	axgbe_printf(1, "MAC VR Reg (%08x) = %08x\n", MAC_VR,
+	    XGMAC_IOREAD(pdata, MAC_VR));
+	axgbe_printf(1, "MAC DR Reg (%08x) = %08x\n", MAC_DR,
+	    XGMAC_IOREAD(pdata, MAC_DR));
+	axgbe_printf(1, "MAC HWF0R Reg (%08x) = %08x\n", MAC_HWF0R,
+	    XGMAC_IOREAD(pdata, MAC_HWF0R));
+	axgbe_printf(1, "MAC HWF1R Reg (%08x) = %08x\n", MAC_HWF1R,
+	    XGMAC_IOREAD(pdata, MAC_HWF1R));
+	axgbe_printf(1, "MAC HWF2R Reg (%08x) = %08x\n", MAC_HWF2R,
+	    XGMAC_IOREAD(pdata, MAC_HWF2R));
+	axgbe_printf(1, "MAC MDIOSCAR Reg (%08x) = %08x\n", MAC_MDIOSCAR,
+	    XGMAC_IOREAD(pdata, MAC_MDIOSCAR));
+	axgbe_printf(1, "MAC MDIOSCCDR Reg (%08x) = %08x\n", MAC_MDIOSCCDR,
+	    XGMAC_IOREAD(pdata, MAC_MDIOSCCDR));
+	axgbe_printf(1, "MAC MDIOISR Reg (%08x) = %08x\n", MAC_MDIOISR,
+	    XGMAC_IOREAD(pdata, MAC_MDIOISR));
+	axgbe_printf(1, "MAC MDIOIER Reg (%08x) = %08x\n", MAC_MDIOIER,
+	    XGMAC_IOREAD(pdata, MAC_MDIOIER));
+	axgbe_printf(1, "MAC MDIOCL22R Reg (%08x) = %08x\n", MAC_MDIOCL22R,
+	    XGMAC_IOREAD(pdata, MAC_MDIOCL22R));
+	axgbe_printf(1, "MAC GPIOCR Reg (%08x) = %08x\n", MAC_GPIOCR,
+	    XGMAC_IOREAD(pdata, MAC_GPIOCR));
+	axgbe_printf(1, "MAC GPIOSR Reg (%08x) = %08x\n", MAC_GPIOSR,
+	    XGMAC_IOREAD(pdata, MAC_GPIOSR));
+	axgbe_printf(1, "MAC MACA0HR Reg (%08x) = %08x\n", MAC_MACA0HR,
+	    XGMAC_IOREAD(pdata, MAC_MACA0HR));
+	/* Fixed copy/paste slip: offset printed was MAC_TCR, not MAC_MACA0LR */
+	axgbe_printf(1, "MAC MACA0LR Reg (%08x) = %08x\n", MAC_MACA0LR,
+	    XGMAC_IOREAD(pdata, MAC_MACA0LR));
+	axgbe_printf(1, "MAC MACA1HR Reg (%08x) = %08x\n", MAC_MACA1HR,
+	    XGMAC_IOREAD(pdata, MAC_MACA1HR));
+	axgbe_printf(1, "MAC MACA1LR Reg (%08x) = %08x\n", MAC_MACA1LR,
+	    XGMAC_IOREAD(pdata, MAC_MACA1LR));
+	axgbe_printf(1, "MAC RSSCR Reg (%08x) = %08x\n", MAC_RSSCR,
XGMAC_IOREAD(pdata, MAC_RSSCR)); + axgbe_printf(1, "MAC RSSDR Reg (%08x) = %08x\n", MAC_RSSDR, + XGMAC_IOREAD(pdata, MAC_RSSDR)); + axgbe_printf(1, "MAC RSSAR Reg (%08x) = %08x\n", MAC_RSSAR, + XGMAC_IOREAD(pdata, MAC_RSSAR)); + axgbe_printf(1, "MAC TSCR Reg (%08x) = %08x\n", MAC_TSCR, + XGMAC_IOREAD(pdata, MAC_TSCR)); + axgbe_printf(1, "MAC SSIR Reg (%08x) = %08x\n", MAC_SSIR, + XGMAC_IOREAD(pdata, MAC_SSIR)); + axgbe_printf(1, "MAC STSR Reg (%08x) = %08x\n", MAC_STSR, + XGMAC_IOREAD(pdata, MAC_STSR)); + axgbe_printf(1, "MAC STNR Reg (%08x) = %08x\n", MAC_STNR, + XGMAC_IOREAD(pdata, MAC_STNR)); + axgbe_printf(1, "MAC STSUR Reg (%08x) = %08x\n", MAC_STSUR, + XGMAC_IOREAD(pdata, MAC_STSUR)); + axgbe_printf(1, "MAC STNUR Reg (%08x) = %08x\n", MAC_STNUR, + XGMAC_IOREAD(pdata, MAC_STNUR)); + axgbe_printf(1, "MAC TSAR Reg (%08x) = %08x\n", MAC_TSAR, + XGMAC_IOREAD(pdata, MAC_TSAR)); + axgbe_printf(1, "MAC TSSR Reg (%08x) = %08x\n", MAC_TSSR, + XGMAC_IOREAD(pdata, MAC_TSSR)); + axgbe_printf(1, "MAC TXSNR Reg (%08x) = %08x\n", MAC_TXSNR, + XGMAC_IOREAD(pdata, MAC_TXSNR)); + axgbe_printf(1, "MAC TXSSR Reg (%08x) = %08x\n", MAC_TXSSR, + XGMAC_IOREAD(pdata, MAC_TXSSR)); +} + +static void +xgbe_dump_rmon_counters(struct xgbe_prv_data *pdata) +{ + struct xgbe_mmc_stats *stats = &pdata->mmc_stats; + + axgbe_printf(1, "\n************* RMON counters dump ***************\n"); + + pdata->hw_if.read_mmc_stats(pdata); + + axgbe_printf(1, "rmon txoctetcount_gb (%08x) = %08lx\n", + MMC_TXOCTETCOUNT_GB_LO, stats->txoctetcount_gb); + axgbe_printf(1, "rmon txframecount_gb (%08x) = %08lx\n", + MMC_TXFRAMECOUNT_GB_LO, stats->txframecount_gb); + axgbe_printf(1, "rmon txbroadcastframes_g (%08x) = %08lx\n", + MMC_TXBROADCASTFRAMES_G_LO, stats->txbroadcastframes_g); + axgbe_printf(1, "rmon txmulticastframes_g (%08x) = %08lx\n", + MMC_TXMULTICASTFRAMES_G_LO, stats->txmulticastframes_g); + axgbe_printf(1, "rmon tx64octets_gb (%08x) = %08lx\n", + MMC_TX64OCTETS_GB_LO, stats->tx64octets_gb); + 
axgbe_printf(1, "rmon tx65to127octets_gb (%08x) = %08lx\n", + MMC_TX65TO127OCTETS_GB_LO, stats->tx65to127octets_gb); + axgbe_printf(1, "rmon tx128to255octets_gb (%08x) = %08lx\n", + MMC_TX128TO255OCTETS_GB_LO, stats->tx128to255octets_gb); + axgbe_printf(1, "rmon tx256to511octets_gb (%08x) = %08lx\n", + MMC_TX256TO511OCTETS_GB_LO, stats->tx256to511octets_gb); + axgbe_printf(1, "rmon tx512to1023octets_gb (%08x) = %08lx\n", + MMC_TX512TO1023OCTETS_GB_LO, stats->tx512to1023octets_gb); + axgbe_printf(1, "rmon tx1024tomaxoctets_gb (%08x) = %08lx\n", + MMC_TX1024TOMAXOCTETS_GB_LO, stats->tx1024tomaxoctets_gb); + axgbe_printf(1, "rmon txunicastframes_gb (%08x) = %08lx\n", + MMC_TXUNICASTFRAMES_GB_LO, stats->txunicastframes_gb); + axgbe_printf(1, "rmon txmulticastframes_gb (%08x) = %08lx\n", + MMC_TXMULTICASTFRAMES_GB_LO, stats->txmulticastframes_gb); + axgbe_printf(1, "rmon txbroadcastframes_gb (%08x) = %08lx\n", + MMC_TXBROADCASTFRAMES_GB_LO, stats->txbroadcastframes_gb); + axgbe_printf(1, "rmon txunderflowerror (%08x) = %08lx\n", + MMC_TXUNDERFLOWERROR_LO, stats->txunderflowerror); + axgbe_printf(1, "rmon txoctetcount_g (%08x) = %08lx\n", + MMC_TXOCTETCOUNT_G_LO, stats->txoctetcount_g); + axgbe_printf(1, "rmon txframecount_g (%08x) = %08lx\n", + MMC_TXFRAMECOUNT_G_LO, stats->txframecount_g); + axgbe_printf(1, "rmon txpauseframes (%08x) = %08lx\n", + MMC_TXPAUSEFRAMES_LO, stats->txpauseframes); + axgbe_printf(1, "rmon txvlanframes_g (%08x) = %08lx\n", + MMC_TXVLANFRAMES_G_LO, stats->txvlanframes_g); + axgbe_printf(1, "rmon rxframecount_gb (%08x) = %08lx\n", + MMC_RXFRAMECOUNT_GB_LO, stats->rxframecount_gb); + axgbe_printf(1, "rmon rxoctetcount_gb (%08x) = %08lx\n", + MMC_RXOCTETCOUNT_GB_LO, stats->rxoctetcount_gb); + axgbe_printf(1, "rmon rxoctetcount_g (%08x) = %08lx\n", + MMC_RXOCTETCOUNT_G_LO, stats->rxoctetcount_g); + axgbe_printf(1, "rmon rxbroadcastframes_g (%08x) = %08lx\n", + MMC_RXBROADCASTFRAMES_G_LO, stats->rxbroadcastframes_g); + axgbe_printf(1, "rmon 
rxmulticastframes_g (%08x) = %08lx\n", + MMC_RXMULTICASTFRAMES_G_LO, stats->rxmulticastframes_g); + axgbe_printf(1, "rmon rxcrcerror (%08x) = %08lx\n", + MMC_RXCRCERROR_LO, stats->rxcrcerror); + axgbe_printf(1, "rmon rxrunterror (%08x) = %08lx\n", + MMC_RXRUNTERROR, stats->rxrunterror); + axgbe_printf(1, "rmon rxjabbererror (%08x) = %08lx\n", + MMC_RXJABBERERROR, stats->rxjabbererror); + axgbe_printf(1, "rmon rxundersize_g (%08x) = %08lx\n", + MMC_RXUNDERSIZE_G, stats->rxundersize_g); + axgbe_printf(1, "rmon rxoversize_g (%08x) = %08lx\n", + MMC_RXOVERSIZE_G, stats->rxoversize_g); + axgbe_printf(1, "rmon rx64octets_gb (%08x) = %08lx\n", + MMC_RX64OCTETS_GB_LO, stats->rx64octets_gb); + axgbe_printf(1, "rmon rx65to127octets_gb (%08x) = %08lx\n", + MMC_RX65TO127OCTETS_GB_LO, stats->rx65to127octets_gb); + axgbe_printf(1, "rmon rx128to255octets_gb (%08x) = %08lx\n", + MMC_RX128TO255OCTETS_GB_LO, stats->rx128to255octets_gb); + axgbe_printf(1, "rmon rx256to511octets_gb (%08x) = %08lx\n", + MMC_RX256TO511OCTETS_GB_LO, stats->rx256to511octets_gb); + axgbe_printf(1, "rmon rx512to1023octets_gb (%08x) = %08lx\n", + MMC_RX512TO1023OCTETS_GB_LO, stats->rx512to1023octets_gb); + axgbe_printf(1, "rmon rx1024tomaxoctets_gb (%08x) = %08lx\n", + MMC_RX1024TOMAXOCTETS_GB_LO, stats->rx1024tomaxoctets_gb); + axgbe_printf(1, "rmon rxunicastframes_g (%08x) = %08lx\n", + MMC_RXUNICASTFRAMES_G_LO, stats->rxunicastframes_g); + axgbe_printf(1, "rmon rxlengtherror (%08x) = %08lx\n", + MMC_RXLENGTHERROR_LO, stats->rxlengtherror); + axgbe_printf(1, "rmon rxoutofrangetype (%08x) = %08lx\n", + MMC_RXOUTOFRANGETYPE_LO, stats->rxoutofrangetype); + axgbe_printf(1, "rmon rxpauseframes (%08x) = %08lx\n", + MMC_RXPAUSEFRAMES_LO, stats->rxpauseframes); + axgbe_printf(1, "rmon rxfifooverflow (%08x) = %08lx\n", + MMC_RXFIFOOVERFLOW_LO, stats->rxfifooverflow); + axgbe_printf(1, "rmon rxvlanframes_gb (%08x) = %08lx\n", + MMC_RXVLANFRAMES_GB_LO, stats->rxvlanframes_gb); + axgbe_printf(1, "rmon rxwatchdogerror 
(%08x) = %08lx\n", + MMC_RXWATCHDOGERROR, stats->rxwatchdogerror); +} + +void +xgbe_dump_i2c_registers(struct xgbe_prv_data *pdata) +{ + axgbe_printf(1, "*************** I2C Registers **************\n"); + axgbe_printf(1, " IC_CON : %010x\n", + XI2C_IOREAD(pdata, 0x00)); + axgbe_printf(1, " IC_TAR : %010x\n", + XI2C_IOREAD(pdata, 0x04)); + axgbe_printf(1, " IC_HS_MADDR : %010x\n", + XI2C_IOREAD(pdata, 0x0c)); + axgbe_printf(1, " IC_INTR_STAT : %010x\n", + XI2C_IOREAD(pdata, 0x2c)); + axgbe_printf(1, " IC_INTR_MASK : %010x\n", + XI2C_IOREAD(pdata, 0x30)); + axgbe_printf(1, " IC_RAW_INTR_STAT : %010x\n", + XI2C_IOREAD(pdata, 0x34)); + axgbe_printf(1, " IC_RX_TL : %010x\n", + XI2C_IOREAD(pdata, 0x38)); + axgbe_printf(1, " IC_TX_TL : %010x\n", + XI2C_IOREAD(pdata, 0x3c)); + axgbe_printf(1, " IC_ENABLE : %010x\n", + XI2C_IOREAD(pdata, 0x6c)); + axgbe_printf(1, " IC_STATUS : %010x\n", + XI2C_IOREAD(pdata, 0x70)); + axgbe_printf(1, " IC_TXFLR : %010x\n", + XI2C_IOREAD(pdata, 0x74)); + axgbe_printf(1, " IC_RXFLR : %010x\n", + XI2C_IOREAD(pdata, 0x78)); + axgbe_printf(1, " IC_ENABLE_STATUS : %010x\n", + XI2C_IOREAD(pdata, 0x9c)); + axgbe_printf(1, " IC_COMP_PARAM1 : %010x\n", + XI2C_IOREAD(pdata, 0xf4)); +} + +static void +xgbe_dump_active_vlans(struct xgbe_prv_data *pdata) +{ + int i; + + for(i=0 ; iactive_vlans[i]); + } + axgbe_printf(1, "\n"); +} + +static void +xgbe_default_config(struct xgbe_prv_data *pdata) +{ + pdata->blen = DMA_SBMR_BLEN_64; + pdata->pbl = DMA_PBL_128; + pdata->aal = 1; + pdata->rd_osr_limit = 8; + pdata->wr_osr_limit = 8; + pdata->tx_sf_mode = MTL_TSF_ENABLE; + pdata->tx_threshold = MTL_TX_THRESHOLD_64; + pdata->tx_osp_mode = DMA_OSP_ENABLE; + pdata->rx_sf_mode = MTL_RSF_DISABLE; + pdata->rx_threshold = MTL_RX_THRESHOLD_64; + pdata->pause_autoneg = 1; + pdata->tx_pause = 1; + pdata->rx_pause = 1; + pdata->phy_speed = SPEED_UNKNOWN; + pdata->power_down = 0; + pdata->enable_rss = 1; +} + +static void +axgbe_setup_sysctl(struct xgbe_prv_data *pdata) 
+{ + struct sysctl_ctx_list *clist; + struct sysctl_oid *parent; + struct sysctl_oid_list *top; + + clist = device_get_sysctl_ctx(pdata->dev); + parent = device_get_sysctl_tree(pdata->dev); + top = SYSCTL_CHILDREN(parent); +} + +static int +axgbe_if_attach_post(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + struct ifnet *ifp = pdata->netdev; + struct xgbe_phy_if *phy_if = &pdata->phy_if; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + if_softc_ctx_t scctx = sc->scctx; + int i, ret; + + /* Initialize ECC timestamps */ + pdata->tx_sec_period = ticks; + pdata->tx_ded_period = ticks; + pdata->rx_sec_period = ticks; + pdata->rx_ded_period = ticks; + pdata->desc_sec_period = ticks; + pdata->desc_ded_period = ticks; + + /* Reset the hardware */ + ret = hw_if->exit(&sc->pdata); + if (ret) + axgbe_error("%s: exit error %d\n", __func__, ret); + + /* Configure the defaults */ + xgbe_default_config(pdata); + + /* Set default max values if not provided */ + if (!pdata->tx_max_fifo_size) + pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size; + if (!pdata->rx_max_fifo_size) + pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size; + + DBGPR("%s: tx fifo 0x%x rx fifo 0x%x\n", __func__, + pdata->tx_max_fifo_size, pdata->rx_max_fifo_size); + + /* Set and validate the number of descriptors for a ring */ + MPASS(powerof2(XGBE_TX_DESC_CNT)); + pdata->tx_desc_count = XGBE_TX_DESC_CNT; + MPASS(powerof2(XGBE_RX_DESC_CNT)); + pdata->rx_desc_count = XGBE_RX_DESC_CNT; + + /* Adjust the number of queues based on interrupts assigned */ + if (pdata->channel_irq_count) { + pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count, + pdata->channel_irq_count); + pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count, + pdata->channel_irq_count); + + DBGPR("adjusted TX %u/%u RX %u/%u\n", + pdata->tx_ring_count, pdata->tx_q_count, + pdata->rx_ring_count, pdata->rx_q_count); + } + + /* Set channel count based on 
interrupts assigned */ + pdata->channel_count = max_t(unsigned int, scctx->isc_ntxqsets, + scctx->isc_nrxqsets); + DBGPR("Channel count set to: %u\n", pdata->channel_count); + + /* Get RSS key */ +#ifdef RSS + rss_getkey((uint8_t *)pdata->rss_key); +#else + arc4rand(&pdata->rss_key, ARRAY_SIZE(pdata->rss_key), 0); +#endif + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); + XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); + + /* Initialize the PHY device */ + pdata->sysctl_an_cdr_workaround = pdata->vdata->an_cdr_workaround; + phy_if->phy_init(pdata); + + /* Set the coalescing */ + xgbe_init_rx_coalesce(&sc->pdata); + xgbe_init_tx_coalesce(&sc->pdata); + + ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SGMII, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_100_SGMII, 0, NULL); + ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); + + /* Initialize the phy */ + pdata->phy_link = -1; + pdata->phy_speed = SPEED_UNKNOWN; + ret = phy_if->phy_reset(pdata); + if (ret) + return (ret); + + /* Calculate the Rx buffer size before allocating rings */ + ret = xgbe_calc_rx_buf_size(pdata->netdev, if_getmtu(pdata->netdev)); + pdata->rx_buf_size = ret; + DBGPR("%s: rx_buf_size %d\n", __func__, ret); + + /* Setup RSS lookup table */ + for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, 
DMCH, + i % pdata->rx_ring_count); + + /* + * Mark the device down until it is initialized, which happens + * when the device is accessed first (for configuring the iface, + * eg: setting IP) + */ + set_bit(XGBE_DOWN, &pdata->dev_state); + + DBGPR("mtu %d\n", ifp->if_mtu); + scctx->isc_max_frame_size = ifp->if_mtu + 18; + scctx->isc_min_frame_size = XGMAC_MIN_PACKET; + + axgbe_setup_sysctl(pdata); + + axgbe_sysctl_init(pdata); + + return (0); +} /* axgbe_if_attach_post */ + +static void +xgbe_free_intr(struct xgbe_prv_data *pdata, struct resource *res, void *tag, + int rid) +{ + if (tag) + bus_teardown_intr(pdata->dev, res, tag); + + if (res) + bus_release_resource(pdata->dev, SYS_RES_IRQ, rid, res); +} + +static void +axgbe_interrupts_free(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + if_softc_ctx_t scctx = sc->scctx; + struct xgbe_channel *channel; + struct if_irq irq; + int i; + + axgbe_printf(2, "%s: mode %d\n", __func__, scctx->isc_intr); + + /* Free dev_irq */ + iflib_irq_free(ctx, &pdata->dev_irq); + + /* Free ecc_irq */ + xgbe_free_intr(pdata, pdata->ecc_irq_res, pdata->ecc_irq_tag, + pdata->ecc_rid); + + /* Free i2c_irq */ + xgbe_free_intr(pdata, pdata->i2c_irq_res, pdata->i2c_irq_tag, + pdata->i2c_rid); + + /* Free an_irq */ + xgbe_free_intr(pdata, pdata->an_irq_res, pdata->an_irq_tag, + pdata->an_rid); + + for (i = 0; i < scctx->isc_nrxqsets; i++) { + + channel = pdata->channel[i]; + axgbe_printf(2, "%s: rid %d\n", __func__, channel->dma_irq_rid); + irq.ii_res = channel->dma_irq_res; + irq.ii_tag = channel->dma_irq_tag; + iflib_irq_free(ctx, &irq); + } +} + +static int +axgbe_if_detach(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_phy_if *phy_if = &pdata->phy_if; + struct resource *mac_res[2]; + + mac_res[0] = pdata->xgmac_res; + mac_res[1] = pdata->xpcs_res; + + phy_if->phy_exit(pdata); + + /* Free 
Interrupts */ + axgbe_interrupts_free(ctx); + + /* Free workqueues */ + taskqueue_free(pdata->dev_workqueue); + + /* Release bus resources */ + bus_release_resources(iflib_get_dev(ctx), axgbe_pci_mac_spec, mac_res); + + /* Free VLAN bitmap */ + free(pdata->active_vlans, M_AXGBE); + + axgbe_sysctl_exit(pdata); + + return (0); +} /* axgbe_if_detach */ + +static void +axgbe_pci_init(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_if *phy_if = &pdata->phy_if; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + int ret = 0; + + hw_if->init(pdata); + + ret = phy_if->phy_start(pdata); + if (ret) { + axgbe_error("%s: phy start %d\n", __func__, ret); + ret = hw_if->exit(pdata); + if (ret) + axgbe_error("%s: exit error %d\n", __func__, ret); + return; + } + + hw_if->enable_tx(pdata); + hw_if->enable_rx(pdata); + + xgbe_start_timers(pdata); + + clear_bit(XGBE_DOWN, &pdata->dev_state); + + xgbe_dump_phy_registers(pdata); + xgbe_dump_prop_registers(pdata); + xgbe_dump_dma_registers(pdata, -1); + xgbe_dump_mtl_registers(pdata); + xgbe_dump_mac_registers(pdata); + xgbe_dump_rmon_counters(pdata); +} + +static void +axgbe_if_init(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + + axgbe_pci_init(pdata); +} + +static void +axgbe_pci_stop(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_phy_if *phy_if = &pdata->phy_if; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + int ret; + + if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) { + axgbe_printf(1, "%s: Stopping when XGBE_DOWN\n", __func__); + return; + } + + xgbe_stop_timers(pdata); + taskqueue_drain_all(pdata->dev_workqueue); + + hw_if->disable_tx(pdata); + hw_if->disable_rx(pdata); + + phy_if->phy_stop(pdata); + + ret = hw_if->exit(pdata); + if (ret) + axgbe_error("%s: exit error %d\n", __func__, ret); + + set_bit(XGBE_DOWN, &pdata->dev_state); +} + +static void +axgbe_if_stop(if_ctx_t 
ctx) +{ + axgbe_pci_stop(ctx); +} + +static void +axgbe_if_disable_intr(if_ctx_t ctx) +{ + /* TODO - implement */ +} + +static void +axgbe_if_enable_intr(if_ctx_t ctx) +{ + /* TODO - implement */ +} + +static int +axgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int ntxqs, + int ntxqsets) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + if_softc_ctx_t scctx = sc->scctx; + struct xgbe_channel *channel; + struct xgbe_ring *tx_ring; + int i, j, k; + + MPASS(scctx->isc_ntxqsets > 0); + MPASS(scctx->isc_ntxqsets == ntxqsets); + MPASS(ntxqs == 1); + + axgbe_printf(1, "%s: txqsets %d/%d txqs %d\n", __func__, + scctx->isc_ntxqsets, ntxqsets, ntxqs); + + for (i = 0 ; i < ntxqsets; i++) { + + channel = pdata->channel[i]; + + tx_ring = (struct xgbe_ring*)malloc(ntxqs * + sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO); + + if (tx_ring == NULL) { + axgbe_error("Unable to allocate TX ring memory\n"); + goto tx_ring_fail; + } + + channel->tx_ring = tx_ring; + + for (j = 0; j < ntxqs; j++, tx_ring++) { + tx_ring->rdata = + (struct xgbe_ring_data*)malloc(scctx->isc_ntxd[j] * + sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT); + + /* Get the virtual & physical address of hw queues */ + tx_ring->rdesc = (struct xgbe_ring_desc *)va[i*ntxqs + j]; + tx_ring->rdesc_paddr = pa[i*ntxqs + j]; + tx_ring->rdesc_count = scctx->isc_ntxd[j]; + spin_lock_init(&tx_ring->lock); + } + } + + axgbe_printf(1, "allocated for %d tx queues\n", scctx->isc_ntxqsets); + + return (0); + +tx_ring_fail: + + for (j = 0; j < i ; j++) { + + channel = pdata->channel[j]; + + tx_ring = channel->tx_ring; + for (k = 0; k < ntxqs ; k++, tx_ring++) { + if (tx_ring && tx_ring->rdata) + free(tx_ring->rdata, M_AXGBE); + } + free(channel->tx_ring, M_AXGBE); + + channel->tx_ring = NULL; + } + + return (ENOMEM); + +} /* axgbe_if_tx_queues_alloc */ + +static int +axgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int nrxqs, + int 
nrxqsets) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + if_softc_ctx_t scctx = sc->scctx; + struct xgbe_channel *channel; + struct xgbe_ring *rx_ring; + int i, j, k; + + MPASS(scctx->isc_nrxqsets > 0); + MPASS(scctx->isc_nrxqsets == nrxqsets); + MPASS(nrxqs == 2); + + axgbe_printf(1, "%s: rxqsets %d/%d rxqs %d\n", __func__, + scctx->isc_nrxqsets, nrxqsets, nrxqs); + + for (i = 0 ; i < nrxqsets; i++) { + + channel = pdata->channel[i]; + + rx_ring = (struct xgbe_ring*)malloc(nrxqs * + sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO); + + if (rx_ring == NULL) { + axgbe_error("Unable to allocate RX ring memory\n"); + goto rx_ring_fail; + } + + channel->rx_ring = rx_ring; + + for (j = 0; j < nrxqs; j++, rx_ring++) { + rx_ring->rdata = + (struct xgbe_ring_data*)malloc(scctx->isc_nrxd[j] * + sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT); + + /* Get the virtual and physical address of the hw queues */ + rx_ring->rdesc = (struct xgbe_ring_desc *)va[i*nrxqs + j]; + rx_ring->rdesc_paddr = pa[i*nrxqs + j]; + rx_ring->rdesc_count = scctx->isc_nrxd[j]; + spin_lock_init(&rx_ring->lock); + } + } + + axgbe_printf(2, "allocated for %d rx queues\n", scctx->isc_nrxqsets); + + return (0); + +rx_ring_fail: + + for (j = 0 ; j < i ; j++) { + + channel = pdata->channel[j]; + + rx_ring = channel->rx_ring; + for (k = 0; k < nrxqs ; k++, rx_ring++) { + if (rx_ring && rx_ring->rdata) + free(rx_ring->rdata, M_AXGBE); + } + free(channel->rx_ring, M_AXGBE); + + channel->rx_ring = NULL; + } + + return (ENOMEM); + +} /* axgbe_if_rx_queues_alloc */ + +static void +axgbe_if_queues_free(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + if_softc_ctx_t scctx = sc->scctx; + if_shared_ctx_t sctx = sc->sctx; + struct xgbe_channel *channel; + struct xgbe_ring *tx_ring; + struct xgbe_ring *rx_ring; + int i, j; + + for (i = 0 ; i < scctx->isc_ntxqsets; i++) { + + channel = 
pdata->channel[i]; + + tx_ring = channel->tx_ring; + for (j = 0; j < sctx->isc_ntxqs ; j++, tx_ring++) { + if (tx_ring && tx_ring->rdata) + free(tx_ring->rdata, M_AXGBE); + } + free(channel->tx_ring, M_AXGBE); + channel->tx_ring = NULL; + } + + for (i = 0 ; i < scctx->isc_nrxqsets; i++) { + + channel = pdata->channel[i]; + + rx_ring = channel->rx_ring; + for (j = 0; j < sctx->isc_nrxqs ; j++, rx_ring++) { + if (rx_ring && rx_ring->rdata) + free(rx_ring->rdata, M_AXGBE); + } + free(channel->rx_ring, M_AXGBE); + channel->rx_ring = NULL; + } + + /* Free Channels */ + for (i = 0; i < pdata->total_channel_count ; i++) { + free(pdata->channel[i], M_AXGBE); + pdata->channel[i] = NULL; + } + + pdata->total_channel_count = 0; + pdata->channel_count = 0; +} /* axgbe_if_queues_free */ + +static void +axgbe_if_vlan_register(if_ctx_t ctx, uint16_t vtag) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + + if (!bit_test(pdata->active_vlans, vtag)) { + axgbe_printf(0, "Registering VLAN %d\n", vtag); + + bit_set(pdata->active_vlans, vtag); + hw_if->update_vlan_hash_table(pdata); + pdata->num_active_vlans++; + + axgbe_printf(1, "Total active vlans: %d\n", + pdata->num_active_vlans); + } else + axgbe_printf(0, "VLAN %d already registered\n", vtag); + + xgbe_dump_active_vlans(pdata); +} + +static void +axgbe_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + + if (pdata->num_active_vlans == 0) { + axgbe_printf(1, "No active VLANs to unregister\n"); + return; + } + + if (bit_test(pdata->active_vlans, vtag)){ + axgbe_printf(0, "Un-Registering VLAN %d\n", vtag); + + bit_clear(pdata->active_vlans, vtag); + hw_if->update_vlan_hash_table(pdata); + pdata->num_active_vlans--; + + axgbe_printf(1, "Total active vlans: %d\n", + pdata->num_active_vlans); + } else 
+ axgbe_printf(0, "VLAN %d already unregistered\n", vtag); + + xgbe_dump_active_vlans(pdata); +} + +#if __FreeBSD_version >= 1300000 +static bool +axgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) +{ + switch (event) { + case IFLIB_RESTART_VLAN_CONFIG: + default: + return (true); + } +} +#endif + +static int +axgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + if_softc_ctx_t scctx = sc->scctx; + struct xgbe_channel *channel; + struct if_irq irq; + int i, error, rid = 0, flags; + char buf[16]; + + MPASS(scctx->isc_intr != IFLIB_INTR_LEGACY); + + pdata->isr_as_tasklet = 1; + + if (scctx->isc_intr == IFLIB_INTR_MSI) { + pdata->irq_count = 1; + pdata->channel_irq_count = 1; + return (0); + } + + axgbe_printf(1, "%s: msix %d txqsets %d rxqsets %d\n", __func__, msix, + scctx->isc_ntxqsets, scctx->isc_nrxqsets); + + flags = RF_ACTIVE; + + /* DEV INTR SETUP */ + rid++; + error = iflib_irq_alloc_generic(ctx, &pdata->dev_irq, rid, + IFLIB_INTR_ADMIN, axgbe_dev_isr, sc, 0, "dev_irq"); + if (error) { + axgbe_error("Failed to register device interrupt rid %d name %s\n", + rid, "dev_irq"); + return (error); + } + + /* ECC INTR SETUP */ + rid++; + pdata->ecc_rid = rid; + pdata->ecc_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ, + &rid, flags); + if (!pdata->ecc_irq_res) { + axgbe_error("failed to allocate IRQ for rid %d, name %s.\n", + rid, "ecc_irq"); + return (ENOMEM); + } + + error = bus_setup_intr(pdata->dev, pdata->ecc_irq_res, INTR_MPSAFE | + INTR_TYPE_NET, NULL, axgbe_ecc_isr, sc, &pdata->ecc_irq_tag); + if (error) { + axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n", + rid, "ecc_irq", error); + return (error); + } + + /* I2C INTR SETUP */ + rid++; + pdata->i2c_rid = rid; + pdata->i2c_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ, + &rid, flags); + if (!pdata->i2c_irq_res) { + axgbe_error("failed to allocate 
IRQ for rid %d, name %s.\n", + rid, "i2c_irq"); + return (ENOMEM); + } + + error = bus_setup_intr(pdata->dev, pdata->i2c_irq_res, INTR_MPSAFE | + INTR_TYPE_NET, NULL, axgbe_i2c_isr, sc, &pdata->i2c_irq_tag); + if (error) { + axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n", + rid, "i2c_irq", error); + return (error); + } + + /* AN INTR SETUP */ + rid++; + pdata->an_rid = rid; + pdata->an_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ, + &rid, flags); + if (!pdata->an_irq_res) { + axgbe_error("failed to allocate IRQ for rid %d, name %s.\n", + rid, "an_irq"); + return (ENOMEM); + } + + error = bus_setup_intr(pdata->dev, pdata->an_irq_res, INTR_MPSAFE | + INTR_TYPE_NET, NULL, axgbe_an_isr, sc, &pdata->an_irq_tag); + if (error) { + axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n", + rid, "an_irq", error); + return (error); + } + + pdata->per_channel_irq = 1; + pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL; + rid++; + for (i = 0; i < scctx->isc_nrxqsets; i++, rid++) { + + channel = pdata->channel[i]; + + snprintf(buf, sizeof(buf), "rxq%d", i); + error = iflib_irq_alloc_generic(ctx, &irq, rid, IFLIB_INTR_RX, + axgbe_msix_que, channel, channel->queue_index, buf); + + if (error) { + axgbe_error("Failed to allocated que int %d err: %d\n", + i, error); + return (error); + } + + channel->dma_irq_rid = rid; + channel->dma_irq_res = irq.ii_res; + channel->dma_irq_tag = irq.ii_tag; + axgbe_printf(1, "%s: channel count %d idx %d irq %d\n", + __func__, scctx->isc_nrxqsets, i, rid); + } + pdata->irq_count = msix; + pdata->channel_irq_count = scctx->isc_nrxqsets; + + for (i = 0; i < scctx->isc_ntxqsets; i++) { + + channel = pdata->channel[i]; + + snprintf(buf, sizeof(buf), "txq%d", i); + irq.ii_res = channel->dma_irq_res; + iflib_softirq_alloc_generic(ctx, &irq, IFLIB_INTR_TX, channel, + channel->queue_index, buf); + } + + return (0); +} /* axgbe_if_msix_intr_assign */ + +static int +xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata, 
struct xgbe_channel *channel) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + enum xgbe_int int_id; + + if (channel->tx_ring && channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_RI; + else + return (-1); + + axgbe_printf(1, "%s channel: %d rx_tx interrupt enabled %d\n", + __func__, channel->queue_index, int_id); + return (hw_if->enable_int(channel, int_id)); +} + +static void +xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel) +{ + struct xgbe_hw_if *hw_if = &pdata->hw_if; + enum xgbe_int int_id; + + if (channel->tx_ring && channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = XGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = XGMAC_INT_DMA_CH_SR_RI; + else + return; + + axgbe_printf(1, "%s channel: %d rx_tx interrupt disabled %d\n", + __func__, channel->queue_index, int_id); + hw_if->disable_int(channel, int_id); +} + +static void +xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) +{ + unsigned int i; + + for (i = 0; i < pdata->channel_count; i++) + xgbe_disable_rx_tx_int(pdata, pdata->channel[i]); +} + +static int +axgbe_msix_que(void *arg) +{ + struct xgbe_channel *channel = (struct xgbe_channel *)arg; + struct xgbe_prv_data *pdata = channel->pdata; + unsigned int dma_ch_isr, dma_status; + + axgbe_printf(1, "%s: Channel: %d SR 0x%04x DSR 0x%04x IER:0x%04x D_ISR:0x%04x M_ISR:0x%04x\n", + __func__, channel->queue_index, + XGMAC_DMA_IOREAD(channel, DMA_CH_SR), + XGMAC_DMA_IOREAD(channel, DMA_CH_DSR), + XGMAC_DMA_IOREAD(channel, DMA_CH_IER), + XGMAC_IOREAD(pdata, DMA_ISR), + XGMAC_IOREAD(pdata, MAC_ISR)); + + dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); + + /* Disable Tx and Rx channel interrupts */ + xgbe_disable_rx_tx_int(pdata, channel); + + /* Clear the interrupts */ + dma_status = 0; + XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1); + 
XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1); + XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status); + + return (FILTER_SCHEDULE_THREAD); +} + +static int +axgbe_dev_isr(void *arg) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg; + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_channel *channel; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + unsigned int i, dma_isr, dma_ch_isr; + unsigned int mac_isr, mac_mdioisr; + int ret = FILTER_HANDLED; + + dma_isr = XGMAC_IOREAD(pdata, DMA_ISR); + axgbe_printf(2, "%s DMA ISR: 0x%x\n", __func__, dma_isr); + + if (!dma_isr) + return (FILTER_HANDLED); + + for (i = 0; i < pdata->channel_count; i++) { + + if (!(dma_isr & (1 << i))) + continue; + + channel = pdata->channel[i]; + + dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); + axgbe_printf(2, "%s: channel %d SR 0x%x DSR 0x%x\n", __func__, + channel->queue_index, dma_ch_isr, XGMAC_DMA_IOREAD(channel, + DMA_CH_DSR)); + + /* + * The TI or RI interrupt bits may still be set even if using + * per channel DMA interrupts. Check to be sure those are not + * enabled before using the private data napi structure. 
+ */ + if (!pdata->per_channel_irq && + (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || + XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) { + + /* Disable Tx and Rx interrupts */ + xgbe_disable_rx_tx_ints(pdata); + } else { + + /* + * Don't clear Rx/Tx status if doing per channel DMA + * interrupts, these will be cleared by the ISR for + * per channel DMA interrupts + */ + XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0); + XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0); + } + + if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU)) + pdata->ext_stats.rx_buffer_unavailable++; + + /* Restart the device on a Fatal Bus Error */ + if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) + axgbe_error("%s: Fatal bus error reported 0x%x\n", + __func__, dma_ch_isr); + + /* Clear all interrupt signals */ + XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); + + ret = FILTER_SCHEDULE_THREAD; + } + + if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) { + + mac_isr = XGMAC_IOREAD(pdata, MAC_ISR); + axgbe_printf(2, "%s MAC ISR: 0x%x\n", __func__, mac_isr); + + if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS)) + hw_if->tx_mmc_int(pdata); + + if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) + hw_if->rx_mmc_int(pdata); + + if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) { + mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR); + + if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR, + SNGLCOMPINT)) + wakeup_one(pdata); + } + + } + + return (ret); +} /* axgbe_dev_isr */ + +static void +axgbe_i2c_isr(void *arg) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg; + + sc->pdata.i2c_if.i2c_isr(&sc->pdata); +} + +static void +axgbe_ecc_isr(void *arg) +{ + /* TODO - implement */ +} + +static void +axgbe_an_isr(void *arg) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg; + + sc->pdata.phy_if.an_isr(&sc->pdata); +} + +static int +axgbe_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + int ret; + + if (qid < 
pdata->tx_q_count) { + ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]); + if (ret) { + axgbe_error("Enable TX INT failed\n"); + return (ret); + } + } else + axgbe_error("Queue ID exceed channel count\n"); + + return (0); +} + +static int +axgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + int ret; + + if (qid < pdata->rx_q_count) { + ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]); + if (ret) { + axgbe_error("Enable RX INT failed\n"); + return (ret); + } + } else + axgbe_error("Queue ID exceed channel count\n"); + + return (0); +} + +static void +axgbe_if_update_admin_status(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + + axgbe_printf(1, "%s: phy_link %d status %d speed %d\n", __func__, + pdata->phy_link, sc->link_status, pdata->phy.speed); + + if (pdata->phy_link < 0) + return; + + if (pdata->phy_link) { + if (sc->link_status == LINK_STATE_DOWN) { + sc->link_status = LINK_STATE_UP; + if (pdata->phy.speed & SPEED_10000) + iflib_link_state_change(ctx, LINK_STATE_UP, + IF_Gbps(10)); + else if (pdata->phy.speed & SPEED_2500) + iflib_link_state_change(ctx, LINK_STATE_UP, + IF_Gbps(2.5)); + else if (pdata->phy.speed & SPEED_1000) + iflib_link_state_change(ctx, LINK_STATE_UP, + IF_Gbps(1)); + else if (pdata->phy.speed & SPEED_100) + iflib_link_state_change(ctx, LINK_STATE_UP, + IF_Mbps(100)); + else if (pdata->phy.speed & SPEED_10) + iflib_link_state_change(ctx, LINK_STATE_UP, + IF_Mbps(10)); + } + } else { + if (sc->link_status == LINK_STATE_UP) { + sc->link_status = LINK_STATE_DOWN; + iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); + } + } +} + +static int +axgbe_if_media_change(if_ctx_t ctx) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct ifmedia *ifm = iflib_get_media(ctx); + + sx_xlock(&sc->pdata.an_mutex); + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) + 
return (EINVAL); + + switch (IFM_SUBTYPE(ifm->ifm_media)) { + case IFM_10G_KR: + sc->pdata.phy.speed = SPEED_10000; + sc->pdata.phy.autoneg = AUTONEG_DISABLE; + break; + case IFM_2500_KX: + sc->pdata.phy.speed = SPEED_2500; + sc->pdata.phy.autoneg = AUTONEG_DISABLE; + break; + case IFM_1000_KX: + sc->pdata.phy.speed = SPEED_1000; + sc->pdata.phy.autoneg = AUTONEG_DISABLE; + break; + case IFM_100_TX: + sc->pdata.phy.speed = SPEED_100; + sc->pdata.phy.autoneg = AUTONEG_DISABLE; + break; + case IFM_AUTO: + sc->pdata.phy.autoneg = AUTONEG_ENABLE; + break; + } + sx_xunlock(&sc->pdata.an_mutex); + + return (-sc->pdata.phy_if.phy_config_aneg(&sc->pdata)); +} + +static int +axgbe_if_promisc_set(if_ctx_t ctx, int flags) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + + if (XGMAC_IOREAD_BITS(&sc->pdata, MAC_PFR, PR) == 1) + return (0); + + XGMAC_IOWRITE_BITS(&sc->pdata, MAC_PFR, PR, 1); + XGMAC_IOWRITE_BITS(&sc->pdata, MAC_PFR, VTFE, 0); + + return (0); +} + +static uint64_t +axgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct ifnet *ifp = iflib_get_ifp(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; + + pdata->hw_if.read_mmc_stats(pdata); + + switch(cnt) { + case IFCOUNTER_IPACKETS: + return (pstats->rxframecount_gb); + case IFCOUNTER_IERRORS: + return (pstats->rxframecount_gb - pstats->rxbroadcastframes_g - + pstats->rxmulticastframes_g - pstats->rxunicastframes_g); + case IFCOUNTER_OPACKETS: + return (pstats->txframecount_gb); + case IFCOUNTER_OERRORS: + return (pstats->txframecount_gb - pstats->txframecount_g); + case IFCOUNTER_IBYTES: + return (pstats->rxoctetcount_gb); + case IFCOUNTER_OBYTES: + return (pstats->txoctetcount_gb); + default: + return (if_get_counter_default(ifp, cnt)); + } +} + +static int +axgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = 
&sc->pdata; + int ret; + + if (mtu > XGMAC_JUMBO_PACKET_MTU) + return (EINVAL); + + ret = xgbe_calc_rx_buf_size(pdata->netdev, mtu); + pdata->rx_buf_size = ret; + axgbe_printf(1, "%s: rx_buf_size %d\n", __func__, ret); + + sc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + return (0); +} + +static void +axgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) +{ + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + + ifmr->ifm_status = IFM_AVALID; + if (!sc->pdata.phy.link) + return; + + ifmr->ifm_active = IFM_ETHER; + ifmr->ifm_status |= IFM_ACTIVE; + + axgbe_printf(1, "Speed 0x%x Mode %d\n", sc->pdata.phy.speed, + pdata->phy_if.phy_impl.cur_mode(pdata)); + pdata->phy_if.phy_impl.get_type(pdata, ifmr); + + ifmr->ifm_active |= IFM_FDX; + ifmr->ifm_active |= IFM_ETH_TXPAUSE; + ifmr->ifm_active |= IFM_ETH_RXPAUSE; +} diff --git a/sys/dev/axgbe/xgbe-common.h b/sys/dev/axgbe/xgbe-common.h index bc081352bf53..dc13310dd4a3 100644 --- a/sys/dev/axgbe/xgbe-common.h +++ b/sys/dev/axgbe/xgbe-common.h @@ -1,1310 +1,1746 @@ /* * AMD 10Gb Ethernet driver * + * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. + * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
* * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. 
- * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef __XGBE_COMMON_H__ #define __XGBE_COMMON_H__ #include #include /* DMA register offsets */ #define DMA_MR 0x3000 #define DMA_SBMR 0x3004 #define DMA_ISR 0x3008 #define DMA_AXIARCR 0x3010 #define DMA_AXIAWCR 0x3018 +#define DMA_AXIAWARCR 0x301c #define DMA_DSR0 0x3020 #define DMA_DSR1 0x3024 +#define DMA_DSR2 0x3028 +#define DMA_DSR3 0x302C +#define DMA_DSR4 0x3030 +#define DMA_TXEDMACR 0x3040 +#define DMA_RXEDMACR 0x3044 /* DMA register entry bit positions and sizes */ -#define DMA_AXIARCR_DRC_INDEX 0 -#define DMA_AXIARCR_DRC_WIDTH 4 -#define DMA_AXIARCR_DRD_INDEX 4 -#define DMA_AXIARCR_DRD_WIDTH 2 -#define DMA_AXIARCR_TEC_INDEX 8 -#define DMA_AXIARCR_TEC_WIDTH 4 -#define DMA_AXIARCR_TED_INDEX 12 -#define DMA_AXIARCR_TED_WIDTH 2 -#define DMA_AXIARCR_THC_INDEX 16 -#define DMA_AXIARCR_THC_WIDTH 4 -#define DMA_AXIARCR_THD_INDEX 20 -#define DMA_AXIARCR_THD_WIDTH 2 -#define DMA_AXIAWCR_DWC_INDEX 0 -#define DMA_AXIAWCR_DWC_WIDTH 4 -#define DMA_AXIAWCR_DWD_INDEX 4 -#define DMA_AXIAWCR_DWD_WIDTH 2 -#define DMA_AXIAWCR_RPC_INDEX 8 -#define DMA_AXIAWCR_RPC_WIDTH 4 -#define DMA_AXIAWCR_RPD_INDEX 12 -#define DMA_AXIAWCR_RPD_WIDTH 2 -#define DMA_AXIAWCR_RHC_INDEX 16 -#define DMA_AXIAWCR_RHC_WIDTH 4 -#define DMA_AXIAWCR_RHD_INDEX 20 -#define DMA_AXIAWCR_RHD_WIDTH 2 -#define DMA_AXIAWCR_TDC_INDEX 24 -#define DMA_AXIAWCR_TDC_WIDTH 4 -#define DMA_AXIAWCR_TDD_INDEX 28 -#define DMA_AXIAWCR_TDD_WIDTH 2 #define DMA_ISR_MACIS_INDEX 17 #define DMA_ISR_MACIS_WIDTH 1 #define DMA_ISR_MTLIS_INDEX 16 #define DMA_ISR_MTLIS_WIDTH 1 +#define DMA_MR_INTM_INDEX 12 +#define DMA_MR_INTM_WIDTH 2 #define DMA_MR_SWR_INDEX 0 #define DMA_MR_SWR_WIDTH 1 +#define DMA_RXEDMACR_RDPS_INDEX 0 +#define DMA_RXEDMACR_RDPS_WIDTH 3 +#define DMA_SBMR_AAL_INDEX 12 +#define DMA_SBMR_AAL_WIDTH 1 #define DMA_SBMR_EAME_INDEX 11 #define DMA_SBMR_EAME_WIDTH 1 -#define DMA_SBMR_BLEN_256_INDEX 7 -#define DMA_SBMR_BLEN_256_WIDTH 1 +#define DMA_SBMR_BLEN_INDEX 1 +#define DMA_SBMR_BLEN_WIDTH 7 +#define 
DMA_SBMR_RD_OSR_LMT_INDEX 16 +#define DMA_SBMR_RD_OSR_LMT_WIDTH 6 #define DMA_SBMR_UNDEF_INDEX 0 #define DMA_SBMR_UNDEF_WIDTH 1 +#define DMA_SBMR_WR_OSR_LMT_INDEX 24 +#define DMA_SBMR_WR_OSR_LMT_WIDTH 6 +#define DMA_TXEDMACR_TDPS_INDEX 0 +#define DMA_TXEDMACR_TDPS_WIDTH 3 /* DMA register values */ +#define DMA_SBMR_BLEN_256 256 +#define DMA_SBMR_BLEN_128 128 +#define DMA_SBMR_BLEN_64 64 +#define DMA_SBMR_BLEN_32 32 +#define DMA_SBMR_BLEN_16 16 +#define DMA_SBMR_BLEN_8 8 +#define DMA_SBMR_BLEN_4 4 #define DMA_DSR_RPS_WIDTH 4 #define DMA_DSR_TPS_WIDTH 4 #define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH) #define DMA_DSR0_RPS_START 8 #define DMA_DSR0_TPS_START 12 #define DMA_DSRX_FIRST_QUEUE 3 #define DMA_DSRX_INC 4 #define DMA_DSRX_QPR 4 #define DMA_DSRX_RPS_START 0 #define DMA_DSRX_TPS_START 4 #define DMA_TPS_STOPPED 0x00 #define DMA_TPS_SUSPENDED 0x06 /* DMA channel register offsets * Multiple channels can be active. The first channel has registers * that begin at 0x3100. Each subsequent channel has registers that * are accessed using an offset of 0x80 from the previous channel. 
*/ #define DMA_CH_BASE 0x3100 #define DMA_CH_INC 0x80 #define DMA_CH_CR 0x00 #define DMA_CH_TCR 0x04 #define DMA_CH_RCR 0x08 #define DMA_CH_TDLR_HI 0x10 #define DMA_CH_TDLR_LO 0x14 #define DMA_CH_RDLR_HI 0x18 #define DMA_CH_RDLR_LO 0x1c #define DMA_CH_TDTR_LO 0x24 #define DMA_CH_RDTR_LO 0x2c #define DMA_CH_TDRLR 0x30 #define DMA_CH_RDRLR 0x34 #define DMA_CH_IER 0x38 #define DMA_CH_RIWT 0x3c #define DMA_CH_CATDR_LO 0x44 #define DMA_CH_CARDR_LO 0x4c #define DMA_CH_CATBR_HI 0x50 #define DMA_CH_CATBR_LO 0x54 #define DMA_CH_CARBR_HI 0x58 #define DMA_CH_CARBR_LO 0x5c #define DMA_CH_SR 0x60 +#define DMA_CH_DSR 0x64 +#define DMA_CH_DCFL 0x68 +#define DMA_CH_MFC 0x6c +#define DMA_CH_TDTRO 0x70 +#define DMA_CH_RDTRO 0x74 +#define DMA_CH_TDWRO 0x78 +#define DMA_CH_RDWRO 0x7C /* DMA channel register entry bit positions and sizes */ #define DMA_CH_CR_PBLX8_INDEX 16 #define DMA_CH_CR_PBLX8_WIDTH 1 #define DMA_CH_CR_SPH_INDEX 24 #define DMA_CH_CR_SPH_WIDTH 1 -#define DMA_CH_IER_AIE_INDEX 15 +#define DMA_CH_IER_AIE20_INDEX 15 +#define DMA_CH_IER_AIE20_WIDTH 1 +#define DMA_CH_IER_AIE_INDEX 14 #define DMA_CH_IER_AIE_WIDTH 1 #define DMA_CH_IER_FBEE_INDEX 12 #define DMA_CH_IER_FBEE_WIDTH 1 -#define DMA_CH_IER_NIE_INDEX 16 +#define DMA_CH_IER_NIE20_INDEX 16 +#define DMA_CH_IER_NIE20_WIDTH 1 +#define DMA_CH_IER_NIE_INDEX 15 #define DMA_CH_IER_NIE_WIDTH 1 #define DMA_CH_IER_RBUE_INDEX 7 #define DMA_CH_IER_RBUE_WIDTH 1 #define DMA_CH_IER_RIE_INDEX 6 #define DMA_CH_IER_RIE_WIDTH 1 #define DMA_CH_IER_RSE_INDEX 8 #define DMA_CH_IER_RSE_WIDTH 1 #define DMA_CH_IER_TBUE_INDEX 2 #define DMA_CH_IER_TBUE_WIDTH 1 #define DMA_CH_IER_TIE_INDEX 0 #define DMA_CH_IER_TIE_WIDTH 1 #define DMA_CH_IER_TXSE_INDEX 1 #define DMA_CH_IER_TXSE_WIDTH 1 #define DMA_CH_RCR_PBL_INDEX 16 #define DMA_CH_RCR_PBL_WIDTH 6 #define DMA_CH_RCR_RBSZ_INDEX 1 #define DMA_CH_RCR_RBSZ_WIDTH 14 #define DMA_CH_RCR_SR_INDEX 0 #define DMA_CH_RCR_SR_WIDTH 1 #define DMA_CH_RIWT_RWT_INDEX 0 #define DMA_CH_RIWT_RWT_WIDTH 8 #define 
DMA_CH_SR_FBE_INDEX 12 #define DMA_CH_SR_FBE_WIDTH 1 #define DMA_CH_SR_RBU_INDEX 7 #define DMA_CH_SR_RBU_WIDTH 1 #define DMA_CH_SR_RI_INDEX 6 #define DMA_CH_SR_RI_WIDTH 1 #define DMA_CH_SR_RPS_INDEX 8 #define DMA_CH_SR_RPS_WIDTH 1 #define DMA_CH_SR_TBU_INDEX 2 #define DMA_CH_SR_TBU_WIDTH 1 #define DMA_CH_SR_TI_INDEX 0 #define DMA_CH_SR_TI_WIDTH 1 #define DMA_CH_SR_TPS_INDEX 1 #define DMA_CH_SR_TPS_WIDTH 1 #define DMA_CH_TCR_OSP_INDEX 4 #define DMA_CH_TCR_OSP_WIDTH 1 #define DMA_CH_TCR_PBL_INDEX 16 #define DMA_CH_TCR_PBL_WIDTH 6 #define DMA_CH_TCR_ST_INDEX 0 #define DMA_CH_TCR_ST_WIDTH 1 #define DMA_CH_TCR_TSE_INDEX 12 #define DMA_CH_TCR_TSE_WIDTH 1 /* DMA channel register values */ #define DMA_OSP_DISABLE 0x00 #define DMA_OSP_ENABLE 0x01 #define DMA_PBL_1 1 #define DMA_PBL_2 2 #define DMA_PBL_4 4 #define DMA_PBL_8 8 #define DMA_PBL_16 16 #define DMA_PBL_32 32 #define DMA_PBL_64 64 /* 8 x 8 */ #define DMA_PBL_128 128 /* 8 x 16 */ #define DMA_PBL_256 256 /* 8 x 32 */ #define DMA_PBL_X8_DISABLE 0x00 #define DMA_PBL_X8_ENABLE 0x01 /* MAC register offsets */ #define MAC_TCR 0x0000 #define MAC_RCR 0x0004 #define MAC_PFR 0x0008 #define MAC_WTR 0x000c #define MAC_HTR0 0x0010 +#define MAC_HTR1 0x0014 +#define MAC_HTR2 0x0018 +#define MAC_HTR3 0x001c +#define MAC_HTR4 0x0020 +#define MAC_HTR5 0x0024 +#define MAC_HTR6 0x0028 +#define MAC_HTR7 0x002c #define MAC_VLANTR 0x0050 #define MAC_VLANHTR 0x0058 #define MAC_VLANIR 0x0060 #define MAC_IVLANIR 0x0064 #define MAC_RETMR 0x006c #define MAC_Q0TFCR 0x0070 +#define MAC_Q1TFCR 0x0074 +#define MAC_Q2TFCR 0x0078 +#define MAC_Q3TFCR 0x007c +#define MAC_Q4TFCR 0x0080 +#define MAC_Q5TFCR 0x0084 +#define MAC_Q6TFCR 0x0088 +#define MAC_Q7TFCR 0x008c #define MAC_RFCR 0x0090 #define MAC_RQC0R 0x00a0 #define MAC_RQC1R 0x00a4 #define MAC_RQC2R 0x00a8 #define MAC_RQC3R 0x00ac #define MAC_ISR 0x00b0 #define MAC_IER 0x00b4 #define MAC_RTSR 0x00b8 #define MAC_PMTCSR 0x00c0 #define MAC_RWKPFR 0x00c4 #define MAC_LPICSR 0x00d0 #define MAC_LPITCR 
0x00d4 +#define MAC_TIR 0x00e0 #define MAC_VR 0x0110 #define MAC_DR 0x0114 #define MAC_HWF0R 0x011c #define MAC_HWF1R 0x0120 #define MAC_HWF2R 0x0124 +#define MAC_MDIOSCAR 0x0200 +#define MAC_MDIOSCCDR 0x0204 +#define MAC_MDIOISR 0x0214 +#define MAC_MDIOIER 0x0218 +#define MAC_MDIOCL22R 0x0220 #define MAC_GPIOCR 0x0278 #define MAC_GPIOSR 0x027c #define MAC_MACA0HR 0x0300 #define MAC_MACA0LR 0x0304 #define MAC_MACA1HR 0x0308 #define MAC_MACA1LR 0x030c #define MAC_RSSCR 0x0c80 #define MAC_RSSAR 0x0c88 #define MAC_RSSDR 0x0c8c #define MAC_TSCR 0x0d00 #define MAC_SSIR 0x0d04 #define MAC_STSR 0x0d08 #define MAC_STNR 0x0d0c #define MAC_STSUR 0x0d10 #define MAC_STNUR 0x0d14 #define MAC_TSAR 0x0d18 #define MAC_TSSR 0x0d20 #define MAC_TXSNR 0x0d30 #define MAC_TXSSR 0x0d34 #define MAC_QTFCR_INC 4 #define MAC_MACA_INC 4 #define MAC_HTR_INC 4 #define MAC_RQC2_INC 4 #define MAC_RQC2_Q_PER_REG 4 /* MAC register entry bit positions and sizes */ #define MAC_HWF0R_ADDMACADRSEL_INDEX 18 #define MAC_HWF0R_ADDMACADRSEL_WIDTH 5 #define MAC_HWF0R_ARPOFFSEL_INDEX 9 #define MAC_HWF0R_ARPOFFSEL_WIDTH 1 #define MAC_HWF0R_EEESEL_INDEX 13 #define MAC_HWF0R_EEESEL_WIDTH 1 #define MAC_HWF0R_GMIISEL_INDEX 1 #define MAC_HWF0R_GMIISEL_WIDTH 1 #define MAC_HWF0R_MGKSEL_INDEX 7 #define MAC_HWF0R_MGKSEL_WIDTH 1 #define MAC_HWF0R_MMCSEL_INDEX 8 #define MAC_HWF0R_MMCSEL_WIDTH 1 #define MAC_HWF0R_RWKSEL_INDEX 6 #define MAC_HWF0R_RWKSEL_WIDTH 1 #define MAC_HWF0R_RXCOESEL_INDEX 16 #define MAC_HWF0R_RXCOESEL_WIDTH 1 #define MAC_HWF0R_SAVLANINS_INDEX 27 #define MAC_HWF0R_SAVLANINS_WIDTH 1 #define MAC_HWF0R_SMASEL_INDEX 5 #define MAC_HWF0R_SMASEL_WIDTH 1 #define MAC_HWF0R_TSSEL_INDEX 12 #define MAC_HWF0R_TSSEL_WIDTH 1 #define MAC_HWF0R_TSSTSSEL_INDEX 25 #define MAC_HWF0R_TSSTSSEL_WIDTH 2 #define MAC_HWF0R_TXCOESEL_INDEX 14 #define MAC_HWF0R_TXCOESEL_WIDTH 1 #define MAC_HWF0R_VLHASH_INDEX 4 #define MAC_HWF0R_VLHASH_WIDTH 1 +#define MAC_HWF0R_VXN_INDEX 29 +#define MAC_HWF0R_VXN_WIDTH 1 #define 
MAC_HWF1R_ADDR64_INDEX 14 #define MAC_HWF1R_ADDR64_WIDTH 2 #define MAC_HWF1R_ADVTHWORD_INDEX 13 #define MAC_HWF1R_ADVTHWORD_WIDTH 1 #define MAC_HWF1R_DBGMEMA_INDEX 19 #define MAC_HWF1R_DBGMEMA_WIDTH 1 #define MAC_HWF1R_DCBEN_INDEX 16 #define MAC_HWF1R_DCBEN_WIDTH 1 #define MAC_HWF1R_HASHTBLSZ_INDEX 24 #define MAC_HWF1R_HASHTBLSZ_WIDTH 3 #define MAC_HWF1R_L3L4FNUM_INDEX 27 #define MAC_HWF1R_L3L4FNUM_WIDTH 4 #define MAC_HWF1R_NUMTC_INDEX 21 #define MAC_HWF1R_NUMTC_WIDTH 3 #define MAC_HWF1R_RSSEN_INDEX 20 #define MAC_HWF1R_RSSEN_WIDTH 1 #define MAC_HWF1R_RXFIFOSIZE_INDEX 0 #define MAC_HWF1R_RXFIFOSIZE_WIDTH 5 #define MAC_HWF1R_SPHEN_INDEX 17 #define MAC_HWF1R_SPHEN_WIDTH 1 #define MAC_HWF1R_TSOEN_INDEX 18 #define MAC_HWF1R_TSOEN_WIDTH 1 #define MAC_HWF1R_TXFIFOSIZE_INDEX 6 #define MAC_HWF1R_TXFIFOSIZE_WIDTH 5 #define MAC_HWF2R_AUXSNAPNUM_INDEX 28 #define MAC_HWF2R_AUXSNAPNUM_WIDTH 3 #define MAC_HWF2R_PPSOUTNUM_INDEX 24 #define MAC_HWF2R_PPSOUTNUM_WIDTH 3 #define MAC_HWF2R_RXCHCNT_INDEX 12 #define MAC_HWF2R_RXCHCNT_WIDTH 4 #define MAC_HWF2R_RXQCNT_INDEX 0 #define MAC_HWF2R_RXQCNT_WIDTH 4 #define MAC_HWF2R_TXCHCNT_INDEX 18 #define MAC_HWF2R_TXCHCNT_WIDTH 4 #define MAC_HWF2R_TXQCNT_INDEX 6 #define MAC_HWF2R_TXQCNT_WIDTH 4 #define MAC_IER_TSIE_INDEX 12 #define MAC_IER_TSIE_WIDTH 1 #define MAC_ISR_MMCRXIS_INDEX 9 #define MAC_ISR_MMCRXIS_WIDTH 1 #define MAC_ISR_MMCTXIS_INDEX 10 #define MAC_ISR_MMCTXIS_WIDTH 1 #define MAC_ISR_PMTIS_INDEX 4 #define MAC_ISR_PMTIS_WIDTH 1 +#define MAC_ISR_SMI_INDEX 1 +#define MAC_ISR_SMI_WIDTH 1 #define MAC_ISR_TSIS_INDEX 12 #define MAC_ISR_TSIS_WIDTH 1 #define MAC_MACA1HR_AE_INDEX 31 #define MAC_MACA1HR_AE_WIDTH 1 +#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12 +#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1 +#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12 +#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1 +#define MAC_MDIOSCAR_DA_INDEX 21 +#define MAC_MDIOSCAR_DA_WIDTH 5 +#define MAC_MDIOSCAR_PA_INDEX 16 +#define MAC_MDIOSCAR_PA_WIDTH 5 +#define MAC_MDIOSCAR_RA_INDEX 0 
+#define MAC_MDIOSCAR_RA_WIDTH 16 +#define MAC_MDIOSCCDR_BUSY_INDEX 22 +#define MAC_MDIOSCCDR_BUSY_WIDTH 1 +#define MAC_MDIOSCCDR_CMD_INDEX 16 +#define MAC_MDIOSCCDR_CMD_WIDTH 2 +#define MAC_MDIOSCCDR_CR_INDEX 19 +#define MAC_MDIOSCCDR_CR_WIDTH 3 +#define MAC_MDIOSCCDR_DATA_INDEX 0 +#define MAC_MDIOSCCDR_DATA_WIDTH 16 +#define MAC_MDIOSCCDR_SADDR_INDEX 18 +#define MAC_MDIOSCCDR_SADDR_WIDTH 1 #define MAC_PFR_HMC_INDEX 2 #define MAC_PFR_HMC_WIDTH 1 #define MAC_PFR_HPF_INDEX 10 #define MAC_PFR_HPF_WIDTH 1 #define MAC_PFR_HUC_INDEX 1 #define MAC_PFR_HUC_WIDTH 1 #define MAC_PFR_PM_INDEX 4 #define MAC_PFR_PM_WIDTH 1 #define MAC_PFR_PR_INDEX 0 #define MAC_PFR_PR_WIDTH 1 #define MAC_PFR_VTFE_INDEX 16 #define MAC_PFR_VTFE_WIDTH 1 +#define MAC_PFR_VUCC_INDEX 22 +#define MAC_PFR_VUCC_WIDTH 1 #define MAC_PMTCSR_MGKPKTEN_INDEX 1 #define MAC_PMTCSR_MGKPKTEN_WIDTH 1 #define MAC_PMTCSR_PWRDWN_INDEX 0 #define MAC_PMTCSR_PWRDWN_WIDTH 1 #define MAC_PMTCSR_RWKFILTRST_INDEX 31 #define MAC_PMTCSR_RWKFILTRST_WIDTH 1 #define MAC_PMTCSR_RWKPKTEN_INDEX 2 #define MAC_PMTCSR_RWKPKTEN_WIDTH 1 #define MAC_Q0TFCR_PT_INDEX 16 #define MAC_Q0TFCR_PT_WIDTH 16 #define MAC_Q0TFCR_TFE_INDEX 1 #define MAC_Q0TFCR_TFE_WIDTH 1 #define MAC_RCR_ACS_INDEX 1 #define MAC_RCR_ACS_WIDTH 1 #define MAC_RCR_CST_INDEX 2 #define MAC_RCR_CST_WIDTH 1 #define MAC_RCR_DCRCC_INDEX 3 #define MAC_RCR_DCRCC_WIDTH 1 #define MAC_RCR_HDSMS_INDEX 12 #define MAC_RCR_HDSMS_WIDTH 3 #define MAC_RCR_IPC_INDEX 9 #define MAC_RCR_IPC_WIDTH 1 #define MAC_RCR_JE_INDEX 8 #define MAC_RCR_JE_WIDTH 1 #define MAC_RCR_LM_INDEX 10 #define MAC_RCR_LM_WIDTH 1 #define MAC_RCR_RE_INDEX 0 #define MAC_RCR_RE_WIDTH 1 +#define MAC_RCR_ARPEN_INDEX 31 +#define MAC_RCR_ARPEN_WIDTH 1 #define MAC_RFCR_PFCE_INDEX 8 #define MAC_RFCR_PFCE_WIDTH 1 #define MAC_RFCR_RFE_INDEX 0 #define MAC_RFCR_RFE_WIDTH 1 #define MAC_RFCR_UP_INDEX 1 #define MAC_RFCR_UP_WIDTH 1 #define MAC_RQC0R_RXQ0EN_INDEX 0 #define MAC_RQC0R_RXQ0EN_WIDTH 2 #define MAC_RSSAR_ADDRT_INDEX 2 #define 
MAC_RSSAR_ADDRT_WIDTH 1 #define MAC_RSSAR_CT_INDEX 1 #define MAC_RSSAR_CT_WIDTH 1 #define MAC_RSSAR_OB_INDEX 0 #define MAC_RSSAR_OB_WIDTH 1 #define MAC_RSSAR_RSSIA_INDEX 8 #define MAC_RSSAR_RSSIA_WIDTH 8 #define MAC_RSSCR_IP2TE_INDEX 1 #define MAC_RSSCR_IP2TE_WIDTH 1 #define MAC_RSSCR_RSSE_INDEX 0 #define MAC_RSSCR_RSSE_WIDTH 1 #define MAC_RSSCR_TCP4TE_INDEX 2 #define MAC_RSSCR_TCP4TE_WIDTH 1 #define MAC_RSSCR_UDP4TE_INDEX 3 #define MAC_RSSCR_UDP4TE_WIDTH 1 #define MAC_RSSDR_DMCH_INDEX 0 #define MAC_RSSDR_DMCH_WIDTH 4 #define MAC_SSIR_SNSINC_INDEX 8 #define MAC_SSIR_SNSINC_WIDTH 8 #define MAC_SSIR_SSINC_INDEX 16 #define MAC_SSIR_SSINC_WIDTH 8 #define MAC_TCR_SS_INDEX 29 #define MAC_TCR_SS_WIDTH 2 #define MAC_TCR_TE_INDEX 0 #define MAC_TCR_TE_WIDTH 1 +#define MAC_TCR_VNE_INDEX 24 +#define MAC_TCR_VNE_WIDTH 1 +#define MAC_TCR_VNM_INDEX 25 +#define MAC_TCR_VNM_WIDTH 1 +#define MAC_TIR_TNID_INDEX 0 +#define MAC_TIR_TNID_WIDTH 16 #define MAC_TSCR_AV8021ASMEN_INDEX 28 #define MAC_TSCR_AV8021ASMEN_WIDTH 1 #define MAC_TSCR_SNAPTYPSEL_INDEX 16 #define MAC_TSCR_SNAPTYPSEL_WIDTH 2 #define MAC_TSCR_TSADDREG_INDEX 5 #define MAC_TSCR_TSADDREG_WIDTH 1 #define MAC_TSCR_TSCFUPDT_INDEX 1 #define MAC_TSCR_TSCFUPDT_WIDTH 1 #define MAC_TSCR_TSCTRLSSR_INDEX 9 #define MAC_TSCR_TSCTRLSSR_WIDTH 1 #define MAC_TSCR_TSENA_INDEX 0 #define MAC_TSCR_TSENA_WIDTH 1 #define MAC_TSCR_TSENALL_INDEX 8 #define MAC_TSCR_TSENALL_WIDTH 1 #define MAC_TSCR_TSEVNTENA_INDEX 14 #define MAC_TSCR_TSEVNTENA_WIDTH 1 #define MAC_TSCR_TSINIT_INDEX 2 #define MAC_TSCR_TSINIT_WIDTH 1 #define MAC_TSCR_TSIPENA_INDEX 11 #define MAC_TSCR_TSIPENA_WIDTH 1 #define MAC_TSCR_TSIPV4ENA_INDEX 13 #define MAC_TSCR_TSIPV4ENA_WIDTH 1 #define MAC_TSCR_TSIPV6ENA_INDEX 12 #define MAC_TSCR_TSIPV6ENA_WIDTH 1 #define MAC_TSCR_TSMSTRENA_INDEX 15 #define MAC_TSCR_TSMSTRENA_WIDTH 1 #define MAC_TSCR_TSVER2ENA_INDEX 10 #define MAC_TSCR_TSVER2ENA_WIDTH 1 #define MAC_TSCR_TXTSSTSM_INDEX 24 #define MAC_TSCR_TXTSSTSM_WIDTH 1 #define 
MAC_TSSR_TXTSC_INDEX 15 #define MAC_TSSR_TXTSC_WIDTH 1 #define MAC_TXSNR_TXTSSTSMIS_INDEX 31 #define MAC_TXSNR_TXTSSTSMIS_WIDTH 1 #define MAC_VLANHTR_VLHT_INDEX 0 #define MAC_VLANHTR_VLHT_WIDTH 16 #define MAC_VLANIR_VLTI_INDEX 20 #define MAC_VLANIR_VLTI_WIDTH 1 #define MAC_VLANIR_CSVL_INDEX 19 #define MAC_VLANIR_CSVL_WIDTH 1 #define MAC_VLANTR_DOVLTC_INDEX 20 #define MAC_VLANTR_DOVLTC_WIDTH 1 #define MAC_VLANTR_ERSVLM_INDEX 19 #define MAC_VLANTR_ERSVLM_WIDTH 1 #define MAC_VLANTR_ESVL_INDEX 18 #define MAC_VLANTR_ESVL_WIDTH 1 #define MAC_VLANTR_ETV_INDEX 16 #define MAC_VLANTR_ETV_WIDTH 1 #define MAC_VLANTR_EVLS_INDEX 21 #define MAC_VLANTR_EVLS_WIDTH 2 #define MAC_VLANTR_EVLRXS_INDEX 24 #define MAC_VLANTR_EVLRXS_WIDTH 1 #define MAC_VLANTR_VL_INDEX 0 #define MAC_VLANTR_VL_WIDTH 16 #define MAC_VLANTR_VTHM_INDEX 25 #define MAC_VLANTR_VTHM_WIDTH 1 #define MAC_VLANTR_VTIM_INDEX 17 #define MAC_VLANTR_VTIM_WIDTH 1 #define MAC_VR_DEVID_INDEX 8 #define MAC_VR_DEVID_WIDTH 8 #define MAC_VR_SNPSVER_INDEX 0 #define MAC_VR_SNPSVER_WIDTH 8 #define MAC_VR_USERVER_INDEX 16 #define MAC_VR_USERVER_WIDTH 8 /* MMC register offsets */ #define MMC_CR 0x0800 #define MMC_RISR 0x0804 #define MMC_TISR 0x0808 #define MMC_RIER 0x080c #define MMC_TIER 0x0810 #define MMC_TXOCTETCOUNT_GB_LO 0x0814 #define MMC_TXOCTETCOUNT_GB_HI 0x0818 #define MMC_TXFRAMECOUNT_GB_LO 0x081c #define MMC_TXFRAMECOUNT_GB_HI 0x0820 #define MMC_TXBROADCASTFRAMES_G_LO 0x0824 #define MMC_TXBROADCASTFRAMES_G_HI 0x0828 #define MMC_TXMULTICASTFRAMES_G_LO 0x082c #define MMC_TXMULTICASTFRAMES_G_HI 0x0830 #define MMC_TX64OCTETS_GB_LO 0x0834 #define MMC_TX64OCTETS_GB_HI 0x0838 #define MMC_TX65TO127OCTETS_GB_LO 0x083c #define MMC_TX65TO127OCTETS_GB_HI 0x0840 #define MMC_TX128TO255OCTETS_GB_LO 0x0844 #define MMC_TX128TO255OCTETS_GB_HI 0x0848 #define MMC_TX256TO511OCTETS_GB_LO 0x084c #define MMC_TX256TO511OCTETS_GB_HI 0x0850 #define MMC_TX512TO1023OCTETS_GB_LO 0x0854 #define MMC_TX512TO1023OCTETS_GB_HI 0x0858 #define 
MMC_TX1024TOMAXOCTETS_GB_LO 0x085c #define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860 #define MMC_TXUNICASTFRAMES_GB_LO 0x0864 #define MMC_TXUNICASTFRAMES_GB_HI 0x0868 #define MMC_TXMULTICASTFRAMES_GB_LO 0x086c #define MMC_TXMULTICASTFRAMES_GB_HI 0x0870 #define MMC_TXBROADCASTFRAMES_GB_LO 0x0874 #define MMC_TXBROADCASTFRAMES_GB_HI 0x0878 #define MMC_TXUNDERFLOWERROR_LO 0x087c #define MMC_TXUNDERFLOWERROR_HI 0x0880 #define MMC_TXOCTETCOUNT_G_LO 0x0884 #define MMC_TXOCTETCOUNT_G_HI 0x0888 #define MMC_TXFRAMECOUNT_G_LO 0x088c #define MMC_TXFRAMECOUNT_G_HI 0x0890 #define MMC_TXPAUSEFRAMES_LO 0x0894 #define MMC_TXPAUSEFRAMES_HI 0x0898 #define MMC_TXVLANFRAMES_G_LO 0x089c #define MMC_TXVLANFRAMES_G_HI 0x08a0 #define MMC_RXFRAMECOUNT_GB_LO 0x0900 #define MMC_RXFRAMECOUNT_GB_HI 0x0904 #define MMC_RXOCTETCOUNT_GB_LO 0x0908 #define MMC_RXOCTETCOUNT_GB_HI 0x090c #define MMC_RXOCTETCOUNT_G_LO 0x0910 #define MMC_RXOCTETCOUNT_G_HI 0x0914 #define MMC_RXBROADCASTFRAMES_G_LO 0x0918 #define MMC_RXBROADCASTFRAMES_G_HI 0x091c #define MMC_RXMULTICASTFRAMES_G_LO 0x0920 #define MMC_RXMULTICASTFRAMES_G_HI 0x0924 #define MMC_RXCRCERROR_LO 0x0928 #define MMC_RXCRCERROR_HI 0x092c #define MMC_RXRUNTERROR 0x0930 #define MMC_RXJABBERERROR 0x0934 #define MMC_RXUNDERSIZE_G 0x0938 #define MMC_RXOVERSIZE_G 0x093c #define MMC_RX64OCTETS_GB_LO 0x0940 #define MMC_RX64OCTETS_GB_HI 0x0944 #define MMC_RX65TO127OCTETS_GB_LO 0x0948 #define MMC_RX65TO127OCTETS_GB_HI 0x094c #define MMC_RX128TO255OCTETS_GB_LO 0x0950 #define MMC_RX128TO255OCTETS_GB_HI 0x0954 #define MMC_RX256TO511OCTETS_GB_LO 0x0958 #define MMC_RX256TO511OCTETS_GB_HI 0x095c #define MMC_RX512TO1023OCTETS_GB_LO 0x0960 #define MMC_RX512TO1023OCTETS_GB_HI 0x0964 #define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968 #define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c #define MMC_RXUNICASTFRAMES_G_LO 0x0970 #define MMC_RXUNICASTFRAMES_G_HI 0x0974 #define MMC_RXLENGTHERROR_LO 0x0978 #define MMC_RXLENGTHERROR_HI 0x097c #define MMC_RXOUTOFRANGETYPE_LO 0x0980 #define 
MMC_RXOUTOFRANGETYPE_HI 0x0984 #define MMC_RXPAUSEFRAMES_LO 0x0988 #define MMC_RXPAUSEFRAMES_HI 0x098c #define MMC_RXFIFOOVERFLOW_LO 0x0990 #define MMC_RXFIFOOVERFLOW_HI 0x0994 #define MMC_RXVLANFRAMES_GB_LO 0x0998 #define MMC_RXVLANFRAMES_GB_HI 0x099c #define MMC_RXWATCHDOGERROR 0x09a0 /* MMC register entry bit positions and sizes */ #define MMC_CR_CR_INDEX 0 #define MMC_CR_CR_WIDTH 1 #define MMC_CR_CSR_INDEX 1 #define MMC_CR_CSR_WIDTH 1 #define MMC_CR_ROR_INDEX 2 #define MMC_CR_ROR_WIDTH 1 #define MMC_CR_MCF_INDEX 3 #define MMC_CR_MCF_WIDTH 1 #define MMC_CR_MCT_INDEX 4 #define MMC_CR_MCT_WIDTH 2 #define MMC_RIER_ALL_INTERRUPTS_INDEX 0 #define MMC_RIER_ALL_INTERRUPTS_WIDTH 23 #define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0 #define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1 #define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1 #define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1 #define MMC_RISR_RXOCTETCOUNT_G_INDEX 2 #define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1 #define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3 #define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1 #define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4 #define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1 #define MMC_RISR_RXCRCERROR_INDEX 5 #define MMC_RISR_RXCRCERROR_WIDTH 1 #define MMC_RISR_RXRUNTERROR_INDEX 6 #define MMC_RISR_RXRUNTERROR_WIDTH 1 #define MMC_RISR_RXJABBERERROR_INDEX 7 #define MMC_RISR_RXJABBERERROR_WIDTH 1 #define MMC_RISR_RXUNDERSIZE_G_INDEX 8 #define MMC_RISR_RXUNDERSIZE_G_WIDTH 1 #define MMC_RISR_RXOVERSIZE_G_INDEX 9 #define MMC_RISR_RXOVERSIZE_G_WIDTH 1 #define MMC_RISR_RX64OCTETS_GB_INDEX 10 #define MMC_RISR_RX64OCTETS_GB_WIDTH 1 #define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11 #define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1 #define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12 #define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1 #define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13 #define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1 #define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14 #define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1 #define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15 #define 
MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1 #define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16 #define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1 #define MMC_RISR_RXLENGTHERROR_INDEX 17 #define MMC_RISR_RXLENGTHERROR_WIDTH 1 #define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18 #define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1 #define MMC_RISR_RXPAUSEFRAMES_INDEX 19 #define MMC_RISR_RXPAUSEFRAMES_WIDTH 1 #define MMC_RISR_RXFIFOOVERFLOW_INDEX 20 #define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1 #define MMC_RISR_RXVLANFRAMES_GB_INDEX 21 #define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1 #define MMC_RISR_RXWATCHDOGERROR_INDEX 22 #define MMC_RISR_RXWATCHDOGERROR_WIDTH 1 #define MMC_TIER_ALL_INTERRUPTS_INDEX 0 #define MMC_TIER_ALL_INTERRUPTS_WIDTH 18 #define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0 #define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1 #define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1 #define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1 #define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2 #define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1 #define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3 #define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1 #define MMC_TISR_TX64OCTETS_GB_INDEX 4 #define MMC_TISR_TX64OCTETS_GB_WIDTH 1 #define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5 #define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1 #define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6 #define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1 #define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7 #define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1 #define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8 #define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1 #define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9 #define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1 #define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10 #define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1 #define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11 #define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1 #define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12 #define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1 #define MMC_TISR_TXUNDERFLOWERROR_INDEX 13 #define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1 #define MMC_TISR_TXOCTETCOUNT_G_INDEX 14 #define 
MMC_TISR_TXOCTETCOUNT_G_WIDTH 1 #define MMC_TISR_TXFRAMECOUNT_G_INDEX 15 #define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1 #define MMC_TISR_TXPAUSEFRAMES_INDEX 16 #define MMC_TISR_TXPAUSEFRAMES_WIDTH 1 #define MMC_TISR_TXVLANFRAMES_G_INDEX 17 #define MMC_TISR_TXVLANFRAMES_G_WIDTH 1 /* MTL register offsets */ #define MTL_OMR 0x1000 #define MTL_FDCR 0x1008 #define MTL_FDSR 0x100c #define MTL_FDDR 0x1010 #define MTL_ISR 0x1020 #define MTL_RQDCM0R 0x1030 +#define MTL_RQDCM1R 0x1034 +#define MTL_RQDCM2R 0x1038 #define MTL_TCPM0R 0x1040 #define MTL_TCPM1R 0x1044 #define MTL_RQDCM_INC 4 #define MTL_RQDCM_Q_PER_REG 4 #define MTL_TCPM_INC 4 #define MTL_TCPM_TC_PER_REG 4 /* MTL register entry bit positions and sizes */ #define MTL_OMR_ETSALG_INDEX 5 #define MTL_OMR_ETSALG_WIDTH 2 #define MTL_OMR_RAA_INDEX 2 #define MTL_OMR_RAA_WIDTH 1 /* MTL queue register offsets * Multiple queues can be active. The first queue has registers * that begin at 0x1100. Each subsequent queue has registers that * are accessed using an offset of 0x80 from the previous queue. 
*/ #define MTL_Q_BASE 0x1100 #define MTL_Q_INC 0x80 #define MTL_Q_TQOMR 0x00 #define MTL_Q_TQUR 0x04 #define MTL_Q_TQDR 0x08 +#define MTL_Q_TC0ETSCR 0x10 +#define MTL_Q_TC0ETSSR 0x14 +#define MTL_Q_TC0QWR 0x18 #define MTL_Q_RQOMR 0x40 #define MTL_Q_RQMPOCR 0x44 #define MTL_Q_RQDR 0x48 +#define MTL_Q_RQCR 0x4c #define MTL_Q_RQFCR 0x50 #define MTL_Q_IER 0x70 #define MTL_Q_ISR 0x74 /* MTL queue register entry bit positions and sizes */ #define MTL_Q_RQDR_PRXQ_INDEX 16 #define MTL_Q_RQDR_PRXQ_WIDTH 14 #define MTL_Q_RQDR_RXQSTS_INDEX 4 #define MTL_Q_RQDR_RXQSTS_WIDTH 2 #define MTL_Q_RQFCR_RFA_INDEX 1 #define MTL_Q_RQFCR_RFA_WIDTH 6 #define MTL_Q_RQFCR_RFD_INDEX 17 #define MTL_Q_RQFCR_RFD_WIDTH 6 #define MTL_Q_RQOMR_EHFC_INDEX 7 #define MTL_Q_RQOMR_EHFC_WIDTH 1 #define MTL_Q_RQOMR_RQS_INDEX 16 #define MTL_Q_RQOMR_RQS_WIDTH 9 #define MTL_Q_RQOMR_RSF_INDEX 5 #define MTL_Q_RQOMR_RSF_WIDTH 1 #define MTL_Q_RQOMR_RTC_INDEX 0 #define MTL_Q_RQOMR_RTC_WIDTH 2 +#define MTL_Q_TQDR_TRCSTS_INDEX 1 +#define MTL_Q_TQDR_TRCSTS_WIDTH 2 +#define MTL_Q_TQDR_TXQSTS_INDEX 4 +#define MTL_Q_TQDR_TXQSTS_WIDTH 1 #define MTL_Q_TQOMR_FTQ_INDEX 0 #define MTL_Q_TQOMR_FTQ_WIDTH 1 #define MTL_Q_TQOMR_Q2TCMAP_INDEX 8 #define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3 #define MTL_Q_TQOMR_TQS_INDEX 16 #define MTL_Q_TQOMR_TQS_WIDTH 10 #define MTL_Q_TQOMR_TSF_INDEX 1 #define MTL_Q_TQOMR_TSF_WIDTH 1 #define MTL_Q_TQOMR_TTC_INDEX 4 #define MTL_Q_TQOMR_TTC_WIDTH 3 #define MTL_Q_TQOMR_TXQEN_INDEX 2 #define MTL_Q_TQOMR_TXQEN_WIDTH 2 /* MTL queue register value */ #define MTL_RSF_DISABLE 0x00 #define MTL_RSF_ENABLE 0x01 #define MTL_TSF_DISABLE 0x00 #define MTL_TSF_ENABLE 0x01 #define MTL_RX_THRESHOLD_64 0x00 #define MTL_RX_THRESHOLD_96 0x02 #define MTL_RX_THRESHOLD_128 0x03 #define MTL_TX_THRESHOLD_32 0x01 #define MTL_TX_THRESHOLD_64 0x00 #define MTL_TX_THRESHOLD_96 0x02 #define MTL_TX_THRESHOLD_128 0x03 #define MTL_TX_THRESHOLD_192 0x04 #define MTL_TX_THRESHOLD_256 0x05 #define MTL_TX_THRESHOLD_384 0x06 #define 
MTL_TX_THRESHOLD_512 0x07 #define MTL_ETSALG_WRR 0x00 #define MTL_ETSALG_WFQ 0x01 #define MTL_ETSALG_DWRR 0x02 #define MTL_RAA_SP 0x00 #define MTL_RAA_WSP 0x01 #define MTL_Q_DISABLED 0x00 #define MTL_Q_ENABLED 0x02 /* MTL traffic class register offsets * Multiple traffic classes can be active. The first class has registers * that begin at 0x1100. Each subsequent queue has registers that * are accessed using an offset of 0x80 from the previous queue. */ #define MTL_TC_BASE MTL_Q_BASE #define MTL_TC_INC MTL_Q_INC #define MTL_TC_ETSCR 0x10 #define MTL_TC_ETSSR 0x14 #define MTL_TC_QWR 0x18 /* MTL traffic class register entry bit positions and sizes */ #define MTL_TC_ETSCR_TSA_INDEX 0 #define MTL_TC_ETSCR_TSA_WIDTH 2 #define MTL_TC_QWR_QW_INDEX 0 #define MTL_TC_QWR_QW_WIDTH 21 /* MTL traffic class register value */ #define MTL_TSA_SP 0x00 #define MTL_TSA_ETS 0x02 /* PCS MMD select register offset * The MMD select register is used for accessing PCS registers * when the underlying APB3 interface is using indirect addressing. * Indirect addressing requires accessing registers in two phases, * an address phase and a data phase. The address phases requires * writing an address selection value to the MMD select regiesters. 
*/ -#define PCS_MMD_SELECT 0xff +#define PCS_V1_WINDOW_SELECT 0x03fc +#define PCS_V2_WINDOW_DEF 0x9060 +#define PCS_V2_WINDOW_SELECT 0x9064 +#define PCS_V2_RV_WINDOW_DEF 0x1060 +#define PCS_V2_RV_WINDOW_SELECT 0x1064 + +/* PCS register entry bit positions and sizes */ +#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 +#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14 +#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2 +#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4 /* SerDes integration register offsets */ #define SIR0_KR_RT_1 0x002c #define SIR0_STATUS 0x0040 #define SIR1_SPEED 0x0000 /* SerDes integration register entry bit positions and sizes */ #define SIR0_KR_RT_1_RESET_INDEX 11 #define SIR0_KR_RT_1_RESET_WIDTH 1 #define SIR0_STATUS_RX_READY_INDEX 0 #define SIR0_STATUS_RX_READY_WIDTH 1 #define SIR0_STATUS_TX_READY_INDEX 8 #define SIR0_STATUS_TX_READY_WIDTH 1 #define SIR1_SPEED_CDR_RATE_INDEX 12 #define SIR1_SPEED_CDR_RATE_WIDTH 4 #define SIR1_SPEED_DATARATE_INDEX 4 #define SIR1_SPEED_DATARATE_WIDTH 2 #define SIR1_SPEED_PLLSEL_INDEX 3 #define SIR1_SPEED_PLLSEL_WIDTH 1 #define SIR1_SPEED_RATECHANGE_INDEX 6 #define SIR1_SPEED_RATECHANGE_WIDTH 1 #define SIR1_SPEED_TXAMP_INDEX 8 #define SIR1_SPEED_TXAMP_WIDTH 4 #define SIR1_SPEED_WORDMODE_INDEX 0 #define SIR1_SPEED_WORDMODE_WIDTH 3 /* SerDes RxTx register offsets */ #define RXTX_REG6 0x0018 #define RXTX_REG20 0x0050 #define RXTX_REG22 0x0058 #define RXTX_REG114 0x01c8 #define RXTX_REG129 0x0204 /* SerDes RxTx register entry bit positions and sizes */ #define RXTX_REG6_RESETB_RXD_INDEX 8 #define RXTX_REG6_RESETB_RXD_WIDTH 1 #define RXTX_REG20_BLWC_ENA_INDEX 2 #define RXTX_REG20_BLWC_ENA_WIDTH 1 #define RXTX_REG114_PQ_REG_INDEX 9 #define RXTX_REG114_PQ_REG_WIDTH 7 #define RXTX_REG129_RXDFE_CONFIG_INDEX 14 #define RXTX_REG129_RXDFE_CONFIG_WIDTH 2 +/* MAC Control register offsets */ +#define XP_PROP_0 0x0000 +#define XP_PROP_1 0x0004 +#define XP_PROP_2 0x0008 +#define XP_PROP_3 0x000c +#define XP_PROP_4 0x0010 +#define XP_PROP_5 0x0014 +#define 
XP_MAC_ADDR_LO 0x0020 +#define XP_MAC_ADDR_HI 0x0024 +#define XP_ECC_ISR 0x0030 +#define XP_ECC_IER 0x0034 +#define XP_ECC_CNT0 0x003c +#define XP_ECC_CNT1 0x0040 +#define XP_DRIVER_INT_REQ 0x0060 +#define XP_DRIVER_INT_RO 0x0064 +#define XP_DRIVER_SCRATCH_0 0x0068 +#define XP_DRIVER_SCRATCH_1 0x006c +#define XP_INT_REISSUE_EN 0x0074 +#define XP_INT_EN 0x0078 +#define XP_I2C_MUTEX 0x0080 +#define XP_MDIO_MUTEX 0x0084 + +/* MAC Control register entry bit positions and sizes */ +#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0 +#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1 +#define XP_DRIVER_INT_RO_STATUS_INDEX 0 +#define XP_DRIVER_INT_RO_STATUS_WIDTH 1 +#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0 +#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8 +#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8 +#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8 +#define XP_ECC_CNT0_RX_DED_INDEX 24 +#define XP_ECC_CNT0_RX_DED_WIDTH 8 +#define XP_ECC_CNT0_RX_SEC_INDEX 16 +#define XP_ECC_CNT0_RX_SEC_WIDTH 8 +#define XP_ECC_CNT0_TX_DED_INDEX 8 +#define XP_ECC_CNT0_TX_DED_WIDTH 8 +#define XP_ECC_CNT0_TX_SEC_INDEX 0 +#define XP_ECC_CNT0_TX_SEC_WIDTH 8 +#define XP_ECC_CNT1_DESC_DED_INDEX 8 +#define XP_ECC_CNT1_DESC_DED_WIDTH 8 +#define XP_ECC_CNT1_DESC_SEC_INDEX 0 +#define XP_ECC_CNT1_DESC_SEC_WIDTH 8 +#define XP_ECC_IER_DESC_DED_INDEX 5 +#define XP_ECC_IER_DESC_DED_WIDTH 1 +#define XP_ECC_IER_DESC_SEC_INDEX 4 +#define XP_ECC_IER_DESC_SEC_WIDTH 1 +#define XP_ECC_IER_RX_DED_INDEX 3 +#define XP_ECC_IER_RX_DED_WIDTH 1 +#define XP_ECC_IER_RX_SEC_INDEX 2 +#define XP_ECC_IER_RX_SEC_WIDTH 1 +#define XP_ECC_IER_TX_DED_INDEX 1 +#define XP_ECC_IER_TX_DED_WIDTH 1 +#define XP_ECC_IER_TX_SEC_INDEX 0 +#define XP_ECC_IER_TX_SEC_WIDTH 1 +#define XP_ECC_ISR_DESC_DED_INDEX 5 +#define XP_ECC_ISR_DESC_DED_WIDTH 1 +#define XP_ECC_ISR_DESC_SEC_INDEX 4 +#define XP_ECC_ISR_DESC_SEC_WIDTH 1 +#define XP_ECC_ISR_RX_DED_INDEX 3 +#define XP_ECC_ISR_RX_DED_WIDTH 1 +#define XP_ECC_ISR_RX_SEC_INDEX 2 +#define XP_ECC_ISR_RX_SEC_WIDTH 1 
+#define XP_ECC_ISR_TX_DED_INDEX 1 +#define XP_ECC_ISR_TX_DED_WIDTH 1 +#define XP_ECC_ISR_TX_SEC_INDEX 0 +#define XP_ECC_ISR_TX_SEC_WIDTH 1 +#define XP_I2C_MUTEX_BUSY_INDEX 31 +#define XP_I2C_MUTEX_BUSY_WIDTH 1 +#define XP_I2C_MUTEX_ID_INDEX 29 +#define XP_I2C_MUTEX_ID_WIDTH 2 +#define XP_I2C_MUTEX_ACTIVE_INDEX 0 +#define XP_I2C_MUTEX_ACTIVE_WIDTH 1 +#define XP_MAC_ADDR_HI_VALID_INDEX 31 +#define XP_MAC_ADDR_HI_VALID_WIDTH 1 +#define XP_PROP_0_CONN_TYPE_INDEX 28 +#define XP_PROP_0_CONN_TYPE_WIDTH 3 +#define XP_PROP_0_MDIO_ADDR_INDEX 16 +#define XP_PROP_0_MDIO_ADDR_WIDTH 5 +#define XP_PROP_0_PORT_ID_INDEX 0 +#define XP_PROP_0_PORT_ID_WIDTH 8 +#define XP_PROP_0_PORT_MODE_INDEX 8 +#define XP_PROP_0_PORT_MODE_WIDTH 4 +#define XP_PROP_0_PORT_SPEEDS_INDEX 23 +#define XP_PROP_0_PORT_SPEEDS_WIDTH 4 +#define XP_PROP_1_MAX_RX_DMA_INDEX 24 +#define XP_PROP_1_MAX_RX_DMA_WIDTH 5 +#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8 +#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5 +#define XP_PROP_1_MAX_TX_DMA_INDEX 16 +#define XP_PROP_1_MAX_TX_DMA_WIDTH 5 +#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0 +#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5 +#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16 +#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16 +#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0 +#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16 +#define XP_PROP_3_GPIO_MASK_INDEX 28 +#define XP_PROP_3_GPIO_MASK_WIDTH 4 +#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20 +#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4 +#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16 +#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4 +#define XP_PROP_3_GPIO_RX_LOS_INDEX 24 +#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4 +#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12 +#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4 +#define XP_PROP_3_GPIO_ADDR_INDEX 8 +#define XP_PROP_3_GPIO_ADDR_WIDTH 3 +#define XP_PROP_3_MDIO_RESET_INDEX 0 +#define XP_PROP_3_MDIO_RESET_WIDTH 2 +#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8 +#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3 +#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12 +#define 
XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4 +#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4 +#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2 +#define XP_PROP_4_MUX_ADDR_HI_INDEX 8 +#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5 +#define XP_PROP_4_MUX_ADDR_LO_INDEX 0 +#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3 +#define XP_PROP_4_MUX_CHAN_INDEX 4 +#define XP_PROP_4_MUX_CHAN_WIDTH 3 +#define XP_PROP_4_REDRV_ADDR_INDEX 16 +#define XP_PROP_4_REDRV_ADDR_WIDTH 7 +#define XP_PROP_4_REDRV_IF_INDEX 23 +#define XP_PROP_4_REDRV_IF_WIDTH 1 +#define XP_PROP_4_REDRV_LANE_INDEX 24 +#define XP_PROP_4_REDRV_LANE_WIDTH 3 +#define XP_PROP_4_REDRV_MODEL_INDEX 28 +#define XP_PROP_4_REDRV_MODEL_WIDTH 3 +#define XP_PROP_4_REDRV_PRESENT_INDEX 31 +#define XP_PROP_4_REDRV_PRESENT_WIDTH 1 + +/* I2C Control register offsets */ +#define IC_CON 0x0000 +#define IC_TAR 0x0004 +#define IC_DATA_CMD 0x0010 +#define IC_INTR_STAT 0x002c +#define IC_INTR_MASK 0x0030 +#define IC_RAW_INTR_STAT 0x0034 +#define IC_CLR_INTR 0x0040 +#define IC_CLR_TX_ABRT 0x0054 +#define IC_CLR_STOP_DET 0x0060 +#define IC_ENABLE 0x006c +#define IC_TXFLR 0x0074 +#define IC_RXFLR 0x0078 +#define IC_TX_ABRT_SOURCE 0x0080 +#define IC_ENABLE_STATUS 0x009c +#define IC_COMP_PARAM_1 0x00f4 + +/* I2C Control register entry bit positions and sizes */ +#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2 +#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2 +#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8 +#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8 +#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16 +#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8 +#define IC_CON_MASTER_MODE_INDEX 0 +#define IC_CON_MASTER_MODE_WIDTH 1 +#define IC_CON_RESTART_EN_INDEX 5 +#define IC_CON_RESTART_EN_WIDTH 1 +#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9 +#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1 +#define IC_CON_SLAVE_DISABLE_INDEX 6 +#define IC_CON_SLAVE_DISABLE_WIDTH 1 +#define IC_CON_SPEED_INDEX 1 +#define IC_CON_SPEED_WIDTH 2 +#define IC_DATA_CMD_CMD_INDEX 8 +#define IC_DATA_CMD_CMD_WIDTH 1 
+#define IC_DATA_CMD_STOP_INDEX 9 +#define IC_DATA_CMD_STOP_WIDTH 1 +#define IC_ENABLE_ABORT_INDEX 1 +#define IC_ENABLE_ABORT_WIDTH 1 +#define IC_ENABLE_EN_INDEX 0 +#define IC_ENABLE_EN_WIDTH 1 +#define IC_ENABLE_STATUS_EN_INDEX 0 +#define IC_ENABLE_STATUS_EN_WIDTH 1 +#define IC_INTR_MASK_TX_EMPTY_INDEX 4 +#define IC_INTR_MASK_TX_EMPTY_WIDTH 1 +#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2 +#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1 +#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9 +#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1 +#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6 +#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1 +#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4 +#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1 + +/* I2C Control register value */ +#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001 +#define IC_TX_ABRT_ARB_LOST 0x1000 + /* Descriptor/Packet entry bit positions and sizes */ #define RX_PACKET_ERRORS_CRC_INDEX 2 #define RX_PACKET_ERRORS_CRC_WIDTH 1 #define RX_PACKET_ERRORS_FRAME_INDEX 3 #define RX_PACKET_ERRORS_FRAME_WIDTH 1 #define RX_PACKET_ERRORS_LENGTH_INDEX 0 #define RX_PACKET_ERRORS_LENGTH_WIDTH 1 #define RX_PACKET_ERRORS_OVERRUN_INDEX 1 #define RX_PACKET_ERRORS_OVERRUN_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 -#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 -#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2 +#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 #define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7 +#define 
RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_TNP_INDEX 8 +#define RX_PACKET_ATTRIBUTES_TNP_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_INDEX 9 +#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_WIDTH 1 #define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_WIDTH 16 #define RX_NORMAL_DESC2_HL_INDEX 0 #define RX_NORMAL_DESC2_HL_WIDTH 10 +#define RX_NORMAL_DESC2_TNP_INDEX 11 +#define RX_NORMAL_DESC2_TNP_WIDTH 1 +#define RX_NORMAL_DESC2_RPNG_INDEX 14 +#define RX_NORMAL_DESC2_RPNG_WIDTH 1 #define RX_NORMAL_DESC3_CDA_INDEX 27 #define RX_NORMAL_DESC3_CDA_WIDTH 1 #define RX_NORMAL_DESC3_CTXT_INDEX 30 #define RX_NORMAL_DESC3_CTXT_WIDTH 1 #define RX_NORMAL_DESC3_ES_INDEX 15 #define RX_NORMAL_DESC3_ES_WIDTH 1 #define RX_NORMAL_DESC3_ETLT_INDEX 16 #define RX_NORMAL_DESC3_ETLT_WIDTH 4 #define RX_NORMAL_DESC3_FD_INDEX 29 #define RX_NORMAL_DESC3_FD_WIDTH 1 #define RX_NORMAL_DESC3_INTE_INDEX 30 #define RX_NORMAL_DESC3_INTE_WIDTH 1 #define RX_NORMAL_DESC3_L34T_INDEX 20 #define RX_NORMAL_DESC3_L34T_WIDTH 4 #define RX_NORMAL_DESC3_LD_INDEX 28 #define RX_NORMAL_DESC3_LD_WIDTH 1 #define RX_NORMAL_DESC3_OWN_INDEX 31 #define RX_NORMAL_DESC3_OWN_WIDTH 1 #define RX_NORMAL_DESC3_PL_INDEX 0 #define RX_NORMAL_DESC3_PL_WIDTH 14 #define RX_NORMAL_DESC3_RSV_INDEX 26 #define RX_NORMAL_DESC3_RSV_WIDTH 1 #define RX_DESC3_L34T_IPV4_TCP 1 #define RX_DESC3_L34T_IPV4_UDP 2 #define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV4_UNKNOWN 7 #define RX_DESC3_L34T_IPV6_TCP 9 #define RX_DESC3_L34T_IPV6_UDP 10 #define RX_DESC3_L34T_IPV6_ICMP 11 +#define RX_DESC3_L34T_IPV6_UNKNOWN 15 #define RX_CONTEXT_DESC3_TSA_INDEX 4 #define RX_CONTEXT_DESC3_TSA_WIDTH 1 #define RX_CONTEXT_DESC3_TSD_INDEX 6 #define RX_CONTEXT_DESC3_TSD_WIDTH 1 #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0 #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1 #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2 #define 
TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 #define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 #define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_VXLAN_INDEX 4 +#define TX_PACKET_ATTRIBUTES_VXLAN_WIDTH 1 #define TX_CONTEXT_DESC2_MSS_INDEX 0 #define TX_CONTEXT_DESC2_MSS_WIDTH 15 #define TX_CONTEXT_DESC3_CTXT_INDEX 30 #define TX_CONTEXT_DESC3_CTXT_WIDTH 1 #define TX_CONTEXT_DESC3_TCMSSV_INDEX 26 #define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1 #define TX_CONTEXT_DESC3_VLTV_INDEX 16 #define TX_CONTEXT_DESC3_VLTV_WIDTH 1 #define TX_CONTEXT_DESC3_VT_INDEX 0 #define TX_CONTEXT_DESC3_VT_WIDTH 16 #define TX_NORMAL_DESC2_HL_B1L_INDEX 0 #define TX_NORMAL_DESC2_HL_B1L_WIDTH 14 #define TX_NORMAL_DESC2_IC_INDEX 31 #define TX_NORMAL_DESC2_IC_WIDTH 1 #define TX_NORMAL_DESC2_TTSE_INDEX 30 #define TX_NORMAL_DESC2_TTSE_WIDTH 1 #define TX_NORMAL_DESC2_VTIR_INDEX 14 #define TX_NORMAL_DESC2_VTIR_WIDTH 2 #define TX_NORMAL_DESC3_CIC_INDEX 16 #define TX_NORMAL_DESC3_CIC_WIDTH 2 #define TX_NORMAL_DESC3_CPC_INDEX 26 #define TX_NORMAL_DESC3_CPC_WIDTH 2 #define TX_NORMAL_DESC3_CTXT_INDEX 30 #define TX_NORMAL_DESC3_CTXT_WIDTH 1 #define TX_NORMAL_DESC3_FD_INDEX 29 #define TX_NORMAL_DESC3_FD_WIDTH 1 #define TX_NORMAL_DESC3_FL_INDEX 0 #define TX_NORMAL_DESC3_FL_WIDTH 15 #define TX_NORMAL_DESC3_LD_INDEX 28 #define TX_NORMAL_DESC3_LD_WIDTH 1 #define TX_NORMAL_DESC3_OWN_INDEX 31 #define TX_NORMAL_DESC3_OWN_WIDTH 1 #define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19 #define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4 #define TX_NORMAL_DESC3_TCPPL_INDEX 0 #define TX_NORMAL_DESC3_TCPPL_WIDTH 18 #define TX_NORMAL_DESC3_TSE_INDEX 18 #define TX_NORMAL_DESC3_TSE_WIDTH 1 +#define TX_NORMAL_DESC3_VNP_INDEX 23 +#define TX_NORMAL_DESC3_VNP_WIDTH 3 #define TX_NORMAL_DESC2_VLAN_INSERT 0x2 +#define TX_NORMAL_DESC3_VXLAN_PACKET 0x3 /* MDIO undefined or vendor specific registers */ #ifndef MDIO_PMA_10GBR_PMD_CTRL #define MDIO_PMA_10GBR_PMD_CTRL 0x0096 #endif #ifndef MDIO_PMA_10GBR_FECCTRL #define MDIO_PMA_10GBR_FECCTRL 0x00ab #endif 
+#ifndef MDIO_PCS_DIG_CTRL +#define MDIO_PCS_DIG_CTRL 0x8000 +#endif + #ifndef MDIO_AN_XNP #define MDIO_AN_XNP 0x0016 #endif #ifndef MDIO_AN_LPX #define MDIO_AN_LPX 0x0019 #endif #ifndef MDIO_AN_COMP_STAT #define MDIO_AN_COMP_STAT 0x0030 #endif #ifndef MDIO_AN_INTMASK #define MDIO_AN_INTMASK 0x8001 #endif #ifndef MDIO_AN_INT #define MDIO_AN_INT 0x8002 #endif +#ifndef MDIO_VEND2_AN_ADVERTISE +#define MDIO_VEND2_AN_ADVERTISE 0x0004 +#endif + +#ifndef MDIO_VEND2_AN_LP_ABILITY +#define MDIO_VEND2_AN_LP_ABILITY 0x0005 +#endif + +#ifndef MDIO_VEND2_AN_CTRL +#define MDIO_VEND2_AN_CTRL 0x8001 +#endif + +#ifndef MDIO_VEND2_AN_STAT +#define MDIO_VEND2_AN_STAT 0x8002 +#endif + +#ifndef MDIO_VEND2_PMA_CDR_CONTROL +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif +#ifndef MDIO_VEND2_CTRL1_AN_ENABLE +#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12) +#endif + +#ifndef MDIO_VEND2_CTRL1_AN_RESTART +#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9) +#endif + +#ifndef MDIO_VEND2_CTRL1_SS6 +#define MDIO_VEND2_CTRL1_SS6 BIT(6) +#endif + +#ifndef MDIO_VEND2_CTRL1_SS13 +#define MDIO_VEND2_CTRL1_SS13 BIT(13) +#endif + /* MDIO mask values */ +#define XGBE_AN_CL73_INT_CMPLT BIT(0) +#define XGBE_AN_CL73_INC_LINK BIT(1) +#define XGBE_AN_CL73_PG_RCV BIT(2) +#define XGBE_AN_CL73_INT_MASK 0x07 + #define XGBE_XNP_MCF_NULL_MESSAGE 0x001 #define XGBE_XNP_ACK_PROCESSED BIT(12) #define XGBE_XNP_MP_FORMATTED BIT(13) #define XGBE_XNP_NP_EXCHANGE BIT(15) #define XGBE_KR_TRAINING_START BIT(0) #define XGBE_KR_TRAINING_ENABLE BIT(1) +#define XGBE_PCS_CL37_BP BIT(12) + +#define XGBE_AN_CL37_INT_CMPLT BIT(0) +#define XGBE_AN_CL37_INT_MASK 0x01 + +#define XGBE_AN_CL37_HD_MASK 0x40 +#define XGBE_AN_CL37_FD_MASK 0x20 + +#define XGBE_AN_CL37_PCS_MODE_MASK 0x06 +#define XGBE_AN_CL37_PCS_MODE_BASEX 0x00 +#define XGBE_AN_CL37_PCS_MODE_SGMII 0x04 +#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 +#define XGBE_AN_CL37_MII_CTRL_8BIT 
0x0100 + +#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01 +#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 +#define XGBE_PMA_CDR_TRACK_EN_ON 0x01 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable * * The set macro will clear the current bit field value within the * variable and then set the bit field of the variable to the * specified value */ #define GET_BITS(_var, _index, _width) \ (((_var) >> (_index)) & ((0x1 << (_width)) - 1)) #define SET_BITS(_var, _index, _width, _val) \ do { \ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \ } while (0) #define GET_BITS_LE(_var, _index, _width) \ ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1)) #define SET_BITS_LE(_var, _index, _width, _val) \ do { \ (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \ (_var) |= cpu_to_le32((((_val) & \ ((0x1 << (_width)) - 1)) << (_index))); \ } while (0) /* Bit setting and getting macros based on register fields * The get macro uses the bit field definitions formed using the input * names to extract the current bit field value from within the * variable * * The set macro uses the bit field definitions formed using the input * names to set the bit field of the variable to the specified value */ #define XGMAC_GET_BITS(_var, _prefix, _field) \ GET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH) #define XGMAC_SET_BITS(_var, _prefix, _field, _val) \ SET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) #define XGMAC_GET_BITS_LE(_var, _prefix, _field) \ GET_BITS_LE((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH) #define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \ SET_BITS_LE((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) /* Macros for reading or writing registers * The ioread macros will get bit fields or full values using the * register 
definitions formed using the input names * * The iowrite macros will set bit fields or full values using the * register definitions formed using the input names */ #define XGMAC_IOREAD(_pdata, _reg) \ bus_read_4((_pdata)->xgmac_res, _reg) #define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XGMAC_IOREAD((_pdata), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XGMAC_IOWRITE(_pdata, _reg, _val) \ bus_write_4((_pdata)->xgmac_res, _reg, (_val)) #define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ - u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \ + uint32_t reg_val = XGMAC_IOREAD((_pdata), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XGMAC_IOWRITE((_pdata), _reg, reg_val); \ } while (0) /* Macros for reading or writing MTL queue or traffic class registers * Similar to the standard read and write macros except that the * base register value is calculated by the queue or traffic class number */ #define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \ bus_read_4((_pdata)->xgmac_res, \ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg) #define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \ GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \ bus_write_4((_pdata)->xgmac_res, \ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg, (_val)) #define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \ do { \ - u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \ + uint32_t reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \ } while (0) /* Macros for reading or writing DMA channel registers * Similar to the standard read and write macros except that the * base register value is obtained from the ring */ #define XGMAC_DMA_IOREAD(_channel, _reg) \ bus_space_read_4((_channel)->dma_tag, 
(_channel)->dma_handle, _reg) #define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \ GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \ bus_space_write_4((_channel)->dma_tag, (_channel)->dma_handle, \ _reg, (_val)) #define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \ do { \ - u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \ + uint32_t reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \ } while (0) /* Macros for building, reading or writing register values or bits * within the register values of XPCS registers. */ -#define XPCS_IOWRITE(_pdata, _off, _val) \ +#define XPCS_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XPCS_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XPCS32_IOWRITE(_pdata, _off, _val) \ bus_write_4((_pdata)->xpcs_res, (_off), _val) -#define XPCS_IOREAD(_pdata, _off) \ +#define XPCS32_IOREAD(_pdata, _off) \ bus_read_4((_pdata)->xpcs_res, (_off)) +#define XPCS16_IOWRITE(_pdata, _off, _val) \ + bus_write_2((_pdata)->xpcs_res, (_off), _val) + +#define XPCS16_IOREAD(_pdata, _off) \ + bus_read_2((_pdata)->xpcs_res, (_off)) + /* Macros for building, reading or writing register values or bits * within the register values of SerDes integration registers. 
*/ #define XSIR_GET_BITS(_var, _prefix, _field) \ GET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH) #define XSIR_SET_BITS(_var, _prefix, _field, _val) \ SET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) #define XSIR0_IOREAD(_pdata, _reg) \ bus_read_2((_pdata)->sir0_res, _reg) #define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XSIR0_IOWRITE(_pdata, _reg, _val) \ bus_write_2((_pdata)->sir0_res, _reg, (_val)) #define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ - u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \ + uint16_t reg_val = XSIR0_IOREAD((_pdata), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XSIR0_IOWRITE((_pdata), _reg, reg_val); \ } while (0) #define XSIR1_IOREAD(_pdata, _reg) \ bus_read_2((_pdata)->sir1_res, _reg) #define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XSIR1_IOWRITE(_pdata, _reg, _val) \ bus_write_2((_pdata)->sir1_res, _reg, (_val)) #define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ - u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \ + uint16_t reg_val = XSIR1_IOREAD((_pdata), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XSIR1_IOWRITE((_pdata), _reg, reg_val); \ } while (0) /* Macros for building, reading or writing register values or bits * within the register values of SerDes RxTx registers. 
*/ #define XRXTX_IOREAD(_pdata, _reg) \ bus_read_2((_pdata)->rxtx_res, _reg) #define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XRXTX_IOWRITE(_pdata, _reg, _val) \ bus_write_2((_pdata)->rxtx_res, _reg, (_val)) #define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ - u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \ + uint16_t reg_val = XRXTX_IOREAD((_pdata), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XRXTX_IOWRITE((_pdata), _reg, reg_val); \ } while (0) +/* Macros for building, reading or writing register values or bits + * within the register values of MAC Control registers. + */ +#define XP_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XP_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XP_IOREAD(_pdata, _reg) \ + bus_read_4((_pdata)->xgmac_res, _reg + XGBE_MAC_PROP_OFFSET) + +#define XP_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XP_IOREAD((_pdata), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XP_IOWRITE(_pdata, _reg, _val) \ + bus_write_4((_pdata)->xgmac_res, _reg + XGBE_MAC_PROP_OFFSET, \ + (_val)) + +#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + uint32_t reg_val = XP_IOREAD((_pdata), (_reg)); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XP_IOWRITE((_pdata), (_reg), reg_val); \ +} while (0) + +/* Macros for building, reading or writing register values or bits + * within the register values of I2C Control registers. 
+ */ +#define XI2C_GET_BITS(_var, _prefix, _field) \ + GET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH) + +#define XI2C_SET_BITS(_var, _prefix, _field, _val) \ + SET_BITS((_var), \ + _prefix##_##_field##_INDEX, \ + _prefix##_##_field##_WIDTH, (_val)) + +#define XI2C_IOREAD(_pdata, _reg) \ + bus_read_4((_pdata)->xgmac_res, _reg + XGBE_I2C_CTRL_OFFSET) + +#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \ + GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH) + +#define XI2C_IOWRITE(_pdata, _reg, _val) \ + bus_write_4((_pdata)->xgmac_res, _reg + XGBE_I2C_CTRL_OFFSET, \ + (_val)) + +#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \ +do { \ + uint32_t reg_val = XI2C_IOREAD((_pdata), (_reg)); \ + SET_BITS(reg_val, \ + _reg##_##_field##_INDEX, \ + _reg##_##_field##_WIDTH, (_val)); \ + XI2C_IOWRITE((_pdata), (_reg), reg_val); \ +} while (0) + /* Macros for building, reading or writing register values or bits * using MDIO. Different from above because of the use of standardized * Linux include values. No shifting is performed with the bit * operations, everything works on mask values. 
*/ #define XMDIO_READ(_pdata, _mmd, _reg) \ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff))) #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \ (XMDIO_READ((_pdata), _mmd, _reg) & _mask) #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val))) #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \ do { \ - u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \ + uint32_t mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \ mmd_val &= ~_mask; \ mmd_val |= (_val); \ XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \ } while (0) #endif diff --git a/sys/dev/axgbe/xgbe-dcb.c b/sys/dev/axgbe/xgbe-dcb.c new file mode 100644 index 000000000000..c64a31f0696f --- /dev/null +++ b/sys/dev/axgbe/xgbe-dcb.c @@ -0,0 +1,272 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. 
unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. 
+ * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#include +__FBSDID("$FreeBSD$"); + +#include "xgbe.h" + +#if 0 +static int xgbe_dcb_ieee_getets(struct xgbe_prv_data *pdata, + struct ieee_ets *ets) +{ + /* Set number of supported traffic classes */ + ets->ets_cap = pdata->hw_feat.tc_cnt; + + if (pdata->ets) { + ets->cbs = pdata->ets->cbs; + memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw, + sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, pdata->ets->tc_tsa, + sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, pdata->ets->prio_tc, + sizeof(ets->prio_tc)); + } + + return (0); +} + +static int xgbe_dcb_ieee_setets(struct xgbe_prv_data *pdata, + struct ieee_ets *ets) +{ + unsigned int i, tc_ets, tc_ets_weight; + u8 max_tc = 0; + + tc_ets = 0; + tc_ets_weight = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + axgbe_printf(1, + "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i, + ets->tc_tx_bw[i], ets->tc_rx_bw[i], + ets->tc_tsa[i]); + axgbe_printf(1, "PRIO%u: TC=%hhu\n", i, + ets->prio_tc[i]); + + max_tc = max_t(u8, max_tc, ets->prio_tc[i]); + if ((ets->tc_tx_bw[i] || ets->tc_tsa[i])) + max_tc = max_t(u8, max_tc, i); + + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; + case IEEE_8021QAZ_TSA_ETS: + tc_ets = 1; + tc_ets_weight += ets->tc_tx_bw[i]; + break; + default: + axgbe_error( + "unsupported TSA algorithm (%hhu)\n", + ets->tc_tsa[i]); + return (-EINVAL); + } + } + + /* Check maximum traffic class requested */ + if (max_tc >= pdata->hw_feat.tc_cnt) { + axgbe_error( + "exceeded number of supported traffic classes\n"); + return (-EINVAL); + } + + /* Weights must add up to 100% */ + if (tc_ets && (tc_ets_weight != 100)) { + axgbe_error( + "sum of ETS algorithm weights is not 100 (%u)\n", + tc_ets_weight); + return (-EINVAL); + } + + if (!pdata->ets) { + pdata->ets = (struct ieee_ets *)malloc(sizeof(struct ieee_ets), + M_AXGBE, M_NOWAIT); //TODO - when to free? 
+ + if (!pdata->ets) + return (-ENOMEM); + } + + pdata->num_tcs = max_tc + 1; + memcpy(pdata->ets, ets, sizeof(*pdata->ets)); + + pdata->hw_if.config_dcb_tc(pdata); + + return (0); +} + +static int xgbe_dcb_ieee_getpfc(struct xgbe_prv_data *pdata, + struct ieee_pfc *pfc) +{ + + /* Set number of supported PFC traffic classes */ + pfc->pfc_cap = pdata->hw_feat.tc_cnt; + + if (pdata->pfc) { + pfc->pfc_en = pdata->pfc->pfc_en; + pfc->mbc = pdata->pfc->mbc; + pfc->delay = pdata->pfc->delay; + } + + return (0); +} + +static int xgbe_dcb_ieee_setpfc(struct xgbe_prv_data *pdata, + struct ieee_pfc *pfc) +{ + + axgbe_printf(1, + "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%d\n", + pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay); + + /* Check PFC for supported number of traffic classes */ + if (pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1)) { + axgbe_error( + "PFC requested for unsupported traffic class\n"); + return (-EINVAL); + } + + if (!pdata->pfc) { + pdata->pfc = (struct ieee_pfc *)malloc(sizeof(struct ieee_pfc), + M_AXGBE, M_NOWAIT); //TODO - when to free? + + if (!pdata->pfc) + return (-ENOMEM); + } + + memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc)); + + pdata->hw_if.config_dcb_pfc(pdata); + + return (0); +} + +static u8 xgbe_dcb_getdcbx(struct xgbe_prv_data *pdata) +{ + return (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE); +} + +static u8 xgbe_dcb_setdcbx(struct xgbe_prv_data *pdata, u8 dcbx) +{ + u8 support = xgbe_dcb_getdcbx(pdata); + + axgbe_printf(1, "DCBX=%#hhx\n", dcbx); + + if (dcbx & ~support) + return (1); + + if ((dcbx & support) != support) + return (1); + + return (0); +} +#endif diff --git a/sys/dev/axgbe/xgbe-desc.c b/sys/dev/axgbe/xgbe-desc.c index a2f1f98881e9..f74d600b301f 100644 --- a/sys/dev/axgbe/xgbe-desc.c +++ b/sys/dev/axgbe/xgbe-desc.c @@ -1,539 +1,211 @@ /* * AMD 10Gb Ethernet driver * + * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. 
+ * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. - * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. 
* * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. - * All rights reserved. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "xgbe.h" #include "xgbe-common.h" -static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *); - -static void xgbe_free_ring(struct xgbe_prv_data *pdata, - struct xgbe_ring *ring) -{ - struct xgbe_ring_data *rdata; - unsigned int i; - - if (!ring) - return; - - bus_dmamap_destroy(ring->mbuf_dmat, ring->mbuf_map); - bus_dma_tag_destroy(ring->mbuf_dmat); - - ring->mbuf_map = NULL; - ring->mbuf_dmat = NULL; - - if (ring->rdata) { - for (i = 0; i < ring->rdesc_count; i++) { - rdata = XGBE_GET_DESC_DATA(ring, i); - xgbe_unmap_rdata(pdata, rdata); - } - - free(ring->rdata, M_AXGBE); - ring->rdata = NULL; - } - - bus_dmamap_unload(ring->rdesc_dmat, ring->rdesc_map); - bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map); - bus_dma_tag_destroy(ring->rdesc_dmat); - - ring->rdesc_map = NULL; - ring->rdesc_dmat = NULL; - ring->rdesc = NULL; -} - -static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata) -{ - struct xgbe_channel *channel; - unsigned int i; - - DBGPR("-->xgbe_free_ring_resources\n"); - - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - xgbe_free_ring(pdata, channel->tx_ring); - xgbe_free_ring(pdata, channel->rx_ring); - } - - DBGPR("<--xgbe_free_ring_resources\n"); -} - -static void xgbe_ring_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, - int error) -{ - if (error) - return; - *(bus_addr_t *) arg = segs->ds_addr; -} - -static int 
xgbe_init_ring(struct xgbe_prv_data *pdata, - struct xgbe_ring *ring, unsigned int rdesc_count) -{ - bus_size_t len; - int err, flags; - - DBGPR("-->xgbe_init_ring\n"); - - if (!ring) - return 0; - - flags = 0; - if (pdata->coherent) - flags = BUS_DMA_COHERENT; - - /* Descriptors */ - ring->rdesc_count = rdesc_count; - len = sizeof(struct xgbe_ring_desc) * rdesc_count; - err = bus_dma_tag_create(pdata->dmat, 512, 0, BUS_SPACE_MAXADDR, - BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, flags, NULL, NULL, - &ring->rdesc_dmat); - if (err != 0) { - printf("Unable to create the DMA tag: %d\n", err); - return -err; - } - - err = bus_dmamem_alloc(ring->rdesc_dmat, (void **)&ring->rdesc, - BUS_DMA_WAITOK | BUS_DMA_COHERENT, &ring->rdesc_map); - if (err != 0) { - bus_dma_tag_destroy(ring->rdesc_dmat); - printf("Unable to allocate DMA memory: %d\n", err); - return -err; - } - err = bus_dmamap_load(ring->rdesc_dmat, ring->rdesc_map, ring->rdesc, - len, xgbe_ring_dmamap_cb, &ring->rdesc_paddr, 0); - if (err != 0) { - bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map); - bus_dma_tag_destroy(ring->rdesc_dmat); - printf("Unable to load DMA memory\n"); - return -err; - } - - /* Descriptor information */ - ring->rdata = malloc(rdesc_count * sizeof(struct xgbe_ring_data), - M_AXGBE, M_WAITOK | M_ZERO); - - /* Create the space DMA tag for mbufs */ - err = bus_dma_tag_create(pdata->dmat, 1, 0, BUS_SPACE_MAXADDR, - BUS_SPACE_MAXADDR, NULL, NULL, XGBE_TX_MAX_BUF_SIZE * rdesc_count, - rdesc_count, XGBE_TX_MAX_BUF_SIZE, flags, NULL, NULL, - &ring->mbuf_dmat); - if (err != 0) - return -err; - - err = bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map); - if (err != 0) - return -err; - - DBGPR("<--xgbe_init_ring\n"); - - return 0; -} - -static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata) -{ - struct xgbe_channel *channel; - unsigned int i; - int ret; - - DBGPR("-->xgbe_alloc_ring_resources\n"); - - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; 
i++, channel++) { - ret = xgbe_init_ring(pdata, channel->tx_ring, - pdata->tx_desc_count); - if (ret) { - printf("error initializing Tx ring\n"); - goto err_ring; - } - - ret = xgbe_init_ring(pdata, channel->rx_ring, - pdata->rx_desc_count); - if (ret) { - printf("error initializing Rx ring\n"); - goto err_ring; - } - } - - DBGPR("<--xgbe_alloc_ring_resources\n"); - - return 0; - -err_ring: - xgbe_free_ring_resources(pdata); - - return ret; -} - -static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, - struct xgbe_ring *ring, - struct xgbe_ring_data *rdata) -{ - bus_dmamap_t mbuf_map; - bus_dma_segment_t segs[2]; - struct mbuf *m0, *m1; - int err, nsegs; - - m0 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES); - if (m0 == NULL) - return (-ENOBUFS); - - m1 = m_getjcl(M_NOWAIT, MT_DATA, 0, MCLBYTES); - if (m1 == NULL) { - m_freem(m0); - return (-ENOBUFS); - } - - m0->m_next = m1; - m0->m_flags |= M_PKTHDR; - m0->m_len = MHLEN; - m0->m_pkthdr.len = MHLEN + MCLBYTES; - - m1->m_len = MCLBYTES; - m1->m_next = NULL; - m1->m_pkthdr.len = MCLBYTES; - - err = bus_dmamap_create(ring->mbuf_dmat, 0, &mbuf_map); - if (err != 0) { - m_freem(m0); - return (-err); - } - - err = bus_dmamap_load_mbuf_sg(ring->mbuf_dmat, mbuf_map, m0, segs, - &nsegs, BUS_DMA_NOWAIT); - if (err != 0) { - m_freem(m0); - bus_dmamap_destroy(ring->mbuf_dmat, mbuf_map); - return (-err); - } - - KASSERT(nsegs == 2, - ("xgbe_map_rx_buffer: Unable to handle multiple segments %d", - nsegs)); - - rdata->mb = m0; - rdata->mbuf_free = 0; - rdata->mbuf_dmat = ring->mbuf_dmat; - rdata->mbuf_map = mbuf_map; - rdata->mbuf_hdr_paddr = segs[0].ds_addr; - rdata->mbuf_data_paddr = segs[1].ds_addr; - - return 0; -} - -static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) +static void +xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; 
bus_addr_t rdesc_paddr; unsigned int i, j; DBGPR("-->xgbe_wrapper_tx_descriptor_init\n"); - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { + for (i = 0; i < pdata->channel_count; i++) { + + channel = pdata->channel[i]; + ring = channel->tx_ring; if (!ring) break; rdesc = ring->rdesc; rdesc_paddr = ring->rdesc_paddr; for (j = 0; j < ring->rdesc_count; j++) { rdata = XGBE_GET_DESC_DATA(ring, j); rdata->rdesc = rdesc; rdata->rdata_paddr = rdesc_paddr; rdesc++; rdesc_paddr += sizeof(struct xgbe_ring_desc); } ring->cur = 0; ring->dirty = 0; memset(&ring->tx, 0, sizeof(ring->tx)); hw_if->tx_desc_init(channel); } DBGPR("<--xgbe_wrapper_tx_descriptor_init\n"); } -static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) +static void +xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_ring_desc *rdesc; struct xgbe_ring_data *rdata; bus_addr_t rdesc_paddr; unsigned int i, j; DBGPR("-->xgbe_wrapper_rx_descriptor_init\n"); - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { + for (i = 0; i < pdata->channel_count; i++) { + + channel = pdata->channel[i]; + ring = channel->rx_ring; if (!ring) break; rdesc = ring->rdesc; rdesc_paddr = ring->rdesc_paddr; for (j = 0; j < ring->rdesc_count; j++) { rdata = XGBE_GET_DESC_DATA(ring, j); rdata->rdesc = rdesc; rdata->rdata_paddr = rdesc_paddr; - if (xgbe_map_rx_buffer(pdata, ring, rdata)) - break; - rdesc++; rdesc_paddr += sizeof(struct xgbe_ring_desc); } ring->cur = 0; ring->dirty = 0; hw_if->rx_desc_init(channel); } } -static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata, - struct xgbe_ring_data *rdata) -{ - - if (rdata->mbuf_map != NULL) - bus_dmamap_destroy(rdata->mbuf_dmat, rdata->mbuf_map); - - if (rdata->mbuf_free) - m_freem(rdata->mb); - - rdata->mb = NULL; - rdata->mbuf_free = 0; - rdata->mbuf_hdr_paddr = 0; - 
rdata->mbuf_data_paddr = 0; - rdata->mbuf_len = 0; - - memset(&rdata->tx, 0, sizeof(rdata->tx)); - memset(&rdata->rx, 0, sizeof(rdata->rx)); -} - -struct xgbe_map_tx_skb_data { - struct xgbe_ring *ring; - struct xgbe_packet_data *packet; - unsigned int cur_index; -}; - -static void xgbe_map_tx_skb_cb(void *callback_arg, bus_dma_segment_t *segs, - int nseg, bus_size_t mapsize, int error) -{ - struct xgbe_map_tx_skb_data *data; - struct xgbe_ring_data *rdata; - struct xgbe_ring *ring; - int i; - - if (error != 0) - return; - - data = callback_arg; - ring = data->ring; - - for (i = 0; i < nseg; i++) { - rdata = XGBE_GET_DESC_DATA(ring, data->cur_index); - - KASSERT(segs[i].ds_len <= XGBE_TX_MAX_BUF_SIZE, - ("%s: Segment size is too large %ld > %d", __func__, - segs[i].ds_len, XGBE_TX_MAX_BUF_SIZE)); - - if (i == 0) { - rdata->mbuf_dmat = ring->mbuf_dmat; - bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map); - } - - rdata->mbuf_hdr_paddr = 0; - rdata->mbuf_data_paddr = segs[i].ds_addr; - rdata->mbuf_len = segs[i].ds_len; - - data->packet->length += rdata->mbuf_len; - - data->cur_index++; - } -} - -static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct mbuf *m) +void +xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) { - struct xgbe_ring *ring = channel->tx_ring; - struct xgbe_map_tx_skb_data cbdata; - struct xgbe_ring_data *rdata; - struct xgbe_packet_data *packet; - unsigned int start_index, cur_index; - int err; - DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur); - - start_index = ring->cur; - cur_index = ring->cur; - - packet = &ring->packet_data; - packet->rdesc_count = 0; - packet->length = 0; - - cbdata.ring = ring; - cbdata.packet = packet; - cbdata.cur_index = cur_index; - - err = bus_dmamap_load_mbuf(ring->mbuf_dmat, ring->mbuf_map, m, - xgbe_map_tx_skb_cb, &cbdata, BUS_DMA_NOWAIT); - if (err != 0) /* TODO: Undo the mapping */ - return (-err); - - cur_index = cbdata.cur_index; - - /* Save the mbuf address in the last entry. 
We always have some data - * that has been mapped so rdata is always advanced past the last - * piece of mapped data - use the entry pointed to by cur_index - 1. - */ - rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1); - rdata->mb = m; - rdata->mbuf_free = 1; - - /* Save the number of descriptor entries used */ - packet->rdesc_count = cur_index - start_index; - - DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count); - - return packet->rdesc_count; -} - -void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if) -{ - DBGPR("-->xgbe_init_function_ptrs_desc\n"); - - desc_if->alloc_ring_resources = xgbe_alloc_ring_resources; - desc_if->free_ring_resources = xgbe_free_ring_resources; - desc_if->map_tx_skb = xgbe_map_tx_skb; - desc_if->map_rx_buffer = xgbe_map_rx_buffer; - desc_if->unmap_rdata = xgbe_unmap_rdata; desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init; desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init; - - DBGPR("<--xgbe_init_function_ptrs_desc\n"); } diff --git a/sys/dev/axgbe/xgbe-dev.c b/sys/dev/axgbe/xgbe-dev.c index 3a0c65cfa7c9..86e2bd2c9b74 100644 --- a/sys/dev/axgbe/xgbe-dev.c +++ b/sys/dev/axgbe/xgbe-dev.c @@ -1,2307 +1,2845 @@ /* * AMD 10Gb Ethernet driver * + * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. + * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
* * * License 2: Modified BSD * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. 
* * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); -#include -#include - #include "xgbe.h" #include "xgbe-common.h" #include -#include -static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, - unsigned int usec) +static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata) +{ + return (if_getmtu(pdata->netdev) + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); +} + +static unsigned int +xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec) { unsigned long rate; unsigned int ret; - DBGPR("-->xgbe_usec_to_riwt\n"); - rate = pdata->sysclk_rate; /* * Convert the input usec value to the watchdog timer value. Each * watchdog timer value is equivalent to 256 clock cycles. * Calculate the required value as: * ( usec * ( system_clock_mhz / 10^6 ) / 256 */ ret = (usec * (rate / 1000000)) / 256; - DBGPR("<--xgbe_usec_to_riwt\n"); - - return ret; + return (ret); } -static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, - unsigned int riwt) +static unsigned int +xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, unsigned int riwt) { unsigned long rate; unsigned int ret; - DBGPR("-->xgbe_riwt_to_usec\n"); - rate = pdata->sysclk_rate; /* * Convert the input watchdog timer value to the usec value. Each * watchdog timer value is equivalent to 256 clock cycles. 
* Calculate the required value as: * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) */ ret = (riwt * 256) / (rate / 1000000); - DBGPR("<--xgbe_riwt_to_usec\n"); - - return ret; + return (ret); } -static int xgbe_config_pblx8(struct xgbe_prv_data *pdata) +static int +xgbe_config_pbl_val(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; + unsigned int pblx8, pbl; unsigned int i; - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8, - pdata->pblx8); + pblx8 = DMA_PBL_X8_DISABLE; + pbl = pdata->pbl; - return 0; -} - -static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata) -{ - return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL); -} - -static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata) -{ - struct xgbe_channel *channel; - unsigned int i; - - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) - break; - - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL, - pdata->tx_pbl); + if (pdata->pbl > 32) { + pblx8 = DMA_PBL_X8_ENABLE; + pbl >>= 3; } - return 0; -} - -static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata) -{ - return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL); -} - -static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata) -{ - struct xgbe_channel *channel; - unsigned int i; + for (i = 0; i < pdata->channel_count; i++) { + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8, + pblx8); - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->rx_ring) - break; + if (pdata->channel[i]->tx_ring) + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, + PBL, pbl); - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL, - pdata->rx_pbl); + if (pdata->channel[i]->rx_ring) + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, + PBL, pbl); } - return 0; + return (0); } -static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata) 
+static int +xgbe_config_osp_mode(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->tx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP, - pdata->tx_osp_mode); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP, + pdata->tx_osp_mode); } - return 0; + return (0); } -static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val) +static int +xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); - return 0; + return (0); } -static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val) +static int +xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); - return 0; + return (0); } -static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, - unsigned int val) +static int +xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); - return 0; + return (0); } -static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, - unsigned int val) +static int +xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); - return 0; + return (0); } -static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) +static int +xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, 
channel++) { - if (!channel->rx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT, - pdata->rx_riwt); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT, + pdata->rx_riwt); } - return 0; + return (0); } -static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) +static int +xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) { - return 0; + return (0); } -static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) +static void +xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->rx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ, - pdata->rx_buf_size); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ, + pdata->rx_buf_size); } } -static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) +static void +xgbe_config_tso_mode(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->tx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1); + axgbe_printf(0, "Enabling TSO in channel %d\n", i); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1); } } -static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) +static void +xgbe_config_sph_mode(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->rx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, 
SPH, 1); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); } XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); } -static int xgbe_disable_rss(struct xgbe_prv_data *pdata) +static int +xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, + unsigned int index, unsigned int val) +{ + unsigned int wait; + int ret = 0; + + mtx_lock(&pdata->rss_mutex); + + if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { + ret = -EBUSY; + goto unlock; + } + + XGMAC_IOWRITE(pdata, MAC_RSSDR, val); + + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); + + wait = 1000; + while (wait--) { + if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) + goto unlock; + + DELAY(1000); + } + + ret = -EBUSY; + +unlock: + mtx_unlock(&pdata->rss_mutex); + + return (ret); +} + +static int +xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) +{ + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(uint32_t); + unsigned int *key = (unsigned int *)&pdata->rss_key; + int ret; + + while (key_regs--) { + ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, + key_regs, *key++); + if (ret) + return (ret); + } + + return (0); +} + +static int +xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata) +{ + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { + ret = xgbe_write_rss_reg(pdata, XGBE_RSS_LOOKUP_TABLE_TYPE, i, + pdata->rss_table[i]); + if (ret) + return (ret); + } + + return (0); +} + +static int +xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const uint8_t *key) +{ + memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); + + return (xgbe_write_rss_hash_key(pdata)); +} + +static int +xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, const uint32_t *table) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, 
DMCH, table[i]); + + return (xgbe_write_rss_lookup_table(pdata)); +} + +static int +xgbe_enable_rss(struct xgbe_prv_data *pdata) +{ + int ret; + + if (!pdata->hw_feat.rss) + return (-EOPNOTSUPP); + + /* Program the hash key */ + ret = xgbe_write_rss_hash_key(pdata); + if (ret) + return (ret); + + /* Program the lookup table */ + ret = xgbe_write_rss_lookup_table(pdata); + if (ret) + return (ret); + + /* Set the RSS options */ + XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); + + /* Enable RSS */ + XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); + + axgbe_printf(0, "RSS Enabled\n"); + + return (0); +} + +static int +xgbe_disable_rss(struct xgbe_prv_data *pdata) { if (!pdata->hw_feat.rss) - return -EOPNOTSUPP; + return (-EOPNOTSUPP); XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); - return 0; + axgbe_printf(0, "RSS Disabled\n"); + + return (0); } -static void xgbe_config_rss(struct xgbe_prv_data *pdata) +static void +xgbe_config_rss(struct xgbe_prv_data *pdata) { + int ret; if (!pdata->hw_feat.rss) return; - xgbe_disable_rss(pdata); + /* Check if the interface has RSS capability */ + if (pdata->enable_rss) + ret = xgbe_enable_rss(pdata); + else + ret = xgbe_disable_rss(pdata); + + if (ret) + axgbe_error("error configuring RSS, RSS disabled\n"); } -static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) +static int +xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; unsigned int reg, reg_val; unsigned int i; /* Clear MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); /* Clear MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); XGMAC_IOWRITE(pdata, reg, reg_val); reg += MAC_QTFCR_INC; } - return 0; + return (0); } -static int 
xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) +static int +xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; unsigned int reg, reg_val; unsigned int i; /* Set MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1); + unsigned int ehfc = 0; + + if (pdata->rx_rfd[i]) { + /* Flow control thresholds are established */ + /* TODO - enable pfc/ets support */ + ehfc = 1; + } + + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); + + axgbe_printf(1, "flow control %s for RXq%u\n", + ehfc ? "enabled" : "disabled", i); } /* Set MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); /* Enable transmit flow control */ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); + /* Set pause time */ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); XGMAC_IOWRITE(pdata, reg, reg_val); reg += MAC_QTFCR_INC; } - return 0; + return (0); } -static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) +static int +xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); - return 0; + return (0); } -static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) +static int +xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); - return 0; + return (0); } -static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) +static int +xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) { - if (pdata->tx_pause) xgbe_enable_tx_flow_control(pdata); else xgbe_disable_tx_flow_control(pdata); - return 0; + return (0); } -static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) +static int +xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) { - if (pdata->rx_pause) 
xgbe_enable_rx_flow_control(pdata); else xgbe_disable_rx_flow_control(pdata); - return 0; + return (0); } -static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) +static void +xgbe_config_flow_control(struct xgbe_prv_data *pdata) { - xgbe_config_tx_flow_control(pdata); xgbe_config_rx_flow_control(pdata); XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); } -static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) +static void +xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; - unsigned int dma_ch_isr, dma_ch_ier; - unsigned int i; + unsigned int i, ver; + + /* Set the interrupt mode if supported */ + if (pdata->channel_irq_mode) + XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, + pdata->channel_irq_mode); + + ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); + + for (i = 0; i < pdata->channel_count; i++) { + channel = pdata->channel[i]; - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { /* Clear all the interrupts which are set */ - dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); - XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); + XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, + XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); /* Clear all interrupt enable bits */ - dma_ch_ier = 0; + channel->curr_ier = 0; /* Enable following interrupts * NIE - Normal Interrupt Summary Enable * AIE - Abnormal Interrupt Summary Enable * FBEE - Fatal Bus Error Enable */ - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + if (ver < 0x21) { + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); + } else { + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); + } + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); if (channel->tx_ring) { /* Enable the following Tx interrupts * 
TIE - Transmit Interrupt Enable (unless using - * per channel interrupts) + * per channel interrupts in edge triggered + * mode) */ - if (!pdata->per_channel_irq) - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + if (!pdata->per_channel_irq || pdata->channel_irq_mode) + XGMAC_SET_BITS(channel->curr_ier, + DMA_CH_IER, TIE, 1); } if (channel->rx_ring) { /* Enable following Rx interrupts * RBUE - Receive Buffer Unavailable Enable * RIE - Receive Interrupt Enable (unless using - * per channel interrupts) + * per channel interrupts in edge triggered + * mode) */ - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); - if (!pdata->per_channel_irq) - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); + if (!pdata->per_channel_irq || pdata->channel_irq_mode) + XGMAC_SET_BITS(channel->curr_ier, + DMA_CH_IER, RIE, 1); } - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); } } -static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) +static void +xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) { unsigned int mtl_q_isr; unsigned int q_count, i; q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); for (i = 0; i < q_count; i++) { /* Clear all the interrupts which are set */ mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); /* No MTL interrupts to be enabled */ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); } } -static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) +static void +xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) { unsigned int mac_ier = 0; /* Enable Timestamp interrupt */ XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); /* Enable all counter interrupts */ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); -} -static int 
xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) -{ - if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3) - return 0; - - XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3); - - return 0; + /* Enable MDIO single command completion interrupt */ + XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1); } -static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata) +static int +xgbe_set_speed(struct xgbe_prv_data *pdata, int speed) { - if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2) - return 0; + unsigned int ss; - XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2); - - return 0; -} - -static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata) -{ - if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0) - return 0; + switch (speed) { + case SPEED_1000: + ss = 0x03; + break; + case SPEED_2500: + ss = 0x02; + break; + case SPEED_10000: + ss = 0x00; + break; + default: + return (-EINVAL); + } - XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0); + if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); - return 0; + return (0); } -static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +static int +xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) { /* Put the VLAN tag in the Rx descriptor */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); /* Don't check the VLAN type */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); /* Check only C-TAG (0x8100) packets */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); /* Enable VLAN tag stripping */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); - return 0; + axgbe_printf(0, "VLAN Stripping Enabled\n"); + + return (0); } -static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +static int +xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); - return 0; + axgbe_printf(0, "VLAN Stripping Disabled\n"); + + 
return (0); } -static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +static int +xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) { /* Enable VLAN filtering */ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); /* Enable VLAN Hash Table filtering */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); /* Disable VLAN tag inverse matching */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); /* Only filter on the lower 12-bits of the VLAN tag */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); /* In order for the VLAN Hash Table filtering to be effective, * the VLAN tag identifier in the VLAN Tag Register must not * be zero. Set the VLAN tag identifier to "1" to enable the * VLAN Hash Table filtering. This implies that a VLAN tag of * 1 will always pass filtering. */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); - return 0; + axgbe_printf(0, "VLAN filtering Enabled\n"); + + return (0); } -static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +static int +xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) { /* Disable VLAN filtering */ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); - return 0; + axgbe_printf(0, "VLAN filtering Disabled\n"); + + return (0); +} + +static uint32_t +xgbe_vid_crc32_le(__le16 vid_le) +{ + uint32_t crc = ~0; + uint32_t temp = 0; + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= CRC32_POLY_LE; + } + + return (crc); } -static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) +static int +xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) { - u16 vlan_hash_table = 0; + uint32_t crc; + uint16_t vid; + uint16_t vlan_hash_table = 0; + __le16 vid_le = 0; + + axgbe_printf(1, "%s: Before updating VLANHTR 0x%x\n", __func__, + 
XGMAC_IOREAD(pdata, MAC_VLANHTR)); + + /* Generate the VLAN Hash Table value */ + for_each_set_bit(vid, pdata->active_vlans, VLAN_NVID) { + + /* Get the CRC32 value of the VLAN ID */ + vid_le = cpu_to_le16(vid); + crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; + + vlan_hash_table |= (1 << crc); + axgbe_printf(1, "%s: vid 0x%x vid_le 0x%x crc 0x%x " + "vlan_hash_table 0x%x\n", __func__, vid, vid_le, crc, + vlan_hash_table); + } /* Set the VLAN Hash Table filtering register */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); - return 0; + axgbe_printf(1, "%s: After updating VLANHTR 0x%x\n", __func__, + XGMAC_IOREAD(pdata, MAC_VLANHTR)); + + return (0); } -static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, - unsigned int enable) +static int +xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable) { unsigned int val = enable ? 1 : 0; if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val) - return 0; + return (0); + + axgbe_printf(1, "%s promiscous mode\n", enable? "entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); /* Hardware will still perform VLAN filtering in promiscuous mode */ - xgbe_disable_rx_vlan_filtering(pdata); + if (enable) { + axgbe_printf(1, "Disabling rx vlan filtering\n"); + xgbe_disable_rx_vlan_filtering(pdata); + } else { + if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) { + axgbe_printf(1, "Enabling rx vlan filtering\n"); + xgbe_enable_rx_vlan_filtering(pdata); + } + } - return 0; + return (0); } -static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, - unsigned int enable) +static int +xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, unsigned int enable) { unsigned int val = enable ? 1 : 0; if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val) - return 0; + return (0); + axgbe_printf(1,"%s allmulti mode\n", enable ? 
"entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); - return 0; + return (0); } -static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata, - char *addr, unsigned int *mac_reg) +static void +xgbe_set_mac_reg(struct xgbe_prv_data *pdata, char *addr, unsigned int *mac_reg) { unsigned int mac_addr_hi, mac_addr_lo; - u8 *mac_addr; + uint8_t *mac_addr; mac_addr_lo = 0; mac_addr_hi = 0; if (addr) { - mac_addr = (u8 *)&mac_addr_lo; + mac_addr = (uint8_t *)&mac_addr_lo; mac_addr[0] = addr[0]; mac_addr[1] = addr[1]; mac_addr[2] = addr[2]; mac_addr[3] = addr[3]; - mac_addr = (u8 *)&mac_addr_hi; + mac_addr = (uint8_t *)&mac_addr_hi; mac_addr[0] = addr[4]; mac_addr[1] = addr[5]; + axgbe_printf(1, "adding mac address %pM at %#x\n", addr, *mac_reg); + XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); } XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); *mac_reg += MAC_MACA_INC; XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); *mac_reg += MAC_MACA_INC; } -static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) +static void +xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) { unsigned int mac_reg; unsigned int addn_macs; mac_reg = MAC_MACA1HR; addn_macs = pdata->hw_feat.addn_mac; xgbe_set_mac_reg(pdata, pdata->mac_addr, &mac_reg); addn_macs--; /* Clear remaining additional MAC address entries */ while (addn_macs--) xgbe_set_mac_reg(pdata, NULL, &mac_reg); } -static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) +static int +xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) { + /* TODO - add support to set mac hash table */ xgbe_set_mac_addn_addrs(pdata); - return 0; + return (0); } -static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr) +static int +xgbe_set_mac_address(struct xgbe_prv_data *pdata, uint8_t *addr) { unsigned int mac_addr_hi, mac_addr_lo; mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | (addr[0] << 0); XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); 
XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); - return 0; + return (0); } -static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) +static int +xgbe_config_rx_mode(struct xgbe_prv_data *pdata) { unsigned int pr_mode, am_mode; - /* XXX */ - pr_mode = 0; - am_mode = 0; + pr_mode = ((pdata->netdev->if_drv_flags & IFF_PPROMISC) != 0); + am_mode = ((pdata->netdev->if_drv_flags & IFF_ALLMULTI) != 0); xgbe_set_promiscuous_mode(pdata, pr_mode); xgbe_set_all_multicast_mode(pdata, am_mode); xgbe_add_mac_addresses(pdata); - return 0; + return (0); } -static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, - int mmd_reg) +static int +xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) +{ + unsigned int reg; + + if (gpio > 15) + return (-EINVAL); + + reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); + + reg &= ~(1 << (gpio + 16)); + XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); + + return (0); +} + +static int +xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) +{ + unsigned int reg; + + if (gpio > 15) + return (-EINVAL); + + reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); + + reg |= (1 << (gpio + 16)); + XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); + + return (0); +} + +static int +xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { unsigned long flags; - unsigned int mmd_address; + unsigned int mmd_address, index, offset; int mmd_data; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); - /* The PCS registers are accessed using mmio. The underlying APB3 + /* The PCS registers are accessed using mmio. The underlying * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two * phases, an address phase and a data phase. * - * The mmio interface is based on 32-bit offsets and values. All + * The mmio interface is based on 16-bit offsets and values. 
All * register offsets must therefore be adjusted by left shifting the - * offset 2 bits and reading 32 bits of data. + * offset 1 bit and reading 16 bits of data. */ + mmd_address <<= 1; + index = mmd_address & ~pdata->xpcs_window_mask; + offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + spin_lock_irqsave(&pdata->xpcs_lock, flags); - XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); - mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2); + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); + mmd_data = XPCS16_IOREAD(pdata, offset); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); - return mmd_data; + return (mmd_data); } -static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, - int mmd_reg, int mmd_data) +static void +xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, + int mmd_data) +{ + unsigned long flags; + unsigned int mmd_address, index, offset; + + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* The PCS registers are accessed using mmio. The underlying + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 16-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 1 bit and writing 16 bits of data. 
+ */ + mmd_address <<= 1; + index = mmd_address & ~pdata->xpcs_window_mask; + offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); + + spin_lock_irqsave(&pdata->xpcs_lock, flags); + XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); + XPCS16_IOWRITE(pdata, offset, mmd_data); + spin_unlock_irqrestore(&pdata->xpcs_lock, flags); +} + +static int +xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { - unsigned int mmd_address; unsigned long flags; + unsigned int mmd_address; + int mmd_data; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); /* The PCS registers are accessed using mmio. The underlying APB3 * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 32-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data. 
*/ spin_lock_irqsave(&pdata->xpcs_lock, flags); - XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); - XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); + XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); + mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); -} -static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) -{ - return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN); + return (mmd_data); } -static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) +static void +xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, + int mmd_data) { - XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); + unsigned int mmd_address; + unsigned long flags; - return 0; + if (mmd_reg & MII_ADDR_C45) + mmd_address = mmd_reg & ~MII_ADDR_C45; + else + mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); + + /* The PCS registers are accessed using mmio. The underlying APB3 + * management interface uses indirect addressing to access the MMD + * register sets. This requires accessing of the PCS register in two + * phases, an address phase and a data phase. + * + * The mmio interface is based on 32-bit offsets and values. All + * register offsets must therefore be adjusted by left shifting the + * offset 2 bits and writing 32 bits of data. 
+ */ + spin_lock_irqsave(&pdata->xpcs_lock, flags); + XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); + XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); + spin_unlock_irqrestore(&pdata->xpcs_lock, flags); } -static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) +static int +xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { - XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); + switch (pdata->vdata->xpcs_access) { + case XGBE_XPCS_ACCESS_V1: + return (xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg)); - return 0; + case XGBE_XPCS_ACCESS_V2: + default: + return (xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg)); + } } -static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) +static void +xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, + int mmd_data) { - struct xgbe_ring_desc *rdesc = rdata->rdesc; - - /* Reset the Tx descriptor - * Set buffer 1 (lo) address to zero - * Set buffer 1 (hi) address to zero - * Reset all other control bits (IC, TTSE, B2L & B1L) - * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) - */ - rdesc->desc0 = 0; - rdesc->desc1 = 0; - rdesc->desc2 = 0; - rdesc->desc3 = 0; + switch (pdata->vdata->xpcs_access) { + case XGBE_XPCS_ACCESS_V1: + return (xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data)); - dsb(sy); + case XGBE_XPCS_ACCESS_V2: + default: + return (xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data)); + } } -static void xgbe_tx_desc_init(struct xgbe_channel *channel) +static unsigned int +xgbe_create_mdio_sca(int port, int reg) { - struct xgbe_ring *ring = channel->tx_ring; - struct xgbe_ring_data *rdata; - int i; - int start_index = ring->cur; - - DBGPR("-->tx_desc_init\n"); - - /* Initialze all descriptors */ - for (i = 0; i < ring->rdesc_count; i++) { - rdata = XGBE_GET_DESC_DATA(ring, i); - - /* Initialize Tx descriptor */ - xgbe_tx_desc_reset(rdata); - } + unsigned int mdio_sca, da; - /* Update the total number of Tx descriptors */ - 
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); + da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; - /* Update the starting address of descriptor ring */ - rdata = XGBE_GET_DESC_DATA(ring, start_index); - XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, - upper_32_bits(rdata->rdata_paddr)); - XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, - lower_32_bits(rdata->rdata_paddr)); + mdio_sca = 0; + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); - DBGPR("<--tx_desc_init\n"); + return (mdio_sca); } -static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata, - struct xgbe_ring_data *rdata, unsigned int index) +static int +xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg, + uint16_t val) { - struct xgbe_ring_desc *rdesc = rdata->rdesc; - unsigned int inte; - - inte = 1; + unsigned int mdio_sca, mdio_sccd; - /* Reset the Rx descriptor - * Set buffer 1 (lo) address to header dma address (lo) - * Set buffer 1 (hi) address to header dma address (hi) - * Set buffer 2 (lo) address to buffer dma address (lo) - * Set buffer 2 (hi) address to buffer dma address (hi) and - * set control bits OWN and INTE - */ - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_hdr_paddr)); - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_hdr_paddr)); - rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr)); - rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr)); + mtx_lock_spin(&pdata->mdio_mutex); - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); + mdio_sca = xgbe_create_mdio_sca(addr, reg); + XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); - dsb(sy); + mdio_sccd = 0; + XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); + XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); + XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); + XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); - XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 
1); + if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == + EWOULDBLOCK) { + axgbe_error("%s: MDIO write error\n", __func__); + mtx_unlock_spin(&pdata->mdio_mutex); + return (-ETIMEDOUT); + } - dsb(sy); + mtx_unlock_spin(&pdata->mdio_mutex); + return (0); } -static void xgbe_rx_desc_init(struct xgbe_channel *channel) +static int +xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg) { - struct xgbe_prv_data *pdata = channel->pdata; - struct xgbe_ring *ring = channel->rx_ring; - struct xgbe_ring_data *rdata; - unsigned int start_index = ring->cur; - unsigned int i; - - DBGPR("-->rx_desc_init\n"); - - /* Initialize all descriptors */ - for (i = 0; i < ring->rdesc_count; i++) { - rdata = XGBE_GET_DESC_DATA(ring, i); + unsigned int mdio_sca, mdio_sccd; - /* Initialize Rx descriptor */ - xgbe_rx_desc_reset(pdata, rdata, i); - } + mtx_lock_spin(&pdata->mdio_mutex); - bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map, - BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + mdio_sca = xgbe_create_mdio_sca(addr, reg); + XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); - /* Update the total number of Rx descriptors */ - XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); + mdio_sccd = 0; + XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); + XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); + XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); - /* Update the starting address of descriptor ring */ - rdata = XGBE_GET_DESC_DATA(ring, start_index); - XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI, - upper_32_bits(rdata->rdata_paddr)); - XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO, - lower_32_bits(rdata->rdata_paddr)); + if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == + EWOULDBLOCK) { + axgbe_error("%s: MDIO read error\n", __func__); + mtx_unlock_spin(&pdata->mdio_mutex); + return (-ETIMEDOUT); + } - /* Update the Rx Descriptor Tail Pointer */ - rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1); - 
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, - lower_32_bits(rdata->rdata_paddr)); + mtx_unlock_spin(&pdata->mdio_mutex); - DBGPR("<--rx_desc_init\n"); + return (XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA)); } -static void xgbe_tx_start_xmit(struct xgbe_channel *channel, - struct xgbe_ring *ring) +static int +xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, + enum xgbe_mdio_mode mode) { - struct xgbe_ring_data *rdata; + unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); - /* Issue a poll command to Tx DMA by writing address - * of next immediate free descriptor */ - rdata = XGBE_GET_DESC_DATA(ring, ring->cur); - XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, - lower_32_bits(rdata->rdata_paddr)); + switch (mode) { + case XGBE_MDIO_MODE_CL22: + if (port > XGMAC_MAX_C22_PORT) + return (-EINVAL); + reg_val |= (1 << port); + break; + case XGBE_MDIO_MODE_CL45: + break; + default: + return (-EINVAL); + } - ring->tx.xmit_more = 0; + XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); + + return (0); } -static void xgbe_dev_xmit(struct xgbe_channel *channel) +static int +xgbe_tx_complete(struct xgbe_ring_desc *rdesc) { - struct xgbe_prv_data *pdata = channel->pdata; - struct xgbe_ring *ring = channel->tx_ring; - struct xgbe_ring_data *rdata; - struct xgbe_ring_desc *rdesc; - struct xgbe_packet_data *packet = &ring->packet_data; - unsigned int tx_set_ic; - int start_index = ring->cur; - int cur_index = ring->cur; - int i; - - DBGPR("-->xgbe_dev_xmit\n"); - - /* Determine if an interrupt should be generated for this Tx: - * Interrupt: - * - Tx frame count exceeds the frame count setting - * - Addition of Tx frame count to the frame count since the - * last interrupt was set exceeds the frame count setting - * No interrupt: - * - No frame count setting specified (ethtool -C ethX tx-frames 0) - * - Addition of Tx frame count to the frame count since the - * last interrupt was set does not exceed the frame count setting - */ - ring->coalesce_count += 
packet->tx_packets; - if (!pdata->tx_frames) - tx_set_ic = 0; - else if (packet->tx_packets > pdata->tx_frames) - tx_set_ic = 1; - else if ((ring->coalesce_count % pdata->tx_frames) < - packet->tx_packets) - tx_set_ic = 1; - else - tx_set_ic = 0; - tx_set_ic = 1; - - rdata = XGBE_GET_DESC_DATA(ring, cur_index); - rdesc = rdata->rdesc; - - /* Update buffer address (for TSO this is the header) */ - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr)); - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr)); - - /* Update the buffer length */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, - rdata->mbuf_len); - - /* Timestamp enablement check */ - if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); - - /* Mark it as First Descriptor */ - XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); - - /* Mark it as a NORMAL descriptor */ - XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); - - /* Set OWN bit if not the first descriptor */ - if (cur_index != start_index) - XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); - - /* Enable CRC and Pad Insertion */ - XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); + return (!XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN)); +} - /* Set the total length to be transmitted */ - XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, - packet->length); +static int +xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); - for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) { - cur_index++; - rdata = XGBE_GET_DESC_DATA(ring, cur_index); - rdesc = rdata->rdesc; + axgbe_printf(0, "Receive checksum offload Disabled\n"); + return (0); +} - /* Update buffer address */ - rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr)); - rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr)); +static int 
+xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); - /* Update the buffer length */ - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, - rdata->mbuf_len); + axgbe_printf(0, "Receive checksum offload Enabled\n"); + return (0); +} - /* Set OWN bit */ - XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); +static void +xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) +{ + struct xgbe_ring_desc *rdesc = rdata->rdesc; - /* Mark it as NORMAL descriptor */ - XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); - } + /* Reset the Tx descriptor + * Set buffer 1 (lo) address to zero + * Set buffer 1 (hi) address to zero + * Reset all other control bits (IC, TTSE, B2L & B1L) + * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) + */ + rdesc->desc0 = 0; + rdesc->desc1 = 0; + rdesc->desc2 = 0; + rdesc->desc3 = 0; - /* Set LAST bit for the last descriptor */ - XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); + wmb(); +} - /* Set IC bit based on Tx coalescing settings */ - if (tx_set_ic) - XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); +static void +xgbe_tx_desc_init(struct xgbe_channel *channel) +{ + struct xgbe_ring *ring = channel->tx_ring; + struct xgbe_ring_data *rdata; + int i; + int start_index = ring->cur; - /* Save the Tx info to report back during cleanup */ - rdata->tx.packets = packet->tx_packets; - rdata->tx.bytes = packet->tx_bytes; + /* Initialze all descriptors */ + for (i = 0; i < ring->rdesc_count; i++) { + rdata = XGBE_GET_DESC_DATA(ring, i); - /* Sync the DMA buffers */ - bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map, - BUS_DMASYNC_PREWRITE); - bus_dmamap_sync(ring->mbuf_dmat, ring->mbuf_map, - BUS_DMASYNC_PREWRITE); + /* Initialize Tx descriptor */ + xgbe_tx_desc_reset(rdata); + } - /* In case the Tx DMA engine is running, make sure everything - * is written to the descriptor(s) before setting the OWN bit - * for the first descriptor - */ + /* Update the 
total number of Tx descriptors */ + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); - /* Set OWN bit for the first descriptor */ + /* Update the starting address of descriptor ring */ rdata = XGBE_GET_DESC_DATA(ring, start_index); - rdesc = rdata->rdesc; - XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, + upper_32_bits(rdata->rdata_paddr)); + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, + lower_32_bits(rdata->rdata_paddr)); +} - /* Sync to ensure the OWN bit was seen */ - bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map, - BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); +static void +xgbe_rx_desc_init(struct xgbe_channel *channel) +{ + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_ring_data *rdata; + unsigned int start_index = ring->cur; - ring->cur = cur_index + 1; - xgbe_tx_start_xmit(channel, ring); + /* + * Just set desc_count and the starting address of the desc list + * here. Rest will be done as part of the txrx path. 
+ */ - DBGPR(" %s: descriptors %u to %u written\n", - channel->name, start_index & (ring->rdesc_count - 1), - (ring->cur - 1) & (ring->rdesc_count - 1)); + /* Update the total number of Rx descriptors */ + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); - DBGPR("<--xgbe_dev_xmit\n"); + /* Update the starting address of descriptor ring */ + rdata = XGBE_GET_DESC_DATA(ring, start_index); + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI, + upper_32_bits(rdata->rdata_paddr)); + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO, + lower_32_bits(rdata->rdata_paddr)); } -static int xgbe_dev_read(struct xgbe_channel *channel) +static int +xgbe_dev_read(struct xgbe_channel *channel) { + struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; struct xgbe_packet_data *packet = &ring->packet_data; - unsigned int err, etlt; + unsigned int err, etlt, l34t; - DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); + axgbe_printf(1, "-->xgbe_dev_read: cur = %d\n", ring->cur); rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdesc = rdata->rdesc; - bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - - dsb(sy); - /* Check for data availability */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) - return 1; + return (1); + + rmb(); + + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { + /* TODO - Timestamp Context Descriptor */ - dsb(sy); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CONTEXT, 1); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CONTEXT_NEXT, 0); + return (0); + } /* Normal Descriptor, be sure Context Descriptor bit is off */ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); /* Indicate if a Context Descriptor is next */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, - CONTEXT_NEXT, 1); + 
CONTEXT_NEXT, 1); /* Get the header length */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + FIRST, 1); rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, - RX_NORMAL_DESC2, HL); - } + RX_NORMAL_DESC2, HL); + if (rdata->rx.hdr_len) + pdata->ext_stats.rx_split_header_packets++; + } else + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + FIRST, 0); - /* Get the packet length */ - rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); + /* Get the RSS hash */ + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + RSS_HASH, 1); + + packet->rss_hash = le32_to_cpu(rdesc->desc1); + + l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); + switch (l34t) { + case RX_DESC3_L34T_IPV4_TCP: + packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV4; + break; + case RX_DESC3_L34T_IPV4_UDP: + packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV4; + break; + case RX_DESC3_L34T_IPV6_TCP: + packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV6; + break; + case RX_DESC3_L34T_IPV6_UDP: + packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV6; + break; + default: + packet->rss_hash_type = M_HASHTYPE_OPAQUE; + break; + } + } + /* Not all the data has been transferred for this packet */ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { - /* Not all the data has been transferred for this packet */ + /* This is not the last of the data for this packet */ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, - INCOMPLETE, 1); - return 0; + LAST, 0); + return (0); } /* This is the last of the data for this packet */ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, - INCOMPLETE, 0); + LAST, 1); + + /* Get the packet length */ + rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); + + /* Set checksum done indicator as appropriate */ + /* TODO - add tunneling support */ + XGMAC_SET_BITS(packet->attributes, 
RX_PACKET_ATTRIBUTES, + CSUM_DONE, 1); /* Check for errors (only valid in last descriptor) */ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); + axgbe_printf(1, "%s: err=%u, etlt=%#x\n", __func__, err, etlt); + + if (!err || !etlt) { + /* No error if err is 0 or etlt is 0 */ + if (etlt == 0x09) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + VLAN_CTAG, 1); + packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, + RX_NORMAL_DESC0, OVT); + axgbe_printf(1, "vlan-ctag=%#06x\n", packet->vlan_ctag); + } + } else { + unsigned int tnp = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, TNP); - if (err && etlt) { - if ((etlt == 0x05) || (etlt == 0x06)) + if ((etlt == 0x05) || (etlt == 0x06)) { + axgbe_printf(1, "%s: err1 l34t %d err 0x%x etlt 0x%x\n", + __func__, l34t, err, etlt); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CSUM_DONE, 0); XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, - CSUM_DONE, 0); - else + TNPCSUM_DONE, 0); + pdata->ext_stats.rx_csum_errors++; + } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { + axgbe_printf(1, "%s: err2 l34t %d err 0x%x etlt 0x%x\n", + __func__, l34t, err, etlt); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CSUM_DONE, 0); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNPCSUM_DONE, 0); + pdata->ext_stats.rx_vxlan_csum_errors++; + } else { + axgbe_printf(1, "%s: tnp %d l34t %d err 0x%x etlt 0x%x\n", + __func__, tnp, l34t, err, etlt); + axgbe_printf(1, "%s: Channel: %d SR 0x%x DSR 0x%x \n", + __func__, channel->queue_index, + XGMAC_DMA_IOREAD(channel, DMA_CH_SR), + XGMAC_DMA_IOREAD(channel, DMA_CH_DSR)); + axgbe_printf(1, "%s: ring cur %d dirty %d\n", + __func__, ring->cur, ring->dirty); + axgbe_printf(1, "%s: Desc 0x%08x-0x%08x-0x%08x-0x%08x\n", + __func__, rdesc->desc0, rdesc->desc1, rdesc->desc2, + rdesc->desc3); XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, - FRAME, 1); + 
FRAME, 1); + } } - bus_dmamap_sync(ring->mbuf_dmat, rdata->mbuf_map, - BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); - - DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, - ring->cur & (ring->rdesc_count - 1), ring->cur); + axgbe_printf(1, "<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", + channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur); - return 0; + return (0); } -static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) +static int +xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) { /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ - return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT); + return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT)); } -static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) +static int +xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) { /* Rx and Tx share LD bit, so check TDES3.LD bit */ - return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD); + return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD)); } -static int xgbe_enable_int(struct xgbe_channel *channel, - enum xgbe_int int_id) +static int +xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id) { - unsigned int dma_ch_ier; + struct xgbe_prv_data *pdata = channel->pdata; - dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); + axgbe_printf(1, "enable_int: DMA_CH_IER read - 0x%x\n", + channel->curr_ier); switch (int_id) { case XGMAC_INT_DMA_CH_SR_TI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); break; case XGMAC_INT_DMA_CH_SR_TPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); break; case XGMAC_INT_DMA_CH_SR_TBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); 
break; case XGMAC_INT_DMA_CH_SR_RBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); break; case XGMAC_INT_DMA_CH_SR_TI_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); break; case XGMAC_INT_DMA_CH_SR_FBE: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); break; case XGMAC_INT_DMA_ALL: - dma_ch_ier |= channel->saved_ier; + channel->curr_ier |= channel->saved_ier; break; default: - return -1; + return (-1); } - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); + + axgbe_printf(1, "enable_int: DMA_CH_IER write - 0x%x\n", + channel->curr_ier); - return 0; + return (0); } -static int xgbe_disable_int(struct xgbe_channel *channel, - enum xgbe_int int_id) +static int +xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id) { - unsigned int dma_ch_ier; + struct xgbe_prv_data *pdata = channel->pdata; - dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); + axgbe_printf(1, "disable_int: DMA_CH_IER read - 0x%x\n", + channel->curr_ier); switch (int_id) { case XGMAC_INT_DMA_CH_SR_TI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); break; case XGMAC_INT_DMA_CH_SR_TPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); break; case XGMAC_INT_DMA_CH_SR_TBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); + 
XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_RBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); break; case XGMAC_INT_DMA_CH_SR_TI_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_FBE: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); break; case XGMAC_INT_DMA_ALL: - channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK; - dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK; + channel->saved_ier = channel->curr_ier; + channel->curr_ier = 0; break; default: - return -1; + return (-1); } - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); + + axgbe_printf(1, "disable_int: DMA_CH_IER write - 0x%x\n", + channel->curr_ier); - return 0; + return (0); } -static int xgbe_exit(struct xgbe_prv_data *pdata) +static int +__xgbe_exit(struct xgbe_prv_data *pdata) { unsigned int count = 2000; - DBGPR("-->xgbe_exit\n"); - /* Issue a software reset */ XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); DELAY(10); /* Poll Until Poll Condition */ while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) DELAY(500); if (!count) - return -EBUSY; + return (-EBUSY); + + return (0); +} + +static int +xgbe_exit(struct xgbe_prv_data *pdata) +{ + int ret; - DBGPR("<--xgbe_exit\n"); + /* To guard against possible incorrectly generated interrupts, + * issue the software reset twice. 
+ */ + ret = __xgbe_exit(pdata); + if (ret) { + axgbe_error("%s: exit error %d\n", __func__, ret); + return (ret); + } - return 0; + return (__xgbe_exit(pdata)); } -static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) +static int +xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) { unsigned int i, count; if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) - return 0; + return (0); for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); /* Poll Until Poll Condition */ for (i = 0; i < pdata->tx_q_count; i++) { count = 2000; while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, MTL_Q_TQOMR, FTQ)) DELAY(500); if (!count) - return -EBUSY; + return (-EBUSY); } - return 0; + return (0); } -static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata) +static void +xgbe_config_dma_bus(struct xgbe_prv_data *pdata) { + unsigned int sbmr; + + sbmr = XGMAC_IOREAD(pdata, DMA_SBMR); + /* Set enhanced addressing mode */ - XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1); + XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1); /* Set the System Bus mode */ - XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1); - XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1); -} + XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1); + XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2); + XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal); + XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1); + XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1); -static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) -{ - unsigned int arcache, awcache; + XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr); - arcache = 0; - XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache); - XGMAC_SET_BITS(arcache, DMA_AXIARCR, 
THD, pdata->axdomain); - XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache); + /* Set descriptor fetching threshold */ + if (pdata->vdata->tx_desc_prefetch) + XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS, + pdata->vdata->tx_desc_prefetch); - awcache = 0; - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache); - XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain); - XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache); + if (pdata->vdata->rx_desc_prefetch) + XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS, + pdata->vdata->rx_desc_prefetch); } -static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) +static void +xgbe_config_dma_cache(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr); + XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr); + if (pdata->awarcr) + XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr); +} + +static void +xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) { unsigned int i; /* Set Tx to weighted round robin scheduling algorithm */ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); /* Set Tx traffic classes to use WRR algorithm with equal weights */ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, - MTL_TSA_ETS); + MTL_TSA_ETS); XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); } /* Set Rx to strict priority algorithm */ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); } -static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size, - unsigned int queue_count) +static void +xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata, + unsigned int queue, unsigned int q_fifo_size) 
+{ + unsigned int frame_fifo_size; + unsigned int rfa, rfd; + + frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata)); + axgbe_printf(1, "%s: queue %d q_fifo_size %d frame_fifo_size 0x%x\n", + __func__, queue, q_fifo_size, frame_fifo_size); + + /* TODO - add pfc/ets related support */ + + /* This path deals with just maximum frame sizes which are + * limited to a jumbo frame of 9,000 (plus headers, etc.) + * so we can never exceed the maximum allowable RFA/RFD + * values. + */ + if (q_fifo_size <= 2048) { + /* rx_rfd to zero to signal no flow control */ + pdata->rx_rfa[queue] = 0; + pdata->rx_rfd[queue] = 0; + return; + } + + if (q_fifo_size <= 4096) { + /* Between 2048 and 4096 */ + pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ + pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ + return; + } + + if (q_fifo_size <= frame_fifo_size) { + /* Between 4096 and max-frame */ + pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ + pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ + return; + } + + if (q_fifo_size <= (frame_fifo_size * 3)) { + /* Between max-frame and 3 max-frames, + * trigger if we get just over a frame of data and + * resume when we have just under half a frame left. 
+ */ + rfa = q_fifo_size - frame_fifo_size; + rfd = rfa + (frame_fifo_size / 2); + } else { + /* Above 3 max-frames - trigger when just over + * 2 frames of space available + */ + rfa = frame_fifo_size * 2; + rfa += XGMAC_FLOW_CONTROL_UNIT; + rfd = rfa + frame_fifo_size; + } + + pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa); + pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd); + axgbe_printf(1, "%s: forced queue %d rfa 0x%x rfd 0x%x\n", __func__, + queue, pdata->rx_rfa[queue], pdata->rx_rfd[queue]); +} + +static void +xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata, + unsigned int *fifo) { unsigned int q_fifo_size; - unsigned int p_fifo; + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) { + q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT; + + axgbe_printf(1, "%s: fifo[%d] - 0x%x q_fifo_size 0x%x\n", + __func__, i, fifo[i], q_fifo_size); + xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size); + } +} + +static void +xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) +{ + unsigned int i; + + for (i = 0; i < pdata->rx_q_count; i++) { + axgbe_printf(1, "%s: queue %d rfa %d rfd %d\n", __func__, i, + pdata->rx_rfa[i], pdata->rx_rfd[i]); + + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, + pdata->rx_rfa[i]); + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, + pdata->rx_rfd[i]); + + axgbe_printf(1, "%s: MTL_Q_RQFCR 0x%x\n", __func__, + XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR)); + } +} - /* Calculate the configured fifo size */ - q_fifo_size = 1 << (fifo_size + 7); +static unsigned int +xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata) +{ + /* The configured value may not be the actual amount of fifo RAM */ + return (min_t(unsigned int, pdata->tx_max_fifo_size, + pdata->hw_feat.tx_fifo_size)); +} +static unsigned int +xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata) +{ /* The configured value may not be the actual amount of fifo RAM */ - q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size); + return 
(min_t(unsigned int, pdata->rx_max_fifo_size, + pdata->hw_feat.rx_fifo_size)); +} + +static void +xgbe_calculate_equal_fifo(unsigned int fifo_size, unsigned int queue_count, + unsigned int *fifo) +{ + unsigned int q_fifo_size; + unsigned int p_fifo; + unsigned int i; - q_fifo_size = q_fifo_size / queue_count; + q_fifo_size = fifo_size / queue_count; - /* Each increment in the queue fifo size represents 256 bytes of - * fifo, with 0 representing 256 bytes. Distribute the fifo equally - * between the queues. + /* Calculate the fifo setting by dividing the queue's fifo size + * by the fifo allocation increment (with 0 representing the + * base allocation increment so decrement the result by 1). */ - p_fifo = q_fifo_size / 256; + p_fifo = q_fifo_size / XGMAC_FIFO_UNIT; if (p_fifo) p_fifo--; - return p_fifo; + /* Distribute the fifo equally amongst the queues */ + for (i = 0; i < queue_count; i++) + fifo[i] = p_fifo; +} + +static unsigned int +xgbe_set_nonprio_fifos(unsigned int fifo_size, unsigned int queue_count, + unsigned int *fifo) +{ + unsigned int i; + + MPASS(powerof2(XGMAC_FIFO_MIN_ALLOC)); + + if (queue_count <= IEEE_8021QAZ_MAX_TCS) + return (fifo_size); + + /* Rx queues 9 and up are for specialized packets, + * such as PTP or DCB control packets, etc. 
and + * don't require a large fifo + */ + for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) { + fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1; + fifo_size -= XGMAC_FIFO_MIN_ALLOC; + } + + return (fifo_size); } -static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) +static void +xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) { unsigned int fifo_size; + unsigned int fifo[XGBE_MAX_QUEUES]; unsigned int i; - fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, - pdata->tx_q_count); + fifo_size = xgbe_get_tx_fifo_size(pdata); + axgbe_printf(1, "%s: fifo_size 0x%x\n", __func__, fifo_size); - for (i = 0; i < pdata->tx_q_count; i++) - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size); + xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo); + + for (i = 0; i < pdata->tx_q_count; i++) { + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]); + axgbe_printf(1, "Tx q %d FIFO Size 0x%x\n", i, + XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR)); + } + + axgbe_printf(1, "%d Tx hardware queues, %d byte fifo per queue\n", + pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); } -static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) +static void +xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) { unsigned int fifo_size; + unsigned int fifo[XGBE_MAX_QUEUES]; + unsigned int prio_queues; unsigned int i; - fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size, - pdata->rx_q_count); + /* TODO - add pfc/ets related support */ - for (i = 0; i < pdata->rx_q_count; i++) - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size); + /* Clear any DCB related fifo/queue information */ + fifo_size = xgbe_get_rx_fifo_size(pdata); + prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); + axgbe_printf(1, "%s: fifo_size 0x%x rx_q_cnt %d prio %d\n", __func__, + fifo_size, pdata->rx_q_count, prio_queues); + + /* Assign a minimum fifo to the non-VLAN priority queues */ + fifo_size = 
xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo); + + xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo); + + for (i = 0; i < pdata->rx_q_count; i++) { + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]); + axgbe_printf(1, "Rx q %d FIFO Size 0x%x\n", i, + XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR)); + } + + xgbe_calculate_flow_control_threshold(pdata, fifo); + xgbe_config_flow_control_threshold(pdata); + + axgbe_printf(1, "%u Rx hardware queues, %u byte fifo/queue\n", + pdata->rx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); } -static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) +static void +xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) { unsigned int qptc, qptc_extra, queue; unsigned int prio_queues; unsigned int ppq, ppq_extra, prio; unsigned int mask; unsigned int i, j, reg, reg_val; /* Map the MTL Tx Queues to Traffic Classes * Note: Tx Queues >= Traffic Classes */ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { for (j = 0; j < qptc; j++) { + axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i); XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, - Q2TCMAP, i); + Q2TCMAP, i); pdata->q2tc_map[queue++] = i; } if (i < qptc_extra) { + axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i); XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, - Q2TCMAP, i); + Q2TCMAP, i); pdata->q2tc_map[queue++] = i; } } /* Map the 8 VLAN priority values to available MTL Rx queues */ - prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, - pdata->rx_q_count); + prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; reg = MAC_RQC2R; reg_val = 0; for (i = 0, prio = 0; i < prio_queues;) { mask = 0; for (j = 0; j < ppq; j++) { + axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); pdata->prio2q_map[prio++] = i; } if (i < 
ppq_extra) { + axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); pdata->prio2q_map[prio++] = i; } reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) continue; XGMAC_IOWRITE(pdata, reg, reg_val); reg += MAC_RQC2_INC; reg_val = 0; } /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ reg = MTL_RQDCM0R; reg_val = 0; for (i = 0; i < pdata->rx_q_count;) { reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) continue; XGMAC_IOWRITE(pdata, reg, reg_val); reg += MTL_RQDCM_INC; reg_val = 0; } } -static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) +static void +xgbe_config_mac_address(struct xgbe_prv_data *pdata) { - unsigned int i; - - for (i = 0; i < pdata->rx_q_count; i++) { - /* Activate flow control when less than 4k left in fifo */ - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2); + xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev)); - /* De-activate flow control when more than 6k left in fifo */ - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4); + /* Filtering is done using perfect filtering and hash filtering */ + if (pdata->hw_feat.hash_table_size) { + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); } } -static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) -{ - - xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev)); -} - -static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) +static void +xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) { unsigned int val; val = (if_getmtu(pdata->netdev) > XGMAC_STD_PACKET_MTU) ? 
1 : 0; XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); } -static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata) +static void +xgbe_config_mac_speed(struct xgbe_prv_data *pdata) { - switch (pdata->phy_speed) { - case SPEED_10000: - xgbe_set_xgmii_speed(pdata); - break; - - case SPEED_2500: - xgbe_set_gmii_2500_speed(pdata); - break; - - case SPEED_1000: - xgbe_set_gmii_speed(pdata); - break; - case SPEED_UNKNOWN: - break; - default: - panic("TODO %s:%d\n", __FILE__, __LINE__); - } + xgbe_set_speed(pdata, pdata->phy_speed); } -static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) +static void +xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) { - if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM) != 0) + if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM)) xgbe_enable_rx_csum(pdata); else xgbe_disable_rx_csum(pdata); } -static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) +static void +xgbe_config_vlan_support(struct xgbe_prv_data *pdata) { /* Indicate that VLAN Tx CTAGs come from context descriptors */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); /* Set the current VLAN Hash Table register value */ xgbe_update_vlan_hash_table(pdata); - xgbe_disable_rx_vlan_filtering(pdata); - xgbe_disable_rx_vlan_stripping(pdata); + if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) { + axgbe_printf(1, "Enabling rx vlan filtering\n"); + xgbe_enable_rx_vlan_filtering(pdata); + } else { + axgbe_printf(1, "Disabling rx vlan filtering\n"); + xgbe_disable_rx_vlan_filtering(pdata); + } + + if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWTAGGING)) { + axgbe_printf(1, "Enabling rx vlan stripping\n"); + xgbe_enable_rx_vlan_stripping(pdata); + } else { + axgbe_printf(1, "Disabling rx vlan stripping\n"); + xgbe_disable_rx_vlan_stripping(pdata); + } } -static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) +static uint64_t +xgbe_mmc_read(struct xgbe_prv_data *pdata, 
unsigned int reg_lo) { bool read_hi; - u64 val; - - switch (reg_lo) { - /* These registers are always 64 bit */ - case MMC_TXOCTETCOUNT_GB_LO: - case MMC_TXOCTETCOUNT_G_LO: - case MMC_RXOCTETCOUNT_GB_LO: - case MMC_RXOCTETCOUNT_G_LO: - read_hi = true; - break; + uint64_t val; + + if (pdata->vdata->mmc_64bit) { + switch (reg_lo) { + /* These registers are always 32 bit */ + case MMC_RXRUNTERROR: + case MMC_RXJABBERERROR: + case MMC_RXUNDERSIZE_G: + case MMC_RXOVERSIZE_G: + case MMC_RXWATCHDOGERROR: + read_hi = false; + break; - default: - read_hi = false; + default: + read_hi = true; + } + } else { + switch (reg_lo) { + /* These registers are always 64 bit */ + case MMC_TXOCTETCOUNT_GB_LO: + case MMC_TXOCTETCOUNT_G_LO: + case MMC_RXOCTETCOUNT_GB_LO: + case MMC_RXOCTETCOUNT_G_LO: + read_hi = true; + break; + + default: + read_hi = false; + } } val = XGMAC_IOREAD(pdata, reg_lo); if (read_hi) - val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); + val |= ((uint64_t)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); - return val; + return (val); } -static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) +static void +xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) { struct xgbe_mmc_stats *stats = &pdata->mmc_stats; unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) stats->txoctetcount_gb += - xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) stats->txframecount_gb += - xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) stats->txbroadcastframes_g += - xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) stats->txmulticastframes_g += - xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, 
MMC_TXMULTICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) stats->tx64octets_gb += - xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) stats->tx65to127octets_gb += - xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) stats->tx128to255octets_gb += - xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) stats->tx256to511octets_gb += - xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) stats->tx512to1023octets_gb += - xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) stats->tx1024tomaxoctets_gb += - xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) stats->txunicastframes_gb += - xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) stats->txmulticastframes_gb += - xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) stats->txbroadcastframes_g += - xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) stats->txunderflowerror += - xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); + xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) 
stats->txoctetcount_g += - xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) stats->txframecount_g += - xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) stats->txpauseframes += - xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) stats->txvlanframes_g += - xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); } -static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) +static void +xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) { struct xgbe_mmc_stats *stats = &pdata->mmc_stats; unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) stats->rxframecount_gb += - xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) stats->rxoctetcount_gb += - xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) stats->rxoctetcount_g += - xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) stats->rxbroadcastframes_g += - xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) stats->rxmulticastframes_g += - xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) stats->rxcrcerror += - xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); + xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) 
stats->rxrunterror += - xgbe_mmc_read(pdata, MMC_RXRUNTERROR); + xgbe_mmc_read(pdata, MMC_RXRUNTERROR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) stats->rxjabbererror += - xgbe_mmc_read(pdata, MMC_RXJABBERERROR); + xgbe_mmc_read(pdata, MMC_RXJABBERERROR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) stats->rxundersize_g += - xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) stats->rxoversize_g += - xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) stats->rx64octets_gb += - xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) stats->rx65to127octets_gb += - xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) stats->rx128to255octets_gb += - xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) stats->rx256to511octets_gb += - xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) stats->rx512to1023octets_gb += - xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) stats->rx1024tomaxoctets_gb += - xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) stats->rxunicastframes_g += - xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) stats->rxlengtherror += 
- xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); + xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) stats->rxoutofrangetype += - xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) stats->rxpauseframes += - xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) stats->rxfifooverflow += - xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) stats->rxvlanframes_gb += - xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) stats->rxwatchdogerror += - xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); + xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); } -static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) +static void +xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) { struct xgbe_mmc_stats *stats = &pdata->mmc_stats; /* Freeze counters */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); stats->txoctetcount_gb += - xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); stats->txframecount_gb += - xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); stats->txbroadcastframes_g += - xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); stats->txmulticastframes_g += - xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); stats->tx64octets_gb += - xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); stats->tx65to127octets_gb += - xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); stats->tx128to255octets_gb 
+= - xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); stats->tx256to511octets_gb += - xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); stats->tx512to1023octets_gb += - xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); stats->tx1024tomaxoctets_gb += - xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); stats->txunicastframes_gb += - xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); stats->txmulticastframes_gb += - xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); - stats->txbroadcastframes_g += - xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + stats->txbroadcastframes_gb += + xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); stats->txunderflowerror += - xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); + xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); stats->txoctetcount_g += - xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); stats->txframecount_g += - xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); stats->txpauseframes += - xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); stats->txvlanframes_g += - xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); stats->rxframecount_gb += - xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); stats->rxoctetcount_gb += - xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); stats->rxoctetcount_g += - xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); stats->rxbroadcastframes_g += - xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + 
xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); stats->rxmulticastframes_g += - xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); stats->rxcrcerror += - xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); + xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); stats->rxrunterror += - xgbe_mmc_read(pdata, MMC_RXRUNTERROR); + xgbe_mmc_read(pdata, MMC_RXRUNTERROR); stats->rxjabbererror += - xgbe_mmc_read(pdata, MMC_RXJABBERERROR); + xgbe_mmc_read(pdata, MMC_RXJABBERERROR); stats->rxundersize_g += - xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); stats->rxoversize_g += - xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); + xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); stats->rx64octets_gb += - xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); stats->rx65to127octets_gb += - xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); stats->rx128to255octets_gb += - xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); stats->rx256to511octets_gb += - xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); stats->rx512to1023octets_gb += - xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); stats->rx1024tomaxoctets_gb += - xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); stats->rxunicastframes_g += - xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); stats->rxlengtherror += - xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); + xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); stats->rxoutofrangetype += - xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); stats->rxpauseframes += - xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); + xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 
stats->rxfifooverflow += - xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); stats->rxvlanframes_gb += - xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); stats->rxwatchdogerror += - xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); + xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); /* Un-freeze counters */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); } -static void xgbe_config_mmc(struct xgbe_prv_data *pdata) +static void +xgbe_config_mmc(struct xgbe_prv_data *pdata) { /* Set counters to reset on read */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); /* Reset the counters */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); } -static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, - struct xgbe_channel *channel) +static void +xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue) +{ + unsigned int tx_status; + unsigned long tx_timeout; + + /* The Tx engine cannot be stopped if it is actively processing + * packets. Wait for the Tx queue to empty the Tx fifo. Don't + * wait forever though... 
+ */ + tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz); + while (ticks < tx_timeout) { + tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); + if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && + (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) + break; + + DELAY(500); + } + + if (ticks >= tx_timeout) + axgbe_printf(1, "timed out waiting for Tx queue %u to empty\n", + queue); +} + +static void +xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue) { unsigned int tx_dsr, tx_pos, tx_qidx; unsigned int tx_status; unsigned long tx_timeout; + if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) + return (xgbe_txq_prepare_tx_stop(pdata, queue)); + /* Calculate the status register to read and the position within */ - if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { + if (queue < DMA_DSRX_FIRST_QUEUE) { tx_dsr = DMA_DSR0; - tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) + - DMA_DSR0_TPS_START; + tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; } else { - tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; + tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + DMA_DSRX_TPS_START; } /* The Tx engine cannot be stopped if it is actively processing * descriptors. Wait for the Tx engine to enter the stopped or * suspended state. Don't wait forever though... 
*/ tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz); while (ticks < tx_timeout) { tx_status = XGMAC_IOREAD(pdata, tx_dsr); tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); if ((tx_status == DMA_TPS_STOPPED) || (tx_status == DMA_TPS_SUSPENDED)) break; DELAY(500); } + + if (ticks >= tx_timeout) + axgbe_printf(1, "timed out waiting for Tx DMA channel %u to stop\n", + queue); } -static void xgbe_enable_tx(struct xgbe_prv_data *pdata) +static void +xgbe_enable_tx(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; /* Enable each Tx DMA channel */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->tx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); } /* Enable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, - MTL_Q_ENABLED); + MTL_Q_ENABLED); /* Enable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); } -static void xgbe_disable_tx(struct xgbe_prv_data *pdata) +static void +xgbe_disable_tx(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; /* Prepare for Tx DMA channel stop */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) - break; - - xgbe_prepare_tx_stop(pdata, channel); - } + for (i = 0; i < pdata->tx_q_count; i++) + xgbe_prepare_tx_stop(pdata, i); /* Disable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); /* Disable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); /* Disable each Tx DMA channel */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->tx_ring) break; - 
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); } } -static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, - unsigned int queue) +static void +xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, unsigned int queue) { unsigned int rx_status; unsigned long rx_timeout; /* The Rx engine cannot be stopped if it is actively processing * packets. Wait for the Rx queue to empty the Rx fifo. Don't * wait forever though... */ rx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz); while (ticks < rx_timeout) { rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) break; DELAY(500); } + + if (ticks >= rx_timeout) + axgbe_printf(1, "timed out waiting for Rx queue %d to empty\n", + queue); } -static void xgbe_enable_rx(struct xgbe_prv_data *pdata) +static void +xgbe_enable_rx(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int reg_val, i; /* Enable each Rx DMA channel */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->rx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); } /* Enable each Rx queue */ reg_val = 0; for (i = 0; i < pdata->rx_q_count; i++) reg_val |= (0x02 << (i << 1)); XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); /* Enable MAC Rx */ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); } -static void xgbe_disable_rx(struct xgbe_prv_data *pdata) +static void +xgbe_disable_rx(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; /* Disable MAC Rx */ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); 
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); /* Prepare for Rx DMA channel stop */ for (i = 0; i < pdata->rx_q_count; i++) xgbe_prepare_rx_stop(pdata, i); /* Disable each Rx queue */ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); /* Disable each Rx DMA channel */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->rx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); } } -static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) +static void +xgbe_powerup_tx(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; /* Enable each Tx DMA channel */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->tx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); } /* Enable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); } -static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) +static void +xgbe_powerdown_tx(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; /* Prepare for Tx DMA channel stop */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) - break; - - xgbe_prepare_tx_stop(pdata, channel); - } + for (i = 0; i < pdata->tx_q_count; i++) + xgbe_prepare_tx_stop(pdata, i); /* Disable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); /* Disable each Tx DMA channel */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->tx_ring) break; - 
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); } } -static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) +static void +xgbe_powerup_rx(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; /* Enable each Rx DMA channel */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->rx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); } } -static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) +static void +xgbe_powerdown_rx(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; unsigned int i; /* Disable each Rx DMA channel */ - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->rx_ring) + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) break; - XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); } } -static int xgbe_init(struct xgbe_prv_data *pdata) +static int +xgbe_init(struct xgbe_prv_data *pdata) { struct xgbe_desc_if *desc_if = &pdata->desc_if; int ret; - DBGPR("-->xgbe_init\n"); - /* Flush Tx queues */ ret = xgbe_flush_tx_queues(pdata); - if (ret) - return ret; + if (ret) { + axgbe_error("error flushing TX queues\n"); + return (ret); + } /* * Initialize DMA related features */ xgbe_config_dma_bus(pdata); xgbe_config_dma_cache(pdata); xgbe_config_osp_mode(pdata); - xgbe_config_pblx8(pdata); - xgbe_config_tx_pbl_val(pdata); - xgbe_config_rx_pbl_val(pdata); + xgbe_config_pbl_val(pdata); xgbe_config_rx_coalesce(pdata); xgbe_config_tx_coalesce(pdata); xgbe_config_rx_buffer_size(pdata); xgbe_config_tso_mode(pdata); xgbe_config_sph_mode(pdata); xgbe_config_rss(pdata); desc_if->wrapper_tx_desc_init(pdata); 
desc_if->wrapper_rx_desc_init(pdata); xgbe_enable_dma_interrupts(pdata); /* * Initialize MTL related features */ xgbe_config_mtl_mode(pdata); xgbe_config_queue_mapping(pdata); xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); xgbe_config_tx_threshold(pdata, pdata->tx_threshold); xgbe_config_rx_threshold(pdata, pdata->rx_threshold); xgbe_config_tx_fifo_size(pdata); xgbe_config_rx_fifo_size(pdata); - xgbe_config_flow_control_threshold(pdata); /*TODO: Error Packet and undersized good Packet forwarding enable (FEP and FUP) */ xgbe_enable_mtl_interrupts(pdata); /* * Initialize MAC related features */ xgbe_config_mac_address(pdata); xgbe_config_rx_mode(pdata); xgbe_config_jumbo_enable(pdata); xgbe_config_flow_control(pdata); xgbe_config_mac_speed(pdata); xgbe_config_checksum_offload(pdata); xgbe_config_vlan_support(pdata); xgbe_config_mmc(pdata); xgbe_enable_mac_interrupts(pdata); - DBGPR("<--xgbe_init\n"); - - return 0; + return (0); } -void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) +void +xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) { - DBGPR("-->xgbe_init_function_ptrs\n"); hw_if->tx_complete = xgbe_tx_complete; hw_if->set_mac_address = xgbe_set_mac_address; hw_if->config_rx_mode = xgbe_config_rx_mode; hw_if->enable_rx_csum = xgbe_enable_rx_csum; hw_if->disable_rx_csum = xgbe_disable_rx_csum; hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; hw_if->read_mmd_regs = xgbe_read_mmd_regs; hw_if->write_mmd_regs = xgbe_write_mmd_regs; - hw_if->set_gmii_speed = xgbe_set_gmii_speed; - hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed; - hw_if->set_xgmii_speed = xgbe_set_xgmii_speed; + hw_if->set_speed = xgbe_set_speed; + + 
hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; + hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs; + hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs; + + hw_if->set_gpio = xgbe_set_gpio; + hw_if->clr_gpio = xgbe_clr_gpio; hw_if->enable_tx = xgbe_enable_tx; hw_if->disable_tx = xgbe_disable_tx; hw_if->enable_rx = xgbe_enable_rx; hw_if->disable_rx = xgbe_disable_rx; hw_if->powerup_tx = xgbe_powerup_tx; hw_if->powerdown_tx = xgbe_powerdown_tx; hw_if->powerup_rx = xgbe_powerup_rx; hw_if->powerdown_rx = xgbe_powerdown_rx; - hw_if->dev_xmit = xgbe_dev_xmit; hw_if->dev_read = xgbe_dev_read; hw_if->enable_int = xgbe_enable_int; hw_if->disable_int = xgbe_disable_int; hw_if->init = xgbe_init; hw_if->exit = xgbe_exit; /* Descriptor related Sequences have to be initialized here */ hw_if->tx_desc_init = xgbe_tx_desc_init; hw_if->rx_desc_init = xgbe_rx_desc_init; hw_if->tx_desc_reset = xgbe_tx_desc_reset; - hw_if->rx_desc_reset = xgbe_rx_desc_reset; hw_if->is_last_desc = xgbe_is_last_desc; hw_if->is_context_desc = xgbe_is_context_desc; - hw_if->tx_start_xmit = xgbe_tx_start_xmit; /* For FLOW ctrl */ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; /* For RX coalescing */ hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; hw_if->usec_to_riwt = xgbe_usec_to_riwt; hw_if->riwt_to_usec = xgbe_riwt_to_usec; /* For RX and TX threshold config */ hw_if->config_rx_threshold = xgbe_config_rx_threshold; hw_if->config_tx_threshold = xgbe_config_tx_threshold; /* For RX and TX Store and Forward Mode config */ hw_if->config_rsf_mode = xgbe_config_rsf_mode; hw_if->config_tsf_mode = xgbe_config_tsf_mode; /* For TX DMA Operating on Second Frame config */ hw_if->config_osp_mode = xgbe_config_osp_mode; - /* For RX and TX PBL config */ - hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val; - hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val; - hw_if->config_tx_pbl_val = 
xgbe_config_tx_pbl_val; - hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val; - hw_if->config_pblx8 = xgbe_config_pblx8; - /* For MMC statistics support */ hw_if->tx_mmc_int = xgbe_tx_mmc_int; hw_if->rx_mmc_int = xgbe_rx_mmc_int; hw_if->read_mmc_stats = xgbe_read_mmc_stats; /* For Receive Side Scaling */ + hw_if->enable_rss = xgbe_enable_rss; hw_if->disable_rss = xgbe_disable_rss; - - DBGPR("<--xgbe_init_function_ptrs\n"); + hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; + hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; } diff --git a/sys/dev/axgbe/xgbe-drv.c b/sys/dev/axgbe/xgbe-drv.c index 81f8f30b927a..017c3c9bc6ac 100644 --- a/sys/dev/axgbe/xgbe-drv.c +++ b/sys/dev/axgbe/xgbe-drv.c @@ -1,1076 +1,347 @@ /* * AMD 10Gb Ethernet driver * + * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. + * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. 
* * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); -#include -#include - #include "xgbe.h" #include "xgbe-common.h" -static int xgbe_one_poll(struct xgbe_channel *channel, int budget); -static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget); - -static int xgbe_alloc_channels(struct xgbe_prv_data *pdata) -{ - struct xgbe_channel *channel_mem, *channel; - struct xgbe_ring *tx_ring, *rx_ring; - unsigned int count, i; - int ret = -ENOMEM; - - count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); - - channel_mem = malloc(count * sizeof(struct xgbe_channel), M_AXGBE, - M_WAITOK | M_ZERO); - tx_ring = malloc(pdata->tx_ring_count * sizeof(struct xgbe_ring), - M_AXGBE, M_WAITOK | M_ZERO); - rx_ring = malloc(pdata->rx_ring_count * sizeof(struct xgbe_ring), - M_AXGBE, M_WAITOK | M_ZERO); - - for (i = 0, channel = channel_mem; i < count; i++, channel++) { - snprintf(channel->name, sizeof(channel->name), "channel-%d", i); - channel->pdata = pdata; - channel->queue_index = i; - channel->dma_tag = rman_get_bustag(pdata->xgmac_res); - bus_space_subregion(channel->dma_tag, - rman_get_bushandle(pdata->xgmac_res), - DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC, - &channel->dma_handle); - - if (pdata->per_channel_irq) { - if (pdata->chan_irq_res[i] == NULL) - goto err_irq; - - channel->dma_irq_res = pdata->chan_irq_res[i]; - } - - if (i < pdata->tx_ring_count) { - spin_lock_init(&tx_ring->lock); - channel->tx_ring = tx_ring++; - } - - if (i < pdata->rx_ring_count) { - spin_lock_init(&rx_ring->lock); - channel->rx_ring = rx_ring++; - } - } - - pdata->channel = channel_mem; - pdata->channel_count = count; - - return 0; - -err_irq: - free(rx_ring, M_AXGBE); - free(tx_ring, M_AXGBE); - free(channel_mem, M_AXGBE); - - return ret; -} - -static void xgbe_free_channels(struct xgbe_prv_data *pdata) -{ - if (!pdata->channel) - return; - - free(pdata->channel->rx_ring, M_AXGBE); - free(pdata->channel->tx_ring, M_AXGBE); - free(pdata->channel, M_AXGBE); - - pdata->channel = NULL; - 
pdata->channel_count = 0; -} - -static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring) -{ - return (ring->rdesc_count - (ring->cur - ring->dirty)); -} - -static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring) -{ - return (ring->cur - ring->dirty); -} - -static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel, - struct xgbe_ring *ring, unsigned int count) -{ - struct xgbe_prv_data *pdata = channel->pdata; - - if (count > xgbe_tx_avail_desc(ring)) { - /* If we haven't notified the hardware because of xmit_more - * support, tell it now - */ - if (ring->tx.xmit_more) - pdata->hw_if.tx_start_xmit(channel, ring); - - return EFBIG; - } - - return 0; -} - -static int xgbe_calc_rx_buf_size(struct ifnet *netdev, unsigned int mtu) +int +xgbe_calc_rx_buf_size(struct ifnet *netdev, unsigned int mtu) { unsigned int rx_buf_size; - if (mtu > XGMAC_JUMBO_PACKET_MTU) { - return -EINVAL; - } + if (mtu > XGMAC_JUMBO_PACKET_MTU) + return (-EINVAL); rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - rx_buf_size = MIN(XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE); - - rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & - ~(XGBE_RX_BUF_ALIGN - 1); - - return rx_buf_size; -} - -static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata) -{ - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_channel *channel; - enum xgbe_int int_id; - unsigned int i; - - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (channel->tx_ring && channel->rx_ring) - int_id = XGMAC_INT_DMA_CH_SR_TI_RI; - else if (channel->tx_ring) - int_id = XGMAC_INT_DMA_CH_SR_TI; - else if (channel->rx_ring) - int_id = XGMAC_INT_DMA_CH_SR_RI; - else - continue; + rx_buf_size = min(max(rx_buf_size, XGBE_RX_MIN_BUF_SIZE), PAGE_SIZE); + rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) & + ~(XGBE_RX_BUF_ALIGN - 1); - hw_if->enable_int(channel, int_id); - } + return (rx_buf_size); } -static void xgbe_isr(void *data) -{ - struct xgbe_prv_data *pdata 
= data; - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_channel *channel; - unsigned int dma_isr, dma_ch_isr; - unsigned int mac_isr; - unsigned int i; - - /* The DMA interrupt status register also reports MAC and MTL - * interrupts. So for polling mode, we just need to check for - * this register to be non-zero - */ - dma_isr = XGMAC_IOREAD(pdata, DMA_ISR); - if (!dma_isr) - return; - - for (i = 0; i < pdata->channel_count; i++) { - if (!(dma_isr & (1 << i))) - continue; - - channel = pdata->channel + i; - - dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); - - /* The TI or RI interrupt bits may still be set even if using - * per channel DMA interrupts. Check to be sure those are not - * enabled before using the private data napi structure. - */ - if (!pdata->per_channel_irq && - (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || - XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) { - xgbe_all_poll(pdata, 16); - } - - if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU)) - pdata->ext_stats.rx_buffer_unavailable++; - - /* Restart the device on a Fatal Bus Error */ - if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) - taskqueue_enqueue(taskqueue_thread, - &pdata->restart_work); - - /* Clear all interrupt signals */ - XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); - } - - if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) { - mac_isr = XGMAC_IOREAD(pdata, MAC_ISR); - - if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS)) - hw_if->tx_mmc_int(pdata); - - if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) - hw_if->rx_mmc_int(pdata); - } -} - -static void xgbe_dma_isr(void *data) -{ - struct xgbe_channel *channel = data; - - xgbe_one_poll(channel, 16); -} - -static void xgbe_service(void *ctx, int pending) -{ - struct xgbe_prv_data *pdata = ctx; - - pdata->phy_if.phy_status(pdata); -} - -static void xgbe_service_timer(void *data) -{ - struct xgbe_prv_data *pdata = data; - - DBGPR("--> xgbe_service_timer\n"); - taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work); - - 
callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata); - DBGPR("<-- xgbe_service_timer\n"); -} - -static void xgbe_init_timers(struct xgbe_prv_data *pdata) -{ - - callout_init(&pdata->service_timer, 1); -} - -static void xgbe_start_timers(struct xgbe_prv_data *pdata) -{ - callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata); -} - -static void xgbe_stop_timers(struct xgbe_prv_data *pdata) -{ - - callout_drain(&pdata->service_timer); -} - -void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) +void +xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) { unsigned int mac_hfr0, mac_hfr1, mac_hfr2; struct xgbe_hw_features *hw_feat = &pdata->hw_feat; DBGPR("-->xgbe_get_all_hw_features\n"); mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R); mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R); mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R); memset(hw_feat, 0, sizeof(*hw_feat)); hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); /* Hardware feature register 0 */ - hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); - hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); - hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); - hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); - hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); - hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); - hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); - hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); - hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); - hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); - hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); - hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, + hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); + hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); + hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL); + hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL); + 
hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL); + hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL); + hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL); + hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL); + hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL); + hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL); + hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL); + hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ADDMACADRSEL); - hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); + hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); + hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN); /* Hardware feature register 1 */ - hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RXFIFOSIZE); - hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, + hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TXFIFOSIZE); - hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD); - hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64); - hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); - hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); - hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); - hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); - hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); - hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); + hw_feat->adv_ts_hi = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD); + hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64); + hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN); + hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN); + hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN); + hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA); + 
hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN); + hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC); hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, HASHTBLSZ); hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, L3L4FNUM); /* Hardware feature register 2 */ hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT); hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT); hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT); hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT); hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM); hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM); /* Translate the Hash Table size into actual number */ switch (hw_feat->hash_table_size) { case 0: break; case 1: hw_feat->hash_table_size = 64; break; case 2: hw_feat->hash_table_size = 128; break; case 3: hw_feat->hash_table_size = 256; break; } /* Translate the address width setting into actual number */ switch (hw_feat->dma_width) { case 0: hw_feat->dma_width = 32; break; case 1: hw_feat->dma_width = 40; break; case 2: hw_feat->dma_width = 48; break; default: hw_feat->dma_width = 32; } /* The Queue, Channel and TC counts are zero based so increment them * to get the actual number */ hw_feat->rx_q_cnt++; hw_feat->tx_q_cnt++; hw_feat->rx_ch_cnt++; hw_feat->tx_ch_cnt++; hw_feat->tc_cnt++; - DBGPR("<--xgbe_get_all_hw_features\n"); -} - -static int xgbe_request_irqs(struct xgbe_prv_data *pdata) -{ - struct xgbe_channel *channel; - unsigned int i; - int ret; + /* Translate the fifo sizes into actual numbers */ + hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7); + hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7); + DBGPR("%s: Tx fifo 0x%x Rx fifo 0x%x\n", __func__, + hw_feat->tx_fifo_size, hw_feat->rx_fifo_size); - ret = bus_setup_intr(pdata->dev, pdata->dev_irq_res, - INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_isr, pdata, - &pdata->dev_irq_tag); - if 
(ret) { - return ret; - } + DBGPR("Hardware features:\n"); - if (!pdata->per_channel_irq) - return 0; - - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - ret = bus_setup_intr(pdata->dev, channel->dma_irq_res, - INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_dma_isr, channel, - &channel->dma_irq_tag); - if (ret != 0) { - goto err_irq; - } - } - - return 0; - -err_irq: - /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ - for (i--, channel--; i < pdata->channel_count; i--, channel--) - bus_teardown_intr(pdata->dev, channel->dma_irq_res, - channel->dma_irq_tag); - - bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag); - - return -ret; -} - -static void xgbe_free_irqs(struct xgbe_prv_data *pdata) -{ - struct xgbe_channel *channel; - unsigned int i; - - bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag); + /* Hardware feature register 0 */ + DBGPR(" 1GbE support : %s\n", + hw_feat->gmii ? "yes" : "no"); + DBGPR(" VLAN hash filter : %s\n", + hw_feat->vlhash ? "yes" : "no"); + DBGPR(" MDIO interface : %s\n", + hw_feat->sma ? "yes" : "no"); + DBGPR(" Wake-up packet support : %s\n", + hw_feat->rwk ? "yes" : "no"); + DBGPR(" Magic packet support : %s\n", + hw_feat->mgk ? "yes" : "no"); + DBGPR(" Management counters : %s\n", + hw_feat->mmc ? "yes" : "no"); + DBGPR(" ARP offload : %s\n", + hw_feat->aoe ? "yes" : "no"); + DBGPR(" IEEE 1588-2008 Timestamp : %s\n", + hw_feat->ts ? "yes" : "no"); + DBGPR(" Energy Efficient Ethernet : %s\n", + hw_feat->eee ? "yes" : "no"); + DBGPR(" TX checksum offload : %s\n", + hw_feat->tx_coe ? "yes" : "no"); + DBGPR(" RX checksum offload : %s\n", + hw_feat->rx_coe ? "yes" : "no"); + DBGPR(" Additional MAC addresses : %u\n", + hw_feat->addn_mac); + DBGPR(" Timestamp source : %s\n", + (hw_feat->ts_src == 1) ? "internal" : + (hw_feat->ts_src == 2) ? "external" : + (hw_feat->ts_src == 3) ? 
"internal/external" : "n/a"); + DBGPR(" SA/VLAN insertion : %s\n", + hw_feat->sa_vlan_ins ? "yes" : "no"); - if (!pdata->per_channel_irq) - return; + /* Hardware feature register 1 */ + DBGPR(" RX fifo size : %u\n", + hw_feat->rx_fifo_size); + DBGPR(" TX fifo size : %u\n", + hw_feat->tx_fifo_size); + DBGPR(" IEEE 1588 high word : %s\n", + hw_feat->adv_ts_hi ? "yes" : "no"); + DBGPR(" DMA width : %u\n", + hw_feat->dma_width); + DBGPR(" Data Center Bridging : %s\n", + hw_feat->dcb ? "yes" : "no"); + DBGPR(" Split header : %s\n", + hw_feat->sph ? "yes" : "no"); + DBGPR(" TCP Segmentation Offload : %s\n", + hw_feat->tso ? "yes" : "no"); + DBGPR(" Debug memory interface : %s\n", + hw_feat->dma_debug ? "yes" : "no"); + DBGPR(" Receive Side Scaling : %s\n", + hw_feat->rss ? "yes" : "no"); + DBGPR(" Traffic Class count : %u\n", + hw_feat->tc_cnt); + DBGPR(" Hash table size : %u\n", + hw_feat->hash_table_size); + DBGPR(" L3/L4 Filters : %u\n", + hw_feat->l3l4_filter_num); - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) - bus_teardown_intr(pdata->dev, channel->dma_irq_res, - channel->dma_irq_tag); + /* Hardware feature register 2 */ + DBGPR(" RX queue count : %u\n", + hw_feat->rx_q_cnt); + DBGPR(" TX queue count : %u\n", + hw_feat->tx_q_cnt); + DBGPR(" RX DMA channel count : %u\n", + hw_feat->rx_ch_cnt); + DBGPR(" TX DMA channel count : %u\n", + hw_feat->rx_ch_cnt); + DBGPR(" PPS outputs : %u\n", + hw_feat->pps_out_num); + DBGPR(" Auxiliary snapshot inputs : %u\n", + hw_feat->aux_snap_num); + + DBGPR("<--xgbe_get_all_hw_features\n"); } -void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) +void +xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; DBGPR("-->xgbe_init_tx_coalesce\n"); pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS; pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES; hw_if->config_tx_coalesce(pdata); DBGPR("<--xgbe_init_tx_coalesce\n"); } -void xgbe_init_rx_coalesce(struct xgbe_prv_data 
*pdata) +void +xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; DBGPR("-->xgbe_init_rx_coalesce\n"); pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS); pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS; pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES; hw_if->config_rx_coalesce(pdata); DBGPR("<--xgbe_init_rx_coalesce\n"); } - -static void xgbe_free_tx_data(struct xgbe_prv_data *pdata) -{ - struct xgbe_desc_if *desc_if = &pdata->desc_if; - struct xgbe_channel *channel; - struct xgbe_ring *ring; - struct xgbe_ring_data *rdata; - unsigned int i, j; - - DBGPR("-->xgbe_free_tx_data\n"); - - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - ring = channel->tx_ring; - if (!ring) - break; - - for (j = 0; j < ring->rdesc_count; j++) { - rdata = XGBE_GET_DESC_DATA(ring, j); - desc_if->unmap_rdata(pdata, rdata); - } - } - - DBGPR("<--xgbe_free_tx_data\n"); -} - -static void xgbe_free_rx_data(struct xgbe_prv_data *pdata) -{ - struct xgbe_desc_if *desc_if = &pdata->desc_if; - struct xgbe_channel *channel; - struct xgbe_ring *ring; - struct xgbe_ring_data *rdata; - unsigned int i, j; - - DBGPR("-->xgbe_free_rx_data\n"); - - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - ring = channel->rx_ring; - if (!ring) - break; - - for (j = 0; j < ring->rdesc_count; j++) { - rdata = XGBE_GET_DESC_DATA(ring, j); - desc_if->unmap_rdata(pdata, rdata); - } - } - - DBGPR("<--xgbe_free_rx_data\n"); -} - -static int xgbe_phy_init(struct xgbe_prv_data *pdata) -{ - pdata->phy_link = -1; - pdata->phy_speed = SPEED_UNKNOWN; - - return pdata->phy_if.phy_reset(pdata); -} - -static int xgbe_start(struct xgbe_prv_data *pdata) -{ - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_phy_if *phy_if = &pdata->phy_if; - int ret; - - DBGPR("-->xgbe_start\n"); - - hw_if->init(pdata); - - ret = phy_if->phy_start(pdata); - if (ret) - goto err_phy; - - ret = xgbe_request_irqs(pdata); - 
if (ret) - goto err_napi; - - hw_if->enable_tx(pdata); - hw_if->enable_rx(pdata); - - xgbe_enable_rx_tx_ints(pdata); - - xgbe_start_timers(pdata); - taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work); - - DBGPR("<--xgbe_start\n"); - - return 0; - -err_napi: - phy_if->phy_stop(pdata); - -err_phy: - hw_if->exit(pdata); - - return ret; -} - -static void xgbe_stop(struct xgbe_prv_data *pdata) -{ - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_phy_if *phy_if = &pdata->phy_if; - - DBGPR("-->xgbe_stop\n"); - - xgbe_stop_timers(pdata); - taskqueue_drain_all(pdata->dev_workqueue); - - hw_if->disable_tx(pdata); - hw_if->disable_rx(pdata); - - xgbe_free_irqs(pdata); - - phy_if->phy_stop(pdata); - - hw_if->exit(pdata); - - DBGPR("<--xgbe_stop\n"); -} - -static void xgbe_restart_dev(struct xgbe_prv_data *pdata) -{ - DBGPR("-->xgbe_restart_dev\n"); - - /* If not running, "restart" will happen on open */ - if ((pdata->netdev->if_drv_flags & IFF_DRV_RUNNING) == 0) - return; - - xgbe_stop(pdata); - - xgbe_free_tx_data(pdata); - xgbe_free_rx_data(pdata); - - xgbe_start(pdata); - - DBGPR("<--xgbe_restart_dev\n"); -} - -static void xgbe_restart(void *ctx, int pending) -{ - struct xgbe_prv_data *pdata = ctx; - - xgbe_restart_dev(pdata); -} - -static void xgbe_packet_info(struct xgbe_prv_data *pdata, - struct xgbe_ring *ring, struct mbuf *m0, - struct xgbe_packet_data *packet) -{ - struct mbuf *m; - unsigned int len; - - packet->m = m0; - - packet->rdesc_count = 0; - - packet->tx_packets = 1; - packet->tx_bytes = m_length(m0, NULL); - - for (m = m0; m != NULL; m = m->m_next) { - for (len = m->m_len; len != 0;) { - packet->rdesc_count++; - len -= MIN(len, XGBE_TX_MAX_BUF_SIZE); - } - } -} - -int xgbe_open(struct ifnet *netdev) -{ - struct xgbe_prv_data *pdata = netdev->if_softc; - struct xgbe_desc_if *desc_if = &pdata->desc_if; - int ret; - - DBGPR("-->xgbe_open\n"); - - /* Initialize the phy */ - ret = xgbe_phy_init(pdata); - if (ret) - return ret; - - /* Calculate 
the Rx buffer size before allocating rings */ - ret = xgbe_calc_rx_buf_size(netdev, if_getmtu(netdev)); - if (ret < 0) { - goto err_ptpclk; - } - pdata->rx_buf_size = ret; - - /* Allocate the channel and ring structures */ - ret = xgbe_alloc_channels(pdata); - if (ret) { - printf("xgbe_alloc_channels failed\n"); - goto err_ptpclk; - } - - /* Allocate the ring descriptors and buffers */ - ret = desc_if->alloc_ring_resources(pdata); - if (ret) { - printf("desc_if->alloc_ring_resources failed\n"); - goto err_channels; - } - - TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata); - TASK_INIT(&pdata->restart_work, 0, xgbe_restart, pdata); - xgbe_init_timers(pdata); - - ret = xgbe_start(pdata); - if (ret) - goto err_rings; - - clear_bit(XGBE_DOWN, &pdata->dev_state); - - DBGPR("<--xgbe_open\n"); - - return 0; - -err_rings: - desc_if->free_ring_resources(pdata); - -err_channels: - xgbe_free_channels(pdata); - -err_ptpclk: - - return ret; -} - -int xgbe_close(struct ifnet *netdev) -{ - struct xgbe_prv_data *pdata = netdev->if_softc; - struct xgbe_desc_if *desc_if = &pdata->desc_if; - - DBGPR("-->xgbe_close\n"); - - /* Stop the device */ - xgbe_stop(pdata); - - /* Free the ring descriptors and buffers */ - desc_if->free_ring_resources(pdata); - - /* Free the channel and ring structures */ - xgbe_free_channels(pdata); - - set_bit(XGBE_DOWN, &pdata->dev_state); - - DBGPR("<--xgbe_close\n"); - - return 0; -} - -int xgbe_xmit(struct ifnet *ifp, struct mbuf *m) -{ - struct xgbe_prv_data *pdata = ifp->if_softc; - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_desc_if *desc_if = &pdata->desc_if; - struct xgbe_channel *channel; - struct xgbe_ring *ring; - struct xgbe_packet_data *packet; - int ret; - - M_ASSERTPKTHDR(m); - MPASS(m->m_nextpkt == NULL); - - if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state) || - !pdata->phy.link)) { - m_freem(m); - return (ENETDOWN); - } - - channel = pdata->channel; - ring = channel->tx_ring; - packet = &ring->packet_data; - - /* 
Calculate preliminary packet info */ - memset(packet, 0, sizeof(*packet)); - xgbe_packet_info(pdata, ring, m, packet); - - /* Check that there are enough descriptors available */ - ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count); - if (ret) - goto tx_netdev_return; - - if (!desc_if->map_tx_skb(channel, m)) { - goto tx_netdev_return; - } - - /* Configure required descriptor fields for transmission */ - hw_if->dev_xmit(channel); - - return 0; - -tx_netdev_return: - m_free(m); - - return 0; -} - -int xgbe_change_mtu(struct ifnet *netdev, int mtu) -{ - struct xgbe_prv_data *pdata = netdev->if_softc; - int ret; - - DBGPR("-->xgbe_change_mtu\n"); - - ret = xgbe_calc_rx_buf_size(netdev, mtu); - if (ret < 0) - return -ret; - - pdata->rx_buf_size = ret; - netdev->if_mtu = mtu; - - xgbe_restart_dev(pdata); - - DBGPR("<--xgbe_change_mtu\n"); - - return 0; -} - -static void xgbe_rx_refresh(struct xgbe_channel *channel) -{ - struct xgbe_prv_data *pdata = channel->pdata; - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_desc_if *desc_if = &pdata->desc_if; - struct xgbe_ring *ring = channel->rx_ring; - struct xgbe_ring_data *rdata; - - while (ring->dirty != ring->cur) { - rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); - - /* Reset rdata values */ - desc_if->unmap_rdata(pdata, rdata); - - if (desc_if->map_rx_buffer(pdata, ring, rdata)) - break; - - hw_if->rx_desc_reset(pdata, rdata, ring->dirty); - - ring->dirty++; - } - - /* Make sure everything is written before the register write */ - dsb(sy); - - /* Update the Rx Tail Pointer Register with address of - * the last cleaned entry */ - rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1); - XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, - lower_32_bits(rdata->rdata_paddr)); -} - -static int xgbe_tx_poll(struct xgbe_channel *channel) -{ - struct xgbe_prv_data *pdata = channel->pdata; - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_desc_if *desc_if = &pdata->desc_if; - struct xgbe_ring *ring = 
channel->tx_ring; - struct xgbe_ring_data *rdata; - struct xgbe_ring_desc *rdesc; - int processed = 0; - unsigned int cur; - - DBGPR("-->xgbe_tx_poll\n"); - - /* Nothing to do if there isn't a Tx ring for this channel */ - if (!ring) - return 0; - - cur = ring->cur; - - /* Be sure we get ring->cur before accessing descriptor data */ - dsb(sy); - - while ((processed < XGBE_TX_DESC_MAX_PROC) && - (ring->dirty != cur)) { - rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); - rdesc = rdata->rdesc; - - if (!hw_if->tx_complete(rdesc)) - break; - - /* Make sure descriptor fields are read after reading the OWN - * bit */ - dsb(sy); - - /* Free the SKB and reset the descriptor for re-use */ - desc_if->unmap_rdata(pdata, rdata); - hw_if->tx_desc_reset(rdata); - - processed++; - ring->dirty++; - } - - if (!processed) - return 0; - - DBGPR("<--xgbe_tx_poll: processed=%d\n", processed); - - return processed; -} - -static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) -{ - struct xgbe_prv_data *pdata = channel->pdata; - struct xgbe_hw_if *hw_if = &pdata->hw_if; - struct xgbe_ring *ring = channel->rx_ring; - struct xgbe_ring_data *rdata; - struct xgbe_packet_data *packet; - struct ifnet *ifp = pdata->netdev; - struct mbuf *m; - unsigned int incomplete, context_next; - unsigned int received = 0; - int packet_count = 0; - - DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); - - /* Nothing to do if there isn't a Rx ring for this channel */ - if (!ring) - return 0; - - incomplete = 0; - context_next = 0; - - rdata = XGBE_GET_DESC_DATA(ring, ring->cur); - packet = &ring->packet_data; - while (packet_count < budget) { - DBGPR(" cur = %d\n", ring->cur); - -read_again: - rdata = XGBE_GET_DESC_DATA(ring, ring->cur); - - if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3)) - xgbe_rx_refresh(channel); - - if (hw_if->dev_read(channel)) - break; - - m = rdata->mb; - - received++; - ring->cur++; - - incomplete = XGMAC_GET_BITS(packet->attributes, - RX_PACKET_ATTRIBUTES, - INCOMPLETE); - 
context_next = XGMAC_GET_BITS(packet->attributes, - RX_PACKET_ATTRIBUTES, - CONTEXT_NEXT); - - /* Earlier error, just drain the remaining data */ - if (incomplete || context_next) { - goto read_again; - } - - if (packet->errors) { - rdata->mbuf_free = 1; - goto next_packet; - } - rdata->mb = NULL; - - m->m_pkthdr.len = rdata->rx.hdr_len + rdata->rx.len; - if (rdata->rx.hdr_len != 0) { - m->m_len = rdata->rx.hdr_len; - m->m_next->m_len = rdata->rx.len; - } else { - m->m_len = rdata->rx.len; - m_freem(m->m_next); - m->m_next = NULL; - } - if_setrcvif(m, ifp); - if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); - - ifp->if_input(ifp, m); - -next_packet: - packet_count++; - } - - DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count); - - return packet_count; -} - -static int xgbe_one_poll(struct xgbe_channel *channel, int budget) -{ - int processed = 0; - - DBGPR("-->xgbe_one_poll: budget=%d\n", budget); - - /* Cleanup Tx ring first */ - xgbe_tx_poll(channel); - - /* Process Rx ring next */ - processed = xgbe_rx_poll(channel, budget); - - DBGPR("<--xgbe_one_poll: received = %d\n", processed); - - return processed; -} - -static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget) -{ - struct xgbe_channel *channel; - int ring_budget; - int processed, last_processed; - unsigned int i; - - DBGPR("-->xgbe_all_poll: budget=%d\n", budget); - - processed = 0; - ring_budget = budget / pdata->rx_ring_count; - do { - last_processed = processed; - - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - /* Cleanup Tx ring first */ - xgbe_tx_poll(channel); - - /* Process Rx ring next */ - if (ring_budget > (budget - processed)) - ring_budget = budget - processed; - processed += xgbe_rx_poll(channel, ring_budget); - } - } while ((processed < budget) && (processed != last_processed)); - - DBGPR("<--xgbe_all_poll: received = %d\n", processed); - - return processed; -} diff --git a/sys/dev/axgbe/xgbe-i2c.c b/sys/dev/axgbe/xgbe-i2c.c new file mode 
100644 index 000000000000..b24d19f19e0a --- /dev/null +++ b/sys/dev/axgbe/xgbe-i2c.c @@ -0,0 +1,532 @@ +/* + * AMD 10Gb Ethernet driver + * + * Copyright (c) 2020 Advanced Micro Devices, Inc. + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. 
Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. 
nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. 
Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include "xgbe.h" +#include "xgbe-common.h" + +#define XGBE_ABORT_COUNT 500 +#define XGBE_DISABLE_COUNT 1000 + +#define XGBE_STD_SPEED 1 + +#define XGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX) +#define XGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX) +#define XGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX) +#define XGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX) +#define XGBE_DEFAULT_INT_MASK (XGBE_INTR_RX_FULL | \ + XGBE_INTR_TX_EMPTY | \ + XGBE_INTR_TX_ABRT | \ + XGBE_INTR_STOP_DET) + +#define XGBE_I2C_READ BIT(8) +#define XGBE_I2C_STOP BIT(9) + +static int +xgbe_i2c_abort(struct xgbe_prv_data *pdata) +{ + unsigned int wait = XGBE_ABORT_COUNT; + + /* Must be enabled to recognize the abort request */ + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1); + + /* Issue the abort */ + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1); + + while (wait--) { + if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT)) + return (0); + + DELAY(500); + } + + return (-EBUSY); +} + +static int +xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable) +{ + unsigned int wait = XGBE_DISABLE_COUNT; + unsigned int mode = enable ? 
1 : 0; + + while (wait--) { + XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode); + if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode) + return (0); + + DELAY(100); + } + + return (-EBUSY); +} + +static int +xgbe_i2c_disable(struct xgbe_prv_data *pdata) +{ + unsigned int ret; + + ret = xgbe_i2c_set_enable(pdata, false); + if (ret) { + /* Disable failed, try an abort */ + ret = xgbe_i2c_abort(pdata); + if (ret) { + axgbe_error("%s: i2c_abort %d\n", __func__, ret); + return (ret); + } + + /* Abort succeeded, try to disable again */ + ret = xgbe_i2c_set_enable(pdata, false); + } + + axgbe_printf(3, "%s: final i2c_disable %d\n", __func__, ret); + return (ret); +} + +static int +xgbe_i2c_enable(struct xgbe_prv_data *pdata) +{ + return (xgbe_i2c_set_enable(pdata, true)); +} + +static void +xgbe_i2c_clear_all_interrupts(struct xgbe_prv_data *pdata) +{ + XI2C_IOREAD(pdata, IC_CLR_INTR); +} + +static void +xgbe_i2c_disable_interrupts(struct xgbe_prv_data *pdata) +{ + XI2C_IOWRITE(pdata, IC_INTR_MASK, 0); +} + +static void +xgbe_i2c_enable_interrupts(struct xgbe_prv_data *pdata) +{ + XI2C_IOWRITE(pdata, IC_INTR_MASK, XGBE_DEFAULT_INT_MASK); +} + +static void +xgbe_i2c_write(struct xgbe_prv_data *pdata) +{ + struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int tx_slots, cmd; + + /* Configured to never receive Rx overflows, so fill up Tx fifo */ + tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR); + axgbe_printf(3, "%s: tx_slots %d tx_len %d\n", __func__, tx_slots, + state->tx_len); + + while (tx_slots && state->tx_len) { + if (state->op->cmd == XGBE_I2C_CMD_READ) + cmd = XGBE_I2C_READ; + else + cmd = *state->tx_buf++; + + axgbe_printf(3, "%s: cmd %d tx_len %d\n", __func__, cmd, + state->tx_len); + + if (state->tx_len == 1) + XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1); + + XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd); + + tx_slots--; + state->tx_len--; + } + + /* No more Tx operations, so ignore TX_EMPTY and return */ + if (!state->tx_len) + 
XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0); +} + +static void +xgbe_i2c_read(struct xgbe_prv_data *pdata) +{ + struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int rx_slots; + + /* Anything to be read? */ + axgbe_printf(3, "%s: op cmd %d\n", __func__, state->op->cmd); + if (state->op->cmd != XGBE_I2C_CMD_READ) + return; + + rx_slots = XI2C_IOREAD(pdata, IC_RXFLR); + axgbe_printf(3, "%s: rx_slots %d rx_len %d\n", __func__, rx_slots, + state->rx_len); + + while (rx_slots && state->rx_len) { + *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD); + state->rx_len--; + rx_slots--; + } +} + +static void +xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata, unsigned int isr) +{ + struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; + + if (isr & XGBE_INTR_TX_ABRT) { + state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE); + XI2C_IOREAD(pdata, IC_CLR_TX_ABRT); + } + + if (isr & XGBE_INTR_STOP_DET) + XI2C_IOREAD(pdata, IC_CLR_STOP_DET); +} + +static void +xgbe_i2c_isr(void *data) +{ + struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; + struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned int isr; + + isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT); + axgbe_printf(3, "%s: isr 0x%x\n", __func__, isr); + if (!isr) + goto reissue_check; + + axgbe_printf(3, "%s: I2C interrupt status=%#010x\n", __func__, isr); + + xgbe_i2c_clear_isr_interrupts(pdata, isr); + + if (isr & XGBE_INTR_TX_ABRT) { + axgbe_printf(1, "%s: I2C TX_ABRT received (%#010x) for target " + "%#04x\n", __func__, state->tx_abort_source, + state->op->target); + + xgbe_i2c_disable_interrupts(pdata); + + state->ret = -EIO; + goto out; + } + + /* Check for data in the Rx fifo */ + xgbe_i2c_read(pdata); + + /* Fill up the Tx fifo next */ + xgbe_i2c_write(pdata); + +out: + /* Complete on an error or STOP condition */ + axgbe_printf(3, "%s: ret %d stop %d\n", __func__, state->ret, + XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET)); + + if (state->ret || 
XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET)) + pdata->i2c_complete = true; + + return; + +reissue_check: + /* Reissue interrupt if status is not clear */ + if (pdata->vdata->irq_reissue_support) + XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 2); +} + +static void +xgbe_i2c_set_mode(struct xgbe_prv_data *pdata) +{ + unsigned int reg; + + reg = XI2C_IOREAD(pdata, IC_CON); + XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1); + XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1); + XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1); + XI2C_SET_BITS(reg, IC_CON, SPEED, XGBE_STD_SPEED); + XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1); + XI2C_IOWRITE(pdata, IC_CON, reg); +} + +static void +xgbe_i2c_get_features(struct xgbe_prv_data *pdata) +{ + struct xgbe_i2c *i2c = &pdata->i2c; + unsigned int reg; + + reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1); + i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, + MAX_SPEED_MODE); + i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, + RX_BUFFER_DEPTH); + i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, + TX_BUFFER_DEPTH); + + axgbe_printf(3, "%s: I2C features: %s=%u, %s=%u, %s=%u\n", __func__, + "MAX_SPEED_MODE", i2c->max_speed_mode, + "RX_BUFFER_DEPTH", i2c->rx_fifo_size, + "TX_BUFFER_DEPTH", i2c->tx_fifo_size); +} + +static void +xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr) +{ + XI2C_IOWRITE(pdata, IC_TAR, addr); +} + +static void +xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata) +{ + xgbe_i2c_isr(pdata); +} + +static int +xgbe_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *op) +{ + struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; + unsigned long timeout; + int ret; + + mtx_lock(&pdata->i2c_mutex); + + axgbe_printf(3, "i2c xfer started ---->>>\n"); + + ret = xgbe_i2c_disable(pdata); + if (ret) { + axgbe_error("failed to disable i2c master\n"); + goto out; + } + + xgbe_i2c_set_target(pdata, op->target); + + memset(state, 0, sizeof(*state)); + state->op = op; + state->tx_len = op->len; + 
state->tx_buf = op->buf; + state->rx_len = op->len; + state->rx_buf = op->buf; + + xgbe_i2c_clear_all_interrupts(pdata); + ret = xgbe_i2c_enable(pdata); + if (ret) { + axgbe_error("failed to enable i2c master\n"); + goto out; + } + + /* Enabling the interrupts will cause the TX FIFO empty interrupt to + * fire and begin to process the command via the ISR. + */ + xgbe_i2c_enable_interrupts(pdata); + + timeout = ticks + (20 * hz); + while (ticks < timeout) { + + if (!pdata->i2c_complete) { + DELAY(200); + continue; + } + + axgbe_printf(1, "%s: I2C OP complete\n", __func__); + break; + } + + if ((ticks >= timeout) && !pdata->i2c_complete) { + axgbe_error("%s: operation timed out\n", __func__); + ret = -ETIMEDOUT; + goto disable; + } + + ret = state->ret; + axgbe_printf(3, "%s: i2c xfer ret %d abrt_source 0x%x \n", __func__, + ret, state->tx_abort_source); + if (ret) { + + axgbe_error("%s: i2c xfer ret %d abrt_source 0x%x \n", __func__, + ret, state->tx_abort_source); + if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK) + ret = -ENOTCONN; + else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST) + ret = -EAGAIN; + } + + axgbe_printf(3, "i2c xfer finished ---->>>\n"); + +disable: + pdata->i2c_complete = false; + xgbe_i2c_disable_interrupts(pdata); + xgbe_i2c_disable(pdata); + +out: + mtx_unlock(&pdata->i2c_mutex); + return (ret); +} + +static void +xgbe_i2c_stop(struct xgbe_prv_data *pdata) +{ + if (!pdata->i2c.started) + return; + + axgbe_printf(3, "stopping I2C\n"); + + pdata->i2c.started = 0; + + xgbe_i2c_disable_interrupts(pdata); + xgbe_i2c_disable(pdata); + xgbe_i2c_clear_all_interrupts(pdata); +} + +static int +xgbe_i2c_start(struct xgbe_prv_data *pdata) +{ + if (pdata->i2c.started) + return (0); + + pdata->i2c.started = 1; + + return (0); +} + +static int +xgbe_i2c_init(struct xgbe_prv_data *pdata) +{ + int ret; + + /* initialize lock for i2c */ + mtx_init(&pdata->i2c_mutex, "xgbe i2c mutex lock", NULL, MTX_DEF); + pdata->i2c_complete = false; + + 
xgbe_i2c_disable_interrupts(pdata); + + ret = xgbe_i2c_disable(pdata); + if (ret) { + axgbe_error("failed to disable i2c master\n"); + return (ret); + } + + xgbe_i2c_get_features(pdata); + + xgbe_i2c_set_mode(pdata); + + xgbe_i2c_clear_all_interrupts(pdata); + + xgbe_dump_i2c_registers(pdata); + + return (0); +} + +void +xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *i2c_if) +{ + i2c_if->i2c_init = xgbe_i2c_init; + + i2c_if->i2c_start = xgbe_i2c_start; + i2c_if->i2c_stop = xgbe_i2c_stop; + + i2c_if->i2c_xfer = xgbe_i2c_xfer; + + i2c_if->i2c_isr = xgbe_i2c_combined_isr; +} diff --git a/sys/dev/axgbe/xgbe-mdio.c b/sys/dev/axgbe/xgbe-mdio.c index 850a58e04107..a716c1a7b797 100644 --- a/sys/dev/axgbe/xgbe-mdio.c +++ b/sys/dev/axgbe/xgbe-mdio.c @@ -1,1174 +1,1634 @@ /* * AMD 10Gb Ethernet driver * + * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. + * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. 
* * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. 
* * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); -#include -#include - #include "xgbe.h" #include "xgbe-common.h" static void xgbe_an_state_machine(struct xgbe_prv_data *pdata); -static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata) +static void +xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata) { - unsigned int reg; - - reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + int reg; - reg |= XGBE_KR_TRAINING_ENABLE; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); + reg &= ~XGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); } -static void xgbe_an_disable_kr_training(struct xgbe_prv_data *pdata) +static void +xgbe_an37_disable_interrupts(struct xgbe_prv_data *pdata) { - unsigned int reg; + int reg; - reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); + reg &= ~XGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); - reg &= ~XGBE_KR_TRAINING_ENABLE; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); + reg &= ~XGBE_PCS_CL37_BP; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); } -static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata) +static void +xgbe_an37_enable_interrupts(struct xgbe_prv_data *pdata) { - unsigned int reg; - - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + int reg; - reg |= MDIO_CTRL1_LPOWER; - XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); + reg |= XGBE_PCS_CL37_BP; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); - DELAY(75); - - reg &= ~MDIO_CTRL1_LPOWER; - XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); + reg |= XGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, 
MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); } -static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata) +static void +xgbe_an73_clear_interrupts(struct xgbe_prv_data *pdata) { - /* Assert Rx and Tx ratechange */ - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1); + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); } -static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata) +static void +xgbe_an73_disable_interrupts(struct xgbe_prv_data *pdata) { - unsigned int wait; - u16 status; - - /* Release Rx and Tx ratechange */ - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0); + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); +} - /* Wait for Rx and Tx ready */ - wait = XGBE_RATECHANGE_COUNT; - while (wait--) { - DELAY(50); +static void +xgbe_an73_enable_interrupts(struct xgbe_prv_data *pdata) +{ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK); +} - status = XSIR0_IOREAD(pdata, SIR0_STATUS); - if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && - XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) - goto rx_reset; +static void +xgbe_an_enable_interrupts(struct xgbe_prv_data *pdata) +{ + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + xgbe_an73_enable_interrupts(pdata); + break; + case XGBE_AN_MODE_CL37: + case XGBE_AN_MODE_CL37_SGMII: + xgbe_an37_enable_interrupts(pdata); + break; + default: + break; } - -rx_reset: - /* Perform Rx reset for the DFE changes */ - XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1); } -static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata) +static void +xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata) { - unsigned int reg; - - /* Enable KR training */ - xgbe_an_enable_kr_training(pdata); + xgbe_an73_clear_interrupts(pdata); + xgbe_an37_clear_interrupts(pdata); +} +static void +xgbe_kr_mode(struct xgbe_prv_data *pdata) +{ /* Set MAC to 10G speed */ - 
pdata->hw_if.set_xgmii_speed(pdata); - - /* Set PCS to KR/10G speed */ - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); - reg &= ~MDIO_PCS_CTRL2_TYPE; - reg |= MDIO_PCS_CTRL2_10GBR; - XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg); + pdata->hw_if.set_speed(pdata, SPEED_10000); - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); - reg &= ~MDIO_CTRL1_SPEEDSEL; - reg |= MDIO_CTRL1_SPEED10G; - XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); - - xgbe_pcs_power_cycle(pdata); - - /* Set SerDes to 10G speed */ - xgbe_serdes_start_ratechange(pdata); - - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL); - - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, - pdata->serdes_cdr_rate[XGBE_SPEED_10000]); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP, - pdata->serdes_tx_amp[XGBE_SPEED_10000]); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, - pdata->serdes_blwc[XGBE_SPEED_10000]); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG, - pdata->serdes_pq_skew[XGBE_SPEED_10000]); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG, - pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]); - XRXTX_IOWRITE(pdata, RXTX_REG22, - pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]); - - xgbe_serdes_complete_ratechange(pdata); + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KR); } -static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata) +static void +xgbe_kx_2500_mode(struct xgbe_prv_data *pdata) { - unsigned int reg; - - /* Disable KR training */ - xgbe_an_disable_kr_training(pdata); - /* Set MAC to 2.5G speed */ - pdata->hw_if.set_gmii_2500_speed(pdata); - - /* Set PCS to KX/1G speed */ - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); - reg &= ~MDIO_PCS_CTRL2_TYPE; - reg |= MDIO_PCS_CTRL2_10GBX; - XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg); + 
pdata->hw_if.set_speed(pdata, SPEED_2500); - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); - reg &= ~MDIO_CTRL1_SPEEDSEL; - reg |= MDIO_CTRL1_SPEED1G; - XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_2500); +} - xgbe_pcs_power_cycle(pdata); +static void +xgbe_kx_1000_mode(struct xgbe_prv_data *pdata) +{ + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); - /* Set SerDes to 2.5G speed */ - xgbe_serdes_start_ratechange(pdata); + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_1000); +} - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL); +static void +xgbe_sfi_mode(struct xgbe_prv_data *pdata) +{ + /* If a KR re-driver is present, change to KR mode instead */ + if (pdata->kr_redrv) + return (xgbe_kr_mode(pdata)); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, - pdata->serdes_cdr_rate[XGBE_SPEED_2500]); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP, - pdata->serdes_tx_amp[XGBE_SPEED_2500]); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, - pdata->serdes_blwc[XGBE_SPEED_2500]); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG, - pdata->serdes_pq_skew[XGBE_SPEED_2500]); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG, - pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]); - XRXTX_IOWRITE(pdata, RXTX_REG22, - pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]); + /* Set MAC to 10G speed */ + pdata->hw_if.set_speed(pdata, SPEED_10000); - xgbe_serdes_complete_ratechange(pdata); + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SFI); } -static void xgbe_gmii_mode(struct xgbe_prv_data *pdata) +static void +xgbe_x_mode(struct xgbe_prv_data 
*pdata) { - unsigned int reg; - - /* Disable KR training */ - xgbe_an_disable_kr_training(pdata); - /* Set MAC to 1G speed */ - pdata->hw_if.set_gmii_speed(pdata); + pdata->hw_if.set_speed(pdata, SPEED_1000); - /* Set PCS to KX/1G speed */ - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); - reg &= ~MDIO_PCS_CTRL2_TYPE; - reg |= MDIO_PCS_CTRL2_10GBX; - XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg); + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_X); +} - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); - reg &= ~MDIO_CTRL1_SPEEDSEL; - reg |= MDIO_CTRL1_SPEED1G; - XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); +static void +xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) +{ + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); - xgbe_pcs_power_cycle(pdata); + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000); +} - /* Set SerDes to 1G speed */ - xgbe_serdes_start_ratechange(pdata); +static void +xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata) +{ + /* Set MAC to 1G speed */ + pdata->hw_if.set_speed(pdata, SPEED_1000); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL); + /* Call PHY implementation support to complete rate change */ + pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_100); +} - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, - pdata->serdes_cdr_rate[XGBE_SPEED_1000]); - XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP, - pdata->serdes_tx_amp[XGBE_SPEED_1000]); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, - pdata->serdes_blwc[XGBE_SPEED_1000]); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG, - pdata->serdes_pq_skew[XGBE_SPEED_1000]); - XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG, - 
pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]); - XRXTX_IOWRITE(pdata, RXTX_REG22, - pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]); +static enum xgbe_mode +xgbe_cur_mode(struct xgbe_prv_data *pdata) +{ + return (pdata->phy_if.phy_impl.cur_mode(pdata)); +} - xgbe_serdes_complete_ratechange(pdata); +static bool +xgbe_in_kr_mode(struct xgbe_prv_data *pdata) +{ + return (xgbe_cur_mode(pdata) == XGBE_MODE_KR); } -static void xgbe_cur_mode(struct xgbe_prv_data *pdata, - enum xgbe_mode *mode) +static void +xgbe_change_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { - unsigned int reg; + switch (mode) { + case XGBE_MODE_KX_1000: + xgbe_kx_1000_mode(pdata); + break; + case XGBE_MODE_KX_2500: + xgbe_kx_2500_mode(pdata); + break; + case XGBE_MODE_KR: + xgbe_kr_mode(pdata); + break; + case XGBE_MODE_SGMII_100: + xgbe_sgmii_100_mode(pdata); + break; + case XGBE_MODE_SGMII_1000: + xgbe_sgmii_1000_mode(pdata); + break; + case XGBE_MODE_X: + xgbe_x_mode(pdata); + break; + case XGBE_MODE_SFI: + xgbe_sfi_mode(pdata); + break; + case XGBE_MODE_UNKNOWN: + break; + default: + axgbe_error("invalid operation mode requested (%u)\n", mode); + } +} - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); - if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR) - *mode = XGBE_MODE_KR; - else - *mode = XGBE_MODE_KX; +static void +xgbe_switch_mode(struct xgbe_prv_data *pdata) +{ + xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); } -static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata) +static bool +xgbe_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { - enum xgbe_mode mode; + if (mode == xgbe_cur_mode(pdata)) + return (false); - xgbe_cur_mode(pdata, &mode); + xgbe_change_mode(pdata, mode); - return (mode == XGBE_MODE_KR); + return (true); } -static void xgbe_switch_mode(struct xgbe_prv_data *pdata) +static bool +xgbe_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { - /* If we are in KR switch to KX, and vice-versa */ - if (xgbe_in_kr_mode(pdata)) 
{ - if (pdata->speed_set == XGBE_SPEEDSET_1000_10000) - xgbe_gmii_mode(pdata); - else - xgbe_gmii_2500_mode(pdata); - } else { - xgbe_xgmii_mode(pdata); - } + return (pdata->phy_if.phy_impl.use_mode(pdata, mode)); } -static void xgbe_set_mode(struct xgbe_prv_data *pdata, - enum xgbe_mode mode) +static void +xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable, bool restart) { - enum xgbe_mode cur_mode; + unsigned int reg; - xgbe_cur_mode(pdata, &cur_mode); - if (mode != cur_mode) - xgbe_switch_mode(pdata); -} + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1); + reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE; -static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata) -{ - if (pdata->phy.autoneg == AUTONEG_ENABLE) { - if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) - return true; - } else { - if (pdata->phy.speed == SPEED_10000) - return true; - } + if (enable) + reg |= MDIO_VEND2_CTRL1_AN_ENABLE; - return false; + if (restart) + reg |= MDIO_VEND2_CTRL1_AN_RESTART; + + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg); } -static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata) +static void +xgbe_an37_restart(struct xgbe_prv_data *pdata) { - if (pdata->phy.autoneg == AUTONEG_ENABLE) { - if (pdata->phy.advertising & ADVERTISED_2500baseX_Full) - return true; - } else { - if (pdata->phy.speed == SPEED_2500) - return true; - } - - return false; + xgbe_an37_enable_interrupts(pdata); + xgbe_an37_set(pdata, true, true); } -static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata) +static void +xgbe_an37_disable(struct xgbe_prv_data *pdata) { - if (pdata->phy.autoneg == AUTONEG_ENABLE) { - if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full) - return true; - } else { - if (pdata->phy.speed == SPEED_1000) - return true; - } - - return false; + xgbe_an37_set(pdata, false, false); + xgbe_an37_disable_interrupts(pdata); } -static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart) +static void +xgbe_an73_set(struct xgbe_prv_data 
*pdata, bool enable, bool restart) { unsigned int reg; + /* Disable KR training for now */ + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + reg &= ~XGBE_KR_TRAINING_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); + + /* Update AN settings */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); reg &= ~MDIO_AN_CTRL1_ENABLE; if (enable) reg |= MDIO_AN_CTRL1_ENABLE; if (restart) reg |= MDIO_AN_CTRL1_RESTART; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); } -static void xgbe_restart_an(struct xgbe_prv_data *pdata) +static void +xgbe_an73_restart(struct xgbe_prv_data *pdata) { - xgbe_set_an(pdata, true, true); + xgbe_an73_enable_interrupts(pdata); + xgbe_an73_set(pdata, true, true); } -static void xgbe_disable_an(struct xgbe_prv_data *pdata) +static void +xgbe_an73_disable(struct xgbe_prv_data *pdata) { - xgbe_set_an(pdata, false, false); + xgbe_an73_set(pdata, false, false); + xgbe_an73_disable_interrupts(pdata); + + pdata->an_start = 0; } -static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata, - enum xgbe_rx *state) +static void +xgbe_an_restart(struct xgbe_prv_data *pdata) +{ + if (pdata->phy_if.phy_impl.an_pre) + pdata->phy_if.phy_impl.an_pre(pdata); + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + xgbe_an73_restart(pdata); + break; + case XGBE_AN_MODE_CL37: + case XGBE_AN_MODE_CL37_SGMII: + xgbe_an37_restart(pdata); + break; + default: + break; + } +} + +static void +xgbe_an_disable(struct xgbe_prv_data *pdata) +{ + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + xgbe_an73_disable(pdata); + break; + case XGBE_AN_MODE_CL37: + case XGBE_AN_MODE_CL37_SGMII: + xgbe_an37_disable(pdata); + break; + default: + break; + } +} + +static void +xgbe_an_disable_all(struct xgbe_prv_data *pdata) +{ + xgbe_an73_disable(pdata); + xgbe_an37_disable(pdata); +} + 
+static enum xgbe_an +xgbe_an73_tx_training(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { unsigned int ad_reg, lp_reg, reg; *state = XGBE_RX_COMPLETE; /* If we're not in KR mode then we're done */ if (!xgbe_in_kr_mode(pdata)) - return XGBE_AN_PAGE_RECEIVED; + return (XGBE_AN_PAGE_RECEIVED); /* Enable/Disable FEC */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL); reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE); if ((ad_reg & 0xc000) && (lp_reg & 0xc000)) reg |= pdata->fec_ability; XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); /* Start KR training */ - reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); - if (reg & XGBE_KR_TRAINING_ENABLE) { - XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1); + if (pdata->phy_if.phy_impl.kr_training_pre) + pdata->phy_if.phy_impl.kr_training_pre(pdata); - reg |= XGBE_KR_TRAINING_START; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, - reg); + /* Start KR training */ + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + reg |= XGBE_KR_TRAINING_ENABLE; + reg |= XGBE_KR_TRAINING_START; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); - XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0); - } + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); - return XGBE_AN_PAGE_RECEIVED; + return (XGBE_AN_PAGE_RECEIVED); } -static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata, - enum xgbe_rx *state) +static enum xgbe_an +xgbe_an73_tx_xnp(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { - u16 msg; + uint16_t msg; *state = XGBE_RX_XNP; msg = XGBE_XNP_MCF_NULL_MESSAGE; msg |= XGBE_XNP_MP_FORMATTED; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0); XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0); XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, 
msg); - return XGBE_AN_PAGE_RECEIVED; + return (XGBE_AN_PAGE_RECEIVED); } -static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata, - enum xgbe_rx *state) +static enum xgbe_an +xgbe_an73_rx_bpa(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { unsigned int link_support; unsigned int reg, ad_reg, lp_reg; /* Read Base Ability register 2 first */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); /* Check for a supported mode, otherwise restart in a different one */ link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20; if (!(reg & link_support)) - return XGBE_AN_INCOMPAT_LINK; + return (XGBE_AN_INCOMPAT_LINK); /* Check Extended Next Page support */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); - return ((ad_reg & XGBE_XNP_NP_EXCHANGE) || + return (((ad_reg & XGBE_XNP_NP_EXCHANGE) || (lp_reg & XGBE_XNP_NP_EXCHANGE)) - ? xgbe_an_tx_xnp(pdata, state) - : xgbe_an_tx_training(pdata, state); + ? xgbe_an73_tx_xnp(pdata, state) + : xgbe_an73_tx_training(pdata, state)); } -static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata, - enum xgbe_rx *state) +static enum xgbe_an +xgbe_an73_rx_xnp(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { unsigned int ad_reg, lp_reg; /* Check Extended Next Page support */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX); - return ((ad_reg & XGBE_XNP_NP_EXCHANGE) || + return (((ad_reg & XGBE_XNP_NP_EXCHANGE) || (lp_reg & XGBE_XNP_NP_EXCHANGE)) - ? xgbe_an_tx_xnp(pdata, state) - : xgbe_an_tx_training(pdata, state); + ? 
xgbe_an73_tx_xnp(pdata, state) + : xgbe_an73_tx_training(pdata, state)); } -static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata) +static enum xgbe_an +xgbe_an73_page_received(struct xgbe_prv_data *pdata) { enum xgbe_rx *state; unsigned long an_timeout; enum xgbe_an ret; if (!pdata->an_start) { pdata->an_start = ticks; } else { an_timeout = pdata->an_start + ((uint64_t)XGBE_AN_MS_TIMEOUT * (uint64_t)hz) / 1000ull; if ((int)(ticks - an_timeout) > 0) { /* Auto-negotiation timed out, reset state */ pdata->kr_state = XGBE_RX_BPA; pdata->kx_state = XGBE_RX_BPA; pdata->an_start = ticks; + + axgbe_printf(2, "CL73 AN timed out, resetting state\n"); } } - state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state - : &pdata->kx_state; + state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state : &pdata->kx_state; switch (*state) { case XGBE_RX_BPA: - ret = xgbe_an_rx_bpa(pdata, state); + ret = xgbe_an73_rx_bpa(pdata, state); break; case XGBE_RX_XNP: - ret = xgbe_an_rx_xnp(pdata, state); + ret = xgbe_an73_rx_xnp(pdata, state); break; default: ret = XGBE_AN_ERROR; } - return ret; + return (ret); } -static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata) +static enum xgbe_an +xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) { /* Be sure we aren't looping trying to negotiate */ if (xgbe_in_kr_mode(pdata)) { pdata->kr_state = XGBE_RX_ERROR; - if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) && - !(pdata->phy.advertising & ADVERTISED_2500baseX_Full)) - return XGBE_AN_NO_LINK; + if (!(XGBE_ADV(&pdata->phy, 1000baseKX_Full)) && + !(XGBE_ADV(&pdata->phy, 2500baseX_Full))) + return (XGBE_AN_NO_LINK); if (pdata->kx_state != XGBE_RX_BPA) - return XGBE_AN_NO_LINK; + return (XGBE_AN_NO_LINK); } else { pdata->kx_state = XGBE_RX_ERROR; - if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full)) - return XGBE_AN_NO_LINK; + if (!(XGBE_ADV(&pdata->phy, 10000baseKR_Full))) + return (XGBE_AN_NO_LINK); if (pdata->kr_state != XGBE_RX_BPA) - return XGBE_AN_NO_LINK; 
+ return (XGBE_AN_NO_LINK); } - xgbe_disable_an(pdata); + xgbe_an_disable(pdata); xgbe_switch_mode(pdata); - xgbe_restart_an(pdata); + xgbe_an_restart(pdata); - return XGBE_AN_INCOMPAT_LINK; + return (XGBE_AN_INCOMPAT_LINK); } -static void xgbe_an_isr(void *data) +static void +xgbe_an37_isr(struct xgbe_prv_data *pdata) { - struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; + unsigned int reg; /* Disable AN interrupts */ - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); + xgbe_an37_disable_interrupts(pdata); + + /* Save the interrupt(s) that fired */ + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); + pdata->an_int = reg & XGBE_AN_CL37_INT_MASK; + pdata->an_status = reg & ~XGBE_AN_CL37_INT_MASK; + + if (pdata->an_int) { + /* Clear the interrupt(s) that fired and process them */ + reg &= ~XGBE_AN_CL37_INT_MASK; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); + + xgbe_an_state_machine(pdata); + } else { + /* Enable AN interrupts */ + xgbe_an37_enable_interrupts(pdata); + + /* Reissue interrupt if status is not clear */ + if (pdata->vdata->irq_reissue_support) + XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3); + } +} + +static void +xgbe_an73_isr(struct xgbe_prv_data *pdata) +{ + /* Disable AN interrupts */ + xgbe_an73_disable_interrupts(pdata); /* Save the interrupt(s) that fired */ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); if (pdata->an_int) { /* Clear the interrupt(s) that fired and process them */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); xgbe_an_state_machine(pdata); } else { /* Enable AN interrupts */ - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, - XGBE_AN_INT_MASK); + xgbe_an73_enable_interrupts(pdata); + + /* Reissue interrupt if status is not clear */ + if (pdata->vdata->irq_reissue_support) + XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3); + } +} + +static void +xgbe_an_isr_task(unsigned long data) +{ + struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; + + 
axgbe_printf(2, "AN interrupt received\n"); + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + xgbe_an73_isr(pdata); + break; + case XGBE_AN_MODE_CL37: + case XGBE_AN_MODE_CL37_SGMII: + xgbe_an37_isr(pdata); + break; + default: + break; } } -static void xgbe_an_state_machine(struct xgbe_prv_data *pdata) +static void +xgbe_an_combined_isr(struct xgbe_prv_data *pdata) +{ + xgbe_an_isr_task((unsigned long)pdata); +} + +static const char * +xgbe_state_as_string(enum xgbe_an state) +{ + switch (state) { + case XGBE_AN_READY: + return ("Ready"); + case XGBE_AN_PAGE_RECEIVED: + return ("Page-Received"); + case XGBE_AN_INCOMPAT_LINK: + return ("Incompatible-Link"); + case XGBE_AN_COMPLETE: + return ("Complete"); + case XGBE_AN_NO_LINK: + return ("No-Link"); + case XGBE_AN_ERROR: + return ("Error"); + default: + return ("Undefined"); + } +} + +static void +xgbe_an37_state_machine(struct xgbe_prv_data *pdata) { enum xgbe_an cur_state = pdata->an_state; - sx_xlock(&pdata->an_mutex); + if (!pdata->an_int) + return; + + if (pdata->an_int & XGBE_AN_CL37_INT_CMPLT) { + pdata->an_state = XGBE_AN_COMPLETE; + pdata->an_int &= ~XGBE_AN_CL37_INT_CMPLT; + + /* If SGMII is enabled, check the link status */ + if ((pdata->an_mode == XGBE_AN_MODE_CL37_SGMII) && + !(pdata->an_status & XGBE_SGMII_AN_LINK_STATUS)) + pdata->an_state = XGBE_AN_NO_LINK; + } + + axgbe_printf(2, "%s: CL37 AN %s\n", __func__, + xgbe_state_as_string(pdata->an_state)); + + cur_state = pdata->an_state; + + switch (pdata->an_state) { + case XGBE_AN_READY: + break; + + case XGBE_AN_COMPLETE: + axgbe_printf(2, "Auto negotiation successful\n"); + break; + + case XGBE_AN_NO_LINK: + break; + + default: + pdata->an_state = XGBE_AN_ERROR; + } + + if (pdata->an_state == XGBE_AN_ERROR) { + axgbe_printf(2, "error during auto-negotiation, state=%u\n", + cur_state); + + pdata->an_int = 0; + xgbe_an37_clear_interrupts(pdata); + } + + if (pdata->an_state >= XGBE_AN_COMPLETE) { + 
pdata->an_result = pdata->an_state; + pdata->an_state = XGBE_AN_READY; + + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + + axgbe_printf(2, "CL37 AN result: %s\n", + xgbe_state_as_string(pdata->an_result)); + } + + axgbe_printf(2, "%s: an_state %d an_int %d an_mode %d an_status %d\n", + __func__, pdata->an_state, pdata->an_int, pdata->an_mode, + pdata->an_status); + + xgbe_an37_enable_interrupts(pdata); +} + +static void +xgbe_an73_state_machine(struct xgbe_prv_data *pdata) +{ + enum xgbe_an cur_state = pdata->an_state; if (!pdata->an_int) goto out; next_int: - if (pdata->an_int & XGBE_AN_PG_RCV) { + if (pdata->an_int & XGBE_AN_CL73_PG_RCV) { pdata->an_state = XGBE_AN_PAGE_RECEIVED; - pdata->an_int &= ~XGBE_AN_PG_RCV; - } else if (pdata->an_int & XGBE_AN_INC_LINK) { + pdata->an_int &= ~XGBE_AN_CL73_PG_RCV; + } else if (pdata->an_int & XGBE_AN_CL73_INC_LINK) { pdata->an_state = XGBE_AN_INCOMPAT_LINK; - pdata->an_int &= ~XGBE_AN_INC_LINK; - } else if (pdata->an_int & XGBE_AN_INT_CMPLT) { + pdata->an_int &= ~XGBE_AN_CL73_INC_LINK; + } else if (pdata->an_int & XGBE_AN_CL73_INT_CMPLT) { pdata->an_state = XGBE_AN_COMPLETE; - pdata->an_int &= ~XGBE_AN_INT_CMPLT; + pdata->an_int &= ~XGBE_AN_CL73_INT_CMPLT; } else { pdata->an_state = XGBE_AN_ERROR; } - pdata->an_result = pdata->an_state; - again: + axgbe_printf(2, "CL73 AN %s\n", + xgbe_state_as_string(pdata->an_state)); + cur_state = pdata->an_state; switch (pdata->an_state) { case XGBE_AN_READY: pdata->an_supported = 0; break; case XGBE_AN_PAGE_RECEIVED: - pdata->an_state = xgbe_an_page_received(pdata); + pdata->an_state = xgbe_an73_page_received(pdata); pdata->an_supported++; break; case XGBE_AN_INCOMPAT_LINK: pdata->an_supported = 0; pdata->parallel_detect = 0; - pdata->an_state = xgbe_an_incompat_link(pdata); + pdata->an_state = xgbe_an73_incompat_link(pdata); break; case XGBE_AN_COMPLETE: pdata->parallel_detect = pdata->an_supported ? 
0 : 1; + axgbe_printf(2, "%s successful\n", + pdata->an_supported ? "Auto negotiation" + : "Parallel detection"); break; case XGBE_AN_NO_LINK: break; default: pdata->an_state = XGBE_AN_ERROR; } if (pdata->an_state == XGBE_AN_NO_LINK) { pdata->an_int = 0; - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); + xgbe_an73_clear_interrupts(pdata); } else if (pdata->an_state == XGBE_AN_ERROR) { + axgbe_printf(2, + "error during auto-negotiation, state=%u\n", + cur_state); + pdata->an_int = 0; - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); + xgbe_an73_clear_interrupts(pdata); } if (pdata->an_state >= XGBE_AN_COMPLETE) { pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; pdata->kr_state = XGBE_RX_BPA; pdata->kx_state = XGBE_RX_BPA; pdata->an_start = 0; + + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + + axgbe_printf(2, "CL73 AN result: %s\n", + xgbe_state_as_string(pdata->an_result)); } if (cur_state != pdata->an_state) goto again; if (pdata->an_int) goto next_int; out: /* Enable AN interrupts on the way out */ - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK); + xgbe_an73_enable_interrupts(pdata); +} + +static void +xgbe_an_state_machine(struct xgbe_prv_data *pdata) +{ + sx_xlock(&pdata->an_mutex); + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + xgbe_an73_state_machine(pdata); + break; + case XGBE_AN_MODE_CL37: + case XGBE_AN_MODE_CL37_SGMII: + xgbe_an37_state_machine(pdata); + break; + default: + break; + } + + /* Reissue interrupt if status is not clear */ + if (pdata->vdata->irq_reissue_support) + XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3); sx_xunlock(&pdata->an_mutex); } -static void xgbe_an_init(struct xgbe_prv_data *pdata) +static void +xgbe_an37_init(struct xgbe_prv_data *pdata) { + struct xgbe_phy local_phy; unsigned int reg; + pdata->phy_if.phy_impl.an_advertising(pdata, &local_phy); + + axgbe_printf(2, "%s: advertising 0x%x\n", __func__, 
local_phy.advertising); + + /* Set up Advertisement register */ + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); + if (XGBE_ADV(&local_phy, Pause)) + reg |= 0x100; + else + reg &= ~0x100; + + if (XGBE_ADV(&local_phy, Asym_Pause)) + reg |= 0x80; + else + reg &= ~0x80; + + /* Full duplex, but not half */ + reg |= XGBE_AN_CL37_FD_MASK; + reg &= ~XGBE_AN_CL37_HD_MASK; + + axgbe_printf(2, "%s: Writing reg: 0x%x\n", __func__, reg); + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE, reg); + + /* Set up the Control register */ + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); + axgbe_printf(2, "%s: AN_ADVERTISE reg 0x%x an_mode %d\n", __func__, + reg, pdata->an_mode); + reg &= ~XGBE_AN_CL37_TX_CONFIG_MASK; + reg &= ~XGBE_AN_CL37_PCS_MODE_MASK; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL37: + reg |= XGBE_AN_CL37_PCS_MODE_BASEX; + break; + case XGBE_AN_MODE_CL37_SGMII: + reg |= XGBE_AN_CL37_PCS_MODE_SGMII; + break; + default: + break; + } + + reg |= XGBE_AN_CL37_MII_CTRL_8BIT; + axgbe_printf(2, "%s: Writing reg: 0x%x\n", __func__, reg); + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); + + axgbe_printf(2, "CL37 AN (%s) initialized\n", + (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII"); +} + +static void +xgbe_an73_init(struct xgbe_prv_data *pdata) +{ + /* + * This local_phy is needed because phy-v2 alters the + * advertising flag variable. 
so phy-v1 an_advertising is just copying + */ + struct xgbe_phy local_phy; + unsigned int reg; + + pdata->phy_if.phy_impl.an_advertising(pdata, &local_phy); + /* Set up Advertisement register 3 first */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); - reg &= ~0xc000; + if (XGBE_ADV(&local_phy, 10000baseR_FEC)) + reg |= 0xc000; + else + reg &= ~0xc000; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg); /* Set up Advertisement register 2 next */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); - if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) + if (XGBE_ADV(&local_phy, 10000baseKR_Full)) reg |= 0x80; else reg &= ~0x80; - if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) || - (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) + if (XGBE_ADV(&local_phy, 1000baseKX_Full) || + XGBE_ADV(&local_phy, 2500baseX_Full)) reg |= 0x20; else reg &= ~0x20; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg); /* Set up Advertisement register 1 last */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); - if (pdata->phy.advertising & ADVERTISED_Pause) + if (XGBE_ADV(&local_phy, Pause)) reg |= 0x400; else reg &= ~0x400; - if (pdata->phy.advertising & ADVERTISED_Asym_Pause) + if (XGBE_ADV(&local_phy, Asym_Pause)) reg |= 0x800; else reg &= ~0x800; /* We don't intend to perform XNP */ reg &= ~XGBE_XNP_NP_EXCHANGE; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); + + axgbe_printf(2, "CL73 AN initialized\n"); +} + +static void +xgbe_an_init(struct xgbe_prv_data *pdata) +{ + /* Set up advertisement registers based on current settings */ + pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata); + axgbe_printf(2, "%s: setting up an_mode %d\n", __func__, pdata->an_mode); + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + xgbe_an73_init(pdata); + break; + case XGBE_AN_MODE_CL37: + case XGBE_AN_MODE_CL37_SGMII: + xgbe_an37_init(pdata); + break; + default: + break; + } +} + +static 
const char * +xgbe_phy_fc_string(struct xgbe_prv_data *pdata) +{ + if (pdata->tx_pause && pdata->rx_pause) + return ("rx/tx"); + else if (pdata->rx_pause) + return ("rx"); + else if (pdata->tx_pause) + return ("tx"); + else + return ("off"); +} + +static const char * +xgbe_phy_speed_string(int speed) +{ + switch (speed) { + case SPEED_100: + return ("100Mbps"); + case SPEED_1000: + return ("1Gbps"); + case SPEED_2500: + return ("2.5Gbps"); + case SPEED_10000: + return ("10Gbps"); + case SPEED_UNKNOWN: + return ("Unknown"); + default: + return ("Unsupported"); + } +} + +static void +xgbe_phy_print_status(struct xgbe_prv_data *pdata) +{ + if (pdata->phy.link) + axgbe_printf(0, + "Link is UP - %s/%s - flow control %s\n", + xgbe_phy_speed_string(pdata->phy.speed), + pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half", + xgbe_phy_fc_string(pdata)); + else + axgbe_printf(0, "Link is DOWN\n"); } -static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) +static void +xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) { + int new_state = 0; + + axgbe_printf(1, "link %d/%d tx %d/%d rx %d/%d speed %d/%d autoneg %d/%d\n", + pdata->phy_link, pdata->phy.link, + pdata->tx_pause, pdata->phy.tx_pause, + pdata->rx_pause, pdata->phy.rx_pause, + pdata->phy_speed, pdata->phy.speed, + pdata->pause_autoneg, pdata->phy.pause_autoneg); if (pdata->phy.link) { /* Flow control support */ pdata->pause_autoneg = pdata->phy.pause_autoneg; if (pdata->tx_pause != pdata->phy.tx_pause) { - pdata->hw_if.config_tx_flow_control(pdata); + new_state = 1; + axgbe_printf(2, "tx pause %d/%d\n", pdata->tx_pause, + pdata->phy.tx_pause); pdata->tx_pause = pdata->phy.tx_pause; + pdata->hw_if.config_tx_flow_control(pdata); } if (pdata->rx_pause != pdata->phy.rx_pause) { - pdata->hw_if.config_rx_flow_control(pdata); + new_state = 1; + axgbe_printf(2, "rx pause %d/%d\n", pdata->rx_pause, + pdata->phy.rx_pause); pdata->rx_pause = pdata->phy.rx_pause; + pdata->hw_if.config_rx_flow_control(pdata); } /* Speed 
support */ if (pdata->phy_speed != pdata->phy.speed) { + new_state = 1; pdata->phy_speed = pdata->phy.speed; } if (pdata->phy_link != pdata->phy.link) { + new_state = 1; pdata->phy_link = pdata->phy.link; } } else if (pdata->phy_link) { + new_state = 1; pdata->phy_link = 0; pdata->phy_speed = SPEED_UNKNOWN; } + + axgbe_printf(2, "phy_link %d Link %d new_state %d\n", pdata->phy_link, + pdata->phy.link, new_state); + + if (new_state) + xgbe_phy_print_status(pdata); } -static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) +static bool +xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) { + return (pdata->phy_if.phy_impl.valid_speed(pdata, speed)); +} - /* Disable auto-negotiation */ - xgbe_disable_an(pdata); +static int +xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) +{ + enum xgbe_mode mode; - /* Validate/Set specified speed */ - switch (pdata->phy.speed) { - case SPEED_10000: - xgbe_set_mode(pdata, XGBE_MODE_KR); - break; + axgbe_printf(2, "fixed PHY configuration\n"); - case SPEED_2500: - case SPEED_1000: - xgbe_set_mode(pdata, XGBE_MODE_KX); + /* Disable auto-negotiation */ + xgbe_an_disable(pdata); + + /* Set specified mode for specified speed */ + mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed); + switch (mode) { + case XGBE_MODE_KX_1000: + case XGBE_MODE_KX_2500: + case XGBE_MODE_KR: + case XGBE_MODE_SGMII_100: + case XGBE_MODE_SGMII_1000: + case XGBE_MODE_X: + case XGBE_MODE_SFI: break; - + case XGBE_MODE_UNKNOWN: default: - return -EINVAL; + return (-EINVAL); } /* Validate duplex mode */ if (pdata->phy.duplex != DUPLEX_FULL) - return -EINVAL; + return (-EINVAL); - return 0; + xgbe_set_mode(pdata, mode); + + return (0); } -static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) +static int +__xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode) { + int ret; + unsigned int reg; + + sx_xlock(&pdata->an_mutex); + set_bit(XGBE_LINK_INIT, &pdata->dev_state); pdata->link_check = ticks; - if (pdata->phy.autoneg 
!= AUTONEG_ENABLE) - return xgbe_phy_config_fixed(pdata); + ret = pdata->phy_if.phy_impl.an_config(pdata); + if (ret) { + axgbe_error("%s: an_config fail %d\n", __func__, ret); + goto out; + } + + if (pdata->phy.autoneg != AUTONEG_ENABLE) { + ret = xgbe_phy_config_fixed(pdata); + if (ret || !pdata->kr_redrv) { + if (ret) + axgbe_error("%s: fix conf fail %d\n", __func__, ret); + goto out; + } + + axgbe_printf(2, "AN redriver support\n"); + } else + axgbe_printf(2, "AN PHY configuration\n"); /* Disable auto-negotiation interrupt */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK); + axgbe_printf(2, "%s: set_mode %d AN int reg value 0x%x\n", __func__, + set_mode, reg); /* Clear any auto-negotitation interrupts */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); /* Start auto-negotiation in a supported mode */ - if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) { - xgbe_set_mode(pdata, XGBE_MODE_KR); - } else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) || - (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) { - xgbe_set_mode(pdata, XGBE_MODE_KX); - } else { - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07); - return -EINVAL; + if (set_mode) { + /* Start auto-negotiation in a supported mode */ + if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { + xgbe_set_mode(pdata, XGBE_MODE_KR); + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { + xgbe_set_mode(pdata, XGBE_MODE_KX_2500); + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { + xgbe_set_mode(pdata, XGBE_MODE_KX_1000); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { + xgbe_set_mode(pdata, XGBE_MODE_SFI); + } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { + xgbe_set_mode(pdata, XGBE_MODE_X); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { + xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { + xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); + } else { + XMDIO_WRITE(pdata, 
MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07); + ret = -EINVAL; + goto out; + } } /* Disable and stop any in progress auto-negotiation */ - xgbe_disable_an(pdata); + xgbe_an_disable_all(pdata); /* Clear any auto-negotitation interrupts */ - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); + xgbe_an_clear_interrupts_all(pdata); pdata->an_result = XGBE_AN_READY; pdata->an_state = XGBE_AN_READY; pdata->kr_state = XGBE_RX_BPA; pdata->kx_state = XGBE_RX_BPA; /* Re-enable auto-negotiation interrupt */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07); + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK); /* Set up advertisement registers based on current settings */ xgbe_an_init(pdata); /* Enable and start auto-negotiation */ - xgbe_restart_an(pdata); - - return 0; -} + xgbe_an_restart(pdata); -static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) -{ - int ret; - - sx_xlock(&pdata->an_mutex); - - ret = __xgbe_phy_config_aneg(pdata); - if (ret) +out: + if (ret) { + axgbe_printf(0, "%s: set_mode %d AN int reg value 0x%x ret value %d\n", + __func__, set_mode, reg, ret); set_bit(XGBE_LINK_ERR, &pdata->dev_state); - else + } else clear_bit(XGBE_LINK_ERR, &pdata->dev_state); sx_unlock(&pdata->an_mutex); - return ret; + return (ret); +} + +static int +xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) +{ + return (__xgbe_phy_config_aneg(pdata, true)); +} + +static int +xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata) +{ + return (__xgbe_phy_config_aneg(pdata, false)); } -static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) +static bool +xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) { return (pdata->an_result == XGBE_AN_COMPLETE); } -static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata) +static void +xgbe_check_link_timeout(struct xgbe_prv_data *pdata) { unsigned long link_timeout; link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * hz); - if ((int)(ticks - link_timeout) >= 0) { + if ((int)(ticks - link_timeout) > 0) { + axgbe_printf(2, "AN link 
timeout\n"); xgbe_phy_config_aneg(pdata); } } -static void xgbe_phy_status_force(struct xgbe_prv_data *pdata) +static enum xgbe_mode +xgbe_phy_status_aneg(struct xgbe_prv_data *pdata) { - if (xgbe_in_kr_mode(pdata)) { - pdata->phy.speed = SPEED_10000; - } else { - switch (pdata->speed_set) { - case XGBE_SPEEDSET_1000_10000: - pdata->phy.speed = SPEED_1000; - break; - - case XGBE_SPEEDSET_2500_10000: - pdata->phy.speed = SPEED_2500; - break; - } - } - pdata->phy.duplex = DUPLEX_FULL; + return (pdata->phy_if.phy_impl.an_outcome(pdata)); } -static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata) +static void +xgbe_phy_status_result(struct xgbe_prv_data *pdata) { - unsigned int ad_reg, lp_reg; + enum xgbe_mode mode; - pdata->phy.lp_advertising = 0; + XGBE_ZERO_LP_ADV(&pdata->phy); if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect) - return xgbe_phy_status_force(pdata); - - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_Backplane; - - /* Compare Advertisement and Link Partner register 1 */ - ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); - lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); - if (lp_reg & 0x400) - pdata->phy.lp_advertising |= ADVERTISED_Pause; - if (lp_reg & 0x800) - pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; - - if (pdata->phy.pause_autoneg) { - /* Set flow control based on auto-negotiation result */ - pdata->phy.tx_pause = 0; - pdata->phy.rx_pause = 0; - - if (ad_reg & lp_reg & 0x400) { - pdata->phy.tx_pause = 1; - pdata->phy.rx_pause = 1; - } else if (ad_reg & lp_reg & 0x800) { - if (ad_reg & 0x400) - pdata->phy.rx_pause = 1; - else if (lp_reg & 0x400) - pdata->phy.tx_pause = 1; - } - } - - /* Compare Advertisement and Link Partner register 2 */ - ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); - lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); - if (lp_reg & 0x80) - pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; - if 
(lp_reg & 0x20) { - switch (pdata->speed_set) { - case XGBE_SPEEDSET_1000_10000: - pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; - break; - case XGBE_SPEEDSET_2500_10000: - pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full; - break; - } - } + mode = xgbe_cur_mode(pdata); + else + mode = xgbe_phy_status_aneg(pdata); - ad_reg &= lp_reg; - if (ad_reg & 0x80) { + axgbe_printf(3, "%s: xgbe mode %d\n", __func__, mode); + switch (mode) { + case XGBE_MODE_SGMII_100: + pdata->phy.speed = SPEED_100; + break; + case XGBE_MODE_X: + case XGBE_MODE_KX_1000: + case XGBE_MODE_SGMII_1000: + pdata->phy.speed = SPEED_1000; + break; + case XGBE_MODE_KX_2500: + pdata->phy.speed = SPEED_2500; + break; + case XGBE_MODE_KR: + case XGBE_MODE_SFI: pdata->phy.speed = SPEED_10000; - xgbe_set_mode(pdata, XGBE_MODE_KR); - } else if (ad_reg & 0x20) { - switch (pdata->speed_set) { - case XGBE_SPEEDSET_1000_10000: - pdata->phy.speed = SPEED_1000; - break; - - case XGBE_SPEEDSET_2500_10000: - pdata->phy.speed = SPEED_2500; - break; - } - - xgbe_set_mode(pdata, XGBE_MODE_KX); - } else { + break; + case XGBE_MODE_UNKNOWN: + default: + axgbe_printf(1, "%s: unknown mode\n", __func__); pdata->phy.speed = SPEED_UNKNOWN; } - /* Compare Advertisement and Link Partner register 3 */ - ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); - lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + pdata->phy.duplex = DUPLEX_FULL; + axgbe_printf(2, "%s: speed %d duplex %d\n", __func__, pdata->phy.speed, + pdata->phy.duplex); + + if (xgbe_set_mode(pdata, mode) && pdata->an_again) + xgbe_phy_reconfig_aneg(pdata); } -static void xgbe_phy_status(struct xgbe_prv_data *pdata) +static void +xgbe_phy_status(struct xgbe_prv_data *pdata) { - unsigned int reg, link_aneg; + bool link_aneg; + int an_restart; if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) { + axgbe_error("%s: LINK_ERR\n", __func__); pdata->phy.link = 0; goto adjust_link; } link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE); + 
axgbe_printf(3, "link_aneg - %d\n", link_aneg); /* Get the link status. Link status is latched low, so read * once to clear and then read again to get current state */ - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); - pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0; + pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata, + &an_restart); + + axgbe_printf(1, "link_status returned Link:%d an_restart:%d aneg:%d\n", + pdata->phy.link, an_restart, link_aneg); + + if (an_restart) { + xgbe_phy_config_aneg(pdata); + return; + } if (pdata->phy.link) { + axgbe_printf(2, "Link Active\n"); if (link_aneg && !xgbe_phy_aneg_done(pdata)) { + axgbe_printf(1, "phy_link set check timeout\n"); xgbe_check_link_timeout(pdata); return; } - xgbe_phy_status_aneg(pdata); + axgbe_printf(2, "%s: Link write phy_status result\n", __func__); + xgbe_phy_status_result(pdata); if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) clear_bit(XGBE_LINK_INIT, &pdata->dev_state); + } else { + axgbe_printf(2, "Link Deactive\n"); if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) { + axgbe_printf(1, "phy_link not set check timeout\n"); xgbe_check_link_timeout(pdata); - if (link_aneg) + if (link_aneg) { + axgbe_printf(2, "link_aneg case\n"); return; + } } - xgbe_phy_status_aneg(pdata); + xgbe_phy_status_result(pdata); + } adjust_link: + axgbe_printf(2, "%s: Link %d\n", __func__, pdata->phy.link); xgbe_phy_adjust_link(pdata); } -static void xgbe_phy_stop(struct xgbe_prv_data *pdata) +static void +xgbe_phy_stop(struct xgbe_prv_data *pdata) { + axgbe_printf(2, "stopping PHY\n"); - /* Disable auto-negotiation */ - xgbe_disable_an(pdata); + if (!pdata->phy_started) + return; - /* Disable auto-negotiation interrupts */ - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); + /* Indicate the PHY is down */ + pdata->phy_started = 0; + + /* Disable auto-negotiation */ + xgbe_an_disable_all(pdata); - bus_teardown_intr(pdata->dev, pdata->an_irq_res, 
pdata->an_irq_tag); + pdata->phy_if.phy_impl.stop(pdata); pdata->phy.link = 0; xgbe_phy_adjust_link(pdata); } -static int xgbe_phy_start(struct xgbe_prv_data *pdata) +static int +xgbe_phy_start(struct xgbe_prv_data *pdata) { int ret; - ret = bus_setup_intr(pdata->dev, pdata->an_irq_res, - INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_an_isr, pdata, - &pdata->an_irq_tag); + DBGPR("-->xgbe_phy_start\n"); + + ret = pdata->phy_if.phy_impl.start(pdata); if (ret) { - return -ret; + axgbe_error("%s: impl start ret %d\n", __func__, ret); + return (ret); } /* Set initial mode - call the mode setting routines * directly to insure we are properly configured */ - if (xgbe_use_xgmii_mode(pdata)) { - xgbe_xgmii_mode(pdata); - } else if (xgbe_use_gmii_mode(pdata)) { - xgbe_gmii_mode(pdata); - } else if (xgbe_use_gmii_2500_mode(pdata)) { - xgbe_gmii_2500_mode(pdata); + if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { + axgbe_printf(2, "%s: KR\n", __func__); + xgbe_kr_mode(pdata); + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { + axgbe_printf(2, "%s: KX 2500\n", __func__); + xgbe_kx_2500_mode(pdata); + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { + axgbe_printf(2, "%s: KX 1000\n", __func__); + xgbe_kx_1000_mode(pdata); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { + axgbe_printf(2, "%s: SFI\n", __func__); + xgbe_sfi_mode(pdata); + } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { + axgbe_printf(2, "%s: X\n", __func__); + xgbe_x_mode(pdata); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { + axgbe_printf(2, "%s: SGMII 1000\n", __func__); + xgbe_sgmii_1000_mode(pdata); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { + axgbe_printf(2, "%s: SGMII 100\n", __func__); + xgbe_sgmii_100_mode(pdata); } else { + axgbe_error("%s: invalid mode\n", __func__); ret = -EINVAL; - goto err_irq; + goto err_stop; } + /* Indicate the PHY is up and running */ + pdata->phy_started = 1; + /* Set up advertisement registers based on current settings */ xgbe_an_init(pdata); /* 
Enable auto-negotiation interrupts */ - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07); + xgbe_an_enable_interrupts(pdata); - return xgbe_phy_config_aneg(pdata); + ret = xgbe_phy_config_aneg(pdata); + if (ret) + axgbe_error("%s: phy_config_aneg %d\n", __func__, ret); + + return (ret); -err_irq: - bus_teardown_intr(pdata->dev, pdata->an_irq_res, pdata->an_irq_tag); +err_stop: + pdata->phy_if.phy_impl.stop(pdata); - return ret; + return (ret); } -static int xgbe_phy_reset(struct xgbe_prv_data *pdata) +static int +xgbe_phy_reset(struct xgbe_prv_data *pdata) { - unsigned int count, reg; - - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); - reg |= MDIO_CTRL1_RESET; - XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); - - count = 50; - do { - DELAY(20); - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); - } while ((reg & MDIO_CTRL1_RESET) && --count); + int ret; - if (reg & MDIO_CTRL1_RESET) - return -ETIMEDOUT; + ret = pdata->phy_if.phy_impl.reset(pdata); + if (ret) { + axgbe_error("%s: impl phy reset %d\n", __func__, ret); + return (ret); + } /* Disable auto-negotiation for now */ - xgbe_disable_an(pdata); + xgbe_an_disable_all(pdata); /* Clear auto-negotiation interrupts */ - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); + xgbe_an_clear_interrupts_all(pdata); - return 0; + return (0); } -static void xgbe_phy_init(struct xgbe_prv_data *pdata) +static int +xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata) { + + if (XGBE_ADV(&pdata->phy, 10000baseKR_Full)) + return (SPEED_10000); + else if (XGBE_ADV(&pdata->phy, 10000baseT_Full)) + return (SPEED_10000); + else if (XGBE_ADV(&pdata->phy, 2500baseX_Full)) + return (SPEED_2500); + else if (XGBE_ADV(&pdata->phy, 2500baseT_Full)) + return (SPEED_2500); + else if (XGBE_ADV(&pdata->phy, 1000baseKX_Full)) + return (SPEED_1000); + else if (XGBE_ADV(&pdata->phy, 1000baseT_Full)) + return (SPEED_1000); + else if (XGBE_ADV(&pdata->phy, 100baseT_Full)) + return (SPEED_100); + + return (SPEED_UNKNOWN); +} + 
+static void +xgbe_phy_exit(struct xgbe_prv_data *pdata) +{ + pdata->phy_if.phy_impl.exit(pdata); +} + +static int +xgbe_phy_init(struct xgbe_prv_data *pdata) +{ + int ret = 0; + + DBGPR("-->xgbe_phy_init\n"); + sx_init(&pdata->an_mutex, "axgbe AN lock"); pdata->mdio_mmd = MDIO_MMD_PCS; /* Initialize supported features */ - pdata->phy.supported = SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_Backplane; - pdata->phy.supported |= SUPPORTED_10000baseKR_Full; - switch (pdata->speed_set) { - case XGBE_SPEEDSET_1000_10000: - pdata->phy.supported |= SUPPORTED_1000baseKX_Full; - break; - case XGBE_SPEEDSET_2500_10000: - pdata->phy.supported |= SUPPORTED_2500baseX_Full; - break; - } - pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECABLE); pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE); - if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) - pdata->phy.supported |= SUPPORTED_10000baseR_FEC; - pdata->phy.advertising = pdata->phy.supported; + /* Setup the phy (including supported features) */ + ret = pdata->phy_if.phy_impl.init(pdata); + if (ret) + return (ret); + + /* Copy supported link modes to advertising link modes */ + XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported); pdata->phy.address = 0; - pdata->phy.autoneg = AUTONEG_ENABLE; - pdata->phy.speed = SPEED_UNKNOWN; - pdata->phy.duplex = DUPLEX_UNKNOWN; + if (XGBE_ADV(&pdata->phy, Autoneg)) { + pdata->phy.autoneg = AUTONEG_ENABLE; + pdata->phy.speed = SPEED_UNKNOWN; + pdata->phy.duplex = DUPLEX_UNKNOWN; + } else { + pdata->phy.autoneg = AUTONEG_DISABLE; + pdata->phy.speed = xgbe_phy_best_advertised_speed(pdata); + pdata->phy.duplex = DUPLEX_FULL; + } pdata->phy.link = 0; pdata->phy.pause_autoneg = pdata->pause_autoneg; pdata->phy.tx_pause = pdata->tx_pause; pdata->phy.rx_pause = pdata->rx_pause; /* Fix up Flow Control advertising */ - pdata->phy.advertising &= 
~ADVERTISED_Pause; - pdata->phy.advertising &= ~ADVERTISED_Asym_Pause; + XGBE_CLR_ADV(&pdata->phy, Pause); + XGBE_CLR_ADV(&pdata->phy, Asym_Pause); if (pdata->rx_pause) { - pdata->phy.advertising |= ADVERTISED_Pause; - pdata->phy.advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_ADV(&pdata->phy, Pause); + XGBE_SET_ADV(&pdata->phy, Asym_Pause); + } + + if (pdata->tx_pause) { + if (XGBE_ADV(&pdata->phy, Asym_Pause)) + XGBE_CLR_ADV(&pdata->phy, Asym_Pause); + else + XGBE_SET_ADV(&pdata->phy, Asym_Pause); } - if (pdata->tx_pause) - pdata->phy.advertising ^= ADVERTISED_Asym_Pause; + return (0); } -void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if) +void +xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if) { - phy_if->phy_init = xgbe_phy_init; + phy_if->phy_init = xgbe_phy_init; + phy_if->phy_exit = xgbe_phy_exit; phy_if->phy_reset = xgbe_phy_reset; phy_if->phy_start = xgbe_phy_start; - phy_if->phy_stop = xgbe_phy_stop; + phy_if->phy_stop = xgbe_phy_stop; phy_if->phy_status = xgbe_phy_status; phy_if->phy_config_aneg = xgbe_phy_config_aneg; + + phy_if->phy_valid_speed = xgbe_phy_valid_speed; + + phy_if->an_isr = xgbe_an_combined_isr; } diff --git a/sys/dev/axgbe/xgbe-phy-v1.c b/sys/dev/axgbe/xgbe-phy-v1.c new file mode 100644 index 000000000000..7bfb20de23aa --- /dev/null +++ b/sys/dev/axgbe/xgbe-phy-v1.c @@ -0,0 +1,707 @@ +/* + * AMD 10Gb Ethernet driver + * + * Copyright (c) 2020 Advanced Micro Devices, Inc. + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. 
+ * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "xgbe.h" +#include "xgbe-common.h" + +struct xgbe_phy_data { + /* 1000/10000 vs 2500/10000 indicator */ + unsigned int speed_set; + + /* SerDes UEFI configurable settings. + * Switching between modes/speeds requires new values for some + * SerDes settings. The values can be supplied as device + * properties in array format. The first array entry is for + * 1GbE, second for 2.5GbE and third for 10GbE + */ + uint32_t blwc[XGBE_SPEEDS]; + uint32_t cdr_rate[XGBE_SPEEDS]; + uint32_t pq_skew[XGBE_SPEEDS]; + uint32_t tx_amp[XGBE_SPEEDS]; + uint32_t dfe_tap_cfg[XGBE_SPEEDS]; + uint32_t dfe_tap_ena[XGBE_SPEEDS]; +}; + +static void +xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) +{ + XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1); +} + +static void +xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) +{ + XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0); +} + +static enum xgbe_mode +xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + enum xgbe_mode mode; + unsigned int ad_reg, lp_reg; + + XGBE_SET_LP_ADV(&pdata->phy, Autoneg); + XGBE_SET_LP_ADV(&pdata->phy, Backplane); + + /* Compare Advertisement and Link Partner register 1 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); + if (lp_reg & 0x400) + XGBE_SET_LP_ADV(&pdata->phy, Pause); + if (lp_reg & 0x800) + 
XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause); + + axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n", + __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg); + + if (pdata->phy.pause_autoneg) { + /* Set flow control based on auto-negotiation result */ + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; + + if (ad_reg & lp_reg & 0x400) { + pdata->phy.tx_pause = 1; + pdata->phy.rx_pause = 1; + } else if (ad_reg & lp_reg & 0x800) { + if (ad_reg & 0x400) + pdata->phy.rx_pause = 1; + else if (lp_reg & 0x400) + pdata->phy.tx_pause = 1; + } + } + + /* Compare Advertisement and Link Partner register 2 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + if (lp_reg & 0x80) + XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full); + if (lp_reg & 0x20) { + if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000) + XGBE_SET_LP_ADV(&pdata->phy, 2500baseX_Full); + else + XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full); + } + + ad_reg &= lp_reg; + if (ad_reg & 0x80) { + pdata->phy.speed = SPEED_10000; + mode = XGBE_MODE_KR; + } else if (ad_reg & 0x20) { + switch (pdata->speed_set) { + case XGBE_SPEEDSET_1000_10000: + pdata->phy.speed = SPEED_1000; + mode = XGBE_MODE_KX_1000; + break; + + case XGBE_SPEEDSET_2500_10000: + pdata->phy.speed = SPEED_2500; + mode = XGBE_MODE_KX_2500; + break; + } + } else { + mode = XGBE_MODE_UNKNOWN; + pdata->phy.speed = SPEED_UNKNOWN; + } + + /* Compare Advertisement and Link Partner register 3 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + if (lp_reg & 0xc000) + XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC); + + return (mode); +} + +static void +xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, struct xgbe_phy *dphy) +{ + XGBE_LM_COPY(dphy, advertising, &pdata->phy, advertising); +} + +static int +xgbe_phy_an_config(struct xgbe_prv_data *pdata) +{ + /* Nothing uniquely required for an 
configuration */ + return (0); +} + +static enum xgbe_an_mode +xgbe_phy_an_mode(struct xgbe_prv_data *pdata) +{ + return (XGBE_AN_MODE_CL73); +} + +static void +xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata) +{ + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + + reg |= MDIO_CTRL1_LPOWER; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); + + DELAY(75); + + reg &= ~MDIO_CTRL1_LPOWER; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); +} + +static void +xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata) +{ + /* Assert Rx and Tx ratechange */ + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1); +} + +static void +xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata) +{ + unsigned int wait; + uint16_t status; + + /* Release Rx and Tx ratechange */ + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0); + + /* Wait for Rx and Tx ready */ + wait = XGBE_RATECHANGE_COUNT; + while (wait--) { + DELAY(50); + + status = XSIR0_IOREAD(pdata, SIR0_STATUS); + if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && + XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) + goto rx_reset; + } + + axgbe_printf(2, "SerDes rx/tx not ready (%#hx)\n", status); + +rx_reset: + /* Perform Rx reset for the DFE changes */ + XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1); +} + +static void +xgbe_phy_kr_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + /* Set PCS to KR/10G speed */ + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); + reg &= ~MDIO_PCS_CTRL2_TYPE; + reg |= MDIO_PCS_CTRL2_10GBR; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg); + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + reg &= ~MDIO_CTRL1_SPEEDSEL; + reg |= MDIO_CTRL1_SPEED10G; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); + + xgbe_phy_pcs_power_cycle(pdata); + + /* Set SerDes to 10G speed */ + xgbe_phy_start_ratechange(pdata); + + 
XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE); + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD); + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL); + + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, + phy_data->cdr_rate[XGBE_SPEED_10000]); + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP, + phy_data->tx_amp[XGBE_SPEED_10000]); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, + phy_data->blwc[XGBE_SPEED_10000]); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG, + phy_data->pq_skew[XGBE_SPEED_10000]); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG, + phy_data->dfe_tap_cfg[XGBE_SPEED_10000]); + XRXTX_IOWRITE(pdata, RXTX_REG22, + phy_data->dfe_tap_ena[XGBE_SPEED_10000]); + + xgbe_phy_complete_ratechange(pdata); + + axgbe_printf(2, "10GbE KR mode set\n"); +} + +static void +xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + /* Set PCS to KX/1G speed */ + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); + reg &= ~MDIO_PCS_CTRL2_TYPE; + reg |= MDIO_PCS_CTRL2_10GBX; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg); + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + reg &= ~MDIO_CTRL1_SPEEDSEL; + reg |= MDIO_CTRL1_SPEED1G; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); + + xgbe_phy_pcs_power_cycle(pdata); + + /* Set SerDes to 2.5G speed */ + xgbe_phy_start_ratechange(pdata); + + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE); + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD); + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL); + + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, + phy_data->cdr_rate[XGBE_SPEED_2500]); + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP, + phy_data->tx_amp[XGBE_SPEED_2500]); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, + phy_data->blwc[XGBE_SPEED_2500]); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG, + 
phy_data->pq_skew[XGBE_SPEED_2500]); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG, + phy_data->dfe_tap_cfg[XGBE_SPEED_2500]); + XRXTX_IOWRITE(pdata, RXTX_REG22, + phy_data->dfe_tap_ena[XGBE_SPEED_2500]); + + xgbe_phy_complete_ratechange(pdata); + + axgbe_printf(2, "2.5GbE KX mode set\n"); +} + +static void +xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int reg; + + /* Set PCS to KX/1G speed */ + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); + reg &= ~MDIO_PCS_CTRL2_TYPE; + reg |= MDIO_PCS_CTRL2_10GBX; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg); + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + reg &= ~MDIO_CTRL1_SPEEDSEL; + reg |= MDIO_CTRL1_SPEED1G; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); + + xgbe_phy_pcs_power_cycle(pdata); + + /* Set SerDes to 1G speed */ + xgbe_phy_start_ratechange(pdata); + + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE); + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD); + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL); + + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, + phy_data->cdr_rate[XGBE_SPEED_1000]); + XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP, + phy_data->tx_amp[XGBE_SPEED_1000]); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA, + phy_data->blwc[XGBE_SPEED_1000]); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG, + phy_data->pq_skew[XGBE_SPEED_1000]); + XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG, + phy_data->dfe_tap_cfg[XGBE_SPEED_1000]); + XRXTX_IOWRITE(pdata, RXTX_REG22, + phy_data->dfe_tap_ena[XGBE_SPEED_1000]); + + xgbe_phy_complete_ratechange(pdata); + + axgbe_printf(2, "1GbE KX mode set\n"); +} + +static enum xgbe_mode +xgbe_phy_cur_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + enum xgbe_mode mode; + unsigned int reg; + + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2); + reg &= 
MDIO_PCS_CTRL2_TYPE; + + if (reg == MDIO_PCS_CTRL2_10GBR) { + mode = XGBE_MODE_KR; + } else { + if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000) + mode = XGBE_MODE_KX_2500; + else + mode = XGBE_MODE_KX_1000; + } + + return (mode); +} + +static enum xgbe_mode +xgbe_phy_switch_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + enum xgbe_mode mode; + + /* If we are in KR switch to KX, and vice-versa */ + if (xgbe_phy_cur_mode(pdata) == XGBE_MODE_KR) { + if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000) + mode = XGBE_MODE_KX_2500; + else + mode = XGBE_MODE_KX_1000; + } else { + mode = XGBE_MODE_KR; + } + + return (mode); +} + +static enum xgbe_mode +xgbe_phy_get_mode(struct xgbe_prv_data *pdata, int speed) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (speed) { + case SPEED_1000: + return ((phy_data->speed_set == XGBE_SPEEDSET_1000_10000) + ? XGBE_MODE_KX_1000 : XGBE_MODE_UNKNOWN); + case SPEED_2500: + return ((phy_data->speed_set == XGBE_SPEEDSET_2500_10000) + ? 
XGBE_MODE_KX_2500 : XGBE_MODE_UNKNOWN); + case SPEED_10000: + return (XGBE_MODE_KR); + default: + return (XGBE_MODE_UNKNOWN); + } +} + +static void +xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) +{ + switch (mode) { + case XGBE_MODE_KX_1000: + xgbe_phy_kx_1000_mode(pdata); + break; + case XGBE_MODE_KX_2500: + xgbe_phy_kx_2500_mode(pdata); + break; + case XGBE_MODE_KR: + xgbe_phy_kr_mode(pdata); + break; + default: + break; + } +} + +static void +xgbe_phy_get_type(struct xgbe_prv_data *pdata, struct ifmediareq * ifmr) +{ + + switch (pdata->phy.speed) { + case SPEED_10000: + ifmr->ifm_active |= IFM_10G_KR; + break; + case SPEED_2500: + ifmr->ifm_active |= IFM_2500_KX; + break; + case SPEED_1000: + ifmr->ifm_active |= IFM_1000_KX; + break; + default: + ifmr->ifm_active |= IFM_OTHER; + break; + } +} + +static bool +xgbe_phy_check_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode, bool advert) +{ + + if (pdata->phy.autoneg == AUTONEG_ENABLE) + return (advert); + else { + enum xgbe_mode cur_mode; + + cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed); + if (cur_mode == mode) + return (true); + } + + return (false); +} + +static bool +xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) +{ + + switch (mode) { + case XGBE_MODE_KX_1000: + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 1000baseKX_Full))); + case XGBE_MODE_KX_2500: + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 2500baseX_Full))); + case XGBE_MODE_KR: + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 10000baseKR_Full))); + default: + return (false); + } +} + +static bool +xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (speed) { + case SPEED_1000: + if (phy_data->speed_set != XGBE_SPEEDSET_1000_10000) + return (false); + return (true); + case SPEED_2500: + if (phy_data->speed_set != XGBE_SPEEDSET_2500_10000) + return (false); + return 
(true); + case SPEED_10000: + return (true); + default: + return (false); + } +} + +static int +xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) +{ + unsigned int reg; + + *an_restart = 0; + + /* Link status is latched low, so read once to clear + * and then read again to get current state + */ + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + + return ((reg & MDIO_STAT1_LSTATUS) ? 1 : 0); +} + +static void +xgbe_phy_stop(struct xgbe_prv_data *pdata) +{ + /* Nothing uniquely required for stop */ +} + +static int +xgbe_phy_start(struct xgbe_prv_data *pdata) +{ + /* Nothing uniquely required for start */ + return (0); +} + +static int +xgbe_phy_reset(struct xgbe_prv_data *pdata) +{ + unsigned int reg, count; + + /* Perform a software reset of the PCS */ + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + reg |= MDIO_CTRL1_RESET; + XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg); + + count = 50; + do { + DELAY(20); + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1); + } while ((reg & MDIO_CTRL1_RESET) && --count); + + if (reg & MDIO_CTRL1_RESET) + return (-ETIMEDOUT); + + return (0); +} + +static void +xgbe_phy_exit(struct xgbe_prv_data *pdata) +{ + /* Nothing uniquely required for exit */ +} + +static int +xgbe_phy_init(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data; + + phy_data = malloc(sizeof(*phy_data), M_AXGBE, M_WAITOK | M_ZERO); + + /* Initialize supported features */ + XGBE_ZERO_SUP(&pdata->phy); + XGBE_SET_SUP(&pdata->phy, Autoneg); + XGBE_SET_SUP(&pdata->phy, Pause); + XGBE_SET_SUP(&pdata->phy, Asym_Pause); + XGBE_SET_SUP(&pdata->phy, Backplane); + XGBE_SET_SUP(&pdata->phy, 10000baseKR_Full); + switch (phy_data->speed_set) { + case XGBE_SPEEDSET_1000_10000: + XGBE_SET_SUP(&pdata->phy, 1000baseKX_Full); + break; + case XGBE_SPEEDSET_2500_10000: + XGBE_SET_SUP(&pdata->phy, 2500baseX_Full); + break; + } + + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + 
XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC); + + pdata->phy_data = phy_data; + + return (0); +} + +void +xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *phy_if) +{ + struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl; + + phy_impl->init = xgbe_phy_init; + phy_impl->exit = xgbe_phy_exit; + + phy_impl->reset = xgbe_phy_reset; + phy_impl->start = xgbe_phy_start; + phy_impl->stop = xgbe_phy_stop; + + phy_impl->link_status = xgbe_phy_link_status; + + phy_impl->valid_speed = xgbe_phy_valid_speed; + + phy_impl->use_mode = xgbe_phy_use_mode; + phy_impl->set_mode = xgbe_phy_set_mode; + phy_impl->get_mode = xgbe_phy_get_mode; + phy_impl->switch_mode = xgbe_phy_switch_mode; + phy_impl->cur_mode = xgbe_phy_cur_mode; + phy_impl->get_type = xgbe_phy_get_type; + + phy_impl->an_mode = xgbe_phy_an_mode; + + phy_impl->an_config = xgbe_phy_an_config; + + phy_impl->an_advertising = xgbe_phy_an_advertising; + + phy_impl->an_outcome = xgbe_phy_an_outcome; + + phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; + phy_impl->kr_training_post = xgbe_phy_kr_training_post; +} diff --git a/sys/dev/axgbe/xgbe-phy-v2.c b/sys/dev/axgbe/xgbe-phy-v2.c new file mode 100644 index 000000000000..8039909df057 --- /dev/null +++ b/sys/dev/axgbe/xgbe-phy-v2.c @@ -0,0 +1,3771 @@ +/* + * AMD 10Gb Ethernet driver + * + * Copyright (c) 2020 Advanced Micro Devices, Inc. + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "xgbe.h" +#include "xgbe-common.h" + +struct mtx xgbe_phy_comm_lock; + +#define XGBE_PHY_PORT_SPEED_100 BIT(0) +#define XGBE_PHY_PORT_SPEED_1000 BIT(1) +#define XGBE_PHY_PORT_SPEED_2500 BIT(2) +#define XGBE_PHY_PORT_SPEED_10000 BIT(3) + +#define XGBE_MUTEX_RELEASE 0x80000000 + +#define XGBE_SFP_DIRECT 7 +#define GPIO_MASK_WIDTH 4 + +/* I2C target addresses */ +#define XGBE_SFP_SERIAL_ID_ADDRESS 0x50 +#define XGBE_SFP_DIAG_INFO_ADDRESS 0x51 +#define XGBE_SFP_PHY_ADDRESS 0x56 +#define XGBE_GPIO_ADDRESS_PCA9555 0x20 + +/* SFP sideband signal indicators */ +#define XGBE_GPIO_NO_TX_FAULT BIT(0) +#define XGBE_GPIO_NO_RATE_SELECT BIT(1) +#define XGBE_GPIO_NO_MOD_ABSENT BIT(2) +#define XGBE_GPIO_NO_RX_LOS BIT(3) + +/* Rate-change complete wait/retry count */ +#define XGBE_RATECHANGE_COUNT 500 + +/* CDR delay values for KR support (in usec) */ +#define XGBE_CDR_DELAY_INIT 10000 +#define XGBE_CDR_DELAY_INC 10000 +#define XGBE_CDR_DELAY_MAX 100000 + +/* RRC frequency during link status check */ +#define XGBE_RRC_FREQUENCY 10 + +enum xgbe_port_mode { + XGBE_PORT_MODE_RSVD = 0, + XGBE_PORT_MODE_BACKPLANE, + XGBE_PORT_MODE_BACKPLANE_2500, + XGBE_PORT_MODE_1000BASE_T, + XGBE_PORT_MODE_1000BASE_X, + XGBE_PORT_MODE_NBASE_T, + XGBE_PORT_MODE_10GBASE_T, + XGBE_PORT_MODE_10GBASE_R, + XGBE_PORT_MODE_SFP, + XGBE_PORT_MODE_MAX, +}; + +enum xgbe_conn_type { + XGBE_CONN_TYPE_NONE = 0, + XGBE_CONN_TYPE_SFP, + XGBE_CONN_TYPE_MDIO, + 
XGBE_CONN_TYPE_RSVD1, + XGBE_CONN_TYPE_BACKPLANE, + XGBE_CONN_TYPE_MAX, +}; + +/* SFP/SFP+ related definitions */ +enum xgbe_sfp_comm { + XGBE_SFP_COMM_DIRECT = 0, + XGBE_SFP_COMM_PCA9545, +}; + +enum xgbe_sfp_cable { + XGBE_SFP_CABLE_UNKNOWN = 0, + XGBE_SFP_CABLE_ACTIVE, + XGBE_SFP_CABLE_PASSIVE, +}; + +enum xgbe_sfp_base { + XGBE_SFP_BASE_UNKNOWN = 0, + XGBE_SFP_BASE_1000_T, + XGBE_SFP_BASE_1000_SX, + XGBE_SFP_BASE_1000_LX, + XGBE_SFP_BASE_1000_CX, + XGBE_SFP_BASE_10000_SR, + XGBE_SFP_BASE_10000_LR, + XGBE_SFP_BASE_10000_LRM, + XGBE_SFP_BASE_10000_ER, + XGBE_SFP_BASE_10000_CR, +}; + +enum xgbe_sfp_speed { + XGBE_SFP_SPEED_UNKNOWN = 0, + XGBE_SFP_SPEED_100_1000, + XGBE_SFP_SPEED_1000, + XGBE_SFP_SPEED_10000, +}; + +/* SFP Serial ID Base ID values relative to an offset of 0 */ +#define XGBE_SFP_BASE_ID 0 +#define XGBE_SFP_ID_SFP 0x03 + +#define XGBE_SFP_BASE_EXT_ID 1 +#define XGBE_SFP_EXT_ID_SFP 0x04 + +#define XGBE_SFP_BASE_10GBE_CC 3 +#define XGBE_SFP_BASE_10GBE_CC_SR BIT(4) +#define XGBE_SFP_BASE_10GBE_CC_LR BIT(5) +#define XGBE_SFP_BASE_10GBE_CC_LRM BIT(6) +#define XGBE_SFP_BASE_10GBE_CC_ER BIT(7) + +#define XGBE_SFP_BASE_1GBE_CC 6 +#define XGBE_SFP_BASE_1GBE_CC_SX BIT(0) +#define XGBE_SFP_BASE_1GBE_CC_LX BIT(1) +#define XGBE_SFP_BASE_1GBE_CC_CX BIT(2) +#define XGBE_SFP_BASE_1GBE_CC_T BIT(3) + +#define XGBE_SFP_BASE_CABLE 8 +#define XGBE_SFP_BASE_CABLE_PASSIVE BIT(2) +#define XGBE_SFP_BASE_CABLE_ACTIVE BIT(3) + +#define XGBE_SFP_BASE_BR 12 +#define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a +#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d +#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64 +#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68 + +#define XGBE_SFP_BASE_CU_CABLE_LEN 18 + +#define XGBE_SFP_BASE_VENDOR_NAME 20 +#define XGBE_SFP_BASE_VENDOR_NAME_LEN 16 +#define XGBE_SFP_BASE_VENDOR_PN 40 +#define XGBE_SFP_BASE_VENDOR_PN_LEN 16 +#define XGBE_SFP_BASE_VENDOR_REV 56 +#define XGBE_SFP_BASE_VENDOR_REV_LEN 4 + +#define XGBE_SFP_BASE_CC 63 + +/* SFP Serial ID Extended ID values relative to an offset 
of 64 */
/* Offset/length of the vendor serial number (lives in the extd area) */
#define XGBE_SFP_BASE_VENDOR_SN		4
#define XGBE_SFP_BASE_VENDOR_SN_LEN	16

/* Extended ID option flags (byte 1 of the extd area) */
#define XGBE_SFP_EXTD_OPT1		1
#define XGBE_SFP_EXTD_OPT1_RX_LOS	BIT(1)	/* module signals RX LOS */
#define XGBE_SFP_EXTD_OPT1_TX_FAULT	BIT(3)	/* module signals TX fault */

#define XGBE_SFP_EXTD_DIAG		28
#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE	BIT(2)	/* diags need addr-change seq */

#define XGBE_SFP_EXTD_SFF_8472		30	/* SFF-8472 compliance revision */

#define XGBE_SFP_EXTD_CC		31	/* checksum over the extd area */

/* In-memory copy of the SFP serial ID EEPROM (I2C address 0x50) */
struct xgbe_sfp_eeprom {
	uint8_t base[64];	/* base ID fields; base[63] is their checksum */
	uint8_t extd[32];	/* extended ID fields; extd[31] is their checksum */
	uint8_t vendor[32];	/* vendor specific area */
};

/*
 * Diagnostics are usable when the module declares SFF-8472 compliance and
 * does not require an address-change sequence to reach the diag page.
 */
#define XGBE_SFP_DIAGS_SUPPORTED(_x)			\
	((_x)->extd[XGBE_SFP_EXTD_SFF_8472] &&		\
	 !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))

#define XGBE_SFP_EEPROM_BASE_LEN	256
#define XGBE_SFP_EEPROM_DIAG_LEN	256
#define XGBE_SFP_EEPROM_MAX		(XGBE_SFP_EEPROM_BASE_LEN + \
					 XGBE_SFP_EEPROM_DIAG_LEN)

/*
 * Vendor/part-number strings used to match Bel-Fuse modules for quirk
 * handling; space-padded to the full 16-byte EEPROM field width because
 * they are compared with memcmp() over the whole field.
 */
#define XGBE_BEL_FUSE_VENDOR	"BEL-FUSE        "
#define XGBE_BEL_FUSE_PARTNO	"1GBT-SFP06      "

/* Scratch buffer sized for any NUL-terminated EEPROM ASCII field */
struct xgbe_sfp_ascii {
	union {
		char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
		char partno[XGBE_SFP_BASE_VENDOR_PN_LEN + 1];
		char rev[XGBE_SFP_BASE_VENDOR_REV_LEN + 1];
		char serno[XGBE_SFP_BASE_VENDOR_SN_LEN + 1];
	} u;
};

/* MDIO PHY reset types */
enum xgbe_mdio_reset {
	XGBE_MDIO_RESET_NONE = 0,
	XGBE_MDIO_RESET_I2C_GPIO,	/* reset via I2C GPIO expander */
	XGBE_MDIO_RESET_INT_GPIO,	/* reset via internal GPIO */
	XGBE_MDIO_RESET_MAX,
};

/* Re-driver related definitions */
enum xgbe_phy_redrv_if {
	XGBE_PHY_REDRV_IF_MDIO = 0,
	XGBE_PHY_REDRV_IF_I2C,
	XGBE_PHY_REDRV_IF_MAX,
};

enum xgbe_phy_redrv_model {
	XGBE_PHY_REDRV_MODEL_4223 = 0,
	XGBE_PHY_REDRV_MODEL_4227,
	XGBE_PHY_REDRV_MODEL_MAX,
};

enum xgbe_phy_redrv_mode {
	XGBE_PHY_REDRV_MODE_CX = 5,
	XGBE_PHY_REDRV_MODE_SR = 9,
};

#define XGBE_PHY_REDRV_MODE_REG	0x12b0

/* PHY related configuration information */
struct xgbe_phy_data {
	enum xgbe_port_mode port_mode;

	unsigned int port_id;

	unsigned int port_speeds;

	enum xgbe_conn_type conn_type;

	enum xgbe_mode cur_mode;
	enum xgbe_mode start_mode;

	unsigned int rrc_count;

	unsigned int mdio_addr;

	/* SFP Support */
	enum xgbe_sfp_comm sfp_comm;
	unsigned int sfp_mux_address;
	unsigned int sfp_mux_channel;

	/* GPIO expander wiring for the SFP status signals */
	unsigned int sfp_gpio_address;
	unsigned int sfp_gpio_mask;
	unsigned int sfp_gpio_inputs;
	unsigned int sfp_gpio_rx_los;
	unsigned int sfp_gpio_tx_fault;
	unsigned int sfp_gpio_mod_absent;
	unsigned int sfp_gpio_rate_select;

	/* Cached state derived from GPIOs and the module EEPROM */
	unsigned int sfp_rx_los;
	unsigned int sfp_tx_fault;
	unsigned int sfp_mod_absent;
	unsigned int sfp_changed;
	unsigned int sfp_phy_avail;
	unsigned int sfp_cable_len;
	enum xgbe_sfp_base sfp_base;
	enum xgbe_sfp_cable sfp_cable;
	enum xgbe_sfp_speed sfp_speed;
	struct xgbe_sfp_eeprom sfp_eeprom;

	/* External PHY support */
	enum xgbe_mdio_mode phydev_mode;
	uint32_t phy_id;
	int phydev;			/* nonzero when an external PHY is bound */
	enum xgbe_mdio_reset mdio_reset;
	unsigned int mdio_reset_addr;
	unsigned int mdio_reset_gpio;

	/* Re-driver support */
	unsigned int redrv;
	unsigned int redrv_if;
	unsigned int redrv_addr;
	unsigned int redrv_lane;
	unsigned int redrv_model;

	/* KR AN support */
	unsigned int phy_cdr_notrack;
	unsigned int phy_cdr_delay;

	uint8_t port_sfp_inputs;
};

static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);

/* Forward an I2C operation to the registered I2C interface */
static int
xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *i2c_op)
{
	return (pdata->i2c_if.i2c_xfer(pdata, i2c_op));
}

/*
 * Write a re-driver register over I2C: 5-byte command (register, value,
 * ones'-complement checksum), then read back a 1-byte status that must be
 * 0xff on success.  Each phase is retried once on -EAGAIN.
 */
static int
xgbe_phy_redrv_write(struct xgbe_prv_data *pdata, unsigned int reg,
    unsigned int val)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	struct xgbe_i2c_op i2c_op;
	__be16 *redrv_val;
	uint8_t redrv_data[5], csum;
	unsigned int i, retry;
	int ret;

	/* High byte of register contains read/write indicator */
	redrv_data[0] = ((reg >> 8) & 0xff) << 1;
	redrv_data[1] = reg & 0xff;
	redrv_val = (__be16 *)&redrv_data[2];
	*redrv_val = cpu_to_be16(val);

	/* Calculate 1 byte checksum (end-around carry add) */
	csum = 0;
	for (i = 0; i < 4; i++) {
		csum += redrv_data[i];
		if (redrv_data[i] > csum)
			csum++;
	}
	redrv_data[4] = ~csum;

	retry = 1;
again1:
	i2c_op.cmd = XGBE_I2C_CMD_WRITE;
	i2c_op.target = phy_data->redrv_addr;
	i2c_op.len = sizeof(redrv_data);
	i2c_op.buf = redrv_data;
	ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
	if (ret) {
		if ((ret == -EAGAIN) && retry--)
			goto again1;

		return (ret);
	}

	retry = 1;
again2:
	i2c_op.cmd = XGBE_I2C_CMD_READ;
	i2c_op.target = phy_data->redrv_addr;
	i2c_op.len = 1;
	i2c_op.buf = redrv_data;
	ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
	if (ret) {
		if ((ret == -EAGAIN) && retry--)
			goto again2;

		return (ret);
	}

	/* Device acknowledges a good write with 0xff */
	if (redrv_data[0] != 0xff) {
		axgbe_error("Redriver write checksum error\n");
		ret = -EIO;
	}

	return (ret);
}
/*
 * Write a device register over I2C as a single write op; retried once
 * on -EAGAIN.  Returns 0 or a negative errno.
 */
static int
xgbe_phy_i2c_write(struct xgbe_prv_data *pdata, unsigned int target, void *val,
    unsigned int val_len)
{
	struct xgbe_i2c_op i2c_op;
	int retry, ret;

	retry = 1;
again:
	/* Write the specified register */
	i2c_op.cmd = XGBE_I2C_CMD_WRITE;
	i2c_op.target = target;
	i2c_op.len = val_len;
	i2c_op.buf = val;
	ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
	if ((ret == -EAGAIN) && retry--)
		goto again;

	return (ret);
}

/*
 * Read a device register over I2C: write the register address, then read
 * val_len bytes back.  Each phase is retried once on -EAGAIN.
 */
static int
xgbe_phy_i2c_read(struct xgbe_prv_data *pdata, unsigned int target, void *reg,
    unsigned int reg_len, void *val, unsigned int val_len)
{
	struct xgbe_i2c_op i2c_op;
	int retry, ret;

	axgbe_printf(3, "%s: target 0x%x reg_len %d val_len %d\n", __func__,
	    target, reg_len, val_len);
	retry = 1;
again1:
	/* Set the specified register to read */
	i2c_op.cmd = XGBE_I2C_CMD_WRITE;
	i2c_op.target = target;
	i2c_op.len = reg_len;
	i2c_op.buf = reg;
	ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
	axgbe_printf(3, "%s: ret1 %d retry %d\n", __func__, ret, retry);
	if (ret) {
		if ((ret == -EAGAIN) && retry--)
			goto again1;

		return (ret);
	}

	retry = 1;
again2:
	/* Read the specified register */
	i2c_op.cmd = XGBE_I2C_CMD_READ;
	i2c_op.target = target;
	i2c_op.len = val_len;
	i2c_op.buf = val;
	ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
	axgbe_printf(3, "%s: ret2 %d retry %d\n", __func__, ret, retry);
	if ((ret == -EAGAIN) && retry--)
		goto again2;

	return (ret);
}

/* Deselect all SFP mux channels (no-op when the SFP is directly attached) */
static int
xgbe_phy_sfp_put_mux(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	struct xgbe_i2c_op i2c_op;
	uint8_t mux_channel;

	if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
		return (0);

	/* Select no mux channels */
	mux_channel = 0;
	i2c_op.cmd = XGBE_I2C_CMD_WRITE;
	i2c_op.target = phy_data->sfp_mux_address;
	i2c_op.len = sizeof(mux_channel);
	i2c_op.buf = &mux_channel;

	return (xgbe_phy_i2c_xfer(pdata, &i2c_op));
}

/* Route the I2C mux to this port's SFP (no-op when directly attached) */
static int
xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	struct xgbe_i2c_op i2c_op;
	uint8_t mux_channel;

	if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
		return (0);

	/* Select desired mux channel */
	mux_channel = 1 << phy_data->sfp_mux_channel;
	i2c_op.cmd = XGBE_I2C_CMD_WRITE;
	i2c_op.target = phy_data->sfp_mux_address;
	i2c_op.len = sizeof(mux_channel);
	i2c_op.buf = &mux_channel;

	return (xgbe_phy_i2c_xfer(pdata, &i2c_op));
}

/* Release the software side of the bus ownership taken below */
static void
xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata)
{
	mtx_unlock(&xgbe_phy_comm_lock);
}

/*
 * Acquire exclusive access to the shared I2C and MDIO/GPIO busses.
 * Returns 0 with the software mutex held and both hardware mutexes
 * claimed, or -ETIMEDOUT (mutex released) after ~5 seconds.
 */
static int
xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	unsigned long timeout;
	unsigned int mutex_id;

	/* The I2C and MDIO/GPIO bus is multiplexed between multiple devices,
	 * the driver needs to take the software mutex and then the hardware
	 * mutexes before being able to use the busses.
	 */
	mtx_lock(&xgbe_phy_comm_lock);

	/* Clear the mutexes */
	XP_IOWRITE(pdata, XP_I2C_MUTEX, XGBE_MUTEX_RELEASE);
	XP_IOWRITE(pdata, XP_MDIO_MUTEX, XGBE_MUTEX_RELEASE);

	/* Mutex formats are the same for I2C and MDIO/GPIO */
	mutex_id = 0;
	XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
	XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);

	/* NOTE(review): plain "<" comparison misbehaves when ticks wraps;
	 * consider a signed (ticks - timeout) test — confirm intent. */
	timeout = ticks + (5 * hz);
	while (ticks < timeout) {
		/* Must be all zeroes in order to obtain the mutex */
		if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
		    XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
			DELAY(200);
			continue;
		}

		/* Obtain the mutex */
		XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
		XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);

		return (0);
	}

	mtx_unlock(&xgbe_phy_comm_lock);

	axgbe_error("unable to obtain hardware mutexes\n");

	return (-ETIMEDOUT);
}
+ */ + mtx_lock(&xgbe_phy_comm_lock); + + /* Clear the mutexes */ + XP_IOWRITE(pdata, XP_I2C_MUTEX, XGBE_MUTEX_RELEASE); + XP_IOWRITE(pdata, XP_MDIO_MUTEX, XGBE_MUTEX_RELEASE); + + /* Mutex formats are the same for I2C and MDIO/GPIO */ + mutex_id = 0; + XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id); + XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1); + + timeout = ticks + (5 * hz); + while (ticks < timeout) { + /* Must be all zeroes in order to obtain the mutex */ + if (XP_IOREAD(pdata, XP_I2C_MUTEX) || + XP_IOREAD(pdata, XP_MDIO_MUTEX)) { + DELAY(200); + continue; + } + + /* Obtain the mutex */ + XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id); + XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id); + + return (0); + } + + mtx_unlock(&xgbe_phy_comm_lock); + + axgbe_error("unable to obtain hardware mutexes\n"); + + return (-ETIMEDOUT); +} + +static int +xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr, int reg, + uint16_t val) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (reg & MII_ADDR_C45) { + if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45) + return (-ENOTSUP); + } else { + if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22) + return (-ENOTSUP); + } + + return (pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val)); +} + +static int +xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, uint16_t val) +{ + __be16 *mii_val; + uint8_t mii_data[3]; + int ret; + + ret = xgbe_phy_sfp_get_mux(pdata); + if (ret) + return (ret); + + mii_data[0] = reg & 0xff; + mii_val = (__be16 *)&mii_data[1]; + *mii_val = cpu_to_be16(val); + + ret = xgbe_phy_i2c_write(pdata, XGBE_SFP_PHY_ADDRESS, + mii_data, sizeof(mii_data)); + + xgbe_phy_sfp_put_mux(pdata); + + return (ret); +} + +int +xgbe_phy_mii_write(struct xgbe_prv_data *pdata, int addr, int reg, uint16_t val) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + axgbe_printf(3, "%s: addr %d reg %d val %#x\n", __func__, addr, reg, val); + ret = xgbe_phy_get_comm_ownership(pdata); + 
if (ret) + return (ret); + + if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) + ret = xgbe_phy_i2c_mii_write(pdata, reg, val); + else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) + ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val); + else + ret = -ENOTSUP; + + xgbe_phy_put_comm_ownership(pdata); + + return (ret); +} + +static int +xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr, int reg) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (reg & MII_ADDR_C45) { + if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45) + return (-ENOTSUP); + } else { + if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22) + return (-ENOTSUP); + } + + return (pdata->hw_if.read_ext_mii_regs(pdata, addr, reg)); +} + +static int +xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg) +{ + __be16 mii_val; + uint8_t mii_reg; + int ret; + + ret = xgbe_phy_sfp_get_mux(pdata); + if (ret) + return (ret); + + mii_reg = reg; + ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_PHY_ADDRESS, + &mii_reg, sizeof(mii_reg), + &mii_val, sizeof(mii_val)); + if (!ret) + ret = be16_to_cpu(mii_val); + + xgbe_phy_sfp_put_mux(pdata); + + return (ret); +} + +int +xgbe_phy_mii_read(struct xgbe_prv_data *pdata, int addr, int reg) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + axgbe_printf(3, "%s: addr %d reg %d\n", __func__, addr, reg); + ret = xgbe_phy_get_comm_ownership(pdata); + if (ret) + return (ret); + + if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) + ret = xgbe_phy_i2c_mii_read(pdata, reg); + else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) + ret = xgbe_phy_mdio_mii_read(pdata, addr, reg); + else + ret = -ENOTSUP; + + xgbe_phy_put_comm_ownership(pdata); + + return (ret); +} + +static void +xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed) + return; + + XGBE_ZERO_SUP(&pdata->phy); + + if (phy_data->sfp_mod_absent) { + pdata->phy.speed = 
/*
 * Update the advertised/supported link modes from the detected SFP type.
 * Only runs when a module was just inserted/removed or changed.
 */
static void
xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed)
		return;

	XGBE_ZERO_SUP(&pdata->phy);

	if (phy_data->sfp_mod_absent) {
		/* No module: advertise everything generic and autoneg */
		pdata->phy.speed = SPEED_UNKNOWN;
		pdata->phy.duplex = DUPLEX_UNKNOWN;
		pdata->phy.autoneg = AUTONEG_ENABLE;
		pdata->phy.pause_autoneg = AUTONEG_ENABLE;

		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, TP);
		XGBE_SET_SUP(&pdata->phy, FIBRE);

		XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported);

		return;
	}

	switch (phy_data->sfp_base) {
	case XGBE_SFP_BASE_1000_T:
	case XGBE_SFP_BASE_1000_SX:
	case XGBE_SFP_BASE_1000_LX:
	case XGBE_SFP_BASE_1000_CX:
		/* 1G modules autonegotiate; -T also supports 100M */
		pdata->phy.speed = SPEED_UNKNOWN;
		pdata->phy.duplex = DUPLEX_UNKNOWN;
		pdata->phy.autoneg = AUTONEG_ENABLE;
		pdata->phy.pause_autoneg = AUTONEG_ENABLE;
		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) {
			if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
				XGBE_SET_SUP(&pdata->phy, 100baseT_Full);
			if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
				XGBE_SET_SUP(&pdata->phy, 1000baseT_Full);
		} else {
			if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
				XGBE_SET_SUP(&pdata->phy, 1000baseX_Full);
		}
		break;
	case XGBE_SFP_BASE_10000_SR:
	case XGBE_SFP_BASE_10000_LR:
	case XGBE_SFP_BASE_10000_LRM:
	case XGBE_SFP_BASE_10000_ER:
	case XGBE_SFP_BASE_10000_CR:
		/* 10G modules run fixed speed/duplex, no autoneg */
		pdata->phy.speed = SPEED_10000;
		pdata->phy.duplex = DUPLEX_FULL;
		pdata->phy.autoneg = AUTONEG_DISABLE;
		pdata->phy.pause_autoneg = AUTONEG_DISABLE;
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
			switch (phy_data->sfp_base) {
			case XGBE_SFP_BASE_10000_SR:
				XGBE_SET_SUP(&pdata->phy, 10000baseSR_Full);
				break;
			case XGBE_SFP_BASE_10000_LR:
				XGBE_SET_SUP(&pdata->phy, 10000baseLR_Full);
				break;
			case XGBE_SFP_BASE_10000_LRM:
				XGBE_SET_SUP(&pdata->phy, 10000baseLRM_Full);
				break;
			case XGBE_SFP_BASE_10000_ER:
				XGBE_SET_SUP(&pdata->phy, 10000baseER_Full);
				break;
			case XGBE_SFP_BASE_10000_CR:
				XGBE_SET_SUP(&pdata->phy, 10000baseCR_Full);
				break;
			default:
				break;
			}
		}
		break;
	default:
		/* Unrecognized module type */
		pdata->phy.speed = SPEED_UNKNOWN;
		pdata->phy.duplex = DUPLEX_UNKNOWN;
		pdata->phy.autoneg = AUTONEG_DISABLE;
		pdata->phy.pause_autoneg = AUTONEG_DISABLE;
		break;
	}

	/* Copper-ish media report TP, optical media report FIBRE */
	switch (phy_data->sfp_base) {
	case XGBE_SFP_BASE_1000_T:
	case XGBE_SFP_BASE_1000_CX:
	case XGBE_SFP_BASE_10000_CR:
		XGBE_SET_SUP(&pdata->phy, TP);
		break;
	default:
		XGBE_SET_SUP(&pdata->phy, FIBRE);
		break;
	}

	XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported);

	axgbe_printf(1, "%s: link speed %d spf_base 0x%x pause_autoneg %d "
	    "advert 0x%x support 0x%x\n", __func__, pdata->phy.speed,
	    phy_data->sfp_base, pdata->phy.pause_autoneg,
	    pdata->phy.advertising, pdata->phy.supported);
}

/*
 * Check whether the EEPROM's nominal bit-rate byte falls in the window
 * expected for the given speed class.
 */
static bool
xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
    enum xgbe_sfp_speed sfp_speed)
{
	uint8_t *sfp_base, min, max;

	sfp_base = sfp_eeprom->base;

	switch (sfp_speed) {
	case XGBE_SFP_SPEED_1000:
		min = XGBE_SFP_BASE_BR_1GBE_MIN;
		max = XGBE_SFP_BASE_BR_1GBE_MAX;
		break;
	case XGBE_SFP_SPEED_10000:
		min = XGBE_SFP_BASE_BR_10GBE_MIN;
		max = XGBE_SFP_BASE_BR_10GBE_MAX;
		break;
	default:
		return (false);
	}

	return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
	    (sfp_base[XGBE_SFP_BASE_BR] <= max));
}

/* Drop the bound external PHY (simple flag clear in this port) */
static void
xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	if (phy_data->phydev)
		phy_data->phydev = 0;
}

/*
 * Work around Finisar 1000Base-T module PHYs (OUI/model 0x01ff0cc0):
 * force-enable Base-T AN and SGMII register settings.
 * Returns true when the quirk was applied.
 */
static bool
xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	unsigned int phy_id = phy_data->phy_id;

	if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
		return (false);

	/* Match on OUI+model, ignore revision nibble */
	if ((phy_id & 0xfffffff0) != 0x01ff0cc0)
		return (false);

	/* Enable Base-T AN */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x16, 0x0001);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x9140);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x16, 0x0000);

	/* Enable SGMII at 100Base-T/1000Base-T Full Duplex */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1b, 0x9084);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x09, 0x0e00);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x8140);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x04, 0x0d01);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x9140);

	axgbe_printf(3, "Finisar PHY quirk in place\n");

	return (true);
}
/*
 * Work around Bel-Fuse 1GBT-SFP06 modules (PHY id 0x03625d10): switch the
 * embedded PHY from RGMII to SGMII-to-Copper mode via the fiber/copper
 * register banks.  Returns true when the quirk was applied.
 */
static bool
xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
	unsigned int phy_id = phy_data->phy_id;
	int reg;

	if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
		return (false);

	if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
	    XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
		return (false);

	/* For Bel-Fuse, use the extra AN flag */
	pdata->an_again = 1;

	if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
	    XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN))
		return (false);

	/* Match on OUI+model, ignore revision nibble */
	if ((phy_id & 0xfffffff0) != 0x03625d10)
		return (false);

	/* Disable RGMII mode */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x18, 0x7007);
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x18);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x18, reg & ~0x0080);

	/* Enable fiber register bank */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00);
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c);
	reg &= 0x03ff;
	reg &= ~0x0001;
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 |
	    reg | 0x0001);

	/* Power down SerDes */
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg | 0x00800);

	/* Configure SGMII-to-Copper mode */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00);
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c);
	reg &= 0x03ff;
	reg &= ~0x0006;
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 |
	    reg | 0x0004);

	/* Power up SerDes */
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg & ~0x00800);

	/* Enable copper register bank */
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00);
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c);
	reg &= 0x03ff;
	reg &= ~0x0001;
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 |
	    reg);

	/* Power up SerDes */
	reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00);
	xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg & ~0x00800);

	axgbe_printf(3, "BelFuse PHY quirk in place\n");

	return (true);
}

/* Apply the first matching vendor-specific PHY quirk, if any */
static void
xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata)
{
	if (xgbe_phy_belfuse_phy_quirks(pdata))
		return;

	if (xgbe_phy_finisar_phy_quirks(pdata))
		return;
}

/*
 * Read PHY identifier registers 2 and 3 into phy_data->phy_id and log the
 * decoded OUI/model.  Returns 0 or -EIO on MII read failure.
 */
static int
xgbe_get_phy_id(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	uint32_t oui, model, phy_id1, phy_id2;
	int phy_reg;

	phy_reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x02);
	if (phy_reg < 0)
		return (-EIO);

	phy_id1 = (phy_reg & 0xffff);
	phy_data->phy_id = (phy_reg & 0xffff) << 16;	/* high half */

	phy_reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x03);
	if (phy_reg < 0)
		return (-EIO);

	phy_id2 = (phy_reg & 0xffff);
	phy_data->phy_id |= (phy_reg & 0xffff);		/* low half */

	oui = MII_OUI(phy_id1, phy_id2);
	model = MII_MODEL(phy_id2);

	axgbe_printf(2, "%s: phy_id1: 0x%x phy_id2: 0x%x oui: %#x model %#x\n",
	    __func__, phy_id1, phy_id2, oui, model);

	return (0);
}
ctl |= BMCR_SPEED1; + else if (SPEED_100 == pdata->phy.speed) + ctl |= BMCR_SPEED100; + + if (DUPLEX_FULL == pdata->phy.duplex) + ctl |= BMCR_FDX; + + ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); + if (ret) + return (ret); + + ret = xgbe_phy_mii_write(pdata, pdata->mdio_addr, MII_BMCR, + (ret & ~(~(BMCR_LOOP | BMCR_ISO | BMCR_PDOWN))) | ctl); + } + + ctl = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); + if (ctl < 0) + return (ctl); + + if (!(ctl & BMCR_AUTOEN) || (ctl & BMCR_ISO)) + changed = 1; + + if (changed > 0) { + ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); + if (ret) + return (ret); + + ret = xgbe_phy_mii_write(pdata, pdata->mdio_addr, MII_BMCR, + (ret & ~(BMCR_ISO)) | (BMCR_AUTOEN | BMCR_STARTNEG)); + } + + return (0); +} + +static int +xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + axgbe_printf(2, "%s: phydev %d phydev_mode %d sfp_phy_avail %d phy_id " + "0x%08x\n", __func__, phy_data->phydev, phy_data->phydev_mode, + phy_data->sfp_phy_avail, phy_data->phy_id); + + /* If we already have a PHY, just return */ + if (phy_data->phydev) { + axgbe_printf(3, "%s: phy present already\n", __func__); + return (0); + } + + /* Clear the extra AN flag */ + pdata->an_again = 0; + + /* Check for the use of an external PHY */ + if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE) { + axgbe_printf(3, "%s: phydev_mode %d\n", __func__, + phy_data->phydev_mode); + return (0); + } + + /* For SFP, only use an external PHY if available */ + if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) && + !phy_data->sfp_phy_avail) { + axgbe_printf(3, "%s: port_mode %d avail %d\n", __func__, + phy_data->port_mode, phy_data->sfp_phy_avail); + return (0); + } + + /* Set the proper MDIO mode for the PHY */ + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, + phy_data->phydev_mode); + if (ret) { + axgbe_error("mdio port/clause not compatible (%u/%u) ret %d\n", + 
phy_data->mdio_addr, phy_data->phydev_mode, ret); + return (ret); + } + + ret = xgbe_get_phy_id(pdata); + if (ret) + return (ret); + axgbe_printf(2, "Get phy_id 0x%08x\n", phy_data->phy_id); + + phy_data->phydev = 1; + xgbe_phy_external_phy_quirks(pdata); + xgbe_phy_start_aneg(pdata); + + return (0); +} + +static void +xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + axgbe_printf(3, "%s: sfp_changed: 0x%x\n", __func__, + phy_data->sfp_changed); + if (!phy_data->sfp_changed) + return; + + phy_data->sfp_phy_avail = 0; + + if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) + return; + + /* Check access to the PHY by reading CTRL1 */ + ret = xgbe_phy_i2c_mii_read(pdata, MII_BMCR); + if (ret < 0) { + axgbe_error("%s: ext phy fail %d\n", __func__, ret); + return; + } + + /* Successfully accessed the PHY */ + phy_data->sfp_phy_avail = 1; + axgbe_printf(3, "Successfully accessed External PHY\n"); +} + +static bool +xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data) +{ + uint8_t *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS)) + return (false); + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) + return (false); + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los)) + return (true); + + return (false); +} + +static bool +xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data) +{ + uint8_t *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT)) + return (false); + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) + return (false); + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault)) + return (true); + + return (false); +} + +static bool +xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) +{ + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) + return (false); + + if (phy_data->sfp_gpio_inputs & (1 << 
phy_data->sfp_gpio_mod_absent)) + return (true); + + return (false); +} + +static void +xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + uint8_t *sfp_base; + + sfp_base = sfp_eeprom->base; + + if (sfp_base[XGBE_SFP_BASE_ID] != XGBE_SFP_ID_SFP) { + axgbe_error("base id %d\n", sfp_base[XGBE_SFP_BASE_ID]); + return; + } + + if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) { + axgbe_error("base id %d\n", sfp_base[XGBE_SFP_BASE_EXT_ID]); + return; + } + + /* Update transceiver signals (eeprom extd/options) */ + phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); + phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); + + /* Assume ACTIVE cable unless told it is PASSIVE */ + if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) { + phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE; + phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN]; + } else + phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE; + + /* Determine the type of SFP */ + if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) + phy_data->sfp_base = XGBE_SFP_BASE_10000_SR; + else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR) + phy_data->sfp_base = XGBE_SFP_BASE_10000_LR; + else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LRM) + phy_data->sfp_base = XGBE_SFP_BASE_10000_LRM; + else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_ER) + phy_data->sfp_base = XGBE_SFP_BASE_10000_ER; + else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_SX) + phy_data->sfp_base = XGBE_SFP_BASE_1000_SX; + else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_LX) + phy_data->sfp_base = XGBE_SFP_BASE_1000_LX; + else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_CX) + phy_data->sfp_base = XGBE_SFP_BASE_1000_CX; + else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & 
XGBE_SFP_BASE_1GBE_CC_T) + phy_data->sfp_base = XGBE_SFP_BASE_1000_T; + else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) && + xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) + phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; + + switch (phy_data->sfp_base) { + case XGBE_SFP_BASE_1000_T: + phy_data->sfp_speed = XGBE_SFP_SPEED_100_1000; + break; + case XGBE_SFP_BASE_1000_SX: + case XGBE_SFP_BASE_1000_LX: + case XGBE_SFP_BASE_1000_CX: + phy_data->sfp_speed = XGBE_SFP_SPEED_1000; + break; + case XGBE_SFP_BASE_10000_SR: + case XGBE_SFP_BASE_10000_LR: + case XGBE_SFP_BASE_10000_LRM: + case XGBE_SFP_BASE_10000_ER: + case XGBE_SFP_BASE_10000_CR: + phy_data->sfp_speed = XGBE_SFP_SPEED_10000; + break; + default: + break; + } + axgbe_printf(3, "%s: sfp_base: 0x%x sfp_speed: 0x%x sfp_cable: 0x%x " + "rx_los 0x%x tx_fault 0x%x\n", __func__, phy_data->sfp_base, + phy_data->sfp_speed, phy_data->sfp_cable, phy_data->sfp_rx_los, + phy_data->sfp_tx_fault); +} + +static void +xgbe_phy_sfp_eeprom_info(struct xgbe_prv_data *pdata, + struct xgbe_sfp_eeprom *sfp_eeprom) +{ + struct xgbe_sfp_ascii sfp_ascii; + char *sfp_data = (char *)&sfp_ascii; + + axgbe_printf(3, "SFP detected:\n"); + memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], + XGBE_SFP_BASE_VENDOR_NAME_LEN); + sfp_data[XGBE_SFP_BASE_VENDOR_NAME_LEN] = '\0'; + axgbe_printf(3, " vendor: %s\n", + sfp_data); + + memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], + XGBE_SFP_BASE_VENDOR_PN_LEN); + sfp_data[XGBE_SFP_BASE_VENDOR_PN_LEN] = '\0'; + axgbe_printf(3, " part number: %s\n", + sfp_data); + + memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_REV], + XGBE_SFP_BASE_VENDOR_REV_LEN); + sfp_data[XGBE_SFP_BASE_VENDOR_REV_LEN] = '\0'; + axgbe_printf(3, " revision level: %s\n", + sfp_data); + + memcpy(sfp_data, &sfp_eeprom->extd[XGBE_SFP_BASE_VENDOR_SN], + XGBE_SFP_BASE_VENDOR_SN_LEN); + sfp_data[XGBE_SFP_BASE_VENDOR_SN_LEN] = '\0'; + axgbe_printf(3, " serial number: %s\n", + sfp_data); +} + 
/*
 * Validate an EEPROM region checksum: the low byte of the sum over the
 * region must equal the stored check code.
 */
static bool
xgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf, unsigned int len)
{
	uint8_t cc;

	for (cc = 0; len; buf++, len--)
		cc += *buf;

	return ((cc == cc_in) ? true : false);
}

/* Debug dump of the key base-ID bytes of a module EEPROM */
static void
dump_sfp_eeprom(struct xgbe_prv_data *pdata, uint8_t *sfp_base)
{
	/* NOTE(review): values are single bytes; %04x width is cosmetic */
	axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_ID]     : 0x%04x\n",
	    sfp_base[XGBE_SFP_BASE_ID]);
	axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_EXT_ID] : 0x%04x\n",
	    sfp_base[XGBE_SFP_BASE_EXT_ID]);
	axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_CABLE]  : 0x%04x\n",
	    sfp_base[XGBE_SFP_BASE_CABLE]);
}

/*
 * Read and checksum-verify the SFP serial ID EEPROM; on a changed module,
 * cache the contents and drop any previously bound PHY.
 * Caller must hold comm ownership; the mux is acquired/released here.
 */
static int
xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	struct xgbe_sfp_eeprom sfp_eeprom, *eeprom;
	uint8_t eeprom_addr, *base;
	int ret;

	ret = xgbe_phy_sfp_get_mux(pdata);
	if (ret) {
		axgbe_error("I2C error setting SFP MUX\n");
		return (ret);
	}

	/* Read the SFP serial ID eeprom */
	eeprom_addr = 0;
	ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
	    &eeprom_addr, sizeof(eeprom_addr),
	    &sfp_eeprom, sizeof(sfp_eeprom));

	eeprom = &sfp_eeprom;
	base = eeprom->base;
	dump_sfp_eeprom(pdata, base);
	if (ret) {
		axgbe_error("I2C error reading SFP EEPROM\n");
		goto put;
	}

	/* Validate the contents read (last byte of each area is its CC) */
	if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[XGBE_SFP_BASE_CC],
	    sfp_eeprom.base, sizeof(sfp_eeprom.base) - 1)) {
		axgbe_error("verify eeprom base failed\n");
		ret = -EINVAL;
		goto put;
	}

	if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[XGBE_SFP_EXTD_CC],
	    sfp_eeprom.extd, sizeof(sfp_eeprom.extd) - 1)) {
		axgbe_error("verify eeprom extd failed\n");
		ret = -EINVAL;
		goto put;
	}

	/* Check for an added or changed SFP */
	if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
		phy_data->sfp_changed = 1;

		xgbe_phy_sfp_eeprom_info(pdata, &sfp_eeprom);

		memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));

		/* New module: any previously found PHY is stale */
		xgbe_phy_free_phy_device(pdata);
	} else
		phy_data->sfp_changed = 0;

put:
	xgbe_phy_sfp_put_mux(pdata);

	return (ret);
}

/*
 * Sample the GPIO expander that carries the SFP status signals and update
 * the cached inputs and the module-absent state.
 */
static void
xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	uint8_t gpio_reg, gpio_ports[2];
	int ret, prev_sfp_inputs = phy_data->port_sfp_inputs;
	/* Each port owns a 4-bit nibble of the 16 GPIO inputs */
	int shift = GPIO_MASK_WIDTH * (3 - phy_data->port_id);

	/* Read the input port registers */
	axgbe_printf(3, "%s: befor sfp_mod:%d sfp_gpio_address:0x%x\n",
	    __func__, phy_data->sfp_mod_absent, phy_data->sfp_gpio_address);

	gpio_reg = 0;
	ret = xgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address, &gpio_reg,
	    sizeof(gpio_reg), gpio_ports, sizeof(gpio_ports));
	if (ret) {
		axgbe_error("%s: I2C error reading SFP GPIO addr:0x%x\n",
		    __func__, phy_data->sfp_gpio_address);
		return;
	}

	phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
	phy_data->port_sfp_inputs = (phy_data->sfp_gpio_inputs >> shift) & 0x0F;

	if (prev_sfp_inputs != phy_data->port_sfp_inputs)
		axgbe_printf(0, "%s: port_sfp_inputs: 0x%0x\n", __func__,
		    phy_data->port_sfp_inputs);

	phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);

	axgbe_printf(3, "%s: after sfp_mod:%d sfp_gpio_inputs:0x%x\n",
	    __func__, phy_data->sfp_mod_absent, phy_data->sfp_gpio_inputs);
}

/* Tear down all state associated with a removed module */
static void
xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	xgbe_phy_free_phy_device(pdata);

	phy_data->sfp_mod_absent = 1;
	phy_data->sfp_phy_avail = 0;
	memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
}

/* Reset the per-module detection state to "nothing known" */
static void
xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data)
{
	phy_data->sfp_rx_los = 0;
	phy_data->sfp_tx_fault = 0;
	phy_data->sfp_mod_absent = 1;
	phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN;
	phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN;
	phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN;
}
/*
 * Full SFP detection pass: sample GPIO signals, read/parse the EEPROM,
 * probe for an embedded PHY and refresh the advertised link settings.
 * Any failure is treated as "no module present".
 */
static void
xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	int ret, prev_sfp_state = phy_data->sfp_mod_absent;

	/* Reset the SFP signals and info */
	xgbe_phy_sfp_reset(phy_data);

	ret = xgbe_phy_get_comm_ownership(pdata);
	if (ret)
		return;

	/* Read the SFP signals and check for module presence */
	xgbe_phy_sfp_signals(pdata);
	if (phy_data->sfp_mod_absent) {
		/* Only log on a present -> absent transition */
		if (prev_sfp_state != phy_data->sfp_mod_absent)
			axgbe_error("%s: mod absent\n", __func__);
		xgbe_phy_sfp_mod_absent(pdata);
		goto put;
	}

	ret = xgbe_phy_sfp_read_eeprom(pdata);
	if (ret) {
		/* Treat any error as if there isn't an SFP plugged in */
		axgbe_error("%s: eeprom read failed\n", __func__);
		xgbe_phy_sfp_reset(phy_data);
		xgbe_phy_sfp_mod_absent(pdata);
		goto put;
	}

	xgbe_phy_sfp_parse_eeprom(pdata);

	xgbe_phy_sfp_external_phy(pdata);

put:
	/* Always refresh link settings, even for the absent case */
	xgbe_phy_sfp_phy_settings(pdata);

	axgbe_printf(3, "%s: phy speed: 0x%x duplex: 0x%x autoneg: 0x%x "
	    "pause_autoneg: 0x%x\n", __func__, pdata->phy.speed,
	    pdata->phy.duplex, pdata->phy.autoneg, pdata->phy.pause_autoneg);

	xgbe_phy_put_comm_ownership(pdata);
}

/*
 * Read the module's base EEPROM (and, when supported, its diagnostic
 * page) into a local buffer.  Returns 0 or a negative errno.
 * NOTE(review): the data is read into a stack buffer and not copied out
 * to the caller — confirm against callers whether that is intended.
 */
static int
xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	uint8_t eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX];
	struct xgbe_sfp_eeprom *sfp_eeprom;
	int ret;

	if (phy_data->port_mode != XGBE_PORT_MODE_SFP) {
		ret = -ENXIO;
		goto done;
	}

	if (phy_data->sfp_mod_absent) {
		ret = -EIO;
		goto done;
	}

	ret = xgbe_phy_get_comm_ownership(pdata);
	if (ret) {
		ret = -EIO;
		goto done;
	}

	ret = xgbe_phy_sfp_get_mux(pdata);
	if (ret) {
		axgbe_error("I2C error setting SFP MUX\n");
		ret = -EIO;
		goto put_own;
	}

	/* Read the SFP serial ID eeprom */
	eeprom_addr = 0;
	ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
	    &eeprom_addr, sizeof(eeprom_addr),
	    eeprom_data, XGBE_SFP_EEPROM_BASE_LEN);
	if (ret) {
		axgbe_error("I2C error reading SFP EEPROM\n");
		ret = -EIO;
		goto put_mux;
	}

	sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data;

	if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) {
		/* Read the SFP diagnostic eeprom */
		eeprom_addr = 0;
		ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS,
		    &eeprom_addr, sizeof(eeprom_addr),
		    eeprom_data + XGBE_SFP_EEPROM_BASE_LEN,
		    XGBE_SFP_EEPROM_DIAG_LEN);
		if (ret) {
			axgbe_error("I2C error reading SFP DIAGS\n");
			ret = -EIO;
			goto put_mux;
		}
	}

put_mux:
	xgbe_phy_sfp_put_mux(pdata);

put_own:
	xgbe_phy_put_comm_ownership(pdata);

done:
	return (ret);
}

/* Report whether module information can be queried on this port */
static int
xgbe_phy_module_info(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
		return (-ENXIO);

	if (phy_data->sfp_mod_absent)
		return (-EIO);

	return (0);
}

/*
 * Record the link partner's pause capabilities when an external PHY is
 * present; pause resolution itself happens elsewhere.
 */
static void
xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	pdata->phy.tx_pause = 0;
	pdata->phy.rx_pause = 0;

	if (!phy_data->phydev)
		return;

	if (pdata->phy.pause)
		XGBE_SET_LP_ADV(&pdata->phy, Pause);

	if (pdata->phy.asym_pause)
		XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause);

	axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__,
	    pdata->phy.tx_pause, pdata->phy.rx_pause);
}

/*
 * Translate a completed SGMII (clause-37) auto-negotiation into the
 * operating mode; half-duplex results are rejected as unsupported.
 */
static enum xgbe_mode
xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
{
	enum xgbe_mode mode;

	XGBE_SET_LP_ADV(&pdata->phy, Autoneg);
	XGBE_SET_LP_ADV(&pdata->phy, TP);

	axgbe_printf(1, "%s: pause_autoneg %d\n", __func__,
	    pdata->phy.pause_autoneg);

	/* Use external PHY to determine flow control */
	if (pdata->phy.pause_autoneg)
		xgbe_phy_phydev_flowctrl(pdata);

	switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
	case XGBE_SGMII_AN_LINK_SPEED_100:
		if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
			XGBE_SET_LP_ADV(&pdata->phy, 100baseT_Full);
			mode = XGBE_MODE_SGMII_100;
		} else {
			/* Half-duplex not supported */
			XGBE_SET_LP_ADV(&pdata->phy, 100baseT_Half);
			mode = XGBE_MODE_UNKNOWN;
		}
		break;
	case XGBE_SGMII_AN_LINK_SPEED_1000:
		if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
			XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Full);
			mode = XGBE_MODE_SGMII_1000;
		} else {
			/* Half-duplex not supported */
			XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Half);
			mode = XGBE_MODE_UNKNOWN;
		}
		break;
	default:
		mode = XGBE_MODE_UNKNOWN;
	}

	return (mode);
}
XGBE_MODE_UNKNOWN; + } + break; + case XGBE_SGMII_AN_LINK_SPEED_1000: + if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) { + XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Full); + mode = XGBE_MODE_SGMII_1000; + } else { + /* Half-duplex not supported */ + XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Half); + mode = XGBE_MODE_UNKNOWN; + } + break; + default: + mode = XGBE_MODE_UNKNOWN; + } + + return (mode); +} + +static enum xgbe_mode +xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata) +{ + enum xgbe_mode mode; + unsigned int ad_reg, lp_reg; + + XGBE_SET_LP_ADV(&pdata->phy, Autoneg); + XGBE_SET_LP_ADV(&pdata->phy, FIBRE); + + /* Compare Advertisement and Link Partner register */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_LP_ABILITY); + if (lp_reg & 0x100) + XGBE_SET_LP_ADV(&pdata->phy, Pause); + if (lp_reg & 0x80) + XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause); + + axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n", + __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg); + + if (pdata->phy.pause_autoneg) { + /* Set flow control based on auto-negotiation result */ + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; + + if (ad_reg & lp_reg & 0x100) { + pdata->phy.tx_pause = 1; + pdata->phy.rx_pause = 1; + } else if (ad_reg & lp_reg & 0x80) { + if (ad_reg & 0x100) + pdata->phy.rx_pause = 1; + else if (lp_reg & 0x100) + pdata->phy.tx_pause = 1; + } + } + + axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__, pdata->phy.tx_pause, + pdata->phy.rx_pause); + + if (lp_reg & 0x20) + XGBE_SET_LP_ADV(&pdata->phy, 1000baseX_Full); + + /* Half duplex is not supported */ + ad_reg &= lp_reg; + mode = (ad_reg & 0x20) ? 
XGBE_MODE_X : XGBE_MODE_UNKNOWN; + + return (mode); +} + +static enum xgbe_mode +xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + enum xgbe_mode mode; + unsigned int ad_reg, lp_reg; + + XGBE_SET_LP_ADV(&pdata->phy, Autoneg); + XGBE_SET_LP_ADV(&pdata->phy, Backplane); + + axgbe_printf(1, "%s: pause_autoneg %d\n", __func__, + pdata->phy.pause_autoneg); + + /* Use external PHY to determine flow control */ + if (pdata->phy.pause_autoneg) + xgbe_phy_phydev_flowctrl(pdata); + + /* Compare Advertisement and Link Partner register 2 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + if (lp_reg & 0x80) + XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full); + if (lp_reg & 0x20) + XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full); + + ad_reg &= lp_reg; + if (ad_reg & 0x80) { + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + mode = XGBE_MODE_KR; + break; + default: + mode = XGBE_MODE_SFI; + break; + } + } else if (ad_reg & 0x20) { + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + mode = XGBE_MODE_KX_1000; + break; + case XGBE_PORT_MODE_1000BASE_X: + mode = XGBE_MODE_X; + break; + case XGBE_PORT_MODE_SFP: + switch (phy_data->sfp_base) { + case XGBE_SFP_BASE_1000_T: + if ((phy_data->phydev) && + (pdata->phy.speed == SPEED_100)) + mode = XGBE_MODE_SGMII_100; + else + mode = XGBE_MODE_SGMII_1000; + break; + case XGBE_SFP_BASE_1000_SX: + case XGBE_SFP_BASE_1000_LX: + case XGBE_SFP_BASE_1000_CX: + default: + mode = XGBE_MODE_X; + break; + } + break; + default: + if ((phy_data->phydev) && + (pdata->phy.speed == SPEED_100)) + mode = XGBE_MODE_SGMII_100; + else + mode = XGBE_MODE_SGMII_1000; + break; + } + } else { + mode = XGBE_MODE_UNKNOWN; + } + + /* Compare Advertisement and Link Partner register 3 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, 
MDIO_AN_LPA + 2); + if (lp_reg & 0xc000) + XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC); + + return (mode); +} + +static enum xgbe_mode +xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata) +{ + enum xgbe_mode mode; + unsigned int ad_reg, lp_reg; + + XGBE_SET_LP_ADV(&pdata->phy, Autoneg); + XGBE_SET_LP_ADV(&pdata->phy, Backplane); + + /* Compare Advertisement and Link Partner register 1 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); + if (lp_reg & 0x400) + XGBE_SET_LP_ADV(&pdata->phy, Pause); + if (lp_reg & 0x800) + XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause); + + axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n", + __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg); + + if (pdata->phy.pause_autoneg) { + /* Set flow control based on auto-negotiation result */ + pdata->phy.tx_pause = 0; + pdata->phy.rx_pause = 0; + + if (ad_reg & lp_reg & 0x400) { + pdata->phy.tx_pause = 1; + pdata->phy.rx_pause = 1; + } else if (ad_reg & lp_reg & 0x800) { + if (ad_reg & 0x400) + pdata->phy.rx_pause = 1; + else if (lp_reg & 0x400) + pdata->phy.tx_pause = 1; + } + } + + axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__, pdata->phy.tx_pause, + pdata->phy.rx_pause); + + /* Compare Advertisement and Link Partner register 2 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); + if (lp_reg & 0x80) + XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full); + if (lp_reg & 0x20) + XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full); + + ad_reg &= lp_reg; + if (ad_reg & 0x80) + mode = XGBE_MODE_KR; + else if (ad_reg & 0x20) + mode = XGBE_MODE_KX_1000; + else + mode = XGBE_MODE_UNKNOWN; + + /* Compare Advertisement and Link Partner register 3 */ + ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); + lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); + if (lp_reg & 0xc000) + XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC); + + 
return (mode); +} + +static enum xgbe_mode +xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) +{ + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + return (xgbe_phy_an73_outcome(pdata)); + case XGBE_AN_MODE_CL73_REDRV: + return (xgbe_phy_an73_redrv_outcome(pdata)); + case XGBE_AN_MODE_CL37: + return (xgbe_phy_an37_outcome(pdata)); + case XGBE_AN_MODE_CL37_SGMII: + return (xgbe_phy_an37_sgmii_outcome(pdata)); + default: + return (XGBE_MODE_UNKNOWN); + } +} + +static void +xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, struct xgbe_phy *dphy) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + XGBE_LM_COPY(dphy, advertising, &pdata->phy, advertising); + + /* Without a re-driver, just return current advertising */ + if (!phy_data->redrv) + return; + + /* With the KR re-driver we need to advertise a single speed */ + XGBE_CLR_ADV(dphy, 1000baseKX_Full); + XGBE_CLR_ADV(dphy, 10000baseKR_Full); + + /* Advertise FEC support is present */ + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + XGBE_SET_ADV(dphy, 10000baseR_FEC); + + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + XGBE_SET_ADV(dphy, 10000baseKR_Full); + break; + case XGBE_PORT_MODE_BACKPLANE_2500: + XGBE_SET_ADV(dphy, 1000baseKX_Full); + break; + case XGBE_PORT_MODE_1000BASE_T: + case XGBE_PORT_MODE_1000BASE_X: + case XGBE_PORT_MODE_NBASE_T: + XGBE_SET_ADV(dphy, 1000baseKX_Full); + break; + case XGBE_PORT_MODE_10GBASE_T: + if ((phy_data->phydev) && + (pdata->phy.speed == SPEED_10000)) + XGBE_SET_ADV(dphy, 10000baseKR_Full); + else + XGBE_SET_ADV(dphy, 1000baseKX_Full); + break; + case XGBE_PORT_MODE_10GBASE_R: + XGBE_SET_ADV(dphy, 10000baseKR_Full); + break; + case XGBE_PORT_MODE_SFP: + switch (phy_data->sfp_base) { + case XGBE_SFP_BASE_1000_T: + case XGBE_SFP_BASE_1000_SX: + case XGBE_SFP_BASE_1000_LX: + case XGBE_SFP_BASE_1000_CX: + XGBE_SET_ADV(dphy, 1000baseKX_Full); + break; + default: + XGBE_SET_ADV(dphy, 10000baseKR_Full); + break; + } + break; + default: + 
XGBE_SET_ADV(dphy, 10000baseKR_Full); + break; + } +} + +static int +xgbe_phy_an_config(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + ret = xgbe_phy_find_phy_device(pdata); + if (ret) + return (ret); + + axgbe_printf(2, "%s: find_phy_device return %s.\n", __func__, + ret ? "Failure" : "Success"); + + if (!phy_data->phydev) + return (0); + + ret = xgbe_phy_start_aneg(pdata); + return (ret); +} + +static enum xgbe_an_mode +xgbe_phy_an_sfp_mode(struct xgbe_phy_data *phy_data) +{ + switch (phy_data->sfp_base) { + case XGBE_SFP_BASE_1000_T: + return (XGBE_AN_MODE_CL37_SGMII); + case XGBE_SFP_BASE_1000_SX: + case XGBE_SFP_BASE_1000_LX: + case XGBE_SFP_BASE_1000_CX: + return (XGBE_AN_MODE_CL37); + default: + return (XGBE_AN_MODE_NONE); + } +} + +static enum xgbe_an_mode +xgbe_phy_an_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + /* A KR re-driver will always require CL73 AN */ + if (phy_data->redrv) + return (XGBE_AN_MODE_CL73_REDRV); + + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + return (XGBE_AN_MODE_CL73); + case XGBE_PORT_MODE_BACKPLANE_2500: + return (XGBE_AN_MODE_NONE); + case XGBE_PORT_MODE_1000BASE_T: + return (XGBE_AN_MODE_CL37_SGMII); + case XGBE_PORT_MODE_1000BASE_X: + return (XGBE_AN_MODE_CL37); + case XGBE_PORT_MODE_NBASE_T: + return (XGBE_AN_MODE_CL37_SGMII); + case XGBE_PORT_MODE_10GBASE_T: + return (XGBE_AN_MODE_CL73); + case XGBE_PORT_MODE_10GBASE_R: + return (XGBE_AN_MODE_NONE); + case XGBE_PORT_MODE_SFP: + return (xgbe_phy_an_sfp_mode(phy_data)); + default: + return (XGBE_AN_MODE_NONE); + } +} + +static int +xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata, + enum xgbe_phy_redrv_mode mode) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + uint16_t redrv_reg, redrv_val; + + redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); + redrv_val = (uint16_t)mode; + + return 
(pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr, + redrv_reg, redrv_val)); +} + +static int +xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata, + enum xgbe_phy_redrv_mode mode) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int redrv_reg; + int ret; + + /* Calculate the register to write */ + redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); + + ret = xgbe_phy_redrv_write(pdata, redrv_reg, mode); + + return (ret); +} + +static void +xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + enum xgbe_phy_redrv_mode mode; + int ret; + + if (!phy_data->redrv) + return; + + mode = XGBE_PHY_REDRV_MODE_CX; + if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) && + (phy_data->sfp_base != XGBE_SFP_BASE_1000_CX) && + (phy_data->sfp_base != XGBE_SFP_BASE_10000_CR)) + mode = XGBE_PHY_REDRV_MODE_SR; + + ret = xgbe_phy_get_comm_ownership(pdata); + if (ret) + return; + + axgbe_printf(2, "%s: redrv_if set: %d\n", __func__, phy_data->redrv_if); + if (phy_data->redrv_if) + xgbe_phy_set_redrv_mode_i2c(pdata, mode); + else + xgbe_phy_set_redrv_mode_mdio(pdata, mode); + + xgbe_phy_put_comm_ownership(pdata); +} + +static void +xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, unsigned int cmd, + unsigned int sub_cmd) +{ + unsigned int s0 = 0; + unsigned int wait; + + /* Log if a previous command did not complete */ + if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) + axgbe_error("firmware mailbox not ready for command\n"); + + /* Construct the command */ + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd); + XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, sub_cmd); + + /* Issue the command */ + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); + XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); + XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); + + /* Wait for command to complete */ + wait = XGBE_RATECHANGE_COUNT; + while (wait--) { + if (!XP_IOREAD_BITS(pdata, 
XP_DRIVER_INT_RO, STATUS)) { + axgbe_printf(3, "%s: Rate change done\n", __func__); + return; + } + + DELAY(2000); + } + + axgbe_printf(3, "firmware mailbox command did not complete\n"); +} + +static void +xgbe_phy_rrc(struct xgbe_prv_data *pdata) +{ + /* Receiver Reset Cycle */ + xgbe_phy_perform_ratechange(pdata, 5, 0); + + axgbe_printf(3, "receiver reset complete\n"); +} + +static void +xgbe_phy_power_off(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + /* Power off */ + xgbe_phy_perform_ratechange(pdata, 0, 0); + + phy_data->cur_mode = XGBE_MODE_UNKNOWN; + + axgbe_printf(3, "phy powered off\n"); +} + +static void +xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + xgbe_phy_set_redrv_mode(pdata); + + /* 10G/SFI */ + axgbe_printf(3, "%s: cable %d len %d\n", __func__, phy_data->sfp_cable, + phy_data->sfp_cable_len); + + if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) + xgbe_phy_perform_ratechange(pdata, 3, 0); + else { + if (phy_data->sfp_cable_len <= 1) + xgbe_phy_perform_ratechange(pdata, 3, 1); + else if (phy_data->sfp_cable_len <= 3) + xgbe_phy_perform_ratechange(pdata, 3, 2); + else + xgbe_phy_perform_ratechange(pdata, 3, 3); + } + + phy_data->cur_mode = XGBE_MODE_SFI; + + axgbe_printf(3, "10GbE SFI mode set\n"); +} + +static void +xgbe_phy_x_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + xgbe_phy_set_redrv_mode(pdata); + + /* 1G/X */ + xgbe_phy_perform_ratechange(pdata, 1, 3); + + phy_data->cur_mode = XGBE_MODE_X; + + axgbe_printf(3, "1GbE X mode set\n"); +} + +static void +xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + xgbe_phy_set_redrv_mode(pdata); + + /* 1G/SGMII */ + xgbe_phy_perform_ratechange(pdata, 1, 2); + + phy_data->cur_mode = XGBE_MODE_SGMII_1000; + + axgbe_printf(2, "1GbE SGMII mode set\n"); +} + +static void +xgbe_phy_sgmii_100_mode(struct 
xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + xgbe_phy_set_redrv_mode(pdata); + + /* 100M/SGMII */ + xgbe_phy_perform_ratechange(pdata, 1, 1); + + phy_data->cur_mode = XGBE_MODE_SGMII_100; + + axgbe_printf(3, "100MbE SGMII mode set\n"); +} + +static void +xgbe_phy_kr_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + xgbe_phy_set_redrv_mode(pdata); + + /* 10G/KR */ + xgbe_phy_perform_ratechange(pdata, 4, 0); + + phy_data->cur_mode = XGBE_MODE_KR; + + axgbe_printf(3, "10GbE KR mode set\n"); +} + +static void +xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + xgbe_phy_set_redrv_mode(pdata); + + /* 2.5G/KX */ + xgbe_phy_perform_ratechange(pdata, 2, 0); + + phy_data->cur_mode = XGBE_MODE_KX_2500; + + axgbe_printf(3, "2.5GbE KX mode set\n"); +} + +static void +xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + xgbe_phy_set_redrv_mode(pdata); + + /* 1G/KX */ + xgbe_phy_perform_ratechange(pdata, 1, 3); + + phy_data->cur_mode = XGBE_MODE_KX_1000; + + axgbe_printf(3, "1GbE KX mode set\n"); +} + +static enum xgbe_mode +xgbe_phy_cur_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + return (phy_data->cur_mode); +} + +static enum xgbe_mode +xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + /* No switching if not 10GBase-T */ + if (phy_data->port_mode != XGBE_PORT_MODE_10GBASE_T) + return (xgbe_phy_cur_mode(pdata)); + + switch (xgbe_phy_cur_mode(pdata)) { + case XGBE_MODE_SGMII_100: + case XGBE_MODE_SGMII_1000: + return (XGBE_MODE_KR); + case XGBE_MODE_KR: + default: + return (XGBE_MODE_SGMII_1000); + } +} + +static enum xgbe_mode +xgbe_phy_switch_bp_2500_mode(struct xgbe_prv_data *pdata) +{ + return (XGBE_MODE_KX_2500); +} + +static enum xgbe_mode +xgbe_phy_switch_bp_mode(struct 
xgbe_prv_data *pdata) +{ + /* If we are in KR switch to KX, and vice-versa */ + switch (xgbe_phy_cur_mode(pdata)) { + case XGBE_MODE_KX_1000: + return (XGBE_MODE_KR); + case XGBE_MODE_KR: + default: + return (XGBE_MODE_KX_1000); + } +} + +static enum xgbe_mode +xgbe_phy_switch_mode(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + return (xgbe_phy_switch_bp_mode(pdata)); + case XGBE_PORT_MODE_BACKPLANE_2500: + return (xgbe_phy_switch_bp_2500_mode(pdata)); + case XGBE_PORT_MODE_1000BASE_T: + case XGBE_PORT_MODE_NBASE_T: + case XGBE_PORT_MODE_10GBASE_T: + return (xgbe_phy_switch_baset_mode(pdata)); + case XGBE_PORT_MODE_1000BASE_X: + case XGBE_PORT_MODE_10GBASE_R: + case XGBE_PORT_MODE_SFP: + /* No switching, so just return current mode */ + return (xgbe_phy_cur_mode(pdata)); + default: + return (XGBE_MODE_UNKNOWN); + } +} + +static enum xgbe_mode +xgbe_phy_get_basex_mode(struct xgbe_phy_data *phy_data, int speed) +{ + switch (speed) { + case SPEED_1000: + return (XGBE_MODE_X); + case SPEED_10000: + return (XGBE_MODE_KR); + default: + return (XGBE_MODE_UNKNOWN); + } +} + +static enum xgbe_mode +xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data, int speed) +{ + switch (speed) { + case SPEED_100: + return (XGBE_MODE_SGMII_100); + case SPEED_1000: + return (XGBE_MODE_SGMII_1000); + case SPEED_2500: + return (XGBE_MODE_KX_2500); + case SPEED_10000: + return (XGBE_MODE_KR); + default: + return (XGBE_MODE_UNKNOWN); + } +} + +static enum xgbe_mode +xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data, int speed) +{ + switch (speed) { + case SPEED_100: + return (XGBE_MODE_SGMII_100); + case SPEED_1000: + if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) + return (XGBE_MODE_SGMII_1000); + else + return (XGBE_MODE_X); + case SPEED_10000: + case SPEED_UNKNOWN: + return (XGBE_MODE_SFI); + default: + return (XGBE_MODE_UNKNOWN); + } +} + +static enum xgbe_mode 
+xgbe_phy_get_bp_2500_mode(int speed) +{ + switch (speed) { + case SPEED_2500: + return (XGBE_MODE_KX_2500); + default: + return (XGBE_MODE_UNKNOWN); + } +} + +static enum xgbe_mode +xgbe_phy_get_bp_mode(int speed) +{ + switch (speed) { + case SPEED_1000: + return (XGBE_MODE_KX_1000); + case SPEED_10000: + return (XGBE_MODE_KR); + default: + return (XGBE_MODE_UNKNOWN); + } +} + +static enum xgbe_mode +xgbe_phy_get_mode(struct xgbe_prv_data *pdata, int speed) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + return (xgbe_phy_get_bp_mode(speed)); + case XGBE_PORT_MODE_BACKPLANE_2500: + return (xgbe_phy_get_bp_2500_mode(speed)); + case XGBE_PORT_MODE_1000BASE_T: + case XGBE_PORT_MODE_NBASE_T: + case XGBE_PORT_MODE_10GBASE_T: + return (xgbe_phy_get_baset_mode(phy_data, speed)); + case XGBE_PORT_MODE_1000BASE_X: + case XGBE_PORT_MODE_10GBASE_R: + return (xgbe_phy_get_basex_mode(phy_data, speed)); + case XGBE_PORT_MODE_SFP: + return (xgbe_phy_get_sfp_mode(phy_data, speed)); + default: + return (XGBE_MODE_UNKNOWN); + } +} + +static void +xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) +{ + switch (mode) { + case XGBE_MODE_KX_1000: + xgbe_phy_kx_1000_mode(pdata); + break; + case XGBE_MODE_KX_2500: + xgbe_phy_kx_2500_mode(pdata); + break; + case XGBE_MODE_KR: + xgbe_phy_kr_mode(pdata); + break; + case XGBE_MODE_SGMII_100: + xgbe_phy_sgmii_100_mode(pdata); + break; + case XGBE_MODE_SGMII_1000: + xgbe_phy_sgmii_1000_mode(pdata); + break; + case XGBE_MODE_X: + xgbe_phy_x_mode(pdata); + break; + case XGBE_MODE_SFI: + xgbe_phy_sfi_mode(pdata); + break; + default: + break; + } +} + +static void +xgbe_phy_get_type(struct xgbe_prv_data *pdata, struct ifmediareq * ifmr) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->phy.speed) { + case SPEED_10000: + if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) + ifmr->ifm_active |= IFM_10G_KR; + else 
if(phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T) + ifmr->ifm_active |= IFM_10G_T; + else if(phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R) + ifmr->ifm_active |= IFM_10G_KR; + else if(phy_data->port_mode == XGBE_PORT_MODE_SFP) + ifmr->ifm_active |= IFM_10G_SFI; + else + ifmr->ifm_active |= IFM_OTHER; + break; + case SPEED_2500: + if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE_2500) + ifmr->ifm_active |= IFM_2500_KX; + else + ifmr->ifm_active |= IFM_OTHER; + break; + case SPEED_1000: + if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) + ifmr->ifm_active |= IFM_1000_KX; + else if(phy_data->port_mode == XGBE_PORT_MODE_1000BASE_T) + ifmr->ifm_active |= IFM_1000_T; +#if 0 + else if(phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X) + ifmr->ifm_active |= IFM_1000_SX; + ifmr->ifm_active |= IFM_1000_LX; + ifmr->ifm_active |= IFM_1000_CX; +#endif + else if(phy_data->port_mode == XGBE_PORT_MODE_SFP) + ifmr->ifm_active |= IFM_1000_SGMII; + else + ifmr->ifm_active |= IFM_OTHER; + break; + case SPEED_100: + if(phy_data->port_mode == XGBE_PORT_MODE_NBASE_T) + ifmr->ifm_active |= IFM_100_T; + else if(phy_data->port_mode == XGBE_PORT_MODE_SFP) + ifmr->ifm_active |= IFM_1000_SGMII; + else + ifmr->ifm_active |= IFM_OTHER; + break; + default: + ifmr->ifm_active |= IFM_OTHER; + axgbe_printf(1, "Unknown mode detected\n"); + break; + } +} + +static bool +xgbe_phy_check_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode, + bool advert) +{ + + if (pdata->phy.autoneg == AUTONEG_ENABLE) + return (advert); + else { + enum xgbe_mode cur_mode; + + cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed); + if (cur_mode == mode) + return (true); + } + + return (false); +} + +static bool +xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) +{ + + switch (mode) { + case XGBE_MODE_X: + return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, + 1000baseX_Full))); + case XGBE_MODE_KR: + return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, + 
10000baseKR_Full))); + default: + return (false); + } +} + +static bool +xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) +{ + + axgbe_printf(3, "%s: check mode %d\n", __func__, mode); + switch (mode) { + case XGBE_MODE_SGMII_100: + return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, + 100baseT_Full))); + case XGBE_MODE_SGMII_1000: + return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, + 1000baseT_Full))); + case XGBE_MODE_KX_2500: + return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, + 2500baseT_Full))); + case XGBE_MODE_KR: + return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, + 10000baseT_Full))); + default: + return (false); + } +} + +static bool +xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (mode) { + case XGBE_MODE_X: + if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) + return (false); + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 1000baseX_Full))); + case XGBE_MODE_SGMII_100: + if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) + return (false); + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 100baseT_Full))); + case XGBE_MODE_SGMII_1000: + if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) + return (false); + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 1000baseT_Full))); + case XGBE_MODE_SFI: + if (phy_data->sfp_mod_absent) + return (true); + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 10000baseSR_Full) || + XGBE_ADV(&pdata->phy, 10000baseLR_Full) || + XGBE_ADV(&pdata->phy, 10000baseLRM_Full) || + XGBE_ADV(&pdata->phy, 10000baseER_Full) || + XGBE_ADV(&pdata->phy, 10000baseCR_Full))); + default: + return (false); + } +} + +static bool +xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) +{ + + switch (mode) { + case XGBE_MODE_KX_2500: + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 
2500baseX_Full))); + default: + return (false); + } +} + +static bool +xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) +{ + + switch (mode) { + case XGBE_MODE_KX_1000: + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 1000baseKX_Full))); + case XGBE_MODE_KR: + return (xgbe_phy_check_mode(pdata, mode, + XGBE_ADV(&pdata->phy, 10000baseKR_Full))); + default: + return (false); + } +} + +static bool +xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + return (xgbe_phy_use_bp_mode(pdata, mode)); + case XGBE_PORT_MODE_BACKPLANE_2500: + return (xgbe_phy_use_bp_2500_mode(pdata, mode)); + case XGBE_PORT_MODE_1000BASE_T: + axgbe_printf(3, "use_mode %s\n", + xgbe_phy_use_baset_mode(pdata, mode) ? "found" : "Not found"); + case XGBE_PORT_MODE_NBASE_T: + case XGBE_PORT_MODE_10GBASE_T: + return (xgbe_phy_use_baset_mode(pdata, mode)); + case XGBE_PORT_MODE_1000BASE_X: + case XGBE_PORT_MODE_10GBASE_R: + return (xgbe_phy_use_basex_mode(pdata, mode)); + case XGBE_PORT_MODE_SFP: + return (xgbe_phy_use_sfp_mode(pdata, mode)); + default: + return (false); + } +} + +static bool +xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data, int speed) +{ + + switch (speed) { + case SPEED_1000: + return (phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X); + case SPEED_10000: + return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R); + default: + return (false); + } +} + +static bool +xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data, int speed) +{ + + switch (speed) { + case SPEED_100: + case SPEED_1000: + return (true); + case SPEED_2500: + return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T); + case SPEED_10000: + return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T); + default: + return (false); + } +} + +static bool +xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data, int speed) 
+{ + + switch (speed) { + case SPEED_100: + return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000); + case SPEED_1000: + return ((phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000) || + (phy_data->sfp_speed == XGBE_SFP_SPEED_1000)); + case SPEED_10000: + return (phy_data->sfp_speed == XGBE_SFP_SPEED_10000); + default: + return (false); + } +} + +static bool +xgbe_phy_valid_speed_bp_2500_mode(int speed) +{ + + switch (speed) { + case SPEED_2500: + return (true); + default: + return (false); + } +} + +static bool +xgbe_phy_valid_speed_bp_mode(int speed) +{ + + switch (speed) { + case SPEED_1000: + case SPEED_10000: + return (true); + default: + return (false); + } +} + +static bool +xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + return (xgbe_phy_valid_speed_bp_mode(speed)); + case XGBE_PORT_MODE_BACKPLANE_2500: + return (xgbe_phy_valid_speed_bp_2500_mode(speed)); + case XGBE_PORT_MODE_1000BASE_T: + case XGBE_PORT_MODE_NBASE_T: + case XGBE_PORT_MODE_10GBASE_T: + return (xgbe_phy_valid_speed_baset_mode(phy_data, speed)); + case XGBE_PORT_MODE_1000BASE_X: + case XGBE_PORT_MODE_10GBASE_R: + return (xgbe_phy_valid_speed_basex_mode(phy_data, speed)); + case XGBE_PORT_MODE_SFP: + return (xgbe_phy_valid_speed_sfp_mode(phy_data, speed)); + default: + return (false); + } +} + +static int +xgbe_upd_link(struct xgbe_prv_data *pdata) +{ + int reg; + + axgbe_printf(2, "%s: Link %d\n", __func__, pdata->phy.link); + reg = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMSR); + if (reg < 0) + return (reg); + + if ((reg & BMSR_LINK) == 0) + pdata->phy.link = 0; + else + pdata->phy.link = 1; + + axgbe_printf(2, "Link: %d updated reg %#x\n", pdata->phy.link, reg); + return (0); +} + +static int +xgbe_phy_read_status(struct xgbe_prv_data *pdata) +{ + int common_adv_gb; + int common_adv; + int lpagb = 0; + int adv, lpa; + int ret; + + ret = 
xgbe_upd_link(pdata); + if (ret) { + axgbe_printf(2, "Link Update return %d\n", ret); + return (ret); + } + + if (AUTONEG_ENABLE == pdata->phy.autoneg) { + if (pdata->phy.supported == SUPPORTED_1000baseT_Half || + pdata->phy.supported == SUPPORTED_1000baseT_Full) { + lpagb = xgbe_phy_mii_read(pdata, pdata->mdio_addr, + MII_100T2SR); + if (lpagb < 0) + return (lpagb); + + adv = xgbe_phy_mii_read(pdata, pdata->mdio_addr, + MII_100T2CR); + if (adv < 0) + return (adv); + + if (lpagb & GTSR_MAN_MS_FLT) { + if (adv & GTCR_MAN_MS) + axgbe_printf(2, "Master/Slave Resolution " + "failed, maybe conflicting manual settings\n"); + else + axgbe_printf(2, "Master/Slave Resolution failed\n"); + return (-ENOLINK); + } + + if (pdata->phy.supported == SUPPORTED_1000baseT_Half) + XGBE_ADV(&pdata->phy, 1000baseT_Half); + else if (pdata->phy.supported == SUPPORTED_1000baseT_Full) + XGBE_ADV(&pdata->phy, 1000baseT_Full); + + common_adv_gb = lpagb & adv << 2; + } + + lpa = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_ANLPAR); + if (lpa < 0) + return (lpa); + + if (pdata->phy.supported == SUPPORTED_Autoneg) + XGBE_ADV(&pdata->phy, Autoneg); + + adv = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_ANAR); + if (adv < 0) + return (adv); + + common_adv = lpa & adv; + + pdata->phy.speed = SPEED_10; + pdata->phy.duplex = DUPLEX_HALF; + pdata->phy.pause = 0; + pdata->phy.asym_pause = 0; + + axgbe_printf(2, "%s: lpa %#x adv %#x common_adv_gb %#x " + "common_adv %#x\n", __func__, lpa, adv, common_adv_gb, + common_adv); + if (common_adv_gb & (GTSR_LP_1000TFDX | GTSR_LP_1000THDX)) { + axgbe_printf(2, "%s: SPEED 1000\n", __func__); + pdata->phy.speed = SPEED_1000; + + if (common_adv_gb & GTSR_LP_1000TFDX) + pdata->phy.duplex = DUPLEX_FULL; + } else if (common_adv & (ANLPAR_TX_FD | ANLPAR_TX)) { + axgbe_printf(2, "%s: SPEED 100\n", __func__); + pdata->phy.speed = SPEED_100; + + if (common_adv & ANLPAR_TX_FD) + pdata->phy.duplex = DUPLEX_FULL; + } else + if (common_adv & ANLPAR_10_FD) + 
pdata->phy.duplex = DUPLEX_FULL; + + if (pdata->phy.duplex == DUPLEX_FULL) { + pdata->phy.pause = lpa & ANLPAR_FC ? 1 : 0; + pdata->phy.asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0; + } + } else { + int bmcr = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); + if (bmcr < 0) + return (bmcr); + + if (bmcr & BMCR_FDX) + pdata->phy.duplex = DUPLEX_FULL; + else + pdata->phy.duplex = DUPLEX_HALF; + + if (bmcr & BMCR_SPEED1) + pdata->phy.speed = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + pdata->phy.speed = SPEED_100; + else + pdata->phy.speed = SPEED_10; + + pdata->phy.pause = 0; + pdata->phy.asym_pause = 0; + axgbe_printf(2, "%s: link speed %#x duplex %#x media %#x " + "autoneg %#x\n", __func__, pdata->phy.speed, + pdata->phy.duplex, pdata->phy.link, pdata->phy.autoneg); + } + + return (0); +} + +static int +xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + struct mii_data *mii = NULL; + unsigned int reg; + int ret; + + *an_restart = 0; + + if (phy_data->port_mode == XGBE_PORT_MODE_SFP) { + /* Check SFP signals */ + axgbe_printf(3, "%s: calling phy detect\n", __func__); + xgbe_phy_sfp_detect(pdata); + + if (phy_data->sfp_changed) { + axgbe_printf(1, "%s: SFP changed observed\n", __func__); + *an_restart = 1; + return (0); + } + + if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) { + axgbe_printf(1, "%s: SFP absent 0x%x & sfp_rx_los 0x%x\n", + __func__, phy_data->sfp_mod_absent, + phy_data->sfp_rx_los); + return (0); + } + } else { + mii = device_get_softc(pdata->axgbe_miibus); + mii_tick(mii); + + ret = xgbe_phy_read_status(pdata); + if (ret) { + axgbe_printf(2, "Link: Read status returned %d\n", ret); + return (ret); + } + + axgbe_printf(2, "%s: link speed %#x duplex %#x media %#x " + "autoneg %#x\n", __func__, pdata->phy.speed, + pdata->phy.duplex, pdata->phy.link, pdata->phy.autoneg); + ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMSR); + ret = (ret < 0) ? 
ret : (ret & BMSR_ACOMP); + axgbe_printf(2, "Link: BMCR returned %d\n", ret); + if ((pdata->phy.autoneg == AUTONEG_ENABLE) && !ret) + return (0); + + return (pdata->phy.link); + } + + /* Link status is latched low, so read once to clear + * and then read again to get current state + */ + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + axgbe_printf(1, "%s: link_status reg: 0x%x\n", __func__, reg); + if (reg & MDIO_STAT1_LSTATUS) + return (1); + + /* No link, attempt a receiver reset cycle */ + if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { + axgbe_printf(1, "ENTERED RRC: rrc_count: %d\n", + phy_data->rrc_count); + phy_data->rrc_count = 0; + xgbe_phy_rrc(pdata); + } + + return (0); +} + +static void +xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 + + XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_ADDR); + phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3, + GPIO_MASK); + phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3, + GPIO_RX_LOS); + phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3, + GPIO_TX_FAULT); + phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3, + GPIO_MOD_ABS); + phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3, + GPIO_RATE_SELECT); + + DBGPR("SFP: gpio_address=%#x\n", phy_data->sfp_gpio_address); + DBGPR("SFP: gpio_mask=%#x\n", phy_data->sfp_gpio_mask); + DBGPR("SFP: gpio_rx_los=%u\n", phy_data->sfp_gpio_rx_los); + DBGPR("SFP: gpio_tx_fault=%u\n", phy_data->sfp_gpio_tx_fault); + DBGPR("SFP: gpio_mod_absent=%u\n", + phy_data->sfp_gpio_mod_absent); + DBGPR("SFP: gpio_rate_select=%u\n", + phy_data->sfp_gpio_rate_select); +} + +static void +xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int mux_addr_hi, mux_addr_lo; + + mux_addr_hi = 
XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI); + mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO); + if (mux_addr_lo == XGBE_SFP_DIRECT) + return; + + phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545; + phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo; + phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4, + MUX_CHAN); + + DBGPR("SFP: mux_address=%#x\n", phy_data->sfp_mux_address); + DBGPR("SFP: mux_channel=%u\n", phy_data->sfp_mux_channel); +} + +static void +xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata) +{ + xgbe_phy_sfp_comm_setup(pdata); + xgbe_phy_sfp_gpio_setup(pdata); +} + +static int +xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + unsigned int ret; + + ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio); + if (ret) + return (ret); + + ret = pdata->hw_if.clr_gpio(pdata, phy_data->mdio_reset_gpio); + + return (ret); +} + +static int +xgbe_phy_i2c_mdio_reset(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + uint8_t gpio_reg, gpio_ports[2], gpio_data[3]; + int ret; + + /* Read the output port registers */ + gpio_reg = 2; + ret = xgbe_phy_i2c_read(pdata, phy_data->mdio_reset_addr, + &gpio_reg, sizeof(gpio_reg), + gpio_ports, sizeof(gpio_ports)); + if (ret) + return (ret); + + /* Prepare to write the GPIO data */ + gpio_data[0] = 2; + gpio_data[1] = gpio_ports[0]; + gpio_data[2] = gpio_ports[1]; + + /* Set the GPIO pin */ + if (phy_data->mdio_reset_gpio < 8) + gpio_data[1] |= (1 << (phy_data->mdio_reset_gpio % 8)); + else + gpio_data[2] |= (1 << (phy_data->mdio_reset_gpio % 8)); + + /* Write the output port registers */ + ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr, + gpio_data, sizeof(gpio_data)); + if (ret) + return (ret); + + /* Clear the GPIO pin */ + if (phy_data->mdio_reset_gpio < 8) + gpio_data[1] &= ~(1 << (phy_data->mdio_reset_gpio % 8)); + else + gpio_data[2] &= ~(1 << (phy_data->mdio_reset_gpio % 
8)); + + /* Write the output port registers */ + ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr, + gpio_data, sizeof(gpio_data)); + + return (ret); +} + +static int +xgbe_phy_mdio_reset(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) + return (0); + + ret = xgbe_phy_get_comm_ownership(pdata); + if (ret) + return (ret); + + if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) + ret = xgbe_phy_i2c_mdio_reset(pdata); + else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) + ret = xgbe_phy_int_mdio_reset(pdata); + + xgbe_phy_put_comm_ownership(pdata); + + return (ret); +} + +static bool +xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data) +{ + if (!phy_data->redrv) + return (false); + + if (phy_data->redrv_if >= XGBE_PHY_REDRV_IF_MAX) + return (true); + + switch (phy_data->redrv_model) { + case XGBE_PHY_REDRV_MODEL_4223: + if (phy_data->redrv_lane > 3) + return (true); + break; + case XGBE_PHY_REDRV_MODEL_4227: + if (phy_data->redrv_lane > 1) + return (true); + break; + default: + return (true); + } + + return (false); +} + +static int +xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) + return (0); + + phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET); + switch (phy_data->mdio_reset) { + case XGBE_MDIO_RESET_NONE: + case XGBE_MDIO_RESET_I2C_GPIO: + case XGBE_MDIO_RESET_INT_GPIO: + break; + default: + axgbe_error("unsupported MDIO reset (%#x)\n", + phy_data->mdio_reset); + return (-EINVAL); + } + + if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) { + phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 + + XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_ADDR); + phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, + MDIO_RESET_I2C_GPIO); + } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) + 
phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, + MDIO_RESET_INT_GPIO); + + return (0); +} + +static bool +xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) + return (false); + break; + case XGBE_PORT_MODE_BACKPLANE_2500: + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) + return (false); + break; + case XGBE_PORT_MODE_1000BASE_T: + if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)) + return (false); + break; + case XGBE_PORT_MODE_1000BASE_X: + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) + return (false); + break; + case XGBE_PORT_MODE_NBASE_T: + if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500)) + return (false); + break; + case XGBE_PORT_MODE_10GBASE_T: + if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) + return (false); + break; + case XGBE_PORT_MODE_10GBASE_R: + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) + return (false); + break; + case XGBE_PORT_MODE_SFP: + if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || + (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) + return (false); + break; + default: + break; + } + + return (true); +} + +static bool +xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_BACKPLANE: + case XGBE_PORT_MODE_BACKPLANE_2500: + if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE) + return 
(false); + break; + case XGBE_PORT_MODE_1000BASE_T: + case XGBE_PORT_MODE_1000BASE_X: + case XGBE_PORT_MODE_NBASE_T: + case XGBE_PORT_MODE_10GBASE_T: + case XGBE_PORT_MODE_10GBASE_R: + if (phy_data->conn_type == XGBE_CONN_TYPE_MDIO) + return (false); + break; + case XGBE_PORT_MODE_SFP: + if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) + return (false); + break; + default: + break; + } + + return (true); +} + +static bool +xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) +{ + + if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS)) + return (false); + if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE)) + return (false); + + return (true); +} + +static void +xgbe_phy_cdr_track(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + axgbe_printf(2, "%s: an_cdr_workaround %d phy_cdr_notrack %d\n", + __func__, pdata->sysctl_an_cdr_workaround, phy_data->phy_cdr_notrack); + + if (!pdata->sysctl_an_cdr_workaround) + return; + + if (!phy_data->phy_cdr_notrack) + return; + + DELAY(phy_data->phy_cdr_delay + 500); + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, XGBE_PMA_CDR_TRACK_EN_ON); + + phy_data->phy_cdr_notrack = 0; + + axgbe_printf(2, "CDR TRACK DONE\n"); +} + +static void +xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + axgbe_printf(2, "%s: an_cdr_workaround %d phy_cdr_notrack %d\n", + __func__, pdata->sysctl_an_cdr_workaround, phy_data->phy_cdr_notrack); + + if (!pdata->sysctl_an_cdr_workaround) + return; + + if (phy_data->phy_cdr_notrack) + return; + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, XGBE_PMA_CDR_TRACK_EN_OFF); + + xgbe_phy_rrc(pdata); + + phy_data->phy_cdr_notrack = 1; +} + +static void +xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) +{ + if (!pdata->sysctl_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void 
+xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) +{ + if (pdata->sysctl_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void +xgbe_phy_an_post(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_track(pdata); + + switch (pdata->an_result) { + case XGBE_AN_READY: + case XGBE_AN_COMPLETE: + break; + default: + if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX) + phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC; + else + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + break; + } + break; + default: + break; + } +} + +static void +xgbe_phy_an_pre(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_notrack(pdata); + break; + default: + break; + } +} + +static void +xgbe_phy_stop(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + /* If we have an external PHY, free it */ + xgbe_phy_free_phy_device(pdata); + + /* Reset SFP data */ + xgbe_phy_sfp_reset(phy_data); + xgbe_phy_sfp_mod_absent(pdata); + + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + + /* Power off the PHY */ + xgbe_phy_power_off(pdata); + + /* Stop the I2C controller */ + pdata->i2c_if.i2c_stop(pdata); +} + +static int +xgbe_phy_start(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + axgbe_printf(2, "%s: redrv %d redrv_if %d start_mode %d\n", __func__, + phy_data->redrv, phy_data->redrv_if, phy_data->start_mode); + + /* Start the I2C controller */ + ret = pdata->i2c_if.i2c_start(pdata); + if (ret) { + axgbe_error("%s: impl i2c start ret %d\n", __func__, ret); + return (ret); + } + + /* Set the proper MDIO mode for the re-driver */ 
+ if (phy_data->redrv && !phy_data->redrv_if) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, + XGBE_MDIO_MODE_CL22); + if (ret) { + axgbe_error("redriver mdio port not compatible (%u)\n", + phy_data->redrv_addr); + return (ret); + } + } + + /* Start in highest supported mode */ + xgbe_phy_set_mode(pdata, phy_data->start_mode); + + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + + /* After starting the I2C controller, we can check for an SFP */ + switch (phy_data->port_mode) { + case XGBE_PORT_MODE_SFP: + axgbe_printf(3, "%s: calling phy detect\n", __func__); + xgbe_phy_sfp_detect(pdata); + break; + default: + break; + } + + /* If we have an external PHY, start it */ + ret = xgbe_phy_find_phy_device(pdata); + if (ret) { + axgbe_error("%s: impl find phy dev ret %d\n", __func__, ret); + goto err_i2c; + } + + axgbe_printf(3, "%s: impl return success\n", __func__); + return (0); + +err_i2c: + pdata->i2c_if.i2c_stop(pdata); + + return (ret); +} + +static int +xgbe_phy_reset(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + enum xgbe_mode cur_mode; + int ret; + + /* Reset by power cycling the PHY */ + cur_mode = phy_data->cur_mode; + xgbe_phy_power_off(pdata); + xgbe_phy_set_mode(pdata, cur_mode); + + axgbe_printf(3, "%s: mode %d\n", __func__, cur_mode); + if (!phy_data->phydev) { + axgbe_printf(1, "%s: no phydev\n", __func__); + return (0); + } + + /* Reset the external PHY */ + ret = xgbe_phy_mdio_reset(pdata); + if (ret) { + axgbe_error("%s: mdio reset %d\n", __func__, ret); + return (ret); + } + + axgbe_printf(3, "%s: return success\n", __func__); + + return (0); +} + +static void +axgbe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct axgbe_if_softc *sc; + struct xgbe_prv_data *pdata; + struct mii_data *mii; + + sc = ifp->if_softc; + pdata = &sc->pdata; + + axgbe_printf(2, "%s: Invoked\n", __func__); + mtx_lock_spin(&pdata->mdio_mutex); + mii = device_get_softc(pdata->axgbe_miibus); + 
axgbe_printf(2, "%s: media_active %#x media_status %#x\n", __func__, + mii->mii_media_active, mii->mii_media_status); + mii_pollstat(mii); + ifmr->ifm_active = mii->mii_media_active; + ifmr->ifm_status = mii->mii_media_status; + mtx_unlock_spin(&pdata->mdio_mutex); +} + +static int +axgbe_ifmedia_upd(struct ifnet *ifp) +{ + struct xgbe_prv_data *pdata; + struct axgbe_if_softc *sc; + struct mii_data *mii; + struct mii_softc *miisc; + int ret; + + sc = ifp->if_softc; + pdata = &sc->pdata; + + axgbe_printf(2, "%s: Invoked\n", __func__); + mtx_lock_spin(&pdata->mdio_mutex); + mii = device_get_softc(pdata->axgbe_miibus); + LIST_FOREACH(miisc, &mii->mii_phys, mii_list) + PHY_RESET(miisc); + ret = mii_mediachg(mii); + mtx_unlock_spin(&pdata->mdio_mutex); + + return (ret); +} + +static void +xgbe_phy_exit(struct xgbe_prv_data *pdata) +{ + if (pdata->axgbe_miibus != NULL) + device_delete_child(pdata->dev, pdata->axgbe_miibus); + + /* free phy_data structure */ + free(pdata->phy_data, M_AXGBE); +} + +static int +xgbe_phy_init(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data; + int ret; + + /* Initialize the global lock */ + if (!mtx_initialized(&xgbe_phy_comm_lock)) + mtx_init(&xgbe_phy_comm_lock, "xgbe phy common lock", NULL, MTX_DEF); + + /* Check if enabled */ + if (!xgbe_phy_port_enabled(pdata)) { + axgbe_error("device is not enabled\n"); + return (-ENODEV); + } + + /* Initialize the I2C controller */ + ret = pdata->i2c_if.i2c_init(pdata); + if (ret) + return (ret); + + phy_data = malloc(sizeof(*phy_data), M_AXGBE, M_WAITOK | M_ZERO); + if (!phy_data) + return (-ENOMEM); + pdata->phy_data = phy_data; + + phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE); + phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID); + phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS); + phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE); + phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR); + + 
pdata->mdio_addr = phy_data->mdio_addr; + DBGPR("port mode=%u\n", phy_data->port_mode); + DBGPR("port id=%u\n", phy_data->port_id); + DBGPR("port speeds=%#x\n", phy_data->port_speeds); + DBGPR("conn type=%u\n", phy_data->conn_type); + DBGPR("mdio addr=%u\n", phy_data->mdio_addr); + + phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT); + phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF); + phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR); + phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE); + phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL); + + if (phy_data->redrv) { + DBGPR("redrv present\n"); + DBGPR("redrv i/f=%u\n", phy_data->redrv_if); + DBGPR("redrv addr=%#x\n", phy_data->redrv_addr); + DBGPR("redrv lane=%u\n", phy_data->redrv_lane); + DBGPR("redrv model=%u\n", phy_data->redrv_model); + } + + DBGPR("%s: redrv addr=%#x redrv i/f=%u\n", __func__, + phy_data->redrv_addr, phy_data->redrv_if); + /* Validate the connection requested */ + if (xgbe_phy_conn_type_mismatch(pdata)) { + axgbe_error("phy mode/connection mismatch " + "(%#x/%#x)\n", phy_data->port_mode, phy_data->conn_type); + return (-EINVAL); + } + + /* Validate the mode requested */ + if (xgbe_phy_port_mode_mismatch(pdata)) { + axgbe_error("phy mode/speed mismatch " + "(%#x/%#x)\n", phy_data->port_mode, phy_data->port_speeds); + return (-EINVAL); + } + + /* Check for and validate MDIO reset support */ + ret = xgbe_phy_mdio_reset_setup(pdata); + if (ret) { + axgbe_error("%s, mdio_reset_setup ret %d\n", __func__, ret); + return (ret); + } + + /* Validate the re-driver information */ + if (xgbe_phy_redrv_error(phy_data)) { + axgbe_error("phy re-driver settings error\n"); + return (-EINVAL); + } + pdata->kr_redrv = phy_data->redrv; + + /* Indicate current mode is unknown */ + phy_data->cur_mode = XGBE_MODE_UNKNOWN; + + /* Initialize supported features. 
Current code does not support ethtool */ + XGBE_ZERO_SUP(&pdata->phy); + + DBGPR("%s: port mode %d\n", __func__, phy_data->port_mode); + switch (phy_data->port_mode) { + /* Backplane support */ + case XGBE_PORT_MODE_BACKPLANE: + XGBE_SET_SUP(&pdata->phy, Autoneg); + XGBE_SET_SUP(&pdata->phy, Pause); + XGBE_SET_SUP(&pdata->phy, Asym_Pause); + XGBE_SET_SUP(&pdata->phy, Backplane); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { + XGBE_SET_SUP(&pdata->phy, 1000baseKX_Full); + phy_data->start_mode = XGBE_MODE_KX_1000; + } + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { + XGBE_SET_SUP(&pdata->phy, 10000baseKR_Full); + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC); + phy_data->start_mode = XGBE_MODE_KR; + } + + phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; + break; + case XGBE_PORT_MODE_BACKPLANE_2500: + XGBE_SET_SUP(&pdata->phy, Pause); + XGBE_SET_SUP(&pdata->phy, Asym_Pause); + XGBE_SET_SUP(&pdata->phy, Backplane); + XGBE_SET_SUP(&pdata->phy, 2500baseX_Full); + phy_data->start_mode = XGBE_MODE_KX_2500; + + phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; + break; + + /* MDIO 1GBase-T support */ + case XGBE_PORT_MODE_1000BASE_T: + XGBE_SET_SUP(&pdata->phy, Autoneg); + XGBE_SET_SUP(&pdata->phy, Pause); + XGBE_SET_SUP(&pdata->phy, Asym_Pause); + XGBE_SET_SUP(&pdata->phy, TP); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { + XGBE_SET_SUP(&pdata->phy, 100baseT_Full); + phy_data->start_mode = XGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { + XGBE_SET_SUP(&pdata->phy, 1000baseT_Full); + phy_data->start_mode = XGBE_MODE_SGMII_1000; + } + + phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; + break; + + /* MDIO Base-X support */ + case XGBE_PORT_MODE_1000BASE_X: + XGBE_SET_SUP(&pdata->phy, Autoneg); + XGBE_SET_SUP(&pdata->phy, Pause); + XGBE_SET_SUP(&pdata->phy, Asym_Pause); + XGBE_SET_SUP(&pdata->phy, FIBRE); + XGBE_SET_SUP(&pdata->phy, 1000baseX_Full); + 
phy_data->start_mode = XGBE_MODE_X; + + phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; + break; + + /* MDIO NBase-T support */ + case XGBE_PORT_MODE_NBASE_T: + XGBE_SET_SUP(&pdata->phy, Autoneg); + XGBE_SET_SUP(&pdata->phy, Pause); + XGBE_SET_SUP(&pdata->phy, Asym_Pause); + XGBE_SET_SUP(&pdata->phy, TP); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { + XGBE_SET_SUP(&pdata->phy, 100baseT_Full); + phy_data->start_mode = XGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { + XGBE_SET_SUP(&pdata->phy, 1000baseT_Full); + phy_data->start_mode = XGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) { + XGBE_SET_SUP(&pdata->phy, 2500baseT_Full); + phy_data->start_mode = XGBE_MODE_KX_2500; + } + + phy_data->phydev_mode = XGBE_MDIO_MODE_CL45; + break; + + /* 10GBase-T support */ + case XGBE_PORT_MODE_10GBASE_T: + XGBE_SET_SUP(&pdata->phy, Autoneg); + XGBE_SET_SUP(&pdata->phy, Pause); + XGBE_SET_SUP(&pdata->phy, Asym_Pause); + XGBE_SET_SUP(&pdata->phy, TP); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { + XGBE_SET_SUP(&pdata->phy, 100baseT_Full); + phy_data->start_mode = XGBE_MODE_SGMII_100; + } + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { + XGBE_SET_SUP(&pdata->phy, 1000baseT_Full); + phy_data->start_mode = XGBE_MODE_SGMII_1000; + } + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { + XGBE_SET_SUP(&pdata->phy, 10000baseT_Full); + phy_data->start_mode = XGBE_MODE_KR; + } + + phy_data->phydev_mode = XGBE_MDIO_MODE_CL45; + break; + + /* 10GBase-R support */ + case XGBE_PORT_MODE_10GBASE_R: + XGBE_SET_SUP(&pdata->phy, Autoneg); + XGBE_SET_SUP(&pdata->phy, Pause); + XGBE_SET_SUP(&pdata->phy, Asym_Pause); + XGBE_SET_SUP(&pdata->phy, FIBRE); + XGBE_SET_SUP(&pdata->phy, 10000baseSR_Full); + XGBE_SET_SUP(&pdata->phy, 10000baseLR_Full); + XGBE_SET_SUP(&pdata->phy, 10000baseLRM_Full); + XGBE_SET_SUP(&pdata->phy, 10000baseER_Full); + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + 
XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC); + phy_data->start_mode = XGBE_MODE_SFI; + + phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; + break; + + /* SFP support */ + case XGBE_PORT_MODE_SFP: + XGBE_SET_SUP(&pdata->phy, Autoneg); + XGBE_SET_SUP(&pdata->phy, Pause); + XGBE_SET_SUP(&pdata->phy, Asym_Pause); + XGBE_SET_SUP(&pdata->phy, TP); + XGBE_SET_SUP(&pdata->phy, FIBRE); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) + phy_data->start_mode = XGBE_MODE_SGMII_100; + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) + phy_data->start_mode = XGBE_MODE_SGMII_1000; + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) + phy_data->start_mode = XGBE_MODE_SFI; + + phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; + + xgbe_phy_sfp_setup(pdata); + DBGPR("%s: start %d mode %d adv 0x%x\n", __func__, + phy_data->start_mode, phy_data->phydev_mode, + pdata->phy.advertising); + break; + default: + return (-EINVAL); + } + + axgbe_printf(2, "%s: start %d mode %d adv 0x%x\n", __func__, + phy_data->start_mode, phy_data->phydev_mode, pdata->phy.advertising); + + DBGPR("%s: conn type %d mode %d\n", __func__, + phy_data->conn_type, phy_data->phydev_mode); + if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) && + (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, + phy_data->phydev_mode); + if (ret) { + axgbe_error("mdio port/clause not compatible (%d/%u)\n", + phy_data->mdio_addr, phy_data->phydev_mode); + return (-EINVAL); + } + } + + if (phy_data->redrv && !phy_data->redrv_if) { + ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, + XGBE_MDIO_MODE_CL22); + if (ret) { + axgbe_error("redriver mdio port not compatible (%u)\n", + phy_data->redrv_addr); + return (-EINVAL); + } + } + + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) { + ret = mii_attach(pdata->dev, &pdata->axgbe_miibus, pdata->netdev, + (ifm_change_cb_t)axgbe_ifmedia_upd, + 
(ifm_stat_cb_t)axgbe_ifmedia_sts, BMSR_DEFCAPMASK, + pdata->mdio_addr, MII_OFFSET_ANY, MIIF_FORCEANEG); + + if (ret){ + axgbe_printf(2, "mii attach failed with err=(%d)\n", ret); + return (-EINVAL); + } + } + + DBGPR("%s: return success\n", __func__); + + return (0); +} + +void +xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) +{ + struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl; + + phy_impl->init = xgbe_phy_init; + phy_impl->exit = xgbe_phy_exit; + + phy_impl->reset = xgbe_phy_reset; + phy_impl->start = xgbe_phy_start; + phy_impl->stop = xgbe_phy_stop; + + phy_impl->link_status = xgbe_phy_link_status; + + phy_impl->valid_speed = xgbe_phy_valid_speed; + + phy_impl->use_mode = xgbe_phy_use_mode; + phy_impl->set_mode = xgbe_phy_set_mode; + phy_impl->get_mode = xgbe_phy_get_mode; + phy_impl->switch_mode = xgbe_phy_switch_mode; + phy_impl->cur_mode = xgbe_phy_cur_mode; + phy_impl->get_type = xgbe_phy_get_type; + + phy_impl->an_mode = xgbe_phy_an_mode; + + phy_impl->an_config = xgbe_phy_an_config; + + phy_impl->an_advertising = xgbe_phy_an_advertising; + + phy_impl->an_outcome = xgbe_phy_an_outcome; + + phy_impl->an_pre = xgbe_phy_an_pre; + phy_impl->an_post = xgbe_phy_an_post; + + phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; + phy_impl->kr_training_post = xgbe_phy_kr_training_post; + + phy_impl->module_info = xgbe_phy_module_info; + phy_impl->module_eeprom = xgbe_phy_module_eeprom; +} diff --git a/sys/dev/axgbe/xgbe-ptp.c b/sys/dev/axgbe/xgbe-ptp.c new file mode 100644 index 000000000000..a2d2a8b0e05e --- /dev/null +++ b/sys/dev/axgbe/xgbe-ptp.c @@ -0,0 +1,276 @@ +/* + * AMD 10Gb Ethernet driver + * + * This file is available to you under your choice of the following two + * licenses: + * + * License 1: GPLv2 + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. 
+ * + * This file is free software; you may copy, redistribute and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or (at + * your option) any later version. + * + * This file is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + * + * + * License 2: Modified BSD + * + * Copyright (c) 2014 Advanced Micro Devices, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Advanced Micro Devices, Inc. nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This file incorporates work covered by the following copyright and + * permission notice: + * The Synopsys DWC ETHER XGMAC Software Driver and documentation + * (hereinafter "Software") is an unsupported proprietary work of Synopsys, + * Inc. unless otherwise expressly agreed to in writing between Synopsys + * and you. + * + * The Software IS NOT an item of Licensed Software or Licensed Product + * under any End User Software License Agreement or Agreement for Licensed + * Product with Synopsys or any supplement thereto. Permission is hereby + * granted, free of charge, to any person obtaining a copy of this software + * annotated with this license and the Software, to deal in the Software + * without restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is furnished + * to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" + * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ +#include +__FBSDID("$FreeBSD$"); + +#include "xgbe.h" + +static u64 xgbe_cc_read(const struct cyclecounter *cc) +{ + struct xgbe_prv_data *pdata = container_of(cc, + struct xgbe_prv_data, + tstamp_cc); + u64 nsec; + + nsec = pdata->hw_if.get_tstamp_time(pdata); + + return (nsec); +} + +static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 adjust; + u32 addend, diff; + unsigned int neg_adjust = 0; + + if (delta < 0) { + neg_adjust = 1; + delta = -delta; + } + + adjust = pdata->tstamp_addend; + adjust *= delta; + diff = div_u64(adjust, 1000000000UL); + + addend = (neg_adjust) ? 
pdata->tstamp_addend - diff : + pdata->tstamp_addend + diff; + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + pdata->hw_if.update_tstamp_addend(pdata, addend); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + return (0); +} + +static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + timecounter_adjtime(&pdata->tstamp_tc, delta); + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + return (0); +} + +static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 nsec; + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + nsec = timecounter_read(&pdata->tstamp_tc); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + *ts = ns_to_timespec64(nsec); + + return (0); +} + +static int xgbe_settime(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct xgbe_prv_data *pdata = container_of(info, + struct xgbe_prv_data, + ptp_clock_info); + unsigned long flags; + u64 nsec; + + nsec = timespec64_to_ns(ts); + + spin_lock_irqsave(&pdata->tstamp_lock, flags); + + timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec); + + spin_unlock_irqrestore(&pdata->tstamp_lock, flags); + + return (0); +} + +static int xgbe_enable(struct ptp_clock_info *info, + void *request, int on) +{ + return (-EOPNOTSUPP); +} + +void xgbe_ptp_register(struct xgbe_prv_data *pdata) +{ + struct ptp_clock_info *info = &pdata->ptp_clock_info; + //struct ptp_clock *clock; + struct cyclecounter *cc = &pdata->tstamp_cc; + u64 dividend; + + snprintf(info->name, sizeof(info->name), "axgbe-ptp"); + //info->owner = THIS_MODULE; + info->max_adj = pdata->ptpclk_rate; + info->adjfreq = xgbe_adjfreq; + info->adjtime = xgbe_adjtime; + info->gettime64 = 
xgbe_gettime; + info->settime64 = xgbe_settime; + info->enable = xgbe_enable; +#if 0 + clock = ptp_clock_register(info, pdata->dev); + if (IS_ERR(clock)) { + dev_err(pdata->dev, "ptp_clock_register failed\n"); + return; + } + + pdata->ptp_clock = clock; +#endif + /* Calculate the addend: + * addend = 2^32 / (PTP ref clock / 50Mhz) + * = (2^32 * 50Mhz) / PTP ref clock + */ + dividend = 50000000; + dividend <<= 32; + pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate); + + /* Setup the timecounter */ + cc->read = xgbe_cc_read; + cc->mask = CLOCKSOURCE_MASK(64); + cc->mult = 1; + cc->shift = 0; + + timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, + ktime_to_ns(ktime_get_real())); + + /* Disable all timestamping to start */ + XGMAC_IOWRITE(pdata, MAC_TSCR, 0); + pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; +} + +void xgbe_ptp_unregister(struct xgbe_prv_data *pdata) +{ +#if 0 + if (pdata->ptp_clock) + ptp_clock_unregister(pdata->ptp_clock); +#endif +} diff --git a/sys/dev/axgbe/xgbe-sysctl.c b/sys/dev/axgbe/xgbe-sysctl.c new file mode 100644 index 000000000000..eee7c61170de --- /dev/null +++ b/sys/dev/axgbe/xgbe-sysctl.c @@ -0,0 +1,1715 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Advanced Micro Devices, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Contact Information : + * Rajesh Kumar + * Arpan Palit + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include + +#include "xgbe.h" +#include "xgbe-common.h" + +#define SYSCTL_BUF_LEN 64 + +typedef enum{ + /* Coalesce flag */ + rx_coalesce_usecs = 1, + rx_max_coalesced_frames, + rx_coalesce_usecs_irq, + rx_max_coalesced_frames_irq, + tx_coalesce_usecs, + tx_max_coalesced_frames, + tx_coalesce_usecs_irq, + tx_max_coalesced_frames_irq, + stats_block_coalesce_usecs, + use_adaptive_rx_coalesce, + use_adaptive_tx_coalesce, + pkt_rate_low, + rx_coalesce_usecs_low, + rx_max_coalesced_frames_low, + tx_coalesce_usecs_low, + tx_max_coalesced_frames_low, + pkt_rate_high, + rx_coalesce_usecs_high, + rx_max_coalesced_frames_high, + tx_coalesce_usecs_high, + tx_max_coalesced_frames_high, + rate_sample_interval, + + /* Pasue flag */ + autoneg, + tx_pause, + rx_pause, + + /* link settings */ + speed, + duplex, + + /* Ring settings */ + rx_pending, + rx_mini_pending, + rx_jumbo_pending, + tx_pending, + + /* Channels settings */ + rx_count, + tx_count, + other_count, + combined_count, +} sysctl_variable_t; + +typedef enum { + SYSL_NONE, + SYSL_BOOL, + SYSL_S32, + SYSL_U8, + SYSL_U16, + 
SYSL_U32, + SYSL_U64, + SYSL_BE16, + SYSL_IP4, + SYSL_STR, + SYSL_FLAG, + SYSL_MAC, +} sysctl_type_t; + +struct sysctl_info { + uint8_t name[32]; + sysctl_type_t type; + sysctl_variable_t flag; + uint8_t support[16]; +}; + +struct sysctl_op { + /* Coalesce options */ + unsigned int rx_coalesce_usecs; + unsigned int rx_max_coalesced_frames; + unsigned int rx_coalesce_usecs_irq; + unsigned int rx_max_coalesced_frames_irq; + unsigned int tx_coalesce_usecs; + unsigned int tx_max_coalesced_frames; + unsigned int tx_coalesce_usecs_irq; + unsigned int tx_max_coalesced_frames_irq; + unsigned int stats_block_coalesce_usecs; + unsigned int use_adaptive_rx_coalesce; + unsigned int use_adaptive_tx_coalesce; + unsigned int pkt_rate_low; + unsigned int rx_coalesce_usecs_low; + unsigned int rx_max_coalesced_frames_low; + unsigned int tx_coalesce_usecs_low; + unsigned int tx_max_coalesced_frames_low; + unsigned int pkt_rate_high; + unsigned int rx_coalesce_usecs_high; + unsigned int rx_max_coalesced_frames_high; + unsigned int tx_coalesce_usecs_high; + unsigned int tx_max_coalesced_frames_high; + unsigned int rate_sample_interval; + + /* Pasue options */ + unsigned int autoneg; + unsigned int tx_pause; + unsigned int rx_pause; + + /* Link settings options */ + unsigned int speed; + unsigned int duplex; + + /* Ring param options */ + unsigned int rx_max_pending; + unsigned int rx_mini_max_pending; + unsigned int rx_jumbo_max_pending; + unsigned int tx_max_pending; + unsigned int rx_pending; + unsigned int rx_mini_pending; + unsigned int rx_jumbo_pending; + unsigned int tx_pending; + + /* Channels options */ + unsigned int max_rx; + unsigned int max_tx; + unsigned int max_other; + unsigned int max_combined; + unsigned int rx_count; + unsigned int tx_count; + unsigned int other_count; + unsigned int combined_count; +} sys_op; + +#define GSTRING_LEN 32 + +struct xgbe_stats { + char stat_string[GSTRING_LEN]; + int stat_size; + int stat_offset; +}; + +#define FIELD_SIZEOF(t, f) 
(sizeof(((t*)0)->f)) + +#define XGMAC_MMC_STAT(_string, _var) \ + { _string, \ + FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \ + offsetof(struct xgbe_prv_data, mmc_stats._var), \ + } + +#define XGMAC_EXT_STAT(_string, _var) \ + { _string, \ + FIELD_SIZEOF(struct xgbe_ext_stats, _var), \ + offsetof(struct xgbe_prv_data, ext_stats._var), \ + } +static const struct xgbe_stats xgbe_gstring_stats[] = { + XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb), + XGMAC_MMC_STAT("tx_packets", txframecount_gb), + XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb), + XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb), + XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb), + XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g), + XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets), + XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets), + XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb), + XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), + XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb), + XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb), + XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb), + XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb), + XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror), + XGMAC_MMC_STAT("tx_pause_frames", txpauseframes), + + XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb), + XGMAC_MMC_STAT("rx_packets", rxframecount_gb), + XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g), + XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g), + XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g), + XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb), + XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets), + XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb), + XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), + XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), + XGMAC_MMC_STAT("rx_256_to_511_byte_packets", 
rx256to511octets_gb), + XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb), + XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb), + XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g), + XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g), + XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror), + XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror), + XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror), + XGMAC_MMC_STAT("rx_length_errors", rxlengtherror), + XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype), + XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow), + XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror), + XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors), + XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors), + XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes), + XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets), + XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable), +}; + +#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats) + +char** alloc_sysctl_buffer(void); +void get_val(char *buf, char **op, char **val, int *n_op); +void fill_data(struct sysctl_op *sys_op, int flag, unsigned int value); + +static int +exit_bad_op(void) +{ + + printf("SYSCTL: bad command line option (s)\n"); + return(-EINVAL); +} + +static inline unsigned +fls_long(unsigned long l) +{ + + if (sizeof(l) == 4) + return (fls(l)); + return (fls64(l)); +} + +static inline __attribute__((const)) +unsigned long __rounddown_pow_of_two(unsigned long n) +{ + + return (1UL << (fls_long(n) - 1)); +} + +static inline int +get_ubuf(struct sysctl_req *req, char *ubuf) +{ + int rc; + + printf("%s: len:0x%li idx:0x%li\n", __func__, req->newlen, + req->newidx); + if (req->newlen >= SYSCTL_BUF_LEN) + return (-EINVAL); + + rc = SYSCTL_IN(req, ubuf, req->newlen); + if (rc) + return (rc); + ubuf[req->newlen] = '\0'; + + return (0); +} + +char** +alloc_sysctl_buffer(void) +{ + char **buffer; + int i; + + 
buffer = malloc(sizeof(char *)*32, M_AXGBE, M_WAITOK | M_ZERO); + for(i = 0; i < 32; i++) + buffer[i] = malloc(sizeof(char)*32, M_AXGBE, M_WAITOK | M_ZERO); + + return (buffer); +} + +void +get_val(char *buf, char **op, char **val, int *n_op) +{ + int blen = strlen(buf); + int count = 0; + int i, j; + + *n_op = 0; + for (i = 0; i < blen; i++) { + count++; + /* Get sysctl command option */ + for (j = 0; buf[i] != ' '; j++) { + if (i >= blen) + break; + op[*n_op][j] = buf[i++]; + } + op[*n_op][j+1] = '\0'; + if (i >= strlen(buf)) + goto out; + + /* Get sysctl value*/ + i++; + for (j = 0; buf[i] != ' '; j++) { + if (i >= blen) + break; + val[*n_op][j] = buf[i++]; + } + val[*n_op][j+1] = '\0'; + if (i >= strlen(buf)) + goto out; + + *n_op = count; + } + +out: + *n_op = count; +} + +void +fill_data(struct sysctl_op *sys_op, int flag, unsigned int value) +{ + + switch(flag) { + case 1: + sys_op->rx_coalesce_usecs = value; + break; + case 2: + sys_op->rx_max_coalesced_frames = value; + break; + case 3: + sys_op->rx_coalesce_usecs_irq = value; + break; + case 4: + sys_op->rx_max_coalesced_frames_irq = value; + break; + case 5: + sys_op->tx_coalesce_usecs = value; + break; + case 6: + sys_op->tx_max_coalesced_frames = value; + break; + case 7: + sys_op->tx_coalesce_usecs_irq = value; + break; + case 8: + sys_op->tx_max_coalesced_frames_irq = value; + break; + case 9: + sys_op->stats_block_coalesce_usecs = value; + break; + case 10: + sys_op->use_adaptive_rx_coalesce = value; + break; + case 11: + sys_op->use_adaptive_tx_coalesce = value; + break; + case 12: + sys_op->pkt_rate_low = value; + break; + case 13: + sys_op->rx_coalesce_usecs_low = value; + break; + case 14: + sys_op->rx_max_coalesced_frames_low = value; + break; + case 15: + sys_op->tx_coalesce_usecs_low = value; + break; + case 16: + sys_op->tx_max_coalesced_frames_low = value; + break; + case 17: + sys_op->pkt_rate_high = value; + break; + case 18: + sys_op->rx_coalesce_usecs_high = value; + break; + case 19: + 
sys_op->rx_max_coalesced_frames_high = value; + break; + case 20: + sys_op->tx_coalesce_usecs_high = value; + break; + case 21: + sys_op->tx_max_coalesced_frames_high = value; + break; + case 22: + sys_op->rate_sample_interval = value; + break; + case 23: + sys_op->autoneg = value; + break; + case 24: + sys_op->rx_pause = value; + break; + case 25: + sys_op->tx_pause = value; + break; + case 26: + sys_op->speed = value; + break; + case 27: + sys_op->duplex = value; + break; + case 28: + sys_op->rx_pending = value; + break; + case 29: + sys_op->rx_mini_pending = value; + break; + case 30: + sys_op->rx_jumbo_pending = value; + break; + case 31: + sys_op->tx_pending = value; + break; + default: + printf("Option error\n"); + } +} + +static int +parse_generic_sysctl(struct xgbe_prv_data *pdata, char *buf, + struct sysctl_info *info, unsigned int n_info) +{ + struct sysctl_op *sys_op = pdata->sys_op; + unsigned int value; + char **op, **val; + int n_op = 0; + int rc = 0; + int i, idx; + + op = alloc_sysctl_buffer(); + val = alloc_sysctl_buffer(); + get_val(buf, op, val, &n_op); + + for (i = 0; i < n_op; i++) { + for (idx = 0; idx < n_info; idx++) { + if (strcmp(info[idx].name, op[i]) == 0) { + if (strcmp(info[idx].support, + "not-supported") == 0){ + axgbe_printf(1, "ignoring not-supported " + "option \"%s\"\n", info[idx].name); + break; + } + switch(info[idx].type) { + case SYSL_BOOL: { + if (!strcmp(val[i], "on")) + fill_data(sys_op, + info[idx].flag, 1); + else if (!strcmp(val[i], "off")) + fill_data(sys_op, + info[idx].flag, 0); + else + rc = exit_bad_op(); + break; + } + case SYSL_S32: + sscanf(val[i], "%u", &value); + fill_data(sys_op, info[idx].flag, value); + break; + case SYSL_U8: + if (!strcmp(val[i], "half")) + fill_data(sys_op, + info[idx].flag, DUPLEX_HALF); + else if (!strcmp(val[i], "full")) + fill_data(sys_op, + info[idx].flag, DUPLEX_FULL); + else + exit_bad_op(); + default: + rc = exit_bad_op(); + } + } + } + } + + for(i = 0; i < 32; i++) + free(op[i], 
M_AXGBE); + free(op, M_AXGBE); + + for(i = 0; i < 32; i++) + free(val[i], M_AXGBE); + free(val, M_AXGBE); + return (rc); +} + + +static int +sysctl_xgmac_reg_addr_handler(SYSCTL_HANDLER_ARGS) +{ + struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; + ssize_t buf_size = 64; + char buf[buf_size]; + struct sbuf *sb; + unsigned int reg; + int rc = 0; + + if (req->newptr == NULL) { + sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); + if (sb == NULL) { + rc = sb->s_error; + return (rc); + } + + axgbe_printf(2, "READ: %s: sysctl_xgmac_reg: 0x%x\n", __func__, + pdata->sysctl_xgmac_reg); + sbuf_printf(sb, "\nXGMAC reg_addr: 0x%x\n", + pdata->sysctl_xgmac_reg); + rc = sbuf_finish(sb); + sbuf_delete(sb); + return (rc); + } + + rc = get_ubuf(req, buf); + if (rc == 0) { + sscanf(buf, "%x", ®); + axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg); + pdata->sysctl_xgmac_reg = reg; + } + + axgbe_printf(2, "%s: rc= %d\n", __func__, rc); + return (rc); +} + +static int +sysctl_get_drv_info_handler(SYSCTL_HANDLER_ARGS) +{ + struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; + struct xgbe_hw_features *hw_feat = &pdata->hw_feat; + ssize_t buf_size = 64; + struct sbuf *sb; + int rc = 0; + + if (req->newptr == NULL) { + sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); + if (sb == NULL) { + rc = sb->s_error; + return (rc); + } + + sbuf_printf(sb, "\ndriver: %s", XGBE_DRV_NAME); + sbuf_printf(sb, "\nversion: %s", XGBE_DRV_VERSION); + sbuf_printf(sb, "\nfirmware-version: %d.%d.%d", + XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER), + XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID), + XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER)); + sbuf_printf(sb, "\nbus-info: %04d:%02d:%02d", + pdata->pcie_bus, pdata->pcie_device, pdata->pcie_func); + + rc = sbuf_finish(sb); + sbuf_delete(sb); + return (rc); + } + + return (-EINVAL); +} + +static int +sysctl_get_link_info_handler(SYSCTL_HANDLER_ARGS) +{ + struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; + 
ssize_t buf_size = 64; + struct sbuf *sb; + int rc = 0; + + if (req->newptr == NULL) { + sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); + if (sb == NULL) { + rc = sb->s_error; + return (rc); + } + + sbuf_printf(sb, "\nLink is %s", pdata->phy.link ? "Up" : "Down"); + rc = sbuf_finish(sb); + sbuf_delete(sb); + return (0); + } + + return (-EINVAL); +} + +#define COALESCE_SYSCTL_INFO(__coalop) \ +{ \ + { "adaptive-rx", SYSL_BOOL, use_adaptive_rx_coalesce, "not-supported" }, \ + { "adaptive-tx", SYSL_BOOL, use_adaptive_tx_coalesce, "not-supported" }, \ + { "sample-interval", SYSL_S32, rate_sample_interval, "not-supported" }, \ + { "stats-block-usecs", SYSL_S32, stats_block_coalesce_usecs, "not-supported" }, \ + { "pkt-rate-low", SYSL_S32, pkt_rate_low, "not-supported" }, \ + { "pkt-rate-high", SYSL_S32, pkt_rate_high, "not-supported" }, \ + { "rx-usecs", SYSL_S32, rx_coalesce_usecs, "supported" }, \ + { "rx-frames", SYSL_S32, rx_max_coalesced_frames, "supported" }, \ + { "rx-usecs-irq", SYSL_S32, rx_coalesce_usecs_irq, "not-supported" }, \ + { "rx-frames-irq", SYSL_S32, rx_max_coalesced_frames_irq, "not-supported" }, \ + { "tx-usecs", SYSL_S32, tx_coalesce_usecs, "not-supported" }, \ + { "tx-frames", SYSL_S32, tx_max_coalesced_frames, "supported" }, \ + { "tx-usecs-irq", SYSL_S32, tx_coalesce_usecs_irq, "not-supported" }, \ + { "tx-frames-irq", SYSL_S32, tx_max_coalesced_frames_irq, "not-supported" }, \ + { "rx-usecs-low", SYSL_S32, rx_coalesce_usecs_low, "not-supported" }, \ + { "rx-frames-low", SYSL_S32, rx_max_coalesced_frames_low, "not-supported"}, \ + { "tx-usecs-low", SYSL_S32, tx_coalesce_usecs_low, "not-supported" }, \ + { "tx-frames-low", SYSL_S32, tx_max_coalesced_frames_low, "not-supported" }, \ + { "rx-usecs-high", SYSL_S32, rx_coalesce_usecs_high, "not-supported" }, \ + { "rx-frames-high", SYSL_S32, rx_max_coalesced_frames_high, "not-supported" }, \ + { "tx-usecs-high", SYSL_S32, tx_coalesce_usecs_high, "not-supported" }, \ + { "tx-frames-high", 
SYSL_S32, tx_max_coalesced_frames_high, "not-supported" }, \ +} + +static int +sysctl_coalesce_handler(SYSCTL_HANDLER_ARGS) +{ + struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct sysctl_op *sys_op = pdata->sys_op; + struct sysctl_info sysctl_coalesce[] = COALESCE_SYSCTL_INFO(coalop); + unsigned int rx_frames, rx_riwt, rx_usecs; + unsigned int tx_frames; + ssize_t buf_size = 64; + char buf[buf_size]; + struct sbuf *sb; + int rc = 0; + + if (req->newptr == NULL) { + sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); + if (sb == NULL) { + rc = sb->s_error; + return (rc); + } + sys_op->rx_coalesce_usecs = pdata->rx_usecs; + sys_op->rx_max_coalesced_frames = pdata->rx_frames; + sys_op->tx_max_coalesced_frames = pdata->tx_frames; + + sbuf_printf(sb, "\nAdaptive RX: %s TX: %s\n", + sys_op->use_adaptive_rx_coalesce ? "on" : "off", + sys_op->use_adaptive_tx_coalesce ? "on" : "off"); + + sbuf_printf(sb, "stats-block-usecs: %u\n" + "sample-interval: %u\n" + "pkt-rate-low: %u\n" + "pkt-rate-high: %u\n" + "\n" + "rx-usecs: %u\n" + "rx-frames: %u\n" + "rx-usecs-irq: %u\n" + "rx-frames-irq: %u\n" + "\n" + "tx-usecs: %u\n" + "tx-frames: %u\n" + "tx-usecs-irq: %u\n" + "tx-frames-irq: %u\n" + "\n" + "rx-usecs-low: %u\n" + "rx-frames-low: %u\n" + "tx-usecs-low: %u\n" + "tx-frames-low: %u\n" + "\n" + "rx-usecs-high: %u\n" + "rx-frames-high: %u\n" + "tx-usecs-high: %u\n" + "tx-frames-high: %u\n", + sys_op->stats_block_coalesce_usecs, + sys_op->rate_sample_interval, + sys_op->pkt_rate_low, + sys_op->pkt_rate_high, + + sys_op->rx_coalesce_usecs, + sys_op->rx_max_coalesced_frames, + sys_op->rx_coalesce_usecs_irq, + sys_op->rx_max_coalesced_frames_irq, + + sys_op->tx_coalesce_usecs, + sys_op->tx_max_coalesced_frames, + sys_op->tx_coalesce_usecs_irq, + sys_op->tx_max_coalesced_frames_irq, + + sys_op->rx_coalesce_usecs_low, + sys_op->rx_max_coalesced_frames_low, + sys_op->tx_coalesce_usecs_low, + 
sys_op->tx_max_coalesced_frames_low, + + sys_op->rx_coalesce_usecs_high, + sys_op->rx_max_coalesced_frames_high, + sys_op->tx_coalesce_usecs_high, + sys_op->tx_max_coalesced_frames_high); + + rc = sbuf_finish(sb); + sbuf_delete(sb); + return (0); + } + + rc = get_ubuf(req, buf); + if (rc == 0) { + parse_generic_sysctl(pdata, buf, sysctl_coalesce, + ARRAY_SIZE(sysctl_coalesce)); + + rx_riwt = hw_if->usec_to_riwt(pdata, sys_op->rx_coalesce_usecs); + rx_usecs = sys_op->rx_coalesce_usecs; + rx_frames = sys_op->rx_max_coalesced_frames; + + /* Use smallest possible value if conversion resulted in zero */ + if (rx_usecs && !rx_riwt) + rx_riwt = 1; + + /* Check the bounds of values for Rx */ + if (rx_riwt > XGMAC_MAX_DMA_RIWT) { + axgbe_printf(2, "rx-usec is limited to %d usecs\n", + hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT)); + return (-EINVAL); + } + if (rx_frames > pdata->rx_desc_count) { + axgbe_printf(2, "rx-frames is limited to %d frames\n", + pdata->rx_desc_count); + return (-EINVAL); + } + + tx_frames = sys_op->tx_max_coalesced_frames; + + /* Check the bounds of values for Tx */ + if (tx_frames > pdata->tx_desc_count) { + axgbe_printf(2, "tx-frames is limited to %d frames\n", + pdata->tx_desc_count); + return (-EINVAL); + } + + pdata->rx_riwt = rx_riwt; + pdata->rx_usecs = rx_usecs; + pdata->rx_frames = rx_frames; + hw_if->config_rx_coalesce(pdata); + + pdata->tx_frames = tx_frames; + hw_if->config_tx_coalesce(pdata); + } + + axgbe_printf(2, "%s: rc= %d\n", __func__, rc); + + return (rc); +} + +static int +sysctl_pauseparam_handler(SYSCTL_HANDLER_ARGS) +{ + struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; + struct sysctl_op *sys_op = pdata->sys_op; + struct sysctl_info sysctl_pauseparam[] = { + { "autoneg", SYSL_BOOL, autoneg, "supported" }, + { "rx", SYSL_BOOL, rx_pause, "supported" }, + { "tx", SYSL_BOOL, tx_pause, "supported" }, + }; + ssize_t buf_size = 512; + char buf[buf_size]; + struct sbuf *sb; + int rc = 0; + + if (req->newptr == NULL) { + 
sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); + if (sb == NULL) { + rc = sb->s_error; + return (rc); + } + sys_op->autoneg = pdata->phy.pause_autoneg; + sys_op->tx_pause = pdata->phy.tx_pause; + sys_op->rx_pause = pdata->phy.rx_pause; + + sbuf_printf(sb, + "\nAutonegotiate: %s\n" + "RX: %s\n" + "TX: %s\n", + sys_op->autoneg ? "on" : "off", + sys_op->rx_pause ? "on" : "off", + sys_op->tx_pause ? "on" : "off"); + + if (pdata->phy.lp_advertising) { + int an_rx = 0, an_tx = 0; + + if (pdata->phy.advertising & pdata->phy.lp_advertising & + ADVERTISED_Pause) { + an_tx = 1; + an_rx = 1; + } else if (pdata->phy.advertising & + pdata->phy.lp_advertising & ADVERTISED_Asym_Pause) { + if (pdata->phy.advertising & ADVERTISED_Pause) + an_rx = 1; + else if (pdata->phy.lp_advertising & + ADVERTISED_Pause) + an_tx = 1; + } + sbuf_printf(sb, + "\n->\nRX negotiated: %s\n" + "TX negotiated: %s\n", + an_rx ? "on" : "off", + an_tx ? "on" : "off"); + } + rc = sbuf_finish(sb); + sbuf_delete(sb); + return (0); + } + + rc = get_ubuf(req, buf); + if (rc == 0) { + parse_generic_sysctl(pdata, buf, sysctl_pauseparam, + ARRAY_SIZE(sysctl_pauseparam)); + + if (sys_op->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) { + axgbe_error("autoneg disabled, pause autoneg not available\n"); + return (-EINVAL); + } + + pdata->phy.pause_autoneg = sys_op->autoneg; + pdata->phy.tx_pause = sys_op->tx_pause; + pdata->phy.rx_pause = sys_op->rx_pause; + + XGBE_CLR_ADV(&pdata->phy, Pause); + XGBE_CLR_ADV(&pdata->phy, Asym_Pause); + + if (sys_op->rx_pause) { + XGBE_SET_ADV(&pdata->phy, Pause); + XGBE_SET_ADV(&pdata->phy, Asym_Pause); + } + + if (sys_op->tx_pause) { + /* Equivalent to XOR of Asym_Pause */ + if (XGBE_ADV(&pdata->phy, Asym_Pause)) + XGBE_CLR_ADV(&pdata->phy, Asym_Pause); + else + XGBE_SET_ADV(&pdata->phy, Asym_Pause); + } + + if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) + rc = pdata->phy_if.phy_config_aneg(pdata); + + } + + return (rc); +} + +static int 
+sysctl_link_ksettings_handler(SYSCTL_HANDLER_ARGS) +{ + struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; + struct sysctl_op *sys_op = pdata->sys_op; + struct sysctl_info sysctl_linksettings[] = { + { "autoneg", SYSL_BOOL, autoneg, "supported" }, + { "speed", SYSL_U32, speed, "supported" }, + { "duplex", SYSL_U8, duplex, "supported" }, + }; + ssize_t buf_size = 512; + char buf[buf_size], link_modes[16], speed_modes[16]; + struct sbuf *sb; + uint32_t speed; + int rc = 0; + + if (req->newptr == NULL) { + sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); + if (sb == NULL) { + rc = sb->s_error; + return (rc); + } + sys_op->autoneg = pdata->phy.autoneg; + sys_op->speed = pdata->phy.speed; + sys_op->duplex = pdata->phy.duplex; + + XGBE_LM_COPY(&pdata->phy, supported, &pdata->phy, supported); + XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, advertising); + XGBE_LM_COPY(&pdata->phy, lp_advertising, &pdata->phy, lp_advertising); + + switch (sys_op->speed) { + case 1: + strcpy(link_modes, "Unknown"); + strcpy(speed_modes, "Unknown"); + break; + case 2: + strcpy(link_modes, "10Gbps/Full"); + strcpy(speed_modes, "10000"); + break; + case 3: + strcpy(link_modes, "2.5Gbps/Full"); + strcpy(speed_modes, "2500"); + break; + case 4: + strcpy(link_modes, "1Gbps/Full"); + strcpy(speed_modes, "1000"); + break; + case 5: + strcpy(link_modes, "100Mbps/Full"); + strcpy(speed_modes, "100"); + break; + case 6: + strcpy(link_modes, "10Mbps/Full"); + strcpy(speed_modes, "10"); + break; + } + + sbuf_printf(sb, + "\nlink_modes: %s\n" + "autonegotiation: %s\n" + "speed: %sMbps\n", + link_modes, + (sys_op->autoneg == AUTONEG_DISABLE) ? 
"off" : "on", + speed_modes); + + switch (sys_op->duplex) { + case DUPLEX_HALF: + sbuf_printf(sb, "Duplex: Half\n"); + break; + case DUPLEX_FULL: + sbuf_printf(sb, "Duplex: Full\n"); + break; + default: + sbuf_printf(sb, "Duplex: Unknown\n"); + break; + } + rc = sbuf_finish(sb); + sbuf_delete(sb); + return (0); + } + + rc = get_ubuf(req, buf); + if (rc == 0) { + parse_generic_sysctl(pdata, buf, sysctl_linksettings, + ARRAY_SIZE(sysctl_linksettings)); + + speed = sys_op->speed; + + if ((sys_op->autoneg != AUTONEG_ENABLE) && + (sys_op->autoneg != AUTONEG_DISABLE)) { + axgbe_error("unsupported autoneg %hhu\n", + (unsigned char)sys_op->autoneg); + return (-EINVAL); + } + + if (sys_op->autoneg == AUTONEG_DISABLE) { + if (!pdata->phy_if.phy_valid_speed(pdata, speed)) { + axgbe_error("unsupported speed %u\n", speed); + return (-EINVAL); + } + + if (sys_op->duplex != DUPLEX_FULL) { + axgbe_error("unsupported duplex %hhu\n", + (unsigned char)sys_op->duplex); + return (-EINVAL); + } + } + + pdata->phy.autoneg = sys_op->autoneg; + pdata->phy.speed = speed; + pdata->phy.duplex = sys_op->duplex; + + if (sys_op->autoneg == AUTONEG_ENABLE) + XGBE_SET_ADV(&pdata->phy, Autoneg); + else + XGBE_CLR_ADV(&pdata->phy, Autoneg); + + if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) + rc = pdata->phy_if.phy_config_aneg(pdata); + } + + return (rc); +} + +static int +sysctl_ringparam_handler(SYSCTL_HANDLER_ARGS) +{ + struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; + struct sysctl_op *sys_op = pdata->sys_op; + struct sysctl_info sysctl_ringparam[] = { + { "rx", SYSL_S32, rx_pending, "supported" }, + { "rx-mini", SYSL_S32, rx_mini_pending, "supported" }, + { "rx-jumbo", SYSL_S32, rx_jumbo_pending, "supported" }, + { "tx", SYSL_S32, tx_pending, "supported" }, + }; + ssize_t buf_size = 512; + unsigned int rx, tx; + char buf[buf_size]; + struct sbuf *sb; + int rc = 0; + + if (req->newptr == NULL) { + sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); + if (sb == NULL) { + rc = 
sb->s_error; + return (rc); + } + sys_op->rx_max_pending = XGBE_RX_DESC_CNT_MAX; + sys_op->tx_max_pending = XGBE_TX_DESC_CNT_MAX; + sys_op->rx_pending = pdata->rx_desc_count; + sys_op->tx_pending = pdata->tx_desc_count; + + sbuf_printf(sb, + "\nPre-set maximums:\n" + "RX: %u\n" + "RX Mini: %u\n" + "RX Jumbo: %u\n" + "TX: %u\n", + sys_op->rx_max_pending, + sys_op->rx_mini_max_pending, + sys_op->rx_jumbo_max_pending, + sys_op->tx_max_pending); + + sbuf_printf(sb, + "\nCurrent hardware settings:\n" + "RX: %u\n" + "RX Mini: %u\n" + "RX Jumbo: %u\n" + "TX: %u\n", + sys_op->rx_pending, + sys_op->rx_mini_pending, + sys_op->rx_jumbo_pending, + sys_op->tx_pending); + + rc = sbuf_finish(sb); + sbuf_delete(sb); + return (0); + } + + rc = get_ubuf(req, buf); + if (rc == 0) { + parse_generic_sysctl(pdata, buf, sysctl_ringparam, + ARRAY_SIZE(sysctl_ringparam)); + + if (sys_op->rx_mini_pending || sys_op->rx_jumbo_pending) { + axgbe_error("unsupported ring parameter\n"); + return (-EINVAL); + } + + if ((sys_op->rx_pending < XGBE_RX_DESC_CNT_MIN) || + (sys_op->rx_pending > XGBE_RX_DESC_CNT_MAX)) { + axgbe_error("rx ring param must be between %u and %u\n", + XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX); + return (-EINVAL); + } + + if ((sys_op->tx_pending < XGBE_TX_DESC_CNT_MIN) || + (sys_op->tx_pending > XGBE_TX_DESC_CNT_MAX)) { + axgbe_error("tx ring param must be between %u and %u\n", + XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX); + return (-EINVAL); + } + + rx = __rounddown_pow_of_two(sys_op->rx_pending); + if (rx != sys_op->rx_pending) + axgbe_printf(1, "rx ring param rounded to power of 2: %u\n", + rx); + + tx = __rounddown_pow_of_two(sys_op->tx_pending); + if (tx != sys_op->tx_pending) + axgbe_printf(1, "tx ring param rounded to power of 2: %u\n", + tx); + + if ((rx == pdata->rx_desc_count) && + (tx == pdata->tx_desc_count)) + goto out; + + pdata->rx_desc_count = rx; + pdata->tx_desc_count = tx; + + /* TODO - restart dev */ + } + +out: + return (0); +} + +static int 
+sysctl_channels_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	struct sysctl_op *sys_op = pdata->sys_op;
+	struct sysctl_info sysctl_channels[] = {
+		{ "rx", SYSL_S32, rx_count, "supported" },
+		{ "tx", SYSL_S32, tx_count, "supported" },
+		{ "other", SYSL_S32, other_count, "supported" },
+		{ "combined", SYSL_S32, combined_count, "supported" },
+	};
+	unsigned int rx, tx, combined;
+	ssize_t buf_size = 512;
+	char buf[buf_size];
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+		rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
+		rx = min(rx, pdata->channel_irq_count);
+		tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
+		tx = min(tx, pdata->channel_irq_count);
+		tx = min(tx, pdata->tx_max_q_count);
+
+		combined = min(rx, tx);
+
+		sys_op->max_combined = combined;
+		sys_op->max_rx = rx ? rx - 1 : 0;
+		sys_op->max_tx = tx ? tx - 1 : 0;
+
+		/* Get current settings based on device state */
+		rx = pdata->rx_ring_count;
+		tx = pdata->tx_ring_count;
+
+		combined = min(rx, tx);
+		rx -= combined;
+		tx -= combined;
+
+		sys_op->combined_count = combined;
+		sys_op->rx_count = rx;
+		sys_op->tx_count = tx;
+
+		sbuf_printf(sb,
+		    "\nPre-set maximums:\n"
+		    "RX: %u\n"
+		    "TX: %u\n"
+		    "Other: %u\n"
+		    "Combined: %u\n",
+		    sys_op->max_rx, sys_op->max_tx,
+		    sys_op->max_other,
+		    sys_op->max_combined);
+
+		sbuf_printf(sb,
+		    "\nCurrent hardware settings:\n"
+		    "RX: %u\n"
+		    "TX: %u\n"
+		    "Other: %u\n"
+		    "Combined: %u\n",
+		    sys_op->rx_count, sys_op->tx_count,
+		    sys_op->other_count,
+		    sys_op->combined_count);
+
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		parse_generic_sysctl(pdata, buf, sysctl_channels,
+		    ARRAY_SIZE(sysctl_channels));
+
+		axgbe_error( "channel inputs: combined=%u, rx-only=%u,"
+		    " tx-only=%u\n", sys_op->combined_count,
+		    sys_op->rx_count, sys_op->tx_count);
+	}
+
+	return (rc);
+}
+
+
+static int
+sysctl_mac_stats_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	struct sbuf *sb;
+	int rc = 0;
+	int i;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		pdata->hw_if.read_mmc_stats(pdata);
+		for (i = 0; i < XGBE_STATS_COUNT; i++) {
+			sbuf_printf(sb, "\n %s: %lu",
+			    xgbe_gstring_stats[i].stat_string,
+			    *(uint64_t *)((uint8_t *)pdata + xgbe_gstring_stats[i].stat_offset));
+		}
+		for (i = 0; i < pdata->tx_ring_count; i++) {
+			sbuf_printf(sb,
+			    "\n txq_packets[%d]: %lu"
+			    "\n txq_bytes[%d]: %lu",
+			    i, pdata->ext_stats.txq_packets[i],
+			    i, pdata->ext_stats.txq_bytes[i]);
+		}
+		for (i = 0; i < pdata->rx_ring_count; i++) {
+			sbuf_printf(sb,
+			    "\n rxq_packets[%d]: %lu"
+			    "\n rxq_bytes[%d]: %lu",
+			    i, pdata->ext_stats.rxq_packets[i],
+			    i, pdata->ext_stats.rxq_bytes[i]);
+		}
+
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	return (EINVAL);
+}
+
+static int
+sysctl_xgmac_reg_value_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	unsigned int value;
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		value = XGMAC_IOREAD(pdata, pdata->sysctl_xgmac_reg);
+		axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value);
+		sbuf_printf(sb, "\nXGMAC reg_value: 0x%x\n", value);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%x", &value);
+		axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value);
+		XGMAC_IOWRITE(pdata, pdata->sysctl_xgmac_reg, value);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+static int
+sysctl_xpcs_mmd_reg_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	unsigned int reg;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		axgbe_printf(2, "READ: %s: xpcs_mmd: 0x%x\n", __func__,
+		    pdata->sysctl_xpcs_mmd);
+		sbuf_printf(sb, "\nXPCS mmd_reg: 0x%x\n",
+		    pdata->sysctl_xpcs_mmd);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%x", &reg);
+		axgbe_printf(2, "WRITE: %s: mmd_reg: 0x%x\n", __func__, reg);
+		pdata->sysctl_xpcs_mmd = reg;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+static int
+sysctl_xpcs_reg_addr_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	unsigned int reg;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		axgbe_printf(2, "READ: %s: sysctl_xpcs_reg: 0x%x\n", __func__,
+		    pdata->sysctl_xpcs_reg);
+		sbuf_printf(sb, "\nXPCS reg_addr: 0x%x\n",
+		    pdata->sysctl_xpcs_reg);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%x", &reg);
+		axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg);
+		pdata->sysctl_xpcs_reg = reg;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+static int
+sysctl_xpcs_reg_value_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	unsigned int value;
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		value = XMDIO_READ(pdata, pdata->sysctl_xpcs_mmd,
+		    pdata->sysctl_xpcs_reg);
+		axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value);
+		sbuf_printf(sb, "\nXPCS reg_value: 0x%x\n", value);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%x", &value);
+		axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value);
+		XMDIO_WRITE(pdata, pdata->sysctl_xpcs_mmd,
+		    pdata->sysctl_xpcs_reg, value);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+static int
+sysctl_xprop_reg_addr_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	unsigned int reg;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		axgbe_printf(2, "READ: %s: sysctl_xprop_reg: 0x%x\n", __func__,
+		    pdata->sysctl_xprop_reg);
+		sbuf_printf(sb, "\nXPROP reg_addr: 0x%x\n",
+		    pdata->sysctl_xprop_reg);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%x", &reg);
+		axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg);
+		pdata->sysctl_xprop_reg = reg;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+static int
+sysctl_xprop_reg_value_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	unsigned int value;
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		value = XP_IOREAD(pdata, pdata->sysctl_xprop_reg);
+		axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value);
+		sbuf_printf(sb, "\nXPROP reg_value: 0x%x\n", value);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%x", &value);
+		axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value);
+		XP_IOWRITE(pdata, pdata->sysctl_xprop_reg, value);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+static int
+sysctl_xi2c_reg_addr_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	unsigned int reg;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		axgbe_printf(2, "READ: %s: sysctl_xi2c_reg: 0x%x\n", __func__,
+		    pdata->sysctl_xi2c_reg);
+		sbuf_printf(sb, "\nXI2C reg_addr: 0x%x\n",
+		    pdata->sysctl_xi2c_reg);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%x", &reg);
+		axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg);
+		pdata->sysctl_xi2c_reg = reg;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+static int
+sysctl_xi2c_reg_value_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	unsigned int value;
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		value = XI2C_IOREAD(pdata, pdata->sysctl_xi2c_reg);
+		axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value);
+		sbuf_printf(sb, "\nXI2C reg_value: 0x%x\n", value);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%x", &value);
+		axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value);
+		XI2C_IOWRITE(pdata, pdata->sysctl_xi2c_reg, value);
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+static int
+sysctl_an_cdr_wr_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	unsigned int an_cdr_wr = 0;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		axgbe_printf(2, "READ: %s: an_cdr_wr: %d\n", __func__,
+		    pdata->sysctl_an_cdr_workaround);
+		sbuf_printf(sb, "%d\n", pdata->sysctl_an_cdr_workaround);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%u", &an_cdr_wr);
+		axgbe_printf(2, "WRITE: %s: an_cdr_wr: 0x%d\n", __func__,
+		    an_cdr_wr);
+
+		if (an_cdr_wr)
+			pdata->sysctl_an_cdr_workaround = 1;
+		else
+			pdata->sysctl_an_cdr_workaround = 0;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+static int
+sysctl_an_cdr_track_early_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1;
+	unsigned int an_cdr_track_early = 0;
+	ssize_t buf_size = 64;
+	char buf[buf_size];
+	struct sbuf *sb;
+	int rc = 0;
+
+	if (req->newptr == NULL) {
+		sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req);
+		if (sb == NULL) {
+			rc = ENOMEM;
+			return (rc);
+		}
+
+		axgbe_printf(2, "READ: %s: an_cdr_track_early %d\n", __func__,
+		    pdata->sysctl_an_cdr_track_early);
+		sbuf_printf(sb, "%d\n", pdata->sysctl_an_cdr_track_early);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+		return (rc);
+	}
+
+	rc = get_ubuf(req, buf);
+	if (rc == 0) {
+		sscanf(buf, "%u", &an_cdr_track_early);
+		axgbe_printf(2, "WRITE: %s: an_cdr_track_early: %d\n", __func__,
+		    an_cdr_track_early);
+
+		if (an_cdr_track_early)
+			pdata->sysctl_an_cdr_track_early = 1;
+		else
+			pdata->sysctl_an_cdr_track_early = 0;
+	}
+
+	axgbe_printf(2, "%s: rc= %d\n", __func__, rc);
+	return (rc);
+}
+
+void
+axgbe_sysctl_exit(struct xgbe_prv_data *pdata)
+{
+
+	if (pdata->sys_op)
+		free(pdata->sys_op, M_AXGBE);
+}
+
+void
+axgbe_sysctl_init(struct xgbe_prv_data *pdata)
+{
+	struct sysctl_ctx_list *clist;
+	struct sysctl_oid_list *top;
+	struct sysctl_oid *parent;
+	struct sysctl_op *sys_op;
+
+	sys_op = malloc(sizeof(*sys_op), M_AXGBE, M_WAITOK | M_ZERO);
+	pdata->sys_op = sys_op;
+
+	clist = device_get_sysctl_ctx(pdata->dev);
+	parent = device_get_sysctl_tree(pdata->dev);
+	top = SYSCTL_CHILDREN(parent);
+
+	/* Set defaults */
+	pdata->sysctl_xgmac_reg = 0;
+	pdata->sysctl_xpcs_mmd = 1;
+	pdata->sysctl_xpcs_reg = 0;
+
+	SYSCTL_ADD_UINT(clist, top, OID_AUTO, "axgbe_debug_level", CTLFLAG_RWTUN,
+	    &pdata->debug_level, 0, "axgbe log level -- higher is verbose");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xgmac_register",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xgmac_reg_addr_handler, "IU",
+	    "xgmac register addr");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xgmac_register_value",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xgmac_reg_value_handler, "IU",
+	    "xgmac register value");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xpcs_mmd",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xpcs_mmd_reg_handler, "IU", "xpcs mmd register");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xpcs_register",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xpcs_reg_addr_handler, "IU", "xpcs register");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xpcs_register_value",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_xpcs_reg_value_handler, "IU",
+	    "xpcs register value");
+
+	if (pdata->xpcs_res) {
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xprop_register",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_xprop_reg_addr_handler,
+		    "IU", "xprop register");
+
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xprop_register_value",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_xprop_reg_value_handler,
+		    "IU", "xprop register value");
+	}
+
+	if (pdata->xpcs_res) {
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xi2c_register",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_xi2c_reg_addr_handler,
+		    "IU", "xi2c register");
+
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xi2c_register_value",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_xi2c_reg_value_handler,
+		    "IU", "xi2c register value");
+	}
+
+	if (pdata->vdata->an_cdr_workaround) {
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "an_cdr_workaround",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_an_cdr_wr_handler, "IU",
+		    "an cdr workaround");
+
+		SYSCTL_ADD_PROC(clist, top, OID_AUTO, "an_cdr_track_early",
+		    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+		    pdata, 0, sysctl_an_cdr_track_early_handler, "IU",
+		    "an cdr track early");
+	}
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "drv_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_get_drv_info_handler, "IU",
+	    "xgbe drv info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "link_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_get_link_info_handler, "IU",
+	    "xgbe link info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "coalesce_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_coalesce_handler, "IU",
+	    "xgbe coalesce info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "pauseparam_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_pauseparam_handler, "IU",
+	    "xgbe pauseparam info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "link_ksettings_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_link_ksettings_handler, "IU",
+	    "xgbe link_ksettings info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "ringparam_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_ringparam_handler, "IU",
+	    "xgbe ringparam info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "channels_info",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_channels_handler, "IU",
+	    "xgbe channels info");
+
+	SYSCTL_ADD_PROC(clist, top, OID_AUTO, "mac_stats",
+	    CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
+	    pdata, 0, sysctl_mac_stats_handler, "IU",
+	    "xgbe mac stats");
+}
diff --git a/sys/dev/axgbe/xgbe-txrx.c b/sys/dev/axgbe/xgbe-txrx.c
new file mode 100644
index 000000000000..c6872e584f81
--- /dev/null
+++ b/sys/dev/axgbe/xgbe-txrx.c
@@ -0,0 +1,777 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 Advanced Micro Devices, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Contact Information :
+ * Rajesh Kumar <rajesh1.kumar@amd.com>
+ * Shreyank Amartya <shreyank.amartya@amd.com>
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+/*
+ * IFLIB interfaces
+ */
+static int axgbe_isc_txd_encap(void *, if_pkt_info_t);
+static void axgbe_isc_txd_flush(void *, uint16_t, qidx_t);
+static int axgbe_isc_txd_credits_update(void *, uint16_t, bool);
+static void axgbe_isc_rxd_refill(void *, if_rxd_update_t);
+static void axgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
+static int axgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
+static int axgbe_isc_rxd_pkt_get(void *, if_rxd_info_t);
+
+struct if_txrx axgbe_txrx = {
+	.ift_txd_encap = axgbe_isc_txd_encap,
+	.ift_txd_flush = axgbe_isc_txd_flush,
+	.ift_txd_credits_update = axgbe_isc_txd_credits_update,
+	.ift_rxd_available = axgbe_isc_rxd_available,
+	.ift_rxd_pkt_get = axgbe_isc_rxd_pkt_get,
+	.ift_rxd_refill = axgbe_isc_rxd_refill,
+	.ift_rxd_flush = axgbe_isc_rxd_flush,
+	.ift_legacy_intr = NULL
+}; + +static void +xgbe_print_pkt_info(struct xgbe_prv_data *pdata, if_pkt_info_t pi) +{ + + axgbe_printf(1, "------Packet Info Start------\n"); + axgbe_printf(1, "pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", + pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); + axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n", + pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag); + axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", + pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); + axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n", + pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz); +} + +static bool +axgbe_ctx_desc_setup(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, + if_pkt_info_t pi) +{ + struct xgbe_ring_desc *rdesc; + struct xgbe_ring_data *rdata; + bool inc_cur = false; + + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + rdesc = rdata->rdesc; + + axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n", + pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur); + + axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n", + pi->ipi_vtag, ring->tx.cur_vlan_ctag); + + if ((pi->ipi_csum_flags & CSUM_TSO) && + (pi->ipi_tso_segsz != ring->tx.cur_mss)) { + /* + * Set TSO maximum segment size + * Mark as context descriptor + * Indicate this descriptor contains MSS + */ + XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2, + MSS, pi->ipi_tso_segsz); + XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1); + XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1); + ring->tx.cur_mss = pi->ipi_tso_segsz; + inc_cur = true; + } + + if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) { + /* + * Mark it as context descriptor + * Set the VLAN tag + * Indicate this descriptor contains the VLAN tag + */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1); + 
XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, + VT, pi->ipi_vtag); + XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1); + ring->tx.cur_vlan_ctag = pi->ipi_vtag; + inc_cur = true; + } + + return (inc_cur); +} + +static uint16_t +axgbe_calculate_tx_parms(struct xgbe_prv_data *pdata, if_pkt_info_t pi, + struct xgbe_packet_data *packet) +{ + uint32_t tcp_payload_len = 0, bytes = 0; + uint16_t max_len, hlen, payload_len, pkts = 0; + + packet->tx_packets = packet->tx_bytes = 0; + + hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen; + if (pi->ipi_csum_flags & CSUM_TSO) { + + tcp_payload_len = pi->ipi_len - hlen; + axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n", + __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen, + pi->ipi_tcp_hlen); + + max_len = if_getmtu(pdata->netdev) + ETH_HLEN; + if (pi->ipi_vtag) + max_len += VLAN_HLEN; + + while (tcp_payload_len) { + + payload_len = max_len - hlen; + payload_len = min(payload_len, tcp_payload_len); + tcp_payload_len -= payload_len; + pkts++; + bytes += (hlen + payload_len); + axgbe_printf(1, "%s: max_len %d payload_len %d " + "tcp_len %d\n", __func__, max_len, payload_len, + tcp_payload_len); + } + } else { + pkts = 1; + bytes = pi->ipi_len; + } + + packet->tx_packets = pkts; + packet->tx_bytes = bytes; + + axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__, + packet->tx_packets, packet->tx_bytes, hlen); + + return (hlen); +} + +static int +axgbe_isc_txd_encap(void *arg, if_pkt_info_t pi) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_channel *channel; + struct xgbe_ring *ring; + struct xgbe_ring_desc *rdesc; + struct xgbe_ring_data *rdata; + struct xgbe_packet_data *packet; + unsigned int cur, start, tx_set_ic; + uint16_t offset, hlen, datalen, tcp_payload_len = 0; + int cur_seg = 0; + + xgbe_print_pkt_info(pdata, pi); + + channel = pdata->channel[pi->ipi_qsidx]; + ring = channel->tx_ring; + packet = 
&ring->packet_data; + cur = start = ring->cur; + + axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n", + __func__, pi->ipi_qsidx, ring->cur, ring->dirty); + + MPASS(pi->ipi_len != 0); + if (__predict_false(pi->ipi_len == 0)) { + axgbe_error("empty packet received from stack\n"); + return (0); + } + + MPASS(ring->cur == pi->ipi_pidx); + if (__predict_false(ring->cur != pi->ipi_pidx)) { + axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__, + ring->cur, pi->ipi_pidx); + } + + /* Determine if an interrupt should be generated for this Tx: + * Interrupt: + * - Tx frame count exceeds the frame count setting + * - Addition of Tx frame count to the frame count since the + * last interrupt was set exceeds the frame count setting + * No interrupt: + * - No frame count setting specified (ethtool -C ethX tx-frames 0) + * - Addition of Tx frame count to the frame count since the + * last interrupt was set does not exceed the frame count setting + */ + memset(packet, 0, sizeof(*packet)); + hlen = axgbe_calculate_tx_parms(pdata, pi, packet); + axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n", + __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen); + + ring->coalesce_count += packet->tx_packets; + if (!pdata->tx_frames) + tx_set_ic = 0; + else if (packet->tx_packets > pdata->tx_frames) + tx_set_ic = 1; + else if ((ring->coalesce_count % pdata->tx_frames) < (packet->tx_packets)) + tx_set_ic = 1; + else + tx_set_ic = 0; + + /* Add Context descriptor if needed (for TSO, VLAN cases) */ + if (axgbe_ctx_desc_setup(pdata, ring, pi)) + cur++; + + rdata = XGBE_GET_DESC_DATA(ring, cur); + rdesc = rdata->rdesc; + + axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x " + "ipi_len 0x%x\n", __func__, cur, + lower_32_bits(pi->ipi_segs[cur_seg].ds_addr), + upper_32_bits(pi->ipi_segs[cur_seg].ds_addr), + (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len); + + /* Update buffer address (for TSO this is the header) */ + rdesc->desc0 = 
cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr)); + rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr)); + + /* Update the buffer length */ + if (hlen == 0) + hlen = pi->ipi_segs[cur_seg].ds_len; + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen); + + /* VLAN tag insertion check */ + if (pi->ipi_vtag) { + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, + TX_NORMAL_DESC2_VLAN_INSERT); + } + + /* Mark it as First Descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); + + /* Mark it as a NORMAL descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); + + /* + * Set the OWN bit if this is not the first descriptor. For first + * descriptor, OWN bit will be set at last so that hardware will + * process the descriptors only after the OWN bit for the first + * descriptor is set + */ + if (cur != start) + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); + + if (pi->ipi_csum_flags & CSUM_TSO) { + /* Enable TSO */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1); + + tcp_payload_len = pi->ipi_len - hlen; + + /* Set TCP payload length*/ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL, + tcp_payload_len); + + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, + pi->ipi_tcp_hlen/4); + + axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len, + pi->ipi_tcp_hlen/4); + } else { + /* Enable CRC and Pad Insertion */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); + + /* Enable HW CSUM*/ + if (pi->ipi_csum_flags) + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3); + + /* Set total length to be transmitted */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len); + } + + cur++; + + for (cur_seg = 0 ; cur_seg < pi->ipi_nsegs ; cur_seg++) { + + if (cur_seg == 0) { + offset = hlen; + datalen = pi->ipi_segs[cur_seg].ds_len - hlen; + } else { + offset = 0; + datalen = pi->ipi_segs[cur_seg].ds_len; + } + + if (datalen) { + 
rdata = XGBE_GET_DESC_DATA(ring, cur); + rdesc = rdata->rdesc; + + + /* Update buffer address */ + rdesc->desc0 = + cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset)); + rdesc->desc1 = + cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset)); + + /* Update the buffer length */ + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen); + + /* Set OWN bit */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); + + /* Mark it as NORMAL descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); + + /* Enable HW CSUM*/ + if (pi->ipi_csum_flags) + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3); + + axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x " + "ipi_len 0x%x\n", __func__, cur, + lower_32_bits(pi->ipi_segs[cur_seg].ds_addr), + upper_32_bits(pi->ipi_segs[cur_seg].ds_addr), + (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len); + + cur++; + } + } + + /* Set LAST bit for the last descriptor */ + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); + + /* Set IC bit based on Tx coalescing settings */ + if (tx_set_ic) + XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); + + wmb(); + + /* Set OWN bit for the first descriptor */ + rdata = XGBE_GET_DESC_DATA(ring, start); + rdesc = rdata->rdesc; + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); + + ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1)); + + axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur, + ring->dirty); + + return (0); +} + +static void +axgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_channel *channel = pdata->channel[txqid]; + struct xgbe_ring *ring = channel->tx_ring; + struct xgbe_ring_data *rdata = XGBE_GET_DESC_DATA(ring, pidx); + + axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n", + __func__, txqid, pidx, ring->cur, ring->dirty); 
+ + MPASS(ring->cur == pidx); + if (__predict_false(ring->cur != pidx)) { + axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__, + ring->cur, pidx); + } + + wmb(); + + /* Ring Doorbell */ + if (XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO) != + lower_32_bits(rdata->rdata_paddr)) { + XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, + lower_32_bits(rdata->rdata_paddr)); + } +} + +static int +axgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; + struct xgbe_hw_if *hw_if = &sc->pdata.hw_if; + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_channel *channel = pdata->channel[txqid]; + struct xgbe_ring *ring = channel->tx_ring; + struct xgbe_ring_data *rdata; + int processed = 0; + + axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n", + __func__, txqid, clear, ring->cur, ring->dirty); + + if (__predict_false(ring->cur == ring->dirty)) { + axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n", + __func__, ring->cur, ring->dirty); + return (0); + } + + /* Check whether the first dirty descriptor is Tx complete */ + rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); + if (!hw_if->tx_complete(rdata->rdesc)) { + axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty); + return (0); + } + + /* + * If clear is false just let the caller know that there + * are descriptors to reclaim + */ + if (!clear) { + axgbe_printf(1, "<-- %s: (!clear)\n", __func__); + return (1); + } + + do { + hw_if->tx_desc_reset(rdata); + processed++; + ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1); + + /* + * tx_complete will return true for unused descriptors also. + * so, check tx_complete only until used descriptors. 
+ */ + if (ring->cur == ring->dirty) + break; + + rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); + } while (hw_if->tx_complete(rdata->rdesc)); + + axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__, + processed, ring->cur, ring->dirty); + + return (processed); +} + +static void +axgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_channel *channel = pdata->channel[iru->iru_qsidx]; + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_ring_data *rdata; + struct xgbe_ring_desc *rdesc; + unsigned int rx_usecs = pdata->rx_usecs; + unsigned int rx_frames = pdata->rx_frames; + unsigned int inte; + uint8_t count = iru->iru_count; + int i, j; + + axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d " + "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx, + iru->iru_pidx, count, ring->cur, ring->dirty); + + for (i = iru->iru_pidx, j = 0 ; j < count ; i++, j++) { + + if (i == XGBE_RX_DESC_CNT_DEFAULT) + i = 0; + + rdata = XGBE_GET_DESC_DATA(ring, i); + rdesc = rdata->rdesc; + + if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3, + RX_NORMAL_DESC3, OWN))) { + axgbe_error("%s: refill clash, cur %d dirty %d index %d" + "pidx %d\n", __func__, ring->cur, ring->dirty, j, i); + } + + /* Assuming split header is enabled */ + if (iru->iru_flidx == 0) { + + /* Fill header/buffer1 address */ + rdesc->desc0 = + cpu_to_le32(lower_32_bits(iru->iru_paddrs[j])); + rdesc->desc1 = + cpu_to_le32(upper_32_bits(iru->iru_paddrs[j])); + } else { + + /* Fill data/buffer2 address */ + rdesc->desc2 = + cpu_to_le32(lower_32_bits(iru->iru_paddrs[j])); + rdesc->desc3 = + cpu_to_le32(upper_32_bits(iru->iru_paddrs[j])); + + if (!rx_usecs && !rx_frames) { + /* No coalescing, interrupt for every descriptor */ + inte = 1; + } else { + /* Set interrupt based on Rx frame coalescing setting */ + if (rx_frames && + !(((ring->dirty + 1) &(ring->rdesc_count - 
1)) % rx_frames)) + inte = 1; + else + inte = 0; + } + + XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); + + XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); + + wmb(); + + ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1)); + } + } + + axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__, + channel->queue_index, ring->cur, ring->dirty); +} + +static void +axgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx, qidx_t pidx) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_channel *channel = pdata->channel[qsidx]; + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_ring_data *rdata; + + axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n", + __func__, qsidx, flidx, pidx, ring->cur, ring->dirty); + + if (flidx == 1) { + + rdata = XGBE_GET_DESC_DATA(ring, pidx); + + XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, + lower_32_bits(rdata->rdata_paddr)); + } + + wmb(); +} + +static int +axgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t idx, qidx_t budget) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_channel *channel = pdata->channel[qsidx]; + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_ring_data *rdata; + struct xgbe_ring_desc *rdesc; + unsigned int cur; + int count; + uint8_t incomplete = 1, context_next = 0, running = 0; + + axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n", + __func__, qsidx, idx, budget, ring->cur, ring->dirty); + + cur = ring->cur; + for (count = 0; count <= budget; ) { + + rdata = XGBE_GET_DESC_DATA(ring, cur); + rdesc = rdata->rdesc; + + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) + break; + + running = 1; + + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) + incomplete = 0; + + if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) + context_next = 1; + + if 
(XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) + context_next = 0; + + cur = (cur + 1) & (ring->rdesc_count - 1); + + if (incomplete || context_next) + continue; + + /* Increment pkt count & reset variables for next full packet */ + count++; + incomplete = 1; + context_next = 0; + running = 0; + } + + axgbe_printf(1, "--> %s: rxq %d cur %d incomp %d con_next %d running %d " + "count %d\n", __func__, qsidx, cur, incomplete, context_next, + running, count); + + return (count); +} + +static unsigned int +xgbe_rx_buf1_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata, + struct xgbe_packet_data *packet) +{ + + /* Always zero if not the first descriptor */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) { + axgbe_printf(1, "%s: Not First\n", __func__); + return (0); + } + + /* First descriptor with split header, return header length */ + if (rdata->rx.hdr_len) { + axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len); + return (rdata->rx.hdr_len); + } + + /* First descriptor but not the last descriptor and no split header, + * so the full buffer was used + */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) { + axgbe_printf(1, "%s: Not last %d\n", __func__, + pdata->rx_buf_size); + return (256); + } + + /* First descriptor and last descriptor and no split header, so + * calculate how much of the buffer was used + */ + axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len, + pdata->rx_buf_size); + + return (min_t(unsigned int, 256, rdata->rx.len)); +} + +static unsigned int +xgbe_rx_buf2_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata, + struct xgbe_packet_data *packet, unsigned int len) +{ + + /* Always the full buffer if not the last descriptor */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) { + axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size); + return (pdata->rx_buf_size); + } + + /* Last descriptor so calculate how 
much of the buffer was used + * for the last bit of data + */ + return ((rdata->rx.len != 0)? (rdata->rx.len - len) : 0); +} + +static inline void +axgbe_add_frag(struct xgbe_prv_data *pdata, if_rxd_info_t ri, int idx, int len, + int pos, int flid) +{ + axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid); + ri->iri_frags[pos].irf_flid = flid; + ri->iri_frags[pos].irf_idx = idx; + ri->iri_frags[pos].irf_len = len; +} + +static int +axgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) +{ + struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; + struct xgbe_prv_data *pdata = &sc->pdata; + struct xgbe_hw_if *hw_if = &pdata->hw_if; + struct xgbe_channel *channel = pdata->channel[ri->iri_qsidx]; + struct xgbe_ring *ring = channel->rx_ring; + struct xgbe_packet_data *packet = &ring->packet_data; + struct xgbe_ring_data *rdata; + unsigned int last, context_next, context; + unsigned int buf1_len, buf2_len, max_len, len = 0, prev_cur; + int i = 0; + + axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__, + ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty); + + memset(packet, 0, sizeof(struct xgbe_packet_data)); + + while (1) { + +read_again: + if (hw_if->dev_read(channel)) { + axgbe_printf(2, "<-- %s: OWN bit seen on %d\n", + __func__, ring->cur); + break; + } + + rdata = XGBE_GET_DESC_DATA(ring, ring->cur); + prev_cur = ring->cur; + ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1); + + last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + LAST); + + context_next = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, CONTEXT_NEXT); + + context = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, CONTEXT); + + if (!context) { + /* Get the data length in the descriptor buffers */ + buf1_len = xgbe_rx_buf1_len(pdata, rdata, packet); + len += buf1_len; + buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len); + len += buf2_len; + } else + buf1_len = buf2_len = 0; + + if (packet->errors) + axgbe_printf(1, "%s: last 
%d context %d con_next %d buf1 %d " + "buf2 %d len %d frags %d error %d\n", __func__, last, context, + context_next, buf1_len, buf2_len, len, i, packet->errors); + + axgbe_add_frag(pdata, ri, prev_cur, buf1_len, i, 0); + i++; + axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1); + i++; + + if (!last || context_next) + goto read_again; + + break; + } + + if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) { + ri->iri_csum_flags |= CSUM_IP_CHECKED; + ri->iri_csum_flags |= CSUM_IP_VALID; + axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags); + } + + max_len = if_getmtu(pdata->netdev) + ETH_HLEN; + if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) { + ri->iri_flags |= M_VLANTAG; + ri->iri_vtag = packet->vlan_ctag; + max_len += VLAN_HLEN; + axgbe_printf(2, "%s: iri_flags 0x%x vtag 0x%x\n", __func__, + ri->iri_flags, ri->iri_vtag); + } + + + if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) { + ri->iri_flowid = packet->rss_hash; + ri->iri_rsstype = packet->rss_hash_type; + axgbe_printf(2, "%s: hash 0x%x/0x%x rsstype 0x%x/0x%x\n", + __func__, packet->rss_hash, ri->iri_flowid, + packet->rss_hash_type, ri->iri_rsstype); + } + + if (__predict_false(len == 0)) + axgbe_error("%s: Zero len packet\n", __func__); + + if (__predict_false(len > max_len)) + axgbe_error("%s: Big packet %d/%d\n", __func__, len, max_len); + + if (__predict_false(packet->errors)) + axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d " + "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i, + ri->iri_cidx, ring->cur, ring->dirty, packet->errors); + + axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i); + + ri->iri_len = len; + ri->iri_nfrags = i; + + return (0); +} diff --git a/sys/dev/axgbe/xgbe.h b/sys/dev/axgbe/xgbe.h index ee55ef8f0a16..fac642cc16fa 100644 --- a/sys/dev/axgbe/xgbe.h +++ b/sys/dev/axgbe/xgbe.h @@ -1,889 +1,1359 @@ /* * AMD 10Gb Ethernet driver * + * Copyright (c) 
2014-2016,2020 Advanced Micro Devices, Inc. + * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. 
* * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * - * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. - * All rights reserved. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __XGBE_H__ #define __XGBE_H__ +#include +#if __FreeBSD_version < 1300000 +#include +#endif +#include +#include +#include + +#include +#include + +#include +#include + #include "xgbe_osdep.h" /* From linux/dcbnl.h */ #define IEEE_8021QAZ_MAX_TCS 8 #define XGBE_DRV_NAME "amd-xgbe" -#define XGBE_DRV_VERSION "1.0.2" +#define XGBE_DRV_VERSION "1.0.3" #define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver" /* Descriptor related defines */ #define XGBE_TX_DESC_CNT 512 #define XGBE_TX_DESC_MIN_FREE (XGBE_TX_DESC_CNT >> 3) #define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1) #define XGBE_RX_DESC_CNT 512 +#define XGBE_TX_DESC_CNT_MIN 64 +#define XGBE_TX_DESC_CNT_MAX 4096 +#define XGBE_RX_DESC_CNT_MIN 64 +#define XGBE_RX_DESC_CNT_MAX 4096 +#define XGBE_TX_DESC_CNT_DEFAULT 512 +#define XGBE_RX_DESC_CNT_DEFAULT 512 + #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) /* Descriptors required for maximum contiguous TSO/GSO packet */ #define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1) /* Maximum possible descriptors needed for an SKB: * - Maximum number of SKB frags * - Maximum descriptors for contiguous TSO/GSO packet * - Possible context descriptor * - Possible TSO header descriptor */ #define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2) #define XGBE_RX_MIN_BUF_SIZE 1522 #define XGBE_RX_BUF_ALIGN 64 #define XGBE_SKB_ALLOC_SIZE 256 -#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */ +#define XGBE_SPH_HDSMS_SIZE 2 /* 
Keep in sync with SKB_ALLOC_SIZ */ #define XGBE_MAX_DMA_CHANNELS 16 #define XGBE_MAX_QUEUES 16 +#define XGBE_PRIORITY_QUEUES 8 #define XGBE_DMA_STOP_TIMEOUT 5 /* DMA cache settings - Outer sharable, write-back, write-allocate */ -#define XGBE_DMA_OS_AXDOMAIN 0x2 -#define XGBE_DMA_OS_ARCACHE 0xb -#define XGBE_DMA_OS_AWCACHE 0xf +#define XGBE_DMA_OS_ARCR 0x002b2b2b +#define XGBE_DMA_OS_AWCR 0x2f2f2f2f /* DMA cache settings - System, no caches used */ -#define XGBE_DMA_SYS_AXDOMAIN 0x3 -#define XGBE_DMA_SYS_ARCACHE 0x0 -#define XGBE_DMA_SYS_AWCACHE 0x0 +#define XGBE_DMA_SYS_ARCR 0x00303030 +#define XGBE_DMA_SYS_AWCR 0x30303030 -#define XGBE_DMA_INTERRUPT_MASK 0x31c7 +/* DMA cache settings - PCI device */ +#define XGBE_DMA_PCI_ARCR 0x00000003 +#define XGBE_DMA_PCI_AWCR 0x13131313 +#define XGBE_DMA_PCI_AWARCR 0x00000313 + +/* DMA channel interrupt modes */ +#define XGBE_IRQ_MODE_EDGE 0 +#define XGBE_IRQ_MODE_LEVEL 1 #define XGMAC_MIN_PACKET 60 #define XGMAC_STD_PACKET_MTU 1500 #define XGMAC_MAX_STD_PACKET 1518 #define XGMAC_JUMBO_PACKET_MTU 9000 #define XGMAC_MAX_JUMBO_PACKET 9018 +#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */ + +#define XGMAC_PFC_DATA_LEN 46 +#define XGMAC_PFC_DELAYS 14000 + +#define XGMAC_PRIO_QUEUES(_cnt) \ + min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, (_cnt)) /* Common property names */ #define XGBE_MAC_ADDR_PROPERTY "mac-address" #define XGBE_PHY_MODE_PROPERTY "phy-mode" #define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt" #define XGBE_SPEEDSET_PROPERTY "amd,speed-set" #define XGBE_BLWC_PROPERTY "amd,serdes-blwc" #define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" #define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" #define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp" #define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config" #define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable" /* Device-tree clock names */ #define XGBE_DMA_CLOCK "dma_clk" #define XGBE_PTP_CLOCK "ptp_clk" /* ACPI property names */ #define 
XGBE_ACPI_DMA_FREQ "amd,dma-freq" #define XGBE_ACPI_PTP_FREQ "amd,ptp-freq" +/* PCI BAR mapping */ +#define XGBE_XGMAC_BAR 0 +#define XGBE_XPCS_BAR 1 +#define XGBE_MAC_PROP_OFFSET 0x1d000 +#define XGBE_I2C_CTRL_OFFSET 0x1e000 + +/* PCI MSI/MSIx support */ +#define XGBE_MSI_BASE_COUNT 4 +#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1) + +/* PCI clock frequencies */ +#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */ +#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */ + /* Timestamp support - values based on 50MHz PTP clock * 50MHz => 20 nsec */ #define XGBE_TSTAMP_SSINC 20 #define XGBE_TSTAMP_SNSINC 0 /* Driver PMT macros */ #define XGMAC_DRIVER_CONTEXT 1 #define XGMAC_IOCTL_CONTEXT 2 +#define XGMAC_FIFO_MIN_ALLOC 2048 +#define XGMAC_FIFO_UNIT 256 +#define XGMAC_FIFO_ALIGN(_x) \ + (((_x) + XGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1)) +#define XGMAC_FIFO_FC_OFF 2048 +#define XGMAC_FIFO_FC_MIN 4096 #define XGBE_FIFO_MAX 81920 #define XGBE_TC_MIN_QUANTUM 10 /* Helper macro for descriptor handling * Always use XGBE_GET_DESC_DATA to access the descriptor data * since the index is free-running and needs to be and-ed * with the descriptor count value of the ring to index to * the proper descriptor data. */ #define XGBE_GET_DESC_DATA(_ring, _idx) \ ((_ring)->rdata + \ ((_idx) & ((_ring)->rdesc_count - 1))) /* Default coalescing parameters */ #define XGMAC_INIT_DMA_TX_USECS 1000 #define XGMAC_INIT_DMA_TX_FRAMES 25 #define XGMAC_MAX_DMA_RIWT 0xff #define XGMAC_INIT_DMA_RX_USECS 30 #define XGMAC_INIT_DMA_RX_FRAMES 25 /* Flow control queue count */ #define XGMAC_MAX_FLOW_CONTROL_QUEUES 8 +/* Flow control threshold units */ +#define XGMAC_FLOW_CONTROL_UNIT 512 +#define XGMAC_FLOW_CONTROL_ALIGN(_x) \ + (((_x) + XGMAC_FLOW_CONTROL_UNIT - 1) & ~(XGMAC_FLOW_CONTROL_UNIT - 1)) +#define XGMAC_FLOW_CONTROL_VALUE(_x) \ + (((_x) < 1024) ? 
0 : ((_x) / XGMAC_FLOW_CONTROL_UNIT) - 2) +#define XGMAC_FLOW_CONTROL_MAX 33280 + /* Maximum MAC address hash table size (256 bits = 8 bytes) */ #define XGBE_MAC_HASH_TABLE_SIZE 8 /* Receive Side Scaling */ #define XGBE_RSS_HASH_KEY_SIZE 40 #define XGBE_RSS_MAX_TABLE_SIZE 256 #define XGBE_RSS_LOOKUP_TABLE_TYPE 0 #define XGBE_RSS_HASH_KEY_TYPE 1 /* Auto-negotiation */ #define XGBE_AN_MS_TIMEOUT 500 #define XGBE_LINK_TIMEOUT 10 +#define XGBE_SGMII_AN_LINK_STATUS BIT(1) +#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) +#define XGBE_SGMII_AN_LINK_SPEED_100 0x04 +#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08 +#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4) + +/* ECC correctable error notification window (seconds) */ +#define XGBE_ECC_LIMIT 60 + #define XGBE_AN_INT_CMPLT 0x01 #define XGBE_AN_INC_LINK 0x02 #define XGBE_AN_PG_RCV 0x04 #define XGBE_AN_INT_MASK 0x07 +#define XGBE_SGMII_AN_LINK_STATUS BIT(1) +#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) +#define XGBE_SGMII_AN_LINK_SPEED_100 0x04 +#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08 +#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4) + /* Rate-change complete wait/retry count */ #define XGBE_RATECHANGE_COUNT 500 /* Default SerDes settings */ #define XGBE_SPEED_10000_BLWC 0 #define XGBE_SPEED_10000_CDR 0x7 #define XGBE_SPEED_10000_PLL 0x1 #define XGBE_SPEED_10000_PQ 0x12 #define XGBE_SPEED_10000_RATE 0x0 #define XGBE_SPEED_10000_TXAMP 0xa #define XGBE_SPEED_10000_WORD 0x7 #define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1 #define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f #define XGBE_SPEED_2500_BLWC 1 #define XGBE_SPEED_2500_CDR 0x2 #define XGBE_SPEED_2500_PLL 0x0 #define XGBE_SPEED_2500_PQ 0xa #define XGBE_SPEED_2500_RATE 0x1 #define XGBE_SPEED_2500_TXAMP 0xf #define XGBE_SPEED_2500_WORD 0x1 #define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3 #define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0 #define XGBE_SPEED_1000_BLWC 1 #define XGBE_SPEED_1000_CDR 0x2 #define XGBE_SPEED_1000_PLL 0x0 #define XGBE_SPEED_1000_PQ 0xa #define XGBE_SPEED_1000_RATE 0x3 #define 
XGBE_SPEED_1000_TXAMP 0xf #define XGBE_SPEED_1000_WORD 0x1 #define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3 #define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0 +/* TSO related macros */ +#define XGBE_TSO_MAX_SIZE UINT16_MAX + +/* MDIO port types */ +#define XGMAC_MAX_C22_PORT 3 + +/* Link mode bit operations */ +#define XGBE_ZERO_SUP(_phy) \ + ((_phy)->supported = 0) + +#define XGBE_SET_SUP(_phy, _mode) \ + ((_phy)->supported |= SUPPORTED_##_mode) + +#define XGBE_CLR_SUP(_phy, _mode) \ + ((_phy)->supported &= ~SUPPORTED_##_mode) + +#define XGBE_IS_SUP(_phy, _mode) \ + ((_phy)->supported & SUPPORTED_##_mode) + +#define XGBE_ZERO_ADV(_phy) \ + ((_phy)->advertising = 0) + +#define XGBE_SET_ADV(_phy, _mode) \ + ((_phy)->advertising |= ADVERTISED_##_mode) + +#define XGBE_CLR_ADV(_phy, _mode) \ + ((_phy)->advertising &= ~ADVERTISED_##_mode) + +#define XGBE_ADV(_phy, _mode) \ + ((_phy)->advertising & ADVERTISED_##_mode) + +#define XGBE_ZERO_LP_ADV(_phy) \ + ((_phy)->lp_advertising = 0) + +#define XGBE_SET_LP_ADV(_phy, _mode) \ + ((_phy)->lp_advertising |= ADVERTISED_##_mode) + +#define XGBE_CLR_LP_ADV(_phy, _mode) \ + ((_phy)->lp_advertising &= ~ADVERTISED_##_mode) + +#define XGBE_LP_ADV(_phy, _mode) \ + ((_phy)->lp_advertising & ADVERTISED_##_mode) + +#define XGBE_LM_COPY(_dphy, _dname, _sphy, _sname) \ + ((_dphy)->_dname = (_sphy)->_sname) + struct xgbe_prv_data; struct xgbe_packet_data { struct mbuf *m; unsigned int attributes; unsigned int errors; unsigned int rdesc_count; unsigned int length; - u64 rx_tstamp; + unsigned int header_len; + unsigned int tcp_header_len; + unsigned int tcp_payload_len; + unsigned short mss; + + unsigned short vlan_ctag; + + uint64_t rx_tstamp; unsigned int tx_packets; unsigned int tx_bytes; + + uint32_t rss_hash; + uint32_t rss_hash_type; }; /* Common Rx and Tx descriptor mapping */ struct xgbe_ring_desc { __le32 desc0; __le32 desc1; __le32 desc2; __le32 desc3; }; /* Tx-related ring data */ struct xgbe_tx_ring_data { unsigned int packets; /* BQL packet 
count */ unsigned int bytes; /* BQL byte count */ }; /* Rx-related ring data */ struct xgbe_rx_ring_data { unsigned short hdr_len; /* Length of received header */ unsigned short len; /* Length of received packet */ }; /* Structure used to hold information related to the descriptor * and the packet associated with the descriptor (always use * use the XGBE_GET_DESC_DATA macro to access this data from the ring) */ struct xgbe_ring_data { struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */ bus_addr_t rdata_paddr; - bus_dma_tag_t mbuf_dmat; - bus_dmamap_t mbuf_map; - bus_addr_t mbuf_hdr_paddr; - bus_addr_t mbuf_data_paddr; - bus_size_t mbuf_len; - - int mbuf_free; - struct mbuf *mb; - struct xgbe_tx_ring_data tx; /* Tx-related data */ struct xgbe_rx_ring_data rx; /* Rx-related data */ + + + /* Incomplete receive save location. If the budget is exhausted + * or the last descriptor (last normal descriptor or a following + * context descriptor) has not been DMA'd yet the current state + * of the receive processing needs to be saved. 
+ */ + unsigned int state_saved; + struct { + struct mbuf *m; + unsigned int len; + unsigned int error; + } state; + }; struct xgbe_ring { /* Ring lock - used just for TX rings at the moment */ spinlock_t lock; /* Per packet related information */ struct xgbe_packet_data packet_data; /* Virtual/DMA addresses and count of allocated descriptor memory */ struct xgbe_ring_desc *rdesc; - bus_dmamap_t rdesc_map; - bus_dma_tag_t rdesc_dmat; bus_addr_t rdesc_paddr; unsigned int rdesc_count; - bus_dma_tag_t mbuf_dmat; - bus_dmamap_t mbuf_map; - /* Array of descriptor data corresponding the descriptor memory * (always use the XGBE_GET_DESC_DATA macro to access this data) */ struct xgbe_ring_data *rdata; /* Ring index values * cur - Tx: index of descriptor to be used for current transfer - * Rx: index of descriptor to check for packet availability + * Rx: index of descriptor to check for packet availability * dirty - Tx: index of descriptor to check for transfer complete - * Rx: index of descriptor to check for buffer reallocation + * Rx: index of descriptor to check for buffer reallocation */ unsigned int cur; unsigned int dirty; /* Coalesce frame count used for interrupt bit setting */ unsigned int coalesce_count; union { struct { unsigned int queue_stopped; unsigned int xmit_more; unsigned short cur_mss; unsigned short cur_vlan_ctag; } tx; }; + + uint16_t prev_pidx; + uint8_t prev_count; + } __aligned(CACHE_LINE_SIZE); /* Structure used to describe the descriptor rings associated with * a DMA channel. 
*/ struct xgbe_channel { char name[16]; /* Address of private data area for device */ struct xgbe_prv_data *pdata; /* Queue index and base address of queue's DMA registers */ unsigned int queue_index; bus_space_tag_t dma_tag; bus_space_handle_t dma_handle; + int dma_irq_rid; /* Per channel interrupt irq number */ struct resource *dma_irq_res; void *dma_irq_tag; + /* Per channel interrupt enablement tracker */ + unsigned int curr_ier; unsigned int saved_ier; struct xgbe_ring *tx_ring; struct xgbe_ring *rx_ring; } __aligned(CACHE_LINE_SIZE); enum xgbe_state { XGBE_DOWN, XGBE_LINK_INIT, XGBE_LINK_ERR, + XGBE_STOPPED, }; enum xgbe_int { XGMAC_INT_DMA_CH_SR_TI, XGMAC_INT_DMA_CH_SR_TPS, XGMAC_INT_DMA_CH_SR_TBU, XGMAC_INT_DMA_CH_SR_RI, XGMAC_INT_DMA_CH_SR_RBU, XGMAC_INT_DMA_CH_SR_RPS, XGMAC_INT_DMA_CH_SR_TI_RI, XGMAC_INT_DMA_CH_SR_FBE, XGMAC_INT_DMA_ALL, }; enum xgbe_int_state { XGMAC_INT_STATE_SAVE, XGMAC_INT_STATE_RESTORE, }; +enum xgbe_ecc_sec { + XGBE_ECC_SEC_TX, + XGBE_ECC_SEC_RX, + XGBE_ECC_SEC_DESC, +}; + enum xgbe_speed { XGBE_SPEED_1000 = 0, XGBE_SPEED_2500, XGBE_SPEED_10000, XGBE_SPEEDS, }; +enum xgbe_xpcs_access { + XGBE_XPCS_ACCESS_V1 = 0, + XGBE_XPCS_ACCESS_V2, +}; + +enum xgbe_an_mode { + XGBE_AN_MODE_CL73 = 0, + XGBE_AN_MODE_CL73_REDRV, + XGBE_AN_MODE_CL37, + XGBE_AN_MODE_CL37_SGMII, + XGBE_AN_MODE_NONE, +}; + enum xgbe_an { XGBE_AN_READY = 0, XGBE_AN_PAGE_RECEIVED, XGBE_AN_INCOMPAT_LINK, XGBE_AN_COMPLETE, XGBE_AN_NO_LINK, XGBE_AN_ERROR, }; enum xgbe_rx { XGBE_RX_BPA = 0, XGBE_RX_XNP, XGBE_RX_COMPLETE, XGBE_RX_ERROR, }; enum xgbe_mode { XGBE_MODE_KR = 0, XGBE_MODE_KX, + XGBE_MODE_KX_1000, + XGBE_MODE_KX_2500, + XGBE_MODE_X, + XGBE_MODE_SGMII_100, + XGBE_MODE_SGMII_1000, + XGBE_MODE_SFI, + XGBE_MODE_UNKNOWN, }; enum xgbe_speedset { XGBE_SPEEDSET_1000_10000 = 0, XGBE_SPEEDSET_2500_10000, }; +enum xgbe_mdio_mode { + XGBE_MDIO_MODE_NONE = 0, + XGBE_MDIO_MODE_CL22, + XGBE_MDIO_MODE_CL45, +}; + struct xgbe_phy { - u32 supported; - u32 advertising; - u32 
lp_advertising; + uint32_t supported; + uint32_t advertising; + uint32_t lp_advertising; int address; int autoneg; int speed; int duplex; int link; int pause_autoneg; int tx_pause; int rx_pause; + + int pause; + int asym_pause; +}; + +enum xgbe_i2c_cmd { + XGBE_I2C_CMD_READ = 0, + XGBE_I2C_CMD_WRITE, +}; + +struct xgbe_i2c_op { + enum xgbe_i2c_cmd cmd; + + unsigned int target; + + void *buf; + unsigned int len; +}; + +struct xgbe_i2c_op_state { + struct xgbe_i2c_op *op; + + unsigned int tx_len; + unsigned char *tx_buf; + + unsigned int rx_len; + unsigned char *rx_buf; + + unsigned int tx_abort_source; + + int ret; +}; + +struct xgbe_i2c { + unsigned int started; + unsigned int max_speed_mode; + unsigned int rx_fifo_size; + unsigned int tx_fifo_size; + + struct xgbe_i2c_op_state op_state; }; struct xgbe_mmc_stats { /* Tx Stats */ - u64 txoctetcount_gb; - u64 txframecount_gb; - u64 txbroadcastframes_g; - u64 txmulticastframes_g; - u64 tx64octets_gb; - u64 tx65to127octets_gb; - u64 tx128to255octets_gb; - u64 tx256to511octets_gb; - u64 tx512to1023octets_gb; - u64 tx1024tomaxoctets_gb; - u64 txunicastframes_gb; - u64 txmulticastframes_gb; - u64 txbroadcastframes_gb; - u64 txunderflowerror; - u64 txoctetcount_g; - u64 txframecount_g; - u64 txpauseframes; - u64 txvlanframes_g; + uint64_t txoctetcount_gb; + uint64_t txframecount_gb; + uint64_t txbroadcastframes_g; + uint64_t txmulticastframes_g; + uint64_t tx64octets_gb; + uint64_t tx65to127octets_gb; + uint64_t tx128to255octets_gb; + uint64_t tx256to511octets_gb; + uint64_t tx512to1023octets_gb; + uint64_t tx1024tomaxoctets_gb; + uint64_t txunicastframes_gb; + uint64_t txmulticastframes_gb; + uint64_t txbroadcastframes_gb; + uint64_t txunderflowerror; + uint64_t txoctetcount_g; + uint64_t txframecount_g; + uint64_t txpauseframes; + uint64_t txvlanframes_g; /* Rx Stats */ - u64 rxframecount_gb; - u64 rxoctetcount_gb; - u64 rxoctetcount_g; - u64 rxbroadcastframes_g; - u64 rxmulticastframes_g; - u64 rxcrcerror; - u64 
rxrunterror; - u64 rxjabbererror; - u64 rxundersize_g; - u64 rxoversize_g; - u64 rx64octets_gb; - u64 rx65to127octets_gb; - u64 rx128to255octets_gb; - u64 rx256to511octets_gb; - u64 rx512to1023octets_gb; - u64 rx1024tomaxoctets_gb; - u64 rxunicastframes_g; - u64 rxlengtherror; - u64 rxoutofrangetype; - u64 rxpauseframes; - u64 rxfifooverflow; - u64 rxvlanframes_gb; - u64 rxwatchdogerror; + uint64_t rxframecount_gb; + uint64_t rxoctetcount_gb; + uint64_t rxoctetcount_g; + uint64_t rxbroadcastframes_g; + uint64_t rxmulticastframes_g; + uint64_t rxcrcerror; + uint64_t rxrunterror; + uint64_t rxjabbererror; + uint64_t rxundersize_g; + uint64_t rxoversize_g; + uint64_t rx64octets_gb; + uint64_t rx65to127octets_gb; + uint64_t rx128to255octets_gb; + uint64_t rx256to511octets_gb; + uint64_t rx512to1023octets_gb; + uint64_t rx1024tomaxoctets_gb; + uint64_t rxunicastframes_g; + uint64_t rxlengtherror; + uint64_t rxoutofrangetype; + uint64_t rxpauseframes; + uint64_t rxfifooverflow; + uint64_t rxvlanframes_gb; + uint64_t rxwatchdogerror; }; struct xgbe_ext_stats { - u64 tx_tso_packets; - u64 rx_split_header_packets; - u64 rx_buffer_unavailable; + uint64_t tx_tso_packets; + uint64_t rx_split_header_packets; + uint64_t rx_buffer_unavailable; + + uint64_t txq_packets[XGBE_MAX_DMA_CHANNELS]; + uint64_t txq_bytes[XGBE_MAX_DMA_CHANNELS]; + uint64_t rxq_packets[XGBE_MAX_DMA_CHANNELS]; + uint64_t rxq_bytes[XGBE_MAX_DMA_CHANNELS]; + + uint64_t tx_vxlan_packets; + uint64_t rx_vxlan_packets; + uint64_t rx_csum_errors; + uint64_t rx_vxlan_csum_errors; }; struct xgbe_hw_if { int (*tx_complete)(struct xgbe_ring_desc *); - int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr); + int (*set_mac_address)(struct xgbe_prv_data *, uint8_t *addr); int (*config_rx_mode)(struct xgbe_prv_data *); int (*enable_rx_csum)(struct xgbe_prv_data *); int (*disable_rx_csum)(struct xgbe_prv_data *); int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *); int (*disable_rx_vlan_stripping)(struct 
xgbe_prv_data *); int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *); int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *); int (*update_vlan_hash_table)(struct xgbe_prv_data *); int (*read_mmd_regs)(struct xgbe_prv_data *, int, int); void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int); - int (*set_gmii_speed)(struct xgbe_prv_data *); - int (*set_gmii_2500_speed)(struct xgbe_prv_data *); - int (*set_xgmii_speed)(struct xgbe_prv_data *); + int (*set_speed)(struct xgbe_prv_data *, int); + + int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int, + enum xgbe_mdio_mode); + int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int); + int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, uint16_t); + + int (*set_gpio)(struct xgbe_prv_data *, unsigned int); + int (*clr_gpio)(struct xgbe_prv_data *, unsigned int); void (*enable_tx)(struct xgbe_prv_data *); void (*disable_tx)(struct xgbe_prv_data *); void (*enable_rx)(struct xgbe_prv_data *); void (*disable_rx)(struct xgbe_prv_data *); void (*powerup_tx)(struct xgbe_prv_data *); void (*powerdown_tx)(struct xgbe_prv_data *); void (*powerup_rx)(struct xgbe_prv_data *); void (*powerdown_rx)(struct xgbe_prv_data *); int (*init)(struct xgbe_prv_data *); int (*exit)(struct xgbe_prv_data *); int (*enable_int)(struct xgbe_channel *, enum xgbe_int); int (*disable_int)(struct xgbe_channel *, enum xgbe_int); - void (*dev_xmit)(struct xgbe_channel *); int (*dev_read)(struct xgbe_channel *); void (*tx_desc_init)(struct xgbe_channel *); void (*rx_desc_init)(struct xgbe_channel *); void (*tx_desc_reset)(struct xgbe_ring_data *); - void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *, - unsigned int); int (*is_last_desc)(struct xgbe_ring_desc *); int (*is_context_desc)(struct xgbe_ring_desc *); - void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *); /* For FLOW ctrl */ int (*config_tx_flow_control)(struct xgbe_prv_data *); int (*config_rx_flow_control)(struct xgbe_prv_data *); /* 
For RX coalescing */ int (*config_rx_coalesce)(struct xgbe_prv_data *); int (*config_tx_coalesce)(struct xgbe_prv_data *); unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int); unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int); /* For RX and TX threshold config */ int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int); int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int); /* For RX and TX Store and Forward Mode config */ int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int); int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int); /* For TX DMA Operate on Second Frame config */ int (*config_osp_mode)(struct xgbe_prv_data *); - /* For RX and TX PBL config */ - int (*config_rx_pbl_val)(struct xgbe_prv_data *); - int (*get_rx_pbl_val)(struct xgbe_prv_data *); - int (*config_tx_pbl_val)(struct xgbe_prv_data *); - int (*get_tx_pbl_val)(struct xgbe_prv_data *); - int (*config_pblx8)(struct xgbe_prv_data *); - /* For MMC statistics */ void (*rx_mmc_int)(struct xgbe_prv_data *); void (*tx_mmc_int)(struct xgbe_prv_data *); void (*read_mmc_stats)(struct xgbe_prv_data *); /* For Receive Side Scaling */ + int (*enable_rss)(struct xgbe_prv_data *); int (*disable_rss)(struct xgbe_prv_data *); + int (*set_rss_hash_key)(struct xgbe_prv_data *, const uint8_t *); + int (*set_rss_lookup_table)(struct xgbe_prv_data *, const uint32_t *); +}; + +/* This structure represents implementation specific routines for an + * implementation of a PHY. All routines are required unless noted below. 
+ * Optional routines: + * an_pre, an_post + * kr_training_pre, kr_training_post + * module_info, module_eeprom + */ +struct xgbe_phy_impl_if { + /* Perform Setup/teardown actions */ + int (*init)(struct xgbe_prv_data *); + void (*exit)(struct xgbe_prv_data *); + + /* Perform start/stop specific actions */ + int (*reset)(struct xgbe_prv_data *); + int (*start)(struct xgbe_prv_data *); + void (*stop)(struct xgbe_prv_data *); + + /* Return the link status */ + int (*link_status)(struct xgbe_prv_data *, int *); + + /* Indicate if a particular speed is valid */ + bool (*valid_speed)(struct xgbe_prv_data *, int); + + /* Check if the specified mode can/should be used */ + bool (*use_mode)(struct xgbe_prv_data *, enum xgbe_mode); + /* Switch the PHY into various modes */ + void (*set_mode)(struct xgbe_prv_data *, enum xgbe_mode); + /* Retrieve mode needed for a specific speed */ + enum xgbe_mode (*get_mode)(struct xgbe_prv_data *, int); + /* Retrieve new/next mode when trying to auto-negotiate */ + enum xgbe_mode (*switch_mode)(struct xgbe_prv_data *); + /* Retrieve current mode */ + enum xgbe_mode (*cur_mode)(struct xgbe_prv_data *); + /* Retrieve interface sub-type */ + void (*get_type)(struct xgbe_prv_data *, struct ifmediareq *); + + /* Retrieve current auto-negotiation mode */ + enum xgbe_an_mode (*an_mode)(struct xgbe_prv_data *); + + /* Configure auto-negotiation settings */ + int (*an_config)(struct xgbe_prv_data *); + + /* Set/override auto-negotiation advertisement settings */ + void (*an_advertising)(struct xgbe_prv_data *, + struct xgbe_phy *); + + /* Process results of auto-negotiation */ + enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); + + /* Pre/Post auto-negotiation support */ + void (*an_pre)(struct xgbe_prv_data *); + void (*an_post)(struct xgbe_prv_data *); + + /* Pre/Post KR training enablement support */ + void (*kr_training_pre)(struct xgbe_prv_data *); + void (*kr_training_post)(struct xgbe_prv_data *); + + /* SFP module related info */ + int 
(*module_info)(struct xgbe_prv_data *pdata); + int (*module_eeprom)(struct xgbe_prv_data *pdata); }; struct xgbe_phy_if { - /* For initial PHY setup */ - void (*phy_init)(struct xgbe_prv_data *); + /* For PHY setup/teardown */ + int (*phy_init)(struct xgbe_prv_data *); + void (*phy_exit)(struct xgbe_prv_data *); /* For PHY support when setting device up/down */ int (*phy_reset)(struct xgbe_prv_data *); int (*phy_start)(struct xgbe_prv_data *); void (*phy_stop)(struct xgbe_prv_data *); /* For PHY support while device is up */ void (*phy_status)(struct xgbe_prv_data *); int (*phy_config_aneg)(struct xgbe_prv_data *); + + /* For PHY settings validation */ + bool (*phy_valid_speed)(struct xgbe_prv_data *, int); + + /* For single interrupt support */ + void (*an_isr)(struct xgbe_prv_data *); + + /* PHY implementation specific services */ + struct xgbe_phy_impl_if phy_impl; +}; + +struct xgbe_i2c_if { + /* For initial I2C setup */ + int (*i2c_init)(struct xgbe_prv_data *); + + /* For I2C support when setting device up/down */ + int (*i2c_start)(struct xgbe_prv_data *); + void (*i2c_stop)(struct xgbe_prv_data *); + + /* For performing I2C operations */ + int (*i2c_xfer)(struct xgbe_prv_data *, struct xgbe_i2c_op *); + + /* For single interrupt support */ + void (*i2c_isr)(struct xgbe_prv_data *); }; struct xgbe_desc_if { int (*alloc_ring_resources)(struct xgbe_prv_data *); void (*free_ring_resources)(struct xgbe_prv_data *); int (*map_tx_skb)(struct xgbe_channel *, struct mbuf *); int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *, struct xgbe_ring_data *); void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *); void (*wrapper_tx_desc_init)(struct xgbe_prv_data *); void (*wrapper_rx_desc_init)(struct xgbe_prv_data *); }; /* This structure contains flags that indicate what hardware features * or configurations are present in the device. 
*/ struct xgbe_hw_features { /* HW Version */ unsigned int version; /* HW Feature Register0 */ unsigned int gmii; /* 1000 Mbps support */ unsigned int vlhash; /* VLAN Hash Filter */ unsigned int sma; /* SMA(MDIO) Interface */ unsigned int rwk; /* PMT remote wake-up packet */ unsigned int mgk; /* PMT magic packet */ unsigned int mmc; /* RMON module */ unsigned int aoe; /* ARP Offload */ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ unsigned int eee; /* Energy Efficient Ethernet */ unsigned int tx_coe; /* Tx Checksum Offload */ unsigned int rx_coe; /* Rx Checksum Offload */ unsigned int addn_mac; /* Additional MAC Addresses */ unsigned int ts_src; /* Timestamp Source */ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + unsigned int vxn; /* VXLAN/NVGRE */ /* HW Feature Register1 */ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ unsigned int adv_ts_hi; /* Advance Timestamping High Word */ unsigned int dma_width; /* DMA width */ unsigned int dcb; /* DCB Feature */ unsigned int sph; /* Split Header Feature */ unsigned int tso; /* TCP Segmentation Offload */ unsigned int dma_debug; /* DMA Debug Registers */ unsigned int rss; /* Receive Side Scaling */ unsigned int tc_cnt; /* Number of Traffic Classes */ unsigned int hash_table_size; /* Hash Table Size */ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ /* HW Feature Register2 */ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ unsigned int pps_out_num; /* Number of PPS outputs */ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ }; +struct xgbe_version_data { + void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *); + enum xgbe_xpcs_access xpcs_access; + unsigned int mmc_64bit; + unsigned int tx_max_fifo_size; + 
unsigned int rx_max_fifo_size; + unsigned int tx_tstamp_workaround; + unsigned int ecc_support; + unsigned int i2c_support; + unsigned int irq_reissue_support; + unsigned int tx_desc_prefetch; + unsigned int rx_desc_prefetch; + unsigned int an_cdr_workaround; +}; + struct xgbe_prv_data { struct ifnet *netdev; + struct platform_device *pdev; struct acpi_device *adev; device_t dev; + /* Version related data */ + struct xgbe_version_data *vdata; + /* ACPI or DT flag */ unsigned int use_acpi; /* XGMAC/XPCS related mmio registers */ struct resource *xgmac_res; /* XGMAC CSRs */ struct resource *xpcs_res; /* XPCS MMD registers */ struct resource *rxtx_res; /* SerDes Rx/Tx CSRs */ struct resource *sir0_res; /* SerDes integration registers (1/2) */ struct resource *sir1_res; /* SerDes integration registers (2/2) */ + /* Port property registers */ + unsigned int pp0; + unsigned int pp1; + unsigned int pp2; + unsigned int pp3; + unsigned int pp4; + /* DMA tag */ bus_dma_tag_t dmat; /* XPCS indirect addressing lock */ spinlock_t xpcs_lock; + unsigned int xpcs_window_def_reg; + unsigned int xpcs_window_sel_reg; + unsigned int xpcs_window; + unsigned int xpcs_window_size; + unsigned int xpcs_window_mask; + + /* RSS addressing mutex */ + struct mtx rss_mutex; /* Flags representing xgbe_state */ unsigned long dev_state; - struct resource *dev_irq_res; - struct resource *chan_irq_res[4]; + /* ECC support */ + unsigned long tx_sec_period; + unsigned long tx_ded_period; + unsigned long rx_sec_period; + unsigned long rx_ded_period; + unsigned long desc_sec_period; + unsigned long desc_ded_period; + + unsigned int tx_sec_count; + unsigned int tx_ded_count; + unsigned int rx_sec_count; + unsigned int rx_ded_count; + unsigned int desc_ded_count; + unsigned int desc_sec_count; + + struct if_irq dev_irq; + + struct resource *dev_irq_res; + struct resource *ecc_irq_res; + struct resource *i2c_irq_res; + struct resource *an_irq_res; + + int ecc_rid; + int i2c_rid; + int an_rid; + void 
*dev_irq_tag; + void *ecc_irq_tag; + void *i2c_irq_tag; + void *an_irq_tag; + + struct resource *chan_irq_res[XGBE_MAX_DMA_CHANNELS]; + unsigned int per_channel_irq; + unsigned int irq_count; + unsigned int channel_irq_count; + unsigned int channel_irq_mode; + char ecc_name[IFNAMSIZ + 32]; + + unsigned int isr_as_tasklet; struct xgbe_hw_if hw_if; struct xgbe_phy_if phy_if; struct xgbe_desc_if desc_if; + struct xgbe_i2c_if i2c_if; /* AXI DMA settings */ unsigned int coherent; - unsigned int axdomain; - unsigned int arcache; - unsigned int awcache; + unsigned int arcr; + unsigned int awcr; + unsigned int awarcr; /* Service routine support */ struct taskqueue *dev_workqueue; struct task service_work; struct callout service_timer; + struct mtx timer_mutex; /* Rings for Tx/Rx on a DMA channel */ - struct xgbe_channel *channel; + struct xgbe_channel *channel[XGBE_MAX_DMA_CHANNELS]; + unsigned int tx_max_channel_count; + unsigned int rx_max_channel_count; + unsigned int total_channel_count; unsigned int channel_count; unsigned int tx_ring_count; unsigned int tx_desc_count; unsigned int rx_ring_count; unsigned int rx_desc_count; + unsigned int new_tx_ring_count; + unsigned int new_rx_ring_count; + + unsigned int tx_max_q_count; + unsigned int rx_max_q_count; unsigned int tx_q_count; unsigned int rx_q_count; /* Tx/Rx common settings */ - unsigned int pblx8; + unsigned int blen; + unsigned int pbl; + unsigned int aal; + unsigned int rd_osr_limit; + unsigned int wr_osr_limit; /* Tx settings */ unsigned int tx_sf_mode; unsigned int tx_threshold; - unsigned int tx_pbl; unsigned int tx_osp_mode; + unsigned int tx_max_fifo_size; /* Rx settings */ unsigned int rx_sf_mode; unsigned int rx_threshold; - unsigned int rx_pbl; + unsigned int rx_max_fifo_size; /* Tx coalescing settings */ unsigned int tx_usecs; unsigned int tx_frames; /* Rx coalescing settings */ unsigned int rx_riwt; unsigned int rx_usecs; unsigned int rx_frames; /* Current Rx buffer size */ unsigned int rx_buf_size; /* 
Flow control settings */ unsigned int pause_autoneg; unsigned int tx_pause; unsigned int rx_pause; + unsigned int rx_rfa[XGBE_MAX_QUEUES]; + unsigned int rx_rfd[XGBE_MAX_QUEUES]; /* Receive Side Scaling settings */ - u8 rss_key[XGBE_RSS_HASH_KEY_SIZE]; - u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE]; - u32 rss_options; + uint8_t rss_key[XGBE_RSS_HASH_KEY_SIZE]; + uint32_t rss_table[XGBE_RSS_MAX_TABLE_SIZE]; + uint32_t rss_options; + unsigned int enable_rss; + + /* VXLAN settings */ + unsigned int vxlan_port_set; + unsigned int vxlan_offloads_set; + unsigned int vxlan_force_disable; + unsigned int vxlan_port_count; + uint16_t vxlan_port; + uint64_t vxlan_features; /* Netdev related settings */ unsigned char mac_addr[ETH_ALEN]; + uint64_t netdev_features; struct xgbe_mmc_stats mmc_stats; struct xgbe_ext_stats ext_stats; + /* Filtering support */ + bitstr_t *active_vlans; + unsigned int num_active_vlans; + /* Device clocks */ struct clk *sysclk; unsigned long sysclk_rate; struct clk *ptpclk; unsigned long ptpclk_rate; /* DCB support */ unsigned int q2tc_map[XGBE_MAX_QUEUES]; unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS]; - u8 num_tcs; /* Hardware features of the device */ struct xgbe_hw_features hw_feat; - /* Device restart work structure */ + /* Device work structure */ struct task restart_work; + struct task stopdev_work; /* Keeps track of power mode */ unsigned int power_down; /* Network interface message level setting */ - u32 msg_enable; + uint32_t msg_enable; /* Current PHY settings */ int phy_link; int phy_speed; /* MDIO/PHY related settings */ + unsigned int phy_started; + void *phy_data; struct xgbe_phy phy; int mdio_mmd; unsigned long link_check; + struct mtx mdio_mutex; + unsigned int mdio_addr; + + unsigned int kr_redrv; char an_name[IFNAMSIZ + 32]; + struct taskqueue *an_workqueue; - struct resource *an_irq_res; - void *an_irq_tag; + struct task an_irq_work; unsigned int speed_set; /* SerDes UEFI configurable settings. 
* Switching between modes/speeds requires new values for some * SerDes settings. The values can be supplied as device * properties in array format. The first array entry is for * 1GbE, second for 2.5GbE and third for 10GbE */ - u32 serdes_blwc[XGBE_SPEEDS]; - u32 serdes_cdr_rate[XGBE_SPEEDS]; - u32 serdes_pq_skew[XGBE_SPEEDS]; - u32 serdes_tx_amp[XGBE_SPEEDS]; - u32 serdes_dfe_tap_cfg[XGBE_SPEEDS]; - u32 serdes_dfe_tap_ena[XGBE_SPEEDS]; + uint32_t serdes_blwc[XGBE_SPEEDS]; + uint32_t serdes_cdr_rate[XGBE_SPEEDS]; + uint32_t serdes_pq_skew[XGBE_SPEEDS]; + uint32_t serdes_tx_amp[XGBE_SPEEDS]; + uint32_t serdes_dfe_tap_cfg[XGBE_SPEEDS]; + uint32_t serdes_dfe_tap_ena[XGBE_SPEEDS]; /* Auto-negotiation state machine support */ unsigned int an_int; + unsigned int an_status; struct sx an_mutex; enum xgbe_an an_result; enum xgbe_an an_state; enum xgbe_rx kr_state; enum xgbe_rx kx_state; + struct task an_work; + unsigned int an_again; unsigned int an_supported; unsigned int parallel_detect; unsigned int fec_ability; unsigned long an_start; + enum xgbe_an_mode an_mode; + + /* I2C support */ + struct xgbe_i2c i2c; + struct mtx i2c_mutex; + bool i2c_complete; unsigned int lpm_ctrl; /* CTRL1 for resume */ + unsigned int an_cdr_track_early; + + uint64_t features; + + device_t axgbe_miibus; + unsigned int sysctl_xgmac_reg; + unsigned int sysctl_xpcs_mmd; + unsigned int sysctl_xpcs_reg; + + unsigned int sysctl_xprop_reg; + unsigned int sysctl_xi2c_reg; + + bool sysctl_an_cdr_workaround; + bool sysctl_an_cdr_track_early; + + int pcie_bus; /* PCIe bus number */ + int pcie_device; /* PCIe device/slot number */ + int pcie_func; /* PCIe function number */ + + void *sys_op; + uint64_t use_adaptive_rx_coalesce; + uint64_t use_adaptive_tx_coalesce; + uint64_t rx_coalesce_usecs; + + unsigned int debug_level; }; -/* Function prototypes*/ +struct axgbe_if_softc { + struct xgbe_prv_data pdata; + if_softc_ctx_t scctx; + if_shared_ctx_t sctx; + if_ctx_t ctx; + struct ifnet *ifp; + struct ifmedia 
*media; + unsigned int link_status; +}; -int xgbe_open(struct ifnet *); -int xgbe_close(struct ifnet *); -int xgbe_xmit(struct ifnet *, struct mbuf *); -int xgbe_change_mtu(struct ifnet *, int); +/* Function prototypes*/ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *); void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *); +void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *); +void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *); void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *); +void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *); void xgbe_get_all_hw_features(struct xgbe_prv_data *); void xgbe_init_rx_coalesce(struct xgbe_prv_data *); void xgbe_init_tx_coalesce(struct xgbe_prv_data *); -/* NOTE: Uncomment for function trace log messages in KERNEL LOG */ -#if 0 -#define YDEBUG -#define YDEBUG_MDIO -#endif +int xgbe_calc_rx_buf_size(struct ifnet *netdev, unsigned int mtu); + +void axgbe_sysctl_init(struct xgbe_prv_data *pdata); +void axgbe_sysctl_exit(struct xgbe_prv_data *pdata); + +int xgbe_phy_mii_write(struct xgbe_prv_data *pdata, int addr, int reg, + uint16_t val); +int xgbe_phy_mii_read(struct xgbe_prv_data *pdata, int addr, int reg); + +void xgbe_dump_i2c_registers(struct xgbe_prv_data *); + +uint32_t bitrev32(uint32_t); /* For debug prints */ #ifdef YDEBUG -#define DBGPR(x...) printf(x) +#define DBGPR(x...) device_printf(pdata->dev, x) #else #define DBGPR(x...) do { } while (0) #endif #ifdef YDEBUG_MDIO -#define DBGPR_MDIO(x...) printf(x) +#define DBGPR_MDIO(x...) device_printf(pdata->dev, x) #else #define DBGPR_MDIO(x...) do { } while (0) #endif -#endif +#define axgbe_printf(lvl, ...) do { \ + if (lvl <= pdata->debug_level) \ + device_printf(pdata->dev, __VA_ARGS__); \ +} while (0) + +#define axgbe_error(...) 
do { \ + device_printf(pdata->dev, __VA_ARGS__); \ +} while (0) + +#endif /* __XGBE_H__ */ diff --git a/sys/dev/axgbe/xgbe_osdep.c b/sys/dev/axgbe/xgbe_osdep.c new file mode 100644 index 000000000000..fae47fcfcba2 --- /dev/null +++ b/sys/dev/axgbe/xgbe_osdep.c @@ -0,0 +1,47 @@ +/*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * + * Copyright (c) 2020 Advanced Micro Devices, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * Contact Information : + * Rajesh Kumar + * + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "xgbe.h" +#include "xgbe_osdep.h" + +/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */ +uint32_t bitrev32(uint32_t x) +{ + x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); + x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); + x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); + x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); + + return ((x >> 16) | (x << 16)); +} diff --git a/sys/dev/axgbe/xgbe_osdep.h b/sys/dev/axgbe/xgbe_osdep.h index e6d793ecc43a..b9863bbd756d 100644 --- a/sys/dev/axgbe/xgbe_osdep.h +++ b/sys/dev/axgbe/xgbe_osdep.h @@ -1,188 +1,314 @@ /*- + * SPDX-License-Identifier: BSD-2-Clause-FreeBSD + * * Copyright (c) 2016,2017 SoftIron Inc. - * All rights reserved. + * Copyright (c) 2020 Advanced Micro Devices, Inc. * * This software was developed by Andrew Turner under * the sponsorship of SoftIron Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _XGBE_OSDEP_H_ #define _XGBE_OSDEP_H_ -#include -#include #include -#include -#include -#include -#include #include -#include -#include - -#include #include #include #include +#include + +MALLOC_DECLARE(M_AXGBE); -typedef uint8_t u8; -typedef uint16_t u16; +typedef uint16_t __le16; +typedef uint16_t __be16; typedef uint32_t __le32; -typedef uint32_t u32; -typedef uint64_t u64; -typedef struct { - struct mtx lock; -} spinlock_t; +#define BIT(pos) (1ul << pos) + +#define cpu_to_be16(x) be16toh(x) +#define be16_to_cpu(x) htobe16(x) +#define lower_32_bits(x) ((x) & 0xffffffffu) +#define upper_32_bits(x) (((x) >> 32) & 0xffffffffu) +#define cpu_to_le32(x) le32toh(x) +#define le32_to_cpu(x) htole32(x) +#define cpu_to_le16(x) htole16(x) + +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); \ + (bit) = find_next_bit((addr), (size), (bit) + 1)) + +typedef struct mtx spinlock_t; static inline void spin_lock_init(spinlock_t *spinlock) { - - mtx_init(&spinlock->lock, "axgbe_spin", NULL, MTX_DEF); + mtx_init(spinlock, "axgbe_spin", NULL, MTX_SPIN); } #define spin_lock_irqsave(spinlock, flags) \ do { \ (flags) = intr_disable(); \ - mtx_lock(&(spinlock)->lock); \ + mtx_lock_spin(spinlock); \ } while (0) #define spin_unlock_irqrestore(spinlock, flags) \ do { \ - mtx_unlock(&(spinlock)->lock); \ + mtx_unlock_spin(spinlock); \ intr_restore(flags); \ } while (0) -#define BIT(pos) (1ul << 
pos) - -static inline void -clear_bit(int pos, unsigned long *p) -{ - - atomic_clear_long(p, 1ul << pos); -} - -static inline int -test_bit(int pos, unsigned long *p) -{ - unsigned long val; - - val = *p; - return ((val & 1ul << pos) != 0); -} - -static inline void -set_bit(int pos, unsigned long *p) -{ - - atomic_set_long(p, 1ul << pos); -} - -#define lower_32_bits(x) ((x) & 0xffffffffu) -#define upper_32_bits(x) (((x) >> 32) & 0xffffffffu) -#define cpu_to_le32(x) le32toh(x) -#define le32_to_cpu(x) htole32(x) - -MALLOC_DECLARE(M_AXGBE); - -#define ADVERTISED_Pause 0x01 -#define ADVERTISED_Asym_Pause 0x02 -#define ADVERTISED_Autoneg 0x04 -#define ADVERTISED_Backplane 0x08 -#define ADVERTISED_10000baseKR_Full 0x10 -#define ADVERTISED_2500baseX_Full 0x20 -#define ADVERTISED_1000baseKX_Full 0x40 +#define ADVERTISED_Pause (1 << 0) +#define ADVERTISED_Asym_Pause (1 << 1) +#define ADVERTISED_Autoneg (1 << 2) +#define ADVERTISED_Backplane (1 << 3) +#define ADVERTISED_10000baseKR_Full (1 << 4) +#define ADVERTISED_2500baseX_Full (1 << 5) +#define ADVERTISED_1000baseKX_Full (1 << 6) +#define ADVERTISED_100baseT_Full (1 << 7) +#define ADVERTISED_10000baseR_FEC (1 << 8) +#define ADVERTISED_10000baseT_Full (1 << 9) +#define ADVERTISED_2500baseT_Full (1 << 10) +#define ADVERTISED_1000baseT_Full (1 << 11) +#define ADVERTISED_TP (1 << 12) +#define ADVERTISED_FIBRE (1 << 13) +#define ADVERTISED_1000baseX_Full (1 << 14) +#define ADVERTISED_10000baseSR_Full (1 << 15) +#define ADVERTISED_10000baseLR_Full (1 << 16) +#define ADVERTISED_10000baseLRM_Full (1 << 17) +#define ADVERTISED_10000baseER_Full (1 << 18) +#define ADVERTISED_10000baseCR_Full (1 << 19) +#define ADVERTISED_100baseT_Half (1 << 20) +#define ADVERTISED_1000baseT_Half (1 << 21) + +#define SUPPORTED_Pause (1 << 0) +#define SUPPORTED_Asym_Pause (1 << 1) +#define SUPPORTED_Autoneg (1 << 2) +#define SUPPORTED_Backplane (1 << 3) +#define SUPPORTED_10000baseKR_Full (1 << 4) +#define SUPPORTED_2500baseX_Full (1 << 5) +#define 
SUPPORTED_1000baseKX_Full (1 << 6) +#define SUPPORTED_100baseT_Full (1 << 7) +#define SUPPORTED_10000baseR_FEC (1 << 8) +#define SUPPORTED_10000baseT_Full (1 << 9) +#define SUPPORTED_2500baseT_Full (1 << 10) +#define SUPPORTED_1000baseT_Full (1 << 11) +#define SUPPORTED_TP (1 << 12) +#define SUPPORTED_FIBRE (1 << 13) +#define SUPPORTED_1000baseX_Full (1 << 14) +#define SUPPORTED_10000baseSR_Full (1 << 15) +#define SUPPORTED_10000baseLR_Full (1 << 16) +#define SUPPORTED_10000baseLRM_Full (1 << 17) +#define SUPPORTED_10000baseER_Full (1 << 18) +#define SUPPORTED_10000baseCR_Full (1 << 19) +#define SUPPORTED_100baseT_Half (1 << 20) +#define SUPPORTED_1000baseT_Half (1 << 21) + +#define LPA_PAUSE_ASYM 0x0800 #define AUTONEG_DISABLE 0 #define AUTONEG_ENABLE 1 #define DUPLEX_UNKNOWN 1 #define DUPLEX_FULL 2 +#define DUPLEX_HALF 3 #define SPEED_UNKNOWN 1 #define SPEED_10000 2 #define SPEED_2500 3 #define SPEED_1000 4 - -#define SUPPORTED_Autoneg 0x01 -#define SUPPORTED_Pause 0x02 -#define SUPPORTED_Asym_Pause 0x04 -#define SUPPORTED_Backplane 0x08 -#define SUPPORTED_10000baseKR_Full 0x10 -#define SUPPORTED_1000baseKX_Full 0x20 -#define SUPPORTED_2500baseX_Full 0x40 -#define SUPPORTED_10000baseR_FEC 0x80 +#define SPEED_100 5 +#define SPEED_10 6 #define BMCR_SPEED100 0x2000 #define MDIO_MMD_PMAPMD 1 #define MDIO_MMD_PCS 3 #define MDIO_MMD_AN 7 +#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */ +#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */ + #define MDIO_PMA_10GBR_FECABLE 170 #define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 #define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 #define MII_ADDR_C45 (1<<30) #define MDIO_CTRL1 0x00 /* MII_BMCR */ #define MDIO_CTRL1_RESET 0x8000 /* BMCR_RESET */ #define MDIO_CTRL1_SPEEDSELEXT 0x2040 /* BMCR_SPEED1000|BMCR_SPEED100*/ #define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x3c) #define MDIO_AN_CTRL1_ENABLE 0x1000 /* BMCR_AUTOEN */ #define MDIO_CTRL1_LPOWER 0x0800 /* BMCR_PDOWN */ #define MDIO_AN_CTRL1_RESTART 0x0200 /* BMCR_STARTNEG */ 
#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00) #define MDIO_STAT1 1 /* MII_BMSR */ #define MDIO_STAT1_LSTATUS 0x0004 /* BMSR_LINK */ +#define MDIO_DEVID1 2 /* MII_PHYSID1 */ +#define MDIO_DEVID2 3 /* MII_PHYSID2 */ +#define MDIO_SPEED 4 +#define MDIO_DEVS1 5 +#define MDIO_DEVS2 6 #define MDIO_CTRL2 0x07 #define MDIO_PCS_CTRL2_10GBR 0x0000 #define MDIO_PCS_CTRL2_10GBX 0x0001 #define MDIO_PCS_CTRL2_TYPE 0x0003 #define MDIO_AN_ADVERTISE 16 #define MDIO_AN_LPA 19 -#define ETH_ALEN ETHER_ADDR_LEN -#define ETH_HLEN ETHER_HDR_LEN -#define ETH_FCS_LEN 4 -#define VLAN_HLEN ETHER_VLAN_ENCAP_LEN +#define ETH_ALEN ETHER_ADDR_LEN +#define ETH_HLEN ETHER_HDR_LEN +#define ETH_FCS_LEN 4 +#define VLAN_HLEN ETHER_VLAN_ENCAP_LEN +#define VLAN_NVID 4096 +#define VLAN_VID_MASK 0x0FFF + +#define CRC32_POLY_LE 0xedb88320 + +#define ARRAY_SIZE(x) nitems(x) + +#define BITS_PER_LONG (sizeof(long) * CHAR_BIT) +#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG) + +#define BITMAP_LAST_WORD_MASK(n) (~0UL >> (BITS_PER_LONG - (n))) + +#define min_t(t, a, b) MIN((t)(a), (t)(b)) +#define max_t(t, a, b) MAX((t)(a), (t)(b)) + +static inline void +clear_bit(int pos, unsigned long *p) +{ + + atomic_clear_long(p, 1ul << pos); +} + +static inline int +test_bit(int pos, unsigned long *p) +{ + unsigned long val; + + val = *p; + return ((val & 1ul << pos) != 0); +} + +static inline void +set_bit(int pos, unsigned long *p) +{ + + atomic_set_long(p, 1ul << pos); +} -#define ARRAY_SIZE(x) nitems(x) +static inline int +__ffsl(long mask) +{ -#define BITS_PER_LONG (sizeof(long) * CHAR_BIT) -#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG) + return (ffsl(mask) - 1); +} -#define NSEC_PER_SEC 1000000000ul +static inline int +fls64(uint64_t mask) +{ -#define min_t(t, a, b) MIN((t)(a), (t)(b)) -#define max_t(t, a, b) MAX((t)(a), (t)(b)) + return (flsll(mask)); +} + +static inline int +get_bitmask_order(unsigned int count) +{ + int order; + + order = fls(count); + return (order); /* We could be 
slightly more clever with -1 here... */ +} + +static inline unsigned long +find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) +{ + long mask; + int offs; + int bit; + int pos; + + if (offset >= size) + return (size); + pos = offset / BITS_PER_LONG; + offs = offset % BITS_PER_LONG; + bit = BITS_PER_LONG * pos; + addr += pos; + if (offs) { + mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs); + if (mask) + return (bit + __ffsl(mask)); + if (size - bit <= BITS_PER_LONG) + return (size); + bit += BITS_PER_LONG; + addr++; + } + for (size -= bit; size >= BITS_PER_LONG; + size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) { + if (*addr == 0) + continue; + return (bit + __ffsl(*addr)); + } + if (size) { + mask = (*addr) & BITMAP_LAST_WORD_MASK(size); + if (mask) + bit += __ffsl(mask); + else + bit += size; + } + return (bit); +} + +static inline unsigned long +find_first_bit(const unsigned long *addr, unsigned long size) +{ + long mask; + int bit; + + for (bit = 0; size >= BITS_PER_LONG; + size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) { + if (*addr == 0) + continue; + return (bit + __ffsl(*addr)); + } + if (size) { + mask = (*addr) & BITMAP_LAST_WORD_MASK(size); + if (mask) + bit += __ffsl(mask); + else + bit += size; + } + return (bit); +} #endif /* _XGBE_OSDEP_H_ */ diff --git a/sys/modules/Makefile b/sys/modules/Makefile index 9882f32bb9ce..5ebc042fb827 100644 --- a/sys/modules/Makefile +++ b/sys/modules/Makefile @@ -1,824 +1,825 @@ # $FreeBSD$ SYSDIR?=${SRCTOP}/sys .include "${SYSDIR}/conf/kern.opts.mk" SUBDIR_PARALLEL= # Modules that include binary-only blobs of microcode should be selectable by # MK_SOURCELESS_UCODE option (see below). 
.include "${SYSDIR}/conf/config.mk" .if defined(MODULES_OVERRIDE) && !defined(ALL_MODULES) SUBDIR=${MODULES_OVERRIDE} .else SUBDIR= \ ${_3dfx} \ ${_3dfx_linux} \ ${_aac} \ ${_aacraid} \ accf_data \ accf_dns \ accf_http \ acl_nfs4 \ acl_posix1e \ ${_acpi} \ ae \ ${_aesni} \ age \ ${_agp} \ ahci \ aic7xxx \ alc \ ale \ alq \ ${_amd_ecc_inject} \ ${_amdgpio} \ ${_amdsbwd} \ ${_amdsmn} \ ${_amdtemp} \ amr \ ${_an} \ ${_aout} \ ${_arcmsr} \ ${_allwinner} \ ${_armv8crypto} \ ${_asmc} \ ata \ ath \ ath_dfs \ ath_hal \ ath_hal_ar5210 \ ath_hal_ar5211 \ ath_hal_ar5212 \ ath_hal_ar5416 \ ath_hal_ar9300 \ ath_main \ ath_rate \ ath_pci \ ${_autofs} \ + axgbe \ backlight \ ${_bce} \ ${_bcm283x_clkman} \ ${_bcm283x_pwm} \ bfe \ bge \ bhnd \ ${_bxe} \ ${_bios} \ ${_blake2} \ bnxt \ bridgestp \ bwi \ bwn \ ${_bytgpio} \ ${_chvgpio} \ cam \ ${_cardbus} \ ${_carp} \ cas \ ${_cbb} \ cc \ ${_ccp} \ cd9660 \ cd9660_iconv \ ${_ce} \ ${_cfi} \ ${_chromebook_platform} \ ${_ciss} \ cloudabi \ ${_cloudabi32} \ ${_cloudabi64} \ ${_cmx} \ ${_coretemp} \ ${_cp} \ ${_cpsw} \ ${_cpuctl} \ ${_cpufreq} \ ${_crypto} \ ${_cryptodev} \ ctl \ ${_cxgb} \ ${_cxgbe} \ dc \ dcons \ dcons_crom \ ${_dpms} \ dummynet \ ${_efirt} \ ${_em} \ ${_ena} \ esp \ ${_et} \ evdev \ ${_exca} \ ext2fs \ fdc \ fdescfs \ ${_ffec} \ filemon \ firewire \ firmware \ fusefs \ ${_fxp} \ gem \ geom \ ${_glxiic} \ ${_glxsb} \ gpio \ hifn \ hme \ ${_hpt27xx} \ ${_hptiop} \ ${_hptmv} \ ${_hptnr} \ ${_hptrr} \ hwpmc \ ${_hwpmc_mips24k} \ ${_hwpmc_mips74k} \ ${_hyperv} \ i2c \ ${_iavf} \ ${_ibcore} \ ${_ichwd} \ ${_ice} \ ${_ice_ddp} \ ${_ida} \ if_bridge \ if_disc \ if_edsc \ ${_if_enc} \ if_epair \ ${_if_gif} \ ${_if_gre} \ ${_if_me} \ if_lagg \ ${_if_ndis} \ ${_if_stf} \ if_tuntap \ if_vlan \ if_vxlan \ iflib \ ${_iir} \ imgact_binmisc \ ${_intelspi} \ ${_io} \ ${_ioat} \ ${_ipoib} \ ${_ipdivert} \ ${_ipfilter} \ ${_ipfw} \ ipfw_nat \ ${_ipfw_nat64} \ ${_ipfw_nptv6} \ ${_ipfw_pmod} \ ${_ipmi} \ ip6_mroute_mod \ ip_mroute_mod \ 
${_ips} \ ${_ipsec} \ ${_ipw} \ ${_ipwfw} \ ${_isci} \ ${_iser} \ isp \ ${_ispfw} \ ${_itwd} \ ${_iwi} \ ${_iwifw} \ ${_iwm} \ ${_iwmfw} \ ${_iwn} \ ${_iwnfw} \ ${_ix} \ ${_ixv} \ ${_ixl} \ jme \ kbdmux \ kgssapi \ kgssapi_krb5 \ khelp \ krpc \ ksyms \ ${_ktls_ocf} \ le \ lge \ libalias \ libiconv \ libmchain \ lindebugfs \ linuxkpi \ ${_lio} \ lpt \ mac_biba \ mac_bsdextended \ mac_ifoff \ mac_lomac \ mac_mls \ mac_none \ mac_ntpd \ mac_partition \ mac_portacl \ mac_seeotheruids \ mac_stub \ mac_test \ malo \ md \ mdio \ mem \ mfi \ mii \ mlx \ mlxfw \ ${_mlx4} \ ${_mlx4ib} \ ${_mlx4en} \ ${_mlx5} \ ${_mlx5en} \ ${_mlx5ib} \ ${_mly} \ mmc \ mmcsd \ ${_mpr} \ ${_mps} \ mpt \ mqueue \ mrsas \ msdosfs \ msdosfs_iconv \ msk \ ${_mthca} \ mvs \ mwl \ ${_mwlfw} \ mxge \ my \ ${_nctgpio} \ ${_ndis} \ ${_netgraph} \ ${_nfe} \ nfscl \ nfscommon \ nfsd \ nfslockd \ nfssvc \ nge \ nmdm \ nullfs \ ${_ntb} \ ${_nvd} \ ${_nvdimm} \ ${_nvme} \ ${_nvram} \ oce \ ${_ocs_fc} \ otus \ ${_otusfw} \ ow \ ${_padlock} \ ${_padlock_rng} \ ${_pccard} \ ${_pchtherm} \ ${_pcfclock} \ ${_pf} \ ${_pflog} \ ${_pfsync} \ plip \ ${_pms} \ ppbus \ ppc \ ppi \ pps \ procfs \ proto \ pseudofs \ ${_pst} \ pty \ puc \ pwm \ ${_qlxge} \ ${_qlxgb} \ ${_qlxgbe} \ ${_qlnx} \ ral \ ${_ralfw} \ ${_random_fortuna} \ ${_random_other} \ rc4 \ ${_rdma} \ ${_rdrand_rng} \ re \ rl \ ${_rockchip} \ rtwn \ rtwn_pci \ rtwn_usb \ ${_rtwnfw} \ ${_s3} \ ${_safe} \ safexcel \ ${_sbni} \ scc \ ${_sctp} \ sdhci \ ${_sdhci_acpi} \ sdhci_pci \ sdio \ sem \ send \ ${_sfxge} \ sge \ ${_sgx} \ ${_sgx_linux} \ siftr \ siis \ sis \ sk \ ${_smartpqi} \ smbfs \ snp \ sound \ ${_speaker} \ spi \ ${_splash} \ ${_sppp} \ ste \ stge \ ${_sume} \ ${_superio} \ ${_sym} \ ${_syscons} \ sysvipc \ tcp \ ${_ti} \ tmpfs \ ${_toecore} \ ${_tpm} \ ${_twa} \ twe \ tws \ uart \ udf \ udf_iconv \ ufs \ uinput \ unionfs \ usb \ ${_vesa} \ ${_virtio} \ vge \ ${_viawd} \ videomode \ vkbd \ ${_vmd} \ ${_vmm} \ ${_vmware} \ vr \ vte \ ${_wbwd} \ 
${_wi} \ wlan \ wlan_acl \ wlan_amrr \ wlan_ccmp \ wlan_rssadapt \ wlan_tkip \ wlan_wep \ wlan_xauth \ ${_wpi} \ ${_wpifw} \ ${_x86bios} \ xdr \ xl \ xz \ zlib .if ${MK_AUTOFS} != "no" || defined(ALL_MODULES) _autofs= autofs .endif .if ${MK_CDDL} != "no" || defined(ALL_MODULES) .if (${MACHINE_CPUARCH} != "arm" || ${MACHINE_ARCH:Marmv[67]*} != "") && \ ${MACHINE_CPUARCH} != "mips" .if ${KERN_OPTS:MKDTRACE_HOOKS} SUBDIR+= dtrace .endif .endif SUBDIR+= opensolaris .endif .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) .if exists(${SRCTOP}/sys/opencrypto) _crypto= crypto _cryptodev= cryptodev _random_fortuna=random_fortuna _random_other= random_other _ktls_ocf= ktls_ocf .endif .endif .if ${MK_CUSE} != "no" || defined(ALL_MODULES) SUBDIR+= cuse .endif .if ${MK_EFI} != "no" .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" _efirt= efirt .endif .endif .if (${MK_INET_SUPPORT} != "no" || ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _carp= carp _toecore= toecore _if_enc= if_enc _if_gif= if_gif _if_gre= if_gre _ipfw_pmod= ipfw_pmod .if ${KERN_OPTS:MIPSEC_SUPPORT} && !${KERN_OPTS:MIPSEC} _ipsec= ipsec .endif .if ${KERN_OPTS:MSCTP_SUPPORT} || ${KERN_OPTS:MSCTP} _sctp= sctp .endif .endif .if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _if_stf= if_stf .endif .if ${MK_INET_SUPPORT} != "no" || defined(ALL_MODULES) _if_me= if_me _ipdivert= ipdivert _ipfw= ipfw .if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES) _ipfw_nat64= ipfw_nat64 .endif .endif .if ${MK_INET6_SUPPORT} != "no" || defined(ALL_MODULES) _ipfw_nptv6= ipfw_nptv6 .endif .if ${MK_IPFILTER} != "no" || defined(ALL_MODULES) _ipfilter= ipfilter .endif .if ${MK_ISCSI} != "no" || defined(ALL_MODULES) SUBDIR+= cfiscsi SUBDIR+= iscsi SUBDIR+= iscsi_initiator .endif .if !empty(OPT_FDT) SUBDIR+= fdt .endif # Linuxulator .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "i386" SUBDIR+= linprocfs SUBDIR+= 
linsysfs .endif .if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386" SUBDIR+= linux .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" SUBDIR+= linux64 SUBDIR+= linux_common .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "i386" _ena= ena .if ${MK_OFED} != "no" || defined(ALL_MODULES) _ibcore= ibcore _ipoib= ipoib _iser= iser .endif _mlx4= mlx4 _mlx5= mlx5 .if (${MK_INET_SUPPORT} != "no" && ${MK_INET6_SUPPORT} != "no") || \ defined(ALL_MODULES) _mlx4en= mlx4en _mlx5en= mlx5en .endif .if ${MK_OFED} != "no" || defined(ALL_MODULES) _mthca= mthca _mlx4ib= mlx4ib _mlx5ib= mlx5ib .endif .endif .if ${MK_NETGRAPH} != "no" || defined(ALL_MODULES) _netgraph= netgraph .endif .if (${MK_PF} != "no" && (${MK_INET_SUPPORT} != "no" || \ ${MK_INET6_SUPPORT} != "no")) || defined(ALL_MODULES) _pf= pf _pflog= pflog .if ${MK_INET_SUPPORT} != "no" _pfsync= pfsync .endif .endif .if ${MK_SOURCELESS_UCODE} != "no" _bce= bce _fxp= fxp _ispfw= ispfw _ti= ti .if ${MACHINE_CPUARCH} != "mips" _mwlfw= mwlfw _otusfw= otusfw _ralfw= ralfw _rtwnfw= rtwnfw .endif .endif .if ${MK_SOURCELESS_UCODE} != "no" && ${MACHINE_CPUARCH} != "arm" && \ ${MACHINE_CPUARCH} != "mips" && \ ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \ ${MACHINE_CPUARCH} != "riscv" _cxgbe= cxgbe .endif .if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "arm64" _ice= ice .if ${MK_SOURCELESS_UCODE} != "no" _ice_ddp= ice_ddp .endif .endif # These rely on 64bit atomics .if ${MACHINE_ARCH} != "powerpc" && ${MACHINE_ARCH} != "powerpcspe" && \ ${MACHINE_CPUARCH} != "mips" _mps= mps _mpr= mpr .endif .if ${MK_TESTS} != "no" || defined(ALL_MODULES) SUBDIR+= tests .endif .if ${MK_ZFS} != "no" || (defined(ALL_MODULES) && ${MACHINE_CPUARCH} != "powerpc") SUBDIR+= zfs .endif .if (${MACHINE_CPUARCH} == "mips" && ${MACHINE_ARCH:Mmips64} == "") _hwpmc_mips24k= hwpmc_mips24k _hwpmc_mips74k= hwpmc_mips74k .endif .if ${MACHINE_CPUARCH} 
!= "aarch64" && ${MACHINE_CPUARCH} != "arm" && \ ${MACHINE_CPUARCH} != "mips" && ${MACHINE_CPUARCH} != "powerpc" && \ ${MACHINE_CPUARCH} != "riscv" _syscons= syscons .endif .if ${MACHINE_CPUARCH} != "mips" # no BUS_SPACE_UNSPECIFIED # No barrier instruction support (specific to this driver) _sym= sym # intr_disable() is a macro, causes problems .if ${MK_SOURCELESS_UCODE} != "no" _cxgb= cxgb .endif .endif .if ${MACHINE_CPUARCH} == "aarch64" _allwinner= allwinner _armv8crypto= armv8crypto _em= em _rockchip= rockchip .endif .if ${MACHINE_CPUARCH} == "i386" || ${MACHINE_CPUARCH} == "amd64" _agp= agp _an= an _aout= aout _bios= bios .if ${MK_SOURCELESS_UCODE} != "no" _bxe= bxe .endif _cardbus= cardbus _cbb= cbb _cpuctl= cpuctl _cpufreq= cpufreq _dpms= dpms _em= em _et= et _exca= exca _if_ndis= if_ndis _io= io _itwd= itwd _ix= ix _ixv= ixv .if ${MK_SOURCELESS_UCODE} != "no" _lio= lio .endif _nctgpio= nctgpio _ndis= ndis _ntb= ntb _ocs_fc= ocs_fc _pccard= pccard .if ${MK_OFED} != "no" || defined(ALL_MODULES) _rdma= rdma .endif _safe= safe _speaker= speaker _splash= splash _sppp= sppp _vmware= vmware _wbwd= wbwd _wi= wi _aac= aac _aacraid= aacraid _acpi= acpi .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _aesni= aesni .endif _amd_ecc_inject=amd_ecc_inject _amdsbwd= amdsbwd _amdsmn= amdsmn _amdtemp= amdtemp _arcmsr= arcmsr _asmc= asmc .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _blake2= blake2 .endif _bytgpio= bytgpio _chvgpio= chvgpio _ciss= ciss _chromebook_platform= chromebook_platform _cmx= cmx _coretemp= coretemp .if ${MK_SOURCELESS_HOST} != "no" && empty(KCSAN_ENABLED) _hpt27xx= hpt27xx .endif _hptiop= hptiop .if ${MK_SOURCELESS_HOST} != "no" && empty(KCSAN_ENABLED) _hptmv= hptmv _hptnr= hptnr _hptrr= hptrr .endif _hyperv= hyperv _ichwd= ichwd _ida= ida _iir= iir _intelspi= intelspi _ipmi= ipmi _ips= ips _isci= isci _ipw= ipw _iwi= iwi _iwm= iwm _iwn= iwn .if ${MK_SOURCELESS_UCODE} != "no" _ipwfw= ipwfw _iwifw= iwifw _iwmfw= iwmfw _iwnfw= iwnfw .endif _mly= mly 
_nfe= nfe _nvd= nvd _nvme= nvme _nvram= nvram .if ${MK_CRYPT} != "no" || defined(ALL_MODULES) _padlock= padlock _padlock_rng= padlock_rng _rdrand_rng= rdrand_rng .endif _pchtherm = pchtherm _s3= s3 _sdhci_acpi= sdhci_acpi _superio= superio _tpm= tpm _twa= twa _vesa= vesa _viawd= viawd _virtio= virtio _wpi= wpi .if ${MK_SOURCELESS_UCODE} != "no" _wpifw= wpifw .endif _x86bios= x86bios .endif .if ${MACHINE_CPUARCH} == "amd64" _amdgpio= amdgpio _ccp= ccp _iavf= iavf _ioat= ioat _ixl= ixl _nvdimm= nvdimm _pms= pms _qlxge= qlxge _qlxgb= qlxgb _sume= sume _vmd= vmd .if ${MK_SOURCELESS_UCODE} != "no" _qlxgbe= qlxgbe _qlnx= qlnx .endif _sfxge= sfxge _sgx= sgx _sgx_linux= sgx_linux _smartpqi= smartpqi .if ${MK_BHYVE} != "no" || defined(ALL_MODULES) .if ${KERN_OPTS:MSMP} _vmm= vmm .endif .endif .endif .if ${MACHINE_CPUARCH} == "i386" # XXX some of these can move to the general case when de-i386'ed # XXX some of these can move now, but are untested on other architectures. _3dfx= 3dfx _3dfx_linux= 3dfx_linux .if ${MK_SOURCELESS_HOST} != "no" _ce= ce .endif .if ${MK_SOURCELESS_UCODE} != "no" _cp= cp .endif _glxiic= glxiic _glxsb= glxsb _pcfclock= pcfclock _pst= pst _sbni= sbni .endif .if ${MACHINE_ARCH} == "armv7" _cfi= cfi _cpsw= cpsw .endif .if ${MACHINE_CPUARCH} == "powerpc" _aacraid= aacraid _agp= agp _an= an _cardbus= cardbus _cbb= cbb _cfi= cfi _cpufreq= cpufreq _exca= exca _ffec= ffec _nvd= nvd _nvme= nvme _pccard= pccard _wi= wi _virtio= virtio .endif .if ${MACHINE_ARCH:Mpowerpc64*} != "" _ipmi= ipmi _ixl= ixl _nvram= opal_nvram .endif .if ${MACHINE_CPUARCH} == "powerpc" && ${MACHINE_ARCH} != "powerpcspe" # Don't build powermac_nvram for powerpcspe, it's never supported. 
_nvram+= powermac_nvram .endif .if (${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_ARCH:Marmv[67]*} != "" || ${MACHINE_CPUARCH} == "i386") _cloudabi32= cloudabi32 .endif .if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" _cloudabi64= cloudabi64 .endif .endif .if ${MACHINE_ARCH:Marmv[67]*} != "" || ${MACHINE_CPUARCH} == "aarch64" _bcm283x_clkman= bcm283x_clkman _bcm283x_pwm= bcm283x_pwm .endif SUBDIR+=${MODULES_EXTRA} .for reject in ${WITHOUT_MODULES} SUBDIR:= ${SUBDIR:N${reject}} .endfor # Calling kldxref(8) for each module is expensive. .if !defined(NO_XREF) .MAKEFLAGS+= -DNO_XREF afterinstall: .PHONY @if type kldxref >/dev/null 2>&1; then \ ${ECHO} ${KLDXREF_CMD} ${DESTDIR}${KMODDIR}; \ ${KLDXREF_CMD} ${DESTDIR}${KMODDIR}; \ fi .endif SUBDIR:= ${SUBDIR:u:O} .include diff --git a/sys/modules/axgbe/Makefile b/sys/modules/axgbe/Makefile new file mode 100644 index 000000000000..9bf694ef2e3f --- /dev/null +++ b/sys/modules/axgbe/Makefile @@ -0,0 +1,9 @@ +# $FreeBSD$ + +.if ${MACHINE_CPUARCH} == "aarch64" +SUBDIR= if_axa +.elif ${MACHINE_CPUARCH} == "amd64" +SUBDIR= if_axp +.endif + +.include diff --git a/sys/modules/axgbe/if_axa/Makefile b/sys/modules/axgbe/if_axa/Makefile new file mode 100644 index 000000000000..586a446637b8 --- /dev/null +++ b/sys/modules/axgbe/if_axa/Makefile @@ -0,0 +1,12 @@ +#$FreeBSD$ + +.PATH: ${SRCTOP}/sys/dev/axgbe + +KMOD = if_axa +SRCS = device_if.h bus_if.h miibus_if.h ofw_bus_if.h ifdi_if.h vnode_if.h +SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_iflib.h +SRCS += if_axgbe.c xgbe-sysctl.c xgbe_osdep.c xgbe-desc.c xgbe-drv.c xgbe-mdio.c xgbe-dev.c xgbe-i2c.c xgbe-phy-v1.c xgbe-txrx.c + +CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include + +.include diff --git a/sys/modules/axgbe/if_axp/Makefile b/sys/modules/axgbe/if_axp/Makefile new file mode 100644 index 000000000000..5c2f75c52537 --- /dev/null +++ b/sys/modules/axgbe/if_axp/Makefile @@ -0,0 +1,12 @@ +#$FreeBSD$ + +.PATH: 
${SRCTOP}/sys/dev/axgbe + +KMOD = if_axp +SRCS = device_if.h bus_if.h miibus_if.h pci_if.h ifdi_if.h vnode_if.h +SRCS += opt_inet.h opt_inet6.h opt_rss.h opt_iflib.h +SRCS += if_axgbe_pci.c xgbe-sysctl.c xgbe_osdep.c xgbe-desc.c xgbe-drv.c xgbe-mdio.c xgbe-dev.c xgbe-i2c.c xgbe-phy-v2.c xgbe-txrx.c + +CFLAGS+= -I${SRCTOP}/sys/compat/linuxkpi/common/include + +.include