Index: head/sys/arm/mv/files.mv
===================================================================
--- head/sys/arm/mv/files.mv	(revision 319906)
+++ head/sys/arm/mv/files.mv	(revision 319907)
@@ -1,35 +1,37 @@
# $FreeBSD$
#
# The Marvell CPU cores
# - Compliant with V5TE architecture
# - Super scalar dual issue CPU
# - Big/Little Endian
# - MMU/MPU
# - L1 Cache: Supports streaming and write allocate
# - Variable pipeline stages
# - Out-of-order execution
# - Branch Prediction
# - JTAG/ICE
# - Vector Floating Point (VFP) unit
#
arm/mv/gpio.c				optional	gpio
arm/mv/mv_common.c			standard
arm/mv/mv_localbus.c			standard
arm/mv/mv_machdep.c			standard
arm/mv/mv_pci_ctrl.c			optional	pci | fdt
arm/mv/mv_pci.c				optional	pci
arm/mv/mv_ts.c				standard
arm/mv/timer.c				optional	!soc_mv_armada38x
dev/cesa/cesa.c				optional	cesa
dev/iicbus/twsi/mv_twsi.c		optional	twsi
dev/mge/if_mge.c			optional	mge
+dev/neta/if_mvneta_fdt.c		optional	neta fdt
+dev/neta/if_mvneta.c			optional	neta mdio mii
dev/nand/nfc_mv.c			optional	nand
dev/mvs/mvs_soc.c			optional	mvs
dev/uart/uart_dev_ns8250.c		optional	uart
dev/uart/uart_dev_snps.c		optional	uart
dev/usb/controller/ehci_mv.c		optional	ehci
dev/usb/controller/xhci_mv.c		optional	xhci
dev/ahci/ahci_mv_fdt.c			optional	ahci
kern/kern_clocksource.c			standard

Index: head/sys/arm/mv/mv_common.c
===================================================================
--- head/sys/arm/mv/mv_common.c	(revision 319906)
+++ head/sys/arm/mv/mv_common.c	(revision 319907)
@@ -1,2659 +1,2676 @@
/*-
 * Copyright (C) 2008-2011 MARVELL INTERNATIONAL LTD.
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of MARVELL nor the names of contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

MALLOC_DEFINE(M_IDMA, "idma", "idma dma test memory");

#define IDMA_DEBUG
#undef IDMA_DEBUG

#define MAX_CPU_WIN	5

#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
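
/*
 * Editor's summary of this revision (facts taken from the diff itself):
 * files.mv above picks up the new dev/neta/if_mvneta*.c sources, and this
 * file gains decode_win_neta_setup()/decode_win_neta_dump() -- thin
 * wrappers that reuse the existing GbE decode-window code at
 * base + MV_WIN_NETA_OFFSET (see the ETH windows section further down).
 */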
#endif #ifdef DEBUG #define MV_DUMP_WIN 1 #else #define MV_DUMP_WIN 0 #endif static int win_eth_can_remap(int i); static int decode_win_cesa_valid(void); static int decode_win_cpu_valid(void); static int decode_win_usb_valid(void); static int decode_win_usb3_valid(void); static int decode_win_eth_valid(void); static int decode_win_pcie_valid(void); static int decode_win_sata_valid(void); static int decode_win_sdhci_valid(void); static int decode_win_idma_valid(void); static int decode_win_xor_valid(void); static void decode_win_cpu_setup(void); #ifdef SOC_MV_ARMADAXP static int decode_win_sdram_fixup(void); #endif static void decode_win_cesa_setup(u_long); static void decode_win_usb_setup(u_long); static void decode_win_usb3_setup(u_long); static void decode_win_eth_setup(u_long); +static void decode_win_neta_setup(u_long); static void decode_win_sata_setup(u_long); static void decode_win_ahci_setup(u_long); static void decode_win_sdhci_setup(u_long); static void decode_win_idma_setup(u_long); static void decode_win_xor_setup(u_long); static void decode_win_cesa_dump(u_long); static void decode_win_usb_dump(u_long); static void decode_win_usb3_dump(u_long); static void decode_win_eth_dump(u_long base); +static void decode_win_neta_dump(u_long base); static void decode_win_idma_dump(u_long base); static void decode_win_xor_dump(u_long base); static void decode_win_ahci_dump(u_long base); static void decode_win_sdhci_dump(u_long); static void decode_win_pcie_dump(u_long); static int fdt_get_ranges(const char *, void *, int, int *, int *); #ifdef SOC_MV_ARMADA38X int gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt, int *trig, int *pol); #endif static int win_cpu_from_dt(void); static int fdt_win_setup(void); static uint32_t dev_mask = 0; static int cpu_wins_no = 0; static int eth_port = 0; static int usb_port = 0; static struct decode_win cpu_win_tbl[MAX_CPU_WIN]; const struct decode_win *cpu_wins = cpu_win_tbl; typedef void (*decode_win_setup_t)(u_long); typedef void (*dump_win_t)(u_long); /* * The power status of device feature is only supported on * Kirkwood and Discovery SoCs. 
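 *
 * Editor's illustration (hedged, device chosen for example only): if the
 * firmware left GbE port 0 powered down, soc_power_ctrl_get(CPU_PM_CTRL_GE(0))
 * returns something other than the full mask, pm_is_disabled() below reports
 * 1, and the matching decode_win_*_setup() routine returns early instead of
 * programming windows for a powered-off block.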
*/ #if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) #define SOC_MV_POWER_STAT_SUPPORTED 1 #else #define SOC_MV_POWER_STAT_SUPPORTED 0 #endif struct soc_node_spec { const char *compat; decode_win_setup_t decode_handler; dump_win_t dump_handler; }; static struct soc_node_spec soc_nodes[] = { { "mrvl,ge", &decode_win_eth_setup, &decode_win_eth_dump }, + { "marvell,armada-370-neta", &decode_win_neta_setup, &decode_win_neta_dump }, { "mrvl,usb-ehci", &decode_win_usb_setup, &decode_win_usb_dump }, { "marvell,orion-ehci", &decode_win_usb_setup, &decode_win_usb_dump }, { "marvell,armada-380-xhci", &decode_win_usb3_setup, &decode_win_usb3_dump }, { "marvell,armada-380-ahci", &decode_win_ahci_setup, &decode_win_ahci_dump }, { "marvell,armada-380-sdhci", &decode_win_sdhci_setup, &decode_win_sdhci_dump }, { "mrvl,sata", &decode_win_sata_setup, NULL }, { "mrvl,xor", &decode_win_xor_setup, &decode_win_xor_dump }, { "mrvl,idma", &decode_win_idma_setup, &decode_win_idma_dump }, { "mrvl,cesa", &decode_win_cesa_setup, &decode_win_cesa_dump }, { "mrvl,pcie", &decode_win_pcie_setup, &decode_win_pcie_dump }, { NULL, NULL, NULL }, }; struct fdt_pm_mask_entry { char *compat; uint32_t mask; }; static struct fdt_pm_mask_entry fdt_pm_mask_table[] = { { "mrvl,ge", CPU_PM_CTRL_GE(0) }, { "mrvl,ge", CPU_PM_CTRL_GE(1) }, { "mrvl,usb-ehci", CPU_PM_CTRL_USB(0) }, { "mrvl,usb-ehci", CPU_PM_CTRL_USB(1) }, { "mrvl,usb-ehci", CPU_PM_CTRL_USB(2) }, { "mrvl,xor", CPU_PM_CTRL_XOR }, { "mrvl,sata", CPU_PM_CTRL_SATA }, { NULL, 0 } }; static __inline int pm_is_disabled(uint32_t mask) { #if SOC_MV_POWER_STAT_SUPPORTED return (soc_power_ctrl_get(mask) == mask ? 0 : 1); #else return (0); #endif } /* * Disable device using power management register. * 1 - Device Power On * 0 - Device Power Off * Mask can be set in loader. * EXAMPLE: * loader> set hw.pm-disable-mask=0x2 * * Common mask: * |-------------------------------| * | Device | Kirkwood | Discovery | * |-------------------------------| * | USB0 | 0x00008 | 0x020000 | * |-------------------------------| * | USB1 | - | 0x040000 | * |-------------------------------| * | USB2 | - | 0x080000 | * |-------------------------------| * | GE0 | 0x00001 | 0x000002 | * |-------------------------------| * | GE1 | - | 0x000004 | * |-------------------------------| * | IDMA | - | 0x100000 | * |-------------------------------| * | XOR | 0x10000 | 0x200000 | * |-------------------------------| * | CESA | 0x20000 | 0x400000 | * |-------------------------------| * | SATA | 0x04000 | 0x004000 | * --------------------------------| * This feature can be used only on Kirkwood and Discovery * machines. 
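 *
 * Editor's worked example from the table above: to power off both USB0 and
 * XOR on a Kirkwood board, OR their masks, 0x00008 | 0x10000 = 0x10008,
 * and set:
 * loader> set hw.pm-disable-mask=0x10008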
 */
static __inline void
pm_disable_device(int mask)
{
#ifdef DIAGNOSTIC
	uint32_t reg;

	reg = soc_power_ctrl_get(CPU_PM_CTRL_ALL);
	printf("Power Management Register: 0x%x\n", reg);

	reg &= ~mask;
	soc_power_ctrl_set(reg);
	printf("Device 0x%x is disabled\n", mask);

	reg = soc_power_ctrl_get(CPU_PM_CTRL_ALL);
	printf("Power Management Register: 0x%x\n", reg);
#endif
}

int
fdt_pm(phandle_t node)
{
	uint32_t cpu_pm_ctrl;
	int i, ena, compat;

	ena = 1;
	cpu_pm_ctrl = read_cpu_ctrl(CPU_PM_CTRL);
	for (i = 0; fdt_pm_mask_table[i].compat != NULL; i++) {
		if (dev_mask & (1 << i))
			continue;

		compat = ofw_bus_node_is_compatible(node,
		    fdt_pm_mask_table[i].compat);
#if defined(SOC_MV_KIRKWOOD)
		if (compat && (cpu_pm_ctrl & fdt_pm_mask_table[i].mask)) {
			dev_mask |= (1 << i);
			ena = 0;
			break;
		} else if (compat) {
			dev_mask |= (1 << i);
			break;
		}
#else
		if (compat && (~cpu_pm_ctrl & fdt_pm_mask_table[i].mask)) {
			dev_mask |= (1 << i);
			ena = 0;
			break;
		} else if (compat) {
			dev_mask |= (1 << i);
			break;
		}
#endif
	}

	return (ena);
}

uint32_t
read_cpu_ctrl(uint32_t reg)
{

	return (bus_space_read_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE, reg));
}

void
write_cpu_ctrl(uint32_t reg, uint32_t val)
{

	bus_space_write_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE, reg, val);
}

#if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X)
uint32_t
read_cpu_mp_clocks(uint32_t reg)
{

	return (bus_space_read_4(fdtbus_bs_tag, MV_MP_CLOCKS_BASE, reg));
}

void
write_cpu_mp_clocks(uint32_t reg, uint32_t val)
{

	bus_space_write_4(fdtbus_bs_tag, MV_MP_CLOCKS_BASE, reg, val);
}

uint32_t
read_cpu_misc(uint32_t reg)
{

	return (bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, reg));
}

void
write_cpu_misc(uint32_t reg, uint32_t val)
{

	bus_space_write_4(fdtbus_bs_tag, MV_MISC_BASE, reg, val);
}
#endif

void
cpu_reset(void)
{

#if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X)
	write_cpu_misc(RSTOUTn_MASK, SOFT_RST_OUT_EN);
	write_cpu_misc(SYSTEM_SOFT_RESET, SYS_SOFT_RST);
#else
	write_cpu_ctrl(RSTOUTn_MASK, SOFT_RST_OUT_EN);
	write_cpu_ctrl(SYSTEM_SOFT_RESET, SYS_SOFT_RST);
#endif
	while (1);
}

uint32_t
cpu_extra_feat(void)
{
	uint32_t dev, rev;
	uint32_t ef = 0;

	soc_id(&dev, &rev);

	switch (dev) {
	case MV_DEV_88F6281:
	case MV_DEV_88F6282:
	case MV_DEV_88RC8180:
	case MV_DEV_MV78100_Z0:
	case MV_DEV_MV78100:
		__asm __volatile("mrc p15, 1, %0, c15, c1, 0" : "=r" (ef));
		break;
	case MV_DEV_88F5182:
	case MV_DEV_88F5281:
		__asm __volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (ef));
		break;
	default:
		if (bootverbose)
			printf("This ARM Core does not support any extra features\n");
	}

	return (ef);
}

/*
 * Get the power status of device. This feature is only supported on
 * Kirkwood and Discovery SoCs.
 */
uint32_t
soc_power_ctrl_get(uint32_t mask)
{

#if SOC_MV_POWER_STAT_SUPPORTED
	if (mask != CPU_PM_CTRL_NONE)
		mask &= read_cpu_ctrl(CPU_PM_CTRL);

	return (mask);
#else
	return (mask);
#endif
}

/*
 * Set the power status of device. This feature is only supported on
 * Kirkwood and Discovery SoCs.
 */
void
soc_power_ctrl_set(uint32_t mask)
{

#if !defined(SOC_MV_ORION)
	if (mask != CPU_PM_CTRL_NONE)
		write_cpu_ctrl(CPU_PM_CTRL, mask);
#endif
}

void
soc_id(uint32_t *dev, uint32_t *rev)
{

	/*
	 * Notice: system identifiers are available in the registers range of
	 * PCIE controller, so using this function is only allowed (and
	 * possible) after the internal registers range has been mapped in via
	 * devmap_bootstrap().
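	 *
	 * Editor's usage sketch (an assumption about call sites, not part of
	 * this revision) -- soc_identify() below follows this pattern:
	 *
	 *	uint32_t dev, rev;
	 *
	 *	soc_id(&dev, &rev);
	 *	if (dev == MV_DEV_88F6828)
	 *		printf("Armada 38x\n");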
*/ *dev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 0) >> 16; *rev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 8) & 0xff; } static void soc_identify(void) { uint32_t d, r, size, mode; const char *dev; const char *rev; soc_id(&d, &r); printf("SOC: "); if (bootverbose) printf("(0x%4x:0x%02x) ", d, r); rev = ""; switch (d) { case MV_DEV_88F5181: dev = "Marvell 88F5181"; if (r == 3) rev = "B1"; break; case MV_DEV_88F5182: dev = "Marvell 88F5182"; if (r == 2) rev = "A2"; break; case MV_DEV_88F5281: dev = "Marvell 88F5281"; if (r == 4) rev = "D0"; else if (r == 5) rev = "D1"; else if (r == 6) rev = "D2"; break; case MV_DEV_88F6281: dev = "Marvell 88F6281"; if (r == 0) rev = "Z0"; else if (r == 2) rev = "A0"; else if (r == 3) rev = "A1"; break; case MV_DEV_88RC8180: dev = "Marvell 88RC8180"; break; case MV_DEV_88RC9480: dev = "Marvell 88RC9480"; break; case MV_DEV_88RC9580: dev = "Marvell 88RC9580"; break; case MV_DEV_88F6781: dev = "Marvell 88F6781"; if (r == 2) rev = "Y0"; break; case MV_DEV_88F6282: dev = "Marvell 88F6282"; if (r == 0) rev = "A0"; else if (r == 1) rev = "A1"; break; case MV_DEV_88F6828: dev = "Marvell 88F6828"; break; case MV_DEV_88F6820: dev = "Marvell 88F6820"; break; case MV_DEV_88F6810: dev = "Marvell 88F6810"; break; case MV_DEV_MV78100_Z0: dev = "Marvell MV78100 Z0"; break; case MV_DEV_MV78100: dev = "Marvell MV78100"; break; case MV_DEV_MV78160: dev = "Marvell MV78160"; break; case MV_DEV_MV78260: dev = "Marvell MV78260"; break; case MV_DEV_MV78460: dev = "Marvell MV78460"; break; default: dev = "UNKNOWN"; break; } printf("%s", dev); if (*rev != '\0') printf(" rev %s", rev); printf(", TClock %dMHz\n", get_tclk() / 1000 / 1000); mode = read_cpu_ctrl(CPU_CONFIG); printf(" Instruction cache prefetch %s, data cache prefetch %s\n", (mode & CPU_CONFIG_IC_PREF) ? "enabled" : "disabled", (mode & CPU_CONFIG_DC_PREF) ? "enabled" : "disabled"); switch (d) { case MV_DEV_88F6281: case MV_DEV_88F6282: mode = read_cpu_ctrl(CPU_L2_CONFIG) & CPU_L2_CONFIG_MODE; printf(" 256KB 4-way set-associative %s unified L2 cache\n", mode ? "write-through" : "write-back"); break; case MV_DEV_MV78100: mode = read_cpu_ctrl(CPU_CONTROL); size = mode & CPU_CONTROL_L2_SIZE; mode = mode & CPU_CONTROL_L2_MODE; printf(" %s set-associative %s unified L2 cache\n", size ? "256KB 4-way" : "512KB 8-way", mode ? "write-through" : "write-back"); break; default: break; } } static void platform_identify(void *dummy) { soc_identify(); /* * XXX Board identification e.g. read out from FPGA or similar should * go here */ } SYSINIT(platform_identify, SI_SUB_CPU, SI_ORDER_SECOND, platform_identify, NULL); #ifdef KDB static void mv_enter_debugger(void *dummy) { if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); } SYSINIT(mv_enter_debugger, SI_SUB_CPU, SI_ORDER_ANY, mv_enter_debugger, NULL); #endif int soc_decode_win(void) { uint32_t dev, rev; int mask, err; mask = 0; TUNABLE_INT_FETCH("hw.pm-disable-mask", &mask); if (mask != 0) pm_disable_device(mask); /* Retrieve data about physical addresses from device tree. 
*/ if ((err = win_cpu_from_dt()) != 0) return (err); /* Retrieve our ID: some windows facilities vary between SoC models */ soc_id(&dev, &rev); #ifdef SOC_MV_ARMADAXP if ((err = decode_win_sdram_fixup()) != 0) return(err); #endif if (!decode_win_cpu_valid() || !decode_win_usb_valid() || !decode_win_eth_valid() || !decode_win_idma_valid() || !decode_win_pcie_valid() || !decode_win_sata_valid() || !decode_win_xor_valid() || !decode_win_usb3_valid() || !decode_win_sdhci_valid() || !decode_win_cesa_valid()) return (EINVAL); decode_win_cpu_setup(); if (MV_DUMP_WIN) soc_dump_decode_win(); eth_port = 0; usb_port = 0; if ((err = fdt_win_setup()) != 0) return (err); return (0); } /************************************************************************** * Decode windows registers accessors **************************************************************************/ WIN_REG_IDX_RD(win_cpu, cr, MV_WIN_CPU_CTRL, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu, br, MV_WIN_CPU_BASE, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu, remap_l, MV_WIN_CPU_REMAP_LO, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu, remap_h, MV_WIN_CPU_REMAP_HI, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu, cr, MV_WIN_CPU_CTRL, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu, br, MV_WIN_CPU_BASE, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu, remap_l, MV_WIN_CPU_REMAP_LO, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu, remap_h, MV_WIN_CPU_REMAP_HI, MV_MBUS_BRIDGE_BASE) WIN_REG_BASE_IDX_RD(win_cesa, cr, MV_WIN_CESA_CTRL) WIN_REG_BASE_IDX_RD(win_cesa, br, MV_WIN_CESA_BASE) WIN_REG_BASE_IDX_WR(win_cesa, cr, MV_WIN_CESA_CTRL) WIN_REG_BASE_IDX_WR(win_cesa, br, MV_WIN_CESA_BASE) WIN_REG_BASE_IDX_RD(win_usb, cr, MV_WIN_USB_CTRL) WIN_REG_BASE_IDX_RD(win_usb, br, MV_WIN_USB_BASE) WIN_REG_BASE_IDX_WR(win_usb, cr, MV_WIN_USB_CTRL) WIN_REG_BASE_IDX_WR(win_usb, br, MV_WIN_USB_BASE) #ifdef SOC_MV_ARMADA38X WIN_REG_BASE_IDX_RD(win_usb3, cr, MV_WIN_USB3_CTRL) WIN_REG_BASE_IDX_RD(win_usb3, br, MV_WIN_USB3_BASE) WIN_REG_BASE_IDX_WR(win_usb3, cr, MV_WIN_USB3_CTRL) WIN_REG_BASE_IDX_WR(win_usb3, br, MV_WIN_USB3_BASE) #endif WIN_REG_BASE_IDX_RD(win_eth, br, MV_WIN_ETH_BASE) WIN_REG_BASE_IDX_RD(win_eth, sz, MV_WIN_ETH_SIZE) WIN_REG_BASE_IDX_RD(win_eth, har, MV_WIN_ETH_REMAP) WIN_REG_BASE_IDX_WR(win_eth, br, MV_WIN_ETH_BASE) WIN_REG_BASE_IDX_WR(win_eth, sz, MV_WIN_ETH_SIZE) WIN_REG_BASE_IDX_WR(win_eth, har, MV_WIN_ETH_REMAP) WIN_REG_BASE_IDX_RD2(win_xor, br, MV_WIN_XOR_BASE) WIN_REG_BASE_IDX_RD2(win_xor, sz, MV_WIN_XOR_SIZE) WIN_REG_BASE_IDX_RD2(win_xor, har, MV_WIN_XOR_REMAP) WIN_REG_BASE_IDX_RD2(win_xor, ctrl, MV_WIN_XOR_CTRL) WIN_REG_BASE_IDX_WR2(win_xor, br, MV_WIN_XOR_BASE) WIN_REG_BASE_IDX_WR2(win_xor, sz, MV_WIN_XOR_SIZE) WIN_REG_BASE_IDX_WR2(win_xor, har, MV_WIN_XOR_REMAP) WIN_REG_BASE_IDX_WR2(win_xor, ctrl, MV_WIN_XOR_CTRL) WIN_REG_BASE_RD(win_eth, bare, 0x290) WIN_REG_BASE_RD(win_eth, epap, 0x294) WIN_REG_BASE_WR(win_eth, bare, 0x290) WIN_REG_BASE_WR(win_eth, epap, 0x294) WIN_REG_BASE_IDX_RD(win_pcie, cr, MV_WIN_PCIE_CTRL); WIN_REG_BASE_IDX_RD(win_pcie, br, MV_WIN_PCIE_BASE); WIN_REG_BASE_IDX_RD(win_pcie, remap, MV_WIN_PCIE_REMAP); WIN_REG_BASE_IDX_WR(win_pcie, cr, MV_WIN_PCIE_CTRL); WIN_REG_BASE_IDX_WR(win_pcie, br, MV_WIN_PCIE_BASE); WIN_REG_BASE_IDX_WR(win_pcie, remap, MV_WIN_PCIE_REMAP); WIN_REG_BASE_IDX_RD(pcie_bar, br, MV_PCIE_BAR_BASE); WIN_REG_BASE_IDX_RD(pcie_bar, brh, MV_PCIE_BAR_BASE_H); WIN_REG_BASE_IDX_RD(pcie_bar, cr, MV_PCIE_BAR_CTRL); WIN_REG_BASE_IDX_WR(pcie_bar, br, MV_PCIE_BAR_BASE); WIN_REG_BASE_IDX_WR(pcie_bar, brh, MV_PCIE_BAR_BASE_H); 
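
/*
 * Editor's sketch (an assumption, not from this revision): the WIN_REG_*
 * generator macros used above come from mvwin.h and are assumed to expand
 * to trivial bus-space accessors along these lines:
 */
#if 0
static __inline uint32_t
win_cesa_cr_read(u_long base, int i)
{

	return (bus_space_read_4(fdtbus_bs_tag, base, MV_WIN_CESA_CTRL(i)));
}

static __inline void
win_cesa_cr_write(u_long base, int i, uint32_t val)
{

	bus_space_write_4(fdtbus_bs_tag, base, MV_WIN_CESA_CTRL(i), val);
}
#endif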
WIN_REG_BASE_IDX_WR(pcie_bar, cr, MV_PCIE_BAR_CTRL); WIN_REG_BASE_IDX_RD(win_idma, br, MV_WIN_IDMA_BASE) WIN_REG_BASE_IDX_RD(win_idma, sz, MV_WIN_IDMA_SIZE) WIN_REG_BASE_IDX_RD(win_idma, har, MV_WIN_IDMA_REMAP) WIN_REG_BASE_IDX_RD(win_idma, cap, MV_WIN_IDMA_CAP) WIN_REG_BASE_IDX_WR(win_idma, br, MV_WIN_IDMA_BASE) WIN_REG_BASE_IDX_WR(win_idma, sz, MV_WIN_IDMA_SIZE) WIN_REG_BASE_IDX_WR(win_idma, har, MV_WIN_IDMA_REMAP) WIN_REG_BASE_IDX_WR(win_idma, cap, MV_WIN_IDMA_CAP) WIN_REG_BASE_RD(win_idma, bare, 0xa80) WIN_REG_BASE_WR(win_idma, bare, 0xa80) WIN_REG_BASE_IDX_RD(win_sata, cr, MV_WIN_SATA_CTRL); WIN_REG_BASE_IDX_RD(win_sata, br, MV_WIN_SATA_BASE); WIN_REG_BASE_IDX_WR(win_sata, cr, MV_WIN_SATA_CTRL); WIN_REG_BASE_IDX_WR(win_sata, br, MV_WIN_SATA_BASE); #if defined(SOC_MV_ARMADA38X) WIN_REG_BASE_IDX_RD(win_sata, sz, MV_WIN_SATA_SIZE); WIN_REG_BASE_IDX_WR(win_sata, sz, MV_WIN_SATA_SIZE); #endif WIN_REG_BASE_IDX_RD(win_sdhci, cr, MV_WIN_SDHCI_CTRL); WIN_REG_BASE_IDX_RD(win_sdhci, br, MV_WIN_SDHCI_BASE); WIN_REG_BASE_IDX_WR(win_sdhci, cr, MV_WIN_SDHCI_CTRL); WIN_REG_BASE_IDX_WR(win_sdhci, br, MV_WIN_SDHCI_BASE); #ifndef SOC_MV_DOVE WIN_REG_IDX_RD(ddr, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE) WIN_REG_IDX_RD(ddr, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE) WIN_REG_IDX_WR(ddr, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE) WIN_REG_IDX_WR(ddr, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE) #else /* * On 88F6781 (Dove) SoC DDR Controller is accessed through * single MBUS <-> AXI bridge. In this case we provide emulated * ddr_br_read() and ddr_sz_read() functions to keep compatibility * with common decoding windows setup code. */ static inline uint32_t ddr_br_read(int i) { uint32_t mmap; /* Read Memory Address Map Register for CS i */ mmap = bus_space_read_4(fdtbus_bs_tag, MV_DDR_CADR_BASE + (i * 0x10), 0); /* Return CS i base address */ return (mmap & 0xFF000000); } static inline uint32_t ddr_sz_read(int i) { uint32_t mmap, size; /* Read Memory Address Map Register for CS i */ mmap = bus_space_read_4(fdtbus_bs_tag, MV_DDR_CADR_BASE + (i * 0x10), 0); /* Extract size of CS space in 64kB units */ size = (1 << ((mmap >> 16) & 0x0F)); /* Return CS size and enable/disable status */ return (((size - 1) << 16) | (mmap & 0x01)); } #endif /************************************************************************** * Decode windows helper routines **************************************************************************/ void soc_dump_decode_win(void) { uint32_t dev, rev; int i; soc_id(&dev, &rev); for (i = 0; i < MV_WIN_CPU_MAX; i++) { printf("CPU window#%d: c 0x%08x, b 0x%08x", i, win_cpu_cr_read(i), win_cpu_br_read(i)); if (win_cpu_can_remap(i)) printf(", rl 0x%08x, rh 0x%08x", win_cpu_remap_l_read(i), win_cpu_remap_h_read(i)); printf("\n"); } printf("Internal regs base: 0x%08x\n", bus_space_read_4(fdtbus_bs_tag, MV_INTREGS_BASE, 0)); for (i = 0; i < MV_WIN_DDR_MAX; i++) printf("DDR CS#%d: b 0x%08x, s 0x%08x\n", i, ddr_br_read(i), ddr_sz_read(i)); } /************************************************************************** * CPU windows routines **************************************************************************/ int win_cpu_can_remap(int i) { uint32_t dev, rev; soc_id(&dev, &rev); /* Depending on the SoC certain windows have remap capability */ if ((dev == MV_DEV_88F5182 && i < 2) || (dev == MV_DEV_88F5281 && i < 4) || (dev == MV_DEV_88F6281 && i < 4) || (dev == MV_DEV_88F6282 && i < 4) || (dev == MV_DEV_88F6828 && i < 20) || (dev == MV_DEV_88F6820 && i < 20) || (dev == MV_DEV_88F6810 && i < 20) || (dev == 
MV_DEV_88RC8180 && i < 2) || (dev == MV_DEV_88F6781 && i < 4) || (dev == MV_DEV_MV78100_Z0 && i < 8) || ((dev & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY && i < 8)) return (1); return (0); } /* XXX This should check for overlapping remap fields too.. */ int decode_win_overlap(int win, int win_no, const struct decode_win *wintab) { const struct decode_win *tab; int i; tab = wintab; for (i = 0; i < win_no; i++, tab++) { if (i == win) /* Skip self */ continue; if ((tab->base + tab->size - 1) < (wintab + win)->base) continue; else if (((wintab + win)->base + (wintab + win)->size - 1) < tab->base) continue; else return (i); } return (-1); } static int decode_win_cpu_valid(void) { int i, j, rv; uint32_t b, e, s; if (cpu_wins_no > MV_WIN_CPU_MAX) { printf("CPU windows: too many entries: %d\n", cpu_wins_no); return (0); } rv = 1; for (i = 0; i < cpu_wins_no; i++) { if (cpu_wins[i].target == 0) { printf("CPU window#%d: DDR target window is not " "supposed to be reprogrammed!\n", i); rv = 0; } if (cpu_wins[i].remap != ~0 && win_cpu_can_remap(i) != 1) { printf("CPU window#%d: not capable of remapping, but " "val 0x%08x defined\n", i, cpu_wins[i].remap); rv = 0; } s = cpu_wins[i].size; b = cpu_wins[i].base; e = b + s - 1; if (s > (0xFFFFFFFF - b + 1)) { /* * XXX this boundary check should account for 64bit * and remapping.. */ printf("CPU window#%d: no space for size 0x%08x at " "0x%08x\n", i, s, b); rv = 0; continue; } if (b != rounddown2(b, s)) { printf("CPU window#%d: address 0x%08x is not aligned " "to 0x%08x\n", i, b, s); rv = 0; continue; } j = decode_win_overlap(i, cpu_wins_no, &cpu_wins[0]); if (j >= 0) { printf("CPU window#%d: (0x%08x - 0x%08x) overlaps " "with #%d (0x%08x - 0x%08x)\n", i, b, e, j, cpu_wins[j].base, cpu_wins[j].base + cpu_wins[j].size - 1); rv = 0; } } return (rv); } int decode_win_cpu_set(int target, int attr, vm_paddr_t base, uint32_t size, vm_paddr_t remap) { uint32_t br, cr; int win, i; if (remap == ~0) { win = MV_WIN_CPU_MAX - 1; i = -1; } else { win = 0; i = 1; } while ((win >= 0) && (win < MV_WIN_CPU_MAX)) { cr = win_cpu_cr_read(win); if ((cr & MV_WIN_CPU_ENABLE_BIT) == 0) break; if ((cr & ((0xff << MV_WIN_CPU_ATTR_SHIFT) | (0x1f << MV_WIN_CPU_TARGET_SHIFT))) == ((attr << MV_WIN_CPU_ATTR_SHIFT) | (target << MV_WIN_CPU_TARGET_SHIFT))) break; win += i; } if ((win < 0) || (win >= MV_WIN_CPU_MAX) || ((remap != ~0) && (win_cpu_can_remap(win) == 0))) return (-1); br = base & 0xffff0000; win_cpu_br_write(win, br); if (win_cpu_can_remap(win)) { if (remap != ~0) { win_cpu_remap_l_write(win, remap & 0xffff0000); win_cpu_remap_h_write(win, 0); } else { /* * Remap function is not used for a given window * (capable of remapping) - set remap field with the * same value as base. 
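			 *
			 * (Editor's note: this makes the remap an identity
			 * translation -- e.g. a window at 0x80000000 with
			 * remap 0x80000000 forwards addresses unchanged,
			 * which is the neutral setting when no remapping
			 * is requested.)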
*/ win_cpu_remap_l_write(win, base & 0xffff0000); win_cpu_remap_h_write(win, 0); } } cr = ((size - 1) & 0xffff0000) | (attr << MV_WIN_CPU_ATTR_SHIFT) | (target << MV_WIN_CPU_TARGET_SHIFT) | MV_WIN_CPU_ENABLE_BIT; win_cpu_cr_write(win, cr); return (0); } static void decode_win_cpu_setup(void) { int i; /* Disable all CPU windows */ for (i = 0; i < MV_WIN_CPU_MAX; i++) { win_cpu_cr_write(i, 0); win_cpu_br_write(i, 0); if (win_cpu_can_remap(i)) { win_cpu_remap_l_write(i, 0); win_cpu_remap_h_write(i, 0); } } for (i = 0; i < cpu_wins_no; i++) if (cpu_wins[i].target > 0) decode_win_cpu_set(cpu_wins[i].target, cpu_wins[i].attr, cpu_wins[i].base, cpu_wins[i].size, cpu_wins[i].remap); } #ifdef SOC_MV_ARMADAXP static int decode_win_sdram_fixup(void) { struct mem_region mr[FDT_MEM_REGIONS]; uint8_t window_valid[MV_WIN_DDR_MAX]; int mr_cnt, err, i, j; uint32_t valid_win_num = 0; /* Grab physical memory regions information from device tree. */ err = fdt_get_mem_regions(mr, &mr_cnt, NULL); if (err != 0) return (err); for (i = 0; i < MV_WIN_DDR_MAX; i++) window_valid[i] = 0; /* Try to match entries from device tree with settings from u-boot */ for (i = 0; i < mr_cnt; i++) { for (j = 0; j < MV_WIN_DDR_MAX; j++) { if (ddr_is_active(j) && (ddr_base(j) == mr[i].mr_start) && (ddr_size(j) == mr[i].mr_size)) { window_valid[j] = 1; valid_win_num++; } } } if (mr_cnt != valid_win_num) return (EINVAL); /* Destroy windows without corresponding device tree entry */ for (j = 0; j < MV_WIN_DDR_MAX; j++) { if (ddr_is_active(j) && (window_valid[j] != 1)) { printf("Disabling SDRAM decoding window: %d\n", j); ddr_disable(j); } } return (0); } #endif /* * Check if we're able to cover all active DDR banks. */ static int decode_win_can_cover_ddr(int max) { int i, c; c = 0; for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) c++; if (c > max) { printf("Unable to cover all active DDR banks: " "%d, available windows: %d\n", c, max); return (0); } return (1); } /************************************************************************** * DDR windows routines **************************************************************************/ int ddr_is_active(int i) { if (ddr_sz_read(i) & 0x1) return (1); return (0); } void ddr_disable(int i) { ddr_sz_write(i, 0); ddr_br_write(i, 0); } uint32_t ddr_base(int i) { return (ddr_br_read(i) & 0xff000000); } uint32_t ddr_size(int i) { return ((ddr_sz_read(i) | 0x00ffffff) + 1); } uint32_t ddr_attr(int i) { uint32_t dev, rev; soc_id(&dev, &rev); if (dev == MV_DEV_88RC8180) return ((ddr_sz_read(i) & 0xf0) >> 4); if (dev == MV_DEV_88F6781) return (0); return (i == 0 ? 0xe : (i == 1 ? 0xd : (i == 2 ? 0xb : (i == 3 ? 0x7 : 0xff)))); } uint32_t ddr_target(int i) { uint32_t dev, rev; soc_id(&dev, &rev); if (dev == MV_DEV_88RC8180) { i = (ddr_sz_read(i) & 0xf0) >> 4; return (i == 0xe ? 0xc : (i == 0xd ? 0xd : (i == 0xb ? 0xe : (i == 0x7 ? 0xf : 0xc)))); } /* * On SOCs other than 88RC8180 Mbus unit ID for * DDR SDRAM controller is always 0x0. */ return (0); } /************************************************************************** * CESA windows routines **************************************************************************/ static int decode_win_cesa_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_CESA_MAX)); } static void decode_win_cesa_dump(u_long base) { int i; for (i = 0; i < MV_WIN_CESA_MAX; i++) printf("CESA window#%d: c 0x%08x, b 0x%08x\n", i, win_cesa_cr_read(base, i), win_cesa_br_read(base, i)); } /* * Set CESA decode windows. 
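 *
 * Editor's worked example for the Armada 38x quirk handled below (bank
 * sizes hypothetical): with two active 2 GB banks, CS1 has ddr_base
 * 0x80000000 and ddr_size 0x80000000; their sum is 0x100000000ULL, i.e.
 * the window would end exactly at the 4 GB boundary, so its size is
 * halved to 0x40000000.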
*/ static void decode_win_cesa_setup(u_long base) { uint32_t br, cr; uint64_t size; int i, j; for (i = 0; i < MV_WIN_CESA_MAX; i++) { win_cesa_cr_write(base, i, 0); win_cesa_br_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { br = ddr_base(i); size = ddr_size(i); #ifdef SOC_MV_ARMADA38X /* * Armada 38x SoC's equipped with 4GB DRAM * suffer freeze during CESA operation, if * MBUS window opened at given DRAM CS reaches * end of the address space. Apply a workaround * by setting the window size to the closest possible * value, i.e. divide it by 2. */ if (size + ddr_base(i) == 0x100000000ULL) size /= 2; #endif cr = (((size - 1) & 0xffff0000) | (ddr_attr(i) << IO_WIN_ATTR_SHIFT) | (ddr_target(i) << IO_WIN_TGT_SHIFT) | IO_WIN_ENA_MASK); /* Set the first free CESA window */ for (j = 0; j < MV_WIN_CESA_MAX; j++) { if (win_cesa_cr_read(base, j) & 0x1) continue; win_cesa_br_write(base, j, br); win_cesa_cr_write(base, j, cr); break; } } } } /************************************************************************** * USB windows routines **************************************************************************/ static int decode_win_usb_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_USB_MAX)); } static void decode_win_usb_dump(u_long base) { int i; if (pm_is_disabled(CPU_PM_CTRL_USB(usb_port - 1))) return; for (i = 0; i < MV_WIN_USB_MAX; i++) printf("USB window#%d: c 0x%08x, b 0x%08x\n", i, win_usb_cr_read(base, i), win_usb_br_read(base, i)); } /* * Set USB decode windows. */ static void decode_win_usb_setup(u_long base) { uint32_t br, cr; int i, j; if (pm_is_disabled(CPU_PM_CTRL_USB(usb_port))) return; usb_port++; for (i = 0; i < MV_WIN_USB_MAX; i++) { win_usb_cr_write(base, i, 0); win_usb_br_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { br = ddr_base(i); /* * XXX for 6281 we should handle Mbus write * burst limit field in the ctrl reg */ cr = (((ddr_size(i) - 1) & 0xffff0000) | (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1); /* Set the first free USB window */ for (j = 0; j < MV_WIN_USB_MAX; j++) { if (win_usb_cr_read(base, j) & 0x1) continue; win_usb_br_write(base, j, br); win_usb_cr_write(base, j, cr); break; } } } } /************************************************************************** * USB3 windows routines **************************************************************************/ #ifdef SOC_MV_ARMADA38X static int decode_win_usb3_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_USB3_MAX)); } static void decode_win_usb3_dump(u_long base) { int i; for (i = 0; i < MV_WIN_USB3_MAX; i++) printf("USB3.0 window#%d: c 0x%08x, b 0x%08x\n", i, win_usb3_cr_read(base, i), win_usb3_br_read(base, i)); } /* * Set USB3 decode windows */ static void decode_win_usb3_setup(u_long base) { uint32_t br, cr; int i, j; for (i = 0; i < MV_WIN_USB3_MAX; i++) { win_usb3_cr_write(base, i, 0); win_usb3_br_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { br = ddr_base(i); cr = (((ddr_size(i) - 1) & (IO_WIN_SIZE_MASK << IO_WIN_SIZE_SHIFT)) | (ddr_attr(i) << IO_WIN_ATTR_SHIFT) | (ddr_target(i) << IO_WIN_TGT_SHIFT) | IO_WIN_ENA_MASK); /* Set the first free USB3.0 window */ for (j = 0; j < MV_WIN_USB3_MAX; j++) { if (win_usb3_cr_read(base, j) & IO_WIN_ENA_MASK) continue; win_usb3_br_write(base, j, br); win_usb3_cr_write(base, j, cr); break; } } } } 
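
#if 0
/*
 * Editor's sketch: the control-word packing repeated by the USB3/AHCI/SDHCI
 * routines in this file, written once as a hypothetical helper
 * (mk_io_win_cr() does not exist in the tree).  Layout: window length minus
 * one in the size field, then attribute, target and the enable bit.
 */
static __inline uint32_t
mk_io_win_cr(uint32_t size, uint32_t attr, uint32_t target)
{

	return (((size - 1) & (IO_WIN_SIZE_MASK << IO_WIN_SIZE_SHIFT)) |
	    (attr << IO_WIN_ATTR_SHIFT) | (target << IO_WIN_TGT_SHIFT) |
	    IO_WIN_ENA_MASK);
}
#endif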
#else /* * Provide dummy functions to satisfy the build * for SoCs not equipped with USB3 */ static int decode_win_usb3_valid(void) { return (1); } static void decode_win_usb3_setup(u_long base) { } static void decode_win_usb3_dump(u_long base) { } #endif /************************************************************************** * ETH windows routines **************************************************************************/ static int win_eth_can_remap(int i) { /* ETH encode windows 0-3 have remap capability */ if (i < 4) return (1); return (0); } static int eth_bare_read(uint32_t base, int i) { uint32_t v; v = win_eth_bare_read(base); v &= (1 << i); return (v >> i); } static void eth_bare_write(uint32_t base, int i, int val) { uint32_t v; v = win_eth_bare_read(base); v &= ~(1 << i); v |= (val << i); win_eth_bare_write(base, v); } static void eth_epap_write(uint32_t base, int i, int val) { uint32_t v; v = win_eth_epap_read(base); v &= ~(0x3 << (i * 2)); v |= (val << (i * 2)); win_eth_epap_write(base, v); } static void decode_win_eth_dump(u_long base) { int i; if (pm_is_disabled(CPU_PM_CTRL_GE(eth_port - 1))) return; for (i = 0; i < MV_WIN_ETH_MAX; i++) { printf("ETH window#%d: b 0x%08x, s 0x%08x", i, win_eth_br_read(base, i), win_eth_sz_read(base, i)); if (win_eth_can_remap(i)) printf(", ha 0x%08x", win_eth_har_read(base, i)); printf("\n"); } printf("ETH windows: bare 0x%08x, epap 0x%08x\n", win_eth_bare_read(base), win_eth_epap_read(base)); } #define MV_WIN_ETH_DDR_TRGT(n) ddr_target(n) static void decode_win_eth_setup(u_long base) { uint32_t br, sz; int i, j; if (pm_is_disabled(CPU_PM_CTRL_GE(eth_port))) return; eth_port++; /* Disable, clear and revoke protection for all ETH windows */ for (i = 0; i < MV_WIN_ETH_MAX; i++) { eth_bare_write(base, i, 1); eth_epap_write(base, i, 0); win_eth_br_write(base, i, 0); win_eth_sz_write(base, i, 0); if (win_eth_can_remap(i)) win_eth_har_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { br = ddr_base(i) | (ddr_attr(i) << 8) | MV_WIN_ETH_DDR_TRGT(i); sz = ((ddr_size(i) - 1) & 0xffff0000); /* Set the first free ETH window */ for (j = 0; j < MV_WIN_ETH_MAX; j++) { if (eth_bare_read(base, j) == 0) continue; win_eth_br_write(base, j, br); win_eth_sz_write(base, j, sz); /* XXX remapping ETH windows not supported */ /* Set protection RW */ eth_epap_write(base, j, 0x3); /* Enable window */ eth_bare_write(base, j, 0); break; } } +} + +static void +decode_win_neta_dump(u_long base) +{ + + decode_win_eth_dump(base + MV_WIN_NETA_OFFSET); +} + +static void +decode_win_neta_setup(u_long base) +{ + + decode_win_eth_setup(base + MV_WIN_NETA_OFFSET); } static int decode_win_eth_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_ETH_MAX)); } /************************************************************************** * PCIE windows routines **************************************************************************/ static void decode_win_pcie_dump(u_long base) { int i; printf("PCIE windows base 0x%08lx\n", base); for (i = 0; i < MV_WIN_PCIE_MAX; i++) printf("PCIE window#%d: cr 0x%08x br 0x%08x remap 0x%08x\n", i, win_pcie_cr_read(base, i), win_pcie_br_read(base, i), win_pcie_remap_read(base, i)); for (i = 0; i < MV_PCIE_BAR_MAX; i++) printf("PCIE bar#%d: cr 0x%08x br 0x%08x brh 0x%08x\n", i, pcie_bar_cr_read(base, i), pcie_bar_br_read(base, i), pcie_bar_brh_read(base, i)); } void decode_win_pcie_setup(u_long base) { uint32_t size = 0, ddrbase = ~0; uint32_t cr, br; int i, j; for (i 
= 0; i < MV_PCIE_BAR_MAX; i++) { pcie_bar_br_write(base, i, MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN); if (i < 3) pcie_bar_brh_write(base, i, 0); if (i > 0) pcie_bar_cr_write(base, i, 0); } for (i = 0; i < MV_WIN_PCIE_MAX; i++) { win_pcie_cr_write(base, i, 0); win_pcie_br_write(base, i, 0); win_pcie_remap_write(base, i, 0); } /* On End-Point only set BAR size to 1MB regardless of DDR size */ if ((bus_space_read_4(fdtbus_bs_tag, base, MV_PCIE_CONTROL) & MV_PCIE_ROOT_CMPLX) == 0) { pcie_bar_cr_write(base, 1, 0xf0000 | 1); return; } for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { /* Map DDR to BAR 1 */ cr = (ddr_size(i) - 1) & 0xffff0000; size += ddr_size(i) & 0xffff0000; cr |= (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1; br = ddr_base(i); if (br < ddrbase) ddrbase = br; /* Use the first available PCIE window */ for (j = 0; j < MV_WIN_PCIE_MAX; j++) { if (win_pcie_cr_read(base, j) != 0) continue; win_pcie_br_write(base, j, br); win_pcie_cr_write(base, j, cr); break; } } } /* * Upper 16 bits in BAR register is interpreted as BAR size * (in 64 kB units) plus 64kB, so subtract 0x10000 * form value passed to register to get correct value. */ size -= 0x10000; pcie_bar_cr_write(base, 1, size | 1); pcie_bar_br_write(base, 1, ddrbase | MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN); pcie_bar_br_write(base, 0, fdt_immr_pa | MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN); } static int decode_win_pcie_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_PCIE_MAX)); } /************************************************************************** * IDMA windows routines **************************************************************************/ #if defined(SOC_MV_ORION) || defined(SOC_MV_DISCOVERY) static int idma_bare_read(u_long base, int i) { uint32_t v; v = win_idma_bare_read(base); v &= (1 << i); return (v >> i); } static void idma_bare_write(u_long base, int i, int val) { uint32_t v; v = win_idma_bare_read(base); v &= ~(1 << i); v |= (val << i); win_idma_bare_write(base, v); } /* * Sets channel protection 'val' for window 'w' on channel 'c' */ static void idma_cap_write(u_long base, int c, int w, int val) { uint32_t v; v = win_idma_cap_read(base, c); v &= ~(0x3 << (w * 2)); v |= (val << (w * 2)); win_idma_cap_write(base, c, v); } /* * Set protection 'val' on all channels for window 'w' */ static void idma_set_prot(u_long base, int w, int val) { int c; for (c = 0; c < MV_IDMA_CHAN_MAX; c++) idma_cap_write(base, c, w, val); } static int win_idma_can_remap(int i) { /* IDMA decode windows 0-3 have remap capability */ if (i < 4) return (1); return (0); } void decode_win_idma_setup(u_long base) { uint32_t br, sz; int i, j; if (pm_is_disabled(CPU_PM_CTRL_IDMA)) return; /* * Disable and clear all IDMA windows, revoke protection for all channels */ for (i = 0; i < MV_WIN_IDMA_MAX; i++) { idma_bare_write(base, i, 1); win_idma_br_write(base, i, 0); win_idma_sz_write(base, i, 0); if (win_idma_can_remap(i) == 1) win_idma_har_write(base, i, 0); } for (i = 0; i < MV_IDMA_CHAN_MAX; i++) win_idma_cap_write(base, i, 0); /* * Set up access to all active DRAM banks */ for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { br = ddr_base(i) | (ddr_attr(i) << 8) | ddr_target(i); sz = ((ddr_size(i) - 1) & 0xffff0000); /* Place DDR entries in non-remapped windows */ for (j = 0; j < MV_WIN_IDMA_MAX; j++) if (win_idma_can_remap(j) != 1 && idma_bare_read(base, j) == 1) { /* Configure window */ win_idma_br_write(base, j, br); win_idma_sz_write(base, j, sz); /* Set protection RW on all channels */ 
idma_set_prot(base, j, 0x3); /* Enable window */ idma_bare_write(base, j, 0); break; } } /* * Remaining targets -- from statically defined table */ for (i = 0; i < idma_wins_no; i++) if (idma_wins[i].target > 0) { br = (idma_wins[i].base & 0xffff0000) | (idma_wins[i].attr << 8) | idma_wins[i].target; sz = ((idma_wins[i].size - 1) & 0xffff0000); /* Set the first free IDMA window */ for (j = 0; j < MV_WIN_IDMA_MAX; j++) { if (idma_bare_read(base, j) == 0) continue; /* Configure window */ win_idma_br_write(base, j, br); win_idma_sz_write(base, j, sz); if (win_idma_can_remap(j) && idma_wins[j].remap >= 0) win_idma_har_write(base, j, idma_wins[j].remap); /* Set protection RW on all channels */ idma_set_prot(base, j, 0x3); /* Enable window */ idma_bare_write(base, j, 0); break; } } } int decode_win_idma_valid(void) { const struct decode_win *wintab; int c, i, j, rv; uint32_t b, e, s; if (idma_wins_no > MV_WIN_IDMA_MAX) { printf("IDMA windows: too many entries: %d\n", idma_wins_no); return (0); } for (i = 0, c = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) c++; if (idma_wins_no > (MV_WIN_IDMA_MAX - c)) { printf("IDMA windows: too many entries: %d, available: %d\n", idma_wins_no, MV_WIN_IDMA_MAX - c); return (0); } wintab = idma_wins; rv = 1; for (i = 0; i < idma_wins_no; i++, wintab++) { if (wintab->target == 0) { printf("IDMA window#%d: DDR target window is not " "supposed to be reprogrammed!\n", i); rv = 0; } if (wintab->remap >= 0 && win_cpu_can_remap(i) != 1) { printf("IDMA window#%d: not capable of remapping, but " "val 0x%08x defined\n", i, wintab->remap); rv = 0; } s = wintab->size; b = wintab->base; e = b + s - 1; if (s > (0xFFFFFFFF - b + 1)) { /* XXX this boundary check should account for 64bit and * remapping.. */ printf("IDMA window#%d: no space for size 0x%08x at " "0x%08x\n", i, s, b); rv = 0; continue; } j = decode_win_overlap(i, idma_wins_no, &idma_wins[0]); if (j >= 0) { printf("IDMA window#%d: (0x%08x - 0x%08x) overlaps " "with #%d (0x%08x - 0x%08x)\n", i, b, e, j, idma_wins[j].base, idma_wins[j].base + idma_wins[j].size - 1); rv = 0; } } return (rv); } void decode_win_idma_dump(u_long base) { int i; if (pm_is_disabled(CPU_PM_CTRL_IDMA)) return; for (i = 0; i < MV_WIN_IDMA_MAX; i++) { printf("IDMA window#%d: b 0x%08x, s 0x%08x", i, win_idma_br_read(base, i), win_idma_sz_read(base, i)); if (win_idma_can_remap(i)) printf(", ha 0x%08x", win_idma_har_read(base, i)); printf("\n"); } for (i = 0; i < MV_IDMA_CHAN_MAX; i++) printf("IDMA channel#%d: ap 0x%08x\n", i, win_idma_cap_read(base, i)); printf("IDMA windows: bare 0x%08x\n", win_idma_bare_read(base)); } #else /* Provide dummy functions to satisfy the build for SoCs not equipped with IDMA */ int decode_win_idma_valid(void) { return (1); } void decode_win_idma_setup(u_long base) { } void decode_win_idma_dump(u_long base) { } #endif /************************************************************************** * XOR windows routines **************************************************************************/ #if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) static int xor_ctrl_read(u_long base, int i, int c, int e) { uint32_t v; v = win_xor_ctrl_read(base, c, e); v &= (1 << i); return (v >> i); } static void xor_ctrl_write(u_long base, int i, int c, int e, int val) { uint32_t v; v = win_xor_ctrl_read(base, c, e); v &= ~(1 << i); v |= (val << i); win_xor_ctrl_write(base, c, e, v); } /* * Set channel protection 'val' for window 'w' on channel 'c' */ static void xor_chan_write(u_long base, int c, int e, int w, int val) { 
uint32_t v; v = win_xor_ctrl_read(base, c, e); v &= ~(0x3 << (w * 2 + 16)); v |= (val << (w * 2 + 16)); win_xor_ctrl_write(base, c, e, v); } /* * Set protection 'val' on all channels for window 'w' on engine 'e' */ static void xor_set_prot(u_long base, int w, int e, int val) { int c; for (c = 0; c < MV_XOR_CHAN_MAX; c++) xor_chan_write(base, c, e, w, val); } static int win_xor_can_remap(int i) { /* XOR decode windows 0-3 have remap capability */ if (i < 4) return (1); return (0); } static int xor_max_eng(void) { uint32_t dev, rev; soc_id(&dev, &rev); switch (dev) { case MV_DEV_88F6281: case MV_DEV_88F6282: case MV_DEV_MV78130: case MV_DEV_MV78160: case MV_DEV_MV78230: case MV_DEV_MV78260: case MV_DEV_MV78460: return (2); case MV_DEV_MV78100: case MV_DEV_MV78100_Z0: return (1); default: return (0); } } static void xor_active_dram(u_long base, int c, int e, int *window) { uint32_t br, sz; int i, m, w; /* * Set up access to all active DRAM banks */ m = xor_max_eng(); for (i = 0; i < m; i++) if (ddr_is_active(i)) { br = ddr_base(i) | (ddr_attr(i) << 8) | ddr_target(i); sz = ((ddr_size(i) - 1) & 0xffff0000); /* Place DDR entries in non-remapped windows */ for (w = 0; w < MV_WIN_XOR_MAX; w++) if (win_xor_can_remap(w) != 1 && (xor_ctrl_read(base, w, c, e) == 0) && w > *window) { /* Configure window */ win_xor_br_write(base, w, e, br); win_xor_sz_write(base, w, e, sz); /* Set protection RW on all channels */ xor_set_prot(base, w, e, 0x3); /* Enable window */ xor_ctrl_write(base, w, c, e, 1); (*window)++; break; } } } void decode_win_xor_setup(u_long base) { uint32_t br, sz; int i, j, z, e = 1, m, window; if (pm_is_disabled(CPU_PM_CTRL_XOR)) return; /* * Disable and clear all XOR windows, revoke protection for all * channels */ m = xor_max_eng(); for (j = 0; j < m; j++, e--) { /* Number of non-remaped windows */ window = MV_XOR_NON_REMAP - 1; for (i = 0; i < MV_WIN_XOR_MAX; i++) { win_xor_br_write(base, i, e, 0); win_xor_sz_write(base, i, e, 0); } if (win_xor_can_remap(i) == 1) win_xor_har_write(base, i, e, 0); for (i = 0; i < MV_XOR_CHAN_MAX; i++) { win_xor_ctrl_write(base, i, e, 0); xor_active_dram(base, i, e, &window); } /* * Remaining targets -- from a statically defined table */ for (i = 0; i < xor_wins_no; i++) if (xor_wins[i].target > 0) { br = (xor_wins[i].base & 0xffff0000) | (xor_wins[i].attr << 8) | xor_wins[i].target; sz = ((xor_wins[i].size - 1) & 0xffff0000); /* Set the first free XOR window */ for (z = 0; z < MV_WIN_XOR_MAX; z++) { if (xor_ctrl_read(base, z, 0, e) && xor_ctrl_read(base, z, 1, e)) continue; /* Configure window */ win_xor_br_write(base, z, e, br); win_xor_sz_write(base, z, e, sz); if (win_xor_can_remap(z) && xor_wins[z].remap >= 0) win_xor_har_write(base, z, e, xor_wins[z].remap); /* Set protection RW on all channels */ xor_set_prot(base, z, e, 0x3); /* Enable window */ xor_ctrl_write(base, z, 0, e, 1); xor_ctrl_write(base, z, 1, e, 1); break; } } } } int decode_win_xor_valid(void) { const struct decode_win *wintab; int c, i, j, rv; uint32_t b, e, s; if (xor_wins_no > MV_WIN_XOR_MAX) { printf("XOR windows: too many entries: %d\n", xor_wins_no); return (0); } for (i = 0, c = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) c++; if (xor_wins_no > (MV_WIN_XOR_MAX - c)) { printf("XOR windows: too many entries: %d, available: %d\n", xor_wins_no, MV_WIN_IDMA_MAX - c); return (0); } wintab = xor_wins; rv = 1; for (i = 0; i < xor_wins_no; i++, wintab++) { if (wintab->target == 0) { printf("XOR window#%d: DDR target window is not " "supposed to be reprogrammed!\n", i); rv = 0; 
} if (wintab->remap >= 0 && win_cpu_can_remap(i) != 1) { printf("XOR window#%d: not capable of remapping, but " "val 0x%08x defined\n", i, wintab->remap); rv = 0; } s = wintab->size; b = wintab->base; e = b + s - 1; if (s > (0xFFFFFFFF - b + 1)) { /* * XXX this boundary check should account for 64bit * and remapping.. */ printf("XOR window#%d: no space for size 0x%08x at " "0x%08x\n", i, s, b); rv = 0; continue; } j = decode_win_overlap(i, xor_wins_no, &xor_wins[0]); if (j >= 0) { printf("XOR window#%d: (0x%08x - 0x%08x) overlaps " "with #%d (0x%08x - 0x%08x)\n", i, b, e, j, xor_wins[j].base, xor_wins[j].base + xor_wins[j].size - 1); rv = 0; } } return (rv); } void decode_win_xor_dump(u_long base) { int i, j; int e = 1; if (pm_is_disabled(CPU_PM_CTRL_XOR)) return; for (j = 0; j < xor_max_eng(); j++, e--) { for (i = 0; i < MV_WIN_XOR_MAX; i++) { printf("XOR window#%d: b 0x%08x, s 0x%08x", i, win_xor_br_read(base, i, e), win_xor_sz_read(base, i, e)); if (win_xor_can_remap(i)) printf(", ha 0x%08x", win_xor_har_read(base, i, e)); printf("\n"); } for (i = 0; i < MV_XOR_CHAN_MAX; i++) printf("XOR control#%d: 0x%08x\n", i, win_xor_ctrl_read(base, i, e)); } } #else /* Provide dummy functions to satisfy the build for SoCs not equipped with XOR */ static int decode_win_xor_valid(void) { return (1); } static void decode_win_xor_setup(u_long base) { } static void decode_win_xor_dump(u_long base) { } #endif /************************************************************************** * SATA windows routines **************************************************************************/ static void decode_win_sata_setup(u_long base) { uint32_t cr, br; int i, j; if (pm_is_disabled(CPU_PM_CTRL_SATA)) return; for (i = 0; i < MV_WIN_SATA_MAX; i++) { win_sata_cr_write(base, i, 0); win_sata_br_write(base, i, 0); } for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { cr = ((ddr_size(i) - 1) & 0xffff0000) | (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1; br = ddr_base(i); /* Use the first available SATA window */ for (j = 0; j < MV_WIN_SATA_MAX; j++) { if ((win_sata_cr_read(base, j) & 1) != 0) continue; win_sata_br_write(base, j, br); win_sata_cr_write(base, j, cr); break; } } } #ifdef SOC_MV_ARMADA38X /* * Configure AHCI decoding windows */ static void decode_win_ahci_setup(u_long base) { uint32_t br, cr, sz; int i, j; for (i = 0; i < MV_WIN_SATA_MAX; i++) { win_sata_cr_write(base, i, 0); win_sata_br_write(base, i, 0); win_sata_sz_write(base, i, 0); } for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { cr = (ddr_attr(i) << IO_WIN_ATTR_SHIFT) | (ddr_target(i) << IO_WIN_TGT_SHIFT) | IO_WIN_ENA_MASK; br = ddr_base(i); sz = (ddr_size(i) - 1) & (IO_WIN_SIZE_MASK << IO_WIN_SIZE_SHIFT); /* Use first available SATA window */ for (j = 0; j < MV_WIN_SATA_MAX; j++) { if (win_sata_cr_read(base, j) & IO_WIN_ENA_MASK) continue; /* BASE is set to DRAM base (0x00000000) */ win_sata_br_write(base, j, br); /* CTRL targets DRAM ctrl with 0x0E or 0x0D */ win_sata_cr_write(base, j, cr); /* SIZE is set to 16MB - max value */ win_sata_sz_write(base, j, sz); break; } } } } static void decode_win_ahci_dump(u_long base) { int i; for (i = 0; i < MV_WIN_SATA_MAX; i++) printf("SATA window#%d: cr 0x%08x, br 0x%08x, sz 0x%08x\n", i, win_sata_cr_read(base, i), win_sata_br_read(base, i), win_sata_sz_read(base,i)); } #else /* * Provide dummy functions to satisfy the build * for SoC's not equipped with AHCI controller */ static void decode_win_ahci_setup(u_long base) { } static void decode_win_ahci_dump(u_long base) { } #endif 
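
#if 0
/*
 * Editor's sketch of the loop shared by the CESA/USB/SATA/SDHCI setup
 * routines above and below: every active DDR bank is programmed into the
 * first still-disabled window of the device.  MV_WIN_DEV_MAX and the
 * dev_*_read/write accessors are placeholders, not real symbols.
 */
static void
decode_win_dev_setup_sketch(u_long base)
{
	uint32_t br, cr;
	int i, j;

	for (i = 0; i < MV_WIN_DDR_MAX; i++) {
		if (!ddr_is_active(i))
			continue;
		br = ddr_base(i);
		cr = ((ddr_size(i) - 1) & 0xffff0000) |
		    (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1;
		/* Claim the first window whose enable bit is clear. */
		for (j = 0; j < MV_WIN_DEV_MAX; j++) {
			if (dev_cr_read(base, j) & 1)
				continue;
			dev_br_write(base, j, br);
			dev_cr_write(base, j, cr);
			break;
		}
	}
}
#endif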
static int decode_win_sata_valid(void) { uint32_t dev, rev; soc_id(&dev, &rev); if (dev == MV_DEV_88F5281) return (1); return (decode_win_can_cover_ddr(MV_WIN_SATA_MAX)); } static void decode_win_sdhci_setup(u_long base) { uint32_t cr, br; int i, j; for (i = 0; i < MV_WIN_SDHCI_MAX; i++) { win_sdhci_cr_write(base, i, 0); win_sdhci_br_write(base, i, 0); } for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { br = ddr_base(i); cr = (((ddr_size(i) - 1) & (IO_WIN_SIZE_MASK << IO_WIN_SIZE_SHIFT)) | (ddr_attr(i) << IO_WIN_ATTR_SHIFT) | (ddr_target(i) << IO_WIN_TGT_SHIFT) | IO_WIN_ENA_MASK); /* Use the first available SDHCI window */ for (j = 0; j < MV_WIN_SDHCI_MAX; j++) { if (win_sdhci_cr_read(base, j) & IO_WIN_ENA_MASK) continue; win_sdhci_cr_write(base, j, cr); win_sdhci_br_write(base, j, br); break; } } } static void decode_win_sdhci_dump(u_long base) { int i; for (i = 0; i < MV_WIN_SDHCI_MAX; i++) printf("SDHCI window#%d: c 0x%08x, b 0x%08x\n", i, win_sdhci_cr_read(base, i), win_sdhci_br_read(base, i)); } static int decode_win_sdhci_valid(void) { #ifdef SOC_MV_ARMADA38X return (decode_win_can_cover_ddr(MV_WIN_SDHCI_MAX)); #endif /* Satisfy platforms not equipped with this controller. */ return (1); } /************************************************************************** * FDT parsing routines. **************************************************************************/ static int fdt_get_ranges(const char *nodename, void *buf, int size, int *tuples, int *tuplesize) { phandle_t node; pcell_t addr_cells, par_addr_cells, size_cells; int len, tuple_size, tuples_count; node = OF_finddevice(nodename); if (node == -1) return (EINVAL); if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (ENXIO); par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 2) return (ERANGE); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); /* Note the OF_getprop_alloc() cannot be used at this early stage. */ len = OF_getprop(node, "ranges", buf, size); /* * XXX this does not handle the empty 'ranges;' case, which is * legitimate and should be allowed. */ tuples_count = len / tuple_size; if (tuples_count <= 0) return (ERANGE); if (par_addr_cells > 2 || addr_cells > 2 || size_cells > 2) return (ERANGE); *tuples = tuples_count; *tuplesize = tuple_size; return (0); } static int win_cpu_from_dt(void) { pcell_t ranges[48]; phandle_t node; int i, entry_size, err, t, tuple_size, tuples; u_long sram_base, sram_size; t = 0; /* Retrieve 'ranges' property of '/localbus' node. */ if ((err = fdt_get_ranges("/localbus", ranges, sizeof(ranges), &tuples, &tuple_size)) == 0) { /* * Fill CPU decode windows table. */ bzero((void *)&cpu_win_tbl, sizeof(cpu_win_tbl)); entry_size = tuple_size / sizeof(pcell_t); cpu_wins_no = tuples; /* Check range */ if (tuples > nitems(cpu_win_tbl)) { debugf("too many tuples to fit into cpu_win_tbl\n"); return (ENOMEM); } for (i = 0, t = 0; t < tuples; i += entry_size, t++) { cpu_win_tbl[t].target = 1; cpu_win_tbl[t].attr = fdt32_to_cpu(ranges[i + 1]); cpu_win_tbl[t].base = fdt32_to_cpu(ranges[i + 2]); cpu_win_tbl[t].size = fdt32_to_cpu(ranges[i + 3]); cpu_win_tbl[t].remap = ~0; debugf("target = 0x%0x attr = 0x%0x base = 0x%0x " "size = 0x%0x remap = 0x%0x\n", cpu_win_tbl[t].target, cpu_win_tbl[t].attr, cpu_win_tbl[t].base, cpu_win_tbl[t].size, cpu_win_tbl[t].remap); } } /* * Retrieve CESA SRAM data. 
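	 *
	 * Editor's example (size hypothetical): an SRAM node compatible with
	 * "mrvl,cesa-sram" and reg = <0xf1100000 0x10000> becomes a
	 * cpu_win_tbl[] entry with target MV_WIN_CESA_TARGET, base
	 * 0xf1100000 (MV_CESA_SRAM_BASE in mvwin.h), size 64 KB and
	 * remap ~0.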
*/ if ((node = OF_finddevice("sram")) != -1) if (ofw_bus_node_is_compatible(node, "mrvl,cesa-sram")) goto moveon; if ((node = OF_finddevice("/")) == 0) return (ENXIO); if ((node = fdt_find_compatible(node, "mrvl,cesa-sram", 0)) == 0) /* SRAM block is not always present. */ return (0); moveon: sram_base = sram_size = 0; if (fdt_regsize(node, &sram_base, &sram_size) != 0) return (EINVAL); /* Check range */ if (t >= nitems(cpu_win_tbl)) { debugf("cannot fit CESA tuple into cpu_win_tbl\n"); return (ENOMEM); } cpu_win_tbl[t].target = MV_WIN_CESA_TARGET; #ifdef SOC_MV_ARMADA38X cpu_win_tbl[t].attr = MV_WIN_CESA_ATTR(0); #else cpu_win_tbl[t].attr = MV_WIN_CESA_ATTR(1); #endif cpu_win_tbl[t].base = sram_base; cpu_win_tbl[t].size = sram_size; cpu_win_tbl[t].remap = ~0; cpu_wins_no++; debugf("sram: base = 0x%0lx size = 0x%0lx\n", sram_base, sram_size); /* Check if there is a second CESA node */ while ((node = OF_peer(node)) != 0) { if (ofw_bus_node_is_compatible(node, "mrvl,cesa-sram")) { if (fdt_regsize(node, &sram_base, &sram_size) != 0) return (EINVAL); break; } } if (node == 0) return (0); t++; if (t >= nitems(cpu_win_tbl)) { debugf("cannot fit CESA tuple into cpu_win_tbl\n"); return (ENOMEM); } /* Configure window for CESA1 */ cpu_win_tbl[t].target = MV_WIN_CESA_TARGET; cpu_win_tbl[t].attr = MV_WIN_CESA_ATTR(1); cpu_win_tbl[t].base = sram_base; cpu_win_tbl[t].size = sram_size; cpu_win_tbl[t].remap = ~0; cpu_wins_no++; debugf("sram: base = 0x%0lx size = 0x%0lx\n", sram_base, sram_size); return (0); } static int fdt_win_process(phandle_t child) { int i; struct soc_node_spec *soc_node; int addr_cells, size_cells; pcell_t reg[8]; u_long size, base; for (i = 0; soc_nodes[i].compat != NULL; i++) { soc_node = &soc_nodes[i]; /* Setup only for enabled devices */ if (ofw_bus_node_status_okay(child) == 0) continue; if (!ofw_bus_node_is_compatible(child, soc_node->compat)) continue; if (fdt_addrsize_cells(OF_parent(child), &addr_cells, &size_cells)) return (ENXIO); if ((sizeof(pcell_t) * (addr_cells + size_cells)) > sizeof(reg)) return (ENOMEM); if (OF_getprop(child, "reg", ®, sizeof(reg)) <= 0) return (EINVAL); if (addr_cells <= 2) base = fdt_data_get(®[0], addr_cells); else base = fdt_data_get(®[addr_cells - 2], 2); size = fdt_data_get(®[addr_cells], size_cells); base = (base & 0x000fffff) | fdt_immr_va; if (soc_node->decode_handler != NULL) soc_node->decode_handler(base); else return (ENXIO); if (MV_DUMP_WIN && (soc_node->dump_handler != NULL)) soc_node->dump_handler(base); } return (0); } static int fdt_win_setup(void) { phandle_t node, child, sb; phandle_t child_pci; int err; sb = 0; node = OF_finddevice("/"); if (node == -1) panic("fdt_win_setup: no root node"); /* * Traverse through all children of root and simple-bus nodes. * For each found device retrieve decode windows data (if applicable). */ child = OF_child(node); while (child != 0) { /* Lookup for callback and run */ err = fdt_win_process(child); if (err != 0) return (err); /* Process Marvell Armada-XP/38x PCIe controllers */ if (ofw_bus_node_is_compatible(child, "marvell,armada-370-pcie")) { child_pci = OF_child(child); while (child_pci != 0) { err = fdt_win_process(child_pci); if (err != 0) return (err); child_pci = OF_peer(child_pci); } } /* * Once done with root-level children let's move down to * simple-bus and its children. 
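		 *
		 * Editor's example for fdt_win_process() above (address from
		 * mvwin.h, node hypothetical): a node with reg base
		 * 0xf1030000 keeps only the block offset,
		 * (0xf1030000 & 0x000fffff) = 0x30000, which is rebased onto
		 * fdt_immr_va, so the decode handler receives the virtual
		 * address of the block's registers.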
*/ child = OF_peer(child); if ((child == 0) && (node == OF_finddevice("/"))) { sb = node = fdt_find_compatible(node, "simple-bus", 0); if (node == 0) return (ENXIO); child = OF_child(node); } /* * Next, move one more level down to internal-regs node (if * it is present) and its children. This node also have * "simple-bus" compatible. */ if ((child == 0) && (node == sb)) { node = fdt_find_compatible(node, "simple-bus", 0); if (node == 0) return (0); child = OF_child(node); } } return (0); } static void fdt_fixup_busfreq(phandle_t root) { phandle_t sb; pcell_t freq; freq = cpu_to_fdt32(get_tclk()); /* * Fix bus speed in cpu node */ if ((sb = OF_finddevice("cpu")) != 0) if (fdt_is_compatible_strict(sb, "ARM,88VS584")) OF_setprop(sb, "bus-frequency", (void *)&freq, sizeof(freq)); /* * This fixup sets the simple-bus bus-frequency property. */ if ((sb = fdt_find_compatible(root, "simple-bus", 1)) != 0) OF_setprop(sb, "bus-frequency", (void *)&freq, sizeof(freq)); } static void fdt_fixup_ranges(phandle_t root) { phandle_t node; pcell_t par_addr_cells, addr_cells, size_cells; pcell_t ranges[3], reg[2], *rangesptr; int len, tuple_size, tuples_count; uint32_t base; /* Fix-up SoC ranges according to real fdt_immr_pa */ if ((node = fdt_find_compatible(root, "simple-bus", 1)) != 0) { if (fdt_addrsize_cells(node, &addr_cells, &size_cells) == 0 && (par_addr_cells = fdt_parent_addr_cells(node) <= 2)) { tuple_size = sizeof(pcell_t) * (par_addr_cells + addr_cells + size_cells); len = OF_getprop(node, "ranges", ranges, sizeof(ranges)); tuples_count = len / tuple_size; /* Unexpected settings are not supported */ if (tuples_count != 1) goto fixup_failed; rangesptr = &ranges[0]; rangesptr += par_addr_cells; base = fdt_data_get((void *)rangesptr, addr_cells); *rangesptr = cpu_to_fdt32(fdt_immr_pa); if (OF_setprop(node, "ranges", (void *)&ranges[0], sizeof(ranges)) < 0) goto fixup_failed; } } /* Fix-up PCIe reg according to real PCIe registers' PA */ if ((node = fdt_find_compatible(root, "mrvl,pcie", 1)) != 0) { if (fdt_addrsize_cells(OF_parent(node), &par_addr_cells, &size_cells) == 0) { tuple_size = sizeof(pcell_t) * (par_addr_cells + size_cells); len = OF_getprop(node, "reg", reg, sizeof(reg)); tuples_count = len / tuple_size; /* Unexpected settings are not supported */ if (tuples_count != 1) goto fixup_failed; base = fdt_data_get((void *)®[0], par_addr_cells); base &= ~0xFF000000; base |= fdt_immr_pa; reg[0] = cpu_to_fdt32(base); if (OF_setprop(node, "reg", (void *)®[0], sizeof(reg)) < 0) goto fixup_failed; } } /* Fix-up succeeded. May return and continue */ return; fixup_failed: while (1) { /* * In case of any error while fixing ranges just hang. * 1. No message can be displayed yet since console * is not initialized. * 2. Going further will cause failure on bus_space_map() * relying on the wrong ranges or data abort when * accessing PCIe registers. 
*/ } } struct fdt_fixup_entry fdt_fixup_table[] = { { "mrvl,DB-88F6281", &fdt_fixup_busfreq }, { "mrvl,DB-78460", &fdt_fixup_busfreq }, { "mrvl,DB-78460", &fdt_fixup_ranges }, { NULL, NULL } }; #ifndef INTRNG static int fdt_pic_decode_ic(phandle_t node, pcell_t *intr, int *interrupt, int *trig, int *pol) { if (!ofw_bus_node_is_compatible(node, "mrvl,pic") && !ofw_bus_node_is_compatible(node, "mrvl,mpic")) return (ENXIO); *interrupt = fdt32_to_cpu(intr[0]); *trig = INTR_TRIGGER_CONFORM; *pol = INTR_POLARITY_CONFORM; return (0); } fdt_pic_decode_t fdt_pic_table[] = { #ifdef SOC_MV_ARMADA38X &gic_decode_fdt, #endif &fdt_pic_decode_ic, NULL }; #endif uint64_t get_sar_value(void) { uint32_t sar_low, sar_high; #if defined(SOC_MV_ARMADAXP) sar_high = bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, SAMPLE_AT_RESET_HI); sar_low = bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, SAMPLE_AT_RESET_LO); #elif defined(SOC_MV_ARMADA38X) sar_high = 0; sar_low = bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, SAMPLE_AT_RESET); #else /* * TODO: Add getting proper values for other SoC configurations */ sar_high = 0; sar_low = 0; #endif return (((uint64_t)sar_high << 32) | sar_low); } Index: head/sys/arm/mv/mvwin.h =================================================================== --- head/sys/arm/mv/mvwin.h (revision 319906) +++ head/sys/arm/mv/mvwin.h (revision 319907) @@ -1,421 +1,425 @@ /*- * Copyright (C) 2007-2011 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MVWIN_H_ #define _MVWIN_H_ /* * Decode windows addresses. * * All decoding windows must be aligned to their size, which has to be * a power of 2. */ /* * SoC Integrated devices: 0xF1000000, 16 MB (VA == PA) */ /* SoC Regs */ #define MV_PHYS_BASE 0xF1000000 #define MV_SIZE (1024 * 1024) /* 1 MB */ /* SRAM */ #define MV_CESA_SRAM_BASE 0xF1100000 /* * External devices: 0x80000000, 1 GB (VA == PA) * Includes Device Bus, PCI and PCIE. 
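 *
 * A rough sketch of the fixed VA == PA layout defined in this header
 * (sizes and bases per the macros that follow; illustrative summary
 * only):
 *
 *	0x80000000	PCI/PCIE memory (512 MB, sliced per port)
 *	0xBF000000	PCI/PCIE I/O (16 MB, sliced per port)
 *	0xF1000000	SoC registers
 *	0xF9300000	Device Bus (boot area and chip selects)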
*/ #if defined(SOC_MV_ORION) #define MV_PCI_PORTS 2 /* 1x PCI + 1x PCIE */ #elif defined(SOC_MV_KIRKWOOD) #define MV_PCI_PORTS 1 /* 1x PCIE */ #elif defined(SOC_MV_DISCOVERY) #define MV_PCI_PORTS 8 /* 8x PCIE */ #elif defined(SOC_MV_ARMADAXP) #define MV_PCI_PORTS 3 /* 3x PCIE */ #elif defined(SOC_MV_ARMADA38X) #define MV_PCI_PORTS 4 /* 4x PCIE */ #else #error "MV_PCI_PORTS not configured !" #endif /* PCI/PCIE Memory */ #define MV_PCI_MEM_PHYS_BASE 0x80000000 #define MV_PCI_MEM_SIZE (512 * 1024 * 1024) /* 512 MB */ #define MV_PCI_MEM_BASE MV_PCI_MEM_PHYS_BASE #define MV_PCI_MEM_SLICE_SIZE (MV_PCI_MEM_SIZE / MV_PCI_PORTS) #define MV_PCI_MEM_SLICE(n) (MV_PCI_MEM_BASE + ((n) * \ MV_PCI_MEM_SLICE_SIZE)) /* PCI/PCIE I/O */ #define MV_PCI_IO_PHYS_BASE 0xBF000000 #define MV_PCI_IO_SIZE (16 * 1024 * 1024) /* 16 MB */ #define MV_PCI_IO_BASE MV_PCI_IO_PHYS_BASE #define MV_PCI_IO_SLICE_SIZE (MV_PCI_IO_SIZE / MV_PCI_PORTS) #define MV_PCI_IO_SLICE(n) (MV_PCI_IO_BASE + ((n) * MV_PCI_IO_SLICE_SIZE)) #define MV_PCI_VA_MEM_BASE 0 #define MV_PCI_VA_IO_BASE 0 /* * Device Bus (VA == PA) */ #define MV_DEV_BOOT_BASE 0xF9300000 #define MV_DEV_BOOT_SIZE (1024 * 1024) /* 1 MB */ #define MV_DEV_CS0_BASE 0xF9400000 #define MV_DEV_CS0_SIZE (1024 * 1024) /* 1 MB */ #define MV_DEV_CS1_BASE 0xF9500000 #define MV_DEV_CS1_SIZE (32 * 1024 * 1024) /* 32 MB */ #define MV_DEV_CS2_BASE 0xFB500000 #define MV_DEV_CS2_SIZE (1024 * 1024) /* 1 MB */ /* * Integrated SoC peripherals addresses */ #define MV_BASE MV_PHYS_BASE /* VA == PA mapping */ #if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X) #define MV_DDR_CADR_BASE (MV_BASE + 0x20180) #else #define MV_DDR_CADR_BASE (MV_BASE + 0x1500) #endif #define MV_MPP_BASE (MV_BASE + 0x10000) #if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X) #define MV_MISC_BASE (MV_BASE + 0x18200) #define MV_MBUS_BRIDGE_BASE (MV_BASE + 0x20000) #define MV_INTREGS_BASE (MV_MBUS_BRIDGE_BASE + 0x80) #define MV_MP_CLOCKS_BASE (MV_MBUS_BRIDGE_BASE + 0x700) #define MV_CPU_CONTROL_BASE (MV_MBUS_BRIDGE_BASE + 0x1800) #else #define MV_MBUS_BRIDGE_BASE (MV_BASE + 0x20000) #define MV_INTREGS_BASE (MV_MBUS_BRIDGE_BASE + 0x80) #define MV_CPU_CONTROL_BASE (MV_MBUS_BRIDGE_BASE + 0x100) #endif #define MV_PCI_BASE (MV_BASE + 0x30000) #define MV_PCI_SIZE 0x2000 #if defined(SOC_MV_ARMADA38X) #define MV_PCIE_BASE (MV_BASE + 0x80000) #else #define MV_PCIE_BASE (MV_BASE + 0x40000) #endif #define MV_PCIE_SIZE 0x2000 #define MV_PCIE00_BASE (MV_PCIE_BASE + 0x00000) #define MV_PCIE01_BASE (MV_PCIE_BASE + 0x04000) #define MV_PCIE02_BASE (MV_PCIE_BASE + 0x08000) #define MV_PCIE03_BASE (MV_PCIE_BASE + 0x0C000) #define MV_PCIE10_BASE (MV_PCIE_BASE + 0x40000) #define MV_PCIE11_BASE (MV_PCIE_BASE + 0x44000) #define MV_PCIE12_BASE (MV_PCIE_BASE + 0x48000) #define MV_PCIE13_BASE (MV_PCIE_BASE + 0x4C000) #define MV_SDIO_BASE (MV_BASE + 0x90000) #define MV_SDIO_SIZE 0x10000 /* * Decode windows definitions and macros */ #if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X) #define MV_WIN_CPU_CTRL(n) (((n) < 8) ? 0x10 * (n) : 0x90 + (0x8 * ((n) - 8))) #define MV_WIN_CPU_BASE(n) ((((n) < 8) ? 0x10 * (n) : 0x90 + (0x8 * ((n) - 8))) + 0x4) #define MV_WIN_CPU_REMAP_LO(n) (0x10 * (n) + 0x008) #define MV_WIN_CPU_REMAP_HI(n) (0x10 * (n) + 0x00C) #else #define MV_WIN_CPU_CTRL(n) (0x10 * (n) + (((n) < 8) ? 0x000 : 0x880)) #define MV_WIN_CPU_BASE(n) (0x10 * (n) + (((n) < 8) ? 0x004 : 0x884)) #define MV_WIN_CPU_REMAP_LO(n) (0x10 * (n) + (((n) < 8) ? 0x008 : 0x888)) #define MV_WIN_CPU_REMAP_HI(n) (0x10 * (n) + (((n) < 8) ? 
						0x00C : 0x88C))
#endif

#if defined(SOC_MV_DISCOVERY)
#define MV_WIN_CPU_MAX		14
#elif defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X)
#define MV_WIN_CPU_MAX		20
#else
#define MV_WIN_CPU_MAX		8
#endif

#define MV_WIN_CPU_ATTR_SHIFT	8
#define MV_WIN_CPU_TARGET_SHIFT	4
#define MV_WIN_CPU_ENABLE_BIT	1

#define MV_WIN_DDR_BASE(n)	(0x8 * (n) + 0x0)
#define MV_WIN_DDR_SIZE(n)	(0x8 * (n) + 0x4)
#define MV_WIN_DDR_MAX		4

/*
 * These values are valid only for peripheral decoding windows.
 * The bit in ATTR is zeroed according to the CS bank number.
 */
#define MV_WIN_DDR_ATTR(cs)	(0x0F & ~(0x01 << (cs)))
#define MV_WIN_DDR_TARGET	0x0

#if defined(SOC_MV_DISCOVERY)
#define MV_WIN_CESA_TARGET	9
#define MV_WIN_CESA_ATTR(eng_sel)	1
#elif defined(SOC_MV_ARMADAXP)
#define MV_WIN_CESA_TARGET	9
/*
 * Bits [2:3] of the CESA attribute select the engine:
 * eng_sel:
 *	1: engine1
 *	2: engine0
 */
#define MV_WIN_CESA_ATTR(eng_sel)	(1 | ((eng_sel) << 2))
#elif defined(SOC_MV_ARMADA38X)
#define MV_WIN_CESA_TARGET	9
/*
 * Bits [1:0] = Data swapping
 *	0x0 = Byte swap
 *	0x1 = No swap
 *	0x2 = Byte and word swap
 *	0x3 = Word swap
 * Bits [4:2] = CESA select:
 *	0x6 = CESA0
 *	0x5 = CESA1
 */
#define MV_WIN_CESA_ATTR(eng_sel)	(0x11 | (1 << (3 - (eng_sel))))
#else
#define MV_WIN_CESA_TARGET	3
#define MV_WIN_CESA_ATTR(eng_sel)	0
#endif

/* CESA TDMA address decoding registers */
#define MV_WIN_CESA_CTRL(n)	(0x8 * (n) + 0xA04)
#define MV_WIN_CESA_BASE(n)	(0x8 * (n) + 0xA00)
#define MV_WIN_CESA_MAX		4

#define MV_WIN_USB_CTRL(n)	(0x10 * (n) + 0x320)
#define MV_WIN_USB_BASE(n)	(0x10 * (n) + 0x324)
#define MV_WIN_USB_MAX		4

#define MV_WIN_USB3_CTRL(n)	(0x8 * (n) + 0x4000)
#define MV_WIN_USB3_BASE(n)	(0x8 * (n) + 0x4004)
#define MV_WIN_USB3_MAX		8

+#define MV_WIN_NETA_OFFSET	0x2000
+#define MV_WIN_NETA_BASE(n)	(MV_WIN_ETH_BASE(n) + MV_WIN_NETA_OFFSET)
+
#define MV_WIN_ETH_BASE(n)	(0x8 * (n) + 0x200)
#define MV_WIN_ETH_SIZE(n)	(0x8 * (n) + 0x204)
#define MV_WIN_ETH_REMAP(n)	(0x4 * (n) + 0x280)
#define MV_WIN_ETH_MAX		6

#define MV_WIN_IDMA_BASE(n)	(0x8 * (n) + 0xa00)
#define MV_WIN_IDMA_SIZE(n)	(0x8 * (n) + 0xa04)
#define MV_WIN_IDMA_REMAP(n)	(0x4 * (n) + 0xa60)
#define MV_WIN_IDMA_CAP(n)	(0x4 * (n) + 0xa70)
#define MV_WIN_IDMA_MAX		8
#define MV_IDMA_CHAN_MAX	4

#define MV_WIN_XOR_BASE(n, m)	(0x4 * (n) + 0xa50 + (m) * 0x100)
#define MV_WIN_XOR_SIZE(n, m)	(0x4 * (n) + 0xa70 + (m) * 0x100)
#define MV_WIN_XOR_REMAP(n, m)	(0x4 * (n) + 0xa90 + (m) * 0x100)
#define MV_WIN_XOR_CTRL(n, m)	(0x4 * (n) + 0xa40 + (m) * 0x100)
#define MV_WIN_XOR_OVERR(n, m)	(0x4 * (n) + 0xaa0 + (m) * 0x100)
#define MV_WIN_XOR_MAX		8
#define MV_XOR_CHAN_MAX		2
#define MV_XOR_NON_REMAP	4

#if defined(SOC_MV_DISCOVERY) || defined(SOC_MV_KIRKWOOD)
#define MV_WIN_PCIE_TARGET(n)		4
#define MV_WIN_PCIE_MEM_ATTR(n)		0xE8
#define MV_WIN_PCIE_IO_ATTR(n)		0xE0
#elif defined(SOC_MV_ARMADAXP)
#define MV_WIN_PCIE_TARGET(n)		(4 + (4 * ((n) % 2)))
#define MV_WIN_PCIE_MEM_ATTR(n)		(0xE8 + (0x10 * ((n) / 2)))
#define MV_WIN_PCIE_IO_ATTR(n)		(0xE0 + (0x10 * ((n) / 2)))
#elif defined(SOC_MV_ARMADA38X)
#define MV_WIN_PCIE_TARGET(n)		((n) == 0 ? 8 : 4)
#define MV_WIN_PCIE_MEM_ATTR(n)		((n) < 2 ? 0xE8 : (0xD8 - (((n) % 2) * 0x20)))
#define MV_WIN_PCIE_IO_ATTR(n)		((n) < 2 ? 0xE0 : (0xD0 - (((n) % 2) * 0x20)))
#elif defined(SOC_MV_ORION)
#define MV_WIN_PCIE_TARGET(n)		4
#define MV_WIN_PCIE_MEM_ATTR(n)		0x59
#define MV_WIN_PCIE_IO_ATTR(n)		0x51
#endif

#define MV_WIN_PCI_TARGET	3
#define MV_WIN_PCI_MEM_ATTR	0x59
#define MV_WIN_PCI_IO_ATTR	0x51

#define MV_WIN_PCIE_CTRL(n)	(0x10 * (((n) < 5) ?
(n) : \ (n) + 1) + 0x1820) #define MV_WIN_PCIE_BASE(n) (0x10 * (((n) < 5) ? (n) : \ (n) + 1) + 0x1824) #define MV_WIN_PCIE_REMAP(n) (0x10 * (((n) < 5) ? (n) : \ (n) + 1) + 0x182C) #define MV_WIN_PCIE_MAX 6 #define MV_PCIE_BAR_CTRL(n) (0x04 * (n) + 0x1800) #define MV_PCIE_BAR_BASE(n) (0x08 * ((n) < 3 ? (n) : 4) + 0x0010) #define MV_PCIE_BAR_BASE_H(n) (0x08 * (n) + 0x0014) #define MV_PCIE_BAR_MAX 4 #define MV_PCIE_BAR_64BIT (0x4) #define MV_PCIE_BAR_PREFETCH_EN (0x8) #define MV_PCIE_CONTROL (0x1a00) #define MV_PCIE_ROOT_CMPLX (1 << 1) #if defined(SOC_MV_ARMADA38X) #define MV_WIN_SATA_CTRL(n) (0x10 * (n) + 0x60) #define MV_WIN_SATA_BASE(n) (0x10 * (n) + 0x64) #define MV_WIN_SATA_SIZE(n) (0x10 * (n) + 0x68) #define MV_WIN_SATA_MAX 4 #else #define MV_WIN_SATA_CTRL(n) (0x10 * (n) + 0x30) #define MV_WIN_SATA_BASE(n) (0x10 * (n) + 0x34) #define MV_WIN_SATA_MAX 4 #endif #define MV_WIN_SDHCI_CTRL(n) (0x8 * (n) + 0x4080) #define MV_WIN_SDHCI_BASE(n) (0x8 * (n) + 0x4084) #define MV_WIN_SDHCI_MAX 8 #if defined(SOC_MV_ARMADA38X) #define MV_BOOTROM_MEM_ADDR 0xFFF00000 #define MV_BOOTROM_WIN_SIZE 0xF #define MV_CPU_SUBSYS_REGS_LEN 0x100 #define IO_WIN_9_CTRL_OFFSET 0x98 #define IO_WIN_9_BASE_OFFSET 0x9C /* Mbus decoding unit IDs and attributes */ #define MBUS_BOOTROM_TGT_ID 0x1 #define MBUS_BOOTROM_ATTR 0x1D /* Internal Units Sync Barrier Control Register */ #define MV_SYNC_BARRIER_CTRL 0x84 #define MV_SYNC_BARRIER_CTRL_ALL 0xFFFF #endif /* IO Window Control Register fields */ #define IO_WIN_SIZE_SHIFT 16 #define IO_WIN_SIZE_MASK 0xFFFF +#define IO_WIN_COH_ATTR_MASK (0xF << 12) #define IO_WIN_ATTR_SHIFT 8 #define IO_WIN_ATTR_MASK 0xFF #define IO_WIN_TGT_SHIFT 4 #define IO_WIN_TGT_MASK 0xF #define IO_WIN_SYNC_SHIFT 1 #define IO_WIN_SYNC_MASK 0x1 #define IO_WIN_ENA_SHIFT 0 #define IO_WIN_ENA_MASK 0x1 #define WIN_REG_IDX_RD(pre,reg,off,base) \ static __inline uint32_t \ pre ## _ ## reg ## _read(int i) \ { \ return (bus_space_read_4(fdtbus_bs_tag, base, off(i))); \ } #define WIN_REG_IDX_RD2(pre,reg,off,base) \ static __inline uint32_t \ pre ## _ ## reg ## _read(int i, int j) \ { \ return (bus_space_read_4(fdtbus_bs_tag, base, off(i, j))); \ } \ #define WIN_REG_BASE_IDX_RD(pre,reg,off) \ static __inline uint32_t \ pre ## _ ## reg ## _read(uint32_t base, int i) \ { \ return (bus_space_read_4(fdtbus_bs_tag, base, off(i))); \ } #define WIN_REG_BASE_IDX_RD2(pre,reg,off) \ static __inline uint32_t \ pre ## _ ## reg ## _read(uint32_t base, int i, int j) \ { \ return (bus_space_read_4(fdtbus_bs_tag, base, off(i, j))); \ } #define WIN_REG_IDX_WR(pre,reg,off,base) \ static __inline void \ pre ## _ ## reg ## _write(int i, uint32_t val) \ { \ bus_space_write_4(fdtbus_bs_tag, base, off(i), val); \ } #define WIN_REG_IDX_WR2(pre,reg,off,base) \ static __inline void \ pre ## _ ## reg ## _write(int i, int j, uint32_t val) \ { \ bus_space_write_4(fdtbus_bs_tag, base, off(i, j), val); \ } #define WIN_REG_BASE_IDX_WR(pre,reg,off) \ static __inline void \ pre ## _ ## reg ## _write(uint32_t base, int i, uint32_t val) \ { \ bus_space_write_4(fdtbus_bs_tag, base, off(i), val); \ } #define WIN_REG_BASE_IDX_WR2(pre,reg,off) \ static __inline void \ pre ## _ ## reg ## _write(uint32_t base, int i, int j, uint32_t val) \ { \ bus_space_write_4(fdtbus_bs_tag, base, off(i, j), val); \ } #define WIN_REG_RD(pre,reg,off,base) \ static __inline uint32_t \ pre ## _ ## reg ## _read(void) \ { \ return (bus_space_read_4(fdtbus_bs_tag, base, off)); \ } #define WIN_REG_BASE_RD(pre,reg,off) \ static __inline uint32_t \ pre ## _ ## reg ## 
_read(uint32_t base)						\
{								\
	return (bus_space_read_4(fdtbus_bs_tag, base, off));	\
}

#define WIN_REG_WR(pre,reg,off,base)				\
	static __inline void					\
	pre ## _ ## reg ## _write(uint32_t val)			\
	{							\
		bus_space_write_4(fdtbus_bs_tag, base, off, val); \
	}

#define WIN_REG_BASE_WR(pre,reg,off)				\
	static __inline void					\
	pre ## _ ## reg ## _write(uint32_t base, uint32_t val)	\
	{							\
		bus_space_write_4(fdtbus_bs_tag, base, off, val); \
	}

#endif /* _MVWIN_H_ */

Index: head/sys/conf/options
===================================================================
--- head/sys/conf/options	(revision 319906)
+++ head/sys/conf/options	(revision 319907)
@@ -1,997 +1,1001 @@
# $FreeBSD$
#
# On the handling of kernel options
#
# All kernel options should be listed in NOTES, with suitable
# descriptions.  Negative options (options that make some code not
# compile) should be commented out; LINT (generated from NOTES) should
# compile as much code as possible.  Try to structure option-using
# code so that a single option only switches code on, or only switches
# code off, to make it possible to have a full compile-test.  If
# necessary, you can check for COMPILING_LINT to get maximum code
# coverage.
#
# All new options shall also be listed in either "conf/options" or
# "conf/options.<machine>".  Options that affect a single source-file
# <xxx>.[c|s] should be directed into "opt_<xxx>.h", while options
# that affect multiple files should either go in "opt_global.h" if
# this is a kernel-wide option (used just about everywhere), or in
# "opt_<name-of-option-in-lower-case>.h" if it affects only some files.
# Note that the effect of listing only an option without a
# header-file-name in conf/options (and cousins) is that the last
# convention is followed.
#
# This handling scheme is not yet fully implemented.
#
#
# Format of this file:
# Option name	filename
#
# If filename is missing, the default is
# opt_<name-of-option-in-lower-case>.h
# (An illustrative example of these conventions follows the
# "Miscellaneous options." header below.)

AAC_DEBUG	opt_aac.h
AACRAID_DEBUG	opt_aacraid.h
AHC_ALLOW_MEMIO	opt_aic7xxx.h
AHC_TMODE_ENABLE	opt_aic7xxx.h
AHC_DUMP_EEPROM	opt_aic7xxx.h
AHC_DEBUG	opt_aic7xxx.h
AHC_DEBUG_OPTS	opt_aic7xxx.h
AHC_REG_PRETTY_PRINT	opt_aic7xxx.h
AHD_DEBUG	opt_aic79xx.h
AHD_DEBUG_OPTS	opt_aic79xx.h
AHD_TMODE_ENABLE	opt_aic79xx.h
AHD_REG_PRETTY_PRINT	opt_aic79xx.h
ADW_ALLOW_MEMIO	opt_adw.h
TWA_DEBUG	opt_twa.h
TWA_FLASH_FIRMWARE	opt_twa.h

# Debugging options.
ALT_BREAK_TO_DEBUGGER	opt_kdb.h
BREAK_TO_DEBUGGER	opt_kdb.h
BUF_TRACKING	opt_global.h
DDB
DDB_BUFR_SIZE	opt_ddb.h
DDB_CAPTURE_DEFAULTBUFSIZE	opt_ddb.h
DDB_CAPTURE_MAXBUFSIZE	opt_ddb.h
DDB_CTF		opt_ddb.h
DDB_NUMSYM	opt_ddb.h
FULL_BUF_TRACKING	opt_global.h
GDB
KDB		opt_global.h
KDB_TRACE	opt_kdb.h
KDB_UNATTENDED	opt_kdb.h
KLD_DEBUG	opt_kld.h
SYSCTL_DEBUG	opt_sysctl.h
EARLY_PRINTF	opt_global.h
TEXTDUMP_PREFERRED	opt_ddb.h
TEXTDUMP_VERBOSE	opt_ddb.h
NUM_CORE_FILES	opt_global.h

# Miscellaneous options.
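#
# Illustrative aside (FOO_DEBUG is a made-up option, not an entry in
# this file): given a line such as
#
#	FOO_DEBUG	opt_foo.h
#
# putting "options FOO_DEBUG" into a kernel config file makes config(8)
# generate an opt_foo.h in the build directory containing
#
#	#define FOO_DEBUG 1
#
# which the affected sources pick up with #include "opt_foo.h".
#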
ADAPTIVE_LOCKMGRS ALQ ALTERA_SDCARD_FAST_SIM opt_altera_sdcard.h ATSE_CFI_HACK opt_cfi.h AUDIT opt_global.h BOOTHOWTO opt_global.h BOOTVERBOSE opt_global.h CALLOUT_PROFILING CAPABILITIES opt_capsicum.h CAPABILITY_MODE opt_capsicum.h COMPAT_43 opt_compat.h COMPAT_43TTY opt_compat.h COMPAT_FREEBSD4 opt_compat.h COMPAT_FREEBSD5 opt_compat.h COMPAT_FREEBSD6 opt_compat.h COMPAT_FREEBSD7 opt_compat.h COMPAT_FREEBSD9 opt_compat.h COMPAT_FREEBSD10 opt_compat.h COMPAT_FREEBSD11 opt_compat.h COMPAT_CLOUDABI32 opt_dontuse.h COMPAT_CLOUDABI64 opt_dontuse.h COMPAT_LINUXKPI opt_compat.h COMPILING_LINT opt_global.h CY_PCI_FASTINTR DEADLKRES opt_watchdog.h DEVICE_NUMA EXT_RESOURCES opt_global.h DIRECTIO FILEMON opt_dontuse.h FFCLOCK FULL_PREEMPTION opt_sched.h GZIO opt_gzio.h IMAGACT_BINMISC opt_dontuse.h IPI_PREEMPTION opt_sched.h GEOM_AES opt_geom.h GEOM_BDE opt_geom.h GEOM_BSD opt_geom.h GEOM_CACHE opt_geom.h GEOM_CONCAT opt_geom.h GEOM_ELI opt_geom.h GEOM_FOX opt_geom.h GEOM_GATE opt_geom.h GEOM_JOURNAL opt_geom.h GEOM_LABEL opt_geom.h GEOM_LABEL_GPT opt_geom.h GEOM_LINUX_LVM opt_geom.h GEOM_MAP opt_geom.h GEOM_MBR opt_geom.h GEOM_MIRROR opt_geom.h GEOM_MOUNTVER opt_geom.h GEOM_MULTIPATH opt_geom.h GEOM_NOP opt_geom.h GEOM_PART_APM opt_geom.h GEOM_PART_BSD opt_geom.h GEOM_PART_BSD64 opt_geom.h GEOM_PART_EBR opt_geom.h GEOM_PART_EBR_COMPAT opt_geom.h GEOM_PART_GPT opt_geom.h GEOM_PART_LDM opt_geom.h GEOM_PART_MBR opt_geom.h GEOM_PART_VTOC8 opt_geom.h GEOM_RAID opt_geom.h GEOM_RAID3 opt_geom.h GEOM_SHSEC opt_geom.h GEOM_STRIPE opt_geom.h GEOM_SUNLABEL opt_geom.h GEOM_UZIP opt_geom.h GEOM_UZIP_DEBUG opt_geom.h GEOM_VINUM opt_geom.h GEOM_VIRSTOR opt_geom.h GEOM_VOL opt_geom.h GEOM_ZERO opt_geom.h IFLIB opt_iflib.h KDTRACE_HOOKS opt_global.h KDTRACE_FRAME opt_kdtrace.h KN_HASHSIZE opt_kqueue.h KSTACK_MAX_PAGES KSTACK_PAGES KSTACK_USAGE_PROF KTRACE KTRACE_REQUEST_POOL opt_ktrace.h LIBICONV MAC opt_global.h MAC_BIBA opt_dontuse.h MAC_BSDEXTENDED opt_dontuse.h MAC_IFOFF opt_dontuse.h MAC_LOMAC opt_dontuse.h MAC_MLS opt_dontuse.h MAC_NONE opt_dontuse.h MAC_PARTITION opt_dontuse.h MAC_PORTACL opt_dontuse.h MAC_SEEOTHERUIDS opt_dontuse.h MAC_STATIC opt_mac.h MAC_STUB opt_dontuse.h MAC_TEST opt_dontuse.h MD_ROOT opt_md.h MD_ROOT_FSTYPE opt_md.h MD_ROOT_SIZE opt_md.h MFI_DEBUG opt_mfi.h MFI_DECODE_LOG opt_mfi.h MPROF_BUFFERS opt_mprof.h MPROF_HASH_SIZE opt_mprof.h NEW_PCIB opt_global.h NO_ADAPTIVE_MUTEXES opt_adaptive_mutexes.h NO_ADAPTIVE_RWLOCKS NO_ADAPTIVE_SX NO_EVENTTIMERS opt_timer.h NO_SYSCTL_DESCR opt_global.h NSWBUF_MIN opt_swap.h MBUF_PACKET_ZONE_DISABLE opt_global.h PANIC_REBOOT_WAIT_TIME opt_panic.h PCI_HP opt_pci.h PCI_IOV opt_global.h PPC_DEBUG opt_ppc.h PPC_PROBE_CHIPSET opt_ppc.h PPS_SYNC opt_ntp.h PREEMPTION opt_sched.h QUOTA SCHED_4BSD opt_sched.h SCHED_STATS opt_sched.h SCHED_ULE opt_sched.h SLEEPQUEUE_PROFILING SLHCI_DEBUG opt_slhci.h SPX_HACK STACK opt_stack.h SUIDDIR MSGMNB opt_sysvipc.h MSGMNI opt_sysvipc.h MSGSEG opt_sysvipc.h MSGSSZ opt_sysvipc.h MSGTQL opt_sysvipc.h SEMMNI opt_sysvipc.h SEMMNS opt_sysvipc.h SEMMNU opt_sysvipc.h SEMMSL opt_sysvipc.h SEMOPM opt_sysvipc.h SEMUME opt_sysvipc.h SHMALL opt_sysvipc.h SHMMAX opt_sysvipc.h SHMMAXPGS opt_sysvipc.h SHMMIN opt_sysvipc.h SHMMNI opt_sysvipc.h SHMSEG opt_sysvipc.h SYSVMSG opt_sysvipc.h SYSVSEM opt_sysvipc.h SYSVSHM opt_sysvipc.h SW_WATCHDOG opt_watchdog.h TURNSTILE_PROFILING UMTX_PROFILING UMTX_CHAINS opt_global.h VERBOSE_SYSINIT # POSIX kernel options P1003_1B_MQUEUE opt_posix.h P1003_1B_SEMAPHORES opt_posix.h 
_KPOSIX_PRIORITY_SCHEDULING	opt_posix.h

# Do we want the config file compiled into the kernel?
INCLUDE_CONFIG_FILE	opt_config.h

# Options for static filesystems.  These should only be used at config
# time, since the corresponding lkms cannot work if there are any static
# dependencies.  Unusability is enforced by hiding the defines for the
# options in a never-included header.
AUTOFS		opt_dontuse.h
CD9660		opt_dontuse.h
EXT2FS		opt_dontuse.h
FDESCFS		opt_dontuse.h
FFS		opt_dontuse.h
FUSE		opt_dontuse.h
MSDOSFS		opt_dontuse.h
NANDFS		opt_dontuse.h
NULLFS		opt_dontuse.h
PROCFS		opt_dontuse.h
PSEUDOFS	opt_dontuse.h
SMBFS		opt_dontuse.h
TMPFS		opt_dontuse.h
UDF		opt_dontuse.h
UNIONFS		opt_dontuse.h
ZFS		opt_dontuse.h

# Pseudofs debugging
PSEUDOFS_TRACE	opt_pseudofs.h

# In-kernel GSS-API
KGSSAPI		opt_kgssapi.h
KGSSAPI_DEBUG	opt_kgssapi.h

# These static filesystems have one slightly bogus static dependency in
# sys/i386/i386/autoconf.c.  If any of these filesystems are
# statically compiled into the kernel, code for mounting them as root
# filesystems will be enabled - but look below.
# NFSCL - client
# NFSD - server
NFSCL		opt_nfs.h
NFSD		opt_nfs.h

# filesystems and libiconv bridge
CD9660_ICONV	opt_dontuse.h
MSDOSFS_ICONV	opt_dontuse.h
UDF_ICONV	opt_dontuse.h

# If you are following the conditions in the copyright,
# you can enable soft-updates which will speed up a lot of things
# and make the system safer from crashes at the same time.
# Otherwise a STUB module will be compiled in.
SOFTUPDATES	opt_ffs.h

# On small, embedded systems, it can be useful to turn off support for
# snapshots.  It saves about 30-40k for a feature that would be lightly
# used, if it is used at all.
NO_FFS_SNAPSHOT	opt_ffs.h

# Enabling this option turns on support for Access Control Lists in UFS,
# which can be used to support high security configurations.  Depends on
# UFS_EXTATTR.
UFS_ACL		opt_ufs.h

# Enabling this option turns on support for extended attributes in UFS-based
# filesystems, which can be used to support high security configurations
# as well as new filesystem features.
UFS_EXTATTR	opt_ufs.h
UFS_EXTATTR_AUTOSTART	opt_ufs.h

# Enable fast hash lookups for large directories on UFS-based filesystems.
UFS_DIRHASH	opt_ufs.h

# Enable gjournal-based UFS journal.
UFS_GJOURNAL	opt_ufs.h

# We plan to remove the static dependences above, with a
# <filesystem>_ROOT option to control if it is usable as root.  This list
# allows these options to be present in config files already (though
# they won't make any difference yet).
NFS_ROOT	opt_nfsroot.h

# SMB/CIFS requester
NETSMB		opt_netsmb.h

# Options used only in subr_param.c.
HZ		opt_param.h
MAXFILES	opt_param.h
NBUF		opt_param.h
NSFBUFS		opt_param.h
VM_BCACHE_SIZE_MAX	opt_param.h
VM_SWZONE_SIZE_MAX	opt_param.h
MAXUSERS
DFLDSIZ		opt_param.h
MAXDSIZ		opt_param.h
MAXSSIZ		opt_param.h

# Generic SCSI options.
CAM_MAX_HIGHPOWER	opt_cam.h
CAMDEBUG	opt_cam.h
CAM_DEBUG_COMPILE	opt_cam.h
CAM_DEBUG_DELAY	opt_cam.h
CAM_DEBUG_BUS	opt_cam.h
CAM_DEBUG_TARGET	opt_cam.h
CAM_DEBUG_LUN	opt_cam.h
CAM_DEBUG_FLAGS	opt_cam.h
CAM_BOOT_DELAY	opt_cam.h
CAM_IOSCHED_DYNAMIC	opt_cam.h
SCSI_DELAY	opt_scsi.h
SCSI_NO_SENSE_STRINGS	opt_scsi.h
SCSI_NO_OP_STRINGS	opt_scsi.h

# Options used only in cam/ata/ata_da.c
ADA_TEST_FAILURE	opt_ada.h
ATA_STATIC_ID	opt_ada.h

# Options used only in cam/scsi/scsi_cd.c
CHANGER_MIN_BUSY_SECONDS	opt_cd.h
CHANGER_MAX_BUSY_SECONDS	opt_cd.h

# Options used only in cam/scsi/scsi_sa.c.
SA_IO_TIMEOUT	opt_sa.h
SA_SPACE_TIMEOUT	opt_sa.h
SA_REWIND_TIMEOUT	opt_sa.h
SA_ERASE_TIMEOUT	opt_sa.h
SA_1FM_AT_EOD	opt_sa.h

# Options used only in cam/scsi/scsi_pt.c
SCSI_PT_DEFAULT_TIMEOUT	opt_pt.h

# Options used only in cam/scsi/scsi_ses.c
SES_ENABLE_PASSTHROUGH	opt_ses.h

# Options used in dev/sym/ (Symbios SCSI driver).
SYM_SETUP_LP_PROBE_MAP	opt_sym.h	#-Low Priority Probe Map (bits)
					# Allows the ncr to take precedence
					# 1 (1<<0) -> 810a, 860
					# 2 (1<<1) -> 825a, 875, 885, 895
					# 4 (1<<2) -> 895a, 896, 1510d
SYM_SETUP_SCSI_DIFF	opt_sym.h	#-HVD support for 825a, 875, 885
					# disabled:0 (default), enabled:1
SYM_SETUP_PCI_PARITY	opt_sym.h	#-PCI parity checking
					# disabled:0, enabled:1 (default)
SYM_SETUP_MAX_LUN	opt_sym.h	#-Number of LUNs supported
					# default:8, range:[1..64]

# Options used only in dev/ncr/*
SCSI_NCR_DEBUG	opt_ncr.h
SCSI_NCR_MAX_SYNC	opt_ncr.h
SCSI_NCR_MAX_WIDE	opt_ncr.h
SCSI_NCR_MYADDR	opt_ncr.h

# Options used only in dev/isp/*
ISP_TARGET_MODE	opt_isp.h
ISP_FW_CRASH_DUMP	opt_isp.h
ISP_DEFAULT_ROLES	opt_isp.h
ISP_INTERNAL_TARGET	opt_isp.h
ISP_FCTAPE_OFF	opt_isp.h

# Options used only in dev/iscsi
ISCSI_INITIATOR_DEBUG	opt_iscsi_initiator.h

# Net stuff.
ACCEPT_FILTER_DATA
ACCEPT_FILTER_DNS
ACCEPT_FILTER_HTTP
ALTQ		opt_global.h
ALTQ_CBQ	opt_altq.h
ALTQ_CDNR	opt_altq.h
ALTQ_CODEL	opt_altq.h
ALTQ_DEBUG	opt_altq.h
ALTQ_HFSC	opt_altq.h
ALTQ_FAIRQ	opt_altq.h
ALTQ_NOPCC	opt_altq.h
ALTQ_PRIQ	opt_altq.h
ALTQ_RED	opt_altq.h
ALTQ_RIO	opt_altq.h
BOOTP		opt_bootp.h
BOOTP_BLOCKSIZE	opt_bootp.h
BOOTP_COMPAT	opt_bootp.h
BOOTP_NFSROOT	opt_bootp.h
BOOTP_NFSV3	opt_bootp.h
BOOTP_WIRED_TO	opt_bootp.h
DEVICE_POLLING
DUMMYNET	opt_ipdn.h
RATELIMIT	opt_ratelimit.h
INET		opt_inet.h
INET6		opt_inet6.h
IPDIVERT
IPFILTER	opt_ipfilter.h
IPFILTER_DEFAULT_BLOCK	opt_ipfilter.h
IPFILTER_LOG	opt_ipfilter.h
IPFILTER_LOOKUP	opt_ipfilter.h
IPFIREWALL	opt_ipfw.h
IPFIREWALL_DEFAULT_TO_ACCEPT	opt_ipfw.h
IPFIREWALL_NAT	opt_ipfw.h
IPFIREWALL_NAT64	opt_ipfw.h
IPFIREWALL_NAT64_DIRECT_OUTPUT	opt_ipfw.h
IPFIREWALL_NPTV6	opt_ipfw.h
IPFIREWALL_VERBOSE	opt_ipfw.h
IPFIREWALL_VERBOSE_LIMIT	opt_ipfw.h
IPFIREWALL_PMOD	opt_ipfw.h
IPSEC		opt_ipsec.h
IPSEC_DEBUG	opt_ipsec.h
IPSEC_SUPPORT	opt_ipsec.h
IPSTEALTH
KRPC
LIBALIAS
LIBMBPOOL
LIBMCHAIN
MBUF_PROFILING
MBUF_STRESS_TEST
MROUTING	opt_mrouting.h
NFSLOCKD
PCBGROUP	opt_pcbgroup.h
PF_DEFAULT_TO_DROP	opt_pf.h
RADIX_MPATH	opt_mpath.h
ROUTETABLES	opt_route.h
RSS		opt_rss.h
SLIP_IFF_OPTS	opt_slip.h
TCPDEBUG
TCPPCAP		opt_global.h
SIFTR
TCP_HHOOK	opt_inet.h
TCP_OFFLOAD	opt_inet.h	# Enable code to dispatch TCP offloading
TCP_RFC7413	opt_inet.h
TCP_RFC7413_MAX_KEYS	opt_inet.h
TCP_SIGNATURE	opt_ipsec.h
VLAN_ARRAY	opt_vlan.h
XBONEHACK
FLOWTABLE	opt_route.h
FLOWTABLE_HASH_ALL	opt_route.h

#
# SCTP
#
SCTP		opt_sctp.h
SCTP_DEBUG	opt_sctp.h	# Enable debug printfs
SCTP_WITH_NO_CSUM	opt_sctp.h	# Use this at your peril
SCTP_LOCK_LOGGING	opt_sctp.h	# Log to KTR lock activity
SCTP_MBUF_LOGGING	opt_sctp.h	# Log to KTR general mbuf alloc/free
SCTP_MBCNT_LOGGING	opt_sctp.h	# Log to KTR mbcnt activity
SCTP_PACKET_LOGGING	opt_sctp.h	# Log to a packet buffer last N packets
SCTP_LTRACE_CHUNKS	opt_sctp.h	# Log to KTR chunks processed
SCTP_LTRACE_ERRORS	opt_sctp.h	# Log to KTR error returns.
SCTP_USE_PERCPU_STAT	opt_sctp.h	# Use per cpu stats.
SCTP_MCORE_INPUT	opt_sctp.h	# Have multiple input threads for input mbufs
SCTP_LOCAL_TRACE_BUF	opt_sctp.h	# Use tracebuffer exported via sysctl
SCTP_DETAILED_STR_STATS	opt_sctp.h	# Use per PR-SCTP policy stream stats

#
#
#
# Netgraph(4).  Use option NETGRAPH to enable the base netgraph code.
# Each netgraph node type can either be compiled into the kernel
# or loaded dynamically.  To get the former, include the corresponding
# option below.  Each type has its own man page, e.g. ng_async(4).
NETGRAPH
NETGRAPH_DEBUG	opt_netgraph.h
NETGRAPH_ASYNC	opt_netgraph.h
NETGRAPH_ATMLLC	opt_netgraph.h
NETGRAPH_ATM_ATMPIF	opt_netgraph.h
NETGRAPH_BLUETOOTH	opt_netgraph.h
NETGRAPH_BLUETOOTH_BT3C	opt_netgraph.h
NETGRAPH_BLUETOOTH_H4	opt_netgraph.h
NETGRAPH_BLUETOOTH_HCI	opt_netgraph.h
NETGRAPH_BLUETOOTH_L2CAP	opt_netgraph.h
NETGRAPH_BLUETOOTH_SOCKET	opt_netgraph.h
NETGRAPH_BLUETOOTH_UBT	opt_netgraph.h
NETGRAPH_BLUETOOTH_UBTBCMFW	opt_netgraph.h
NETGRAPH_BPF	opt_netgraph.h
NETGRAPH_BRIDGE	opt_netgraph.h
NETGRAPH_CAR	opt_netgraph.h
NETGRAPH_CISCO	opt_netgraph.h
NETGRAPH_DEFLATE	opt_netgraph.h
NETGRAPH_DEVICE	opt_netgraph.h
NETGRAPH_ECHO	opt_netgraph.h
NETGRAPH_EIFACE	opt_netgraph.h
NETGRAPH_ETHER	opt_netgraph.h
NETGRAPH_ETHER_ECHO	opt_netgraph.h
NETGRAPH_FEC	opt_netgraph.h
NETGRAPH_FRAME_RELAY	opt_netgraph.h
NETGRAPH_GIF	opt_netgraph.h
NETGRAPH_GIF_DEMUX	opt_netgraph.h
NETGRAPH_HOLE	opt_netgraph.h
NETGRAPH_IFACE	opt_netgraph.h
NETGRAPH_IP_INPUT	opt_netgraph.h
NETGRAPH_IPFW	opt_netgraph.h
NETGRAPH_KSOCKET	opt_netgraph.h
NETGRAPH_L2TP	opt_netgraph.h
NETGRAPH_LMI	opt_netgraph.h
NETGRAPH_MPPC_COMPRESSION	opt_netgraph.h
NETGRAPH_MPPC_ENCRYPTION	opt_netgraph.h
NETGRAPH_NAT	opt_netgraph.h
NETGRAPH_NETFLOW	opt_netgraph.h
NETGRAPH_ONE2MANY	opt_netgraph.h
NETGRAPH_PATCH	opt_netgraph.h
NETGRAPH_PIPE	opt_netgraph.h
NETGRAPH_PPP	opt_netgraph.h
NETGRAPH_PPPOE	opt_netgraph.h
NETGRAPH_PPTPGRE	opt_netgraph.h
NETGRAPH_PRED1	opt_netgraph.h
NETGRAPH_RFC1490	opt_netgraph.h
NETGRAPH_SOCKET	opt_netgraph.h
NETGRAPH_SPLIT	opt_netgraph.h
NETGRAPH_SPPP	opt_netgraph.h
NETGRAPH_TAG	opt_netgraph.h
NETGRAPH_TCPMSS	opt_netgraph.h
NETGRAPH_TEE	opt_netgraph.h
NETGRAPH_TTY	opt_netgraph.h
NETGRAPH_UI	opt_netgraph.h
NETGRAPH_VJC	opt_netgraph.h
NETGRAPH_VLAN	opt_netgraph.h

# NgATM options
NGATM_ATM	opt_netgraph.h
NGATM_ATMBASE	opt_netgraph.h
NGATM_SSCOP	opt_netgraph.h
NGATM_SSCFU	opt_netgraph.h
NGATM_UNI	opt_netgraph.h
NGATM_CCATM	opt_netgraph.h

# DRM options
DRM_DEBUG	opt_drm.h

TI_SF_BUF_JUMBO	opt_ti.h
TI_JUMBO_HDRSPLIT	opt_ti.h

# DPT driver debug flags
DPT_MEASURE_PERFORMANCE	opt_dpt.h
DPT_RESET_HBA	opt_dpt.h

# Misc debug flags.  Most of these should probably be replaced with
# 'DEBUG', and then let people recompile just the interesting modules
# with 'make CC="cc -DDEBUG"'.
CLUSTERDEBUG opt_debug_cluster.h DEBUG_1284 opt_ppb_1284.h VP0_DEBUG opt_vpo.h LPT_DEBUG opt_lpt.h PLIP_DEBUG opt_plip.h LOCKF_DEBUG opt_debug_lockf.h SI_DEBUG opt_debug_si.h IFMEDIA_DEBUG opt_ifmedia.h # Fb options FB_DEBUG opt_fb.h FB_INSTALL_CDEV opt_fb.h # ppbus related options PERIPH_1284 opt_ppb_1284.h DONTPROBE_1284 opt_ppb_1284.h # smbus related options ENABLE_ALART opt_intpm.h # These cause changes all over the kernel BLKDEV_IOSIZE opt_global.h BURN_BRIDGES opt_global.h DEBUG opt_global.h DEBUG_LOCKS opt_global.h DEBUG_VFS_LOCKS opt_global.h DFLTPHYS opt_global.h DIAGNOSTIC opt_global.h INVARIANT_SUPPORT opt_global.h INVARIANTS opt_global.h MAXCPU opt_global.h MAXMEMDOM opt_global.h MAXPHYS opt_global.h MCLSHIFT opt_global.h MUTEX_NOINLINE opt_global.h LOCK_PROFILING opt_global.h LOCK_PROFILING_FAST opt_global.h MSIZE opt_global.h REGRESSION opt_global.h RWLOCK_NOINLINE opt_global.h SX_NOINLINE opt_global.h VFS_BIO_DEBUG opt_global.h # These are VM related options VM_KMEM_SIZE opt_vm.h VM_KMEM_SIZE_SCALE opt_vm.h VM_KMEM_SIZE_MAX opt_vm.h VM_NRESERVLEVEL opt_vm.h VM_NUMA_ALLOC opt_vm.h VM_LEVEL_0_ORDER opt_vm.h NO_SWAPPING opt_vm.h MALLOC_MAKE_FAILURES opt_vm.h MALLOC_PROFILE opt_vm.h MALLOC_DEBUG_MAXZONES opt_vm.h # The MemGuard replacement allocator used for tamper-after-free detection DEBUG_MEMGUARD opt_vm.h # The RedZone malloc(9) protection DEBUG_REDZONE opt_vm.h # Standard SMP options EARLY_AP_STARTUP opt_global.h SMP opt_global.h # Size of the kernel message buffer MSGBUF_SIZE opt_msgbuf.h # NFS options NFS_MINATTRTIMO opt_nfs.h NFS_MAXATTRTIMO opt_nfs.h NFS_MINDIRATTRTIMO opt_nfs.h NFS_MAXDIRATTRTIMO opt_nfs.h NFS_DEBUG opt_nfs.h # For the Bt848/Bt848A/Bt849/Bt878/Bt879 driver OVERRIDE_CARD opt_bktr.h OVERRIDE_TUNER opt_bktr.h OVERRIDE_DBX opt_bktr.h OVERRIDE_MSP opt_bktr.h BROOKTREE_SYSTEM_DEFAULT opt_bktr.h BROOKTREE_ALLOC_PAGES opt_bktr.h BKTR_OVERRIDE_CARD opt_bktr.h BKTR_OVERRIDE_TUNER opt_bktr.h BKTR_OVERRIDE_DBX opt_bktr.h BKTR_OVERRIDE_MSP opt_bktr.h BKTR_SYSTEM_DEFAULT opt_bktr.h BKTR_ALLOC_PAGES opt_bktr.h BKTR_USE_PLL opt_bktr.h BKTR_GPIO_ACCESS opt_bktr.h BKTR_NO_MSP_RESET opt_bktr.h BKTR_430_FX_MODE opt_bktr.h BKTR_SIS_VIA_MODE opt_bktr.h BKTR_USE_FREEBSD_SMBUS opt_bktr.h BKTR_NEW_MSP34XX_DRIVER opt_bktr.h # Options for uart(4) UART_PPS_ON_CTS opt_uart.h UART_POLL_FREQ opt_uart.h UART_DEV_TOLERANCE_PCT opt_uart.h # options for bus/device framework BUS_DEBUG opt_bus.h # options for USB support USB_DEBUG opt_usb.h USB_HOST_ALIGN opt_usb.h USB_REQ_DEBUG opt_usb.h USB_TEMPLATE opt_usb.h USB_VERBOSE opt_usb.h USB_DMA_SINGLE_ALLOC opt_usb.h USB_EHCI_BIG_ENDIAN_DESC opt_usb.h U3G_DEBUG opt_u3g.h UKBD_DFLT_KEYMAP opt_ukbd.h UPLCOM_INTR_INTERVAL opt_uplcom.h UVSCOM_DEFAULT_OPKTSIZE opt_uvscom.h UVSCOM_INTR_INTERVAL opt_uvscom.h # options for the Realtek rtwn driver RTWN_DEBUG opt_rtwn.h RTWN_WITHOUT_UCODE opt_rtwn.h # Embedded system options INIT_PATH ROOTDEVNAME FDC_DEBUG opt_fdc.h PCFCLOCK_VERBOSE opt_pcfclock.h PCFCLOCK_MAX_RETRIES opt_pcfclock.h KTR opt_global.h KTR_ALQ opt_ktr.h KTR_MASK opt_ktr.h KTR_CPUMASK opt_ktr.h KTR_COMPILE opt_global.h KTR_BOOT_ENTRIES opt_global.h KTR_ENTRIES opt_global.h KTR_VERBOSE opt_ktr.h WITNESS opt_global.h WITNESS_KDB opt_witness.h WITNESS_NO_VNODE opt_witness.h WITNESS_SKIPSPIN opt_witness.h WITNESS_COUNT opt_witness.h OPENSOLARIS_WITNESS opt_global.h # options for ACPI support ACPI_DEBUG opt_acpi.h ACPI_MAX_TASKS opt_acpi.h ACPI_MAX_THREADS opt_acpi.h ACPI_DMAR opt_acpi.h DEV_ACPI opt_acpi.h # ISA support DEV_ISA opt_isa.h 
ISAPNP opt_isa.h # various 'device presence' options. DEV_BPF opt_bpf.h DEV_CARP opt_carp.h DEV_NETMAP opt_global.h DEV_PCI opt_pci.h DEV_PF opt_pf.h DEV_PFLOG opt_pf.h DEV_PFSYNC opt_pf.h DEV_RANDOM opt_global.h DEV_SPLASH opt_splash.h DEV_VLAN opt_vlan.h # ed driver ED_HPP opt_ed.h ED_3C503 opt_ed.h ED_SIC opt_ed.h # bce driver BCE_DEBUG opt_bce.h BCE_NVRAM_WRITE_SUPPORT opt_bce.h SOCKBUF_DEBUG opt_global.h # options for ubsec driver UBSEC_DEBUG opt_ubsec.h UBSEC_RNDTEST opt_ubsec.h UBSEC_NO_RNG opt_ubsec.h # options for hifn driver HIFN_DEBUG opt_hifn.h HIFN_RNDTEST opt_hifn.h # options for safenet driver SAFE_DEBUG opt_safe.h SAFE_NO_RNG opt_safe.h SAFE_RNDTEST opt_safe.h # syscons/vt options MAXCONS opt_syscons.h SC_ALT_MOUSE_IMAGE opt_syscons.h SC_CUT_SPACES2TABS opt_syscons.h SC_CUT_SEPCHARS opt_syscons.h SC_DEBUG_LEVEL opt_syscons.h SC_DFLT_FONT opt_syscons.h SC_DISABLE_KDBKEY opt_syscons.h SC_DISABLE_REBOOT opt_syscons.h SC_HISTORY_SIZE opt_syscons.h SC_KERNEL_CONS_ATTR opt_syscons.h SC_KERNEL_CONS_REV_ATTR opt_syscons.h SC_MOUSE_CHAR opt_syscons.h SC_NO_CUTPASTE opt_syscons.h SC_NO_FONT_LOADING opt_syscons.h SC_NO_HISTORY opt_syscons.h SC_NO_MODE_CHANGE opt_syscons.h SC_NO_SUSPEND_VTYSWITCH opt_syscons.h SC_NO_SYSMOUSE opt_syscons.h SC_NORM_ATTR opt_syscons.h SC_NORM_REV_ATTR opt_syscons.h SC_PIXEL_MODE opt_syscons.h SC_RENDER_DEBUG opt_syscons.h SC_TWOBUTTON_MOUSE opt_syscons.h VT_ALT_TO_ESC_HACK opt_syscons.h VT_FB_DEFAULT_WIDTH opt_syscons.h VT_FB_DEFAULT_HEIGHT opt_syscons.h VT_MAXWINDOWS opt_syscons.h VT_TWOBUTTON_MOUSE opt_syscons.h DEV_SC opt_syscons.h DEV_VT opt_syscons.h # teken terminal emulator options TEKEN_CONS25 opt_teken.h TEKEN_UTF8 opt_teken.h TERMINAL_KERN_ATTR opt_teken.h TERMINAL_NORM_ATTR opt_teken.h # options for printf PRINTF_BUFR_SIZE opt_printf.h # kbd options KBD_DISABLE_KEYMAP_LOAD opt_kbd.h KBD_INSTALL_CDEV opt_kbd.h KBD_MAXRETRY opt_kbd.h KBD_MAXWAIT opt_kbd.h KBD_RESETDELAY opt_kbd.h KBDIO_DEBUG opt_kbd.h KBDMUX_DFLT_KEYMAP opt_kbdmux.h # options for the Atheros driver ATH_DEBUG opt_ath.h ATH_TXBUF opt_ath.h ATH_RXBUF opt_ath.h ATH_DIAGAPI opt_ath.h ATH_TX99_DIAG opt_ath.h ATH_ENABLE_11N opt_ath.h ATH_ENABLE_DFS opt_ath.h ATH_EEPROM_FIRMWARE opt_ath.h ATH_ENABLE_RADIOTAP_VENDOR_EXT opt_ath.h ATH_DEBUG_ALQ opt_ath.h ATH_KTR_INTR_DEBUG opt_ath.h # options for the Atheros hal AH_SUPPORT_AR5416 opt_ah.h # XXX For now, this breaks non-AR9130 chipsets, so only use it # XXX when actually targeting AR9130. 
AH_SUPPORT_AR9130	opt_ah.h

# This is required for AR933x SoC support
AH_SUPPORT_AR9330	opt_ah.h
AH_SUPPORT_AR9340	opt_ah.h
AH_SUPPORT_QCA9530	opt_ah.h
AH_SUPPORT_QCA9550	opt_ah.h

AH_DEBUG	opt_ah.h
AH_ASSERT	opt_ah.h
AH_DEBUG_ALQ	opt_ah.h
AH_REGOPS_FUNC	opt_ah.h
AH_WRITE_REGDOMAIN	opt_ah.h
AH_DEBUG_COUNTRY	opt_ah.h
AH_WRITE_EEPROM	opt_ah.h
AH_PRIVATE_DIAG	opt_ah.h
AH_NEED_DESC_SWAP	opt_ah.h
AH_USE_INIPDGAIN	opt_ah.h
AH_MAXCHAN	opt_ah.h
AH_RXCFG_SDMAMW_4BYTES	opt_ah.h
AH_INTERRUPT_DEBUGGING	opt_ah.h
# AR5416 and later interrupt mitigation
# XXX do not use this for AR9130
AH_AR5416_INTERRUPT_MITIGATION	opt_ah.h

# options for the Broadcom BCM43xx driver (bwi)
BWI_DEBUG	opt_bwi.h
BWI_DEBUG_VERBOSE	opt_bwi.h

# options for the Broadcom BCM43xx driver (bwn)
BWN_DEBUG	opt_bwn.h
BWN_GPL_PHY	opt_bwn.h

# Options for the SIBA driver
SIBA_DEBUG	opt_siba.h

# options for the Marvell 8335 wireless driver
MALO_DEBUG	opt_malo.h
MALO_TXBUF	opt_malo.h
MALO_RXBUF	opt_malo.h

# options for the Marvell wireless driver
MWL_DEBUG	opt_mwl.h
MWL_TXBUF	opt_mwl.h
MWL_RXBUF	opt_mwl.h
MWL_DIAGAPI	opt_mwl.h
MWL_AGGR_SIZE	opt_mwl.h
MWL_TX_NODROP	opt_mwl.h

+# Options for the Marvell NETA driver
+MVNETA_MULTIQUEUE	opt_mvneta.h
+MVNETA_KTR	opt_mvneta.h
+
# Options for the Intel 802.11ac wireless driver
IWM_DEBUG	opt_iwm.h

# Options for the Intel 802.11n wireless driver
IWN_DEBUG	opt_iwn.h

# Options for the Intel 3945ABG wireless driver
WPI_DEBUG	opt_wpi.h

# dcons options
DCONS_BUF_SIZE	opt_dcons.h
DCONS_POLL_HZ	opt_dcons.h
DCONS_FORCE_CONSOLE	opt_dcons.h
DCONS_FORCE_GDB	opt_dcons.h

# HWPMC options
HWPMC_DEBUG	opt_global.h
HWPMC_HOOKS
HWPMC_MIPS_BACKTRACE	opt_hwpmc_hooks.h

# XBOX options for FreeBSD/i386, but some files are MI
XBOX		opt_xbox.h

# Interrupt filtering
INTR_FILTER

# 802.11 support layer
IEEE80211_DEBUG	opt_wlan.h
IEEE80211_DEBUG_REFCNT	opt_wlan.h
IEEE80211_AMPDU_AGE	opt_wlan.h
IEEE80211_SUPPORT_MESH	opt_wlan.h
IEEE80211_SUPPORT_SUPERG	opt_wlan.h
IEEE80211_SUPPORT_TDMA	opt_wlan.h
IEEE80211_ALQ	opt_wlan.h
IEEE80211_DFS_DEBUG	opt_wlan.h

# 802.11 TDMA support
TDMA_SLOTLEN_DEFAULT	opt_tdma.h
TDMA_SLOTCNT_DEFAULT	opt_tdma.h
TDMA_BINTVAL_DEFAULT	opt_tdma.h
TDMA_TXRATE_11B_DEFAULT	opt_tdma.h
TDMA_TXRATE_11G_DEFAULT	opt_tdma.h
TDMA_TXRATE_11A_DEFAULT	opt_tdma.h
TDMA_TXRATE_TURBO_DEFAULT	opt_tdma.h
TDMA_TXRATE_HALF_DEFAULT	opt_tdma.h
TDMA_TXRATE_QUARTER_DEFAULT	opt_tdma.h
TDMA_TXRATE_11NA_DEFAULT	opt_tdma.h
TDMA_TXRATE_11NG_DEFAULT	opt_tdma.h

# VideoMode
PICKMODE_DEBUG	opt_videomode.h

# Network stack virtualization options
VIMAGE		opt_global.h
VNET_DEBUG	opt_global.h

# Common Flash Interface (CFI) options
CFI_SUPPORT_STRATAFLASH	opt_cfi.h
CFI_ARMEDANDDANGEROUS	opt_cfi.h
CFI_HARDWAREBYTESWAP	opt_cfi.h

# Sound options
SND_DEBUG	opt_snd.h
SND_DIAGNOSTIC	opt_snd.h
SND_FEEDER_MULTIFORMAT	opt_snd.h
SND_FEEDER_FULL_MULTIFORMAT	opt_snd.h
SND_FEEDER_RATE_HP	opt_snd.h
SND_PCM_64	opt_snd.h
SND_OLDSTEREO	opt_snd.h

X86BIOS

# Flattened device tree options
FDT		opt_platform.h
FDT_DTB_STATIC	opt_platform.h

# OFED Infiniband stack
OFED		opt_ofed.h
OFED_DEBUG_INIT	opt_ofed.h
SDP		opt_ofed.h
SDP_DEBUG	opt_ofed.h
IPOIB		opt_ofed.h
IPOIB_DEBUG	opt_ofed.h
IPOIB_CM	opt_ofed.h

# Resource Accounting
RACCT		opt_global.h
RACCT_DEFAULT_TO_DISABLED	opt_global.h

# Resource Limits
RCTL		opt_global.h

# Random number generator(s)
# Which CSPRNG hash we get.
# If Yarrow is not chosen, Fortuna is selected.
RANDOM_YARROW	opt_global.h
# With this, no entropy processor is loaded, but the entropy
# harvesting infrastructure is present.
This means an entropy # processor may be loaded as a module. RANDOM_LOADABLE opt_global.h # This turns on high-rate and potentially expensive harvesting in # the uma slab allocator. RANDOM_ENABLE_UMA opt_global.h # BHND(4) driver BHND_LOGLEVEL opt_global.h # GPIO and child devices GPIO_SPI_DEBUG opt_gpio.h # etherswitch(4) driver RTL8366_SOFT_RESET opt_etherswitch.h # evdev protocol support EVDEV_SUPPORT opt_evdev.h EVDEV_DEBUG opt_evdev.h UINPUT_DEBUG opt_evdev.h # Hyper-V network driver HN_DEBUG opt_hn.h # Encrypted kernel crash dumps EKCD opt_ekcd.h Index: head/sys/dev/neta/if_mvneta.c =================================================================== --- head/sys/dev/neta/if_mvneta.c (nonexistent) +++ head/sys/dev/neta/if_mvneta.c (revision 319907) @@ -0,0 +1,3570 @@ +/* + * Copyright (c) 2017 Stormshield. + * Copyright (c) 2017 Semihalf. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "opt_platform.h" +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef MVNETA_KTR +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#include +#include +#include + +#include "if_mvnetareg.h" +#include "if_mvnetavar.h" + +#include "miibus_if.h" +#include "mdio_if.h" + +#ifdef MVNETA_DEBUG +#define STATIC /* nothing */ +#else +#define STATIC static +#endif + +#define DASSERT(x) KASSERT((x), (#x)) + +/* Device Register Initialization */ +STATIC int mvneta_initreg(struct ifnet *); + +/* Descriptor Ring Control for each of queues */ +STATIC int mvneta_ring_alloc_rx_queue(struct mvneta_softc *, int); +STATIC int mvneta_ring_alloc_tx_queue(struct mvneta_softc *, int); +STATIC void mvneta_ring_dealloc_rx_queue(struct mvneta_softc *, int); +STATIC void mvneta_ring_dealloc_tx_queue(struct mvneta_softc *, int); +STATIC int mvneta_ring_init_rx_queue(struct mvneta_softc *, int); +STATIC int mvneta_ring_init_tx_queue(struct mvneta_softc *, int); +STATIC void mvneta_ring_flush_rx_queue(struct mvneta_softc *, int); +STATIC void mvneta_ring_flush_tx_queue(struct mvneta_softc *, int); +STATIC void mvneta_dmamap_cb(void *, bus_dma_segment_t *, int, int); +STATIC int mvneta_dma_create(struct mvneta_softc *); + +/* Rx/Tx Queue Control */ +STATIC int mvneta_rx_queue_init(struct ifnet *, int); +STATIC int mvneta_tx_queue_init(struct ifnet *, int); +STATIC int mvneta_rx_queue_enable(struct ifnet *, int); +STATIC int mvneta_tx_queue_enable(struct ifnet *, int); +STATIC void mvneta_rx_lockq(struct mvneta_softc *, int); +STATIC void mvneta_rx_unlockq(struct mvneta_softc *, int); +STATIC void mvneta_tx_lockq(struct mvneta_softc *, int); +STATIC void mvneta_tx_unlockq(struct mvneta_softc *, int); + +/* Interrupt Handlers */ +STATIC void mvneta_disable_intr(struct mvneta_softc *); +STATIC void mvneta_enable_intr(struct mvneta_softc *); +STATIC void mvneta_rxtxth_intr(void *); +STATIC int mvneta_misc_intr(struct mvneta_softc *); +STATIC void mvneta_tick(void *); +/* struct ifnet and mii callbacks*/ +STATIC int mvneta_xmitfast_locked(struct mvneta_softc *, int, struct mbuf **); +STATIC int mvneta_xmit_locked(struct mvneta_softc *, int); +#ifdef MVNETA_MULTIQUEUE +STATIC int mvneta_transmit(struct ifnet *, struct mbuf *); +#else /* !MVNETA_MULTIQUEUE */ +STATIC void mvneta_start(struct ifnet *); +#endif +STATIC void mvneta_qflush(struct ifnet *); +STATIC void mvneta_tx_task(void *, int); +STATIC int mvneta_ioctl(struct ifnet *, u_long, caddr_t); +STATIC void mvneta_init(void *); +STATIC void mvneta_init_locked(void *); +STATIC void mvneta_stop(struct mvneta_softc *); +STATIC void mvneta_stop_locked(struct mvneta_softc *); +STATIC int mvneta_mediachange(struct ifnet *); +STATIC void mvneta_mediastatus(struct ifnet *, struct ifmediareq *); +STATIC void mvneta_portup(struct mvneta_softc *); +STATIC void mvneta_portdown(struct mvneta_softc *); + +/* Link State Notify */ +STATIC void mvneta_update_autoneg(struct mvneta_softc *, int); +STATIC int mvneta_update_media(struct mvneta_softc *, int); +STATIC void mvneta_adjust_link(struct mvneta_softc *); +STATIC void mvneta_update_eee(struct mvneta_softc *); +STATIC void mvneta_update_fc(struct mvneta_softc *); +STATIC void mvneta_link_isr(struct 
mvneta_softc *); +STATIC void mvneta_linkupdate(struct mvneta_softc *, boolean_t); +STATIC void mvneta_linkup(struct mvneta_softc *); +STATIC void mvneta_linkdown(struct mvneta_softc *); +STATIC void mvneta_linkreset(struct mvneta_softc *); + +/* Tx Subroutines */ +STATIC int mvneta_tx_queue(struct mvneta_softc *, struct mbuf **, int); +STATIC void mvneta_tx_set_csumflag(struct ifnet *, + struct mvneta_tx_desc *, struct mbuf *); +STATIC void mvneta_tx_queue_complete(struct mvneta_softc *, int); +STATIC void mvneta_tx_drain(struct mvneta_softc *); + +/* Rx Subroutines */ +STATIC int mvneta_rx(struct mvneta_softc *, int, int); +STATIC void mvneta_rx_queue(struct mvneta_softc *, int, int); +STATIC void mvneta_rx_queue_refill(struct mvneta_softc *, int); +STATIC void mvneta_rx_set_csumflag(struct ifnet *, + struct mvneta_rx_desc *, struct mbuf *); +STATIC void mvneta_rx_buf_free(struct mvneta_softc *, struct mvneta_buf *); + +/* MAC address filter */ +STATIC void mvneta_filter_setup(struct mvneta_softc *); + +/* sysctl(9) */ +STATIC int sysctl_read_mib(SYSCTL_HANDLER_ARGS); +STATIC int sysctl_clear_mib(SYSCTL_HANDLER_ARGS); +STATIC int sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS); +STATIC void sysctl_mvneta_init(struct mvneta_softc *); + +/* MIB */ +STATIC void mvneta_clear_mib(struct mvneta_softc *); +STATIC void mvneta_update_mib(struct mvneta_softc *); + +/* Switch */ +STATIC boolean_t mvneta_has_switch(device_t); + +#define mvneta_sc_lock(sc) mtx_lock(&sc->mtx) +#define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx) + +STATIC struct mtx mii_mutex; +STATIC int mii_init = 0; + +/* Device */ +STATIC int mvneta_detach(device_t); +/* MII */ +STATIC int mvneta_miibus_readreg(device_t, int, int); +STATIC int mvneta_miibus_writereg(device_t, int, int, int); + +static device_method_t mvneta_methods[] = { + /* Device interface */ + DEVMETHOD(device_detach, mvneta_detach), + /* MII interface */ + DEVMETHOD(miibus_readreg, mvneta_miibus_readreg), + DEVMETHOD(miibus_writereg, mvneta_miibus_writereg), + /* MDIO interface */ + DEVMETHOD(mdio_readreg, mvneta_miibus_readreg), + DEVMETHOD(mdio_writereg, mvneta_miibus_writereg), + + /* End */ + DEVMETHOD_END +}; + +DEFINE_CLASS_0(mvneta, mvneta_driver, mvneta_methods, sizeof(struct mvneta_softc)); + +DRIVER_MODULE(miibus, mvneta, miibus_driver, miibus_devclass, 0, 0); +DRIVER_MODULE(mdio, mvneta, mdio_driver, mdio_devclass, 0, 0); +MODULE_DEPEND(mvneta, mdio, 1, 1, 1); +MODULE_DEPEND(mvneta, ether, 1, 1, 1); +MODULE_DEPEND(mvneta, miibus, 1, 1, 1); +MODULE_DEPEND(mvneta, mvxpbm, 1, 1, 1); + +/* + * List of MIB register and names + */ +enum mvneta_mib_idx +{ + MVNETA_MIB_RX_GOOD_OCT_IDX, + MVNETA_MIB_RX_BAD_OCT_IDX, + MVNETA_MIB_TX_MAC_TRNS_ERR_IDX, + MVNETA_MIB_RX_GOOD_FRAME_IDX, + MVNETA_MIB_RX_BAD_FRAME_IDX, + MVNETA_MIB_RX_BCAST_FRAME_IDX, + MVNETA_MIB_RX_MCAST_FRAME_IDX, + MVNETA_MIB_RX_FRAME64_OCT_IDX, + MVNETA_MIB_RX_FRAME127_OCT_IDX, + MVNETA_MIB_RX_FRAME255_OCT_IDX, + MVNETA_MIB_RX_FRAME511_OCT_IDX, + MVNETA_MIB_RX_FRAME1023_OCT_IDX, + MVNETA_MIB_RX_FRAMEMAX_OCT_IDX, + MVNETA_MIB_TX_GOOD_OCT_IDX, + MVNETA_MIB_TX_GOOD_FRAME_IDX, + MVNETA_MIB_TX_EXCES_COL_IDX, + MVNETA_MIB_TX_MCAST_FRAME_IDX, + MVNETA_MIB_TX_BCAST_FRAME_IDX, + MVNETA_MIB_TX_MAC_CTL_ERR_IDX, + MVNETA_MIB_FC_SENT_IDX, + MVNETA_MIB_FC_GOOD_IDX, + MVNETA_MIB_FC_BAD_IDX, + MVNETA_MIB_PKT_UNDERSIZE_IDX, + MVNETA_MIB_PKT_FRAGMENT_IDX, + MVNETA_MIB_PKT_OVERSIZE_IDX, + MVNETA_MIB_PKT_JABBER_IDX, + MVNETA_MIB_MAC_RX_ERR_IDX, + MVNETA_MIB_MAC_CRC_ERR_IDX, + MVNETA_MIB_MAC_COL_IDX, + 
MVNETA_MIB_MAC_LATE_COL_IDX, +}; + +STATIC struct mvneta_mib_def { + uint32_t regnum; + int reg64; + const char *sysctl_name; + const char *desc; +} mvneta_mib_list[] = { + [MVNETA_MIB_RX_GOOD_OCT_IDX] = {MVNETA_MIB_RX_GOOD_OCT, 1, + "rx_good_oct", "Good Octets Rx"}, + [MVNETA_MIB_RX_BAD_OCT_IDX] = {MVNETA_MIB_RX_BAD_OCT, 0, + "rx_bad_oct", "Bad Octets Rx"}, + [MVNETA_MIB_TX_MAC_TRNS_ERR_IDX] = {MVNETA_MIB_TX_MAC_TRNS_ERR, 0, + "tx_mac_err", "MAC Transmit Error"}, + [MVNETA_MIB_RX_GOOD_FRAME_IDX] = {MVNETA_MIB_RX_GOOD_FRAME, 0, + "rx_good_frame", "Good Frames Rx"}, + [MVNETA_MIB_RX_BAD_FRAME_IDX] = {MVNETA_MIB_RX_BAD_FRAME, 0, + "rx_bad_frame", "Bad Frames Rx"}, + [MVNETA_MIB_RX_BCAST_FRAME_IDX] = {MVNETA_MIB_RX_BCAST_FRAME, 0, + "rx_bcast_frame", "Broadcast Frames Rx"}, + [MVNETA_MIB_RX_MCAST_FRAME_IDX] = {MVNETA_MIB_RX_MCAST_FRAME, 0, + "rx_mcast_frame", "Multicast Frames Rx"}, + [MVNETA_MIB_RX_FRAME64_OCT_IDX] = {MVNETA_MIB_RX_FRAME64_OCT, 0, + "rx_frame_1_64", "Frame Size 1 - 64"}, + [MVNETA_MIB_RX_FRAME127_OCT_IDX] = {MVNETA_MIB_RX_FRAME127_OCT, 0, + "rx_frame_65_127", "Frame Size 65 - 127"}, + [MVNETA_MIB_RX_FRAME255_OCT_IDX] = {MVNETA_MIB_RX_FRAME255_OCT, 0, + "rx_frame_128_255", "Frame Size 128 - 255"}, + [MVNETA_MIB_RX_FRAME511_OCT_IDX] = {MVNETA_MIB_RX_FRAME511_OCT, 0, + "rx_frame_256_511", "Frame Size 256 - 511"}, + [MVNETA_MIB_RX_FRAME1023_OCT_IDX] = {MVNETA_MIB_RX_FRAME1023_OCT, 0, + "rx_frame_512_1023", "Frame Size 512 - 1023"}, + [MVNETA_MIB_RX_FRAMEMAX_OCT_IDX] = {MVNETA_MIB_RX_FRAMEMAX_OCT, 0, + "rx_fame_1024_max", "Frame Size 1024 - Max"}, + [MVNETA_MIB_TX_GOOD_OCT_IDX] = {MVNETA_MIB_TX_GOOD_OCT, 1, + "tx_good_oct", "Good Octets Tx"}, + [MVNETA_MIB_TX_GOOD_FRAME_IDX] = {MVNETA_MIB_TX_GOOD_FRAME, 0, + "tx_good_frame", "Good Frames Tx"}, + [MVNETA_MIB_TX_EXCES_COL_IDX] = {MVNETA_MIB_TX_EXCES_COL, 0, + "tx_exces_collision", "Excessive Collision"}, + [MVNETA_MIB_TX_MCAST_FRAME_IDX] = {MVNETA_MIB_TX_MCAST_FRAME, 0, + "tx_mcast_frame", "Multicast Frames Tx"}, + [MVNETA_MIB_TX_BCAST_FRAME_IDX] = {MVNETA_MIB_TX_BCAST_FRAME, 0, + "tx_bcast_frame", "Broadcast Frames Tx"}, + [MVNETA_MIB_TX_MAC_CTL_ERR_IDX] = {MVNETA_MIB_TX_MAC_CTL_ERR, 0, + "tx_mac_ctl_err", "Unknown MAC Control"}, + [MVNETA_MIB_FC_SENT_IDX] = {MVNETA_MIB_FC_SENT, 0, + "fc_tx", "Flow Control Tx"}, + [MVNETA_MIB_FC_GOOD_IDX] = {MVNETA_MIB_FC_GOOD, 0, + "fc_rx_good", "Good Flow Control Rx"}, + [MVNETA_MIB_FC_BAD_IDX] = {MVNETA_MIB_FC_BAD, 0, + "fc_rx_bad", "Bad Flow Control Rx"}, + [MVNETA_MIB_PKT_UNDERSIZE_IDX] = {MVNETA_MIB_PKT_UNDERSIZE, 0, + "pkt_undersize", "Undersized Packets Rx"}, + [MVNETA_MIB_PKT_FRAGMENT_IDX] = {MVNETA_MIB_PKT_FRAGMENT, 0, + "pkt_fragment", "Fragmented Packets Rx"}, + [MVNETA_MIB_PKT_OVERSIZE_IDX] = {MVNETA_MIB_PKT_OVERSIZE, 0, + "pkt_oversize", "Oversized Packets Rx"}, + [MVNETA_MIB_PKT_JABBER_IDX] = {MVNETA_MIB_PKT_JABBER, 0, + "pkt_jabber", "Jabber Packets Rx"}, + [MVNETA_MIB_MAC_RX_ERR_IDX] = {MVNETA_MIB_MAC_RX_ERR, 0, + "mac_rx_err", "MAC Rx Errors"}, + [MVNETA_MIB_MAC_CRC_ERR_IDX] = {MVNETA_MIB_MAC_CRC_ERR, 0, + "mac_crc_err", "MAC CRC Errors"}, + [MVNETA_MIB_MAC_COL_IDX] = {MVNETA_MIB_MAC_COL, 0, + "mac_collision", "MAC Collision"}, + [MVNETA_MIB_MAC_LATE_COL_IDX] = {MVNETA_MIB_MAC_LATE_COL, 0, + "mac_late_collision", "MAC Late Collision"}, +}; + +static struct resource_spec res_spec[] = { + { SYS_RES_MEMORY, 0, RF_ACTIVE }, + { SYS_RES_IRQ, 0, RF_ACTIVE }, + { -1, 0} +}; + +static struct { + driver_intr_t *handler; + char * description; +} mvneta_intrs[] = { + { mvneta_rxtxth_intr, 
"MVNETA aggregated interrupt" }, +}; + +static int +mvneta_set_mac_address(struct mvneta_softc *sc, uint8_t *addr) +{ + unsigned int mac_h; + unsigned int mac_l; + + mac_l = (addr[4] << 8) | (addr[5]); + mac_h = (addr[0] << 24) | (addr[1] << 16) | + (addr[2] << 8) | (addr[3] << 0); + + MVNETA_WRITE(sc, MVNETA_MACAL, mac_l); + MVNETA_WRITE(sc, MVNETA_MACAH, mac_h); + return (0); +} + +static int +mvneta_get_mac_address(struct mvneta_softc *sc, uint8_t *addr) +{ + uint32_t mac_l, mac_h; + +#ifdef FDT + if (mvneta_fdt_mac_address(sc, addr) == 0) + return (0); +#endif + /* + * Fall back -- use the currently programmed address. + */ + mac_l = MVNETA_READ(sc, MVNETA_MACAL); + mac_h = MVNETA_READ(sc, MVNETA_MACAH); + if (mac_l == 0 && mac_h == 0) { + /* + * Generate pseudo-random MAC. + * Set lower part to random number | unit number. + */ + mac_l = arc4random() & ~0xff; + mac_l |= device_get_unit(sc->dev) & 0xff; + mac_h = arc4random(); + mac_h &= ~(3 << 24); /* Clear multicast and LAA bits */ + if (bootverbose) { + device_printf(sc->dev, + "Could not acquire MAC address. " + "Using randomized one.\n"); + } + } + + addr[0] = (mac_h & 0xff000000) >> 24; + addr[1] = (mac_h & 0x00ff0000) >> 16; + addr[2] = (mac_h & 0x0000ff00) >> 8; + addr[3] = (mac_h & 0x000000ff); + addr[4] = (mac_l & 0x0000ff00) >> 8; + addr[5] = (mac_l & 0x000000ff); + return (0); +} + +STATIC boolean_t +mvneta_has_switch(device_t self) +{ + phandle_t node, switch_node, switch_eth, switch_eth_handle; + + node = ofw_bus_get_node(self); + switch_node = + ofw_bus_find_compatible(OF_finddevice("/"), "marvell,dsa"); + switch_eth = 0; + + OF_getencprop(switch_node, "dsa,ethernet", + (void*)&switch_eth_handle, sizeof(switch_eth_handle)); + + if (switch_eth_handle > 0) + switch_eth = OF_node_from_xref(switch_eth_handle); + + /* Return true if dsa,ethernet cell points to us */ + return (node == switch_eth); +} + +STATIC int +mvneta_dma_create(struct mvneta_softc *sc) +{ + size_t maxsize, maxsegsz; + size_t q; + int error; + + /* + * Create Tx DMA + */ + maxsize = maxsegsz = sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT; + + error = bus_dma_tag_create( + bus_get_dma_tag(sc->dev), /* parent */ + 16, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filtfunc, filtfuncarg */ + maxsize, /* maxsize */ + 1, /* nsegments */ + maxsegsz, /* maxsegsz */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + &sc->tx_dtag); /* dmat */ + if (error != 0) { + device_printf(sc->dev, + "Failed to create DMA tag for Tx descriptors.\n"); + goto fail; + } + error = bus_dma_tag_create( + bus_get_dma_tag(sc->dev), /* parent */ + 1, 0, /* alignment, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filtfunc, filtfuncarg */ + MVNETA_PACKET_SIZE, /* maxsize */ + MVNETA_TX_SEGLIMIT, /* nsegments */ + MVNETA_PACKET_SIZE, /* maxsegsz */ + BUS_DMA_ALLOCNOW, /* flags */ + NULL, NULL, /* lockfunc, lockfuncarg */ + &sc->txmbuf_dtag); + if (error != 0) { + device_printf(sc->dev, + "Failed to create DMA tag for Tx mbufs.\n"); + goto fail; + } + + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { + error = mvneta_ring_alloc_tx_queue(sc, q); + if (error != 0) { + device_printf(sc->dev, + "Failed to allocate DMA safe memory for TxQ: %d\n", q); + goto fail; + } + } + + /* + * Create Rx DMA. 
+	 */
+	/* Create tag for Rx descriptors */
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(sc->dev),		/* parent */
+	    32, 0,				/* alignment, boundary */
+	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filtfunc, filtfuncarg */
+	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsize */
+	    1,					/* nsegments */
+	    sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, /* maxsegsz */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockfuncarg */
+	    &sc->rx_dtag);			/* dmat */
+	if (error != 0) {
+		device_printf(sc->dev,
+		    "Failed to create DMA tag for Rx descriptors.\n");
+		goto fail;
+	}
+
+	/* Create tag for Rx buffers */
+	error = bus_dma_tag_create(
+	    bus_get_dma_tag(sc->dev),		/* parent */
+	    32, 0,				/* alignment, boundary */
+	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,			/* highaddr */
+	    NULL, NULL,				/* filtfunc, filtfuncarg */
+	    MVNETA_PACKET_SIZE, 1,		/* maxsize, nsegments */
+	    MVNETA_PACKET_SIZE,			/* maxsegsz */
+	    0,					/* flags */
+	    NULL, NULL,				/* lockfunc, lockfuncarg */
+	    &sc->rxbuf_dtag);			/* dmat */
+	if (error != 0) {
+		device_printf(sc->dev,
+		    "Failed to create DMA tag for Rx buffers.\n");
+		goto fail;
+	}
+
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
+			device_printf(sc->dev,
+			    "Failed to allocate DMA safe memory for RxQ: %d\n", q);
+			goto fail;
+		}
+	}
+
+	return (0);
+fail:
+	mvneta_detach(sc->dev);
+
+	return (error);
+}
+
+/* ARGSUSED */
+int
+mvneta_attach(device_t self)
+{
+	struct mvneta_softc *sc;
+	struct ifnet *ifp;
+	device_t child;
+	int ifm_target;
+	int q, error;
+	uint32_t reg;
+
+	sc = device_get_softc(self);
+	sc->dev = self;
+
+	mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
+
+	error = bus_alloc_resources(self, res_spec, sc->res);
+	if (error) {
+		device_printf(self, "could not allocate resources\n");
+		return (ENXIO);
+	}
+
+	sc->version = MVNETA_READ(sc, MVNETA_PV);
+	device_printf(self, "version is %x\n", sc->version);
+	callout_init(&sc->tick_ch, 0);
+
+	/*
+	 * Make sure the DMA engines are in reset state.
+	 */
+	MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001);
+	MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001);
+
+	/*
+	 * Disable port snoop for buffers and descriptors
+	 * to avoid L2 caching of both without a DRAM copy.
+	 * Obtain the coherency settings from the first MBUS
+	 * window attribute.
+	 */
+	if ((MVNETA_READ(sc, MV_WIN_NETA_BASE(0)) & IO_WIN_COH_ATTR_MASK) == 0) {
+		reg = MVNETA_READ(sc, MVNETA_PSNPCFG);
+		reg &= ~MVNETA_PSNPCFG_DESCSNP_MASK;
+		reg &= ~MVNETA_PSNPCFG_BUFSNP_MASK;
+		MVNETA_WRITE(sc, MVNETA_PSNPCFG, reg);
+	}
+
+	/*
+	 * MAC address.
+	 */
+	if (mvneta_get_mac_address(sc, sc->enaddr)) {
+		device_printf(self, "no MAC address.\n");
+		return (ENXIO);
+	}
+	mvneta_set_mac_address(sc, sc->enaddr);
+
+	mvneta_disable_intr(sc);
+
+	/* Allocate network interface */
+	ifp = sc->ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(self, "if_alloc() failed\n");
+		mvneta_detach(self);
+		return (ENOMEM);
+	}
+	if_initname(ifp, device_get_name(self), device_get_unit(self));
+
+	/*
+	 * We can support 802.1Q VLAN-sized frames and jumbo
+	 * Ethernet frames.
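+	 *
+	 * (For example -- illustrative only, the interface name depends
+	 * on the unit number -- a jumbo MTU could then be configured from
+	 * userland with "ifconfig mvneta0 mtu 9000", subject to the
+	 * driver's maximum frame size.)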
+ */ + ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_JUMBO_MTU; + + ifp->if_softc = sc; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; +#ifdef MVNETA_MULTIQUEUE + ifp->if_transmit = mvneta_transmit; + ifp->if_qflush = mvneta_qflush; +#else /* !MVNETA_MULTIQUEUE */ + ifp->if_start = mvneta_start; + ifp->if_snd.ifq_drv_maxlen = MVNETA_TX_RING_CNT - 1; + IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); + IFQ_SET_READY(&ifp->if_snd); +#endif + ifp->if_init = mvneta_init; + ifp->if_ioctl = mvneta_ioctl; + + /* + * We can do IPv4/TCPv4/UDPv4/TCPv6/UDPv6 checksums in hardware. + */ + ifp->if_capabilities |= IFCAP_HWCSUM; + + /* + * VLAN hardware tagging is not supported by the controller, but + * it is required to perform VLAN hardware checksums, so the + * tagging is done in the driver. + */ + ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; + + /* + * Currently IPv6 HW checksum is broken, so make sure it is disabled. + */ + ifp->if_capabilities &= ~IFCAP_HWCSUM_IPV6; + ifp->if_capenable = ifp->if_capabilities; + + /* + * Large Receive Offload is advertised as a capability, but it is + * left disabled by default (if_capenable was latched above); it + * can be enabled later via SIOCSIFCAP. + */ + ifp->if_capabilities |= IFCAP_LRO; + + ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; + + /* + * Device DMA buffer allocation. + * Handles resource deallocation in case of failure. + */ + error = mvneta_dma_create(sc); + if (error != 0) { + mvneta_detach(self); + return (error); + } + + /* Initialize queues */ + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { + error = mvneta_ring_init_tx_queue(sc, q); + if (error != 0) { + mvneta_detach(self); + return (error); + } + } + + for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { + error = mvneta_ring_init_rx_queue(sc, q); + if (error != 0) { + mvneta_detach(self); + return (error); + } + } + + ether_ifattach(ifp, sc->enaddr); + + /* + * Enable DMA engines and initialize device registers. + */ + MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000); + MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000); + MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM); + mvneta_sc_lock(sc); + mvneta_filter_setup(sc); + mvneta_sc_unlock(sc); + mvneta_initreg(ifp); + + /* + * Now the MAC is working, set up MII. + */ + if (mii_init == 0) { + /* + * The MII bus is shared by all MACs and all PHYs in the SoC; + * serializing the bus access should be safe.
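+ *
+ * The mii_init test above is not racy in practice: device attaches
+ * are serialized by newbus, so two ports cannot race to initialize
+ * the shared mutex.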
+ */ + mtx_init(&mii_mutex, "mvneta_mii", NULL, MTX_DEF); + mii_init = 1; + } + + /* Attach PHY(s) */ + if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) { + error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange, + mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr, + MII_OFFSET_ANY, 0); + if (error != 0) { + if (bootverbose) { + device_printf(self, + "MII attach failed, error: %d\n", error); + } + ether_ifdetach(sc->ifp); + mvneta_detach(self); + return (error); + } + sc->mii = device_get_softc(sc->miibus); + sc->phy_attached = 1; + + /* Disable auto-negotiation in MAC - rely on PHY layer */ + mvneta_update_autoneg(sc, FALSE); + } else if (sc->use_inband_status == TRUE) { + /* In-band link status */ + ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange, + mvneta_mediastatus); + + /* Configure media */ + ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, + 0, NULL); + ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); + ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, + 0, NULL); + ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); + ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, + 0, NULL); + ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO); + + /* Enable auto-negotiation */ + mvneta_update_autoneg(sc, TRUE); + + mvneta_sc_lock(sc); + if (MVNETA_IS_LINKUP(sc)) + mvneta_linkup(sc); + else + mvneta_linkdown(sc); + mvneta_sc_unlock(sc); + + } else { + /* Fixed-link, use predefined values */ + ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange, + mvneta_mediastatus); + + ifm_target = IFM_ETHER; + switch (sc->phy_speed) { + case 2500: + if (sc->phy_mode != MVNETA_PHY_SGMII && + sc->phy_mode != MVNETA_PHY_QSGMII) { + device_printf(self, + "2.5G speed can work only in (Q)SGMII mode\n"); + ether_ifdetach(sc->ifp); + mvneta_detach(self); + return (ENXIO); + } + ifm_target |= IFM_2500_T; + break; + case 1000: + ifm_target |= IFM_1000_T; + break; + case 100: + ifm_target |= IFM_100_TX; + break; + case 10: + ifm_target |= IFM_10_T; + break; + default: + ether_ifdetach(sc->ifp); + mvneta_detach(self); + return (ENXIO); + } + + if (sc->phy_fdx) + ifm_target |= IFM_FDX; + else + ifm_target |= IFM_HDX; + + ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL); + ifmedia_set(&sc->mvneta_ifmedia, ifm_target); + if_link_state_change(sc->ifp, LINK_STATE_UP); + + if (mvneta_has_switch(self)) { + child = device_add_child(sc->dev, "mdio", -1); + if (child == NULL) { + ether_ifdetach(sc->ifp); + mvneta_detach(self); + return (ENXIO); + } + bus_generic_attach(sc->dev); + bus_generic_attach(child); + } + + /* Configure MAC media */ + mvneta_update_media(sc, ifm_target); + } + + sysctl_mvneta_init(sc); + + callout_reset(&sc->tick_ch, 0, mvneta_tick, sc); + + error = bus_setup_intr(self, sc->res[1], + INTR_TYPE_NET | INTR_MPSAFE, NULL, mvneta_intrs[0].handler, sc, + &sc->ih_cookie[0]); + if (error) { + device_printf(self, "could not setup %s\n", + mvneta_intrs[0].description); + ether_ifdetach(sc->ifp); + mvneta_detach(self); + return (error); + } + + return (0); +} + +STATIC int +mvneta_detach(device_t dev) +{ + struct mvneta_softc *sc; + struct ifnet *ifp; + int q; + + sc = device_get_softc(dev); + ifp = sc->ifp; + + mvneta_stop(sc); + /* Detach network interface */ + if (sc->ifp) + if_free(sc->ifp); + + for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) + mvneta_ring_dealloc_rx_queue(sc, q); + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) + 
mvneta_ring_dealloc_tx_queue(sc, q); + + if (sc->tx_dtag != NULL) + bus_dma_tag_destroy(sc->tx_dtag); + if (sc->rx_dtag != NULL) + bus_dma_tag_destroy(sc->rx_dtag); + if (sc->txmbuf_dtag != NULL) + bus_dma_tag_destroy(sc->txmbuf_dtag); + + bus_release_resources(dev, res_spec, sc->res); + return (0); +} + +/* + * MII + */ +STATIC int +mvneta_miibus_readreg(device_t dev, int phy, int reg) +{ + struct mvneta_softc *sc; + struct ifnet *ifp; + uint32_t smi, val; + int i; + + sc = device_get_softc(dev); + ifp = sc->ifp; + + mtx_lock(&mii_mutex); + + for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { + if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) + break; + DELAY(1); + } + if (i == MVNETA_PHY_TIMEOUT) { + if_printf(ifp, "SMI busy timeout\n"); + mtx_unlock(&mii_mutex); + return (-1); + } + + smi = MVNETA_SMI_PHYAD(phy) | + MVNETA_SMI_REGAD(reg) | MVNETA_SMI_OPCODE_READ; + MVNETA_WRITE(sc, MVNETA_SMI, smi); + + for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { + if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) + break; + DELAY(1); + } + + if (i == MVNETA_PHY_TIMEOUT) { + if_printf(ifp, "SMI busy timeout\n"); + mtx_unlock(&mii_mutex); + return (-1); + } + for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { + smi = MVNETA_READ(sc, MVNETA_SMI); + if (smi & MVNETA_SMI_READVALID) + break; + DELAY(1); + } + + if (i == MVNETA_PHY_TIMEOUT) { + if_printf(ifp, "SMI busy timeout\n"); + mtx_unlock(&mii_mutex); + return (-1); + } + + mtx_unlock(&mii_mutex); + +#ifdef MVNETA_KTR + CTR3(KTR_SPARE2, "%s i=%d, timeout=%d\n", ifp->if_xname, i, + MVNETA_PHY_TIMEOUT); +#endif + + val = smi & MVNETA_SMI_DATA_MASK; + +#ifdef MVNETA_KTR + CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, phy, + reg, val); +#endif + return (val); +} + +STATIC int +mvneta_miibus_writereg(device_t dev, int phy, int reg, int val) +{ + struct mvneta_softc *sc; + struct ifnet *ifp; + uint32_t smi; + int i; + + sc = device_get_softc(dev); + ifp = sc->ifp; +#ifdef MVNETA_KTR + CTR4(KTR_SPARE2, "%s phy=%d, reg=%#x, val=%#x\n", ifp->if_xname, + phy, reg, val); +#endif + + mtx_lock(&mii_mutex); + + for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { + if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) + break; + DELAY(1); + } + if (i == MVNETA_PHY_TIMEOUT) { + if_printf(ifp, "SMI busy timeout\n"); + mtx_unlock(&mii_mutex); + return (0); + } + + smi = MVNETA_SMI_PHYAD(phy) | MVNETA_SMI_REGAD(reg) | + MVNETA_SMI_OPCODE_WRITE | (val & MVNETA_SMI_DATA_MASK); + MVNETA_WRITE(sc, MVNETA_SMI, smi); + + for (i = 0; i < MVNETA_PHY_TIMEOUT; i++) { + if ((MVNETA_READ(sc, MVNETA_SMI) & MVNETA_SMI_BUSY) == 0) + break; + DELAY(1); + } + + mtx_unlock(&mii_mutex); + + if (i == MVNETA_PHY_TIMEOUT) + if_printf(ifp, "phy write timed out\n"); + + return (0); +} + +STATIC void +mvneta_portup(struct mvneta_softc *sc) +{ + int q; + + for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { + mvneta_rx_lockq(sc, q); + mvneta_rx_queue_enable(sc->ifp, q); + mvneta_rx_unlockq(sc, q); + } + + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { + mvneta_tx_lockq(sc, q); + mvneta_tx_queue_enable(sc->ifp, q); + mvneta_tx_unlockq(sc, q); + } + +} + +STATIC void +mvneta_portdown(struct mvneta_softc *sc) +{ + struct mvneta_rx_ring *rx; + struct mvneta_tx_ring *tx; + int q, cnt; + uint32_t reg; + + for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { + rx = MVNETA_RX_RING(sc, q); + mvneta_rx_lockq(sc, q); + rx->queue_status = MVNETA_QUEUE_DISABLED; + mvneta_rx_unlockq(sc, q); + } + + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { + tx = MVNETA_TX_RING(sc, q); + mvneta_tx_lockq(sc, q); + tx->queue_status = 
MVNETA_QUEUE_DISABLED; + mvneta_tx_unlockq(sc, q); + } + + /* Wait for all Rx activity to terminate. */ + reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK; + reg = MVNETA_RQC_DIS(reg); + MVNETA_WRITE(sc, MVNETA_RQC, reg); + cnt = 0; + do { + if (cnt >= RX_DISABLE_TIMEOUT) { + if_printf(sc->ifp, + "timeout waiting for RX to stop, rqc 0x%x\n", reg); + break; + } + cnt++; + reg = MVNETA_READ(sc, MVNETA_RQC); + } while ((reg & MVNETA_RQC_EN_MASK) != 0); + + /* Wait for all Tx activity to terminate. */ + reg = MVNETA_READ(sc, MVNETA_PIE); + reg &= ~MVNETA_PIE_TXPKTINTRPTENB_MASK; + MVNETA_WRITE(sc, MVNETA_PIE, reg); + + reg = MVNETA_READ(sc, MVNETA_PRXTXTIM); + reg &= ~MVNETA_PRXTXTI_TBTCQ_MASK; + MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg); + + reg = MVNETA_READ(sc, MVNETA_TQC) & MVNETA_TQC_EN_MASK; + reg = MVNETA_TQC_DIS(reg); + MVNETA_WRITE(sc, MVNETA_TQC, reg); + cnt = 0; + do { + if (cnt >= TX_DISABLE_TIMEOUT) { + if_printf(sc->ifp, + "timeout waiting for TX to stop, tqc 0x%x\n", reg); + break; + } + cnt++; + reg = MVNETA_READ(sc, MVNETA_TQC); + } while ((reg & MVNETA_TQC_EN_MASK) != 0); + + /* Wait until the Tx FIFO is empty. */ + cnt = 0; + do { + if (cnt >= TX_FIFO_EMPTY_TIMEOUT) { + if_printf(sc->ifp, + "timeout waiting for the TX FIFO to drain, ps0 0x%x\n", reg); + break; + } + cnt++; + reg = MVNETA_READ(sc, MVNETA_PS0); + } while (((reg & MVNETA_PS0_TXFIFOEMP) == 0) && + ((reg & MVNETA_PS0_TXINPROG) != 0)); +} + +/* + * Device register initialization: + * reset device registers to the driver's default values. + * The device itself is not enabled here. + */ +STATIC int +mvneta_initreg(struct ifnet *ifp) +{ + struct mvneta_softc *sc; + int q, i; + uint32_t reg; + + sc = ifp->if_softc; +#ifdef MVNETA_KTR + CTR1(KTR_SPARE2, "%s initializing device register", ifp->if_xname); +#endif + + /* Disable Legacy WRR, Disable EJP, Release from reset. */ + MVNETA_WRITE(sc, MVNETA_TQC_1, 0); + /* Enable mbus retry. */ + MVNETA_WRITE(sc, MVNETA_MBUS_CONF, MVNETA_MBUS_RETRY_EN); + + /* Init TX/RX Queue Registers */ + for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { + mvneta_rx_lockq(sc, q); + if (mvneta_rx_queue_init(ifp, q) != 0) { + device_printf(sc->dev, + "initialization failed: cannot initialize queue\n"); + mvneta_rx_unlockq(sc, q); + return (ENOBUFS); + } + mvneta_rx_unlockq(sc, q); + } + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { + mvneta_tx_lockq(sc, q); + if (mvneta_tx_queue_init(ifp, q) != 0) { + device_printf(sc->dev, + "initialization failed: cannot initialize queue\n"); + mvneta_tx_unlockq(sc, q); + return (ENOBUFS); + } + mvneta_tx_unlockq(sc, q); + } + + /* + * Ethernet Unit Control - disable automatic PHY management by HW. + * In case the port uses SMI-controlled PHY, poll its status with + * mii_tick() and update MAC settings accordingly.
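+ *
+ * (Aside on the drain loops in mvneta_portdown() above: they all
+ * follow one bounded-poll shape, sketched here with placeholder
+ * names:
+ *
+ *	cnt = 0;
+ *	do {
+ *		if (cnt++ >= BUDGET) {
+ *			warn();		(log, but keep shutting down)
+ *			break;
+ *		}
+ *		reg = READ(STATUS);
+ *	} while ((reg & BUSY_MASK) != 0);
+ *
+ * A timeout is reported but never fatal, so teardown always makes
+ * progress.)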
+ */ + reg = MVNETA_READ(sc, MVNETA_EUC); + reg &= ~MVNETA_EUC_POLLING; + MVNETA_WRITE(sc, MVNETA_EUC, reg); + + /* EEE: Low Power Idle */ + reg = MVNETA_LPIC0_LILIMIT(MVNETA_LPI_LI); + reg |= MVNETA_LPIC0_TSLIMIT(MVNETA_LPI_TS); + MVNETA_WRITE(sc, MVNETA_LPIC0, reg); + + reg = MVNETA_LPIC1_TWLIMIT(MVNETA_LPI_TW); + MVNETA_WRITE(sc, MVNETA_LPIC1, reg); + + reg = MVNETA_LPIC2_MUSTSET; + MVNETA_WRITE(sc, MVNETA_LPIC2, reg); + + /* Port MAC Control set 0 */ + reg = MVNETA_PMACC0_MUSTSET; /* must write 0x1 */ + reg &= ~MVNETA_PMACC0_PORTEN; /* port is still disabled */ + reg |= MVNETA_PMACC0_FRAMESIZELIMIT(MVNETA_MAX_FRAME); + MVNETA_WRITE(sc, MVNETA_PMACC0, reg); + + /* Port MAC Control set 2 */ + reg = MVNETA_READ(sc, MVNETA_PMACC2); + switch (sc->phy_mode) { + case MVNETA_PHY_QSGMII: + reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN); + MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_QSGMII); + break; + case MVNETA_PHY_SGMII: + reg |= (MVNETA_PMACC2_PCSEN | MVNETA_PMACC2_RGMIIEN); + MVNETA_WRITE(sc, MVNETA_PSERDESCFG, MVNETA_PSERDESCFG_SGMII); + break; + case MVNETA_PHY_RGMII: + case MVNETA_PHY_RGMII_ID: + reg |= MVNETA_PMACC2_RGMIIEN; + break; + } + reg |= MVNETA_PMACC2_MUSTSET; + reg &= ~MVNETA_PMACC2_PORTMACRESET; + MVNETA_WRITE(sc, MVNETA_PMACC2, reg); + + /* Port Configuration Extended: enable Tx CRC generation */ + reg = MVNETA_READ(sc, MVNETA_PXCX); + reg &= ~MVNETA_PXCX_TXCRCDIS; + MVNETA_WRITE(sc, MVNETA_PXCX, reg); + + /* clear MIB counter registers(clear by read) */ + for (i = 0; i < nitems(mvneta_mib_list); i++) { + if (mvneta_mib_list[i].reg64) + MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum); + else + MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum); + } + MVNETA_READ(sc, MVNETA_PDFC); + MVNETA_READ(sc, MVNETA_POFC); + + /* Set SDC register except IPGINT bits */ + reg = MVNETA_SDC_RXBSZ_16_64BITWORDS; + reg |= MVNETA_SDC_TXBSZ_16_64BITWORDS; + reg |= MVNETA_SDC_BLMR; + reg |= MVNETA_SDC_BLMT; + MVNETA_WRITE(sc, MVNETA_SDC, reg); + + return (0); +} + +STATIC void +mvneta_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) +{ + + if (error != 0) + return; + *(bus_addr_t *)arg = segs->ds_addr; +} + +STATIC int +mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q) +{ + struct mvneta_rx_ring *rx; + struct mvneta_buf *rxbuf; + bus_dmamap_t dmap; + int i, error; + + if (q >= MVNETA_RX_QNUM_MAX) + return (EINVAL); + + rx = MVNETA_RX_RING(sc, q); + mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF); + /* Allocate DMA memory for Rx descriptors */ + error = bus_dmamem_alloc(sc->rx_dtag, + (void**)&(rx->desc), + BUS_DMA_NOWAIT | BUS_DMA_ZERO, + &rx->desc_map); + if (error != 0 || rx->desc == NULL) + goto fail; + error = bus_dmamap_load(sc->rx_dtag, rx->desc_map, + rx->desc, + sizeof(struct mvneta_rx_desc) * MVNETA_RX_RING_CNT, + mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT); + if (error != 0) + goto fail; + + for (i = 0; i < MVNETA_RX_RING_CNT; i++) { + error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap); + if (error != 0) { + device_printf(sc->dev, + "Failed to create DMA map for Rx buffer num: %d\n", i); + goto fail; + } + rxbuf = &rx->rxbuf[i]; + rxbuf->dmap = dmap; + rxbuf->m = NULL; + } + + return (0); +fail: + mvneta_ring_dealloc_rx_queue(sc, q); + device_printf(sc->dev, "DMA Ring buffer allocation failure.\n"); + return (error); +} + +STATIC int +mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q) +{ + struct mvneta_tx_ring *tx; + int error; + + if (q >= MVNETA_TX_QNUM_MAX) + return (EINVAL); + tx = MVNETA_TX_RING(sc, q); + 
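/*
+	 * Create the ring mutex before any DMA map is loaded: the teardown
+	 * path keys off mtx_name() to decide whether this queue was ever
+	 * set up.  bus_dmamem_alloc() below returns the ring's KVA, while
+	 * bus_dmamap_load() reports the ring's physical address through
+	 * mvneta_dmamap_cb(), which simply records segs->ds_addr.
+	 */
+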
mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF); + error = bus_dmamem_alloc(sc->tx_dtag, + (void**)&(tx->desc), + BUS_DMA_NOWAIT | BUS_DMA_ZERO, + &tx->desc_map); + if (error != 0 || tx->desc == NULL) + goto fail; + error = bus_dmamap_load(sc->tx_dtag, tx->desc_map, + tx->desc, + sizeof(struct mvneta_tx_desc) * MVNETA_TX_RING_CNT, + mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT); + if (error != 0) + goto fail; + +#ifdef MVNETA_MULTIQUEUE + tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT, + &tx->ring_mtx); + if (tx->br == NULL) { + device_printf(sc->dev, + "Could not setup buffer ring for TxQ(%d)\n", q); + error = ENOMEM; + goto fail; + } +#endif + + return (0); +fail: + mvneta_ring_dealloc_tx_queue(sc, q); + device_printf(sc->dev, "DMA Ring buffer allocation failure.\n"); + return (error); +} + +STATIC void +mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q) +{ + struct mvneta_tx_ring *tx; + struct mvneta_buf *txbuf; + void *kva; + int error; + int i; + + if (q >= MVNETA_TX_QNUM_MAX) + return; + tx = MVNETA_TX_RING(sc, q); + + if (tx->taskq != NULL) { + /* Remove task */ + while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0) + taskqueue_drain(tx->taskq, &tx->task); + } +#ifdef MVNETA_MULTIQUEUE + if (tx->br != NULL) + drbr_free(tx->br, M_DEVBUF); +#endif + + if (sc->txmbuf_dtag != NULL) { + if (mtx_name(&tx->ring_mtx) != NULL) { + /* + * It is assumed that maps are being loaded after mutex + * is initialized. Therefore we can skip unloading maps + * when mutex is empty. + */ + mvneta_tx_lockq(sc, q); + mvneta_ring_flush_tx_queue(sc, q); + mvneta_tx_unlockq(sc, q); + } + for (i = 0; i < MVNETA_TX_RING_CNT; i++) { + txbuf = &tx->txbuf[i]; + if (txbuf->dmap != NULL) { + error = bus_dmamap_destroy(sc->txmbuf_dtag, + txbuf->dmap); + if (error != 0) { + panic("%s: map busy for Tx descriptor (Q%d, %d)", + __func__, q, i); + } + } + } + } + + if (tx->desc_pa != 0) + bus_dmamap_unload(sc->tx_dtag, tx->desc_map); + + kva = (void *)tx->desc; + if (kva != NULL) + bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map); + + if (mtx_name(&tx->ring_mtx) != NULL) + mtx_destroy(&tx->ring_mtx); + + memset(tx, 0, sizeof(*tx)); +} + +STATIC void +mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q) +{ + struct mvneta_rx_ring *rx; + struct lro_ctrl *lro; + void *kva; + + if (q >= MVNETA_RX_QNUM_MAX) + return; + + rx = MVNETA_RX_RING(sc, q); + + mvneta_ring_flush_rx_queue(sc, q); + + if (rx->desc_pa != 0) + bus_dmamap_unload(sc->rx_dtag, rx->desc_map); + + kva = (void *)rx->desc; + if (kva != NULL) + bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map); + + lro = &rx->lro; + tcp_lro_free(lro); + + if (mtx_name(&rx->ring_mtx) != NULL) + mtx_destroy(&rx->ring_mtx); + + memset(rx, 0, sizeof(*rx)); +} + +STATIC int +mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q) +{ + struct mvneta_rx_ring *rx; + struct lro_ctrl *lro; + int error; + + if (q >= MVNETA_RX_QNUM_MAX) + return (0); + + rx = MVNETA_RX_RING(sc, q); + rx->dma = rx->cpu = 0; + rx->queue_th_received = MVNETA_RXTH_COUNT; + rx->queue_th_time = (get_tclk() / 1000) / 10; /* 0.1 [ms] */ + + /* Initialize LRO */ + rx->lro_enabled = FALSE; + if ((sc->ifp->if_capenable & IFCAP_LRO) != 0) { + lro = &rx->lro; + error = tcp_lro_init(lro); + if (error != 0) + device_printf(sc->dev, "LRO Initialization failed!\n"); + else { + rx->lro_enabled = TRUE; + lro->ifp = sc->ifp; + } + } + + return (0); +} + +STATIC int +mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q) +{ + struct mvneta_tx_ring *tx; + struct mvneta_buf 
*txbuf; + int i, error; + + if (q >= MVNETA_TX_QNUM_MAX) + return (0); + + tx = MVNETA_TX_RING(sc, q); + + /* Tx handle */ + for (i = 0; i < MVNETA_TX_RING_CNT; i++) { + txbuf = &tx->txbuf[i]; + txbuf->m = NULL; + /* Tx handle needs DMA map for busdma_load_mbuf() */ + error = bus_dmamap_create(sc->txmbuf_dtag, 0, + &txbuf->dmap); + if (error != 0) { + device_printf(sc->dev, + "can't create dma map (tx ring %d)\n", i); + return (error); + } + } + tx->dma = tx->cpu = 0; + tx->used = 0; + tx->drv_error = 0; + tx->queue_status = MVNETA_QUEUE_DISABLED; + tx->queue_hung = FALSE; + + tx->ifp = sc->ifp; + tx->qidx = q; + TASK_INIT(&tx->task, 0, mvneta_tx_task, tx); + tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK, + taskqueue_thread_enqueue, &tx->taskq); + taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)", + device_get_nameunit(sc->dev), q); + + return (0); +} + +STATIC void +mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q) +{ + struct mvneta_tx_ring *tx; + struct mvneta_buf *txbuf; + int i; + + tx = MVNETA_TX_RING(sc, q); + KASSERT_TX_MTX(sc, q); + + /* Tx handle */ + for (i = 0; i < MVNETA_TX_RING_CNT; i++) { + txbuf = &tx->txbuf[i]; + bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); + if (txbuf->m != NULL) { + m_freem(txbuf->m); + txbuf->m = NULL; + } + } + tx->dma = tx->cpu = 0; + tx->used = 0; +} + +STATIC void +mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q) +{ + struct mvneta_rx_ring *rx; + struct mvneta_buf *rxbuf; + int i; + + rx = MVNETA_RX_RING(sc, q); + KASSERT_RX_MTX(sc, q); + + /* Rx handle */ + for (i = 0; i < MVNETA_RX_RING_CNT; i++) { + rxbuf = &rx->rxbuf[i]; + mvneta_rx_buf_free(sc, rxbuf); + } + rx->dma = rx->cpu = 0; +} + +/* + * Rx/Tx Queue Control + */ +STATIC int +mvneta_rx_queue_init(struct ifnet *ifp, int q) +{ + struct mvneta_softc *sc; + struct mvneta_rx_ring *rx; + uint32_t reg; + + sc = ifp->if_softc; + KASSERT_RX_MTX(sc, q); + rx = MVNETA_RX_RING(sc, q); + DASSERT(rx->desc_pa != 0); + + /* descriptor address */ + MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa); + + /* Rx buffer size and descriptor ring size */ + reg = MVNETA_PRXDQS_BUFFERSIZE(MVNETA_PACKET_SIZE >> 3); + reg |= MVNETA_PRXDQS_DESCRIPTORSQUEUESIZE(MVNETA_RX_RING_CNT); + MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg); +#ifdef MVNETA_KTR + CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", ifp->if_xname, q, + MVNETA_READ(sc, MVNETA_PRXDQS(q))); +#endif + /* Rx packet offset address */ + reg = MVNETA_PRXC_PACKETOFFSET(MVNETA_PACKET_OFFSET >> 3); + MVNETA_WRITE(sc, MVNETA_PRXC(q), reg); +#ifdef MVNETA_KTR + CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", ifp->if_xname, q, + MVNETA_READ(sc, MVNETA_PRXC(q))); +#endif + + /* if DMA is not working, register is not updated */ + DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa); + return (0); +} + +STATIC int +mvneta_tx_queue_init(struct ifnet *ifp, int q) +{ + struct mvneta_softc *sc; + struct mvneta_tx_ring *tx; + uint32_t reg; + + sc = ifp->if_softc; + KASSERT_TX_MTX(sc, q); + tx = MVNETA_TX_RING(sc, q); + DASSERT(tx->desc_pa != 0); + + /* descriptor address */ + MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa); + + /* descriptor ring size */ + reg = MVNETA_PTXDQS_DQS(MVNETA_TX_RING_CNT); + MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg); + + /* if DMA is not working, register is not updated */ + DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa); + return (0); +} + +STATIC int +mvneta_rx_queue_enable(struct ifnet *ifp, int q) +{ + struct mvneta_softc *sc; + struct mvneta_rx_ring *rx; + uint32_t reg; + + sc = ifp->if_softc; + rx = 
MVNETA_RX_RING(sc, q); + KASSERT_RX_MTX(sc, q); + + /* Set Rx interrupt threshold */ + reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received); + MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg); + + reg = MVNETA_PRXITTH_RITT(rx->queue_th_time); + MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg); + + /* Unmask RXTX_TH Intr. */ + reg = MVNETA_READ(sc, MVNETA_PRXTXTIM); + reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalescing */ + MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg); + + /* Enable Rx queue */ + reg = MVNETA_READ(sc, MVNETA_RQC) & MVNETA_RQC_EN_MASK; + reg |= MVNETA_RQC_ENQ(q); + MVNETA_WRITE(sc, MVNETA_RQC, reg); + + rx->queue_status = MVNETA_QUEUE_WORKING; + return (0); +} + +STATIC int +mvneta_tx_queue_enable(struct ifnet *ifp, int q) +{ + struct mvneta_softc *sc; + struct mvneta_tx_ring *tx; + + sc = ifp->if_softc; + tx = MVNETA_TX_RING(sc, q); + KASSERT_TX_MTX(sc, q); + + /* Enable Tx queue */ + MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q)); + + tx->queue_status = MVNETA_QUEUE_IDLE; + tx->queue_hung = FALSE; + return (0); +} + +STATIC __inline void +mvneta_rx_lockq(struct mvneta_softc *sc, int q) +{ + + DASSERT(q >= 0); + DASSERT(q < MVNETA_RX_QNUM_MAX); + mtx_lock(&sc->rx_ring[q].ring_mtx); +} + +STATIC __inline void +mvneta_rx_unlockq(struct mvneta_softc *sc, int q) +{ + + DASSERT(q >= 0); + DASSERT(q < MVNETA_RX_QNUM_MAX); + mtx_unlock(&sc->rx_ring[q].ring_mtx); +} + +STATIC __inline int __unused +mvneta_tx_trylockq(struct mvneta_softc *sc, int q) +{ + + DASSERT(q >= 0); + DASSERT(q < MVNETA_TX_QNUM_MAX); + return (mtx_trylock(&sc->tx_ring[q].ring_mtx)); +} + +STATIC __inline void +mvneta_tx_lockq(struct mvneta_softc *sc, int q) +{ + + DASSERT(q >= 0); + DASSERT(q < MVNETA_TX_QNUM_MAX); + mtx_lock(&sc->tx_ring[q].ring_mtx); +} + +STATIC __inline void +mvneta_tx_unlockq(struct mvneta_softc *sc, int q) +{ + + DASSERT(q >= 0); + DASSERT(q < MVNETA_TX_QNUM_MAX); + mtx_unlock(&sc->tx_ring[q].ring_mtx); +} + +/* + * Interrupt Handlers + */ +STATIC void +mvneta_disable_intr(struct mvneta_softc *sc) +{ + + MVNETA_WRITE(sc, MVNETA_EUIM, 0); + MVNETA_WRITE(sc, MVNETA_EUIC, 0); + MVNETA_WRITE(sc, MVNETA_PRXTXTIM, 0); + MVNETA_WRITE(sc, MVNETA_PRXTXTIC, 0); + MVNETA_WRITE(sc, MVNETA_PRXTXIM, 0); + MVNETA_WRITE(sc, MVNETA_PRXTXIC, 0); + MVNETA_WRITE(sc, MVNETA_PMIM, 0); + MVNETA_WRITE(sc, MVNETA_PMIC, 0); + MVNETA_WRITE(sc, MVNETA_PIE, 0); +} + +STATIC void +mvneta_enable_intr(struct mvneta_softc *sc) +{ + uint32_t reg; + + /* Enable Summary Bit to check all interrupt causes. */ + reg = MVNETA_READ(sc, MVNETA_PRXTXTIM); + reg |= MVNETA_PRXTXTI_PMISCICSUMMARY; + MVNETA_WRITE(sc, MVNETA_PRXTXTIM, reg); + + if (sc->use_inband_status) { + /* Enable Port MISC Intr.
(via RXTX_TH_Summary bit) */ + MVNETA_WRITE(sc, MVNETA_PMIM, MVNETA_PMI_PHYSTATUSCHNG | + MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE); + } + + /* Enable All Queue Interrupt */ + reg = MVNETA_READ(sc, MVNETA_PIE); + reg |= MVNETA_PIE_RXPKTINTRPTENB_MASK; + reg |= MVNETA_PIE_TXPKTINTRPTENB_MASK; + MVNETA_WRITE(sc, MVNETA_PIE, reg); +} + +STATIC void +mvneta_rxtxth_intr(void *arg) +{ + struct mvneta_softc *sc; + struct ifnet *ifp; + uint32_t ic, queues; + + sc = arg; + ifp = sc->ifp; +#ifdef MVNETA_KTR + CTR1(KTR_SPARE2, "%s got RXTX_TH_Intr", ifp->if_xname); +#endif + ic = MVNETA_READ(sc, MVNETA_PRXTXTIC); + if (ic == 0) + return; + MVNETA_WRITE(sc, MVNETA_PRXTXTIC, ~ic); + + /* Ack the maintenance interrupt first */ + if (__predict_false((ic & MVNETA_PRXTXTI_PMISCICSUMMARY) && + sc->use_inband_status)) { + mvneta_sc_lock(sc); + mvneta_misc_intr(sc); + mvneta_sc_unlock(sc); + } + if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) + return; + /* RxTxTH interrupt */ + queues = MVNETA_PRXTXTI_GET_RBICTAPQ(ic); + if (__predict_true(queues)) { +#ifdef MVNETA_KTR + CTR1(KTR_SPARE2, "%s got PRXTXTIC: +RXEOF", ifp->if_xname); +#endif + /* At the moment the driver supports only one RX queue. */ + DASSERT(MVNETA_IS_QUEUE_SET(queues, 0)); + mvneta_rx(sc, 0, 0); + } +} + +STATIC int +mvneta_misc_intr(struct mvneta_softc *sc) +{ + uint32_t ic; + int claimed = 0; + +#ifdef MVNETA_KTR + CTR1(KTR_SPARE2, "%s got MISC_INTR", sc->ifp->if_xname); +#endif + KASSERT_SC_MTX(sc); + + for (;;) { + ic = MVNETA_READ(sc, MVNETA_PMIC); + ic &= MVNETA_READ(sc, MVNETA_PMIM); + if (ic == 0) + break; + MVNETA_WRITE(sc, MVNETA_PMIC, ~ic); + claimed = 1; + + if (ic & (MVNETA_PMI_PHYSTATUSCHNG | + MVNETA_PMI_LINKCHANGE | MVNETA_PMI_PSCSYNCCHANGE)) + mvneta_link_isr(sc); + } + return (claimed); +} + +STATIC void +mvneta_tick(void *arg) +{ + struct mvneta_softc *sc; + struct mvneta_tx_ring *tx; + struct mvneta_rx_ring *rx; + int q; + uint32_t fc_prev, fc_curr; + + sc = arg; + + /* + * This is done before the MIB update to get the right stats + * for this tick. + */ + mvneta_tx_drain(sc); + + /* Extract the previous flow-control frame received counter. */ + fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter; + /* Read MIB registers (clear on read). */ + mvneta_update_mib(sc); + /* Extract the current flow-control frame received counter. */ + fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter; + + + if (sc->phy_attached && sc->ifp->if_flags & IFF_UP) { + mvneta_sc_lock(sc); + mii_tick(sc->mii); + + /* Adjust MAC settings */ + mvneta_adjust_link(sc); + mvneta_sc_unlock(sc); + } + + /* + * If we were unable to refill the Rx queue and left the Rx function, + * the ring was left without mbufs and with no way to call the refill + * function, so do the refill here. + */ + for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { + rx = MVNETA_RX_RING(sc, q); + if (rx->needs_refill == TRUE) { + mvneta_rx_lockq(sc, q); + mvneta_rx_queue_refill(sc, q); + mvneta_rx_unlockq(sc, q); + } + } + + /* + * Watchdog: + * - check if a queue is marked as hung. + * - ignore the hung status if we received pause frames, + * as the hardware may have paused packet transmission. + */ + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { + /* + * We should take the queue lock, but since we only read + * the queue status we can do it without the lock; at worst + * we misdetect the queue status for one tick.
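+ *
+ * The check that follows distills to
+ *
+ *	hung = tx->queue_hung && (fc_curr - fc_prev) == 0;
+ *
+ * The unsigned subtraction behaves across counter wraparound, and a
+ * nonzero delta means pause frames arrived, i.e. the peer throttled
+ * us, so the stall is not treated as a hang.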
*/ + tx = MVNETA_TX_RING(sc, q); + + if (tx->queue_hung && (fc_curr - fc_prev) == 0) + goto timeout; + } + + callout_schedule(&sc->tick_ch, hz); + return; + +timeout: + if_printf(sc->ifp, "watchdog timeout\n"); + + mvneta_sc_lock(sc); + sc->counter_watchdog++; + sc->counter_watchdog_mib++; + /* Trigger the reinitialization sequence. */ + mvneta_stop_locked(sc); + mvneta_init_locked(sc); + mvneta_sc_unlock(sc); +} + +STATIC void +mvneta_qflush(struct ifnet *ifp) +{ +#ifdef MVNETA_MULTIQUEUE + struct mvneta_softc *sc; + struct mvneta_tx_ring *tx; + struct mbuf *m; + size_t q; + + sc = ifp->if_softc; + + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { + tx = MVNETA_TX_RING(sc, q); + mvneta_tx_lockq(sc, q); + while ((m = buf_ring_dequeue_sc(tx->br)) != NULL) + m_freem(m); + mvneta_tx_unlockq(sc, q); + } +#endif + if_qflush(ifp); +} + +STATIC void +mvneta_tx_task(void *arg, int pending) +{ + struct mvneta_softc *sc; + struct mvneta_tx_ring *tx; + struct ifnet *ifp; + int error; + + tx = arg; + ifp = tx->ifp; + sc = ifp->if_softc; + + mvneta_tx_lockq(sc, tx->qidx); + error = mvneta_xmit_locked(sc, tx->qidx); + mvneta_tx_unlockq(sc, tx->qidx); + + /* Try again */ + if (__predict_false(error != 0 && error != ENETDOWN)) { + pause("mvneta_tx_task_sleep", 1); + taskqueue_enqueue(tx->taskq, &tx->task); + } +} + +STATIC int +mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m) +{ + struct mvneta_tx_ring *tx; + struct ifnet *ifp; + int error; + + KASSERT_TX_MTX(sc, q); + tx = MVNETA_TX_RING(sc, q); + error = 0; + + ifp = sc->ifp; + + /* Don't enqueue the packet if the queue is disabled. */ + if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) { + m_freem(*m); + *m = NULL; + return (ENETDOWN); + } + + /* Reclaim mbufs if above the threshold. */ + if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT)) + mvneta_tx_queue_complete(sc, q); + + /* Do not call the transmit path if the queue is already too full. */ + if (__predict_false(tx->used > + MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT)) + return (ENOBUFS); + + error = mvneta_tx_queue(sc, m, q); + if (__predict_false(error != 0)) + return (error); + + /* Send a copy of the frame to the BPF listener */ + ETHER_BPF_MTAP(ifp, *m); + + /* Set watchdog on */ + tx->watchdog_time = ticks; + tx->queue_status = MVNETA_QUEUE_WORKING; + + return (error); +} + +#ifdef MVNETA_MULTIQUEUE +STATIC int +mvneta_transmit(struct ifnet *ifp, struct mbuf *m) +{ + struct mvneta_softc *sc; + struct mvneta_tx_ring *tx; + int error; + int q; + + sc = ifp->if_softc; + + /* Use the default queue if there is no flow ID, as the thread can migrate. */ + if (__predict_true(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)) + q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX; + else + q = 0; + + tx = MVNETA_TX_RING(sc, q); + + /* If the buf_ring is full, start transmitting immediately. */ + if (buf_ring_full(tx->br)) { + mvneta_tx_lockq(sc, q); + mvneta_xmit_locked(sc, q); + mvneta_tx_unlockq(sc, q); + } + + /* + * If the buf_ring is empty we will not reorder packets. + * If the lock is available, transmit without using the buf_ring. + */ + if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) { + error = mvneta_xmitfast_locked(sc, q, &m); + mvneta_tx_unlockq(sc, q); + if (__predict_true(error == 0)) + return (0); + + /* Transmit can fail in the fast path. */ + if (__predict_false(m == NULL)) + return (error); + } + + /* Enqueue, then schedule the taskqueue.
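+ *
+ * Per-flow packet order is preserved across this split path: the
+ * queue index is a pure function of the flowid (q = flowid %
+ * MVNETA_TX_QNUM_MAX above), so every packet of a flow lands on the
+ * same ring whether it took the trylock fast path or the buf_ring.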
*/ + error = drbr_enqueue(ifp, tx->br, m); + if (__predict_false(error != 0)) + return (error); + + taskqueue_enqueue(tx->taskq, &tx->task); + return (0); +} + +STATIC int +mvneta_xmit_locked(struct mvneta_softc *sc, int q) +{ + struct ifnet *ifp; + struct mvneta_tx_ring *tx; + struct mbuf *m; + int error; + + KASSERT_TX_MTX(sc, q); + ifp = sc->ifp; + tx = MVNETA_TX_RING(sc, q); + error = 0; + + while ((m = drbr_peek(ifp, tx->br)) != NULL) { + error = mvneta_xmitfast_locked(sc, q, &m); + if (__predict_false(error != 0)) { + if (m != NULL) + drbr_putback(ifp, tx->br, m); + else + drbr_advance(ifp, tx->br); + break; + } + drbr_advance(ifp, tx->br); + } + + return (error); +} +#else /* !MVNETA_MULTIQUEUE */ +STATIC void +mvneta_start(struct ifnet *ifp) +{ + struct mvneta_softc *sc; + struct mvneta_tx_ring *tx; + int error; + + sc = ifp->if_softc; + tx = MVNETA_TX_RING(sc, 0); + + mvneta_tx_lockq(sc, 0); + error = mvneta_xmit_locked(sc, 0); + mvneta_tx_unlockq(sc, 0); + /* Handle retransmit in the background taskq. */ + if (__predict_false(error != 0 && error != ENETDOWN)) + taskqueue_enqueue(tx->taskq, &tx->task); +} + +STATIC int +mvneta_xmit_locked(struct mvneta_softc *sc, int q) +{ + struct ifnet *ifp; + struct mvneta_tx_ring *tx; + struct mbuf *m; + int error; + + KASSERT_TX_MTX(sc, q); + ifp = sc->ifp; + tx = MVNETA_TX_RING(sc, 0); + error = 0; + + while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { + IFQ_DRV_DEQUEUE(&ifp->if_snd, m); + if (m == NULL) + break; + + error = mvneta_xmitfast_locked(sc, q, &m); + if (__predict_false(error != 0)) { + if (m != NULL) + IFQ_DRV_PREPEND(&ifp->if_snd, m); + break; + } + } + + return (error); +} +#endif + +STATIC int +mvneta_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +{ + struct mvneta_softc *sc; + struct mvneta_rx_ring *rx; + struct ifreq *ifr; + int error, mask; + uint32_t flags; + int q; + + error = 0; + sc = ifp->if_softc; + ifr = (struct ifreq *)data; + switch (cmd) { + case SIOCSIFFLAGS: + mvneta_sc_lock(sc); + if (ifp->if_flags & IFF_UP) { + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + flags = ifp->if_flags ^ sc->mvneta_if_flags; + + if (flags != 0) + sc->mvneta_if_flags = ifp->if_flags; + + if ((flags & IFF_PROMISC) != 0) + mvneta_filter_setup(sc); + } else { + mvneta_init_locked(sc); + sc->mvneta_if_flags = ifp->if_flags; + if (sc->phy_attached) + mii_mediachg(sc->mii); + mvneta_sc_unlock(sc); + break; + } + } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) + mvneta_stop_locked(sc); + + sc->mvneta_if_flags = ifp->if_flags; + mvneta_sc_unlock(sc); + break; + case SIOCSIFCAP: + if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU && + ifr->ifr_reqcap & IFCAP_TXCSUM) + ifr->ifr_reqcap &= ~IFCAP_TXCSUM; + mask = ifp->if_capenable ^ ifr->ifr_reqcap; + if (mask & IFCAP_HWCSUM) { + ifp->if_capenable &= ~IFCAP_HWCSUM; + ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap; + if (ifp->if_capenable & IFCAP_TXCSUM) + ifp->if_hwassist = CSUM_IP | CSUM_TCP | + CSUM_UDP; + else + ifp->if_hwassist = 0; + } + if (mask & IFCAP_LRO) { + mvneta_sc_lock(sc); + ifp->if_capenable ^= IFCAP_LRO; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { + for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { + rx = MVNETA_RX_RING(sc, q); + rx->lro_enabled = !rx->lro_enabled; + } + } + mvneta_sc_unlock(sc); + } + VLAN_CAPABILITIES(ifp); + break; + case SIOCSIFMEDIA: + if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T || + IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) && + (ifr->ifr_media & IFM_FDX) == 0) { + device_printf(sc->dev, + "%s half-duplex unsupported\n", + IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T 
? + "1000Base-T" : + "2500Base-T"); + error = EINVAL; + break; + } + case SIOCGIFMEDIA: /* FALLTHROUGH */ + if (!sc->phy_attached) + error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia, + cmd); + else + error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, + cmd); + break; + case SIOCSIFMTU: + if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME - + MVNETA_ETHER_SIZE) { + error = EINVAL; + } else { + ifp->if_mtu = ifr->ifr_mtu; + mvneta_sc_lock(sc); + if (ifp->if_mtu > MVNETA_MAX_CSUM_MTU) { + ifp->if_capenable &= ~IFCAP_TXCSUM; + ifp->if_hwassist = 0; + } else { + ifp->if_capenable |= IFCAP_TXCSUM; + ifp->if_hwassist = CSUM_IP | CSUM_TCP | + CSUM_UDP; + } + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + /* Trigger reinitialize sequence */ + mvneta_stop_locked(sc); + mvneta_init_locked(sc); + } + mvneta_sc_unlock(sc); + } + break; + + default: + error = ether_ioctl(ifp, cmd, data); + break; + } + + return (error); +} + +STATIC void +mvneta_init_locked(void *arg) +{ + struct mvneta_softc *sc; + struct ifnet *ifp; + uint32_t reg; + int q, cpu; + + sc = arg; + ifp = sc->ifp; + + if (!device_is_attached(sc->dev) || + (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) + return; + + mvneta_disable_intr(sc); + callout_stop(&sc->tick_ch); + + /* Get the latest mac address */ + bcopy(IF_LLADDR(ifp), sc->enaddr, ETHER_ADDR_LEN); + mvneta_set_mac_address(sc, sc->enaddr); + mvneta_filter_setup(sc); + + /* Start DMA Engine */ + MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000000); + MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000000); + MVNETA_WRITE(sc, MVNETA_PACC, MVNETA_PACC_ACCELERATIONMODE_EDM); + + /* Enable port */ + reg = MVNETA_READ(sc, MVNETA_PMACC0); + reg |= MVNETA_PMACC0_PORTEN; + MVNETA_WRITE(sc, MVNETA_PMACC0, reg); + + /* Allow access to each TXQ/RXQ from both CPU's */ + for (cpu = 0; cpu < mp_ncpus; ++cpu) + MVNETA_WRITE(sc, MVNETA_PCP2Q(cpu), + MVNETA_PCP2Q_TXQEN_MASK | MVNETA_PCP2Q_RXQEN_MASK); + + for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { + mvneta_rx_lockq(sc, q); + mvneta_rx_queue_refill(sc, q); + mvneta_rx_unlockq(sc, q); + } + + if (!sc->phy_attached) + mvneta_linkup(sc); + + /* Enable interrupt */ + mvneta_enable_intr(sc); + + /* Set Counter */ + callout_schedule(&sc->tick_ch, hz); + + ifp->if_drv_flags |= IFF_DRV_RUNNING; +} + +STATIC void +mvneta_init(void *arg) +{ + struct mvneta_softc *sc; + + sc = arg; + mvneta_sc_lock(sc); + mvneta_init_locked(sc); + if (sc->phy_attached) + mii_mediachg(sc->mii); + mvneta_sc_unlock(sc); +} + +/* ARGSUSED */ +STATIC void +mvneta_stop_locked(struct mvneta_softc *sc) +{ + struct ifnet *ifp; + struct mvneta_rx_ring *rx; + struct mvneta_tx_ring *tx; + uint32_t reg; + int q; + + ifp = sc->ifp; + if (ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + return; + + mvneta_disable_intr(sc); + + callout_stop(&sc->tick_ch); + + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + + /* Link down */ + if (sc->linkup == TRUE) + mvneta_linkdown(sc); + + /* Reset the MAC Port Enable bit */ + reg = MVNETA_READ(sc, MVNETA_PMACC0); + reg &= ~MVNETA_PMACC0_PORTEN; + MVNETA_WRITE(sc, MVNETA_PMACC0, reg); + + /* Disable each of queue */ + for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) { + rx = MVNETA_RX_RING(sc, q); + + mvneta_rx_lockq(sc, q); + mvneta_ring_flush_rx_queue(sc, q); + mvneta_rx_unlockq(sc, q); + } + + /* + * Hold Reset state of DMA Engine + * (must write 0x0 to restart it) + */ + MVNETA_WRITE(sc, MVNETA_PRXINIT, 0x00000001); + MVNETA_WRITE(sc, MVNETA_PTXINIT, 0x00000001); + + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { + tx = MVNETA_TX_RING(sc, q); + + mvneta_tx_lockq(sc, q); 
+ mvneta_ring_flush_tx_queue(sc, q); + mvneta_tx_unlockq(sc, q); + } +} + +STATIC void +mvneta_stop(struct mvneta_softc *sc) +{ + + mvneta_sc_lock(sc); + mvneta_stop_locked(sc); + mvneta_sc_unlock(sc); +} + +STATIC int +mvneta_mediachange(struct ifnet *ifp) +{ + struct mvneta_softc *sc; + + sc = ifp->if_softc; + + if (!sc->phy_attached && !sc->use_inband_status) { + /* We shouldn't be here */ + if_printf(ifp, "Cannot change media in fixed-link mode!\n"); + return (0); + } + + if (sc->use_inband_status) { + mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media); + return (0); + } + + mvneta_sc_lock(sc); + + /* Update PHY */ + mii_mediachg(sc->mii); + + mvneta_sc_unlock(sc); + + return (0); +} + +STATIC void +mvneta_get_media(struct mvneta_softc *sc, struct ifmediareq *ifmr) +{ + uint32_t psr; + + psr = MVNETA_READ(sc, MVNETA_PSR); + + /* Speed */ + if (psr & MVNETA_PSR_GMIISPEED) + ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T); + else if (psr & MVNETA_PSR_MIISPEED) + ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX); + else if (psr & MVNETA_PSR_LINKUP) + ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T); + + /* Duplex */ + if (psr & MVNETA_PSR_FULLDX) + ifmr->ifm_active |= IFM_FDX; + + /* Link */ + ifmr->ifm_status = IFM_AVALID; + if (psr & MVNETA_PSR_LINKUP) + ifmr->ifm_status |= IFM_ACTIVE; +} + +STATIC void +mvneta_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct mvneta_softc *sc; + struct mii_data *mii; + + sc = ifp->if_softc; + + if (!sc->phy_attached && !sc->use_inband_status) { + ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; + return; + } + + mvneta_sc_lock(sc); + + if (sc->use_inband_status) { + mvneta_get_media(sc, ifmr); + mvneta_sc_unlock(sc); + return; + } + + mii = sc->mii; + mii_pollstat(mii); + + ifmr->ifm_active = mii->mii_media_active; + ifmr->ifm_status = mii->mii_media_status; + + mvneta_sc_unlock(sc); +} + +/* + * Link State Notify + */ +STATIC void +mvneta_update_autoneg(struct mvneta_softc *sc, int enable) +{ + int reg; + + if (enable) { + reg = MVNETA_READ(sc, MVNETA_PANC); + reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS | + MVNETA_PANC_ANFCEN); + reg |= MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN | + MVNETA_PANC_INBANDANEN; + MVNETA_WRITE(sc, MVNETA_PANC, reg); + + reg = MVNETA_READ(sc, MVNETA_PMACC2); + reg |= MVNETA_PMACC2_INBANDANMODE; + MVNETA_WRITE(sc, MVNETA_PMACC2, reg); + + reg = MVNETA_READ(sc, MVNETA_PSOMSCD); + reg |= MVNETA_PSOMSCD_ENABLE; + MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg); + } else { + reg = MVNETA_READ(sc, MVNETA_PANC); + reg &= ~(MVNETA_PANC_FORCELINKFAIL | MVNETA_PANC_FORCELINKPASS | + MVNETA_PANC_ANDUPLEXEN | MVNETA_PANC_ANSPEEDEN | + MVNETA_PANC_INBANDANEN); + MVNETA_WRITE(sc, MVNETA_PANC, reg); + + reg = MVNETA_READ(sc, MVNETA_PMACC2); + reg &= ~MVNETA_PMACC2_INBANDANMODE; + MVNETA_WRITE(sc, MVNETA_PMACC2, reg); + + reg = MVNETA_READ(sc, MVNETA_PSOMSCD); + reg &= ~MVNETA_PSOMSCD_ENABLE; + MVNETA_WRITE(sc, MVNETA_PSOMSCD, reg); + } +} + +STATIC int +mvneta_update_media(struct mvneta_softc *sc, int media) +{ + int reg, err; + boolean_t running; + + err = 0; + + mvneta_sc_lock(sc); + + mvneta_linkreset(sc); + + running = (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; + if (running) + mvneta_stop_locked(sc); + + sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO); + + if (sc->use_inband_status) + mvneta_update_autoneg(sc, IFM_SUBTYPE(media) == IFM_AUTO); + + mvneta_update_eee(sc); + mvneta_update_fc(sc); + + if (IFM_SUBTYPE(media) != IFM_AUTO) { + reg = MVNETA_READ(sc, MVNETA_PANC); + reg &= 
~(MVNETA_PANC_SETGMIISPEED | + MVNETA_PANC_SETMIISPEED | + MVNETA_PANC_SETFULLDX); + if (IFM_SUBTYPE(media) == IFM_1000_T || + IFM_SUBTYPE(media) == IFM_2500_T) { + if ((media & IFM_FDX) == 0) { + device_printf(sc->dev, + "%s half-duplex unsupported\n", + IFM_SUBTYPE(media) == IFM_1000_T ? + "1000Base-T" : + "2500Base-T"); + err = EINVAL; + goto out; + } + reg |= MVNETA_PANC_SETGMIISPEED; + } else if (IFM_SUBTYPE(media) == IFM_100_TX) + reg |= MVNETA_PANC_SETMIISPEED; + + if (media & IFM_FDX) + reg |= MVNETA_PANC_SETFULLDX; + + MVNETA_WRITE(sc, MVNETA_PANC, reg); + } +out: + if (running) + mvneta_init_locked(sc); + mvneta_sc_unlock(sc); + return (err); +} + +STATIC void +mvneta_adjust_link(struct mvneta_softc *sc) +{ + boolean_t phy_linkup; + int reg; + + /* Update eee/fc */ + mvneta_update_eee(sc); + mvneta_update_fc(sc); + + /* Check for link change */ + phy_linkup = (sc->mii->mii_media_status & + (IFM_AVALID | IFM_ACTIVE)) == (IFM_AVALID | IFM_ACTIVE); + + if (sc->linkup != phy_linkup) + mvneta_linkupdate(sc, phy_linkup); + + /* Don't update media on disabled link */ + if (!phy_linkup ) + return; + + /* Check for media type change */ + if (sc->mvneta_media != sc->mii->mii_media_active) { + sc->mvneta_media = sc->mii->mii_media_active; + + reg = MVNETA_READ(sc, MVNETA_PANC); + reg &= ~(MVNETA_PANC_SETGMIISPEED | + MVNETA_PANC_SETMIISPEED | + MVNETA_PANC_SETFULLDX); + if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T || + IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) { + reg |= MVNETA_PANC_SETGMIISPEED; + } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX) + reg |= MVNETA_PANC_SETMIISPEED; + + if (sc->mvneta_media & IFM_FDX) + reg |= MVNETA_PANC_SETFULLDX; + + MVNETA_WRITE(sc, MVNETA_PANC, reg); + } +} + +STATIC void +mvneta_link_isr(struct mvneta_softc *sc) +{ + int linkup; + + KASSERT_SC_MTX(sc); + + linkup = MVNETA_IS_LINKUP(sc) ? TRUE : FALSE; + if (sc->linkup == linkup) + return; + + if (linkup == TRUE) + mvneta_linkup(sc); + else + mvneta_linkdown(sc); + +#ifdef DEBUG + log(LOG_DEBUG, + "%s: link %s\n", device_xname(sc->dev), linkup ? "up" : "down"); +#endif +} + +STATIC void +mvneta_linkupdate(struct mvneta_softc *sc, boolean_t linkup) +{ + + KASSERT_SC_MTX(sc); + + if (linkup == TRUE) + mvneta_linkup(sc); + else + mvneta_linkdown(sc); + +#ifdef DEBUG + log(LOG_DEBUG, + "%s: link %s\n", device_xname(sc->dev), linkup ? 
"up" : "down"); +#endif +} + +STATIC void +mvneta_update_eee(struct mvneta_softc *sc) +{ + uint32_t reg; + + KASSERT_SC_MTX(sc); + + /* set EEE parameters */ + reg = MVNETA_READ(sc, MVNETA_LPIC1); + if (sc->cf_lpi) + reg |= MVNETA_LPIC1_LPIRE; + else + reg &= ~MVNETA_LPIC1_LPIRE; + MVNETA_WRITE(sc, MVNETA_LPIC1, reg); +} + +STATIC void +mvneta_update_fc(struct mvneta_softc *sc) +{ + uint32_t reg; + + KASSERT_SC_MTX(sc); + + reg = MVNETA_READ(sc, MVNETA_PANC); + if (sc->cf_fc) { + /* Flow control negotiation */ + reg |= MVNETA_PANC_PAUSEADV; + reg |= MVNETA_PANC_ANFCEN; + } else { + /* Disable flow control negotiation */ + reg &= ~MVNETA_PANC_PAUSEADV; + reg &= ~MVNETA_PANC_ANFCEN; + } + + MVNETA_WRITE(sc, MVNETA_PANC, reg); +} + +STATIC void +mvneta_linkup(struct mvneta_softc *sc) +{ + uint32_t reg; + + KASSERT_SC_MTX(sc); + + if (!sc->use_inband_status) { + reg = MVNETA_READ(sc, MVNETA_PANC); + reg |= MVNETA_PANC_FORCELINKPASS; + reg &= ~MVNETA_PANC_FORCELINKFAIL; + MVNETA_WRITE(sc, MVNETA_PANC, reg); + } + + mvneta_qflush(sc->ifp); + mvneta_portup(sc); + sc->linkup = TRUE; + if_link_state_change(sc->ifp, LINK_STATE_UP); +} + +STATIC void +mvneta_linkdown(struct mvneta_softc *sc) +{ + uint32_t reg; + + KASSERT_SC_MTX(sc); + + if (!sc->use_inband_status) { + reg = MVNETA_READ(sc, MVNETA_PANC); + reg &= ~MVNETA_PANC_FORCELINKPASS; + reg |= MVNETA_PANC_FORCELINKFAIL; + MVNETA_WRITE(sc, MVNETA_PANC, reg); + } + + mvneta_portdown(sc); + mvneta_qflush(sc->ifp); + sc->linkup = FALSE; + if_link_state_change(sc->ifp, LINK_STATE_DOWN); +} + +STATIC void +mvneta_linkreset(struct mvneta_softc *sc) +{ + struct mii_softc *mii; + + if (sc->phy_attached) { + /* Force reset PHY */ + mii = LIST_FIRST(&sc->mii->mii_phys); + if (mii) + mii_phy_reset(mii); + } +} + +/* + * Tx Subroutines + */ +STATIC int +mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q) +{ + struct ifnet *ifp; + bus_dma_segment_t txsegs[MVNETA_TX_SEGLIMIT]; + struct mbuf *mtmp, *mbuf; + struct mvneta_tx_ring *tx; + struct mvneta_buf *txbuf; + struct mvneta_tx_desc *t; + uint32_t ptxsu; + int start, used, error, i, txnsegs; + + mbuf = *mbufp; + tx = MVNETA_TX_RING(sc, q); + DASSERT(tx->used >= 0); + DASSERT(tx->used <= MVNETA_TX_RING_CNT); + t = NULL; + ifp = sc->ifp; + + if (__predict_false(mbuf->m_flags & M_VLANTAG)) { + mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag); + if (mbuf == NULL) { + tx->drv_error++; + *mbufp = NULL; + return (ENOBUFS); + } + mbuf->m_flags &= ~M_VLANTAG; + *mbufp = mbuf; + } + + if (__predict_false(mbuf->m_next != NULL && + (mbuf->m_pkthdr.csum_flags & + (CSUM_IP | CSUM_TCP | CSUM_UDP)) != 0)) { + if (M_WRITABLE(mbuf) == 0) { + mtmp = m_dup(mbuf, M_NOWAIT); + m_freem(mbuf); + if (mtmp == NULL) { + tx->drv_error++; + *mbufp = NULL; + return (ENOBUFS); + } + *mbufp = mbuf = mtmp; + } + } + + /* load mbuf using dmamap of 1st descriptor */ + txbuf = &tx->txbuf[tx->cpu]; + error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag, + txbuf->dmap, mbuf, txsegs, &txnsegs, + BUS_DMA_NOWAIT); + if (__predict_false(error != 0)) { +#ifdef MVNETA_KTR + CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", ifp->if_xname, q, error); +#endif + /* This is the only recoverable error (except EFBIG). 
*/ + if (error != ENOMEM) { + tx->drv_error++; + m_freem(mbuf); + *mbufp = NULL; + return (ENOBUFS); + } + return (error); + } + + if (__predict_false(txnsegs <= 0 + || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) { + /* not enough free descriptors, or the mbuf is broken */ +#ifdef MVNETA_KTR + CTR3(KTR_SPARE2, "%s:%u not enough descriptors txnsegs=%d", + ifp->if_xname, q, txnsegs); +#endif + bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); + return (ENOBUFS); + } + DASSERT(txbuf->m == NULL); + + /* remember the mbuf in the 1st descriptor */ + txbuf->m = mbuf; + bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap, + BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + + /* load into the tx descriptors */ + start = tx->cpu; + used = 0; + for (i = 0; i < txnsegs; i++) { + t = &tx->desc[tx->cpu]; + t->command = 0; + t->l4ichk = 0; + t->flags = 0; + if (__predict_true(i == 0)) { + /* 1st descriptor */ + t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0); + t->command |= MVNETA_TX_CMD_F; + mvneta_tx_set_csumflag(ifp, t, mbuf); + } + t->bufptr_pa = txsegs[i].ds_addr; + t->bytecnt = txsegs[i].ds_len; + tx->cpu = tx_counter_adv(tx->cpu, 1); + + tx->used++; + used++; + } + /* t is the last descriptor here */ + DASSERT(t != NULL); + t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING; + + bus_dmamap_sync(sc->tx_dtag, tx->desc_map, + BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); + + while (__predict_false(used > 255)) { + ptxsu = MVNETA_PTXSU_NOWD(255); + MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu); + used -= 255; + } + if (__predict_true(used > 0)) { + ptxsu = MVNETA_PTXSU_NOWD(used); + MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu); + } + return (0); +} + +STATIC void +mvneta_tx_set_csumflag(struct ifnet *ifp, + struct mvneta_tx_desc *t, struct mbuf *m) +{ + struct ether_header *eh; + int csum_flags; + uint32_t iphl, ipoff; + struct ip *ip; + + iphl = ipoff = 0; + csum_flags = ifp->if_hwassist & m->m_pkthdr.csum_flags; + eh = mtod(m, struct ether_header *); + switch (ntohs(eh->ether_type)) { + case ETHERTYPE_IP: + ipoff = ETHER_HDR_LEN; + break; + case ETHERTYPE_IPV6: + return; + case ETHERTYPE_VLAN: + ipoff = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + break; + } + + if (__predict_true(csum_flags & (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP))) { + ip = (struct ip *)(m->m_data + ipoff); + iphl = ip->ip_hl<<2; + t->command |= MVNETA_TX_CMD_L3_IP4; + } else { + t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE; + return; + } + + + /* L3 */ + if (csum_flags & CSUM_IP) { + t->command |= MVNETA_TX_CMD_IP4_CHECKSUM; + } + + /* L4 */ + if (csum_flags & CSUM_IP_TCP) { + t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG; + t->command |= MVNETA_TX_CMD_L4_TCP; + } else if (csum_flags & CSUM_IP_UDP) { + t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG; + t->command |= MVNETA_TX_CMD_L4_UDP; + } else + t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE; + + t->l4ichk = 0; + t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2); + t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff); +} + +STATIC void +mvneta_tx_queue_complete(struct mvneta_softc *sc, int q) +{ + struct mvneta_tx_ring *tx; + struct mvneta_buf *txbuf; + struct mvneta_tx_desc *t; + uint32_t ptxs, ptxsu, ndesc; + int i; + + KASSERT_TX_MTX(sc, q); + + tx = MVNETA_TX_RING(sc, q); + if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) + return; + + ptxs = MVNETA_READ(sc, MVNETA_PTXS(q)); + ndesc = MVNETA_PTXS_GET_TBC(ptxs); + + if (__predict_false(ndesc == 0)) { + if (tx->used == 0) + tx->queue_status = MVNETA_QUEUE_IDLE; + else if (tx->queue_status == MVNETA_QUEUE_WORKING && + ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG)) + 
tx->queue_hung = TRUE; + return; + } + +#ifdef MVNETA_KTR + CTR3(KTR_SPARE2, "%s:%u tx_complete begin ndesc=%u", + sc->ifp->if_xname, q, ndesc); +#endif + + bus_dmamap_sync(sc->tx_dtag, tx->desc_map, + BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); + + for (i = 0; i < ndesc; i++) { + t = &tx->desc[tx->dma]; +#ifdef MVNETA_KTR + if (t->flags & MVNETA_TX_F_ES) + CTR3(KTR_SPARE2, "%s tx error queue %d desc %d", + sc->ifp->if_xname, q, tx->dma); +#endif + txbuf = &tx->txbuf[tx->dma]; + if (__predict_true(txbuf->m != NULL)) { + DASSERT((t->command & MVNETA_TX_CMD_F) != 0); + bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap); + m_freem(txbuf->m); + txbuf->m = NULL; + } + else + DASSERT((t->flags & MVNETA_TX_CMD_F) == 0); + tx->dma = tx_counter_adv(tx->dma, 1); + tx->used--; + } + DASSERT(tx->used >= 0); + DASSERT(tx->used <= MVNETA_TX_RING_CNT); + while (__predict_false(ndesc > 255)) { + ptxsu = MVNETA_PTXSU_NORB(255); + MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu); + ndesc -= 255; + } + if (__predict_true(ndesc > 0)) { + ptxsu = MVNETA_PTXSU_NORB(ndesc); + MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu); + } +#ifdef MVNETA_KTR + CTR5(KTR_SPARE2, "%s:%u tx_complete tx_cpu=%d tx_dma=%d tx_used=%d", + sc->ifp->if_xname, q, tx->cpu, tx->dma, tx->used); +#endif + + tx->watchdog_time = ticks; + + if (tx->used == 0) + tx->queue_status = MVNETA_QUEUE_IDLE; +} + +/* + * Do a final TX complete when TX is idle. + */ +STATIC void +mvneta_tx_drain(struct mvneta_softc *sc) +{ + struct mvneta_tx_ring *tx; + int q; + + /* + * Handle trailing mbufs on the TX queue. + * The check is done locklessly to avoid TX path contention. + */ + for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) { + tx = MVNETA_TX_RING(sc, q); + if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP && + tx->used > 0) { + mvneta_tx_lockq(sc, q); + mvneta_tx_queue_complete(sc, q); + mvneta_tx_unlockq(sc, q); + } + } +} + +/* + * Rx Subroutines + */ +STATIC int +mvneta_rx(struct mvneta_softc *sc, int q, int count) +{ + uint32_t prxs, npkt; + int more; + + more = 0; + mvneta_rx_lockq(sc, q); + prxs = MVNETA_READ(sc, MVNETA_PRXS(q)); + npkt = MVNETA_PRXS_GET_ODC(prxs); + if (__predict_false(npkt == 0)) + goto out; + + if (count > 0 && npkt > count) { + more = 1; + npkt = count; + } + mvneta_rx_queue(sc, q, npkt); +out: + mvneta_rx_unlockq(sc, q); + return (more); +} + +/* + * Helper routine for updating the PRXSU register of a given queue. + * Handles a number of processed descriptors bigger than the maximum + * acceptable value.
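+ *
+ * The processed-descriptor field evidently accepts at most 255 per
+ * write, hence the chunked update in the body below, of the shape
+ *
+ *	while (processed > 255) {
+ *		write_prxsu(q, 255);
+ *		processed -= 255;
+ *	}
+ *	write_prxsu(q, processed);	(remainder is posted even if 0)
+ *
+ * where write_prxsu() stands in for the MVNETA_WRITE()/
+ * MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS() pair used below.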
+ */ +STATIC __inline void +mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed) +{ + uint32_t prxsu; + + while (__predict_false(processed > 255)) { + prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(255); + MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu); + processed -= 255; + } + prxsu = MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(processed); + MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu); +} + +static __inline void +mvneta_prefetch(void *p) +{ + + __builtin_prefetch(p); +} + +STATIC void +mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt) +{ + struct ifnet *ifp; + struct mvneta_rx_ring *rx; + struct mvneta_rx_desc *r; + struct mvneta_buf *rxbuf; + struct mbuf *m; + struct lro_ctrl *lro; + struct lro_entry *queued; + void *pktbuf; + int i, pktlen, processed, ndma; + + KASSERT_RX_MTX(sc, q); + + ifp = sc->ifp; + rx = MVNETA_RX_RING(sc, q); + processed = 0; + + if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED)) + return; + + bus_dmamap_sync(sc->rx_dtag, rx->desc_map, + BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); + + for (i = 0; i < npkt; i++) { + /* Prefetch next desc, rxbuf. */ + ndma = rx_counter_adv(rx->dma, 1); + mvneta_prefetch(&rx->desc[ndma]); + mvneta_prefetch(&rx->rxbuf[ndma]); + + /* get descriptor and packet */ + r = &rx->desc[rx->dma]; + rxbuf = &rx->rxbuf[rx->dma]; + m = rxbuf->m; + rxbuf->m = NULL; + DASSERT(m != NULL); + bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap); + /* Prefetch mbuf header. */ + mvneta_prefetch(m); + + processed++; + /* Drop desc with error status or not in a single buffer. */ + DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) == + (MVNETA_RX_F|MVNETA_RX_L)); + if (__predict_false((r->status & MVNETA_RX_ES) || + (r->status & (MVNETA_RX_F|MVNETA_RX_L)) != + (MVNETA_RX_F|MVNETA_RX_L))) + goto rx_error; + + /* + * [ OFF | MH | PKT | CRC ] + * bytecnt cover MH, PKT, CRC + */ + pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE; + pktbuf = (uint8_t *)r->bufptr_va + MVNETA_PACKET_OFFSET + + MVNETA_HWHEADER_SIZE; + + /* Prefetch mbuf data. */ + mvneta_prefetch(pktbuf); + + /* Write value to mbuf (avoid read). */ + m->m_data = pktbuf; + m->m_len = m->m_pkthdr.len = pktlen; + m->m_pkthdr.rcvif = ifp; + mvneta_rx_set_csumflag(ifp, r, m); + + /* Increase rx_dma before releasing the lock. */ + rx->dma = ndma; + + if (__predict_false(rx->lro_enabled && + ((r->status & MVNETA_RX_L3_IP) != 0) && + ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) && + (m->m_pkthdr.csum_flags & + (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == + (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))) { + if (rx->lro.lro_cnt != 0) { + if (tcp_lro_rx(&rx->lro, m, 0) == 0) + goto rx_done; + } + } + + mvneta_rx_unlockq(sc, q); + (*ifp->if_input)(ifp, m); + mvneta_rx_lockq(sc, q); + /* + * Check whether this queue has been disabled in the + * meantime. If yes, then clear LRO and exit. + */ + if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED)) + goto rx_lro; +rx_done: + /* Refresh receive ring to avoid stall and minimize jitter. */ + if (processed >= MVNETA_RX_REFILL_COUNT) { + mvneta_prxsu_update(sc, q, processed); + mvneta_rx_queue_refill(sc, q); + processed = 0; + } + continue; +rx_error: + m_freem(m); + rx->dma = ndma; + /* Refresh receive ring to avoid stall and minimize jitter. 
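+ *
+ * rx_counter_adv() and tx_counter_adv(), used throughout this loop,
+ * come from the driver's headers (not part of this diff); for a
+ * power-of-two ring size they presumably reduce to a masked add,
+ * along the lines of
+ *
+ *	next = (idx + n) & (MVNETA_RX_RING_CNT - 1);
+ *
+ * which lets rx->dma and rx->cpu wrap without bounds checks.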
+		if (processed >= MVNETA_RX_REFILL_COUNT) {
+			mvneta_prxsu_update(sc, q, processed);
+			mvneta_rx_queue_refill(sc, q);
+			processed = 0;
+		}
+	}
+#ifdef MVNETA_KTR
+	CTR3(KTR_SPARE2, "%s:%u %u packets received", ifp->if_xname, q, npkt);
+#endif
+	/* DMA status update */
+	mvneta_prxsu_update(sc, q, processed);
+	/* Refill the rest of buffers if there are any to refill */
+	mvneta_rx_queue_refill(sc, q);
+
+rx_lro:
+	/*
+	 * Flush any outstanding LRO work
+	 */
+	lro = &rx->lro;
+	while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
+		LIST_REMOVE(queued, next);
+		tcp_lro_flush(lro, queued);
+	}
+}
+
+STATIC void
+mvneta_rx_buf_free(struct mvneta_softc *sc, struct mvneta_buf *rxbuf)
+{
+
+	bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
+	/* This frees the whole mbuf chain at once. */
+	m_freem(rxbuf->m);
+}
+
+STATIC void
+mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
+{
+	struct mvneta_rx_ring *rx;
+	struct mvneta_rx_desc *r;
+	struct mvneta_buf *rxbuf;
+	bus_dma_segment_t segs;
+	struct mbuf *m;
+	uint32_t prxs, prxsu, ndesc;
+	int npkt, refill, nsegs, error;
+
+	KASSERT_RX_MTX(sc, q);
+
+	rx = MVNETA_RX_RING(sc, q);
+	prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
+	ndesc = MVNETA_PRXS_GET_NODC(prxs) + MVNETA_PRXS_GET_ODC(prxs);
+	refill = MVNETA_RX_RING_CNT - ndesc;
+#ifdef MVNETA_KTR
+	CTR3(KTR_SPARE2, "%s:%u refill %u packets", sc->ifp->if_xname, q,
+	    refill);
+#endif
+	if (__predict_false(refill <= 0))
+		return;
+
+	for (npkt = 0; npkt < refill; npkt++) {
+		rxbuf = &rx->rxbuf[rx->cpu];
+		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+		if (__predict_false(m == NULL)) {
+			error = ENOBUFS;
+			break;
+		}
+		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+
+		error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
+		    m, &segs, &nsegs, BUS_DMA_NOWAIT);
+		if (__predict_false(error != 0 || nsegs != 1)) {
+			/* Loading the mbuf DMA map failed; retry later. */
+			m_freem(m);
+			break;
+		}
+
+		/* Add the packet to the ring */
+		rxbuf->m = m;
+		r = &rx->desc[rx->cpu];
+		r->bufptr_pa = segs.ds_addr;
+		r->bufptr_va = (uint32_t)m->m_data;
+
+		rx->cpu = rx_counter_adv(rx->cpu, 1);
+	}
+	if (npkt == 0) {
+		if (refill == MVNETA_RX_RING_CNT)
+			rx->needs_refill = TRUE;
+		return;
+	}
+
+	rx->needs_refill = FALSE;
+	bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
+	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+	while (__predict_false(npkt > 255)) {
+		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(255);
+		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+		npkt -= 255;
+	}
+	if (__predict_true(npkt > 0)) {
+		prxsu = MVNETA_PRXSU_NOOFNEWDESCRIPTORS(npkt);
+		MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
+	}
+}
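[Editor's note: the receive loop above recovers the payload from the hardware's [ OFF | MH | PKT | CRC ] buffer layout. A worked example of the arithmetic, using the driver's default constants and an illustrative byte count:

    /*
     * With MVNETA_PACKET_OFFSET = 64, MVNETA_HWHEADER_SIZE = 2 and
     * ETHER_CRC_LEN = 4, a descriptor reporting bytecnt = 1520 yields:
     *
     *   pktlen = 1520 - 4 - 2       = 1514  (a full 1500-byte MTU frame)
     *   pktbuf = bufptr_va + 64 + 2         (skip offset and Marvell header)
     */

The 64-byte offset keeps the IP header cache-line aligned after the 14-byte Ethernet header plus the 2-byte Marvell header.]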
+
+STATIC __inline void
+mvneta_rx_set_csumflag(struct ifnet *ifp,
+    struct mvneta_rx_desc *r, struct mbuf *m)
+{
+	uint32_t csum_flags;
+
+	csum_flags = 0;
+	if (__predict_false((r->status &
+	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) == 0))
+		return; /* not an IP packet */
+
+	/* L3 */
+	if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
+	    MVNETA_RX_IP_HEADER_OK))
+		csum_flags |= CSUM_L3_CALC|CSUM_L3_VALID;
+
+	if (__predict_true((r->status &
+	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
+	    (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP))) {
+		/* L4 */
+		switch (r->status & MVNETA_RX_L4_MASK) {
+		case MVNETA_RX_L4_TCP:
+		case MVNETA_RX_L4_UDP:
+			csum_flags |= CSUM_L4_CALC;
+			if (__predict_true((r->status &
+			    MVNETA_RX_L4_CHECKSUM_OK) ==
+			    MVNETA_RX_L4_CHECKSUM_OK)) {
+				csum_flags |= CSUM_L4_VALID;
+				m->m_pkthdr.csum_data = htons(0xffff);
+			}
+			break;
+		case MVNETA_RX_L4_OTH:
+		default:
+			break;
+		}
+	}
+	m->m_pkthdr.csum_flags = csum_flags;
+}
+
+/*
+ * MAC address filter
+ */
+STATIC void
+mvneta_filter_setup(struct mvneta_softc *sc)
+{
+	struct ifnet *ifp;
+	uint32_t dfut[MVNETA_NDFUT], dfsmt[MVNETA_NDFSMT], dfomt[MVNETA_NDFOMT];
+	uint32_t pxc;
+	int i;
+
+	KASSERT_SC_MTX(sc);
+
+	memset(dfut, 0, sizeof(dfut));
+	memset(dfsmt, 0, sizeof(dfsmt));
+	memset(dfomt, 0, sizeof(dfomt));
+
+	ifp = sc->ifp;
+	/* XXX: all multicast frames are always accepted (ALLMULTI forced). */
+	ifp->if_flags |= IFF_ALLMULTI;
+	if (ifp->if_flags & (IFF_ALLMULTI|IFF_PROMISC)) {
+		for (i = 0; i < MVNETA_NDFSMT; i++) {
+			dfsmt[i] = dfomt[i] =
+			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
+		}
+	}
+
+	pxc = MVNETA_READ(sc, MVNETA_PXC);
+	pxc &= ~(MVNETA_PXC_UPM | MVNETA_PXC_RXQ_MASK | MVNETA_PXC_RXQARP_MASK |
+	    MVNETA_PXC_TCPQ_MASK | MVNETA_PXC_UDPQ_MASK | MVNETA_PXC_BPDUQ_MASK);
+	pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
+	pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
+	pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
+	pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
+	pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
+	pxc |= MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP;
+	if (ifp->if_flags & IFF_BROADCAST) {
+		pxc &= ~(MVNETA_PXC_RB | MVNETA_PXC_RBIP | MVNETA_PXC_RBARP);
+	}
+	if (ifp->if_flags & IFF_PROMISC) {
+		pxc |= MVNETA_PXC_UPM;
+	}
+	MVNETA_WRITE(sc, MVNETA_PXC, pxc);
+
+	/* Set Destination Address Filter Unicast Table */
+	if (ifp->if_flags & IFF_PROMISC) {
+		/* pass all unicast addresses */
+		for (i = 0; i < MVNETA_NDFUT; i++) {
+			dfut[i] =
+			    MVNETA_DF(0, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(1, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(2, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS) |
+			    MVNETA_DF(3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
+		}
+	} else {
+		i = sc->enaddr[5] & 0xf;	/* last nibble */
+		dfut[i>>2] = MVNETA_DF(i&3, MVNETA_DF_QUEUE(0) | MVNETA_DF_PASS);
+	}
+	MVNETA_WRITE_REGION(sc, MVNETA_DFUT(0), dfut, MVNETA_NDFUT);
+
+	/* Set Destination Address Filter Multicast Tables */
+	MVNETA_WRITE_REGION(sc, MVNETA_DFSMT(0), dfsmt, MVNETA_NDFSMT);
+	MVNETA_WRITE_REGION(sc, MVNETA_DFOMT(0), dfomt, MVNETA_NDFOMT);
+}
+
+/*
+ * sysctl(9)
+ */
+STATIC int
+sysctl_read_mib(SYSCTL_HANDLER_ARGS)
+{
+	struct mvneta_sysctl_mib *arg;
+	struct mvneta_softc *sc;
+	uint64_t val;
+
+	arg = (struct mvneta_sysctl_mib *)arg1;
+	if (arg == NULL)
+		return (EINVAL);
+
+	sc = arg->sc;
+	if (sc == NULL)
+		return (EINVAL);
+	if (arg->index < 0 || arg->index >= MVNETA_PORTMIB_NOCOUNTER)
+		return (EINVAL);
+
+	mvneta_sc_lock(sc);
+	val = arg->counter;
+	mvneta_sc_unlock(sc);
+	return (sysctl_handle_64(oidp, &val, 0, req));
+}
+
+STATIC int
+sysctl_clear_mib(SYSCTL_HANDLER_ARGS)
+{
+	struct mvneta_softc *sc;
+	int err, val;
+
+	val = 0;
+	sc = (struct mvneta_softc *)arg1;
+	if (sc == NULL)
+		return (EINVAL);
+
+	err = sysctl_handle_int(oidp, &val, 0, req);
+	if (err != 0)
+		return (err);
+
+	if (val < 0 || val > 1)
+		return (EINVAL);
+
+	if (val == 1) {
+		mvneta_sc_lock(sc);
+		mvneta_clear_mib(sc);
+		mvneta_sc_unlock(sc);
+	}
+
+	return (0);
+}
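[Editor's note: the handler that follows converts between microseconds and Tclk ticks for the RX interrupt-coalescing threshold. With a hypothetical 250 MHz Tclk, a 100 us threshold becomes 25000 ticks. A standalone sketch of the conversion, where get_tclk_hz() is an assumed stand-in for the platform's get_tclk() query:

    #include <stdint.h>

    /* Hypothetical platform hook returning the Tclk frequency in Hz. */
    uint64_t get_tclk_hz(void);

    /*
     * Convert an interrupt-coalescing threshold between microseconds
     * and Tclk ticks, mirroring sysctl_set_queue_rxthtime().  64-bit
     * intermediates avoid overflow for large frequencies.
     */
    static uint32_t
    us_to_tclk(uint32_t us)
    {
        return ((uint32_t)((uint64_t)get_tclk_hz() * us / 1000000ULL));
    }

    static uint32_t
    tclk_to_us(uint32_t ticks)
    {
        return ((uint32_t)((uint64_t)ticks * 1000000ULL / get_tclk_hz()));
    }

Because the division truncates, a round trip through these helpers can lose up to one microsecond of precision, which is harmless at coalescing time scales.]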
+
+STATIC int
+sysctl_set_queue_rxthtime(SYSCTL_HANDLER_ARGS)
+{
+	struct mvneta_sysctl_queue *arg;
+	struct mvneta_rx_ring *rx;
+	struct mvneta_softc *sc;
+	uint32_t reg, time_mvtclk;
+	int err, time_us;
+
+	rx = NULL;
+	arg = (struct mvneta_sysctl_queue *)arg1;
+	if (arg == NULL)
+		return (EINVAL);
+	if (arg->queue < 0 || arg->queue >= MVNETA_RX_QNUM_MAX)
+		return (EINVAL);
+	if (arg->rxtx != MVNETA_SYSCTL_RX)
+		return (EINVAL);
+
+	sc = arg->sc;
+	if (sc == NULL)
+		return (EINVAL);
+
+	/* read the current threshold time */
+	mvneta_sc_lock(sc);
+	mvneta_rx_lockq(sc, arg->queue);
+	rx = MVNETA_RX_RING(sc, arg->queue);
+	time_mvtclk = rx->queue_th_time;
+	time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / get_tclk();
+	mvneta_rx_unlockq(sc, arg->queue);
+	mvneta_sc_unlock(sc);
+
+	err = sysctl_handle_int(oidp, &time_us, 0, req);
+	if (err != 0)
+		return (err);
+
+	mvneta_sc_lock(sc);
+	mvneta_rx_lockq(sc, arg->queue);
+
+	/* update the threshold time (valid range: 0 - 1 second) */
+	if (time_us < 0 || time_us > (1000 * 1000)) {
+		mvneta_rx_unlockq(sc, arg->queue);
+		mvneta_sc_unlock(sc);
+		return (EINVAL);
+	}
+	time_mvtclk =
+	    (uint64_t)get_tclk() * (uint64_t)time_us / (1000ULL * 1000ULL);
+	rx->queue_th_time = time_mvtclk;
+	reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
+	MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
+	mvneta_rx_unlockq(sc, arg->queue);
+	mvneta_sc_unlock(sc);
+
+	return (0);
+}
+
+STATIC void
+sysctl_mvneta_init(struct mvneta_softc *sc)
+{
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid_list *children;
+	struct sysctl_oid_list *rxchildren;
+	struct sysctl_oid_list *qchildren, *mchildren;
+	struct sysctl_oid *tree;
+	int i, q;
+	struct mvneta_sysctl_queue *rxarg;
+#define	MVNETA_SYSCTL_NAME(num) "queue" # num
+	static const char *sysctl_queue_names[] = {
+		MVNETA_SYSCTL_NAME(0), MVNETA_SYSCTL_NAME(1),
+		MVNETA_SYSCTL_NAME(2), MVNETA_SYSCTL_NAME(3),
+		MVNETA_SYSCTL_NAME(4), MVNETA_SYSCTL_NAME(5),
+		MVNETA_SYSCTL_NAME(6), MVNETA_SYSCTL_NAME(7),
+	};
+#undef MVNETA_SYSCTL_NAME
+
+#define	MVNETA_SYSCTL_DESCR(num) "configuration parameters for queue " # num
+	static const char *sysctl_queue_descrs[] = {
+		MVNETA_SYSCTL_DESCR(0), MVNETA_SYSCTL_DESCR(1),
+		MVNETA_SYSCTL_DESCR(2), MVNETA_SYSCTL_DESCR(3),
+		MVNETA_SYSCTL_DESCR(4), MVNETA_SYSCTL_DESCR(5),
+		MVNETA_SYSCTL_DESCR(6), MVNETA_SYSCTL_DESCR(7),
+	};
+#undef MVNETA_SYSCTL_DESCR
+
+	ctx = device_get_sysctl_ctx(sc->dev);
+	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
+
+	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
+	    CTLFLAG_RD, 0, "NETA RX");
+	rxchildren = SYSCTL_CHILDREN(tree);
+	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "mib",
+	    CTLFLAG_RD, 0, "NETA MIB");
+	mchildren = SYSCTL_CHILDREN(tree);
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "flow_control",
+	    CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lpi",
+	    CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
+
+	/*
+	 * MIB access
+	 */
+	/* dev.mvneta.[unit].mib.<name> */
+	for (i = 0; i < MVNETA_PORTMIB_NOCOUNTER; i++) {
+		const char *name = mvneta_mib_list[i].sysctl_name;
+		const char *desc = mvneta_mib_list[i].desc;
+		struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
+
+		mib_arg->sc = sc;
+		mib_arg->index = i;
+		SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, name,
+		    CTLTYPE_U64|CTLFLAG_RD, (void *)mib_arg, 0,
+		    sysctl_read_mib, "I", desc);
+	}
+	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "rx_discard",
+	    CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
+	SYSCTL_ADD_UQUAD(ctx, mchildren, OID_AUTO, "overrun",
+	    CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
+	SYSCTL_ADD_UINT(ctx, mchildren, OID_AUTO, "watchdog",
+	    CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
+
+	SYSCTL_ADD_PROC(ctx, mchildren, OID_AUTO, "reset",
+	    CTLTYPE_INT|CTLFLAG_RW, (void *)sc, 0,
+	    sysctl_clear_mib, "I", "Reset MIB counters");
+
+	for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
+		rxarg = &sc->sysctl_rx_queue[q];
+
+		rxarg->sc = sc;
+		rxarg->queue = q;
+		rxarg->rxtx = MVNETA_SYSCTL_RX;
+
+		/* dev.mvneta.[unit].rx.[queue] */
+		tree = SYSCTL_ADD_NODE(ctx, rxchildren, OID_AUTO,
+		    sysctl_queue_names[q], CTLFLAG_RD, 0,
+		    sysctl_queue_descrs[q]);
+		qchildren = SYSCTL_CHILDREN(tree);
+
+		/* dev.mvneta.[unit].rx.[queue].threshold_timer_us */
+		SYSCTL_ADD_PROC(ctx, qchildren, OID_AUTO, "threshold_timer_us",
+		    CTLTYPE_UINT | CTLFLAG_RW, rxarg, 0,
+		    sysctl_set_queue_rxthtime, "I",
+		    "interrupt coalescing threshold timer [us]");
+	}
+}
+
+/*
+ * MIB
+ */
+STATIC void
+mvneta_clear_mib(struct mvneta_softc *sc)
+{
+	int i;
+
+	KASSERT_SC_MTX(sc);
+
+	for (i = 0; i < nitems(mvneta_mib_list); i++) {
+		if (mvneta_mib_list[i].reg64)
+			MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
+		else
+			MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
+		sc->sysctl_mib[i].counter = 0;
+	}
+	MVNETA_READ(sc, MVNETA_PDFC);
+	sc->counter_pdfc = 0;
+	MVNETA_READ(sc, MVNETA_POFC);
+	sc->counter_pofc = 0;
+	sc->counter_watchdog = 0;
+}
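[Editor's note: a userland consumer can read an accumulated counter or reset the whole MIB block through the nodes registered above. A minimal sketch using sysctlbyname(3); the unit number 0 is illustrative, while "rx_discard" and "reset" are the node names created by sysctl_mvneta_init():

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t frames;
        size_t len = sizeof(frames);
        int one = 1;

        /* Read the accumulated RX discard counter. */
        if (sysctlbyname("dev.mvneta.0.mib.rx_discard", &frames, &len,
            NULL, 0) == 0)
            printf("rx_discard: %ju\n", (uintmax_t)frames);

        /* Writing 1 to the reset node clears all MIB counters. */
        (void)sysctlbyname("dev.mvneta.0.mib.reset", NULL, NULL,
            &one, sizeof(one));
        return (0);
    }
]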
+
+STATIC void
+mvneta_update_mib(struct mvneta_softc *sc)
+{
+	struct mvneta_tx_ring *tx;
+	int i;
+	uint64_t val;
+	uint32_t reg;
+
+	for (i = 0; i < nitems(mvneta_mib_list); i++) {
+		if (mvneta_mib_list[i].reg64)
+			val = MVNETA_READ_MIB_8(sc, mvneta_mib_list[i].regnum);
+		else
+			val = MVNETA_READ_MIB_4(sc, mvneta_mib_list[i].regnum);
+
+		if (val == 0)
+			continue;
+
+		sc->sysctl_mib[i].counter += val;
+		switch (mvneta_mib_list[i].regnum) {
+		case MVNETA_MIB_RX_GOOD_OCT:
+			if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
+			break;
+		case MVNETA_MIB_RX_BAD_FRAME:
+			if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
+			break;
+		case MVNETA_MIB_RX_GOOD_FRAME:
+			if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
+			break;
+		case MVNETA_MIB_RX_MCAST_FRAME:
+			if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
+			break;
+		case MVNETA_MIB_TX_GOOD_OCT:
+			if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
+			break;
+		case MVNETA_MIB_TX_GOOD_FRAME:
+			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
+			break;
+		case MVNETA_MIB_TX_MCAST_FRAME:
+			if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
+			break;
+		case MVNETA_MIB_MAC_COL:
+			if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
+			break;
+		case MVNETA_MIB_TX_MAC_TRNS_ERR:
+		case MVNETA_MIB_TX_EXCES_COL:
+		case MVNETA_MIB_MAC_LATE_COL:
+			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
+			break;
+		}
+	}
+
+	reg = MVNETA_READ(sc, MVNETA_PDFC);
+	sc->counter_pdfc += reg;
+	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
+	reg = MVNETA_READ(sc, MVNETA_POFC);
+	sc->counter_pofc += reg;
+	if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
+
+	/* TX watchdog. */
+	if (sc->counter_watchdog_mib > 0) {
+		if_inc_counter(sc->ifp, IFCOUNTER_OERRORS,
+		    sc->counter_watchdog_mib);
+		sc->counter_watchdog_mib = 0;
+	}
+	/*
+	 * TX driver errors:
+	 * We do not take the queue locks, so as not to disrupt the TX path.
+	 * At most one driver error per queue can be missed; it will be
+	 * picked up by the next MIB update.  We may also clear the counter
+	 * while the TX path is incrementing it, but since we only clear it
+	 * when it was non-zero, at most one error is lost.
+	 */
+	for (i = 0; i < MVNETA_TX_QNUM_MAX; i++) {
+		tx = MVNETA_TX_RING(sc, i);
+
+		if (tx->drv_error > 0) {
+			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS,
+			    tx->drv_error);
+			tx->drv_error = 0;
+		}
+	}
+}
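[Editor's note: the lockless drv_error harvest above tolerates a bounded race: an increment landing between the read and the clear loses at most one count. A standalone illustration of the pattern, with hypothetical names, not driver code:

    #include <stdint.h>

    /*
     * Writer (the TX path) increments under its own lock; the reader
     * harvests without that lock.  If the writer increments between the
     * read of drv_error and the store of 0, exactly that one increment
     * is lost - accepted in exchange for an uncontended TX path.
     */
    struct queue_stats {
        volatile uint32_t drv_error;
    };

    static uint64_t
    harvest(struct queue_stats *q, uint64_t total)
    {
        uint32_t val;

        val = q->drv_error;          /* racy read */
        if (val > 0) {
            total += val;
            q->drv_error = 0;        /* may overwrite one concurrent bump */
        }
        return (total);
    }
]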
Property changes on: head/sys/dev/neta/if_mvneta.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: head/sys/dev/neta/if_mvneta_fdt.c
===================================================================
--- head/sys/dev/neta/if_mvneta_fdt.c	(nonexistent)
+++ head/sys/dev/neta/if_mvneta_fdt.c	(revision 319907)
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_platform.h"
+#include
+__FBSDID("$FreeBSD$");
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include "if_mvnetareg.h"
+#include "if_mvnetavar.h"
+
+#define	PHY_MODE_MAXLEN		10
+#define	INBAND_STATUS_MAXLEN	16
+
+static int mvneta_fdt_probe(device_t);
+static int mvneta_fdt_attach(device_t);
+
+static device_method_t mvneta_fdt_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		mvneta_fdt_probe),
+	DEVMETHOD(device_attach,	mvneta_fdt_attach),
+
+	/* End */
+	DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(mvneta, mvneta_fdt_driver, mvneta_fdt_methods,
+    sizeof(struct mvneta_softc), mvneta_driver);
+
+static devclass_t mvneta_fdt_devclass;
+
+DRIVER_MODULE(mvneta, ofwbus, mvneta_fdt_driver, mvneta_fdt_devclass, 0, 0);
+DRIVER_MODULE(mvneta, simplebus, mvneta_fdt_driver, mvneta_fdt_devclass, 0, 0);
+
+static int mvneta_fdt_phy_acquire(device_t);
+
+static int
+mvneta_fdt_probe(device_t dev)
+{
+
+	if (!ofw_bus_status_okay(dev))
+		return (ENXIO);
+
+	if (!ofw_bus_is_compatible(dev, "marvell,armada-370-neta"))
+		return (ENXIO);
+
+	device_set_desc(dev, "NETA controller");
+	return (BUS_PROBE_DEFAULT);
+}
+
+static int
+mvneta_fdt_attach(device_t dev)
+{
+	int err;
+
+	/* Try to fetch PHY information from FDT */
+	err = mvneta_fdt_phy_acquire(dev);
+	if (err != 0)
+		return (err);
+
+	return (mvneta_attach(dev));
+}
+
+static int
+mvneta_fdt_phy_acquire(device_t dev)
+{
+	struct mvneta_softc *sc;
+	phandle_t node, child, phy_handle;
+	char phymode[PHY_MODE_MAXLEN];
+	char managed[INBAND_STATUS_MAXLEN];
+	char *name;
+
+	sc = device_get_softc(dev);
+	node = ofw_bus_get_node(dev);
+
+	/* PHY mode is crucial */
+	if (OF_getprop(node, "phy-mode", phymode, sizeof(phymode)) <= 0) {
+		device_printf(dev, "Failed to acquire PHY mode from FDT.\n");
+		return (ENXIO);
+	}
+
+	if (strncmp(phymode, "rgmii-id", 8) == 0)
+		sc->phy_mode = MVNETA_PHY_RGMII_ID;
+	else if (strncmp(phymode, "rgmii", 5) == 0)
+		sc->phy_mode = MVNETA_PHY_RGMII;
+	else if (strncmp(phymode, "sgmii", 5) == 0)
+		sc->phy_mode = MVNETA_PHY_SGMII;
+	else if (strncmp(phymode, "qsgmii", 6) == 0)
+		sc->phy_mode = MVNETA_PHY_QSGMII;
+	else
+		sc->phy_mode = MVNETA_PHY_SGMII;	/* default to SGMII */
+
+	/* Check if in-band link status will be used */
+	if (OF_getprop(node, "managed", managed, sizeof(managed)) > 0) {
+		if (strncmp(managed, "in-band-status", 14) == 0) {
+			sc->use_inband_status = TRUE;
+			device_printf(dev, "Use in-band link status.\n");
+			return (0);
+		}
+	}
+
+	if (OF_getencprop(node, "phy", (void *)&phy_handle,
+	    sizeof(phy_handle)) <= 0) {
+		/* Test for fixed-link (present e.g. in 388-gp) */
+		for (child = OF_child(node); child != 0;
+		    child = OF_peer(child)) {
+			if (OF_getprop_alloc(child,
+			    "name", 1, (void **)&name) <= 0) {
+				continue;
+			}
+			if (strncmp(name, "fixed-link", 10) == 0) {
+				free(name, M_OFWPROP);
+				if (OF_getencprop(child, "speed",
+				    &sc->phy_speed,
+				    sizeof(sc->phy_speed)) <= 0) {
+					if (bootverbose) {
+						device_printf(dev,
+						    "No PHY information.\n");
+					}
+					return (ENXIO);
+				}
+				if (OF_hasprop(child, "full-duplex"))
+					sc->phy_fdx = TRUE;
+				else
+					sc->phy_fdx = FALSE;
+
+				/* Keep this flag just for the record */
+				sc->phy_addr = MII_PHY_ANY;
+
+				return (0);
+			}
+			free(name, M_OFWPROP);
+		}
+		if (bootverbose) {
+			device_printf(dev,
+			    "Could not find PHY information in FDT.\n");
+		}
+		return (ENXIO);
+	} else {
+		phy_handle = OF_instance_to_package(phy_handle);
+		if (OF_getencprop(phy_handle, "reg", &sc->phy_addr,
+		    sizeof(sc->phy_addr)) <= 0) {
+			device_printf(dev,
+			    "Could not find PHY address in FDT.\n");
+			return (ENXIO);
+		}
+	}
+
+	return (0);
+}
+
+int
+mvneta_fdt_mac_address(struct mvneta_softc *sc, uint8_t *addr)
+{
+	phandle_t node;
+	uint8_t lmac[ETHER_ADDR_LEN];
+	uint8_t zeromac[] = {[0 ... (ETHER_ADDR_LEN - 1)] = 0};
+	int len;
+
+	/*
+	 * Retrieve hw address from the device tree.
+	 */
+	node = ofw_bus_get_node(sc->dev);
+	if (node == 0)
+		return (ENXIO);
+
+	len = OF_getprop(node, "local-mac-address", (void *)lmac,
+	    sizeof(lmac));
+	if (len != ETHER_ADDR_LEN)
+		return (ENOENT);
+
+	if (memcmp(lmac, zeromac, ETHER_ADDR_LEN) == 0) {
+		/* Invalid MAC address (all zeros) */
+		return (EINVAL);
+	}
+	memcpy(addr, lmac, ETHER_ADDR_LEN);
+
+	return (0);
+}
Property changes on: head/sys/dev/neta/if_mvneta_fdt.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
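[Editor's note: a caller of mvneta_fdt_mac_address() needs a fallback when the device tree carries no usable address. A hypothetical sketch of such a caller; the random, locally administered fallback is illustrative only and is not this driver's actual policy:

    /* Assumes kernel context with sys/param.h, sys/libkern.h,
     * net/ethernet.h and if_mvnetavar.h available. */
    static void
    get_mac_or_random(struct mvneta_softc *sc, uint8_t addr[ETHER_ADDR_LEN])
    {
        if (mvneta_fdt_mac_address(sc, addr) == 0)
            return;
        /* No FDT address: generate a random locally administered MAC. */
        arc4rand(addr, ETHER_ADDR_LEN, 0);
        addr[0] &= ~0x01;   /* clear the multicast bit */
        addr[0] |= 0x02;    /* set the locally administered bit */
    }
]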
Index: head/sys/dev/neta/if_mvnetareg.h
===================================================================
--- head/sys/dev/neta/if_mvnetareg.h	(nonexistent)
+++ head/sys/dev/neta/if_mvnetareg.h	(revision 319907)
@@ -0,0 +1,926 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * Copyright (c) 2015 Internet Initiative Japan Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _IF_MVNETAREG_H_
+#define	_IF_MVNETAREG_H_
+
+#if BYTE_ORDER == BIG_ENDIAN
+#error "BIG ENDIAN not supported"
+#endif
+
+#define	MVNETA_SIZE		0x4000
+
+#define	MVNETA_NWINDOW		6
+#define	MVNETA_NREMAP		4
+
+#define	MVNETA_MAX_QUEUE_SIZE	8
+#define	MVNETA_RX_QNUM_MAX	1
+/* XXX: Currently multi-queue can be used on the Tx side only */
+#ifdef MVNETA_MULTIQUEUE
+#define	MVNETA_TX_QNUM_MAX	2
+#else
+#define	MVNETA_TX_QNUM_MAX	1
+#endif
+
+#if (MVNETA_TX_QNUM_MAX & (MVNETA_TX_QNUM_MAX - 1)) != 0
+#error "MVNETA_TX_QNUM_MAX should be a power of 2"
+#endif
+#if (MVNETA_RX_QNUM_MAX & (MVNETA_RX_QNUM_MAX - 1)) != 0
+#error "MVNETA_RX_QNUM_MAX should be a power of 2"
+#endif
+
+#define	MVNETA_QUEUE(n)		(1 << (n))
+#define	MVNETA_QUEUE_ALL	0xff
+#define	MVNETA_TX_QUEUE_ALL	((1 << MVNETA_TX_QNUM_MAX) - 1)
+
+/* Port RX queues Status (MVNETA_PRXS) */
+#define	MVNETA_PRXS_GET_ODC(reg)	(((reg) >> 0) & 0x3fff)
+#define	MVNETA_PRXS_GET_NODC(reg)	(((reg) >> 16) & 0x3fff)
+
+/* Port RX queues Status Update (MVNETA_PRXSU) */
+#define	MVNETA_PRXSU_NOOFPROCESSEDDESCRIPTORS(x) (((x) & 0xff) << 0)
+#define	MVNETA_PRXSU_NOOFNEWDESCRIPTORS(x)	(((x) & 0xff) << 16)
+
+/* Port RX Initialization (MVNETA_PRXINIT) */
+#define	MVNETA_PRXINIT_RXDMAINIT	(1 << 0)
+
+/*
+ * Tx DMA Miscellaneous Registers
+ */
+/* Transmit Queue Command (MVNETA_TQC) */
+#define	MVNETA_TQC_EN_MASK	(0xff << 0)
+#define	MVNETA_TQC_ENQ(q)	(1 << ((q) + 0))	/* Enable Q */
+#define	MVNETA_TQC_EN(n)	((n) << 0)
+#define	MVNETA_TQC_DIS_MASK	(0xff << 8)
+#define	MVNETA_TQC_DISQ(q)	(1 << ((q) + 8))	/* Disable Q */
+#define	MVNETA_TQC_DIS(n)	((n) << 8)
+
+/*
+ * Tx DMA Networking Controller Miscellaneous Registers
+ */
+/* Port TX queues Descriptors Queue Size (MVNETA_PTXDQS) */
+	/* Descriptors Queue Size */
+#define	MVNETA_PTXDQS_DQS_MASK	(0x3fff << 0)
+#define	MVNETA_PTXDQS_DQS(x)	(((x) & 0x3fff) << 0)
+	/* Transmitted Buffer Threshold */
+#define	MVNETA_PTXDQS_TBT_MASK	(0x3fff << 16)
+#define	MVNETA_PTXDQS_TBT(x)	(((x) & 0x3fff) << 16)
+
+/* Port TX queues Status (MVNETA_PTXS) */
+	/* Transmitted Buffer Counter */
+#define	MVNETA_PTXS_TBC(x)	(((x) & 0x3fff) << 16)
+#define	MVNETA_PTXS_GET_TBC(reg)	(((reg) >> 16) & 0x3fff)
+	/* Pending Descriptors Counter */
+#define	MVNETA_PTXS_PDC(x)	((x) & 0x3fff)
+#define	MVNETA_PTXS_GET_PDC(x)	((x) & 0x3fff)
+
+/* Port TX queues Status Update (MVNETA_PTXSU) */
+	/* Number Of Written Descriptors */
+#define	MVNETA_PTXSU_NOWD(x)	(((x) & 0xff) << 0)
+	/* Number Of Released Buffers */
+#define	MVNETA_PTXSU_NORB(x)	(((x) & 0xff) << 16)
+
+/* TX Transmitted Buffers Counter (MVNETA_TXTBC) */
+	/* Transmitted Buffers Counter */
+#define	MVNETA_TXTBC_TBC(x)	(((x) & 0x3fff) << 16)
+
+/* Port TX Initialization (MVNETA_PTXINIT) */
+#define	MVNETA_PTXINIT_TXDMAINIT	(1 << 0)
+
+/*
+ * Tx DMA Queue Arbiter Registers (Version 1)
+ */
+/* Transmit Queue Fixed Priority Configuration */
+#define	MVNETA_TQFPC_EN(q)	(1 << (q))
+
+/*
+ * RX_TX DMA Registers
+ */
+/* Port Configuration (MVNETA_PXC) */
+#define	MVNETA_PXC_UPM		(1 << 0) /* Uni Promisc mode */
+#define	MVNETA_PXC_RXQ(q)	((q) << 1)
+#define	MVNETA_PXC_RXQ_MASK	MVNETA_PXC_RXQ(7)
+#define	MVNETA_PXC_RXQARP(q)	((q) << 4)
+#define	MVNETA_PXC_RXQARP_MASK	MVNETA_PXC_RXQARP(7)
+#define	MVNETA_PXC_RB		(1 << 7) /* Rej mode of MAC */
+#define	MVNETA_PXC_RBIP		(1 << 8)
+#define	MVNETA_PXC_RBARP	(1 << 9)
+#define	MVNETA_PXC_AMNOTXES	(1 << 12)
+#define	MVNETA_PXC_RBARPF	(1 << 13)
+#define	MVNETA_PXC_TCPCAPEN	(1 << 14)
+#define	MVNETA_PXC_UDPCAPEN	(1 << 15)
+#define	MVNETA_PXC_TCPQ(q)	((q) << 16)
+#define	MVNETA_PXC_TCPQ_MASK	MVNETA_PXC_TCPQ(7)
+#define	MVNETA_PXC_UDPQ(q)	((q) << 19)
+#define	MVNETA_PXC_UDPQ_MASK	MVNETA_PXC_UDPQ(7)
+#define	MVNETA_PXC_BPDUQ(q)	((q) << 22)
+#define	MVNETA_PXC_BPDUQ_MASK	MVNETA_PXC_BPDUQ(7)
+#define	MVNETA_PXC_RXCS		(1 << 25)
+
+/* Port Configuration Extend (MVNETA_PXCX) */
+#define	MVNETA_PXCX_SPAN	(1 << 1)
+#define	MVNETA_PXCX_TXCRCDIS	(1 << 3)
+
+/* Marvell Header (MVNETA_MH) */
+#define	MVNETA_MH_MHEN			(1 << 0)
+#define	MVNETA_MH_DAPREFIX		(0x3 << 1)
+#define	MVNETA_MH_SPID			(0xf << 4)
+#define	MVNETA_MH_MHMASK		(0x3 << 8)
+#define	MVNETA_MH_MHMASK_8QUEUES	(0x0 << 8)
+#define	MVNETA_MH_MHMASK_4QUEUES	(0x1 << 8)
+#define	MVNETA_MH_MHMASK_2QUEUES	(0x3 << 8)
+#define	MVNETA_MH_DSAEN_MASK		(0x3 << 10)
+#define	MVNETA_MH_DSAEN_DISABLE		(0x0 << 10)
+#define	MVNETA_MH_DSAEN_NONEXTENDED	(0x1 << 10)
+#define	MVNETA_MH_DSAEN_EXTENDED	(0x2 << 10)
+
+/*
+ * Serial (SMI/MII) Registers
+ */
+#define	MVNETA_PSOMSCD_ENABLE		(1UL << 31)
+#define	MVNETA_PSERDESCFG_QSGMII	(0x0667)
+#define	MVNETA_PSERDESCFG_SGMII		(0x0cc7)
+/* Port Serial Control0 (MVNETA_PSC0) */
+#define	MVNETA_PSC0_FORCE_FC_MASK	(0x3 << 5)
+#define	MVNETA_PSC0_FORCE_FC(fc)	(((fc) & 0x3) << 5)
+#define	MVNETA_PSC0_FORCE_FC_PAUSE	MVNETA_PSC0_FORCE_FC(0x1)
+#define	MVNETA_PSC0_FORCE_FC_NO_PAUSE	MVNETA_PSC0_FORCE_FC(0x0)
+#define	MVNETA_PSC0_FORCE_BP_MASK	(0x3 << 7)
+#define	MVNETA_PSC0_FORCE_BP(fc)	(((fc) & 0x3) << 7)
+#define	MVNETA_PSC0_FORCE_BP_JAM	MVNETA_PSC0_FORCE_BP(0x1)
+#define	MVNETA_PSC0_FORCE_BP_NO_JAM	MVNETA_PSC0_FORCE_BP(0x0)
+#define	MVNETA_PSC0_DTE_ADV		(1 << 14)
+#define	MVNETA_PSC0_IGN_RXERR		(1 << 28)
+#define	MVNETA_PSC0_IGN_COLLISION	(1 << 29)
+#define	MVNETA_PSC0_IGN_CARRIER		(1 << 30)
+
+/* Ethernet Port Status0 (MVNETA_PS0) */
+#define	MVNETA_PS0_TXINPROG	(1 << 0)
+#define	MVNETA_PS0_TXFIFOEMP	(1 << 8)
+#define	MVNETA_PS0_RXFIFOEMPTY	(1 << 16)
+
+/*
+ * Gigabit Ethernet MAC Serial Parameters Configuration Registers
+ */
+#define	MVNETA_PSPC_MUST_SET	(1 << 3 | 1 << 4 | 1 << 5 | 0x23 << 6)
+#define	MVNETA_PSP1C_MUST_SET	(1 << 0 | 1 << 1 | 1 << 2)
+
+/*
+ * Gigabit Ethernet Auto-Negotiation Configuration Registers
+ */
+/* Port Auto-Negotiation Configuration (MVNETA_PANC) */
+#define	MVNETA_PANC_FORCELINKFAIL	(1 << 0)
+#define	MVNETA_PANC_FORCELINKPASS	(1 << 1)
+#define	MVNETA_PANC_INBANDANEN		(1 << 2)
+#define	MVNETA_PANC_INBANDANBYPASSEN	(1 << 3)
+#define	MVNETA_PANC_INBANDRESTARTAN	(1 << 4)
+#define	MVNETA_PANC_SETMIISPEED		(1 << 5)
+#define	MVNETA_PANC_SETGMIISPEED	(1 << 6)
+#define	MVNETA_PANC_ANSPEEDEN		(1 << 7)
+#define	MVNETA_PANC_SETFCEN		(1 << 8)
+#define	MVNETA_PANC_PAUSEADV		(1 << 9)
+#define	MVNETA_PANC_ANFCEN		(1 << 11)
+#define	MVNETA_PANC_SETFULLDX		(1 << 12)
+#define	MVNETA_PANC_ANDUPLEXEN		(1 << 13)
+#define	MVNETA_PANC_MUSTSET		(1 << 15)
+
+/*
+ * Gigabit Ethernet MAC Control Registers
+ */
+/* Port MAC Control 0 (MVNETA_PMACC0) */
+#define	MVNETA_PMACC0_PORTEN		(1 << 0)
+#define	MVNETA_PMACC0_PORTTYPE		(1 << 1)
+#define	MVNETA_PMACC0_FRAMESIZELIMIT(x)	((((x) >> 1) << 2) & 0x7ffc)
+#define	MVNETA_PMACC0_FRAMESIZELIMIT_MASK (0x7ffc)
+#define	MVNETA_PMACC0_MUSTSET		(1 << 15)
+
+/* Port MAC Control 1 (MVNETA_PMACC1) */
+#define	MVNETA_PMACC1_PCSLB		(1 << 6)
+
+/* Port MAC Control 2 (MVNETA_PMACC2) */
+#define	MVNETA_PMACC2_INBANDANMODE	(1 << 0)
+#define	MVNETA_PMACC2_PCSEN		(1 << 3)
+#define	MVNETA_PMACC2_RGMIIEN		(1 << 4)
+#define	MVNETA_PMACC2_PADDINGDIS	(1 << 5)
+#define	MVNETA_PMACC2_PORTMACRESET	(1 << 6)
+#define	MVNETA_PMACC2_PRBSCHECKEN	(1 << 10)
+#define	MVNETA_PMACC2_PRBSGENEN		(1 << 11)
+#define	MVNETA_PMACC2_SDTT_MASK		(3 << 12) /* Select Data To Transmit */
+#define	MVNETA_PMACC2_SDTT_RM		(0 << 12) /* Regular Mode */
+#define	MVNETA_PMACC2_SDTT_PRBS		(1 << 12) /* PRBS Mode */
+#define	MVNETA_PMACC2_SDTT_ZC		(2 << 12) /* Zero Constant */
+#define	MVNETA_PMACC2_SDTT_OC		(3 << 12) /* One Constant */
+#define	MVNETA_PMACC2_MUSTSET		(3 << 14)
+
+/* Port MAC Control 3 (MVNETA_PMACC3) */
+#define	MVNETA_PMACC3_IPG_MASK		0x7f80
+
+/*
+ * Gigabit Ethernet MAC Interrupt Registers
+ */
+/* Port Interrupt Cause/Mask (MVNETA_PIC/MVNETA_PIM) */
+#define	MVNETA_PI_INTSUM	(1 << 0)
+#define	MVNETA_PI_LSC		(1 << 1)   /* LinkStatus Change */
+#define	MVNETA_PI_ACOP		(1 << 2)   /* AnCompleted OnPort */
+#define	MVNETA_PI_AOOR		(1 << 5)   /* AddressOut Of Range */
+#define	MVNETA_PI_SSC		(1 << 6)   /* SyncStatus Change */
+#define	MVNETA_PI_PRBSEOP	(1 << 7)   /* QSGMII PRBS error */
+#define	MVNETA_PI_QSGMIIPRBSE	(1 << 10)  /* QSGMII PRBS error */
+#define	MVNETA_PI_PCSRXPRLPI	(1 << 11)  /* PCS Rx path received LPI */
+#define	MVNETA_PI_PCSTXPRLPI	(1 << 12)  /* PCS Tx path received LPI */
+#define	MVNETA_PI_MACRXPRLPI	(1 << 13)  /* MAC Rx path received LPI */
+#define	MVNETA_PI_MIBCCD	(1 << 14)  /* MIB counters copy done */
+#define	MVNETA_PI_MIBCWA	(1 << 15)  /* MIB counter wrap around */
+
+/*
+ * Gigabit Ethernet MAC Low Power Idle Registers
+ */
+/* LPI Control 0 (MVNETA_LPIC0) */
+#define	MVNETA_LPIC0_LILIMIT(x)	(((x) & 0xff) << 0)
+#define	MVNETA_LPIC0_TSLIMIT(x)	(((x) & 0xff) << 8)
+
+/* LPI Control 1 (MVNETA_LPIC1) */
+#define	MVNETA_LPIC1_LPIRE	(1 << 0)  /* LPI request enable */
+#define	MVNETA_LPIC1_LPIRF	(1 << 1)  /* LPI request force */
+#define	MVNETA_LPIC1_LPIMM	(1 << 2)  /* LPI manual mode */
+#define	MVNETA_LPIC1_TWLIMIT(x)	(((x) & 0xfff) << 4)
+
+/* LPI Control 2 (MVNETA_LPIC2) */
+#define	MVNETA_LPIC2_MUSTSET	0x17d
+
+/* LPI Status (MVNETA_LPIS) */
+#define	MVNETA_LPIS_PCSRXPLPIS	(1 << 0) /* PCS Rx path LPI status */
+#define	MVNETA_LPIS_PCSTXPLPIS	(1 << 1) /* PCS Tx path LPI status */
+#define	MVNETA_LPIS_MACRXPLPIS	(1 << 2) /* MAC Rx path LP idle status */
+#define	MVNETA_LPIS_MACTXPLPWS	(1 << 3) /* MAC Tx path LP wait status */
+#define	MVNETA_LPIS_MACTXPLPIS	(1 << 4) /* MAC Tx path LP idle status */
+
+/*
+ * Gigabit Ethernet MAC PRBS Check Status Registers
+ */
+/* Port PRBS Status (MVNETA_PPRBSS) */
+#define	MVNETA_PPRBSS_PRBSCHECKLOCKED	(1 << 0)
+#define	MVNETA_PPRBSS_PRBSCHECKRDY	(1 << 1)
+
+/*
+ * Gigabit Ethernet MAC Status Registers
+ */
+/* Port Status Register (MVNETA_PSR) */
+#define	MVNETA_PSR_LINKUP	(1 << 0)
+#define	MVNETA_PSR_GMIISPEED	(1 << 1)
+#define	MVNETA_PSR_MIISPEED	(1 << 2)
+#define	MVNETA_PSR_FULLDX	(1 << 3)
+#define	MVNETA_PSR_RXFCEN	(1 << 4)
+#define	MVNETA_PSR_TXFCEN	(1 << 5)
+#define	MVNETA_PSR_PRP		(1 << 6)  /* Port Rx Pause */
+#define	MVNETA_PSR_PTP		(1 << 7)  /* Port Tx Pause */
+#define	MVNETA_PSR_PDP		(1 << 8)  /* Port is Doing Back-Pressure */
+#define	MVNETA_PSR_SYNCFAIL10MS	(1 << 10)
+#define	MVNETA_PSR_ANDONE	(1 << 11)
+#define	MVNETA_PSR_IBANBA	(1 << 12) /* InBand AutoNeg BypassAct */
+#define	MVNETA_PSR_SYNCOK	(1 << 14)
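[Editor's note: the PSR bits above are what a MII/ifmedia status callback translates into stack-visible link state. A hedged sketch of such a decode; the real driver's ifmedia glue lives in if_mvneta.c and may differ:

    #include <net/if_media.h>

    /* Hypothetical decode of MVNETA_PSR into an ifmedia word. */
    static int
    psr_to_media(uint32_t psr)
    {
        int media;

        if ((psr & MVNETA_PSR_LINKUP) == 0)
            return (IFM_ETHER | IFM_NONE);

        if (psr & MVNETA_PSR_GMIISPEED)
            media = IFM_ETHER | IFM_1000_T;
        else if (psr & MVNETA_PSR_MIISPEED)
            media = IFM_ETHER | IFM_100_TX;
        else
            media = IFM_ETHER | IFM_10_T;

        if (psr & MVNETA_PSR_FULLDX)
            media |= IFM_FDX;
        else
            media |= IFM_HDX;

        return (media);
    }
]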
+
+/*
+ * Networking Controller Interrupt Registers
+ */
+/* Port CPU to Queue */
+#define	MVNETA_MAXCPU		2
+#define	MVNETA_PCP2Q_TXQEN(q)	(1 << ((q) + 8))
+#define	MVNETA_PCP2Q_TXQEN_MASK	(0xff << 8)
+#define	MVNETA_PCP2Q_RXQEN(q)	(1 << ((q) + 0))
+#define	MVNETA_PCP2Q_RXQEN_MASK	(0xff << 0)
+
+/* Port RX_TX Interrupt Threshold */
+#define	MVNETA_PRXITTH_RITT(t)	((t) & 0xffffff)
+
+/* Port RX_TX Threshold Interrupt Cause/Mask (MVNETA_PRXTXTIC/MVNETA_PRXTXTIM) */
+#define	MVNETA_PRXTXTI_TBTCQ(q)		(1 << ((q) + 0))
+#define	MVNETA_PRXTXTI_TBTCQ_MASK	(0xff << 0)
+#define	MVNETA_PRXTXTI_GET_TBTCQ(reg)	(((reg) >> 0) & 0xff)
+	/* Tx Buffer Threshold Cross Queue */
+#define	MVNETA_PRXTXTI_RBICTAPQ(q)	(1 << ((q) + 8))
+#define	MVNETA_PRXTXTI_RBICTAPQ_MASK	(0xff << 8)
+#define	MVNETA_PRXTXTI_GET_RBICTAPQ(reg) (((reg) >> 8) & 0xff)
+	/* Rx Buffer Int. Coalescing Th. Pri. Alert Q */
+#define	MVNETA_PRXTXTI_RDTAQ(q)		(1 << ((q) + 16))
+#define	MVNETA_PRXTXTI_RDTAQ_MASK	(0xff << 16)
+#define	MVNETA_PRXTXTI_GET_RDTAQ(reg)	(((reg) >> 16) & 0xff)
+	/* Rx Descriptor Threshold Alert Queue */
+#define	MVNETA_PRXTXTI_PRXTXICSUMMARY	(1 << 29)   /* PRXTXI summary */
+#define	MVNETA_PRXTXTI_PTXERRORSUMMARY	(1 << 30)   /* PTXERROR summary */
+#define	MVNETA_PRXTXTI_PMISCICSUMMARY	(1UL << 31) /* PMISCIC summary */
+
+/* Port RX_TX Interrupt Cause/Mask (MVNETA_PRXTXIC/MVNETA_PRXTXIM) */
+#define	MVNETA_PRXTXI_TBRQ(q)		(1 << ((q) + 0))
+#define	MVNETA_PRXTXI_TBRQ_MASK		(0xff << 0)
+#define	MVNETA_PRXTXI_GET_TBRQ(reg)	(((reg) >> 0) & 0xff)
+#define	MVNETA_PRXTXI_RPQ(q)		(1 << ((q) + 8))
+#define	MVNETA_PRXTXI_RPQ_MASK		(0xff << 8)
+#define	MVNETA_PRXTXI_GET_RPQ(reg)	(((reg) >> 8) & 0xff)
+#define	MVNETA_PRXTXI_RREQ(q)		(1 << ((q) + 16))
+#define	MVNETA_PRXTXI_RREQ_MASK		(0xff << 16)
+#define	MVNETA_PRXTXI_GET_RREQ(reg)	(((reg) >> 16) & 0xff)
+#define	MVNETA_PRXTXI_PRXTXTHICSUMMARY	(1 << 29)
+#define	MVNETA_PRXTXI_PTXERRORSUMMARY	(1 << 30)
+#define	MVNETA_PRXTXI_PMISCICSUMMARY	(1UL << 31)
+
+/* Port Misc Interrupt Cause/Mask (MVNETA_PMIC/MVNETA_PMIM) */
+#define	MVNETA_PMI_PHYSTATUSCHNG	(1 << 0)
+#define	MVNETA_PMI_LINKCHANGE		(1 << 1)
+#define	MVNETA_PMI_IAE			(1 << 7)  /* Internal Address Error */
+#define	MVNETA_PMI_RXOVERRUN		(1 << 8)
+#define	MVNETA_PMI_RXCRCERROR		(1 << 9)
+#define	MVNETA_PMI_RXLARGEPACKET	(1 << 10)
+#define	MVNETA_PMI_TXUNDRN		(1 << 11)
+#define	MVNETA_PMI_PRBSERROR		(1 << 12)
+#define	MVNETA_PMI_PSCSYNCCHANGE	(1 << 13)
+#define	MVNETA_PMI_SRSE			(1 << 14) /* SerdesRealignSyncError */
+#define	MVNETA_PMI_TREQ(q)		(1 << ((q) + 24)) /* TxResourceErrorQ */
+#define	MVNETA_PMI_TREQ_MASK		(0xff << 24)      /* TxResourceErrorQ */
+
+/* Port Interrupt Enable (MVNETA_PIE) */
+#define	MVNETA_PIE_RXPKTINTRPTENB(q)	(1 << ((q) + 0))
+#define	MVNETA_PIE_TXPKTINTRPTENB(q)	(1 << ((q) + 8))
+#define	MVNETA_PIE_RXPKTINTRPTENB_MASK	(0xff << 0)
+#define	MVNETA_PIE_TXPKTINTRPTENB_MASK	(0xff << 8)
+
+/*
+ * Miscellaneous Interrupt Registers
+ */
+#define	MVNETA_PEUIAE_ADDR_MASK		(0x3fff)
+#define	MVNETA_PEUIAE_ADDR(addr)	((addr) & 0x3fff)
+#define	MVNETA_PEUIAE_GET_ADDR(reg)	((reg) & 0x3fff)
+
+/*
+ * SGMII PHY Registers
+ */
+/* Power and PLL Control (MVNETA_PPLLC) */
+#define	MVNETA_PPLLC_REF_FREF_SEL_MASK	(0xf << 0)
+#define	MVNETA_PPLLC_PHY_MODE_MASK	(7 << 5)
+#define	MVNETA_PPLLC_PHY_MODE_SATA	(0 << 5)
+#define	MVNETA_PPLLC_PHY_MODE_SAS	(1 << 5)
+#define	MVNETA_PPLLC_PLL_LOCK		(1 << 8)
+#define	MVNETA_PPLLC_PU_DFE		(1 << 10)
+#define	MVNETA_PPLLC_PU_TX_INTP		(1 << 11)
+#define	MVNETA_PPLLC_PU_TX		(1 << 12)
+#define	MVNETA_PPLLC_PU_RX		(1 << 13)
+#define	MVNETA_PPLLC_PU_PLL		(1 << 14)
+
+/* Digital Loopback Enable (MVNETA_DLE) */
+#define	MVNETA_DLE_LOCAL_SEL_BITS_MASK		(3 << 10)
+#define	MVNETA_DLE_LOCAL_SEL_BITS_10BITS	(0 << 10)
+#define	MVNETA_DLE_LOCAL_SEL_BITS_20BITS	(1 << 10)
+#define	MVNETA_DLE_LOCAL_SEL_BITS_40BITS	(2 << 10)
+#define	MVNETA_DLE_LOCAL_RXPHER_TO_TX_EN	(1 << 12)
+#define	MVNETA_DLE_LOCAL_ANA_TX2RX_LPBK_EN	(1 << 13)
+#define	MVNETA_DLE_LOCAL_DIG_TX2RX_LPBK_EN	(1 << 14)
+#define	MVNETA_DLE_LOCAL_DIG_RX2TX_LPBK_EN	(1 << 15)
+
+/* Reference Clock Select (MVNETA_RCS) */
+#define	MVNETA_RCS_REFCLK_SEL	(1 << 10)
+
+/*
+ * DMA descriptors
+ */
+struct mvneta_tx_desc {
+	/* LITTLE_ENDIAN */
+	uint32_t command;	/* off 0x00: commands */
+	uint16_t l4ichk;	/* initial checksum */
+	uint16_t bytecnt;	/* off 0x04: buffer byte count */
+	uint32_t bufptr_pa;	/* off 0x08: buffer ptr (PA) */
+	uint32_t flags;		/* off 0x0c: flags */
+	uint32_t reserved0;	/* off 0x10 */
+	uint32_t reserved1;	/* off 0x14 */
+	uint32_t reserved2;	/* off 0x18 */
+	uint32_t reserved3;	/* off 0x1c */
+};
+
+struct mvneta_rx_desc {
+	/* LITTLE_ENDIAN */
+	uint32_t status;	/* status and flags */
+	uint16_t reserved0;
+	uint16_t bytecnt;	/* buffer byte count */
+	uint32_t bufptr_pa;	/* packet buffer pointer */
+	uint32_t reserved1;
+	uint32_t bufptr_va;
+	uint16_t reserved2;
+	uint16_t l4chk;		/* L4 checksum */
+	uint32_t reserved3;
+	uint32_t reserved4;
+};
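[Editor's note: a transmit path fills mvneta_tx_desc for a single-buffer frame by combining the command bits that follow. A hedged sketch; ring bookkeeping and checksum-offload bit selection, which the real encap path in if_mvneta.c performs, are omitted:

    /* Hypothetical single-segment TX descriptor setup. */
    static void
    fill_tx_desc(struct mvneta_tx_desc *t, uint32_t buf_pa, uint16_t len)
    {
        t->command = MVNETA_TX_CMD_L4_CHECKSUM_NONE |
            MVNETA_TX_CMD_F | MVNETA_TX_CMD_L |   /* first and last buffer */
            MVNETA_TX_CMD_PADDING;                /* pad runt frames */
        t->l4ichk = 0;
        t->bytecnt = len;
        t->bufptr_pa = buf_pa;
        t->flags = 0;
    }

For multi-segment frames, only the first descriptor carries MVNETA_TX_CMD_F and only the final one MVNETA_TX_CMD_L.]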
+
+/*
+ * Received packet command header:
+ *   network controller => software
+ * The controller parses the packet and sets some flags.
+ */
+#define	MVNETA_RX_IPV4_FRAGMENT	(1UL << 31) /* Fragment Indicator */
+#define	MVNETA_RX_L4_CHECKSUM_OK (1 << 30)  /* L4 Checksum */
+/* bit 29 reserved */
+#define	MVNETA_RX_U		(1 << 28)   /* Unknown Destination */
+#define	MVNETA_RX_F		(1 << 27)   /* First buffer */
+#define	MVNETA_RX_L		(1 << 26)   /* Last buffer */
+#define	MVNETA_RX_IP_HEADER_OK	(1 << 25)   /* IP Header is OK */
+#define	MVNETA_RX_L3_IP		(1 << 24)   /* IP Type 0:IP6 1:IP4 */
+#define	MVNETA_RX_L2_EV2	(1 << 23)   /* Ethernet v2 frame */
+#define	MVNETA_RX_L4_MASK	(3 << 21)   /* L4 Type */
+#define	MVNETA_RX_L4_TCP	(0x0 << 21)
+#define	MVNETA_RX_L4_UDP	(0x1 << 21)
+#define	MVNETA_RX_L4_OTH	(0x2 << 21)
+#define	MVNETA_RX_BPDU		(1 << 20)   /* BPDU frame */
+#define	MVNETA_RX_VLAN		(1 << 19)   /* VLAN tag found */
+#define	MVNETA_RX_EC_MASK	(3 << 17)   /* Error code */
+#define	MVNETA_RX_EC_CE		(0x0 << 17) /* CRC error */
+#define	MVNETA_RX_EC_OR		(0x1 << 17) /* FIFO overrun */
+#define	MVNETA_RX_EC_MF		(0x2 << 17) /* Max. frame len */
+#define	MVNETA_RX_EC_RE		(0x3 << 17) /* Resource error */
+#define	MVNETA_RX_ES		(1 << 16)   /* Error summary */
+/* bit 15:0 reserved */
+
+/*
+ * Transmit packet command header:
+ *   software => network controller
+ */
+#define	MVNETA_TX_CMD_L4_CHECKSUM_MASK	 (0x3 << 30) /* Do L4 Checksum */
+#define	MVNETA_TX_CMD_L4_CHECKSUM_FRAG	 (0x0 << 30)
+#define	MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG (0x1 << 30)
+#define	MVNETA_TX_CMD_L4_CHECKSUM_NONE	 (0x2 << 30)
+#define	MVNETA_TX_CMD_PACKET_OFFSET_MASK (0x7f << 23) /* Payload offset */
+#define	MVNETA_TX_CMD_W_PACKET_OFFSET(v) (((v) & 0x7f) << 23)
+/* bit 22 reserved */
+#define	MVNETA_TX_CMD_F			(1 << 21) /* First buffer */
+#define	MVNETA_TX_CMD_L			(1 << 20) /* Last buffer */
+#define	MVNETA_TX_CMD_PADDING		(1 << 19) /* Pad short frame */
+#define	MVNETA_TX_CMD_IP4_CHECKSUM	(1 << 18) /* Do IPv4 Checksum */
+#define	MVNETA_TX_CMD_L3_IP4		(0 << 17)
+#define	MVNETA_TX_CMD_L3_IP6		(1 << 17)
+#define	MVNETA_TX_CMD_L4_TCP		(0 << 16)
+#define	MVNETA_TX_CMD_L4_UDP		(1 << 16)
+/* bit 15:13 reserved */
+#define	MVNETA_TX_CMD_IP_HEADER_LEN_MASK (0x1f << 8) /* IP header len >> 2 */
+#define	MVNETA_TX_CMD_IP_HEADER_LEN(v)	 (((v) & 0x1f) << 8)
+/* bit 7 reserved */
+#define	MVNETA_TX_CMD_L3_OFFSET_MASK	(0x7f << 0) /* offset of L3 hdr. */
+#define	MVNETA_TX_CMD_L3_OFFSET(v)	(((v) & 0x7f) << 0)
+
+/*
+ * Transmit packet extra attributes
+ * and error status returned from network controller.
+ */
+#define	MVNETA_TX_F_DSA_TAG	(3 << 30)   /* DSA Tag */
+/* bit 29:8 reserved */
+#define	MVNETA_TX_F_MH_SEL	(0xf << 4)  /* Marvell Header */
+/* bit 3 reserved */
+#define	MVNETA_TX_F_EC_MASK	(3 << 1)    /* Error code */
+#define	MVNETA_TX_F_EC_LC	(0x0 << 1)  /* Late Collision */
+#define	MVNETA_TX_F_EC_UR	(0x1 << 1)  /* Underrun */
+#define	MVNETA_TX_F_EC_RL	(0x2 << 1)  /* Excess. Collision */
+#define	MVNETA_TX_F_EC_RESERVED	(0x3 << 1)
+#define	MVNETA_TX_F_ES		(1 << 0)    /* Error summary */
+
+#define	MVNETA_ERROR_SUMMARY		(1 << 0)
+#define	MVNETA_BUFFER_OWNED_MASK	(1UL << 31)
+#define	MVNETA_BUFFER_OWNED_BY_HOST	(0UL << 31)
+#define	MVNETA_BUFFER_OWNED_BY_DMA	(1UL << 31)
+
+#endif	/* _IF_MVNETAREG_H_ */
Property changes on: head/sys/dev/neta/if_mvnetareg.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
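[Editor's note: the two-bit MVNETA_RX_EC_* field above distinguishes four receive failure causes once MVNETA_RX_ES is set. A hypothetical decode, for illustration; the driver itself just drops errored frames:

    /* Map an errored RX descriptor status word to a description. */
    static const char *
    rx_error_string(uint32_t status)
    {
        if ((status & MVNETA_RX_ES) == 0)
            return ("no error");
        switch (status & MVNETA_RX_EC_MASK) {
        case MVNETA_RX_EC_CE:
            return ("CRC error");
        case MVNETA_RX_EC_OR:
            return ("FIFO overrun");
        case MVNETA_RX_EC_MF:
            return ("frame too long");
        default:
            return ("resource error");
        }
    }
]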
Index: head/sys/dev/neta/if_mvnetavar.h
===================================================================
--- head/sys/dev/neta/if_mvnetavar.h	(nonexistent)
+++ head/sys/dev/neta/if_mvnetavar.h	(revision 319907)
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2017 Stormshield.
+ * Copyright (c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef _IF_MVNETAVAR_H_
+#define	_IF_MVNETAVAR_H_
+#include
+
+#define	MVNETA_HWHEADER_SIZE	2	/* Marvell Header */
+#define	MVNETA_ETHER_SIZE	22	/* Maximum ether size */
+#define	MVNETA_MAX_CSUM_MTU	1600	/* Port1,2 hw limit */
+
+/*
+ * Limit the frame size to the hw checksum limit
+ * until jumbo frame support is added.
+ */
+#define	MVNETA_MAX_FRAME	(MVNETA_MAX_CSUM_MTU + MVNETA_ETHER_SIZE)
+
+/*
+ * Default queue sizes
+ *
+ * Queue 0 has the lowest and queue 7 the highest priority.
+ * IP packets are received on queue 7 by default.
+ */
+#define	MVNETA_TX_RING_CNT	512
+#define	MVNETA_RX_RING_CNT	256
+
+#define	MVNETA_BUFRING_SIZE	1024
+
+#define	MVNETA_PACKET_OFFSET	64
+#define	MVNETA_PACKET_SIZE	MCLBYTES
+
+#define	MVNETA_RXTH_COUNT	128
+#define	MVNETA_RX_REFILL_COUNT	8
+#define	MVNETA_TX_RECLAIM_COUNT	32
+
+/*
+ * Device Register access
+ */
+#define	MVNETA_READ(sc, reg) \
+	bus_read_4((sc)->res[0], (reg))
+#define	MVNETA_WRITE(sc, reg, val) \
+	bus_write_4((sc)->res[0], (reg), (val))
+
+#define	MVNETA_READ_REGION(sc, reg, val, c) \
+	bus_read_region_4((sc)->res[0], (reg), (val), (c))
+#define	MVNETA_WRITE_REGION(sc, reg, val, c) \
+	bus_write_region_4((sc)->res[0], (reg), (val), (c))
+
+#define	MVNETA_READ_MIB_4(sc, reg) \
+	bus_read_4((sc)->res[0], MVNETA_PORTMIB_BASE + (reg))
+#define	MVNETA_READ_MIB_8(sc, reg) \
+	bus_read_8((sc)->res[0], MVNETA_PORTMIB_BASE + (reg))
+
+#define	MVNETA_IS_LINKUP(sc) \
+	(MVNETA_READ((sc), MVNETA_PSR) & MVNETA_PSR_LINKUP)
+
+#define	MVNETA_IS_QUEUE_SET(queues, q) \
+	((((queues) >> (q)) & 0x1))
+
+/*
+ * EEE: Low Power Idle config
+ * The default timer is the duration of an MTU-sized frame transmission.
+ * The timer can be negotiated via the LLDP protocol, but we do not
+ * support that.
+ */
+#define	MVNETA_LPI_TS	(ETHERMTU * 8 / 1000)	/* [us] */
+#define	MVNETA_LPI_TW	(ETHERMTU * 8 / 1000)	/* [us] */
+#define	MVNETA_LPI_LI	(ETHERMTU * 8 / 1000)	/* [us] */
+
+/*
+ * DMA Descriptors
+ *
+ * The ethernet device has 8 RX/TX DMA queues.  Each queue has its own
+ * descriptor list; descriptors are simply indexed by a counter inside
+ * the device.
+ */
+#define	MVNETA_TX_SEGLIMIT	32
+
+#define	MVNETA_QUEUE_IDLE	1
+#define	MVNETA_QUEUE_WORKING	2
+#define	MVNETA_QUEUE_DISABLED	3
+
+struct mvneta_buf {
+	struct mbuf *	m;	/* pointer to related mbuf */
+	bus_dmamap_t	dmap;
+};
+
+struct mvneta_rx_ring {
+	int		queue_status;
+	/* Real descriptors array, shared with the RxDMA engine */
+	struct mvneta_rx_desc	*desc;
+	bus_dmamap_t		desc_map;
+	bus_addr_t		desc_pa;
+
+	/* Management entries for each of the descriptors */
+	struct mvneta_buf	rxbuf[MVNETA_RX_RING_CNT];
+
+	/* locks */
+	struct mtx	ring_mtx;
+
+	/* Index */
+	int		dma;
+	int		cpu;
+
+	/* Limit */
+	int		queue_th_received;
+	int		queue_th_time;	/* [Tclk] */
+
+	/* LRO */
+	struct lro_ctrl	lro;
+	boolean_t	lro_enabled;
+	/* Is this queue out of mbufs? */
+	boolean_t	needs_refill;
+} __aligned(CACHE_LINE_SIZE);
+
+struct mvneta_tx_ring {
+	/* Index of this queue */
+	int		qidx;
+	/* IFNET pointer */
+	struct ifnet	*ifp;
+	/* Ring buffer for IFNET */
+	struct buf_ring	*br;
+	/* Real descriptors array, shared with the TxDMA engine */
+	struct mvneta_tx_desc	*desc;
+	bus_dmamap_t		desc_map;
+	bus_addr_t		desc_pa;
+
+	/* Management entries for each of the descriptors */
+	struct mvneta_buf	txbuf[MVNETA_TX_RING_CNT];
+
+	/* locks */
+	struct mtx	ring_mtx;
+
+	/* Index */
+	int		used;
+	int		dma;
+	int		cpu;
+
+	/* watchdog */
+#define	MVNETA_WATCHDOG_TXCOMP	(hz / 10)	/* 100ms */
+#define	MVNETA_WATCHDOG		(10 * hz)	/* 10s */
+	int		watchdog_time;
+	int		queue_status;
+	boolean_t	queue_hung;
+
+	/* Task */
+	struct task	task;
+	struct taskqueue *taskq;
+
+	/* Stats */
+	uint32_t	drv_error;
+} __aligned(CACHE_LINE_SIZE);
+
+static __inline int
+tx_counter_adv(int ctr, int n)
+{
+
+	ctr += n;
+	while (__predict_false(ctr >= MVNETA_TX_RING_CNT))
+		ctr -= MVNETA_TX_RING_CNT;
+
+	return (ctr);
+}
+
+static __inline int
+rx_counter_adv(int ctr, int n)
+{
+
+	ctr += n;
+	while (__predict_false(ctr >= MVNETA_RX_RING_CNT))
+		ctr -= MVNETA_RX_RING_CNT;
+
+	return (ctr);
+}
+
+/*
+ * Timeout control
+ */
+#define	MVNETA_PHY_TIMEOUT	10000		/* msec */
+#define	RX_DISABLE_TIMEOUT	0x1000000	/* times */
+#define	TX_DISABLE_TIMEOUT	0x1000000	/* times */
+#define	TX_FIFO_EMPTY_TIMEOUT	0x1000000	/* times */
+
+/*
+ * Debug
+ */
+#define	KASSERT_SC_MTX(sc) \
+	KASSERT(mtx_owned(&(sc)->mtx), ("SC mutex not owned"))
+#define	KASSERT_BM_MTX(sc) \
+	KASSERT(mtx_owned(&(sc)->bm.bm_mtx), ("BM mutex not owned"))
+#define	KASSERT_RX_MTX(sc, q) \
+	KASSERT(mtx_owned(&(sc)->rx_ring[(q)].ring_mtx), \
+	    ("RX mutex not owned"))
+#define	KASSERT_TX_MTX(sc, q) \
+	KASSERT(mtx_owned(&(sc)->tx_ring[(q)].ring_mtx), \
+	    ("TX mutex not owned"))
+
+/*
+ * sysctl(9) parameters
+ */
+struct mvneta_sysctl_queue {
+	struct mvneta_softc	*sc;
+	int	rxtx;
+	int	queue;
+};
+#define	MVNETA_SYSCTL_RX	0
+#define	MVNETA_SYSCTL_TX	1
+
+struct mvneta_sysctl_mib {
+	struct mvneta_softc	*sc;
+	int	index;
+	uint64_t counter;
+};
+
+enum mvneta_phy_mode {
+	MVNETA_PHY_QSGMII,
+	MVNETA_PHY_SGMII,
+	MVNETA_PHY_RGMII,
+	MVNETA_PHY_RGMII_ID
+};
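[Editor's note: the *_counter_adv() helpers above keep ring indices within [0, ring size) without a modulo; since advances are small, a subtraction loop beats integer division on this class of CPU. A worked illustration, with example values only:

    /*
     * Example: MVNETA_TX_RING_CNT is 512.  Advancing index 510 by 4:
     *
     *   tx_counter_adv(510, 4) -> 514 -> 514 - 512 = 2
     *
     * A single iteration suffices whenever n < MVNETA_TX_RING_CNT,
     * which holds for every caller in the driver (advances are 1 or a
     * small segment count bounded by MVNETA_TX_SEGLIMIT).
     */
]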
+
+/*
+ * Ethernet Device main context
+ */
+DECLARE_CLASS(mvneta_driver);
+
+struct mvneta_softc {
+	device_t	dev;
+	uint32_t	version;
+	/*
+	 * The mtx must be held by interface functions to/from other
+	 * frameworks: the interrupt handler, sysctl handlers, the
+	 * ioctl handler, and so on.
+	 */
+	struct mtx	mtx;
+	struct resource	*res[2];
+	void		*ih_cookie[1];
+
+	struct ifnet	*ifp;
+	uint32_t	mvneta_if_flags;
+	uint32_t	mvneta_media;
+
+	int		phy_attached;
+	enum mvneta_phy_mode	phy_mode;
+	int		phy_addr;
+	int		phy_speed;	/* PHY speed */
+	boolean_t	phy_fdx;	/* Full duplex mode */
+	boolean_t	autoneg;	/* Autonegotiation status */
+	boolean_t	use_inband_status;	/* In-band link status */
+
+	/*
+	 * Link State control
+	 */
+	boolean_t	linkup;
+	device_t	miibus;
+	struct mii_data	*mii;
+	uint8_t		enaddr[ETHER_ADDR_LEN];
+	struct ifmedia	mvneta_ifmedia;
+
+	bus_dma_tag_t	rx_dtag;
+	bus_dma_tag_t	rxbuf_dtag;
+	bus_dma_tag_t	tx_dtag;
+	bus_dma_tag_t	txmbuf_dtag;
+	struct mvneta_rx_ring	rx_ring[MVNETA_RX_QNUM_MAX];
+	struct mvneta_tx_ring	tx_ring[MVNETA_TX_QNUM_MAX];
+
+	/*
+	 * Maintenance clock
+	 */
+	struct callout	tick_ch;
+
+	int	cf_lpi;
+	int	cf_fc;
+	int	debug;
+
+	/*
+	 * Sysctl interfaces
+	 */
+	struct mvneta_sysctl_queue	sysctl_rx_queue[MVNETA_RX_QNUM_MAX];
+	struct mvneta_sysctl_queue	sysctl_tx_queue[MVNETA_TX_QNUM_MAX];
+
+	/*
+	 * MIB counters
+	 */
+	struct mvneta_sysctl_mib	sysctl_mib[MVNETA_PORTMIB_NOCOUNTER];
+	uint64_t	counter_pdfc;
+	uint64_t	counter_pofc;
+	uint32_t	counter_watchdog;	/* manual reset when clearing mib */
+	uint32_t	counter_watchdog_mib;	/* reset after each mib update */
+};
+#define	MVNETA_RX_RING(sc, q) \
+	(&(sc)->rx_ring[(q)])
+#define	MVNETA_TX_RING(sc, q) \
+	(&(sc)->tx_ring[(q)])
+
+int mvneta_attach(device_t);
+
+#ifdef FDT
+int mvneta_fdt_mac_address(struct mvneta_softc *, uint8_t *);
+#endif
+
+#endif	/* _IF_MVNETAVAR_H_ */
Property changes on: head/sys/dev/neta/if_mvnetavar.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property