diff --git a/sys/dev/axgbe/if_axgbe_pci.c b/sys/dev/axgbe/if_axgbe_pci.c index 7c4645792321..fe42aaac6568 100644 --- a/sys/dev/axgbe/if_axgbe_pci.c +++ b/sys/dev/axgbe/if_axgbe_pci.c @@ -1,2339 +1,2413 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020 Advanced Micro Devices, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Contact Information : * Rajesh Kumar * Shreyank Amartya */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include "xgbe.h" #include "xgbe-common.h" #include "miibus_if.h" #include "ifdi_if.h" #include "opt_inet.h" #include "opt_inet6.h" MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data"); extern struct if_txrx axgbe_txrx; +static int axgbe_sph_enable; /* Function prototypes */ static void *axgbe_register(device_t); static int axgbe_if_attach_pre(if_ctx_t); static int axgbe_if_attach_post(if_ctx_t); static int axgbe_if_detach(if_ctx_t); static void axgbe_if_stop(if_ctx_t); static void axgbe_if_init(if_ctx_t); /* Queue related routines */ static int axgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); static int axgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); static int axgbe_alloc_channels(if_ctx_t); static void axgbe_if_queues_free(if_ctx_t); static int axgbe_if_tx_queue_intr_enable(if_ctx_t, uint16_t); static int axgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t); /* Interrupt related routines */ static void axgbe_if_disable_intr(if_ctx_t); static void axgbe_if_enable_intr(if_ctx_t); static int axgbe_if_msix_intr_assign(if_ctx_t, int); static void xgbe_free_intr(struct xgbe_prv_data *, struct resource *, void *, int); /* Init and Iflib routines */ static void axgbe_pci_init(struct xgbe_prv_data *); static void axgbe_pci_stop(if_ctx_t); static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *, struct xgbe_channel *); static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *); static int axgbe_if_mtu_set(if_ctx_t, uint32_t); static void axgbe_if_update_admin_status(if_ctx_t); static void axgbe_if_media_status(if_ctx_t, struct ifmediareq *); static int axgbe_if_media_change(if_ctx_t); static int axgbe_if_promisc_set(if_ctx_t, int); static uint64_t axgbe_if_get_counter(if_ctx_t, ift_counter); 
static void axgbe_if_vlan_register(if_ctx_t, uint16_t); static void axgbe_if_vlan_unregister(if_ctx_t, uint16_t); #if __FreeBSD_version >= 1300000 static bool axgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event); #endif static void axgbe_set_counts(if_ctx_t); static void axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *); /* MII interface registered functions */ static int axgbe_miibus_readreg(device_t, int, int); static int axgbe_miibus_writereg(device_t, int, int, int); static void axgbe_miibus_statchg(device_t); /* ISR routines */ static int axgbe_dev_isr(void *); static void axgbe_ecc_isr(void *); static void axgbe_i2c_isr(void *); static void axgbe_an_isr(void *); static int axgbe_msix_que(void *); /* Timer routines */ static void xgbe_service(void *, int); static void xgbe_service_timer(void *); static void xgbe_init_timers(struct xgbe_prv_data *); static void xgbe_stop_timers(struct xgbe_prv_data *); /* Dump routines */ static void xgbe_dump_prop_registers(struct xgbe_prv_data *); /* * Allocate only for MAC (BAR0) and PCS (BAR1) registers, and just point the * MSI-X table bar (BAR5) to iflib. iflib will do the allocation for MSI-X * table. */ static struct resource_spec axgbe_pci_mac_spec[] = { { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* MAC regs */ { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE }, /* PCS regs */ { -1, 0 } }; static pci_vendor_info_t axgbe_vendor_info_array[] = { PVID(0x1022, 0x1458, "AMD 10 Gigabit Ethernet Driver"), PVID(0x1022, 0x1459, "AMD 10 Gigabit Ethernet Driver"), PVID_END }; static struct xgbe_version_data xgbe_v2a = { .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, .xpcs_access = XGBE_XPCS_ACCESS_V2, .mmc_64bit = 1, .tx_max_fifo_size = 229376, .rx_max_fifo_size = 229376, .tx_tstamp_workaround = 1, .ecc_support = 1, .i2c_support = 1, .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, .an_cdr_workaround = 1, }; static struct xgbe_version_data xgbe_v2b = { .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, .xpcs_access = XGBE_XPCS_ACCESS_V2, .mmc_64bit = 1, .tx_max_fifo_size = 65536, .rx_max_fifo_size = 65536, .tx_tstamp_workaround = 1, .ecc_support = 1, .i2c_support = 1, .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, .an_cdr_workaround = 1, }; /* Device Interface */ static device_method_t ax_methods[] = { DEVMETHOD(device_register, axgbe_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), /* MII interface */ DEVMETHOD(miibus_readreg, axgbe_miibus_readreg), DEVMETHOD(miibus_writereg, axgbe_miibus_writereg), DEVMETHOD(miibus_statchg, axgbe_miibus_statchg), DEVMETHOD_END }; static driver_t ax_driver = { "ax", ax_methods, sizeof(struct axgbe_if_softc), }; devclass_t ax_devclass; DRIVER_MODULE(axp, pci, ax_driver, ax_devclass, 0, 0); DRIVER_MODULE(miibus, ax, miibus_driver, miibus_devclass, 0, 0); IFLIB_PNP_INFO(pci, ax_driver, axgbe_vendor_info_array); MODULE_DEPEND(ax, pci, 1, 1, 1); MODULE_DEPEND(ax, ether, 1, 1, 1); MODULE_DEPEND(ax, iflib, 1, 1, 1); MODULE_DEPEND(ax, miibus, 1, 1, 1); /* Iflib Interface */ static device_method_t axgbe_if_methods[] = { DEVMETHOD(ifdi_attach_pre, axgbe_if_attach_pre), DEVMETHOD(ifdi_attach_post, axgbe_if_attach_post), DEVMETHOD(ifdi_detach, axgbe_if_detach), DEVMETHOD(ifdi_init, axgbe_if_init), DEVMETHOD(ifdi_stop, axgbe_if_stop), DEVMETHOD(ifdi_msix_intr_assign, axgbe_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, axgbe_if_enable_intr), 
DEVMETHOD(ifdi_intr_disable, axgbe_if_disable_intr), DEVMETHOD(ifdi_tx_queue_intr_enable, axgbe_if_tx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, axgbe_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queues_alloc, axgbe_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, axgbe_if_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, axgbe_if_queues_free), DEVMETHOD(ifdi_update_admin_status, axgbe_if_update_admin_status), DEVMETHOD(ifdi_mtu_set, axgbe_if_mtu_set), DEVMETHOD(ifdi_media_status, axgbe_if_media_status), DEVMETHOD(ifdi_media_change, axgbe_if_media_change), DEVMETHOD(ifdi_promisc_set, axgbe_if_promisc_set), DEVMETHOD(ifdi_get_counter, axgbe_if_get_counter), DEVMETHOD(ifdi_vlan_register, axgbe_if_vlan_register), DEVMETHOD(ifdi_vlan_unregister, axgbe_if_vlan_unregister), #if __FreeBSD_version >= 1300000 DEVMETHOD(ifdi_needs_restart, axgbe_if_needs_restart), #endif DEVMETHOD_END }; static driver_t axgbe_if_driver = { "axgbe_if", axgbe_if_methods, sizeof(struct axgbe_if_softc) }; /* Iflib Shared Context */ static struct if_shared_ctx axgbe_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_driver = &axgbe_if_driver, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header), .isc_tx_maxsegsize = PAGE_SIZE, .isc_tso_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = PAGE_SIZE, .isc_rx_maxsize = MJUM9BYTES, .isc_rx_maxsegsize = MJUM9BYTES, .isc_rx_nsegments = 1, .isc_admin_intrcnt = 4, .isc_vendor_info = axgbe_vendor_info_array, .isc_driver_version = XGBE_DRV_VERSION, - .isc_nrxd_min = {XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MIN}, - .isc_nrxd_default = {XGBE_RX_DESC_CNT_DEFAULT, XGBE_RX_DESC_CNT_DEFAULT}, - .isc_nrxd_max = {XGBE_RX_DESC_CNT_MAX, XGBE_RX_DESC_CNT_MAX}, .isc_ntxd_min = {XGBE_TX_DESC_CNT_MIN}, .isc_ntxd_default = {XGBE_TX_DESC_CNT_DEFAULT}, .isc_ntxd_max = {XGBE_TX_DESC_CNT_MAX}, - .isc_nfl = 2, .isc_ntxqs = 1, - .isc_nrxqs = 2, .isc_flags = IFLIB_TSO_INIT_IP | IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_NEED_ETHER_PAD, }; static void * axgbe_register(device_t dev) { + int axgbe_nfl; + int axgbe_nrxqs; + int error, i; + char *value = NULL; + + value = kern_getenv("dev.ax.sph_enable"); + if (value) { + axgbe_sph_enable = strtol(value, NULL, 10); + freeenv(value); + } else { + /* + * No tunable found, generate one with default values + * Note: only a reboot will reveal the new kenv + */ + error = kern_setenv("dev.ax.sph_enable", "1"); + if (error) { + printf("Error setting tunable, using default driver values\n"); + } + axgbe_sph_enable = 1; + } + + if (!axgbe_sph_enable) { + axgbe_nfl = 1; + axgbe_nrxqs = 1; + } else { + axgbe_nfl = 2; + axgbe_nrxqs = 2; + } + + axgbe_sctx_init.isc_nfl = axgbe_nfl; + axgbe_sctx_init.isc_nrxqs = axgbe_nrxqs; + + for (i = 0 ; i < axgbe_nrxqs ; i++) { + axgbe_sctx_init.isc_nrxd_min[i] = XGBE_RX_DESC_CNT_MIN; + axgbe_sctx_init.isc_nrxd_default[i] = XGBE_RX_DESC_CNT_DEFAULT; + axgbe_sctx_init.isc_nrxd_max[i] = XGBE_RX_DESC_CNT_MAX; + } + return (&axgbe_sctx_init); } /* MII Interface Functions */ static int axgbe_miibus_readreg(device_t dev, int phy, int reg) { struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev)); struct xgbe_prv_data *pdata = &sc->pdata; int val; axgbe_printf(3, "%s: phy %d reg %d\n", __func__, phy, reg); val = xgbe_phy_mii_read(pdata, phy, reg); axgbe_printf(2, "%s: val 0x%x\n", __func__, val); return (val & 0xFFFF); } static int axgbe_miibus_writereg(device_t dev, int phy, int reg, int val) { struct axgbe_if_softc *sc = 
iflib_get_softc(device_get_softc(dev)); struct xgbe_prv_data *pdata = &sc->pdata; axgbe_printf(3, "%s: phy %d reg %d val 0x%x\n", __func__, phy, reg, val); xgbe_phy_mii_write(pdata, phy, reg, val); return(0); } static void axgbe_miibus_statchg(device_t dev) { struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev)); struct xgbe_prv_data *pdata = &sc->pdata; struct mii_data *mii = device_get_softc(pdata->axgbe_miibus); struct ifnet *ifp = pdata->netdev; int bmsr; axgbe_printf(2, "%s: Link %d/%d\n", __func__, pdata->phy.link, pdata->phy_link); if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: pdata->phy.link = 1; break; case IFM_1000_T: case IFM_1000_SX: case IFM_2500_SX: pdata->phy.link = 1; break; default: pdata->phy.link = 0; break; } } else pdata->phy_link = 0; bmsr = axgbe_miibus_readreg(pdata->dev, pdata->mdio_addr, MII_BMSR); if (bmsr & BMSR_ANEG) { axgbe_printf(2, "%s: Autoneg Done\n", __func__); /* Raise AN Interrupt */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK); } } static int axgbe_if_attach_pre(if_ctx_t ctx) { struct axgbe_if_softc *sc; struct xgbe_prv_data *pdata; struct resource *mac_res[2]; if_softc_ctx_t scctx; if_shared_ctx_t sctx; device_t dev; unsigned int ma_lo, ma_hi; unsigned int reg; sc = iflib_get_softc(ctx); sc->pdata.dev = dev = iflib_get_dev(ctx); sc->sctx = sctx = iflib_get_sctx(ctx); sc->scctx = scctx = iflib_get_softc_ctx(ctx); sc->media = iflib_get_media(ctx); sc->ctx = ctx; sc->link_status = LINK_STATE_DOWN; pdata = &sc->pdata; pdata->netdev = iflib_get_ifp(ctx); spin_lock_init(&pdata->xpcs_lock); /* Initialize locks */ mtx_init(&pdata->rss_mutex, "xgbe rss mutex lock", NULL, MTX_DEF); mtx_init(&pdata->mdio_mutex, "xgbe MDIO mutex lock", NULL, MTX_SPIN); /* Allocate VLAN bitmap */ pdata->active_vlans = bit_alloc(VLAN_NVID, M_AXGBE, M_WAITOK|M_ZERO); pdata->num_active_vlans = 0; /* Get the version data */ DBGPR("%s: Device ID: 0x%x\n", __func__, pci_get_device(dev)); if (pci_get_device(dev) == 0x1458) sc->pdata.vdata = &xgbe_v2a; else if (pci_get_device(dev) == 0x1459) sc->pdata.vdata = &xgbe_v2b; /* PCI setup */ if (bus_alloc_resources(dev, axgbe_pci_mac_spec, mac_res)) return (ENXIO); sc->pdata.xgmac_res = mac_res[0]; sc->pdata.xpcs_res = mac_res[1]; /* Set the PCS indirect addressing definition registers*/ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; /* Configure the PCS indirect addressing support */ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg); pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); pdata->xpcs_window <<= 6; pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7); pdata->xpcs_window_mask = pdata->xpcs_window_size - 1; DBGPR("xpcs window def : %#010x\n", pdata->xpcs_window_def_reg); DBGPR("xpcs window sel : %#010x\n", pdata->xpcs_window_sel_reg); DBGPR("xpcs window : %#010x\n", pdata->xpcs_window); DBGPR("xpcs window size : %#010x\n", pdata->xpcs_window_size); DBGPR("xpcs window mask : %#010x\n", pdata->xpcs_window_mask); /* Enable all interrupts in the hardware */ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); /* Retrieve the MAC address */ ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO); ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI); pdata->mac_addr[0] = ma_lo & 0xff; 
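	/*
	 * XP_MAC_ADDR_LO carries the first four octets of the station
	 * address and XP_MAC_ADDR_HI the remaining two, together with a
	 * VALID bit that is checked below before the address is handed
	 * to iflib.
	 */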
pdata->mac_addr[1] = (ma_lo >> 8) & 0xff; pdata->mac_addr[2] = (ma_lo >>16) & 0xff; pdata->mac_addr[3] = (ma_lo >> 24) & 0xff; pdata->mac_addr[4] = ma_hi & 0xff; pdata->mac_addr[5] = (ma_hi >> 8) & 0xff; if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID)) { axgbe_error("Invalid mac address\n"); return (EINVAL); } iflib_set_mac(ctx, pdata->mac_addr); /* Clock settings */ pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ; pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ; /* Set the DMA coherency values */ pdata->coherent = 1; pdata->arcr = XGBE_DMA_PCI_ARCR; pdata->awcr = XGBE_DMA_PCI_AWCR; pdata->awarcr = XGBE_DMA_PCI_AWARCR; /* Read the port property registers */ pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0); pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1); pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2); pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3); pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4); DBGPR("port property 0 = %#010x\n", pdata->pp0); DBGPR("port property 1 = %#010x\n", pdata->pp1); DBGPR("port property 2 = %#010x\n", pdata->pp2); DBGPR("port property 3 = %#010x\n", pdata->pp3); DBGPR("port property 4 = %#010x\n", pdata->pp4); /* Set the maximum channels and queues */ pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, MAX_TX_DMA); pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, MAX_RX_DMA); pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, MAX_TX_QUEUES); pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, MAX_RX_QUEUES); DBGPR("max tx/rx channel count = %u/%u\n", pdata->tx_max_channel_count, pdata->rx_max_channel_count); DBGPR("max tx/rx hw queue count = %u/%u\n", pdata->tx_max_q_count, pdata->rx_max_q_count); axgbe_set_counts(ctx); /* Set the maximum fifo amounts */ pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, TX_FIFO_SIZE); pdata->tx_max_fifo_size *= 16384; pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size, pdata->vdata->tx_max_fifo_size); pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, RX_FIFO_SIZE); pdata->rx_max_fifo_size *= 16384; pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size, pdata->vdata->rx_max_fifo_size); DBGPR("max tx/rx max fifo size = %u/%u\n", pdata->tx_max_fifo_size, pdata->rx_max_fifo_size); /* Initialize IFLIB if_softc_ctx_t */ axgbe_init_iflib_softc_ctx(sc); /* Alloc channels */ if (axgbe_alloc_channels(ctx)) { axgbe_error("Unable to allocate channel memory\n"); return (ENOMEM); } TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata); /* create the workqueue */ pdata->dev_workqueue = taskqueue_create("axgbe", M_WAITOK, taskqueue_thread_enqueue, &pdata->dev_workqueue); taskqueue_start_threads(&pdata->dev_workqueue, 1, PI_NET, "axgbe dev taskq"); /* Init timers */ xgbe_init_timers(pdata); return (0); } /* axgbe_if_attach_pre */ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata) { xgbe_init_function_ptrs_dev(&pdata->hw_if); xgbe_init_function_ptrs_phy(&pdata->phy_if); xgbe_init_function_ptrs_i2c(&pdata->i2c_if); xgbe_init_function_ptrs_desc(&pdata->desc_if); pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if); } static void axgbe_set_counts(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx);; struct xgbe_prv_data *pdata = &sc->pdata; cpuset_t lcpus; int cpu_count, err; size_t len; /* Set all function pointers */ xgbe_init_all_fptrs(pdata); /* Populate the hardware features */ xgbe_get_all_hw_features(pdata); if (!pdata->tx_max_channel_count) pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt; if (!pdata->rx_max_channel_count) pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt; if 
(!pdata->tx_max_q_count) pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt; if (!pdata->rx_max_q_count) pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt; /* * Calculate the number of Tx and Rx rings to be created * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set * the number of Tx queues to the number of Tx channels * enabled * -Rx (DMA) Channels do not map 1-to-1 so use the actual * number of Rx queues or maximum allowed */ /* Get cpu count from sysctl */ len = sizeof(cpu_count); err = kernel_sysctlbyname(curthread, "hw.ncpu", &cpu_count, &len, NULL, 0, NULL, 0); if (err) { axgbe_error("Unable to fetch number of cpus\n"); cpu_count = 1; } if (bus_get_cpus(pdata->dev, INTR_CPUS, sizeof(lcpus), &lcpus) != 0) { axgbe_error("Unable to fetch CPU list\n"); /* TODO - handle CPU_COPY(&all_cpus, &lcpus); */ } DBGPR("ncpu %d intrcpu %d\n", cpu_count, CPU_COUNT(&lcpus)); pdata->tx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.tx_ch_cnt); pdata->tx_ring_count = min(pdata->tx_ring_count, pdata->tx_max_channel_count); pdata->tx_ring_count = min(pdata->tx_ring_count, pdata->tx_max_q_count); pdata->tx_q_count = pdata->tx_ring_count; pdata->rx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.rx_ch_cnt); pdata->rx_ring_count = min(pdata->rx_ring_count, pdata->rx_max_channel_count); pdata->rx_q_count = min(pdata->hw_feat.rx_q_cnt, pdata->rx_max_q_count); DBGPR("TX/RX max channel count = %u/%u\n", pdata->tx_max_channel_count, pdata->rx_max_channel_count); DBGPR("TX/RX max queue count = %u/%u\n", pdata->tx_max_q_count, pdata->rx_max_q_count); DBGPR("TX/RX DMA ring count = %u/%u\n", pdata->tx_ring_count, pdata->rx_ring_count); DBGPR("TX/RX hardware queue count = %u/%u\n", pdata->tx_q_count, pdata->rx_q_count); } /* axgbe_set_counts */ static void axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *sc) { struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; if_shared_ctx_t sctx = sc->sctx; int i; scctx->isc_nrxqsets = pdata->rx_q_count; scctx->isc_ntxqsets = pdata->tx_q_count; scctx->isc_msix_bar = pci_msix_table_bar(pdata->dev); scctx->isc_tx_nsegments = 32; for (i = 0; i < sctx->isc_ntxqs; i++) { scctx->isc_txqsizes[i] = roundup2(scctx->isc_ntxd[i] * sizeof(struct xgbe_ring_desc), 128); scctx->isc_txd_size[i] = sizeof(struct xgbe_ring_desc); } for (i = 0; i < sctx->isc_nrxqs; i++) { scctx->isc_rxqsizes[i] = roundup2(scctx->isc_nrxd[i] * sizeof(struct xgbe_ring_desc), 128); scctx->isc_rxd_size[i] = sizeof(struct xgbe_ring_desc); } scctx->isc_tx_tso_segments_max = 32; scctx->isc_tx_tso_size_max = XGBE_TSO_MAX_SIZE; scctx->isc_tx_tso_segsize_max = PAGE_SIZE; /* * Set capabilities * 1) IFLIB automatically adds IFCAP_HWSTATS, so need to set explicitly * 2) isc_tx_csum_flags is mandatory if IFCAP_TXCSUM (included in * IFCAP_HWCSUM) is set */ scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6 | CSUM_TSO); scctx->isc_capenable = (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWTSO); scctx->isc_capabilities = scctx->isc_capenable; /* * Set rss_table_size alone when adding RSS support. 
rss_table_mask * will be set by IFLIB based on rss_table_size */ scctx->isc_rss_table_size = XGBE_RSS_MAX_TABLE_SIZE; scctx->isc_ntxqsets_max = XGBE_MAX_QUEUES; scctx->isc_nrxqsets_max = XGBE_MAX_QUEUES; scctx->isc_txrx = &axgbe_txrx; } static int axgbe_alloc_channels(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel; int i, j, count; DBGPR("%s: txqs %d rxqs %d\n", __func__, pdata->tx_ring_count, pdata->rx_ring_count); /* Iflibe sets based on isc_ntxqsets/nrxqsets */ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); /* Allocate channel memory */ for (i = 0; i < count ; i++) { channel = (struct xgbe_channel*)malloc(sizeof(struct xgbe_channel), M_AXGBE, M_NOWAIT | M_ZERO); if (channel == NULL) { for (j = 0; j < i; j++) { free(pdata->channel[j], M_AXGBE); pdata->channel[j] = NULL; } return (ENOMEM); } pdata->channel[i] = channel; } pdata->total_channel_count = count; DBGPR("Channel count set to: %u\n", pdata->total_channel_count); for (i = 0; i < count; i++) { channel = pdata->channel[i]; snprintf(channel->name, sizeof(channel->name), "channel-%d",i); channel->pdata = pdata; channel->queue_index = i; channel->dma_tag = rman_get_bustag(pdata->xgmac_res); bus_space_subregion(channel->dma_tag, rman_get_bushandle(pdata->xgmac_res), DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC, &channel->dma_handle); channel->tx_ring = NULL; channel->rx_ring = NULL; } return (0); } /* axgbe_alloc_channels */ static void xgbe_service(void *ctx, int pending) { struct xgbe_prv_data *pdata = ctx; struct axgbe_if_softc *sc = (struct axgbe_if_softc *)pdata; bool prev_state = false; /* Get previous link status */ prev_state = pdata->phy.link; pdata->phy_if.phy_status(pdata); if (prev_state != pdata->phy.link) { pdata->phy_link = pdata->phy.link; axgbe_if_update_admin_status(sc->ctx); } callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata); } static void xgbe_service_timer(void *data) { struct xgbe_prv_data *pdata = data; taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work); } static void xgbe_init_timers(struct xgbe_prv_data *pdata) { callout_init(&pdata->service_timer, 1*hz); } static void xgbe_start_timers(struct xgbe_prv_data *pdata) { callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata); } static void xgbe_stop_timers(struct xgbe_prv_data *pdata) { callout_drain(&pdata->service_timer); callout_stop(&pdata->service_timer); } static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata) { axgbe_printf(1, "\n************* PHY Reg dump *********************\n"); axgbe_printf(1, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1)); axgbe_printf(1, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1)); axgbe_printf(1, "Phy Id (PHYS ID 1 %#06x)= %#06x\n", MDIO_DEVID1, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1)); axgbe_printf(1, "Phy Id (PHYS ID 2 %#06x)= %#06x\n", MDIO_DEVID2, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2)); axgbe_printf(1, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS1, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1)); axgbe_printf(1, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS2, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2)); axgbe_printf(1, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1)); axgbe_printf(1, "Auto-Neg Status Reg (%#06x) = %#06x\n", MDIO_STAT1, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1)); axgbe_printf(1, 
"Auto-Neg Ad Reg 1 (%#06x) = %#06x\n", MDIO_AN_ADVERTISE, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE)); axgbe_printf(1, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n", MDIO_AN_ADVERTISE + 1, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1)); axgbe_printf(1, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n", MDIO_AN_ADVERTISE + 2, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2)); axgbe_printf(1, "Auto-Neg Completion Reg (%#06x) = %#06x\n", MDIO_AN_COMP_STAT, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT)); axgbe_printf(1, "\n************************************************\n"); } static void xgbe_dump_prop_registers(struct xgbe_prv_data *pdata) { int i; axgbe_printf(1, "\n************* PROP Reg dump ********************\n"); for (i = 0 ; i < 38 ; i++) { axgbe_printf(1, "PROP Offset 0x%08x = %08x\n", (XP_PROP_0 + (i * 4)), XP_IOREAD(pdata, (XP_PROP_0 + (i * 4)))); } } static void xgbe_dump_dma_registers(struct xgbe_prv_data *pdata, int ch) { struct xgbe_channel *channel; int i; axgbe_printf(1, "\n************* DMA Reg dump *********************\n"); axgbe_printf(1, "DMA MR Reg (%08x) = %08x\n", DMA_MR, XGMAC_IOREAD(pdata, DMA_MR)); axgbe_printf(1, "DMA SBMR Reg (%08x) = %08x\n", DMA_SBMR, XGMAC_IOREAD(pdata, DMA_SBMR)); axgbe_printf(1, "DMA ISR Reg (%08x) = %08x\n", DMA_ISR, XGMAC_IOREAD(pdata, DMA_ISR)); axgbe_printf(1, "DMA AXIARCR Reg (%08x) = %08x\n", DMA_AXIARCR, XGMAC_IOREAD(pdata, DMA_AXIARCR)); axgbe_printf(1, "DMA AXIAWCR Reg (%08x) = %08x\n", DMA_AXIAWCR, XGMAC_IOREAD(pdata, DMA_AXIAWCR)); axgbe_printf(1, "DMA AXIAWARCR Reg (%08x) = %08x\n", DMA_AXIAWARCR, XGMAC_IOREAD(pdata, DMA_AXIAWARCR)); axgbe_printf(1, "DMA DSR0 Reg (%08x) = %08x\n", DMA_DSR0, XGMAC_IOREAD(pdata, DMA_DSR0)); axgbe_printf(1, "DMA DSR1 Reg (%08x) = %08x\n", DMA_DSR1, XGMAC_IOREAD(pdata, DMA_DSR1)); axgbe_printf(1, "DMA DSR2 Reg (%08x) = %08x\n", DMA_DSR2, XGMAC_IOREAD(pdata, DMA_DSR2)); axgbe_printf(1, "DMA DSR3 Reg (%08x) = %08x\n", DMA_DSR3, XGMAC_IOREAD(pdata, DMA_DSR3)); axgbe_printf(1, "DMA DSR4 Reg (%08x) = %08x\n", DMA_DSR4, XGMAC_IOREAD(pdata, DMA_DSR4)); axgbe_printf(1, "DMA TXEDMACR Reg (%08x) = %08x\n", DMA_TXEDMACR, XGMAC_IOREAD(pdata, DMA_TXEDMACR)); axgbe_printf(1, "DMA RXEDMACR Reg (%08x) = %08x\n", DMA_RXEDMACR, XGMAC_IOREAD(pdata, DMA_RXEDMACR)); for (i = 0 ; i < 8 ; i++ ) { if (ch >= 0) { if (i != ch) continue; } channel = pdata->channel[i]; axgbe_printf(1, "\n************* DMA CH %d dump ****************\n", i); axgbe_printf(1, "DMA_CH_CR Reg (%08x) = %08x\n", DMA_CH_CR, XGMAC_DMA_IOREAD(channel, DMA_CH_CR)); axgbe_printf(1, "DMA_CH_TCR Reg (%08x) = %08x\n", DMA_CH_TCR, XGMAC_DMA_IOREAD(channel, DMA_CH_TCR)); axgbe_printf(1, "DMA_CH_RCR Reg (%08x) = %08x\n", DMA_CH_RCR, XGMAC_DMA_IOREAD(channel, DMA_CH_RCR)); axgbe_printf(1, "DMA_CH_TDLR_HI Reg (%08x) = %08x\n", DMA_CH_TDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_HI)); axgbe_printf(1, "DMA_CH_TDLR_LO Reg (%08x) = %08x\n", DMA_CH_TDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_LO)); axgbe_printf(1, "DMA_CH_RDLR_HI Reg (%08x) = %08x\n", DMA_CH_RDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_HI)); axgbe_printf(1, "DMA_CH_RDLR_LO Reg (%08x) = %08x\n", DMA_CH_RDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_LO)); axgbe_printf(1, "DMA_CH_TDTR_LO Reg (%08x) = %08x\n", DMA_CH_TDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO)); axgbe_printf(1, "DMA_CH_RDTR_LO Reg (%08x) = %08x\n", DMA_CH_RDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTR_LO)); axgbe_printf(1, "DMA_CH_TDRLR Reg (%08x) = %08x\n", DMA_CH_TDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_TDRLR)); 
axgbe_printf(1, "DMA_CH_RDRLR Reg (%08x) = %08x\n", DMA_CH_RDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_RDRLR)); axgbe_printf(1, "DMA_CH_IER Reg (%08x) = %08x\n", DMA_CH_IER, XGMAC_DMA_IOREAD(channel, DMA_CH_IER)); axgbe_printf(1, "DMA_CH_RIWT Reg (%08x) = %08x\n", DMA_CH_RIWT, XGMAC_DMA_IOREAD(channel, DMA_CH_RIWT)); axgbe_printf(1, "DMA_CH_CATDR_LO Reg (%08x) = %08x\n", DMA_CH_CATDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATDR_LO)); axgbe_printf(1, "DMA_CH_CARDR_LO Reg (%08x) = %08x\n", DMA_CH_CARDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARDR_LO)); axgbe_printf(1, "DMA_CH_CATBR_HI Reg (%08x) = %08x\n", DMA_CH_CATBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_HI)); axgbe_printf(1, "DMA_CH_CATBR_LO Reg (%08x) = %08x\n", DMA_CH_CATBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_LO)); axgbe_printf(1, "DMA_CH_CARBR_HI Reg (%08x) = %08x\n", DMA_CH_CARBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_HI)); axgbe_printf(1, "DMA_CH_CARBR_LO Reg (%08x) = %08x\n", DMA_CH_CARBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_LO)); axgbe_printf(1, "DMA_CH_SR Reg (%08x) = %08x\n", DMA_CH_SR, XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); axgbe_printf(1, "DMA_CH_DSR Reg (%08x) = %08x\n", DMA_CH_DSR, XGMAC_DMA_IOREAD(channel, DMA_CH_DSR)); axgbe_printf(1, "DMA_CH_DCFL Reg (%08x) = %08x\n", DMA_CH_DCFL, XGMAC_DMA_IOREAD(channel, DMA_CH_DCFL)); axgbe_printf(1, "DMA_CH_MFC Reg (%08x) = %08x\n", DMA_CH_MFC, XGMAC_DMA_IOREAD(channel, DMA_CH_MFC)); axgbe_printf(1, "DMA_CH_TDTRO Reg (%08x) = %08x\n", DMA_CH_TDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTRO)); axgbe_printf(1, "DMA_CH_RDTRO Reg (%08x) = %08x\n", DMA_CH_RDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTRO)); axgbe_printf(1, "DMA_CH_TDWRO Reg (%08x) = %08x\n", DMA_CH_TDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDWRO)); axgbe_printf(1, "DMA_CH_RDWRO Reg (%08x) = %08x\n", DMA_CH_RDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDWRO)); } } static void xgbe_dump_mtl_registers(struct xgbe_prv_data *pdata) { int i; axgbe_printf(1, "\n************* MTL Reg dump *********************\n"); axgbe_printf(1, "MTL OMR Reg (%08x) = %08x\n", MTL_OMR, XGMAC_IOREAD(pdata, MTL_OMR)); axgbe_printf(1, "MTL FDCR Reg (%08x) = %08x\n", MTL_FDCR, XGMAC_IOREAD(pdata, MTL_FDCR)); axgbe_printf(1, "MTL FDSR Reg (%08x) = %08x\n", MTL_FDSR, XGMAC_IOREAD(pdata, MTL_FDSR)); axgbe_printf(1, "MTL FDDR Reg (%08x) = %08x\n", MTL_FDDR, XGMAC_IOREAD(pdata, MTL_FDDR)); axgbe_printf(1, "MTL ISR Reg (%08x) = %08x\n", MTL_ISR, XGMAC_IOREAD(pdata, MTL_ISR)); axgbe_printf(1, "MTL RQDCM0R Reg (%08x) = %08x\n", MTL_RQDCM0R, XGMAC_IOREAD(pdata, MTL_RQDCM0R)); axgbe_printf(1, "MTL RQDCM1R Reg (%08x) = %08x\n", MTL_RQDCM1R, XGMAC_IOREAD(pdata, MTL_RQDCM1R)); axgbe_printf(1, "MTL RQDCM2R Reg (%08x) = %08x\n", MTL_RQDCM2R, XGMAC_IOREAD(pdata, MTL_RQDCM2R)); axgbe_printf(1, "MTL TCPM0R Reg (%08x) = %08x\n", MTL_TCPM0R, XGMAC_IOREAD(pdata, MTL_TCPM0R)); axgbe_printf(1, "MTL TCPM1R Reg (%08x) = %08x\n", MTL_TCPM1R, XGMAC_IOREAD(pdata, MTL_TCPM1R)); for (i = 0 ; i < 8 ; i++ ) { axgbe_printf(1, "\n************* MTL CH %d dump ****************\n", i); axgbe_printf(1, "MTL_Q_TQOMR Reg (%08x) = %08x\n", MTL_Q_TQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR)); axgbe_printf(1, "MTL_Q_TQUR Reg (%08x) = %08x\n", MTL_Q_TQUR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQUR)); axgbe_printf(1, "MTL_Q_TQDR Reg (%08x) = %08x\n", MTL_Q_TQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQDR)); axgbe_printf(1, "MTL_Q_TC0ETSCR Reg (%08x) = %08x\n", MTL_Q_TC0ETSCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSCR)); axgbe_printf(1, "MTL_Q_TC0ETSSR Reg (%08x) = %08x\n", MTL_Q_TC0ETSSR, 
XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSSR)); axgbe_printf(1, "MTL_Q_TC0QWR Reg (%08x) = %08x\n", MTL_Q_TC0QWR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0QWR)); axgbe_printf(1, "MTL_Q_RQOMR Reg (%08x) = %08x\n", MTL_Q_RQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR)); axgbe_printf(1, "MTL_Q_RQMPOCR Reg (%08x) = %08x\n", MTL_Q_RQMPOCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQMPOCR)); axgbe_printf(1, "MTL_Q_RQDR Reg (%08x) = %08x\n", MTL_Q_RQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQDR)); axgbe_printf(1, "MTL_Q_RQCR Reg (%08x) = %08x\n", MTL_Q_RQCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQCR)); axgbe_printf(1, "MTL_Q_RQFCR Reg (%08x) = %08x\n", MTL_Q_RQFCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR)); axgbe_printf(1, "MTL_Q_IER Reg (%08x) = %08x\n", MTL_Q_IER, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_IER)); axgbe_printf(1, "MTL_Q_ISR Reg (%08x) = %08x\n", MTL_Q_ISR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR)); } } static void xgbe_dump_mac_registers(struct xgbe_prv_data *pdata) { axgbe_printf(1, "\n************* MAC Reg dump **********************\n"); axgbe_printf(1, "MAC TCR Reg (%08x) = %08x\n", MAC_TCR, XGMAC_IOREAD(pdata, MAC_TCR)); axgbe_printf(1, "MAC RCR Reg (%08x) = %08x\n", MAC_RCR, XGMAC_IOREAD(pdata, MAC_RCR)); axgbe_printf(1, "MAC PFR Reg (%08x) = %08x\n", MAC_PFR, XGMAC_IOREAD(pdata, MAC_PFR)); axgbe_printf(1, "MAC WTR Reg (%08x) = %08x\n", MAC_WTR, XGMAC_IOREAD(pdata, MAC_WTR)); axgbe_printf(1, "MAC HTR0 Reg (%08x) = %08x\n", MAC_HTR0, XGMAC_IOREAD(pdata, MAC_HTR0)); axgbe_printf(1, "MAC HTR1 Reg (%08x) = %08x\n", MAC_HTR1, XGMAC_IOREAD(pdata, MAC_HTR1)); axgbe_printf(1, "MAC HTR2 Reg (%08x) = %08x\n", MAC_HTR2, XGMAC_IOREAD(pdata, MAC_HTR2)); axgbe_printf(1, "MAC HTR3 Reg (%08x) = %08x\n", MAC_HTR3, XGMAC_IOREAD(pdata, MAC_HTR3)); axgbe_printf(1, "MAC HTR4 Reg (%08x) = %08x\n", MAC_HTR4, XGMAC_IOREAD(pdata, MAC_HTR4)); axgbe_printf(1, "MAC HTR5 Reg (%08x) = %08x\n", MAC_HTR5, XGMAC_IOREAD(pdata, MAC_HTR5)); axgbe_printf(1, "MAC HTR6 Reg (%08x) = %08x\n", MAC_HTR6, XGMAC_IOREAD(pdata, MAC_HTR6)); axgbe_printf(1, "MAC HTR7 Reg (%08x) = %08x\n", MAC_HTR7, XGMAC_IOREAD(pdata, MAC_HTR7)); axgbe_printf(1, "MAC VLANTR Reg (%08x) = %08x\n", MAC_VLANTR, XGMAC_IOREAD(pdata, MAC_VLANTR)); axgbe_printf(1, "MAC VLANHTR Reg (%08x) = %08x\n", MAC_VLANHTR, XGMAC_IOREAD(pdata, MAC_VLANHTR)); axgbe_printf(1, "MAC VLANIR Reg (%08x) = %08x\n", MAC_VLANIR, XGMAC_IOREAD(pdata, MAC_VLANIR)); axgbe_printf(1, "MAC IVLANIR Reg (%08x) = %08x\n", MAC_IVLANIR, XGMAC_IOREAD(pdata, MAC_IVLANIR)); axgbe_printf(1, "MAC RETMR Reg (%08x) = %08x\n", MAC_RETMR, XGMAC_IOREAD(pdata, MAC_RETMR)); axgbe_printf(1, "MAC Q0TFCR Reg (%08x) = %08x\n", MAC_Q0TFCR, XGMAC_IOREAD(pdata, MAC_Q0TFCR)); axgbe_printf(1, "MAC Q1TFCR Reg (%08x) = %08x\n", MAC_Q1TFCR, XGMAC_IOREAD(pdata, MAC_Q1TFCR)); axgbe_printf(1, "MAC Q2TFCR Reg (%08x) = %08x\n", MAC_Q2TFCR, XGMAC_IOREAD(pdata, MAC_Q2TFCR)); axgbe_printf(1, "MAC Q3TFCR Reg (%08x) = %08x\n", MAC_Q3TFCR, XGMAC_IOREAD(pdata, MAC_Q3TFCR)); axgbe_printf(1, "MAC Q4TFCR Reg (%08x) = %08x\n", MAC_Q4TFCR, XGMAC_IOREAD(pdata, MAC_Q4TFCR)); axgbe_printf(1, "MAC Q5TFCR Reg (%08x) = %08x\n", MAC_Q5TFCR, XGMAC_IOREAD(pdata, MAC_Q5TFCR)); axgbe_printf(1, "MAC Q6TFCR Reg (%08x) = %08x\n", MAC_Q6TFCR, XGMAC_IOREAD(pdata, MAC_Q6TFCR)); axgbe_printf(1, "MAC Q7TFCR Reg (%08x) = %08x\n", MAC_Q7TFCR, XGMAC_IOREAD(pdata, MAC_Q7TFCR)); axgbe_printf(1, "MAC RFCR Reg (%08x) = %08x\n", MAC_RFCR, XGMAC_IOREAD(pdata, MAC_RFCR)); axgbe_printf(1, "MAC RQC0R Reg (%08x) = %08x\n", MAC_RQC0R, XGMAC_IOREAD(pdata, MAC_RQC0R)); axgbe_printf(1, "MAC 
RQC1R Reg (%08x) = %08x\n", MAC_RQC1R, XGMAC_IOREAD(pdata, MAC_RQC1R));
	axgbe_printf(1, "MAC RQC2R Reg (%08x) = %08x\n", MAC_RQC2R,
	    XGMAC_IOREAD(pdata, MAC_RQC2R));
	axgbe_printf(1, "MAC RQC3R Reg (%08x) = %08x\n", MAC_RQC3R,
	    XGMAC_IOREAD(pdata, MAC_RQC3R));
	axgbe_printf(1, "MAC ISR Reg (%08x) = %08x\n", MAC_ISR,
	    XGMAC_IOREAD(pdata, MAC_ISR));
	axgbe_printf(1, "MAC IER Reg (%08x) = %08x\n", MAC_IER,
	    XGMAC_IOREAD(pdata, MAC_IER));
	axgbe_printf(1, "MAC RTSR Reg (%08x) = %08x\n", MAC_RTSR,
	    XGMAC_IOREAD(pdata, MAC_RTSR));
	axgbe_printf(1, "MAC PMTCSR Reg (%08x) = %08x\n", MAC_PMTCSR,
	    XGMAC_IOREAD(pdata, MAC_PMTCSR));
	axgbe_printf(1, "MAC RWKPFR Reg (%08x) = %08x\n", MAC_RWKPFR,
	    XGMAC_IOREAD(pdata, MAC_RWKPFR));
	axgbe_printf(1, "MAC LPICSR Reg (%08x) = %08x\n", MAC_LPICSR,
	    XGMAC_IOREAD(pdata, MAC_LPICSR));
	axgbe_printf(1, "MAC LPITCR Reg (%08x) = %08x\n", MAC_LPITCR,
	    XGMAC_IOREAD(pdata, MAC_LPITCR));
	axgbe_printf(1, "MAC TIR Reg (%08x) = %08x\n", MAC_TIR,
	    XGMAC_IOREAD(pdata, MAC_TIR));
	axgbe_printf(1, "MAC VR Reg (%08x) = %08x\n", MAC_VR,
	    XGMAC_IOREAD(pdata, MAC_VR));
	axgbe_printf(1, "MAC DR Reg (%08x) = %08x\n", MAC_DR,
	    XGMAC_IOREAD(pdata, MAC_DR));
	axgbe_printf(1, "MAC HWF0R Reg (%08x) = %08x\n", MAC_HWF0R,
	    XGMAC_IOREAD(pdata, MAC_HWF0R));
	axgbe_printf(1, "MAC HWF1R Reg (%08x) = %08x\n", MAC_HWF1R,
	    XGMAC_IOREAD(pdata, MAC_HWF1R));
	axgbe_printf(1, "MAC HWF2R Reg (%08x) = %08x\n", MAC_HWF2R,
	    XGMAC_IOREAD(pdata, MAC_HWF2R));
	axgbe_printf(1, "MAC MDIOSCAR Reg (%08x) = %08x\n", MAC_MDIOSCAR,
	    XGMAC_IOREAD(pdata, MAC_MDIOSCAR));
	axgbe_printf(1, "MAC MDIOSCCDR Reg (%08x) = %08x\n", MAC_MDIOSCCDR,
	    XGMAC_IOREAD(pdata, MAC_MDIOSCCDR));
	axgbe_printf(1, "MAC MDIOISR Reg (%08x) = %08x\n", MAC_MDIOISR,
	    XGMAC_IOREAD(pdata, MAC_MDIOISR));
	axgbe_printf(1, "MAC MDIOIER Reg (%08x) = %08x\n", MAC_MDIOIER,
	    XGMAC_IOREAD(pdata, MAC_MDIOIER));
	axgbe_printf(1, "MAC MDIOCL22R Reg (%08x) = %08x\n", MAC_MDIOCL22R,
	    XGMAC_IOREAD(pdata, MAC_MDIOCL22R));
	axgbe_printf(1, "MAC GPIOCR Reg (%08x) = %08x\n", MAC_GPIOCR,
	    XGMAC_IOREAD(pdata, MAC_GPIOCR));
	axgbe_printf(1, "MAC GPIOSR Reg (%08x) = %08x\n", MAC_GPIOSR,
	    XGMAC_IOREAD(pdata, MAC_GPIOSR));
	axgbe_printf(1, "MAC MACA0HR Reg (%08x) = %08x\n", MAC_MACA0HR,
	    XGMAC_IOREAD(pdata, MAC_MACA0HR));
	/* Print the MACA0LR offset here, not MAC_TCR (copy-paste fix). */
	axgbe_printf(1, "MAC MACA0LR Reg (%08x) = %08x\n", MAC_MACA0LR,
	    XGMAC_IOREAD(pdata, MAC_MACA0LR));
	axgbe_printf(1, "MAC MACA1HR Reg (%08x) = %08x\n", MAC_MACA1HR,
	    XGMAC_IOREAD(pdata, MAC_MACA1HR));
	axgbe_printf(1, "MAC MACA1LR Reg (%08x) = %08x\n", MAC_MACA1LR,
	    XGMAC_IOREAD(pdata, MAC_MACA1LR));
	axgbe_printf(1, "MAC RSSCR Reg (%08x) = %08x\n", MAC_RSSCR,
	    XGMAC_IOREAD(pdata, MAC_RSSCR));
	axgbe_printf(1, "MAC RSSDR Reg (%08x) = %08x\n", MAC_RSSDR,
	    XGMAC_IOREAD(pdata, MAC_RSSDR));
	axgbe_printf(1, "MAC RSSAR Reg (%08x) = %08x\n", MAC_RSSAR,
	    XGMAC_IOREAD(pdata, MAC_RSSAR));
	axgbe_printf(1, "MAC TSCR Reg (%08x) = %08x\n", MAC_TSCR,
	    XGMAC_IOREAD(pdata, MAC_TSCR));
	axgbe_printf(1, "MAC SSIR Reg (%08x) = %08x\n", MAC_SSIR,
	    XGMAC_IOREAD(pdata, MAC_SSIR));
	axgbe_printf(1, "MAC STSR Reg (%08x) = %08x\n", MAC_STSR,
	    XGMAC_IOREAD(pdata, MAC_STSR));
	axgbe_printf(1, "MAC STNR Reg (%08x) = %08x\n", MAC_STNR,
	    XGMAC_IOREAD(pdata, MAC_STNR));
	axgbe_printf(1, "MAC STSUR Reg (%08x) = %08x\n", MAC_STSUR,
	    XGMAC_IOREAD(pdata, MAC_STSUR));
	axgbe_printf(1, "MAC STNUR Reg (%08x) = %08x\n", MAC_STNUR,
	    XGMAC_IOREAD(pdata, MAC_STNUR));
	axgbe_printf(1, "MAC TSAR Reg (%08x) = %08x\n", MAC_TSAR,
	    XGMAC_IOREAD(pdata, MAC_TSAR));
	axgbe_printf(1, "MAC TSSR Reg (%08x) = %08x\n", MAC_TSSR,
	    XGMAC_IOREAD(pdata, MAC_TSSR));
	axgbe_printf(1, "MAC TXSNR Reg (%08x) = %08x\n", MAC_TXSNR,
	    XGMAC_IOREAD(pdata, MAC_TXSNR));
	axgbe_printf(1, "MAC TXSSR Reg (%08x) = %08x\n", MAC_TXSSR,
	    XGMAC_IOREAD(pdata, MAC_TXSSR));
}

static void
xgbe_dump_rmon_counters(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	axgbe_printf(1, "\n************* RMON counters dump ***************\n");

	pdata->hw_if.read_mmc_stats(pdata);

	axgbe_printf(1, "rmon txoctetcount_gb (%08x) = %08lx\n",
	    MMC_TXOCTETCOUNT_GB_LO, stats->txoctetcount_gb);
	axgbe_printf(1, "rmon txframecount_gb (%08x) = %08lx\n",
	    MMC_TXFRAMECOUNT_GB_LO, stats->txframecount_gb);
	axgbe_printf(1, "rmon txbroadcastframes_g (%08x) = %08lx\n",
	    MMC_TXBROADCASTFRAMES_G_LO, stats->txbroadcastframes_g);
	axgbe_printf(1, "rmon txmulticastframes_g (%08x) = %08lx\n",
	    MMC_TXMULTICASTFRAMES_G_LO, stats->txmulticastframes_g);
	axgbe_printf(1, "rmon tx64octets_gb (%08x) = %08lx\n",
	    MMC_TX64OCTETS_GB_LO, stats->tx64octets_gb);
	axgbe_printf(1, "rmon tx65to127octets_gb (%08x) = %08lx\n",
	    MMC_TX65TO127OCTETS_GB_LO, stats->tx65to127octets_gb);
	axgbe_printf(1, "rmon tx128to255octets_gb (%08x) = %08lx\n",
	    MMC_TX128TO255OCTETS_GB_LO, stats->tx128to255octets_gb);
	axgbe_printf(1, "rmon tx256to511octets_gb (%08x) = %08lx\n",
	    MMC_TX256TO511OCTETS_GB_LO, stats->tx256to511octets_gb);
	axgbe_printf(1, "rmon tx512to1023octets_gb (%08x) = %08lx\n",
	    MMC_TX512TO1023OCTETS_GB_LO, stats->tx512to1023octets_gb);
	axgbe_printf(1, "rmon tx1024tomaxoctets_gb (%08x) = %08lx\n",
	    MMC_TX1024TOMAXOCTETS_GB_LO, stats->tx1024tomaxoctets_gb);
	axgbe_printf(1, "rmon txunicastframes_gb (%08x) = %08lx\n",
	    MMC_TXUNICASTFRAMES_GB_LO, stats->txunicastframes_gb);
	axgbe_printf(1, "rmon txmulticastframes_gb (%08x) = %08lx\n",
	    MMC_TXMULTICASTFRAMES_GB_LO, stats->txmulticastframes_gb);
	axgbe_printf(1, "rmon txbroadcastframes_gb (%08x) = %08lx\n",
	    MMC_TXBROADCASTFRAMES_GB_LO, stats->txbroadcastframes_gb);
	axgbe_printf(1, "rmon txunderflowerror (%08x) = %08lx\n",
	    MMC_TXUNDERFLOWERROR_LO, stats->txunderflowerror);
	axgbe_printf(1, "rmon txoctetcount_g (%08x) = %08lx\n",
	    MMC_TXOCTETCOUNT_G_LO, stats->txoctetcount_g);
	axgbe_printf(1, "rmon txframecount_g (%08x) = %08lx\n",
	    MMC_TXFRAMECOUNT_G_LO, stats->txframecount_g);
	axgbe_printf(1, "rmon txpauseframes (%08x) = %08lx\n",
	    MMC_TXPAUSEFRAMES_LO, stats->txpauseframes);
	axgbe_printf(1, "rmon txvlanframes_g (%08x) = %08lx\n",
	    MMC_TXVLANFRAMES_G_LO, stats->txvlanframes_g);
	axgbe_printf(1, "rmon rxframecount_gb (%08x) = %08lx\n",
	    MMC_RXFRAMECOUNT_GB_LO, stats->rxframecount_gb);
	axgbe_printf(1, "rmon rxoctetcount_gb (%08x) = %08lx\n",
	    MMC_RXOCTETCOUNT_GB_LO, stats->rxoctetcount_gb);
	axgbe_printf(1, "rmon rxoctetcount_g (%08x) = %08lx\n",
	    MMC_RXOCTETCOUNT_G_LO, stats->rxoctetcount_g);
	axgbe_printf(1, "rmon rxbroadcastframes_g (%08x) = %08lx\n",
	    MMC_RXBROADCASTFRAMES_G_LO, stats->rxbroadcastframes_g);
	axgbe_printf(1, "rmon rxmulticastframes_g (%08x) = %08lx\n",
	    MMC_RXMULTICASTFRAMES_G_LO, stats->rxmulticastframes_g);
	axgbe_printf(1, "rmon rxcrcerror (%08x) = %08lx\n",
	    MMC_RXCRCERROR_LO, stats->rxcrcerror);
	axgbe_printf(1, "rmon rxrunterror (%08x) = %08lx\n",
	    MMC_RXRUNTERROR, stats->rxrunterror);
	axgbe_printf(1, "rmon rxjabbererror (%08x) = %08lx\n",
	    MMC_RXJABBERERROR, stats->rxjabbererror);
	axgbe_printf(1, "rmon rxundersize_g (%08x) = %08lx\n",
	    MMC_RXUNDERSIZE_G, stats->rxundersize_g);
	axgbe_printf(1, "rmon rxoversize_g (%08x) = %08lx\n",
	    MMC_RXOVERSIZE_G, stats->rxoversize_g);
	axgbe_printf(1, "rmon rx64octets_gb (%08x) = %08lx\n",
	    MMC_RX64OCTETS_GB_LO, stats->rx64octets_gb);
	axgbe_printf(1, "rmon rx65to127octets_gb (%08x) = %08lx\n",
	    MMC_RX65TO127OCTETS_GB_LO, stats->rx65to127octets_gb);
	axgbe_printf(1, "rmon rx128to255octets_gb (%08x) = %08lx\n",
	    MMC_RX128TO255OCTETS_GB_LO, stats->rx128to255octets_gb);
	axgbe_printf(1, "rmon rx256to511octets_gb (%08x) = %08lx\n",
	    MMC_RX256TO511OCTETS_GB_LO, stats->rx256to511octets_gb);
	axgbe_printf(1, "rmon rx512to1023octets_gb (%08x) = %08lx\n",
	    MMC_RX512TO1023OCTETS_GB_LO, stats->rx512to1023octets_gb);
	axgbe_printf(1, "rmon rx1024tomaxoctets_gb (%08x) = %08lx\n",
	    MMC_RX1024TOMAXOCTETS_GB_LO, stats->rx1024tomaxoctets_gb);
	axgbe_printf(1, "rmon rxunicastframes_g (%08x) = %08lx\n",
	    MMC_RXUNICASTFRAMES_G_LO, stats->rxunicastframes_g);
	axgbe_printf(1, "rmon rxlengtherror (%08x) = %08lx\n",
	    MMC_RXLENGTHERROR_LO, stats->rxlengtherror);
	axgbe_printf(1, "rmon rxoutofrangetype (%08x) = %08lx\n",
	    MMC_RXOUTOFRANGETYPE_LO, stats->rxoutofrangetype);
	axgbe_printf(1, "rmon rxpauseframes (%08x) = %08lx\n",
	    MMC_RXPAUSEFRAMES_LO, stats->rxpauseframes);
	axgbe_printf(1, "rmon rxfifooverflow (%08x) = %08lx\n",
	    MMC_RXFIFOOVERFLOW_LO, stats->rxfifooverflow);
	axgbe_printf(1, "rmon rxvlanframes_gb (%08x) = %08lx\n",
	    MMC_RXVLANFRAMES_GB_LO, stats->rxvlanframes_gb);
	axgbe_printf(1, "rmon rxwatchdogerror (%08x) = %08lx\n",
	    MMC_RXWATCHDOGERROR, stats->rxwatchdogerror);
}

void
xgbe_dump_i2c_registers(struct xgbe_prv_data *pdata)
{
	axgbe_printf(1, "*************** I2C Registers **************\n");
	axgbe_printf(1, "  IC_CON           : %010x\n",
	    XI2C_IOREAD(pdata, 0x00));
	axgbe_printf(1, "  IC_TAR           : %010x\n",
	    XI2C_IOREAD(pdata, 0x04));
	axgbe_printf(1, "  IC_HS_MADDR      : %010x\n",
	    XI2C_IOREAD(pdata, 0x0c));
	axgbe_printf(1, "  IC_INTR_STAT     : %010x\n",
	    XI2C_IOREAD(pdata, 0x2c));
	axgbe_printf(1, "  IC_INTR_MASK     : %010x\n",
	    XI2C_IOREAD(pdata, 0x30));
	axgbe_printf(1, "  IC_RAW_INTR_STAT : %010x\n",
	    XI2C_IOREAD(pdata, 0x34));
	axgbe_printf(1, "  IC_RX_TL         : %010x\n",
	    XI2C_IOREAD(pdata, 0x38));
	axgbe_printf(1, "  IC_TX_TL         : %010x\n",
	    XI2C_IOREAD(pdata, 0x3c));
	axgbe_printf(1, "  IC_ENABLE        : %010x\n",
	    XI2C_IOREAD(pdata, 0x6c));
	axgbe_printf(1, "  IC_STATUS        : %010x\n",
	    XI2C_IOREAD(pdata, 0x70));
	axgbe_printf(1, "  IC_TXFLR         : %010x\n",
	    XI2C_IOREAD(pdata, 0x74));
	axgbe_printf(1, "  IC_RXFLR         : %010x\n",
	    XI2C_IOREAD(pdata, 0x78));
	axgbe_printf(1, "  IC_ENABLE_STATUS : %010x\n",
	    XI2C_IOREAD(pdata, 0x9c));
	axgbe_printf(1, "  IC_COMP_PARAM1   : %010x\n",
	    XI2C_IOREAD(pdata, 0xf4));
}

static void
xgbe_dump_active_vlans(struct xgbe_prv_data *pdata)
{
	int i;

	/* Dump the raw words of the active-VLAN bitmap (VLAN_NVID bits). */
	for (i = 0; i < howmany(VLAN_NVID, NBBY * sizeof(bitstr_t)); i++) {
		axgbe_printf(1, "vlans[%d]: 0x%lx ", i,
		    pdata->active_vlans[i]);
	}
	axgbe_printf(1, "\n");
}

static void
xgbe_default_config(struct xgbe_prv_data *pdata)
{
	pdata->blen = DMA_SBMR_BLEN_64;
	pdata->pbl = DMA_PBL_128;
	pdata->aal = 1;
	pdata->rd_osr_limit = 8;
	pdata->wr_osr_limit = 8;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_DISABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 1;
	pdata->rx_pause = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
	pdata->enable_rss = 1;
}

static void
axgbe_setup_sysctl(struct xgbe_prv_data *pdata)
{
	struct sysctl_ctx_list *clist;
	struct sysctl_oid *parent;
	struct sysctl_oid_list *top;

	clist = device_get_sysctl_ctx(pdata->dev);
	parent = device_get_sysctl_tree(pdata->dev);
	top = SYSCTL_CHILDREN(parent);
}

static int
axgbe_if_attach_post(if_ctx_t ctx)
{
	struct axgbe_if_softc *sc = iflib_get_softc(ctx);
	struct xgbe_prv_data *pdata = &sc->pdata;
	struct ifnet *ifp = pdata->netdev;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct
xgbe_hw_if *hw_if = &pdata->hw_if; if_softc_ctx_t scctx = sc->scctx; int i, ret; + /* set split header support based on tunable */ + pdata->sph_enable = axgbe_sph_enable; + /* Initialize ECC timestamps */ pdata->tx_sec_period = ticks; pdata->tx_ded_period = ticks; pdata->rx_sec_period = ticks; pdata->rx_ded_period = ticks; pdata->desc_sec_period = ticks; pdata->desc_ded_period = ticks; /* Reset the hardware */ ret = hw_if->exit(&sc->pdata); if (ret) axgbe_error("%s: exit error %d\n", __func__, ret); /* Configure the defaults */ xgbe_default_config(pdata); /* Set default max values if not provided */ if (!pdata->tx_max_fifo_size) pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size; if (!pdata->rx_max_fifo_size) pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size; DBGPR("%s: tx fifo 0x%x rx fifo 0x%x\n", __func__, pdata->tx_max_fifo_size, pdata->rx_max_fifo_size); /* Set and validate the number of descriptors for a ring */ MPASS(powerof2(XGBE_TX_DESC_CNT)); pdata->tx_desc_count = XGBE_TX_DESC_CNT; MPASS(powerof2(XGBE_RX_DESC_CNT)); pdata->rx_desc_count = XGBE_RX_DESC_CNT; /* Adjust the number of queues based on interrupts assigned */ if (pdata->channel_irq_count) { pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count, pdata->channel_irq_count); pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count, pdata->channel_irq_count); DBGPR("adjusted TX %u/%u RX %u/%u\n", pdata->tx_ring_count, pdata->tx_q_count, pdata->rx_ring_count, pdata->rx_q_count); } /* Set channel count based on interrupts assigned */ pdata->channel_count = max_t(unsigned int, scctx->isc_ntxqsets, scctx->isc_nrxqsets); DBGPR("Channel count set to: %u\n", pdata->channel_count); /* Get RSS key */ #ifdef RSS rss_getkey((uint8_t *)pdata->rss_key); #else arc4rand(&pdata->rss_key, ARRAY_SIZE(pdata->rss_key), 0); #endif XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Initialize the PHY device */ pdata->sysctl_an_cdr_workaround = pdata->vdata->an_cdr_workaround; phy_if->phy_init(pdata); /* Set the coalescing */ xgbe_init_rx_coalesce(&sc->pdata); xgbe_init_tx_coalesce(&sc->pdata); ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SGMII, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_100_SGMII, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); /* Initialize the phy */ pdata->phy_link = -1; pdata->phy_speed = SPEED_UNKNOWN; ret = phy_if->phy_reset(pdata); if (ret) return (ret); /* Calculate the Rx buffer size before allocating rings */ ret = xgbe_calc_rx_buf_size(pdata->netdev, if_getmtu(pdata->netdev)); pdata->rx_buf_size = ret; DBGPR("%s: rx_buf_size %d\n", __func__, ret); /* Setup RSS lookup table */ for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, i % pdata->rx_ring_count); /* * Mark the device down until it is initialized, which happens * when the device is accessed first 
(for configuring the iface, * eg: setting IP) */ set_bit(XGBE_DOWN, &pdata->dev_state); DBGPR("mtu %d\n", ifp->if_mtu); scctx->isc_max_frame_size = ifp->if_mtu + 18; scctx->isc_min_frame_size = XGMAC_MIN_PACKET; axgbe_setup_sysctl(pdata); axgbe_sysctl_init(pdata); + axgbe_pci_init(pdata); + return (0); } /* axgbe_if_attach_post */ static void xgbe_free_intr(struct xgbe_prv_data *pdata, struct resource *res, void *tag, int rid) { if (tag) bus_teardown_intr(pdata->dev, res, tag); if (res) bus_release_resource(pdata->dev, SYS_RES_IRQ, rid, res); } static void axgbe_interrupts_free(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; struct xgbe_channel *channel; struct if_irq irq; int i; axgbe_printf(2, "%s: mode %d\n", __func__, scctx->isc_intr); /* Free dev_irq */ iflib_irq_free(ctx, &pdata->dev_irq); /* Free ecc_irq */ xgbe_free_intr(pdata, pdata->ecc_irq_res, pdata->ecc_irq_tag, pdata->ecc_rid); /* Free i2c_irq */ xgbe_free_intr(pdata, pdata->i2c_irq_res, pdata->i2c_irq_tag, pdata->i2c_rid); /* Free an_irq */ xgbe_free_intr(pdata, pdata->an_irq_res, pdata->an_irq_tag, pdata->an_rid); for (i = 0; i < scctx->isc_nrxqsets; i++) { channel = pdata->channel[i]; axgbe_printf(2, "%s: rid %d\n", __func__, channel->dma_irq_rid); irq.ii_res = channel->dma_irq_res; irq.ii_tag = channel->dma_irq_tag; iflib_irq_free(ctx, &irq); } } static int axgbe_if_detach(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_phy_if *phy_if = &pdata->phy_if; struct resource *mac_res[2]; mac_res[0] = pdata->xgmac_res; mac_res[1] = pdata->xpcs_res; phy_if->phy_exit(pdata); /* Free Interrupts */ axgbe_interrupts_free(ctx); /* Free workqueues */ taskqueue_free(pdata->dev_workqueue); /* Release bus resources */ bus_release_resources(iflib_get_dev(ctx), axgbe_pci_mac_spec, mac_res); /* Free VLAN bitmap */ free(pdata->active_vlans, M_AXGBE); axgbe_sysctl_exit(pdata); return (0); } /* axgbe_if_detach */ static void axgbe_pci_init(struct xgbe_prv_data *pdata) { struct xgbe_phy_if *phy_if = &pdata->phy_if; struct xgbe_hw_if *hw_if = &pdata->hw_if; int ret = 0; - + + if (!__predict_false((test_bit(XGBE_DOWN, &pdata->dev_state)))) { + axgbe_printf(1, "%s: Starting when XGBE_UP\n", __func__); + return; + } + hw_if->init(pdata); ret = phy_if->phy_start(pdata); if (ret) { axgbe_error("%s: phy start %d\n", __func__, ret); ret = hw_if->exit(pdata); if (ret) axgbe_error("%s: exit error %d\n", __func__, ret); return; } hw_if->enable_tx(pdata); hw_if->enable_rx(pdata); xgbe_start_timers(pdata); clear_bit(XGBE_DOWN, &pdata->dev_state); xgbe_dump_phy_registers(pdata); xgbe_dump_prop_registers(pdata); xgbe_dump_dma_registers(pdata, -1); xgbe_dump_mtl_registers(pdata); xgbe_dump_mac_registers(pdata); xgbe_dump_rmon_counters(pdata); } static void axgbe_if_init(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; axgbe_pci_init(pdata); } static void axgbe_pci_stop(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_phy_if *phy_if = &pdata->phy_if; struct xgbe_hw_if *hw_if = &pdata->hw_if; int ret; if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) { axgbe_printf(1, "%s: Stopping when XGBE_DOWN\n", __func__); return; } xgbe_stop_timers(pdata); taskqueue_drain_all(pdata->dev_workqueue); hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); phy_if->phy_stop(pdata); 
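	/*
	 * Bring-down order matters: timers and the service taskqueue are
	 * quiesced first, then Tx/Rx DMA is disabled and the PHY stopped,
	 * so nothing is still active when the hardware is reset via
	 * exit() below.
	 */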
ret = hw_if->exit(pdata); if (ret) axgbe_error("%s: exit error %d\n", __func__, ret); set_bit(XGBE_DOWN, &pdata->dev_state); } static void axgbe_if_stop(if_ctx_t ctx) { axgbe_pci_stop(ctx); } static void axgbe_if_disable_intr(if_ctx_t ctx) { /* TODO - implement */ } static void axgbe_if_enable_intr(if_ctx_t ctx) { /* TODO - implement */ } static int axgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int ntxqs, int ntxqsets) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; struct xgbe_channel *channel; struct xgbe_ring *tx_ring; int i, j, k; MPASS(scctx->isc_ntxqsets > 0); MPASS(scctx->isc_ntxqsets == ntxqsets); MPASS(ntxqs == 1); axgbe_printf(1, "%s: txqsets %d/%d txqs %d\n", __func__, scctx->isc_ntxqsets, ntxqsets, ntxqs); for (i = 0 ; i < ntxqsets; i++) { channel = pdata->channel[i]; tx_ring = (struct xgbe_ring*)malloc(ntxqs * sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO); if (tx_ring == NULL) { axgbe_error("Unable to allocate TX ring memory\n"); goto tx_ring_fail; } channel->tx_ring = tx_ring; for (j = 0; j < ntxqs; j++, tx_ring++) { tx_ring->rdata = (struct xgbe_ring_data*)malloc(scctx->isc_ntxd[j] * sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT); /* Get the virtual & physical address of hw queues */ tx_ring->rdesc = (struct xgbe_ring_desc *)va[i*ntxqs + j]; tx_ring->rdesc_paddr = pa[i*ntxqs + j]; tx_ring->rdesc_count = scctx->isc_ntxd[j]; spin_lock_init(&tx_ring->lock); } } axgbe_printf(1, "allocated for %d tx queues\n", scctx->isc_ntxqsets); return (0); tx_ring_fail: for (j = 0; j < i ; j++) { channel = pdata->channel[j]; tx_ring = channel->tx_ring; for (k = 0; k < ntxqs ; k++, tx_ring++) { if (tx_ring && tx_ring->rdata) free(tx_ring->rdata, M_AXGBE); } free(channel->tx_ring, M_AXGBE); channel->tx_ring = NULL; } return (ENOMEM); } /* axgbe_if_tx_queues_alloc */ static int axgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int nrxqs, int nrxqsets) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; struct xgbe_channel *channel; struct xgbe_ring *rx_ring; int i, j, k; MPASS(scctx->isc_nrxqsets > 0); MPASS(scctx->isc_nrxqsets == nrxqsets); - MPASS(nrxqs == 2); + if (!pdata->sph_enable) { + MPASS(nrxqs == 1); + } else { + MPASS(nrxqs == 2); + } axgbe_printf(1, "%s: rxqsets %d/%d rxqs %d\n", __func__, scctx->isc_nrxqsets, nrxqsets, nrxqs); for (i = 0 ; i < nrxqsets; i++) { channel = pdata->channel[i]; rx_ring = (struct xgbe_ring*)malloc(nrxqs * sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO); if (rx_ring == NULL) { axgbe_error("Unable to allocate RX ring memory\n"); goto rx_ring_fail; } channel->rx_ring = rx_ring; for (j = 0; j < nrxqs; j++, rx_ring++) { rx_ring->rdata = (struct xgbe_ring_data*)malloc(scctx->isc_nrxd[j] * sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT); /* Get the virtual and physical address of the hw queues */ rx_ring->rdesc = (struct xgbe_ring_desc *)va[i*nrxqs + j]; rx_ring->rdesc_paddr = pa[i*nrxqs + j]; rx_ring->rdesc_count = scctx->isc_nrxd[j]; spin_lock_init(&rx_ring->lock); } } axgbe_printf(2, "allocated for %d rx queues\n", scctx->isc_nrxqsets); return (0); rx_ring_fail: for (j = 0 ; j < i ; j++) { channel = pdata->channel[j]; rx_ring = channel->rx_ring; for (k = 0; k < nrxqs ; k++, rx_ring++) { if (rx_ring && rx_ring->rdata) free(rx_ring->rdata, M_AXGBE); } free(channel->rx_ring, M_AXGBE); channel->rx_ring = NULL; } return (ENOMEM); } /* 
axgbe_if_rx_queues_alloc */ static void axgbe_if_queues_free(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; if_shared_ctx_t sctx = sc->sctx; struct xgbe_channel *channel; struct xgbe_ring *tx_ring; struct xgbe_ring *rx_ring; int i, j; for (i = 0 ; i < scctx->isc_ntxqsets; i++) { channel = pdata->channel[i]; tx_ring = channel->tx_ring; for (j = 0; j < sctx->isc_ntxqs ; j++, tx_ring++) { if (tx_ring && tx_ring->rdata) free(tx_ring->rdata, M_AXGBE); } free(channel->tx_ring, M_AXGBE); channel->tx_ring = NULL; } for (i = 0 ; i < scctx->isc_nrxqsets; i++) { channel = pdata->channel[i]; rx_ring = channel->rx_ring; for (j = 0; j < sctx->isc_nrxqs ; j++, rx_ring++) { if (rx_ring && rx_ring->rdata) free(rx_ring->rdata, M_AXGBE); } free(channel->rx_ring, M_AXGBE); channel->rx_ring = NULL; } /* Free Channels */ for (i = 0; i < pdata->total_channel_count ; i++) { free(pdata->channel[i], M_AXGBE); pdata->channel[i] = NULL; } pdata->total_channel_count = 0; pdata->channel_count = 0; } /* axgbe_if_queues_free */ static void axgbe_if_vlan_register(if_ctx_t ctx, uint16_t vtag) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_hw_if *hw_if = &pdata->hw_if; if (!bit_test(pdata->active_vlans, vtag)) { axgbe_printf(0, "Registering VLAN %d\n", vtag); bit_set(pdata->active_vlans, vtag); hw_if->update_vlan_hash_table(pdata); pdata->num_active_vlans++; axgbe_printf(1, "Total active vlans: %d\n", pdata->num_active_vlans); } else axgbe_printf(0, "VLAN %d already registered\n", vtag); xgbe_dump_active_vlans(pdata); } static void axgbe_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_hw_if *hw_if = &pdata->hw_if; if (pdata->num_active_vlans == 0) { axgbe_printf(1, "No active VLANs to unregister\n"); return; } if (bit_test(pdata->active_vlans, vtag)){ axgbe_printf(0, "Un-Registering VLAN %d\n", vtag); bit_clear(pdata->active_vlans, vtag); hw_if->update_vlan_hash_table(pdata); pdata->num_active_vlans--; axgbe_printf(1, "Total active vlans: %d\n", pdata->num_active_vlans); } else axgbe_printf(0, "VLAN %d already unregistered\n", vtag); xgbe_dump_active_vlans(pdata); } #if __FreeBSD_version >= 1300000 static bool axgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: default: return (true); } } #endif static int axgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; struct xgbe_channel *channel; struct if_irq irq; int i, error, rid = 0, flags; char buf[16]; MPASS(scctx->isc_intr != IFLIB_INTR_LEGACY); pdata->isr_as_tasklet = 1; if (scctx->isc_intr == IFLIB_INTR_MSI) { pdata->irq_count = 1; pdata->channel_irq_count = 1; return (0); } axgbe_printf(1, "%s: msix %d txqsets %d rxqsets %d\n", __func__, msix, scctx->isc_ntxqsets, scctx->isc_nrxqsets); flags = RF_ACTIVE; /* DEV INTR SETUP */ rid++; error = iflib_irq_alloc_generic(ctx, &pdata->dev_irq, rid, IFLIB_INTR_ADMIN, axgbe_dev_isr, sc, 0, "dev_irq"); if (error) { axgbe_error("Failed to register device interrupt rid %d name %s\n", rid, "dev_irq"); return (error); } /* ECC INTR SETUP */ rid++; pdata->ecc_rid = rid; pdata->ecc_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ, &rid, flags); if (!pdata->ecc_irq_res) { 
axgbe_error("failed to allocate IRQ for rid %d, name %s.\n", rid, "ecc_irq"); return (ENOMEM); } error = bus_setup_intr(pdata->dev, pdata->ecc_irq_res, INTR_MPSAFE | INTR_TYPE_NET, NULL, axgbe_ecc_isr, sc, &pdata->ecc_irq_tag); if (error) { axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n", rid, "ecc_irq", error); return (error); } /* I2C INTR SETUP */ rid++; pdata->i2c_rid = rid; pdata->i2c_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ, &rid, flags); if (!pdata->i2c_irq_res) { axgbe_error("failed to allocate IRQ for rid %d, name %s.\n", rid, "i2c_irq"); return (ENOMEM); } error = bus_setup_intr(pdata->dev, pdata->i2c_irq_res, INTR_MPSAFE | INTR_TYPE_NET, NULL, axgbe_i2c_isr, sc, &pdata->i2c_irq_tag); if (error) { axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n", rid, "i2c_irq", error); return (error); } /* AN INTR SETUP */ rid++; pdata->an_rid = rid; pdata->an_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ, &rid, flags); if (!pdata->an_irq_res) { axgbe_error("failed to allocate IRQ for rid %d, name %s.\n", rid, "an_irq"); return (ENOMEM); } error = bus_setup_intr(pdata->dev, pdata->an_irq_res, INTR_MPSAFE | INTR_TYPE_NET, NULL, axgbe_an_isr, sc, &pdata->an_irq_tag); if (error) { axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n", rid, "an_irq", error); return (error); } pdata->per_channel_irq = 1; pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL; rid++; for (i = 0; i < scctx->isc_nrxqsets; i++, rid++) { channel = pdata->channel[i]; snprintf(buf, sizeof(buf), "rxq%d", i); error = iflib_irq_alloc_generic(ctx, &irq, rid, IFLIB_INTR_RXTX, axgbe_msix_que, channel, channel->queue_index, buf); if (error) { axgbe_error("Failed to allocated que int %d err: %d\n", i, error); return (error); } channel->dma_irq_rid = rid; channel->dma_irq_res = irq.ii_res; channel->dma_irq_tag = irq.ii_tag; axgbe_printf(1, "%s: channel count %d idx %d irq %d\n", __func__, scctx->isc_nrxqsets, i, rid); } pdata->irq_count = msix; pdata->channel_irq_count = scctx->isc_nrxqsets; for (i = 0; i < scctx->isc_ntxqsets; i++) { channel = pdata->channel[i]; snprintf(buf, sizeof(buf), "txq%d", i); irq.ii_res = channel->dma_irq_res; iflib_softirq_alloc_generic(ctx, &irq, IFLIB_INTR_TX, channel, channel->queue_index, buf); } return (0); } /* axgbe_if_msix_intr_assign */ static int xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel) { struct xgbe_hw_if *hw_if = &pdata->hw_if; enum xgbe_int int_id; if (channel->tx_ring && channel->rx_ring) int_id = XGMAC_INT_DMA_CH_SR_TI_RI; else if (channel->tx_ring) int_id = XGMAC_INT_DMA_CH_SR_TI; else if (channel->rx_ring) int_id = XGMAC_INT_DMA_CH_SR_RI; else return (-1); axgbe_printf(1, "%s channel: %d rx_tx interrupt enabled %d\n", __func__, channel->queue_index, int_id); return (hw_if->enable_int(channel, int_id)); } static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel) { struct xgbe_hw_if *hw_if = &pdata->hw_if; enum xgbe_int int_id; if (channel->tx_ring && channel->rx_ring) int_id = XGMAC_INT_DMA_CH_SR_TI_RI; else if (channel->tx_ring) int_id = XGMAC_INT_DMA_CH_SR_TI; else if (channel->rx_ring) int_id = XGMAC_INT_DMA_CH_SR_RI; else return; axgbe_printf(1, "%s channel: %d rx_tx interrupt disabled %d\n", __func__, channel->queue_index, int_id); hw_if->disable_int(channel, int_id); } static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) xgbe_disable_rx_tx_int(pdata, pdata->channel[i]); } 
static int axgbe_msix_que(void *arg) { struct xgbe_channel *channel = (struct xgbe_channel *)arg; struct xgbe_prv_data *pdata = channel->pdata; unsigned int dma_ch_isr, dma_status; axgbe_printf(1, "%s: Channel: %d SR 0x%04x DSR 0x%04x IER:0x%04x D_ISR:0x%04x M_ISR:0x%04x\n", __func__, channel->queue_index, XGMAC_DMA_IOREAD(channel, DMA_CH_SR), XGMAC_DMA_IOREAD(channel, DMA_CH_DSR), XGMAC_DMA_IOREAD(channel, DMA_CH_IER), XGMAC_IOREAD(pdata, DMA_ISR), XGMAC_IOREAD(pdata, MAC_ISR)); dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); /* Disable Tx and Rx channel interrupts */ xgbe_disable_rx_tx_int(pdata, channel); /* Clear the interrupts */ dma_status = 0; XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1); XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1); XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status); return (FILTER_SCHEDULE_THREAD); } static int axgbe_dev_isr(void *arg) { struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg; struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel; struct xgbe_hw_if *hw_if = &pdata->hw_if; unsigned int i, dma_isr, dma_ch_isr; unsigned int mac_isr, mac_mdioisr; int ret = FILTER_HANDLED; dma_isr = XGMAC_IOREAD(pdata, DMA_ISR); axgbe_printf(2, "%s DMA ISR: 0x%x\n", __func__, dma_isr); if (!dma_isr) return (FILTER_HANDLED); for (i = 0; i < pdata->channel_count; i++) { if (!(dma_isr & (1 << i))) continue; channel = pdata->channel[i]; dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); axgbe_printf(2, "%s: channel %d SR 0x%x DSR 0x%x\n", __func__, channel->queue_index, dma_ch_isr, XGMAC_DMA_IOREAD(channel, DMA_CH_DSR)); /* * The TI or RI interrupt bits may still be set even if using * per channel DMA interrupts. Check to be sure those are not * enabled before using the private data napi structure. */ if (!pdata->per_channel_irq && (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) { /* Disable Tx and Rx interrupts */ xgbe_disable_rx_tx_ints(pdata); } else { /* * Don't clear Rx/Tx status if doing per channel DMA * interrupts, these will be cleared by the ISR for * per channel DMA interrupts */ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0); XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0); } if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU)) pdata->ext_stats.rx_buffer_unavailable++; /* Restart the device on a Fatal Bus Error */ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) axgbe_error("%s: Fatal bus error reported 0x%x\n", __func__, dma_ch_isr); /* Clear all interrupt signals */ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); ret = FILTER_SCHEDULE_THREAD; } if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) { mac_isr = XGMAC_IOREAD(pdata, MAC_ISR); axgbe_printf(2, "%s MAC ISR: 0x%x\n", __func__, mac_isr); if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS)) hw_if->tx_mmc_int(pdata); if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) hw_if->rx_mmc_int(pdata); if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) { mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR); if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR, SNGLCOMPINT)) wakeup_one(pdata); } } return (ret); } /* axgbe_dev_isr */ static void axgbe_i2c_isr(void *arg) { struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg; sc->pdata.i2c_if.i2c_isr(&sc->pdata); } static void axgbe_ecc_isr(void *arg) { /* TODO - implement */ } static void axgbe_an_isr(void *arg) { struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg; sc->pdata.phy_if.an_isr(&sc->pdata); } static int axgbe_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data 
*pdata = &sc->pdata; int ret; if (qid < pdata->tx_q_count) { ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]); if (ret) { axgbe_error("Enable TX INT failed\n"); return (ret); } } else axgbe_error("Queue ID exceeds channel count\n"); return (0); } static int axgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; int ret; if (qid < pdata->rx_q_count) { ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]); if (ret) { axgbe_error("Enable RX INT failed\n"); return (ret); } } else axgbe_error("Queue ID exceeds channel count\n"); return (0); } static void axgbe_if_update_admin_status(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; axgbe_printf(1, "%s: phy_link %d status %d speed %d\n", __func__, pdata->phy_link, sc->link_status, pdata->phy.speed); if (pdata->phy_link < 0) return; if (pdata->phy_link) { if (sc->link_status == LINK_STATE_DOWN) { sc->link_status = LINK_STATE_UP; if (pdata->phy.speed & SPEED_10000) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); else if (pdata->phy.speed & SPEED_2500) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(2.5)); else if (pdata->phy.speed & SPEED_1000) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(1)); else if (pdata->phy.speed & SPEED_100) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Mbps(100)); else if (pdata->phy.speed & SPEED_10) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Mbps(10)); } } else { if (sc->link_status == LINK_STATE_UP) { sc->link_status = LINK_STATE_DOWN; iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); } } } static int axgbe_if_media_change(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); sx_xlock(&sc->pdata.an_mutex); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_10G_KR: sc->pdata.phy.speed = SPEED_10000; sc->pdata.phy.autoneg = AUTONEG_DISABLE; break; case IFM_2500_KX: sc->pdata.phy.speed = SPEED_2500; sc->pdata.phy.autoneg = AUTONEG_DISABLE; break; case IFM_1000_KX: sc->pdata.phy.speed = SPEED_1000; sc->pdata.phy.autoneg = AUTONEG_DISABLE; break; case IFM_100_TX: sc->pdata.phy.speed = SPEED_100; sc->pdata.phy.autoneg = AUTONEG_DISABLE; break; case IFM_AUTO: sc->pdata.phy.autoneg = AUTONEG_ENABLE; break; } sx_xunlock(&sc->pdata.an_mutex); return (-sc->pdata.phy_if.phy_config_aneg(&sc->pdata)); } static int axgbe_if_promisc_set(if_ctx_t ctx, int flags) { - struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct axgbe_if_softc *sc = iflib_get_softc(ctx); + struct xgbe_prv_data *pdata = &sc->pdata; + struct ifnet *ifp = pdata->netdev; - if (XGMAC_IOREAD_BITS(&sc->pdata, MAC_PFR, PR) == 1) - return (0); + axgbe_printf(1, "%s: MAC_PFR 0x%x drv_flags 0x%x if_flags 0x%x\n", + __func__, XGMAC_IOREAD(pdata, MAC_PFR), ifp->if_drv_flags, ifp->if_flags); - XGMAC_IOWRITE_BITS(&sc->pdata, MAC_PFR, PR, 1); - XGMAC_IOWRITE_BITS(&sc->pdata, MAC_PFR, VTFE, 0); + if (ifp->if_flags & IFF_PPROMISC) { - return (0); + axgbe_printf(1, "User requested to enter promisc mode\n"); + + if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == 1) { + axgbe_printf(1, "Already in promisc mode\n"); + return (0); + } + + axgbe_printf(1, "Entering promisc mode\n"); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); + } else { + + axgbe_printf(1, "User requested to leave promisc mode\n"); + + if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) 
== 0) { + axgbe_printf(1, "Already not in promisc mode\n"); + return (0); + } + + axgbe_printf(1, "Leaving promisc mode\n"); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); + } + + return (0); } static uint64_t axgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; pdata->hw_if.read_mmc_stats(pdata); switch(cnt) { case IFCOUNTER_IPACKETS: return (pstats->rxframecount_gb); case IFCOUNTER_IERRORS: return (pstats->rxframecount_gb - pstats->rxbroadcastframes_g - pstats->rxmulticastframes_g - pstats->rxunicastframes_g); case IFCOUNTER_OPACKETS: return (pstats->txframecount_gb); case IFCOUNTER_OERRORS: return (pstats->txframecount_gb - pstats->txframecount_g); case IFCOUNTER_IBYTES: return (pstats->rxoctetcount_gb); case IFCOUNTER_OBYTES: return (pstats->txoctetcount_gb); default: return (if_get_counter_default(ifp, cnt)); } } static int axgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; int ret; if (mtu > XGMAC_JUMBO_PACKET_MTU) return (EINVAL); ret = xgbe_calc_rx_buf_size(pdata->netdev, mtu); pdata->rx_buf_size = ret; axgbe_printf(1, "%s: rx_buf_size %d\n", __func__, ret); sc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; return (0); } static void axgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; ifmr->ifm_status = IFM_AVALID; if (!sc->pdata.phy.link) return; ifmr->ifm_active = IFM_ETHER; ifmr->ifm_status |= IFM_ACTIVE; axgbe_printf(1, "Speed 0x%x Mode %d\n", sc->pdata.phy.speed, pdata->phy_if.phy_impl.cur_mode(pdata)); pdata->phy_if.phy_impl.get_type(pdata, ifmr); ifmr->ifm_active |= IFM_FDX; ifmr->ifm_active |= IFM_ETH_TXPAUSE; ifmr->ifm_active |= IFM_ETH_RXPAUSE; } diff --git a/sys/dev/axgbe/xgbe-dev.c b/sys/dev/axgbe/xgbe-dev.c index 1cfd557a4995..95161802ed8e 100644 --- a/sys/dev/axgbe/xgbe-dev.c +++ b/sys/dev/axgbe/xgbe-dev.c @@ -1,2845 +1,2864 @@ /* * AMD 10Gb Ethernet driver * * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "xgbe.h" #include "xgbe-common.h" #include static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata) { return (if_getmtu(pdata->netdev) + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); } static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec) { unsigned long rate; unsigned int ret; rate = pdata->sysclk_rate; /* * Convert the input usec value to the watchdog timer value. Each * watchdog timer value is equivalent to 256 clock cycles. * Calculate the required value as: * ( usec * ( system_clock_mhz / 10^6 ) ) / 256 */ ret = (usec * (rate / 1000000)) / 256; return (ret); } static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, unsigned int riwt) { unsigned long rate; unsigned int ret; rate = pdata->sysclk_rate; /* * Convert the input watchdog timer value to the usec value. Each * watchdog timer value is equivalent to 256 clock cycles. 
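* (Example, assuming a hypothetical 125 MHz DMA system clock: rate / 10^6
* = 125, so a riwt value of 1 corresponds to 256 / 125, roughly 2 usec of
* coalescing delay.)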
* Calculate the required value as: * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) */ ret = (riwt * 256) / (rate / 1000000); return (ret); } static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata) { unsigned int pblx8, pbl; unsigned int i; pblx8 = DMA_PBL_X8_DISABLE; pbl = pdata->pbl; if (pdata->pbl > 32) { pblx8 = DMA_PBL_X8_ENABLE; pbl >>= 3; } for (i = 0; i < pdata->channel_count; i++) { XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8, pblx8); if (pdata->channel[i]->tx_ring) XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, PBL, pbl); if (pdata->channel[i]->rx_ring) XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, PBL, pbl); } return (0); } static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->tx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP, pdata->tx_osp_mode); } return (0); } static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); return (0); } static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); return (0); } static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); return (0); } static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); return (0); } static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT, pdata->rx_riwt); } return (0); } static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) { return (0); } static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ, pdata->rx_buf_size); } } static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) { unsigned int i; + int tso_enabled = (if_getcapenable(pdata->netdev) & IFCAP_TSO); + for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->tx_ring) break; - axgbe_printf(0, "Enabling TSO in channel %d\n", i); - XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1); + axgbe_printf(1, "TSO in channel %d %s\n", i, tso_enabled ? "enabled" : "disabled"); + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, tso_enabled ? 
1 : 0); } } static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) { unsigned int i; + int sph_enable_flag = XGMAC_IOREAD_BITS(pdata, MAC_HWF1R, SPHEN); + + axgbe_printf(1, "sph_enable %d sph feature enabled?: %d\n", + pdata->sph_enable, sph_enable_flag); + + if (pdata->sph_enable && sph_enable_flag) + axgbe_printf(0, "SPH Enabled\n"); for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; + if (pdata->sph_enable && sph_enable_flag) { + /* Enable split header feature */ + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); + } else { + /* Disable split header feature */ + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); + } - XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); + /* per-channel confirmation of SPH being disabled/enabled */ + int val = XGMAC_DMA_IOREAD_BITS(pdata->channel[i], DMA_CH_CR, SPH); + axgbe_printf(0, "%s: SPH %s in channel %d\n", __func__, + (val ? "enabled" : "disabled"), i); } - XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); + if (pdata->sph_enable && sph_enable_flag) + XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); } static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, unsigned int index, unsigned int val) { unsigned int wait; int ret = 0; mtx_lock(&pdata->rss_mutex); if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { ret = -EBUSY; goto unlock; } XGMAC_IOWRITE(pdata, MAC_RSSDR, val); XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); wait = 1000; while (wait--) { if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) goto unlock; DELAY(1000); } ret = -EBUSY; unlock: mtx_unlock(&pdata->rss_mutex); return (ret); } static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) { unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(uint32_t); unsigned int *key = (unsigned int *)&pdata->rss_key; int ret; while (key_regs--) { ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, key_regs, *key++); if (ret) return (ret); } return (0); } static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata) { unsigned int i; int ret; for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { ret = xgbe_write_rss_reg(pdata, XGBE_RSS_LOOKUP_TABLE_TYPE, i, pdata->rss_table[i]); if (ret) return (ret); } return (0); } static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const uint8_t *key) { memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); return (xgbe_write_rss_hash_key(pdata)); } static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, const uint32_t *table) { unsigned int i; for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); return (xgbe_write_rss_lookup_table(pdata)); } static int xgbe_enable_rss(struct xgbe_prv_data *pdata) { int ret; if (!pdata->hw_feat.rss) return (-EOPNOTSUPP); /* Program the hash key */ ret = xgbe_write_rss_hash_key(pdata); if (ret) return (ret); /* Program the lookup table */ ret = xgbe_write_rss_lookup_table(pdata); if (ret) return (ret); /* Set the RSS options */ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); /* Enable RSS */ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); axgbe_printf(0, "RSS Enabled\n"); return (0); } static int xgbe_disable_rss(struct xgbe_prv_data *pdata) { if (!pdata->hw_feat.rss) return (-EOPNOTSUPP); XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); axgbe_printf(0, "RSS Disabled\n"); 
return (0); } static void xgbe_config_rss(struct xgbe_prv_data *pdata) { int ret; if (!pdata->hw_feat.rss) return; /* Check if the interface has RSS capability */ if (pdata->enable_rss) ret = xgbe_enable_rss(pdata); else ret = xgbe_disable_rss(pdata); if (ret) axgbe_error("error configuring RSS, RSS disabled\n"); } static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; unsigned int reg, reg_val; unsigned int i; /* Clear MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); /* Clear MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); XGMAC_IOWRITE(pdata, reg, reg_val); reg += MAC_QTFCR_INC; } return (0); } static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; unsigned int reg, reg_val; unsigned int i; /* Set MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { unsigned int ehfc = 0; if (pdata->rx_rfd[i]) { /* Flow control thresholds are established */ /* TODO - enable pfc/ets support */ ehfc = 1; } XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); axgbe_printf(1, "flow control %s for RXq%u\n", ehfc ? "enabled" : "disabled", i); } /* Set MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); /* Enable transmit flow control */ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); /* Set pause time */ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); XGMAC_IOWRITE(pdata, reg, reg_val); reg += MAC_QTFCR_INC; } return (0); } static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); return (0); } static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); return (0); } static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) { if (pdata->tx_pause) xgbe_enable_tx_flow_control(pdata); else xgbe_disable_tx_flow_control(pdata); return (0); } static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) { if (pdata->rx_pause) xgbe_enable_rx_flow_control(pdata); else xgbe_disable_rx_flow_control(pdata); return (0); } static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) { xgbe_config_tx_flow_control(pdata); xgbe_config_rx_flow_control(pdata); XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); } static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; unsigned int i, ver; /* Set the interrupt mode if supported */ if (pdata->channel_irq_mode) XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, pdata->channel_irq_mode); ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); for (i = 0; i < pdata->channel_count; i++) { channel = pdata->channel[i]; /* Clear all the interrupts which are set */ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); /* Clear all interrupt enable bits */ channel->curr_ier = 0; /* Enable following interrupts * NIE - Normal Interrupt Summary Enable * AIE - Abnormal Interrupt Summary Enable * FBEE - Fatal Bus Error Enable */ if (ver < 0x21) { XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); } 
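/*
 * Editor's note: on IP versions prior to 2.1 the normal/abnormal summary
 * enable bits sit at different positions in DMA_CH_IER (the NIE20/AIE20
 * definitions above); 2.1 and later use the relocated NIE/AIE bits below,
 * hence the version check.
 */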
else { XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); } XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); if (channel->tx_ring) { /* Enable the following Tx interrupts * TIE - Transmit Interrupt Enable (unless using * per channel interrupts in edge triggered * mode) */ if (!pdata->per_channel_irq || pdata->channel_irq_mode) XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); } if (channel->rx_ring) { /* Enable following Rx interrupts * RBUE - Receive Buffer Unavailable Enable * RIE - Receive Interrupt Enable (unless using * per channel interrupts in edge triggered * mode) */ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); if (!pdata->per_channel_irq || pdata->channel_irq_mode) XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); } XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); } } static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) { unsigned int mtl_q_isr; unsigned int q_count, i; q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); for (i = 0; i < q_count; i++) { /* Clear all the interrupts which are set */ mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); /* No MTL interrupts to be enabled */ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); } } static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) { unsigned int mac_ier = 0; /* Enable Timestamp interrupt */ XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); /* Enable all counter interrupts */ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); /* Enable MDIO single command completion interrupt */ XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1); } static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed) { unsigned int ss; switch (speed) { case SPEED_1000: ss = 0x03; break; case SPEED_2500: ss = 0x02; break; case SPEED_10000: ss = 0x00; break; default: return (-EINVAL); } if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); return (0); } static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) { /* Put the VLAN tag in the Rx descriptor */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); /* Don't check the VLAN type */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); /* Check only C-TAG (0x8100) packets */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); /* Enable VLAN tag stripping */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); axgbe_printf(0, "VLAN Stripping Enabled\n"); return (0); } static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); axgbe_printf(0, "VLAN Stripping Disabled\n"); return (0); } static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) { /* Enable VLAN filtering */ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); /* Enable VLAN Hash Table filtering */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); /* Disable VLAN tag inverse matching */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); /* Only filter on the lower 12-bits of the VLAN tag */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); /* In order for the VLAN Hash Table filtering to be effective, * the VLAN tag identifier in the VLAN Tag Register must not * be zero. 
Set the VLAN tag identifier to "1" to enable the * VLAN Hash Table filtering. This implies that a VLAN tag of * 1 will always pass filtering. */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); axgbe_printf(0, "VLAN filtering Enabled\n"); return (0); } static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) { /* Disable VLAN filtering */ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); axgbe_printf(0, "VLAN filtering Disabled\n"); return (0); } static uint32_t xgbe_vid_crc32_le(__le16 vid_le) { uint32_t crc = ~0; uint32_t temp = 0; unsigned char *data = (unsigned char *)&vid_le; unsigned char data_byte = 0; int i, bits; bits = get_bitmask_order(VLAN_VID_MASK); for (i = 0; i < bits; i++) { if ((i % 8) == 0) data_byte = data[i / 8]; temp = ((crc & 1) ^ data_byte) & 1; crc >>= 1; data_byte >>= 1; if (temp) crc ^= CRC32_POLY_LE; } return (crc); } static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) { uint32_t crc; uint16_t vid; uint16_t vlan_hash_table = 0; __le16 vid_le = 0; axgbe_printf(1, "%s: Before updating VLANHTR 0x%x\n", __func__, XGMAC_IOREAD(pdata, MAC_VLANHTR)); /* Generate the VLAN Hash Table value */ for_each_set_bit(vid, pdata->active_vlans, VLAN_NVID) { /* Get the CRC32 value of the VLAN ID */ vid_le = cpu_to_le16(vid); crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; vlan_hash_table |= (1 << crc); axgbe_printf(1, "%s: vid 0x%x vid_le 0x%x crc 0x%x " "vlan_hash_table 0x%x\n", __func__, vid, vid_le, crc, vlan_hash_table); } /* Set the VLAN Hash Table filtering register */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); axgbe_printf(1, "%s: After updating VLANHTR 0x%x\n", __func__, XGMAC_IOREAD(pdata, MAC_VLANHTR)); return (0); } static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable) { unsigned int val = enable ? 1 : 0; if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val) return (0); axgbe_printf(1, "%s promiscuous mode\n", enable ? "entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); /* Hardware will still perform VLAN filtering in promiscuous mode */ if (enable) { axgbe_printf(1, "Disabling rx vlan filtering\n"); xgbe_disable_rx_vlan_filtering(pdata); } else { if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) { axgbe_printf(1, "Enabling rx vlan filtering\n"); xgbe_enable_rx_vlan_filtering(pdata); } } return (0); } static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, unsigned int enable) { unsigned int val = enable ? 1 : 0; if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val) return (0); axgbe_printf(1, "%s allmulti mode\n", enable ? 
"entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); return (0); } static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata, char *addr, unsigned int *mac_reg) { unsigned int mac_addr_hi, mac_addr_lo; uint8_t *mac_addr; mac_addr_lo = 0; mac_addr_hi = 0; if (addr) { mac_addr = (uint8_t *)&mac_addr_lo; mac_addr[0] = addr[0]; mac_addr[1] = addr[1]; mac_addr[2] = addr[2]; mac_addr[3] = addr[3]; mac_addr = (uint8_t *)&mac_addr_hi; mac_addr[0] = addr[4]; mac_addr[1] = addr[5]; axgbe_printf(1, "adding mac address %pM at %#x\n", addr, *mac_reg); XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); } XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); *mac_reg += MAC_MACA_INC; XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); *mac_reg += MAC_MACA_INC; } static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) { unsigned int mac_reg; unsigned int addn_macs; mac_reg = MAC_MACA1HR; addn_macs = pdata->hw_feat.addn_mac; xgbe_set_mac_reg(pdata, pdata->mac_addr, &mac_reg); addn_macs--; /* Clear remaining additional MAC address entries */ while (addn_macs--) xgbe_set_mac_reg(pdata, NULL, &mac_reg); } static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) { /* TODO - add support to set mac hash table */ xgbe_set_mac_addn_addrs(pdata); return (0); } static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, uint8_t *addr) { unsigned int mac_addr_hi, mac_addr_lo; mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | (addr[0] << 0); XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); return (0); } static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) { unsigned int pr_mode, am_mode; - pr_mode = ((pdata->netdev->if_drv_flags & IFF_PPROMISC) != 0); - am_mode = ((pdata->netdev->if_drv_flags & IFF_ALLMULTI) != 0); + pr_mode = ((pdata->netdev->if_flags & IFF_PPROMISC) != 0); + am_mode = ((pdata->netdev->if_flags & IFF_ALLMULTI) != 0); xgbe_set_promiscuous_mode(pdata, pr_mode); xgbe_set_all_multicast_mode(pdata, am_mode); xgbe_add_mac_addresses(pdata); return (0); } static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) { unsigned int reg; if (gpio > 15) return (-EINVAL); reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); reg &= ~(1 << (gpio + 16)); XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); return (0); } static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) { unsigned int reg; if (gpio > 15) return (-EINVAL); reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); reg |= (1 << (gpio + 16)); XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); return (0); } static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { unsigned long flags; unsigned int mmd_address, index, offset; int mmd_data; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); /* The PCS registers are accessed using mmio. The underlying * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 16-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 1 bit and reading 16 bits of data. 
*/ mmd_address <<= 1; index = mmd_address & ~pdata->xpcs_window_mask; offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); mmd_data = XPCS16_IOREAD(pdata, offset); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); return (mmd_data); } static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, int mmd_data) { unsigned long flags; unsigned int mmd_address, index, offset; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); /* The PCS registers are accessed using mmio. The underlying * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 16-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 1 bit and writing 16 bits of data. */ mmd_address <<= 1; index = mmd_address & ~pdata->xpcs_window_mask; offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); XPCS16_IOWRITE(pdata, offset, mmd_data); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); } static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { unsigned long flags; unsigned int mmd_address; int mmd_data; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); /* The PCS registers are accessed using mmio. The underlying APB3 * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 32-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data. */ spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); return (mmd_data); } static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, int mmd_data) { unsigned int mmd_address; unsigned long flags; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); /* The PCS registers are accessed using mmio. The underlying APB3 * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 32-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 2 bits and writing 32 bits of data. 
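* (Illustration with a hypothetical mmd_address of 0x1e204: the window
* select value is 0x1e204 >> 8 = 0x1e2, and the data access lands at
* (0x1e204 & 0xff) << 2 = 0x10 within the selected window.)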
*/ spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); } static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { switch (pdata->vdata->xpcs_access) { case XGBE_XPCS_ACCESS_V1: return (xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg)); case XGBE_XPCS_ACCESS_V2: default: return (xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg)); } } static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, int mmd_data) { switch (pdata->vdata->xpcs_access) { case XGBE_XPCS_ACCESS_V1: return (xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data)); case XGBE_XPCS_ACCESS_V2: default: return (xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data)); } } static unsigned int xgbe_create_mdio_sca(int port, int reg) { unsigned int mdio_sca, da; da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; mdio_sca = 0; XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); return (mdio_sca); } static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg, uint16_t val) { unsigned int mdio_sca, mdio_sccd; mtx_lock_spin(&pdata->mdio_mutex); mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == EWOULDBLOCK) { axgbe_error("%s: MDIO write error\n", __func__); mtx_unlock_spin(&pdata->mdio_mutex); return (-ETIMEDOUT); } mtx_unlock_spin(&pdata->mdio_mutex); return (0); } static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg) { unsigned int mdio_sca, mdio_sccd; mtx_lock_spin(&pdata->mdio_mutex); mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == EWOULDBLOCK) { axgbe_error("%s: MDIO read error\n", __func__); mtx_unlock_spin(&pdata->mdio_mutex); return (-ETIMEDOUT); } mtx_unlock_spin(&pdata->mdio_mutex); return (XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA)); } static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, enum xgbe_mdio_mode mode) { unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); switch (mode) { case XGBE_MDIO_MODE_CL22: if (port > XGMAC_MAX_C22_PORT) return (-EINVAL); reg_val |= (1 << port); break; case XGBE_MDIO_MODE_CL45: break; default: return (-EINVAL); } XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); return (0); } static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) { return (!XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN)); } static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); axgbe_printf(0, "Receive checksum offload Disabled\n"); return (0); } static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); axgbe_printf(0, "Receive checksum offload Enabled\n"); return (0); } static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) { struct xgbe_ring_desc 
*rdesc = rdata->rdesc; /* Reset the Tx descriptor * Set buffer 1 (lo) address to zero * Set buffer 1 (hi) address to zero * Reset all other control bits (IC, TTSE, B2L & B1L) * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) */ rdesc->desc0 = 0; rdesc->desc1 = 0; rdesc->desc2 = 0; rdesc->desc3 = 0; wmb(); } static void xgbe_tx_desc_init(struct xgbe_channel *channel) { struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata; int i; int start_index = ring->cur; /* Initialize all descriptors */ for (i = 0; i < ring->rdesc_count; i++) { rdata = XGBE_GET_DESC_DATA(ring, i); /* Initialize Tx descriptor */ xgbe_tx_desc_reset(rdata); } /* Update the total number of Tx descriptors */ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); /* Update the starting address of descriptor ring */ rdata = XGBE_GET_DESC_DATA(ring, start_index); XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, upper_32_bits(rdata->rdata_paddr)); XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, lower_32_bits(rdata->rdata_paddr)); } static void xgbe_rx_desc_init(struct xgbe_channel *channel) { struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; unsigned int start_index = ring->cur; /* * Just set desc_count and the starting address of the desc list * here. Rest will be done as part of the txrx path. */ /* Update the total number of Rx descriptors */ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); /* Update the starting address of descriptor ring */ rdata = XGBE_GET_DESC_DATA(ring, start_index); XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI, upper_32_bits(rdata->rdata_paddr)); XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO, lower_32_bits(rdata->rdata_paddr)); } static int xgbe_dev_read(struct xgbe_channel *channel) { struct xgbe_prv_data *pdata = channel->pdata; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; struct xgbe_packet_data *packet = &ring->packet_data; unsigned int err, etlt, l34t = 0; axgbe_printf(1, "-->xgbe_dev_read: cur = %d\n", ring->cur); rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdesc = rdata->rdesc; /* Check for data availability */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) return (1); rmb(); if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { /* TODO - Timestamp Context Descriptor */ - XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 1); XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT_NEXT, 0); return (0); } /* Normal Descriptor, be sure Context Descriptor bit is off */ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); /* Indicate if a Context Descriptor is next */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT_NEXT, 1); /* Get the header length */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST, 1); rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, HL); if (rdata->rx.hdr_len) pdata->ext_stats.rx_split_header_packets++; } else XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST, 0); /* Get the RSS hash */ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH, 1); packet->rss_hash = le32_to_cpu(rdesc->desc1); l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); switch (l34t) { case RX_DESC3_L34T_IPV4_TCP: packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV4; 
break; case RX_DESC3_L34T_IPV4_UDP: packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV4; break; case RX_DESC3_L34T_IPV6_TCP: packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV6; break; case RX_DESC3_L34T_IPV6_UDP: packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV6; break; default: packet->rss_hash_type = M_HASHTYPE_OPAQUE; break; } } /* Not all the data has been transferred for this packet */ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { /* This is not the last of the data for this packet */ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST, 0); return (0); } /* This is the last of the data for this packet */ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST, 1); /* Get the packet length */ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); /* Set checksum done indicator as appropriate */ /* TODO - add tunneling support */ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE, 1); /* Check for errors (only valid in last descriptor) */ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); axgbe_printf(1, "%s: err=%u, etlt=%#x\n", __func__, err, etlt); if (!err || !etlt) { /* No error if err is 0 or etlt is 0 */ if (etlt == 0x09) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG, 1); packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, RX_NORMAL_DESC0, OVT); axgbe_printf(1, "vlan-ctag=%#06x\n", packet->vlan_ctag); } } else { unsigned int tnp = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, TNP); if ((etlt == 0x05) || (etlt == 0x06)) { axgbe_printf(1, "%s: err1 l34t %d err 0x%x etlt 0x%x\n", __func__, l34t, err, etlt); XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE, 0); XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, TNPCSUM_DONE, 0); pdata->ext_stats.rx_csum_errors++; } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { axgbe_printf(1, "%s: err2 l34t %d err 0x%x etlt 0x%x\n", __func__, l34t, err, etlt); XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE, 0); XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, TNPCSUM_DONE, 0); pdata->ext_stats.rx_vxlan_csum_errors++; } else { axgbe_printf(1, "%s: tnp %d l34t %d err 0x%x etlt 0x%x\n", __func__, tnp, l34t, err, etlt); axgbe_printf(1, "%s: Channel: %d SR 0x%x DSR 0x%x \n", __func__, channel->queue_index, XGMAC_DMA_IOREAD(channel, DMA_CH_SR), XGMAC_DMA_IOREAD(channel, DMA_CH_DSR)); axgbe_printf(1, "%s: ring cur %d dirty %d\n", __func__, ring->cur, ring->dirty); axgbe_printf(1, "%s: Desc 0x%08x-0x%08x-0x%08x-0x%08x\n", __func__, rdesc->desc0, rdesc->desc1, rdesc->desc2, rdesc->desc3); XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, FRAME, 1); } } axgbe_printf(1, "<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur); return (0); } static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) { /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT)); } static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) { /* Rx and Tx share LD bit, so check TDES3.LD bit */ return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD)); } static int xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id) { struct xgbe_prv_data *pdata = channel->pdata; axgbe_printf(1, "enable_int: DMA_CH_IER read - 0x%x\n", channel->curr_ier); switch (int_id) { case XGMAC_INT_DMA_CH_SR_TI: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, 
TIE, 1); break; case XGMAC_INT_DMA_CH_SR_TPS: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); break; case XGMAC_INT_DMA_CH_SR_TBU: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RI: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); break; case XGMAC_INT_DMA_CH_SR_RBU: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RPS: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); break; case XGMAC_INT_DMA_CH_SR_TI_RI: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); break; case XGMAC_INT_DMA_CH_SR_FBE: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); break; case XGMAC_INT_DMA_ALL: channel->curr_ier |= channel->saved_ier; break; default: return (-1); } XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); axgbe_printf(1, "enable_int: DMA_CH_IER write - 0x%x\n", channel->curr_ier); return (0); } static int xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id) { struct xgbe_prv_data *pdata = channel->pdata; axgbe_printf(1, "disable_int: DMA_CH_IER read - 0x%x\n", channel->curr_ier); switch (int_id) { case XGMAC_INT_DMA_CH_SR_TI: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); break; case XGMAC_INT_DMA_CH_SR_TPS: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); break; case XGMAC_INT_DMA_CH_SR_TBU: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RI: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_RBU: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RPS: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); break; case XGMAC_INT_DMA_CH_SR_TI_RI: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_FBE: XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); break; case XGMAC_INT_DMA_ALL: channel->saved_ier = channel->curr_ier; channel->curr_ier = 0; break; default: return (-1); } XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); axgbe_printf(1, "disable_int: DMA_CH_IER write - 0x%x\n", channel->curr_ier); return (0); } static int __xgbe_exit(struct xgbe_prv_data *pdata) { unsigned int count = 2000; /* Issue a software reset */ XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); DELAY(10); /* Poll Until Poll Condition */ while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) DELAY(500); if (!count) return (-EBUSY); return (0); } static int xgbe_exit(struct xgbe_prv_data *pdata) { int ret; /* To guard against possible incorrectly generated interrupts, * issue the software reset twice. 
*/ ret = __xgbe_exit(pdata); if (ret) { axgbe_error("%s: exit error %d\n", __func__, ret); return (ret); } return (__xgbe_exit(pdata)); } static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) { unsigned int i, count; if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) return (0); for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); /* Poll Until Poll Condition */ for (i = 0; i < pdata->tx_q_count; i++) { count = 2000; while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, MTL_Q_TQOMR, FTQ)) DELAY(500); if (!count) return (-EBUSY); } return (0); } static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata) { unsigned int sbmr; sbmr = XGMAC_IOREAD(pdata, DMA_SBMR); /* Set enhanced addressing mode */ XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1); /* Set the System Bus mode */ XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1); XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2); XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal); XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1); XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1); XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr); /* Set descriptor fetching threshold */ if (pdata->vdata->tx_desc_prefetch) XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS, pdata->vdata->tx_desc_prefetch); if (pdata->vdata->rx_desc_prefetch) XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS, pdata->vdata->rx_desc_prefetch); } static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr); XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr); if (pdata->awarcr) XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr); } static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) { unsigned int i; /* Set Tx to weighted round robin scheduling algorithm */ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); /* Set Tx traffic classes to use WRR algorithm with equal weights */ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_ETS); XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); } /* Set Rx to strict priority algorithm */ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); } static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata, unsigned int queue, unsigned int q_fifo_size) { unsigned int frame_fifo_size; unsigned int rfa, rfd; frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata)); axgbe_printf(1, "%s: queue %d q_fifo_size %d frame_fifo_size 0x%x\n", __func__, queue, q_fifo_size, frame_fifo_size); /* TODO - add pfc/ets related support */ /* This path deals with just maximum frame sizes which are * limited to a jumbo frame of 9,000 (plus headers, etc.) * so we can never exceed the maximum allowable RFA/RFD * values. */ if (q_fifo_size <= 2048) { /* rx_rfd to zero to signal no flow control */ pdata->rx_rfa[queue] = 0; pdata->rx_rfd[queue] = 0; return; } if (q_fifo_size <= 4096) { /* Between 2048 and 4096 */ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ return; } if (q_fifo_size <= frame_fifo_size) { /* Between 4096 and max-frame */ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ return; } if (q_fifo_size <= (frame_fifo_size * 3)) { /* Between max-frame and 3 max-frames, * trigger if we get just over a frame of data and * resume when we have just under half a frame left. 
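* (Worked example with hypothetical sizes: for a 16 KB queue fifo and a
* 9.5 KB aligned max frame, flow control asserts once just over one
* frame, ~9.5 KB, is buffered, and deasserts when the buffered data
* drops back below ~4.75 KB.)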
*/ rfa = q_fifo_size - frame_fifo_size; rfd = rfa + (frame_fifo_size / 2); } else { /* Above 3 max-frames - trigger when just over * 2 frames of space available */ rfa = frame_fifo_size * 2; rfa += XGMAC_FLOW_CONTROL_UNIT; rfd = rfa + frame_fifo_size; } pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa); pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd); axgbe_printf(1, "%s: forced queue %d rfa 0x%x rfd 0x%x\n", __func__, queue, pdata->rx_rfa[queue], pdata->rx_rfd[queue]); } static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata, unsigned int *fifo) { unsigned int q_fifo_size; unsigned int i; for (i = 0; i < pdata->rx_q_count; i++) { q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT; axgbe_printf(1, "%s: fifo[%d] - 0x%x q_fifo_size 0x%x\n", __func__, i, fifo[i], q_fifo_size); xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size); } } static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->rx_q_count; i++) { axgbe_printf(1, "%s: queue %d rfa %d rfd %d\n", __func__, i, pdata->rx_rfa[i], pdata->rx_rfd[i]); XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, pdata->rx_rfa[i]); XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, pdata->rx_rfd[i]); axgbe_printf(1, "%s: MTL_Q_RQFCR 0x%x\n", __func__, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR)); } } static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata) { /* The configured value may not be the actual amount of fifo RAM */ return (min_t(unsigned int, pdata->tx_max_fifo_size, pdata->hw_feat.tx_fifo_size)); } static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata) { /* The configured value may not be the actual amount of fifo RAM */ return (min_t(unsigned int, pdata->rx_max_fifo_size, pdata->hw_feat.rx_fifo_size)); } static void xgbe_calculate_equal_fifo(unsigned int fifo_size, unsigned int queue_count, unsigned int *fifo) { unsigned int q_fifo_size; unsigned int p_fifo; unsigned int i; q_fifo_size = fifo_size / queue_count; /* Calculate the fifo setting by dividing the queue's fifo size * by the fifo allocation increment (with 0 representing the * base allocation increment so decrement the result by 1). */ p_fifo = q_fifo_size / XGMAC_FIFO_UNIT; if (p_fifo) p_fifo--; /* Distribute the fifo equally amongst the queues */ for (i = 0; i < queue_count; i++) fifo[i] = p_fifo; } static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size, unsigned int queue_count, unsigned int *fifo) { unsigned int i; MPASS(powerof2(XGMAC_FIFO_MIN_ALLOC)); if (queue_count <= IEEE_8021QAZ_MAX_TCS) return (fifo_size); /* Rx queues 9 and up are for specialized packets, * such as PTP or DCB control packets, etc. 
and * don't require a large fifo */ for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) { fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1; fifo_size -= XGMAC_FIFO_MIN_ALLOC; } return (fifo_size); } static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) { unsigned int fifo_size; unsigned int fifo[XGBE_MAX_QUEUES]; unsigned int i; fifo_size = xgbe_get_tx_fifo_size(pdata); axgbe_printf(1, "%s: fifo_size 0x%x\n", __func__, fifo_size); xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo); for (i = 0; i < pdata->tx_q_count; i++) { XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]); axgbe_printf(1, "Tx q %d FIFO Size 0x%x\n", i, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR)); } axgbe_printf(1, "%d Tx hardware queues, %d byte fifo per queue\n", pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); } static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) { unsigned int fifo_size; unsigned int fifo[XGBE_MAX_QUEUES]; unsigned int prio_queues; unsigned int i; /* TODO - add pfc/ets related support */ /* Clear any DCB related fifo/queue information */ fifo_size = xgbe_get_rx_fifo_size(pdata); prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); axgbe_printf(1, "%s: fifo_size 0x%x rx_q_cnt %d prio %d\n", __func__, fifo_size, pdata->rx_q_count, prio_queues); /* Assign a minimum fifo to the non-VLAN priority queues */ fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo); xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo); for (i = 0; i < pdata->rx_q_count; i++) { XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]); axgbe_printf(1, "Rx q %d FIFO Size 0x%x\n", i, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR)); } xgbe_calculate_flow_control_threshold(pdata, fifo); xgbe_config_flow_control_threshold(pdata); axgbe_printf(1, "%u Rx hardware queues, %u byte fifo/queue\n", pdata->rx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); } static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) { unsigned int qptc, qptc_extra, queue; unsigned int prio_queues; unsigned int ppq, ppq_extra, prio; unsigned int mask; unsigned int i, j, reg, reg_val; /* Map the MTL Tx Queues to Traffic Classes * Note: Tx Queues >= Traffic Classes */ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { for (j = 0; j < qptc; j++) { axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i); XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, Q2TCMAP, i); pdata->q2tc_map[queue++] = i; } if (i < qptc_extra) { axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i); XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, Q2TCMAP, i); pdata->q2tc_map[queue++] = i; } } /* Map the 8 VLAN priority values to available MTL Rx queues */ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; reg = MAC_RQC2R; reg_val = 0; for (i = 0, prio = 0; i < prio_queues;) { mask = 0; for (j = 0; j < ppq; j++) { axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); pdata->prio2q_map[prio++] = i; } if (i < ppq_extra) { axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i); mask |= (1 << prio); pdata->prio2q_map[prio++] = i; } reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) continue; XGMAC_IOWRITE(pdata, reg, reg_val); reg += MAC_RQC2_INC; reg_val = 0; } /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ reg = MTL_RQDCM0R; reg_val = 
0; for (i = 0; i < pdata->rx_q_count;) { reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) continue; XGMAC_IOWRITE(pdata, reg, reg_val); reg += MTL_RQDCM_INC; reg_val = 0; } } static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) { xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev)); /* Filtering is done using perfect filtering and hash filtering */ if (pdata->hw_feat.hash_table_size) { XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); } } static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) { unsigned int val; val = (if_getmtu(pdata->netdev) > XGMAC_STD_PACKET_MTU) ? 1 : 0; XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); } static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata) { xgbe_set_speed(pdata, pdata->phy_speed); } static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) { if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM)) xgbe_enable_rx_csum(pdata); else xgbe_disable_rx_csum(pdata); } static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) { /* Indicate that VLAN Tx CTAGs come from context descriptors */ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); /* Set the current VLAN Hash Table register value */ xgbe_update_vlan_hash_table(pdata); if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) { axgbe_printf(1, "Enabling rx vlan filtering\n"); xgbe_enable_rx_vlan_filtering(pdata); } else { axgbe_printf(1, "Disabling rx vlan filtering\n"); xgbe_disable_rx_vlan_filtering(pdata); } if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWTAGGING)) { axgbe_printf(1, "Enabling rx vlan stripping\n"); xgbe_enable_rx_vlan_stripping(pdata); } else { axgbe_printf(1, "Disabling rx vlan stripping\n"); xgbe_disable_rx_vlan_stripping(pdata); } } static uint64_t xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) { bool read_hi; uint64_t val; if (pdata->vdata->mmc_64bit) { switch (reg_lo) { /* These registers are always 32 bit */ case MMC_RXRUNTERROR: case MMC_RXJABBERERROR: case MMC_RXUNDERSIZE_G: case MMC_RXOVERSIZE_G: case MMC_RXWATCHDOGERROR: read_hi = false; break; default: read_hi = true; } } else { switch (reg_lo) { /* These registers are always 64 bit */ case MMC_TXOCTETCOUNT_GB_LO: case MMC_TXOCTETCOUNT_G_LO: case MMC_RXOCTETCOUNT_GB_LO: case MMC_RXOCTETCOUNT_G_LO: read_hi = true; break; default: read_hi = false; } } val = XGMAC_IOREAD(pdata, reg_lo); if (read_hi) val |= ((uint64_t)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); return (val); } static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) { struct xgbe_mmc_stats *stats = &pdata->mmc_stats; unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) stats->txoctetcount_gb += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) stats->txframecount_gb += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) stats->txbroadcastframes_g += xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) stats->txmulticastframes_g += xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) stats->tx64octets_gb += xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) stats->tx65to127octets_gb += 
xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) stats->tx128to255octets_gb += xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) stats->tx256to511octets_gb += xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) stats->tx512to1023octets_gb += xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) stats->tx1024tomaxoctets_gb += xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) stats->txunicastframes_gb += xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) stats->txmulticastframes_gb += xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) stats->txbroadcastframes_gb += xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) stats->txunderflowerror += xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) stats->txoctetcount_g += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) stats->txframecount_g += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) stats->txpauseframes += xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) stats->txvlanframes_g += xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); } static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) { struct xgbe_mmc_stats *stats = &pdata->mmc_stats; unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) stats->rxframecount_gb += xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) stats->rxoctetcount_gb += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) stats->rxoctetcount_g += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) stats->rxbroadcastframes_g += xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) stats->rxmulticastframes_g += xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) stats->rxcrcerror += xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) stats->rxrunterror += xgbe_mmc_read(pdata, MMC_RXRUNTERROR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) stats->rxjabbererror += xgbe_mmc_read(pdata, MMC_RXJABBERERROR); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) stats->rxundersize_g += xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) stats->rxoversize_g += xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) stats->rx64octets_gb += xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) stats->rx65to127octets_gb += xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) stats->rx128to255octets_gb += xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
stats->rx256to511octets_gb += xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) stats->rx512to1023octets_gb += xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) stats->rx1024tomaxoctets_gb += xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) stats->rxunicastframes_g += xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) stats->rxlengtherror += xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) stats->rxoutofrangetype += xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) stats->rxpauseframes += xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) stats->rxfifooverflow += xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) stats->rxvlanframes_gb += xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) stats->rxwatchdogerror += xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); } static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) { struct xgbe_mmc_stats *stats = &pdata->mmc_stats; /* Freeze counters */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); stats->txoctetcount_gb += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); stats->txframecount_gb += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); stats->txbroadcastframes_g += xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); stats->txmulticastframes_g += xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); stats->tx64octets_gb += xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); stats->tx65to127octets_gb += xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); stats->tx128to255octets_gb += xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); stats->tx256to511octets_gb += xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); stats->tx512to1023octets_gb += xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); stats->tx1024tomaxoctets_gb += xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); stats->txunicastframes_gb += xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); stats->txmulticastframes_gb += xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); stats->txbroadcastframes_gb += xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); stats->txunderflowerror += xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); stats->txoctetcount_g += xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); stats->txframecount_g += xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); stats->txpauseframes += xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); stats->txvlanframes_g += xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); stats->rxframecount_gb += xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); stats->rxoctetcount_gb += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); stats->rxoctetcount_g += xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); stats->rxbroadcastframes_g += xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); stats->rxmulticastframes_g += xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); stats->rxcrcerror += xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); stats->rxrunterror += xgbe_mmc_read(pdata, MMC_RXRUNTERROR); stats->rxjabbererror += xgbe_mmc_read(pdata, MMC_RXJABBERERROR); stats->rxundersize_g += xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); stats->rxoversize_g += xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); stats->rx64octets_gb += xgbe_mmc_read(pdata, 
MMC_RX64OCTETS_GB_LO); stats->rx65to127octets_gb += xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); stats->rx128to255octets_gb += xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); stats->rx256to511octets_gb += xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); stats->rx512to1023octets_gb += xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); stats->rx1024tomaxoctets_gb += xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); stats->rxunicastframes_g += xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); stats->rxlengtherror += xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); stats->rxoutofrangetype += xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); stats->rxpauseframes += xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); stats->rxfifooverflow += xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); stats->rxvlanframes_gb += xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); stats->rxwatchdogerror += xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); /* Un-freeze counters */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); } static void xgbe_config_mmc(struct xgbe_prv_data *pdata) { /* Set counters to reset on read */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); /* Reset the counters */ XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); } static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue) { unsigned int tx_status; unsigned long tx_timeout; /* The Tx engine cannot be stopped if it is actively processing * packets. Wait for the Tx queue to empty the Tx fifo. Don't * wait forever though... */ tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz); while (ticks < tx_timeout) { tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) break; DELAY(500); } if (ticks >= tx_timeout) axgbe_printf(1, "timed out waiting for Tx queue %u to empty\n", queue); } static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue) { unsigned int tx_dsr, tx_pos, tx_qidx; unsigned int tx_status; unsigned long tx_timeout; if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) return (xgbe_txq_prepare_tx_stop(pdata, queue)); /* Calculate the status register to read and the position within */ if (queue < DMA_DSRX_FIRST_QUEUE) { tx_dsr = DMA_DSR0; tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; } else { tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + DMA_DSRX_TPS_START; } /* The Tx engine cannot be stopped if it is actively processing * descriptors. Wait for the Tx engine to enter the stopped or * suspended state. Don't wait forever though... 
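 * The deadline below is expressed in scheduler ticks (XGBE_DMA_STOP_TIMEOUT seconds times hz), and the status register is polled every 500 microseconds until it is reached.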
*/ tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz); while (ticks < tx_timeout) { tx_status = XGMAC_IOREAD(pdata, tx_dsr); tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); if ((tx_status == DMA_TPS_STOPPED) || (tx_status == DMA_TPS_SUSPENDED)) break; DELAY(500); } if (ticks >= tx_timeout) axgbe_printf(1, "timed out waiting for Tx DMA channel %u to stop\n", queue); } static void xgbe_enable_tx(struct xgbe_prv_data *pdata) { unsigned int i; /* Enable each Tx DMA channel */ for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->tx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); } /* Enable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, MTL_Q_ENABLED); /* Enable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); } static void xgbe_disable_tx(struct xgbe_prv_data *pdata) { unsigned int i; /* Prepare for Tx DMA channel stop */ for (i = 0; i < pdata->tx_q_count; i++) xgbe_prepare_tx_stop(pdata, i); /* Disable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); /* Disable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); /* Disable each Tx DMA channel */ for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->tx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); } } static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, unsigned int queue) { unsigned int rx_status; unsigned long rx_timeout; /* The Rx engine cannot be stopped if it is actively processing * packets. Wait for the Rx queue to empty the Rx fifo. Don't * wait forever though... */ rx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz); while (ticks < rx_timeout) { rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) break; DELAY(500); } if (ticks >= rx_timeout) axgbe_printf(1, "timed out waiting for Rx queue %d to empty\n", queue); } static void xgbe_enable_rx(struct xgbe_prv_data *pdata) { unsigned int reg_val, i; /* Enable each Rx DMA channel */ for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); } /* Enable each Rx queue */ reg_val = 0; for (i = 0; i < pdata->rx_q_count; i++) reg_val |= (0x02 << (i << 1)); XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); /* Enable MAC Rx */ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); } static void xgbe_disable_rx(struct xgbe_prv_data *pdata) { unsigned int i; /* Disable MAC Rx */ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); /* Prepare for Rx DMA channel stop */ for (i = 0; i < pdata->rx_q_count; i++) xgbe_prepare_rx_stop(pdata, i); /* Disable each Rx queue */ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); /* Disable each Rx DMA channel */ for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); } } static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) { unsigned int i; /* Enable each Tx DMA channel */ for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->tx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 
1); } /* Enable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); } static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) { unsigned int i; /* Prepare for Tx DMA channel stop */ for (i = 0; i < pdata->tx_q_count; i++) xgbe_prepare_tx_stop(pdata, i); /* Disable MAC Tx */ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); /* Disable each Tx DMA channel */ for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->tx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); } } static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) { unsigned int i; /* Enable each Rx DMA channel */ for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); } } static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) { unsigned int i; /* Disable each Rx DMA channel */ for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); } } static int xgbe_init(struct xgbe_prv_data *pdata) { struct xgbe_desc_if *desc_if = &pdata->desc_if; int ret; /* Flush Tx queues */ ret = xgbe_flush_tx_queues(pdata); if (ret) { axgbe_error("error flushing TX queues\n"); return (ret); } /* * Initialize DMA related features */ xgbe_config_dma_bus(pdata); xgbe_config_dma_cache(pdata); xgbe_config_osp_mode(pdata); xgbe_config_pbl_val(pdata); xgbe_config_rx_coalesce(pdata); xgbe_config_tx_coalesce(pdata); xgbe_config_rx_buffer_size(pdata); xgbe_config_tso_mode(pdata); xgbe_config_sph_mode(pdata); xgbe_config_rss(pdata); desc_if->wrapper_tx_desc_init(pdata); desc_if->wrapper_rx_desc_init(pdata); xgbe_enable_dma_interrupts(pdata); /* * Initialize MTL related features */ xgbe_config_mtl_mode(pdata); xgbe_config_queue_mapping(pdata); xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); xgbe_config_tx_threshold(pdata, pdata->tx_threshold); xgbe_config_rx_threshold(pdata, pdata->rx_threshold); xgbe_config_tx_fifo_size(pdata); xgbe_config_rx_fifo_size(pdata); /*TODO: Error Packet and undersized good Packet forwarding enable (FEP and FUP) */ xgbe_enable_mtl_interrupts(pdata); /* * Initialize MAC related features */ xgbe_config_mac_address(pdata); xgbe_config_rx_mode(pdata); xgbe_config_jumbo_enable(pdata); xgbe_config_flow_control(pdata); xgbe_config_mac_speed(pdata); xgbe_config_checksum_offload(pdata); xgbe_config_vlan_support(pdata); xgbe_config_mmc(pdata); xgbe_enable_mac_interrupts(pdata); return (0); } void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) { hw_if->tx_complete = xgbe_tx_complete; hw_if->set_mac_address = xgbe_set_mac_address; hw_if->config_rx_mode = xgbe_config_rx_mode; hw_if->enable_rx_csum = xgbe_enable_rx_csum; hw_if->disable_rx_csum = xgbe_disable_rx_csum; hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; hw_if->read_mmd_regs = xgbe_read_mmd_regs; hw_if->write_mmd_regs = xgbe_write_mmd_regs; hw_if->set_speed = xgbe_set_speed; hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs; hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs; hw_if->set_gpio = xgbe_set_gpio; hw_if->clr_gpio = xgbe_clr_gpio; hw_if->enable_tx = xgbe_enable_tx; hw_if->disable_tx = 
xgbe_disable_tx; hw_if->enable_rx = xgbe_enable_rx; hw_if->disable_rx = xgbe_disable_rx; hw_if->powerup_tx = xgbe_powerup_tx; hw_if->powerdown_tx = xgbe_powerdown_tx; hw_if->powerup_rx = xgbe_powerup_rx; hw_if->powerdown_rx = xgbe_powerdown_rx; hw_if->dev_read = xgbe_dev_read; hw_if->enable_int = xgbe_enable_int; hw_if->disable_int = xgbe_disable_int; hw_if->init = xgbe_init; hw_if->exit = xgbe_exit; /* Descriptor related Sequences have to be initialized here */ hw_if->tx_desc_init = xgbe_tx_desc_init; hw_if->rx_desc_init = xgbe_rx_desc_init; hw_if->tx_desc_reset = xgbe_tx_desc_reset; hw_if->is_last_desc = xgbe_is_last_desc; hw_if->is_context_desc = xgbe_is_context_desc; /* For FLOW ctrl */ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; /* For RX coalescing */ hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; hw_if->usec_to_riwt = xgbe_usec_to_riwt; hw_if->riwt_to_usec = xgbe_riwt_to_usec; /* For RX and TX threshold config */ hw_if->config_rx_threshold = xgbe_config_rx_threshold; hw_if->config_tx_threshold = xgbe_config_tx_threshold; /* For RX and TX Store and Forward Mode config */ hw_if->config_rsf_mode = xgbe_config_rsf_mode; hw_if->config_tsf_mode = xgbe_config_tsf_mode; /* For TX DMA Operating on Second Frame config */ hw_if->config_osp_mode = xgbe_config_osp_mode; /* For MMC statistics support */ hw_if->tx_mmc_int = xgbe_tx_mmc_int; hw_if->rx_mmc_int = xgbe_rx_mmc_int; hw_if->read_mmc_stats = xgbe_read_mmc_stats; /* For Receive Side Scaling */ hw_if->enable_rss = xgbe_enable_rss; hw_if->disable_rss = xgbe_disable_rss; hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; } diff --git a/sys/dev/axgbe/xgbe-sysctl.c b/sys/dev/axgbe/xgbe-sysctl.c index eee7c61170de..a3e777e0ca26 100644 --- a/sys/dev/axgbe/xgbe-sysctl.c +++ b/sys/dev/axgbe/xgbe-sysctl.c @@ -1,1715 +1,1719 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020 Advanced Micro Devices, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Contact Information : * Rajesh Kumar * Arpan Palit */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "xgbe.h" #include "xgbe-common.h" #define SYSCTL_BUF_LEN 64 typedef enum{ /* Coalesce flag */ rx_coalesce_usecs = 1, rx_max_coalesced_frames, rx_coalesce_usecs_irq, rx_max_coalesced_frames_irq, tx_coalesce_usecs, tx_max_coalesced_frames, tx_coalesce_usecs_irq, tx_max_coalesced_frames_irq, stats_block_coalesce_usecs, use_adaptive_rx_coalesce, use_adaptive_tx_coalesce, pkt_rate_low, rx_coalesce_usecs_low, rx_max_coalesced_frames_low, tx_coalesce_usecs_low, tx_max_coalesced_frames_low, pkt_rate_high, rx_coalesce_usecs_high, rx_max_coalesced_frames_high, tx_coalesce_usecs_high, tx_max_coalesced_frames_high, rate_sample_interval, /* Pause flag */ autoneg, tx_pause, rx_pause, /* link settings */ speed, duplex, /* Ring settings */ rx_pending, rx_mini_pending, rx_jumbo_pending, tx_pending, /* Channels settings */ rx_count, tx_count, other_count, combined_count, } sysctl_variable_t; typedef enum { SYSL_NONE, SYSL_BOOL, SYSL_S32, SYSL_U8, SYSL_U16, SYSL_U32, SYSL_U64, SYSL_BE16, SYSL_IP4, SYSL_STR, SYSL_FLAG, SYSL_MAC, } sysctl_type_t; struct sysctl_info { uint8_t name[32]; sysctl_type_t type; sysctl_variable_t flag; uint8_t support[16]; }; struct sysctl_op { /* Coalesce options */ unsigned int rx_coalesce_usecs; unsigned int rx_max_coalesced_frames; unsigned int rx_coalesce_usecs_irq; unsigned int rx_max_coalesced_frames_irq; unsigned int tx_coalesce_usecs; unsigned int tx_max_coalesced_frames; unsigned int tx_coalesce_usecs_irq; unsigned int tx_max_coalesced_frames_irq; unsigned int stats_block_coalesce_usecs; unsigned int use_adaptive_rx_coalesce; unsigned int use_adaptive_tx_coalesce; unsigned int pkt_rate_low; unsigned int rx_coalesce_usecs_low; unsigned int rx_max_coalesced_frames_low; unsigned int tx_coalesce_usecs_low; unsigned int tx_max_coalesced_frames_low; unsigned int pkt_rate_high; unsigned int rx_coalesce_usecs_high; unsigned int rx_max_coalesced_frames_high; unsigned int tx_coalesce_usecs_high; unsigned int tx_max_coalesced_frames_high; unsigned int rate_sample_interval; /* Pause options */ unsigned int autoneg; unsigned int tx_pause; unsigned int rx_pause; /* Link settings options */ unsigned int speed; unsigned int duplex; /* Ring param options */ unsigned int rx_max_pending; unsigned int rx_mini_max_pending; unsigned int rx_jumbo_max_pending; unsigned int tx_max_pending; unsigned int rx_pending; unsigned int rx_mini_pending; unsigned int rx_jumbo_pending; unsigned int tx_pending; /* Channels options */ unsigned int max_rx; unsigned int max_tx; unsigned int max_other; unsigned int max_combined; unsigned int rx_count; unsigned int tx_count; unsigned int other_count; unsigned int combined_count; } sys_op; #define GSTRING_LEN 32 struct xgbe_stats { char stat_string[GSTRING_LEN]; int stat_size; int stat_offset; }; #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #define XGMAC_MMC_STAT(_string, _var) \ { _string, \ FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \ offsetof(struct xgbe_prv_data, mmc_stats._var), \ } #define XGMAC_EXT_STAT(_string, _var) \ { _string, \ FIELD_SIZEOF(struct xgbe_ext_stats, _var), \ offsetof(struct xgbe_prv_data, ext_stats._var), \ } static const struct xgbe_stats xgbe_gstring_stats[] = { XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb), XGMAC_MMC_STAT("tx_packets", txframecount_gb), XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb), XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb), XGMAC_MMC_STAT("tx_multicast_packets",
txmulticastframes_gb), XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g), XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets), XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets), XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb), XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb), XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb), XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb), XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb), XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror), XGMAC_MMC_STAT("tx_pause_frames", txpauseframes), XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb), XGMAC_MMC_STAT("rx_packets", rxframecount_gb), XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g), XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g), XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g), XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb), XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets), XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb), XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb), XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb), XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb), XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g), XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g), XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror), XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror), XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror), XGMAC_MMC_STAT("rx_length_errors", rxlengtherror), XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype), XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow), XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror), XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors), XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors), XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes), XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets), XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable), }; #define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats) char** alloc_sysctl_buffer(void); void get_val(char *buf, char **op, char **val, int *n_op); void fill_data(struct sysctl_op *sys_op, int flag, unsigned int value); static int exit_bad_op(void) { printf("SYSCTL: bad command line option(s)\n"); return(-EINVAL); } static inline unsigned fls_long(unsigned long l) { if (sizeof(l) == 4) return (fls(l)); return (fls64(l)); } static inline __attribute__((const)) unsigned long __rounddown_pow_of_two(unsigned long n) { return (1UL << (fls_long(n) - 1)); } static inline int get_ubuf(struct sysctl_req *req, char *ubuf) { int rc; printf("%s: len: 0x%zx idx: 0x%zx\n", __func__, req->newlen, req->newidx); if (req->newlen >= SYSCTL_BUF_LEN) return (-EINVAL); rc = SYSCTL_IN(req, ubuf, req->newlen); if (rc) return (rc); ubuf[req->newlen] = '\0'; return (0); } char** alloc_sysctl_buffer(void) { char **buffer; int i; buffer = malloc(sizeof(char *)*32, M_AXGBE, M_WAITOK | M_ZERO); for(i = 0; i < 32; i++) buffer[i] = malloc(sizeof(char)*32, M_AXGBE, M_WAITOK | M_ZERO); return (buffer); } void get_val(char *buf, char **op, char **val, int *n_op) { int blen = strlen(buf); int count = 0; int i, j; *n_op = 0; for (i = 0; i < blen; i++) { count++; /* Get sysctl command option */ for (j = 0; buf[i] != ' '; j++) { if
(i >= blen) break; op[*n_op][j] = buf[i++]; } op[*n_op][j] = '\0'; if (i >= strlen(buf)) goto out; /* Get sysctl value */ i++; for (j = 0; buf[i] != ' '; j++) { if (i >= blen) break; val[*n_op][j] = buf[i++]; } val[*n_op][j] = '\0'; if (i >= strlen(buf)) goto out; *n_op = count; } out: *n_op = count; } void fill_data(struct sysctl_op *sys_op, int flag, unsigned int value) { switch(flag) { case 1: sys_op->rx_coalesce_usecs = value; break; case 2: sys_op->rx_max_coalesced_frames = value; break; case 3: sys_op->rx_coalesce_usecs_irq = value; break; case 4: sys_op->rx_max_coalesced_frames_irq = value; break; case 5: sys_op->tx_coalesce_usecs = value; break; case 6: sys_op->tx_max_coalesced_frames = value; break; case 7: sys_op->tx_coalesce_usecs_irq = value; break; case 8: sys_op->tx_max_coalesced_frames_irq = value; break; case 9: sys_op->stats_block_coalesce_usecs = value; break; case 10: sys_op->use_adaptive_rx_coalesce = value; break; case 11: sys_op->use_adaptive_tx_coalesce = value; break; case 12: sys_op->pkt_rate_low = value; break; case 13: sys_op->rx_coalesce_usecs_low = value; break; case 14: sys_op->rx_max_coalesced_frames_low = value; break; case 15: sys_op->tx_coalesce_usecs_low = value; break; case 16: sys_op->tx_max_coalesced_frames_low = value; break; case 17: sys_op->pkt_rate_high = value; break; case 18: sys_op->rx_coalesce_usecs_high = value; break; case 19: sys_op->rx_max_coalesced_frames_high = value; break; case 20: sys_op->tx_coalesce_usecs_high = value; break; case 21: sys_op->tx_max_coalesced_frames_high = value; break; case 22: sys_op->rate_sample_interval = value; break; case 23: sys_op->autoneg = value; break; case 24: sys_op->tx_pause = value; break; case 25: sys_op->rx_pause = value; break; case 26: sys_op->speed = value; break; case 27: sys_op->duplex = value; break; case 28: sys_op->rx_pending = value; break; case 29: sys_op->rx_mini_pending = value; break; case 30: sys_op->rx_jumbo_pending = value; break; case 31: sys_op->tx_pending = value; break; default: printf("Option error\n"); } } static int parse_generic_sysctl(struct xgbe_prv_data *pdata, char *buf, struct sysctl_info *info, unsigned int n_info) { struct sysctl_op *sys_op = pdata->sys_op; unsigned int value; char **op, **val; int n_op = 0; int rc = 0; int i, idx; op = alloc_sysctl_buffer(); val = alloc_sysctl_buffer(); get_val(buf, op, val, &n_op); for (i = 0; i < n_op; i++) { for (idx = 0; idx < n_info; idx++) { if (strcmp(info[idx].name, op[i]) == 0) { if (strcmp(info[idx].support, "not-supported") == 0){ axgbe_printf(1, "ignoring not-supported " "option \"%s\"\n", info[idx].name); break; } switch(info[idx].type) { case SYSL_BOOL: { if (!strcmp(val[i], "on")) fill_data(sys_op, info[idx].flag, 1); else if (!strcmp(val[i], "off")) fill_data(sys_op, info[idx].flag, 0); else rc = exit_bad_op(); break; } case SYSL_S32: sscanf(val[i], "%u", &value); fill_data(sys_op, info[idx].flag, value); break; case SYSL_U8: if (!strcmp(val[i], "half")) fill_data(sys_op, info[idx].flag, DUPLEX_HALF); else if (!strcmp(val[i], "full")) fill_data(sys_op, info[idx].flag, DUPLEX_FULL); else rc = exit_bad_op(); break; default: rc = exit_bad_op(); } } } } for(i = 0; i < 32; i++) free(op[i], M_AXGBE); free(op, M_AXGBE); for(i = 0; i < 32; i++) free(val[i], M_AXGBE); free(val, M_AXGBE); return (rc); } static int sysctl_xgmac_reg_addr_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; char buf[buf_size]; struct sbuf *sb; unsigned int reg; int rc = 0; if (req->newptr == NULL) {
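/* A NULL req->newptr means no new value was passed in, i.e. this is a read request: report the currently selected register address through an sbuf. */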
sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } axgbe_printf(2, "READ: %s: sysctl_xgmac_reg: 0x%x\n", __func__, pdata->sysctl_xgmac_reg); sbuf_printf(sb, "\nXGMAC reg_addr: 0x%x\n", pdata->sysctl_xgmac_reg); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%x", ®); axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg); pdata->sysctl_xgmac_reg = reg; } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_get_drv_info_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; struct xgbe_hw_features *hw_feat = &pdata->hw_feat; ssize_t buf_size = 64; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } sbuf_printf(sb, "\ndriver: %s", XGBE_DRV_NAME); sbuf_printf(sb, "\nversion: %s", XGBE_DRV_VERSION); sbuf_printf(sb, "\nfirmware-version: %d.%d.%d", XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER), XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID), XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER)); sbuf_printf(sb, "\nbus-info: %04d:%02d:%02d", pdata->pcie_bus, pdata->pcie_device, pdata->pcie_func); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } return (-EINVAL); } static int sysctl_get_link_info_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } sbuf_printf(sb, "\nLink is %s", pdata->phy.link ? "Up" : "Down"); rc = sbuf_finish(sb); sbuf_delete(sb); return (0); } return (-EINVAL); } #define COALESCE_SYSCTL_INFO(__coalop) \ { \ { "adaptive-rx", SYSL_BOOL, use_adaptive_rx_coalesce, "not-supported" }, \ { "adaptive-tx", SYSL_BOOL, use_adaptive_tx_coalesce, "not-supported" }, \ { "sample-interval", SYSL_S32, rate_sample_interval, "not-supported" }, \ { "stats-block-usecs", SYSL_S32, stats_block_coalesce_usecs, "not-supported" }, \ { "pkt-rate-low", SYSL_S32, pkt_rate_low, "not-supported" }, \ { "pkt-rate-high", SYSL_S32, pkt_rate_high, "not-supported" }, \ { "rx-usecs", SYSL_S32, rx_coalesce_usecs, "supported" }, \ { "rx-frames", SYSL_S32, rx_max_coalesced_frames, "supported" }, \ { "rx-usecs-irq", SYSL_S32, rx_coalesce_usecs_irq, "not-supported" }, \ { "rx-frames-irq", SYSL_S32, rx_max_coalesced_frames_irq, "not-supported" }, \ { "tx-usecs", SYSL_S32, tx_coalesce_usecs, "not-supported" }, \ { "tx-frames", SYSL_S32, tx_max_coalesced_frames, "supported" }, \ { "tx-usecs-irq", SYSL_S32, tx_coalesce_usecs_irq, "not-supported" }, \ { "tx-frames-irq", SYSL_S32, tx_max_coalesced_frames_irq, "not-supported" }, \ { "rx-usecs-low", SYSL_S32, rx_coalesce_usecs_low, "not-supported" }, \ { "rx-frames-low", SYSL_S32, rx_max_coalesced_frames_low, "not-supported"}, \ { "tx-usecs-low", SYSL_S32, tx_coalesce_usecs_low, "not-supported" }, \ { "tx-frames-low", SYSL_S32, tx_max_coalesced_frames_low, "not-supported" }, \ { "rx-usecs-high", SYSL_S32, rx_coalesce_usecs_high, "not-supported" }, \ { "rx-frames-high", SYSL_S32, rx_max_coalesced_frames_high, "not-supported" }, \ { "tx-usecs-high", SYSL_S32, tx_coalesce_usecs_high, "not-supported" }, \ { "tx-frames-high", SYSL_S32, tx_max_coalesced_frames_high, "not-supported" }, \ } static int sysctl_coalesce_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = 
(struct xgbe_prv_data *)arg1; struct xgbe_hw_if *hw_if = &pdata->hw_if; struct sysctl_op *sys_op = pdata->sys_op; struct sysctl_info sysctl_coalesce[] = COALESCE_SYSCTL_INFO(coalop); unsigned int rx_frames, rx_riwt, rx_usecs; unsigned int tx_frames; ssize_t buf_size = 64; char buf[buf_size]; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } sys_op->rx_coalesce_usecs = pdata->rx_usecs; sys_op->rx_max_coalesced_frames = pdata->rx_frames; sys_op->tx_max_coalesced_frames = pdata->tx_frames; sbuf_printf(sb, "\nAdaptive RX: %s TX: %s\n", sys_op->use_adaptive_rx_coalesce ? "on" : "off", sys_op->use_adaptive_tx_coalesce ? "on" : "off"); sbuf_printf(sb, "stats-block-usecs: %u\n" "sample-interval: %u\n" "pkt-rate-low: %u\n" "pkt-rate-high: %u\n" "\n" "rx-usecs: %u\n" "rx-frames: %u\n" "rx-usecs-irq: %u\n" "rx-frames-irq: %u\n" "\n" "tx-usecs: %u\n" "tx-frames: %u\n" "tx-usecs-irq: %u\n" "tx-frames-irq: %u\n" "\n" "rx-usecs-low: %u\n" "rx-frames-low: %u\n" "tx-usecs-low: %u\n" "tx-frames-low: %u\n" "\n" "rx-usecs-high: %u\n" "rx-frames-high: %u\n" "tx-usecs-high: %u\n" "tx-frames-high: %u\n", sys_op->stats_block_coalesce_usecs, sys_op->rate_sample_interval, sys_op->pkt_rate_low, sys_op->pkt_rate_high, sys_op->rx_coalesce_usecs, sys_op->rx_max_coalesced_frames, sys_op->rx_coalesce_usecs_irq, sys_op->rx_max_coalesced_frames_irq, sys_op->tx_coalesce_usecs, sys_op->tx_max_coalesced_frames, sys_op->tx_coalesce_usecs_irq, sys_op->tx_max_coalesced_frames_irq, sys_op->rx_coalesce_usecs_low, sys_op->rx_max_coalesced_frames_low, sys_op->tx_coalesce_usecs_low, sys_op->tx_max_coalesced_frames_low, sys_op->rx_coalesce_usecs_high, sys_op->rx_max_coalesced_frames_high, sys_op->tx_coalesce_usecs_high, sys_op->tx_max_coalesced_frames_high); rc = sbuf_finish(sb); sbuf_delete(sb); return (0); } rc = get_ubuf(req, buf); if (rc == 0) { parse_generic_sysctl(pdata, buf, sysctl_coalesce, ARRAY_SIZE(sysctl_coalesce)); rx_riwt = hw_if->usec_to_riwt(pdata, sys_op->rx_coalesce_usecs); rx_usecs = sys_op->rx_coalesce_usecs; rx_frames = sys_op->rx_max_coalesced_frames; /* Use smallest possible value if conversion resulted in zero */ if (rx_usecs && !rx_riwt) rx_riwt = 1; /* Check the bounds of values for Rx */ if (rx_riwt > XGMAC_MAX_DMA_RIWT) { axgbe_printf(2, "rx-usec is limited to %d usecs\n", hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT)); return (-EINVAL); } if (rx_frames > pdata->rx_desc_count) { axgbe_printf(2, "rx-frames is limited to %d frames\n", pdata->rx_desc_count); return (-EINVAL); } tx_frames = sys_op->tx_max_coalesced_frames; /* Check the bounds of values for Tx */ if (tx_frames > pdata->tx_desc_count) { axgbe_printf(2, "tx-frames is limited to %d frames\n", pdata->tx_desc_count); return (-EINVAL); } pdata->rx_riwt = rx_riwt; pdata->rx_usecs = rx_usecs; pdata->rx_frames = rx_frames; hw_if->config_rx_coalesce(pdata); pdata->tx_frames = tx_frames; hw_if->config_tx_coalesce(pdata); } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_pauseparam_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; struct sysctl_op *sys_op = pdata->sys_op; struct sysctl_info sysctl_pauseparam[] = { { "autoneg", SYSL_BOOL, autoneg, "supported" }, { "rx", SYSL_BOOL, rx_pause, "supported" }, { "tx", SYSL_BOOL, tx_pause, "supported" }, }; ssize_t buf_size = 512; char buf[buf_size]; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = 
sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } sys_op->autoneg = pdata->phy.pause_autoneg; sys_op->tx_pause = pdata->phy.tx_pause; sys_op->rx_pause = pdata->phy.rx_pause; sbuf_printf(sb, "\nAutonegotiate: %s\n" "RX: %s\n" "TX: %s\n", sys_op->autoneg ? "on" : "off", sys_op->rx_pause ? "on" : "off", sys_op->tx_pause ? "on" : "off"); if (pdata->phy.lp_advertising) { int an_rx = 0, an_tx = 0; if (pdata->phy.advertising & pdata->phy.lp_advertising & ADVERTISED_Pause) { an_tx = 1; an_rx = 1; } else if (pdata->phy.advertising & pdata->phy.lp_advertising & ADVERTISED_Asym_Pause) { if (pdata->phy.advertising & ADVERTISED_Pause) an_rx = 1; else if (pdata->phy.lp_advertising & ADVERTISED_Pause) an_tx = 1; } sbuf_printf(sb, "\n->\nRX negotiated: %s\n" "TX negotiated: %s\n", an_rx ? "on" : "off", an_tx ? "on" : "off"); } rc = sbuf_finish(sb); sbuf_delete(sb); return (0); } rc = get_ubuf(req, buf); if (rc == 0) { parse_generic_sysctl(pdata, buf, sysctl_pauseparam, ARRAY_SIZE(sysctl_pauseparam)); if (sys_op->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) { axgbe_error("autoneg disabled, pause autoneg not available\n"); return (-EINVAL); } pdata->phy.pause_autoneg = sys_op->autoneg; pdata->phy.tx_pause = sys_op->tx_pause; pdata->phy.rx_pause = sys_op->rx_pause; XGBE_CLR_ADV(&pdata->phy, Pause); XGBE_CLR_ADV(&pdata->phy, Asym_Pause); if (sys_op->rx_pause) { XGBE_SET_ADV(&pdata->phy, Pause); XGBE_SET_ADV(&pdata->phy, Asym_Pause); } if (sys_op->tx_pause) { /* Equivalent to XOR of Asym_Pause */ if (XGBE_ADV(&pdata->phy, Asym_Pause)) XGBE_CLR_ADV(&pdata->phy, Asym_Pause); else XGBE_SET_ADV(&pdata->phy, Asym_Pause); } if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) rc = pdata->phy_if.phy_config_aneg(pdata); } return (rc); } static int sysctl_link_ksettings_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; struct sysctl_op *sys_op = pdata->sys_op; struct sysctl_info sysctl_linksettings[] = { { "autoneg", SYSL_BOOL, autoneg, "supported" }, { "speed", SYSL_U32, speed, "supported" }, { "duplex", SYSL_U8, duplex, "supported" }, }; ssize_t buf_size = 512; char buf[buf_size], link_modes[16], speed_modes[16]; struct sbuf *sb; uint32_t speed; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } sys_op->autoneg = pdata->phy.autoneg; sys_op->speed = pdata->phy.speed; sys_op->duplex = pdata->phy.duplex; XGBE_LM_COPY(&pdata->phy, supported, &pdata->phy, supported); XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, advertising); XGBE_LM_COPY(&pdata->phy, lp_advertising, &pdata->phy, lp_advertising); switch (sys_op->speed) { case 1: strcpy(link_modes, "Unknown"); strcpy(speed_modes, "Unknown"); break; case 2: strcpy(link_modes, "10Gbps/Full"); strcpy(speed_modes, "10000"); break; case 3: strcpy(link_modes, "2.5Gbps/Full"); strcpy(speed_modes, "2500"); break; case 4: strcpy(link_modes, "1Gbps/Full"); strcpy(speed_modes, "1000"); break; case 5: strcpy(link_modes, "100Mbps/Full"); strcpy(speed_modes, "100"); break; case 6: strcpy(link_modes, "10Mbps/Full"); strcpy(speed_modes, "10"); break; } sbuf_printf(sb, "\nlink_modes: %s\n" "autonegotiation: %s\n" "speed: %sMbps\n", link_modes, (sys_op->autoneg == AUTONEG_DISABLE) ? 
"off" : "on", speed_modes); switch (sys_op->duplex) { case DUPLEX_HALF: sbuf_printf(sb, "Duplex: Half\n"); break; case DUPLEX_FULL: sbuf_printf(sb, "Duplex: Full\n"); break; default: sbuf_printf(sb, "Duplex: Unknown\n"); break; } rc = sbuf_finish(sb); sbuf_delete(sb); return (0); } rc = get_ubuf(req, buf); if (rc == 0) { parse_generic_sysctl(pdata, buf, sysctl_linksettings, ARRAY_SIZE(sysctl_linksettings)); speed = sys_op->speed; if ((sys_op->autoneg != AUTONEG_ENABLE) && (sys_op->autoneg != AUTONEG_DISABLE)) { axgbe_error("unsupported autoneg %hhu\n", (unsigned char)sys_op->autoneg); return (-EINVAL); } if (sys_op->autoneg == AUTONEG_DISABLE) { if (!pdata->phy_if.phy_valid_speed(pdata, speed)) { axgbe_error("unsupported speed %u\n", speed); return (-EINVAL); } if (sys_op->duplex != DUPLEX_FULL) { axgbe_error("unsupported duplex %hhu\n", (unsigned char)sys_op->duplex); return (-EINVAL); } } pdata->phy.autoneg = sys_op->autoneg; pdata->phy.speed = speed; pdata->phy.duplex = sys_op->duplex; if (sys_op->autoneg == AUTONEG_ENABLE) XGBE_SET_ADV(&pdata->phy, Autoneg); else XGBE_CLR_ADV(&pdata->phy, Autoneg); if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) rc = pdata->phy_if.phy_config_aneg(pdata); } return (rc); } static int sysctl_ringparam_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; struct sysctl_op *sys_op = pdata->sys_op; struct sysctl_info sysctl_ringparam[] = { { "rx", SYSL_S32, rx_pending, "supported" }, { "rx-mini", SYSL_S32, rx_mini_pending, "supported" }, { "rx-jumbo", SYSL_S32, rx_jumbo_pending, "supported" }, { "tx", SYSL_S32, tx_pending, "supported" }, }; ssize_t buf_size = 512; unsigned int rx, tx; char buf[buf_size]; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } sys_op->rx_max_pending = XGBE_RX_DESC_CNT_MAX; sys_op->tx_max_pending = XGBE_TX_DESC_CNT_MAX; sys_op->rx_pending = pdata->rx_desc_count; sys_op->tx_pending = pdata->tx_desc_count; sbuf_printf(sb, "\nPre-set maximums:\n" "RX: %u\n" "RX Mini: %u\n" "RX Jumbo: %u\n" "TX: %u\n", sys_op->rx_max_pending, sys_op->rx_mini_max_pending, sys_op->rx_jumbo_max_pending, sys_op->tx_max_pending); sbuf_printf(sb, "\nCurrent hardware settings:\n" "RX: %u\n" "RX Mini: %u\n" "RX Jumbo: %u\n" "TX: %u\n", sys_op->rx_pending, sys_op->rx_mini_pending, sys_op->rx_jumbo_pending, sys_op->tx_pending); rc = sbuf_finish(sb); sbuf_delete(sb); return (0); } rc = get_ubuf(req, buf); if (rc == 0) { parse_generic_sysctl(pdata, buf, sysctl_ringparam, ARRAY_SIZE(sysctl_ringparam)); if (sys_op->rx_mini_pending || sys_op->rx_jumbo_pending) { axgbe_error("unsupported ring parameter\n"); return (-EINVAL); } if ((sys_op->rx_pending < XGBE_RX_DESC_CNT_MIN) || (sys_op->rx_pending > XGBE_RX_DESC_CNT_MAX)) { axgbe_error("rx ring param must be between %u and %u\n", XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX); return (-EINVAL); } if ((sys_op->tx_pending < XGBE_TX_DESC_CNT_MIN) || (sys_op->tx_pending > XGBE_TX_DESC_CNT_MAX)) { axgbe_error("tx ring param must be between %u and %u\n", XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX); return (-EINVAL); } rx = __rounddown_pow_of_two(sys_op->rx_pending); if (rx != sys_op->rx_pending) axgbe_printf(1, "rx ring param rounded to power of 2: %u\n", rx); tx = __rounddown_pow_of_two(sys_op->tx_pending); if (tx != sys_op->tx_pending) axgbe_printf(1, "tx ring param rounded to power of 2: %u\n", tx); if ((rx == pdata->rx_desc_count) && (tx == pdata->tx_desc_count)) goto out; 
pdata->rx_desc_count = rx; pdata->tx_desc_count = tx; /* TODO - restart dev */ } out: return (0); } static int sysctl_channels_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; struct sysctl_op *sys_op = pdata->sys_op; struct sysctl_info sysctl_channels[] = { { "rx", SYSL_S32, rx_count, "supported" }, { "tx", SYSL_S32, tx_count, "supported" }, { "other", SYSL_S32, other_count, "supported" }, { "combined", SYSL_S32, combined_count, "supported" }, }; unsigned int rx, tx, combined; ssize_t buf_size = 512; char buf[buf_size]; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count); rx = min(rx, pdata->channel_irq_count); tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count); tx = min(tx, pdata->channel_irq_count); tx = min(tx, pdata->tx_max_q_count); combined = min(rx, tx); sys_op->max_combined = combined; sys_op->max_rx = rx ? rx - 1 : 0; sys_op->max_tx = tx ? tx - 1 : 0; /* Get current settings based on device state */ rx = pdata->rx_ring_count; tx = pdata->tx_ring_count; combined = min(rx, tx); rx -= combined; tx -= combined; sys_op->combined_count = combined; sys_op->rx_count = rx; sys_op->tx_count = tx; sbuf_printf(sb, "\nPre-set maximums:\n" "RX: %u\n" "TX: %u\n" "Other: %u\n" "Combined: %u\n", sys_op->max_rx, sys_op->max_tx, sys_op->max_other, sys_op->max_combined); sbuf_printf(sb, "\nCurrent hardware settings:\n" "RX: %u\n" "TX: %u\n" "Other: %u\n" "Combined: %u\n", sys_op->rx_count, sys_op->tx_count, sys_op->other_count, sys_op->combined_count); rc = sbuf_finish(sb); sbuf_delete(sb); return (0); } rc = get_ubuf(req, buf); if (rc == 0) { parse_generic_sysctl(pdata, buf, sysctl_channels, ARRAY_SIZE(sysctl_channels)); axgbe_error( "channel inputs: combined=%u, rx-only=%u," " tx-only=%u\n", sys_op->combined_count, sys_op->rx_count, sys_op->tx_count); } return (rc); } static int sysctl_mac_stats_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; struct sbuf *sb; int rc = 0; int i; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } pdata->hw_if.read_mmc_stats(pdata); for (i = 0; i < XGBE_STATS_COUNT; i++) { sbuf_printf(sb, "\n %s: %lu", xgbe_gstring_stats[i].stat_string, *(uint64_t *)((uint8_t *)pdata + xgbe_gstring_stats[i].stat_offset)); } for (i = 0; i < pdata->tx_ring_count; i++) { sbuf_printf(sb, "\n txq_packets[%d]: %lu" "\n txq_bytes[%d]: %lu", i, pdata->ext_stats.txq_packets[i], i, pdata->ext_stats.txq_bytes[i]); } for (i = 0; i < pdata->rx_ring_count; i++) { sbuf_printf(sb, "\n rxq_packets[%d]: %lu" "\n rxq_bytes[%d]: %lu", i, pdata->ext_stats.rxq_packets[i], i, pdata->ext_stats.rxq_bytes[i]); } rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } return (-EINVAL); } static int sysctl_xgmac_reg_value_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; char buf[buf_size]; unsigned int value; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } value = XGMAC_IOREAD(pdata, pdata->sysctl_xgmac_reg); axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value); sbuf_printf(sb, "\nXGMAC reg_value: 0x%x\n", value); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } 
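/* Write path: parse a hex value from the user buffer and write it into the XGMAC register selected via the reg_addr sysctl. */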
rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%x", &value); axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value); XGMAC_IOWRITE(pdata, pdata->sysctl_xgmac_reg, value); } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_xpcs_mmd_reg_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; char buf[buf_size]; struct sbuf *sb; unsigned int reg; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } axgbe_printf(2, "READ: %s: xpcs_mmd: 0x%x\n", __func__, pdata->sysctl_xpcs_mmd); sbuf_printf(sb, "\nXPCS mmd_reg: 0x%x\n", pdata->sysctl_xpcs_mmd); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%x", &reg); axgbe_printf(2, "WRITE: %s: mmd_reg: 0x%x\n", __func__, reg); pdata->sysctl_xpcs_mmd = reg; } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_xpcs_reg_addr_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; char buf[buf_size]; struct sbuf *sb; unsigned int reg; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } axgbe_printf(2, "READ: %s: sysctl_xpcs_reg: 0x%x\n", __func__, pdata->sysctl_xpcs_reg); sbuf_printf(sb, "\nXPCS reg_addr: 0x%x\n", pdata->sysctl_xpcs_reg); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%x", &reg); axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg); pdata->sysctl_xpcs_reg = reg; } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_xpcs_reg_value_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; char buf[buf_size]; unsigned int value; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } value = XMDIO_READ(pdata, pdata->sysctl_xpcs_mmd, pdata->sysctl_xpcs_reg); axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value); sbuf_printf(sb, "\nXPCS reg_value: 0x%x\n", value); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%x", &value); axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value); XMDIO_WRITE(pdata, pdata->sysctl_xpcs_mmd, pdata->sysctl_xpcs_reg, value); } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_xprop_reg_addr_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; char buf[buf_size]; struct sbuf *sb; unsigned int reg; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } axgbe_printf(2, "READ: %s: sysctl_xprop_reg: 0x%x\n", __func__, pdata->sysctl_xprop_reg); sbuf_printf(sb, "\nXPROP reg_addr: 0x%x\n", pdata->sysctl_xprop_reg); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%x", &reg); axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg); pdata->sysctl_xprop_reg = reg; } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_xprop_reg_value_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 
64; char buf[buf_size]; unsigned int value; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } value = XP_IOREAD(pdata, pdata->sysctl_xprop_reg); axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value); sbuf_printf(sb, "\nXPROP reg_value: 0x%x\n", value); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%x", &value); axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value); XP_IOWRITE(pdata, pdata->sysctl_xprop_reg, value); } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_xi2c_reg_addr_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; char buf[buf_size]; struct sbuf *sb; unsigned int reg; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } axgbe_printf(2, "READ: %s: sysctl_xi2c_reg: 0x%x\n", __func__, pdata->sysctl_xi2c_reg); sbuf_printf(sb, "\nXI2C reg_addr: 0x%x\n", pdata->sysctl_xi2c_reg); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%x", &reg); axgbe_printf(2, "WRITE: %s: reg: 0x%x\n", __func__, reg); pdata->sysctl_xi2c_reg = reg; } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_xi2c_reg_value_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; ssize_t buf_size = 64; char buf[buf_size]; unsigned int value; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } value = XI2C_IOREAD(pdata, pdata->sysctl_xi2c_reg); axgbe_printf(2, "READ: %s: value: 0x%x\n", __func__, value); sbuf_printf(sb, "\nXI2C reg_value: 0x%x\n", value); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%x", &value); axgbe_printf(2, "WRITE: %s: value: 0x%x\n", __func__, value); XI2C_IOWRITE(pdata, pdata->sysctl_xi2c_reg, value); } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_an_cdr_wr_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; unsigned int an_cdr_wr = 0; ssize_t buf_size = 64; char buf[buf_size]; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } axgbe_printf(2, "READ: %s: an_cdr_wr: %d\n", __func__, pdata->sysctl_an_cdr_workaround); sbuf_printf(sb, "%d\n", pdata->sysctl_an_cdr_workaround); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%u", &an_cdr_wr); axgbe_printf(2, "WRITE: %s: an_cdr_wr: %d\n", __func__, an_cdr_wr); if (an_cdr_wr) pdata->sysctl_an_cdr_workaround = 1; else pdata->sysctl_an_cdr_workaround = 0; } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } static int sysctl_an_cdr_track_early_handler(SYSCTL_HANDLER_ARGS) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)arg1; unsigned int an_cdr_track_early = 0; ssize_t buf_size = 64; char buf[buf_size]; struct sbuf *sb; int rc = 0; if (req->newptr == NULL) { sb = sbuf_new_for_sysctl(NULL, NULL, buf_size, req); if (sb == NULL) { rc = sb->s_error; return (rc); } axgbe_printf(2, "READ: %s: an_cdr_track_early %d\n", __func__, 
pdata->sysctl_an_cdr_track_early); sbuf_printf(sb, "%d\n", pdata->sysctl_an_cdr_track_early); rc = sbuf_finish(sb); sbuf_delete(sb); return (rc); } rc = get_ubuf(req, buf); if (rc == 0) { sscanf(buf, "%u", &an_cdr_track_early); axgbe_printf(2, "WRITE: %s: an_cdr_track_early: %d\n", __func__, an_cdr_track_early); if (an_cdr_track_early) pdata->sysctl_an_cdr_track_early = 1; else pdata->sysctl_an_cdr_track_early = 0; } axgbe_printf(2, "%s: rc= %d\n", __func__, rc); return (rc); } void axgbe_sysctl_exit(struct xgbe_prv_data *pdata) { if (pdata->sys_op) free(pdata->sys_op, M_AXGBE); } void axgbe_sysctl_init(struct xgbe_prv_data *pdata) { struct sysctl_ctx_list *clist; struct sysctl_oid_list *top; struct sysctl_oid *parent; struct sysctl_op *sys_op; sys_op = malloc(sizeof(*sys_op), M_AXGBE, M_WAITOK | M_ZERO); pdata->sys_op = sys_op; clist = device_get_sysctl_ctx(pdata->dev); parent = device_get_sysctl_tree(pdata->dev); top = SYSCTL_CHILDREN(parent); /* Set defaults */ pdata->sysctl_xgmac_reg = 0; pdata->sysctl_xpcs_mmd = 1; pdata->sysctl_xpcs_reg = 0; SYSCTL_ADD_UINT(clist, top, OID_AUTO, "axgbe_debug_level", CTLFLAG_RWTUN, &pdata->debug_level, 0, "axgbe log level -- higher is verbose"); + SYSCTL_ADD_UINT(clist, top, OID_AUTO, "sph_enable", + CTLFLAG_RDTUN, &pdata->sph_enable, 1, + "shows the split header feature state (1 - enable, 0 - disable)"); + SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xgmac_register", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_xgmac_reg_addr_handler, "IU", "xgmac register addr"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xgmac_register_value", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_xgmac_reg_value_handler, "IU", "xgmac register value"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xpcs_mmd", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_xpcs_mmd_reg_handler, "IU", "xpcs mmd register"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xpcs_register", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_xpcs_reg_addr_handler, "IU", "xpcs register"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xpcs_register_value", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_xpcs_reg_value_handler, "IU", "xpcs register value"); if (pdata->xpcs_res) { SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xprop_register", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_xprop_reg_addr_handler, "IU", "xprop register"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xprop_register_value", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_xprop_reg_value_handler, "IU", "xprop register value"); } if (pdata->xpcs_res) { SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xi2c_register", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_xi2c_reg_addr_handler, "IU", "xi2c register"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "xi2c_register_value", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_xi2c_reg_value_handler, "IU", "xi2c register value"); } if (pdata->vdata->an_cdr_workaround) { SYSCTL_ADD_PROC(clist, top, OID_AUTO, "an_cdr_workaround", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_an_cdr_wr_handler, "IU", "an cdr workaround"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "an_cdr_track_early", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_an_cdr_track_early_handler, "IU", "an cdr track early"); } SYSCTL_ADD_PROC(clist, top, OID_AUTO, "drv_info", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_get_drv_info_handler, "IU", "xgbe drv info"); 
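	/*
	 * sph_enable above is registered CTLFLAG_RDTUN: read-only at
	 * runtime, but initializable as a loader tunable of the same name
	 * before the driver attaches.  A sketch, assuming the first
	 * instance of the "ax" driver, e.g. in /boot/loader.conf:
	 *
	 *   dev.ax.0.sph_enable="0"	# attach with split header disabled
	 *
	 * The remaining nodes expose ethtool-style info strings through the
	 * same sbuf-backed handler pattern used above.
	 */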
SYSCTL_ADD_PROC(clist, top, OID_AUTO, "link_info", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_get_link_info_handler, "IU", "xgbe link info"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "coalesce_info", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_coalesce_handler, "IU", "xgbe coalesce info"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "pauseparam_info", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_pauseparam_handler, "IU", "xgbe pauseparam info"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "link_ksettings_info", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_link_ksettings_handler, "IU", "xgbe link_ksettings info"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "ringparam_info", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_ringparam_handler, "IU", "xgbe ringparam info"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "channels_info", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_channels_handler, "IU", "xgbe channels info"); SYSCTL_ADD_PROC(clist, top, OID_AUTO, "mac_stats", CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, pdata, 0, sysctl_mac_stats_handler, "IU", "xgbe mac stats"); } diff --git a/sys/dev/axgbe/xgbe-txrx.c b/sys/dev/axgbe/xgbe-txrx.c index c6872e584f81..e798bfa37335 100644 --- a/sys/dev/axgbe/xgbe-txrx.c +++ b/sys/dev/axgbe/xgbe-txrx.c @@ -1,777 +1,807 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2020 Advanced Micro Devices, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Contact Information : * Rajesh Kumar * Shreyank Amartya * */ #include __FBSDID("$FreeBSD$"); #include "xgbe.h" #include "xgbe-common.h" /* * IFLIB interfaces */ static int axgbe_isc_txd_encap(void *, if_pkt_info_t); static void axgbe_isc_txd_flush(void *, uint16_t, qidx_t); static int axgbe_isc_txd_credits_update(void *, uint16_t, bool); static void axgbe_isc_rxd_refill(void *, if_rxd_update_t); static void axgbe_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t); static int axgbe_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t); static int axgbe_isc_rxd_pkt_get(void *, if_rxd_info_t); struct if_txrx axgbe_txrx = { .ift_txd_encap = axgbe_isc_txd_encap, .ift_txd_flush = axgbe_isc_txd_flush, .ift_txd_credits_update = axgbe_isc_txd_credits_update, .ift_rxd_available = axgbe_isc_rxd_available, .ift_rxd_pkt_get = axgbe_isc_rxd_pkt_get, .ift_rxd_refill = axgbe_isc_rxd_refill, .ift_rxd_flush = axgbe_isc_rxd_flush, .ift_legacy_intr = NULL }; static void xgbe_print_pkt_info(struct xgbe_prv_data *pdata, if_pkt_info_t pi) { axgbe_printf(1, "------Packet Info Start------\n"); axgbe_printf(1, "pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); axgbe_printf(1, "pi new_pidx: %d csum_flags: %x mflags: %x vtag: %d\n", pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_mflags, pi->ipi_vtag); axgbe_printf(1, "pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); axgbe_printf(1, "pi tcp_hlen: %d tcp_hflags: %x tcp_seq: %d tso_segsz %d\n", pi->ipi_tcp_hlen, pi->ipi_tcp_hflags, pi->ipi_tcp_seq, pi->ipi_tso_segsz); } static bool axgbe_ctx_desc_setup(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, if_pkt_info_t pi) { struct xgbe_ring_desc *rdesc; struct xgbe_ring_data *rdata; bool inc_cur = false; rdata = XGBE_GET_DESC_DATA(ring, ring->cur); rdesc = rdata->rdesc; axgbe_printf(1, "ipi_tso_segsz %d cur_mss %d idx %d\n", pi->ipi_tso_segsz, ring->tx.cur_mss, ring->cur); axgbe_printf(1, "ipi_vtag 0x%x cur_vlan_ctag 0x%x\n", pi->ipi_vtag, ring->tx.cur_vlan_ctag); if ((pi->ipi_csum_flags & CSUM_TSO) && (pi->ipi_tso_segsz != ring->tx.cur_mss)) { /* * Set TSO maximum segment size * Mark as context descriptor * Indicate this descriptor contains MSS */ XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2, MSS, pi->ipi_tso_segsz); XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1); XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, TCMSSV, 1); ring->tx.cur_mss = pi->ipi_tso_segsz; inc_cur = true; } if (pi->ipi_vtag && (pi->ipi_vtag != ring->tx.cur_vlan_ctag)) { /* * Mark it as context descriptor * Set the VLAN tag * Indicate this descriptor contains the VLAN tag */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, CTXT, 1); XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VT, pi->ipi_vtag); XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, VLTV, 1); ring->tx.cur_vlan_ctag = pi->ipi_vtag; inc_cur = true; } return (inc_cur); } static uint16_t axgbe_calculate_tx_parms(struct xgbe_prv_data *pdata, if_pkt_info_t pi, struct xgbe_packet_data *packet) { uint32_t tcp_payload_len = 0, bytes = 0; uint16_t max_len, hlen, payload_len, pkts = 0; packet->tx_packets = packet->tx_bytes = 0; hlen = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen; if (pi->ipi_csum_flags & CSUM_TSO) { tcp_payload_len = pi->ipi_len - hlen; axgbe_printf(1, "%s: ipi_len %x elen %d iplen %d tcplen %d\n", __func__, pi->ipi_len, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_tcp_hlen); 
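		/*
		 * For a TSO request, estimate how many wire frames and
		 * bytes the hardware will emit so the coalescing logic in
		 * axgbe_isc_txd_encap() can count frames rather than
		 * descriptors.  Each frame carries at most MTU + Ethernet
		 * header (+ VLAN tag) bytes, hlen of which is the
		 * replicated protocol header.  Illustrative numbers (not
		 * taken from the hardware): with a 1500-byte MTU and
		 * hlen = 54 (14 + 20 + 20), a 9000-byte TCP payload is
		 * emitted as ceil(9000 / 1460) = 7 frames.
		 */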
max_len = if_getmtu(pdata->netdev) + ETH_HLEN; if (pi->ipi_vtag) max_len += VLAN_HLEN; while (tcp_payload_len) { payload_len = max_len - hlen; payload_len = min(payload_len, tcp_payload_len); tcp_payload_len -= payload_len; pkts++; bytes += (hlen + payload_len); axgbe_printf(1, "%s: max_len %d payload_len %d " "tcp_len %d\n", __func__, max_len, payload_len, tcp_payload_len); } } else { pkts = 1; bytes = pi->ipi_len; } packet->tx_packets = pkts; packet->tx_bytes = bytes; axgbe_printf(1, "%s: packets %d bytes %d hlen %d\n", __func__, packet->tx_packets, packet->tx_bytes, hlen); return (hlen); } static int axgbe_isc_txd_encap(void *arg, if_pkt_info_t pi) { struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel; struct xgbe_ring *ring; struct xgbe_ring_desc *rdesc; struct xgbe_ring_data *rdata; struct xgbe_packet_data *packet; unsigned int cur, start, tx_set_ic; uint16_t offset, hlen, datalen, tcp_payload_len = 0; int cur_seg = 0; xgbe_print_pkt_info(pdata, pi); channel = pdata->channel[pi->ipi_qsidx]; ring = channel->tx_ring; packet = &ring->packet_data; cur = start = ring->cur; axgbe_printf(1, "--> %s: txq %d cur %d dirty %d\n", __func__, pi->ipi_qsidx, ring->cur, ring->dirty); MPASS(pi->ipi_len != 0); if (__predict_false(pi->ipi_len == 0)) { axgbe_error("empty packet received from stack\n"); return (0); } MPASS(ring->cur == pi->ipi_pidx); if (__predict_false(ring->cur != pi->ipi_pidx)) { axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__, ring->cur, pi->ipi_pidx); } /* Determine if an interrupt should be generated for this Tx: * Interrupt: * - Tx frame count exceeds the frame count setting * - Addition of Tx frame count to the frame count since the * last interrupt was set exceeds the frame count setting * No interrupt: * - No frame count setting specified (ethtool -C ethX tx-frames 0) * - Addition of Tx frame count to the frame count since the * last interrupt was set does not exceed the frame count setting */ memset(packet, 0, sizeof(*packet)); hlen = axgbe_calculate_tx_parms(pdata, pi, packet); axgbe_printf(1, "%s: ipi_len %d tx_pkts %d tx_bytes %d hlen %d\n", __func__, pi->ipi_len, packet->tx_packets, packet->tx_bytes, hlen); ring->coalesce_count += packet->tx_packets; if (!pdata->tx_frames) tx_set_ic = 0; else if (packet->tx_packets > pdata->tx_frames) tx_set_ic = 1; else if ((ring->coalesce_count % pdata->tx_frames) < (packet->tx_packets)) tx_set_ic = 1; else tx_set_ic = 0; /* Add Context descriptor if needed (for TSO, VLAN cases) */ if (axgbe_ctx_desc_setup(pdata, ring, pi)) cur++; rdata = XGBE_GET_DESC_DATA(ring, cur); rdesc = rdata->rdesc; axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x " "ipi_len 0x%x\n", __func__, cur, lower_32_bits(pi->ipi_segs[cur_seg].ds_addr), upper_32_bits(pi->ipi_segs[cur_seg].ds_addr), (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len); /* Update buffer address (for TSO this is the header) */ rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr)); rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr)); /* Update the buffer length */ if (hlen == 0) hlen = pi->ipi_segs[cur_seg].ds_len; XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, hlen); /* VLAN tag insertion check */ if (pi->ipi_vtag) { XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, TX_NORMAL_DESC2_VLAN_INSERT); } /* Mark it as First Descriptor */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); /* Mark it as a NORMAL descriptor */ XGMAC_SET_BITS_LE(rdesc->desc3, 
TX_NORMAL_DESC3, CTXT, 0); /* * Set the OWN bit if this is not the first descriptor. For first * descriptor, OWN bit will be set at last so that hardware will * process the descriptors only after the OWN bit for the first * descriptor is set */ if (cur != start) XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); if (pi->ipi_csum_flags & CSUM_TSO) { /* Enable TSO */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1); tcp_payload_len = pi->ipi_len - hlen; /* Set TCP payload length*/ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL, tcp_payload_len); XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, pi->ipi_tcp_hlen/4); axgbe_printf(1, "tcp_payload %d tcp_hlen %d\n", tcp_payload_len, pi->ipi_tcp_hlen/4); } else { /* Enable CRC and Pad Insertion */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); /* Enable HW CSUM*/ if (pi->ipi_csum_flags) XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3); /* Set total length to be transmitted */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, pi->ipi_len); } cur++; for (cur_seg = 0 ; cur_seg < pi->ipi_nsegs ; cur_seg++) { if (cur_seg == 0) { offset = hlen; datalen = pi->ipi_segs[cur_seg].ds_len - hlen; } else { offset = 0; datalen = pi->ipi_segs[cur_seg].ds_len; } if (datalen) { rdata = XGBE_GET_DESC_DATA(ring, cur); rdesc = rdata->rdesc; /* Update buffer address */ rdesc->desc0 = cpu_to_le32(lower_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset)); rdesc->desc1 = cpu_to_le32(upper_32_bits(pi->ipi_segs[cur_seg].ds_addr + offset)); /* Update the buffer length */ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, datalen); /* Set OWN bit */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); /* Mark it as NORMAL descriptor */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); /* Enable HW CSUM*/ if (pi->ipi_csum_flags) XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CIC, 0x3); axgbe_printf(1, "%s: cur %d lo 0x%lx hi 0x%lx ds_len 0x%x " "ipi_len 0x%x\n", __func__, cur, lower_32_bits(pi->ipi_segs[cur_seg].ds_addr), upper_32_bits(pi->ipi_segs[cur_seg].ds_addr), (int)pi->ipi_segs[cur_seg].ds_len, pi->ipi_len); cur++; } } /* Set LAST bit for the last descriptor */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); /* Set IC bit based on Tx coalescing settings */ if (tx_set_ic) XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); wmb(); /* Set OWN bit for the first descriptor */ rdata = XGBE_GET_DESC_DATA(ring, start); rdesc = rdata->rdesc; XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); ring->cur = pi->ipi_new_pidx = (cur & (ring->rdesc_count - 1)); axgbe_printf(1, "<-- %s: end cur %d dirty %d\n", __func__, ring->cur, ring->dirty); return (0); } static void axgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx) { struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel = pdata->channel[txqid]; struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata = XGBE_GET_DESC_DATA(ring, pidx); axgbe_printf(1, "--> %s: flush txq %d pidx %d cur %d dirty %d\n", __func__, txqid, pidx, ring->cur, ring->dirty); - MPASS(ring->cur == pidx); - if (__predict_false(ring->cur != pidx)) { - axgbe_error("--> %s: cur(%d) ne pidx(%d)\n", __func__, - ring->cur, pidx); - } - - wmb(); - /* Ring Doorbell */ - if (XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO) != - lower_32_bits(rdata->rdata_paddr)) { - XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, - lower_32_bits(rdata->rdata_paddr)); - } + XGMAC_DMA_IOWRITE(channel, 
DMA_CH_TDTR_LO, + lower_32_bits(rdata->rdata_paddr)); } static int axgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear) { struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; struct xgbe_hw_if *hw_if = &sc->pdata.hw_if; struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel = pdata->channel[txqid]; struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata; int processed = 0; axgbe_printf(1, "%s: txq %d clear %d cur %d dirty %d\n", __func__, txqid, clear, ring->cur, ring->dirty); if (__predict_false(ring->cur == ring->dirty)) { axgbe_printf(1, "<-- %s: cur(%d) equals dirty(%d)\n", __func__, ring->cur, ring->dirty); return (0); } /* Check whether the first dirty descriptor is Tx complete */ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); if (!hw_if->tx_complete(rdata->rdesc)) { axgbe_printf(1, "<-- %s: (dirty %d)\n", __func__, ring->dirty); return (0); } /* * If clear is false just let the caller know that there * are descriptors to reclaim */ if (!clear) { axgbe_printf(1, "<-- %s: (!clear)\n", __func__); return (1); } do { hw_if->tx_desc_reset(rdata); processed++; ring->dirty = (ring->dirty + 1) & (ring->rdesc_count - 1); /* * tx_complete will return true for unused descriptors also. * so, check tx_complete only until used descriptors. */ if (ring->cur == ring->dirty) break; rdata = XGBE_GET_DESC_DATA(ring, ring->dirty); } while (hw_if->tx_complete(rdata->rdesc)); axgbe_printf(1, "<-- %s: processed %d cur %d dirty %d\n", __func__, processed, ring->cur, ring->dirty); return (processed); } static void axgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru) { struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel = pdata->channel[iru->iru_qsidx]; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; unsigned int rx_usecs = pdata->rx_usecs; unsigned int rx_frames = pdata->rx_frames; unsigned int inte; uint8_t count = iru->iru_count; int i, j; + bool config_intr = false; axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d count %d ring cur %d " "dirty %d\n", __func__, iru->iru_qsidx, iru->iru_flidx, iru->iru_pidx, count, ring->cur, ring->dirty); for (i = iru->iru_pidx, j = 0 ; j < count ; i++, j++) { - if (i == XGBE_RX_DESC_CNT_DEFAULT) + if (i == sc->scctx->isc_nrxd[0]) i = 0; rdata = XGBE_GET_DESC_DATA(ring, i); rdesc = rdata->rdesc; if (__predict_false(XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))) { axgbe_error("%s: refill clash, cur %d dirty %d index %d" "pidx %d\n", __func__, ring->cur, ring->dirty, j, i); } - /* Assuming split header is enabled */ - if (iru->iru_flidx == 0) { + if (pdata->sph_enable) { + if (iru->iru_flidx == 0) { + + /* Fill header/buffer1 address */ + rdesc->desc0 = + cpu_to_le32(lower_32_bits(iru->iru_paddrs[j])); + rdesc->desc1 = + cpu_to_le32(upper_32_bits(iru->iru_paddrs[j])); + } else { + + /* Fill data/buffer2 address */ + rdesc->desc2 = + cpu_to_le32(lower_32_bits(iru->iru_paddrs[j])); + rdesc->desc3 = + cpu_to_le32(upper_32_bits(iru->iru_paddrs[j])); + config_intr = true; + } + } else { /* Fill header/buffer1 address */ - rdesc->desc0 = + rdesc->desc0 = rdesc->desc2 = cpu_to_le32(lower_32_bits(iru->iru_paddrs[j])); - rdesc->desc1 = + rdesc->desc1 = rdesc->desc3 = cpu_to_le32(upper_32_bits(iru->iru_paddrs[j])); - } else { - /* Fill data/buffer2 address */ - rdesc->desc2 = - cpu_to_le32(lower_32_bits(iru->iru_paddrs[j])); - rdesc->desc3 = - cpu_to_le32(upper_32_bits(iru->iru_paddrs[j])); + 
config_intr = true; + } + + if (config_intr) { if (!rx_usecs && !rx_frames) { /* No coalescing, interrupt for every descriptor */ inte = 1; } else { /* Set interrupt based on Rx frame coalescing setting */ - if (rx_frames && - !(((ring->dirty + 1) &(ring->rdesc_count - 1)) % rx_frames)) + if (rx_frames && !((ring->dirty + 1) % rx_frames)) inte = 1; else inte = 0; } XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); wmb(); ring->dirty = ((ring->dirty + 1) & (ring->rdesc_count - 1)); + + config_intr = false; } } axgbe_printf(1, "<-- %s: rxq: %d cur: %d dirty: %d\n", __func__, channel->queue_index, ring->cur, ring->dirty); } static void axgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx, qidx_t pidx) { struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel = pdata->channel[qsidx]; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; axgbe_printf(1, "--> %s: rxq %d fl %d pidx %d cur %d dirty %d\n", __func__, qsidx, flidx, pidx, ring->cur, ring->dirty); - if (flidx == 1) { - - rdata = XGBE_GET_DESC_DATA(ring, pidx); + rdata = XGBE_GET_DESC_DATA(ring, pidx); + /* + * update RX descriptor tail pointer in hardware to indicate + * that new buffers are present in the allocated memory region + */ + if (!pdata->sph_enable || flidx == 1) { XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, lower_32_bits(rdata->rdata_paddr)); } - - wmb(); } static int axgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t idx, qidx_t budget) { struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel = pdata->channel[qsidx]; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; unsigned int cur; - int count; + int count = 0; uint8_t incomplete = 1, context_next = 0, running = 0; axgbe_printf(1, "--> %s: rxq %d idx %d budget %d cur %d dirty %d\n", __func__, qsidx, idx, budget, ring->cur, ring->dirty); + if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) { + axgbe_printf(0, "%s: Polling when XGBE_DOWN\n", __func__); + return (count); + } + cur = ring->cur; for (count = 0; count <= budget; ) { rdata = XGBE_GET_DESC_DATA(ring, cur); rdesc = rdata->rdesc; if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) break; running = 1; if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) incomplete = 0; if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) context_next = 1; if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) context_next = 0; cur = (cur + 1) & (ring->rdesc_count - 1); if (incomplete || context_next) continue; /* Increment pkt count & reset variables for next full packet */ count++; incomplete = 1; context_next = 0; running = 0; } axgbe_printf(1, "--> %s: rxq %d cur %d incomp %d con_next %d running %d " "count %d\n", __func__, qsidx, cur, incomplete, context_next, running, count); return (count); } static unsigned int xgbe_rx_buf1_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata, struct xgbe_packet_data *packet) { + unsigned int ret = 0; - /* Always zero if not the first descriptor */ - if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) { - axgbe_printf(1, "%s: Not First\n", __func__); - return (0); + if (pdata->sph_enable) { + /* Always zero if not the first descriptor */ + if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) { + axgbe_printf(1, "%s: Not 
First\n", __func__); + return (0); + } } /* First descriptor with split header, return header length */ if (rdata->rx.hdr_len) { axgbe_printf(1, "%s: hdr_len %d\n", __func__, rdata->rx.hdr_len); return (rdata->rx.hdr_len); } /* First descriptor but not the last descriptor and no split header, - * so the full buffer was used + * so the full buffer was used; 256 represents the hardcoded maximum + * split header size defined in the hardware */ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) { axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size); - return (256); + if (pdata->sph_enable) { + return (256); + } else { + return (pdata->rx_buf_size); + } } /* First descriptor and last descriptor and no split header, so - * calculate how much of the buffer was used + * calculate how much of the buffer was used; we can return the + * segment length or the remaining bytes of the packet */ axgbe_printf(1, "%s: pkt_len %d buf_size %d\n", __func__, rdata->rx.len, pdata->rx_buf_size); - return (min_t(unsigned int, 256, rdata->rx.len)); + if (pdata->sph_enable) { + ret = min_t(unsigned int, 256, rdata->rx.len); + } else { + ret = rdata->rx.len; + } + + return (ret); } static unsigned int xgbe_rx_buf2_len(struct xgbe_prv_data *pdata, struct xgbe_ring_data *rdata, struct xgbe_packet_data *packet, unsigned int len) { /* Always the full buffer if not the last descriptor */ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) { axgbe_printf(1, "%s: Not last %d\n", __func__, pdata->rx_buf_size); return (pdata->rx_buf_size); } /* Last descriptor so calculate how much of the buffer was used * for the last bit of data */ return ((rdata->rx.len != 0)? (rdata->rx.len - len) : 0); } static inline void axgbe_add_frag(struct xgbe_prv_data *pdata, if_rxd_info_t ri, int idx, int len, int pos, int flid) { axgbe_printf(2, "idx %d len %d pos %d flid %d\n", idx, len, pos, flid); ri->iri_frags[pos].irf_flid = flid; ri->iri_frags[pos].irf_idx = idx; ri->iri_frags[pos].irf_len = len; } static int axgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) { struct axgbe_if_softc *sc = (struct axgbe_if_softc*)arg; struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_channel *channel = pdata->channel[ri->iri_qsidx]; struct xgbe_ring *ring = channel->rx_ring; struct xgbe_packet_data *packet = &ring->packet_data; struct xgbe_ring_data *rdata; unsigned int last, context_next, context; unsigned int buf1_len, buf2_len, max_len, len = 0, prev_cur; int i = 0; axgbe_printf(2, "%s: rxq %d cidx %d cur %d dirty %d\n", __func__, ri->iri_qsidx, ri->iri_cidx, ring->cur, ring->dirty); memset(packet, 0, sizeof(struct xgbe_packet_data)); while (1) { read_again: if (hw_if->dev_read(channel)) { axgbe_printf(2, "<-- %s: OWN bit seen on %d\n", __func__, ring->cur); break; } rdata = XGBE_GET_DESC_DATA(ring, ring->cur); prev_cur = ring->cur; ring->cur = (ring->cur + 1) & (ring->rdesc_count - 1); last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST); context_next = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT_NEXT); context = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT); if (!context) { /* Get the data length in the descriptor buffers */ buf1_len = xgbe_rx_buf1_len(pdata, rdata, packet); len += buf1_len; - buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len); - len += buf2_len; + if (pdata->sph_enable) { + buf2_len = xgbe_rx_buf2_len(pdata, rdata, packet, len); + len += buf2_len; + } } else buf1_len = buf2_len 
= 0; if (packet->errors) axgbe_printf(1, "%s: last %d context %d con_next %d buf1 %d " "buf2 %d len %d frags %d error %d\n", __func__, last, context, context_next, buf1_len, buf2_len, len, i, packet->errors); axgbe_add_frag(pdata, ri, prev_cur, buf1_len, i, 0); i++; - axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1); - i++; + if (pdata->sph_enable) { + axgbe_add_frag(pdata, ri, prev_cur, buf2_len, i, 1); + i++; + } if (!last || context_next) goto read_again; break; } if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE)) { ri->iri_csum_flags |= CSUM_IP_CHECKED; ri->iri_csum_flags |= CSUM_IP_VALID; axgbe_printf(2, "%s: csum flags 0x%x\n", __func__, ri->iri_csum_flags); } max_len = if_getmtu(pdata->netdev) + ETH_HLEN; if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) { ri->iri_flags |= M_VLANTAG; ri->iri_vtag = packet->vlan_ctag; max_len += VLAN_HLEN; axgbe_printf(2, "%s: iri_flags 0x%x vtag 0x%x\n", __func__, ri->iri_flags, ri->iri_vtag); } if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, RSS_HASH)) { ri->iri_flowid = packet->rss_hash; ri->iri_rsstype = packet->rss_hash_type; axgbe_printf(2, "%s: hash 0x%x/0x%x rsstype 0x%x/0x%x\n", __func__, packet->rss_hash, ri->iri_flowid, packet->rss_hash_type, ri->iri_rsstype); } if (__predict_false(len == 0)) - axgbe_error("%s: Zero len packet\n", __func__); + axgbe_printf(1, "%s: Discarding Zero len packet\n", __func__); if (__predict_false(len > max_len)) axgbe_error("%s: Big packet %d/%d\n", __func__, len, max_len); if (__predict_false(packet->errors)) axgbe_printf(1, "<-- %s: rxq: %d len: %d frags: %d cidx %d cur: %d " "dirty: %d error 0x%x\n", __func__, ri->iri_qsidx, len, i, ri->iri_cidx, ring->cur, ring->dirty, packet->errors); axgbe_printf(1, "%s: Packet len %d frags %d\n", __func__, len, i); ri->iri_len = len; ri->iri_nfrags = i; return (0); } diff --git a/sys/dev/axgbe/xgbe.h b/sys/dev/axgbe/xgbe.h index a5a78cd68c39..766c0c6f551a 100644 --- a/sys/dev/axgbe/xgbe.h +++ b/sys/dev/axgbe/xgbe.h @@ -1,1357 +1,1363 @@ /* * AMD 10Gb Ethernet driver * * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef __XGBE_H__ #define __XGBE_H__ #include #include #include #include #include #include #include #include #include #include "xgbe_osdep.h" /* From linux/dcbnl.h */ #define IEEE_8021QAZ_MAX_TCS 8 #define XGBE_DRV_NAME "amd-xgbe" #define XGBE_DRV_VERSION "1.0.3" #define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver" /* Descriptor related defines */ #define XGBE_TX_DESC_CNT 512 #define XGBE_TX_DESC_MIN_FREE (XGBE_TX_DESC_CNT >> 3) #define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1) #define XGBE_RX_DESC_CNT 512 #define XGBE_TX_DESC_CNT_MIN 64 #define XGBE_TX_DESC_CNT_MAX 4096 #define XGBE_RX_DESC_CNT_MIN 64 #define XGBE_RX_DESC_CNT_MAX 4096 #define XGBE_TX_DESC_CNT_DEFAULT 512 #define XGBE_RX_DESC_CNT_DEFAULT 512 #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) /* Descriptors required for maximum contiguous TSO/GSO packet */ #define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1) /* Maximum possible descriptors needed for an SKB: * - Maximum number of SKB frags * - Maximum descriptors for contiguous TSO/GSO packet * - Possible context descriptor * - Possible TSO header descriptor */ #define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2) #define XGBE_RX_MIN_BUF_SIZE 1522 #define XGBE_RX_BUF_ALIGN 64 #define XGBE_SKB_ALLOC_SIZE 256 #define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZ */ #define XGBE_MAX_DMA_CHANNELS 16 #define XGBE_MAX_QUEUES 16 #define XGBE_PRIORITY_QUEUES 8 #define XGBE_DMA_STOP_TIMEOUT 5 /* DMA cache settings - Outer sharable, write-back, write-allocate */ #define XGBE_DMA_OS_ARCR 0x002b2b2b #define XGBE_DMA_OS_AWCR 0x2f2f2f2f /* DMA cache settings - System, no caches used */ #define XGBE_DMA_SYS_ARCR 0x00303030 #define XGBE_DMA_SYS_AWCR 0x30303030 /* DMA cache settings - PCI device */ #define XGBE_DMA_PCI_ARCR 0x00000003 #define XGBE_DMA_PCI_AWCR 0x13131313 #define XGBE_DMA_PCI_AWARCR 0x00000313 /* DMA channel interrupt modes */ #define XGBE_IRQ_MODE_EDGE 0 #define XGBE_IRQ_MODE_LEVEL 1 #define XGMAC_MIN_PACKET 60 #define XGMAC_STD_PACKET_MTU 1500 #define XGMAC_MAX_STD_PACKET 1518 #define XGMAC_JUMBO_PACKET_MTU 9000 #define XGMAC_MAX_JUMBO_PACKET 9018 #define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + 
preamble */ #define XGMAC_PFC_DATA_LEN 46 #define XGMAC_PFC_DELAYS 14000 #define XGMAC_PRIO_QUEUES(_cnt) \ min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, (_cnt)) /* Common property names */ #define XGBE_MAC_ADDR_PROPERTY "mac-address" #define XGBE_PHY_MODE_PROPERTY "phy-mode" #define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt" #define XGBE_SPEEDSET_PROPERTY "amd,speed-set" #define XGBE_BLWC_PROPERTY "amd,serdes-blwc" #define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" #define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" #define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp" #define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config" #define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable" /* Device-tree clock names */ #define XGBE_DMA_CLOCK "dma_clk" #define XGBE_PTP_CLOCK "ptp_clk" /* ACPI property names */ #define XGBE_ACPI_DMA_FREQ "amd,dma-freq" #define XGBE_ACPI_PTP_FREQ "amd,ptp-freq" /* PCI BAR mapping */ #define XGBE_XGMAC_BAR 0 #define XGBE_XPCS_BAR 1 #define XGBE_MAC_PROP_OFFSET 0x1d000 #define XGBE_I2C_CTRL_OFFSET 0x1e000 /* PCI MSI/MSIx support */ #define XGBE_MSI_BASE_COUNT 4 #define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1) /* PCI clock frequencies */ #define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */ #define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */ /* Timestamp support - values based on 50MHz PTP clock * 50MHz => 20 nsec */ #define XGBE_TSTAMP_SSINC 20 #define XGBE_TSTAMP_SNSINC 0 /* Driver PMT macros */ #define XGMAC_DRIVER_CONTEXT 1 #define XGMAC_IOCTL_CONTEXT 2 #define XGMAC_FIFO_MIN_ALLOC 2048 #define XGMAC_FIFO_UNIT 256 #define XGMAC_FIFO_ALIGN(_x) \ (((_x) + XGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1)) #define XGMAC_FIFO_FC_OFF 2048 #define XGMAC_FIFO_FC_MIN 4096 #define XGBE_FIFO_MAX 81920 #define XGBE_TC_MIN_QUANTUM 10 /* Helper macro for descriptor handling * Always use XGBE_GET_DESC_DATA to access the descriptor data * since the index is free-running and needs to be and-ed * with the descriptor count value of the ring to index to * the proper descriptor data. */ #define XGBE_GET_DESC_DATA(_ring, _idx) \ ((_ring)->rdata + \ ((_idx) & ((_ring)->rdesc_count - 1))) /* Default coalescing parameters */ #define XGMAC_INIT_DMA_TX_USECS 1000 #define XGMAC_INIT_DMA_TX_FRAMES 25 #define XGMAC_MAX_DMA_RIWT 0xff #define XGMAC_INIT_DMA_RX_USECS 30 #define XGMAC_INIT_DMA_RX_FRAMES 25 /* Flow control queue count */ #define XGMAC_MAX_FLOW_CONTROL_QUEUES 8 /* Flow control threshold units */ #define XGMAC_FLOW_CONTROL_UNIT 512 #define XGMAC_FLOW_CONTROL_ALIGN(_x) \ (((_x) + XGMAC_FLOW_CONTROL_UNIT - 1) & ~(XGMAC_FLOW_CONTROL_UNIT - 1)) #define XGMAC_FLOW_CONTROL_VALUE(_x) \ (((_x) < 1024) ? 
0 : ((_x) / XGMAC_FLOW_CONTROL_UNIT) - 2) #define XGMAC_FLOW_CONTROL_MAX 33280 /* Maximum MAC address hash table size (256 bits = 8 bytes) */ #define XGBE_MAC_HASH_TABLE_SIZE 8 /* Receive Side Scaling */ #define XGBE_RSS_HASH_KEY_SIZE 40 #define XGBE_RSS_MAX_TABLE_SIZE 256 #define XGBE_RSS_LOOKUP_TABLE_TYPE 0 #define XGBE_RSS_HASH_KEY_TYPE 1 /* Auto-negotiation */ #define XGBE_AN_MS_TIMEOUT 500 #define XGBE_LINK_TIMEOUT 10 #define XGBE_SGMII_AN_LINK_STATUS BIT(1) #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) #define XGBE_SGMII_AN_LINK_SPEED_100 0x04 #define XGBE_SGMII_AN_LINK_SPEED_1000 0x08 #define XGBE_SGMII_AN_LINK_DUPLEX BIT(4) /* ECC correctable error notification window (seconds) */ #define XGBE_ECC_LIMIT 60 #define XGBE_AN_INT_CMPLT 0x01 #define XGBE_AN_INC_LINK 0x02 #define XGBE_AN_PG_RCV 0x04 #define XGBE_AN_INT_MASK 0x07 #define XGBE_SGMII_AN_LINK_STATUS BIT(1) #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) #define XGBE_SGMII_AN_LINK_SPEED_100 0x04 #define XGBE_SGMII_AN_LINK_SPEED_1000 0x08 #define XGBE_SGMII_AN_LINK_DUPLEX BIT(4) /* Rate-change complete wait/retry count */ #define XGBE_RATECHANGE_COUNT 500 /* Default SerDes settings */ #define XGBE_SPEED_10000_BLWC 0 #define XGBE_SPEED_10000_CDR 0x7 #define XGBE_SPEED_10000_PLL 0x1 #define XGBE_SPEED_10000_PQ 0x12 #define XGBE_SPEED_10000_RATE 0x0 #define XGBE_SPEED_10000_TXAMP 0xa #define XGBE_SPEED_10000_WORD 0x7 #define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1 #define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f #define XGBE_SPEED_2500_BLWC 1 #define XGBE_SPEED_2500_CDR 0x2 #define XGBE_SPEED_2500_PLL 0x0 #define XGBE_SPEED_2500_PQ 0xa #define XGBE_SPEED_2500_RATE 0x1 #define XGBE_SPEED_2500_TXAMP 0xf #define XGBE_SPEED_2500_WORD 0x1 #define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3 #define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0 #define XGBE_SPEED_1000_BLWC 1 #define XGBE_SPEED_1000_CDR 0x2 #define XGBE_SPEED_1000_PLL 0x0 #define XGBE_SPEED_1000_PQ 0xa #define XGBE_SPEED_1000_RATE 0x3 #define XGBE_SPEED_1000_TXAMP 0xf #define XGBE_SPEED_1000_WORD 0x1 #define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3 #define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0 /* TSO related macros */ #define XGBE_TSO_MAX_SIZE UINT16_MAX /* MDIO port types */ #define XGMAC_MAX_C22_PORT 3 /* Link mode bit operations */ #define XGBE_ZERO_SUP(_phy) \ ((_phy)->supported = 0) #define XGBE_SET_SUP(_phy, _mode) \ ((_phy)->supported |= SUPPORTED_##_mode) #define XGBE_CLR_SUP(_phy, _mode) \ ((_phy)->supported &= ~SUPPORTED_##_mode) #define XGBE_IS_SUP(_phy, _mode) \ ((_phy)->supported & SUPPORTED_##_mode) #define XGBE_ZERO_ADV(_phy) \ ((_phy)->advertising = 0) #define XGBE_SET_ADV(_phy, _mode) \ ((_phy)->advertising |= ADVERTISED_##_mode) #define XGBE_CLR_ADV(_phy, _mode) \ ((_phy)->advertising &= ~ADVERTISED_##_mode) #define XGBE_ADV(_phy, _mode) \ ((_phy)->advertising & ADVERTISED_##_mode) #define XGBE_ZERO_LP_ADV(_phy) \ ((_phy)->lp_advertising = 0) #define XGBE_SET_LP_ADV(_phy, _mode) \ ((_phy)->lp_advertising |= ADVERTISED_##_mode) #define XGBE_CLR_LP_ADV(_phy, _mode) \ ((_phy)->lp_advertising &= ~ADVERTISED_##_mode) #define XGBE_LP_ADV(_phy, _mode) \ ((_phy)->lp_advertising & ADVERTISED_##_mode) #define XGBE_LM_COPY(_dphy, _dname, _sphy, _sname) \ ((_dphy)->_dname = (_sphy)->_sname) struct xgbe_prv_data; struct xgbe_packet_data { struct mbuf *m; unsigned int attributes; unsigned int errors; unsigned int rdesc_count; unsigned int length; unsigned int header_len; unsigned int tcp_header_len; unsigned int tcp_payload_len; unsigned short mss; unsigned short vlan_ctag; uint64_t rx_tstamp; 
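	/* Post-segmentation frame/byte counts computed by
	 * axgbe_calculate_tx_parms() and consumed by the Tx
	 * interrupt-coalescing logic; for a TSO send these count the
	 * resulting wire frames, not the single packet handed down from
	 * the stack.
	 */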
unsigned int tx_packets; unsigned int tx_bytes; uint32_t rss_hash; uint32_t rss_hash_type; }; /* Common Rx and Tx descriptor mapping */ struct xgbe_ring_desc { __le32 desc0; __le32 desc1; __le32 desc2; __le32 desc3; }; /* Tx-related ring data */ struct xgbe_tx_ring_data { unsigned int packets; /* BQL packet count */ unsigned int bytes; /* BQL byte count */ }; /* Rx-related ring data */ struct xgbe_rx_ring_data { unsigned short hdr_len; /* Length of received header */ unsigned short len; /* Length of received packet */ }; /* Structure used to hold information related to the descriptor * and the packet associated with the descriptor (always use * use the XGBE_GET_DESC_DATA macro to access this data from the ring) */ struct xgbe_ring_data { struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */ bus_addr_t rdata_paddr; struct xgbe_tx_ring_data tx; /* Tx-related data */ struct xgbe_rx_ring_data rx; /* Rx-related data */ /* Incomplete receive save location. If the budget is exhausted * or the last descriptor (last normal descriptor or a following * context descriptor) has not been DMA'd yet the current state * of the receive processing needs to be saved. */ unsigned int state_saved; struct { struct mbuf *m; unsigned int len; unsigned int error; } state; }; struct xgbe_ring { /* Ring lock - used just for TX rings at the moment */ spinlock_t lock; /* Per packet related information */ struct xgbe_packet_data packet_data; /* Virtual/DMA addresses and count of allocated descriptor memory */ struct xgbe_ring_desc *rdesc; bus_addr_t rdesc_paddr; unsigned int rdesc_count; /* Array of descriptor data corresponding the descriptor memory * (always use the XGBE_GET_DESC_DATA macro to access this data) */ struct xgbe_ring_data *rdata; /* Ring index values * cur - Tx: index of descriptor to be used for current transfer * Rx: index of descriptor to check for packet availability * dirty - Tx: index of descriptor to check for transfer complete * Rx: index of descriptor to check for buffer reallocation */ unsigned int cur; unsigned int dirty; /* Coalesce frame count used for interrupt bit setting */ unsigned int coalesce_count; union { struct { unsigned int queue_stopped; unsigned int xmit_more; unsigned short cur_mss; unsigned short cur_vlan_ctag; } tx; }; uint16_t prev_pidx; uint8_t prev_count; } __aligned(CACHE_LINE_SIZE); /* Structure used to describe the descriptor rings associated with * a DMA channel. 
*/ struct xgbe_channel { char name[16]; /* Address of private data area for device */ struct xgbe_prv_data *pdata; /* Queue index and base address of queue's DMA registers */ unsigned int queue_index; bus_space_tag_t dma_tag; bus_space_handle_t dma_handle; int dma_irq_rid; /* Per channel interrupt irq number */ struct resource *dma_irq_res; void *dma_irq_tag; /* Per channel interrupt enablement tracker */ unsigned int curr_ier; unsigned int saved_ier; struct xgbe_ring *tx_ring; struct xgbe_ring *rx_ring; } __aligned(CACHE_LINE_SIZE); enum xgbe_state { XGBE_DOWN, XGBE_LINK_INIT, XGBE_LINK_ERR, XGBE_STOPPED, }; enum xgbe_int { XGMAC_INT_DMA_CH_SR_TI, XGMAC_INT_DMA_CH_SR_TPS, XGMAC_INT_DMA_CH_SR_TBU, XGMAC_INT_DMA_CH_SR_RI, XGMAC_INT_DMA_CH_SR_RBU, XGMAC_INT_DMA_CH_SR_RPS, XGMAC_INT_DMA_CH_SR_TI_RI, XGMAC_INT_DMA_CH_SR_FBE, XGMAC_INT_DMA_ALL, }; enum xgbe_int_state { XGMAC_INT_STATE_SAVE, XGMAC_INT_STATE_RESTORE, }; enum xgbe_ecc_sec { XGBE_ECC_SEC_TX, XGBE_ECC_SEC_RX, XGBE_ECC_SEC_DESC, }; enum xgbe_speed { XGBE_SPEED_1000 = 0, XGBE_SPEED_2500, XGBE_SPEED_10000, XGBE_SPEEDS, }; enum xgbe_xpcs_access { XGBE_XPCS_ACCESS_V1 = 0, XGBE_XPCS_ACCESS_V2, }; enum xgbe_an_mode { XGBE_AN_MODE_CL73 = 0, XGBE_AN_MODE_CL73_REDRV, XGBE_AN_MODE_CL37, XGBE_AN_MODE_CL37_SGMII, XGBE_AN_MODE_NONE, }; enum xgbe_an { XGBE_AN_READY = 0, XGBE_AN_PAGE_RECEIVED, XGBE_AN_INCOMPAT_LINK, XGBE_AN_COMPLETE, XGBE_AN_NO_LINK, XGBE_AN_ERROR, }; enum xgbe_rx { XGBE_RX_BPA = 0, XGBE_RX_XNP, XGBE_RX_COMPLETE, XGBE_RX_ERROR, }; enum xgbe_mode { XGBE_MODE_KR = 0, XGBE_MODE_KX, XGBE_MODE_KX_1000, XGBE_MODE_KX_2500, XGBE_MODE_X, XGBE_MODE_SGMII_100, XGBE_MODE_SGMII_1000, XGBE_MODE_SFI, XGBE_MODE_UNKNOWN, }; enum xgbe_speedset { XGBE_SPEEDSET_1000_10000 = 0, XGBE_SPEEDSET_2500_10000, }; enum xgbe_mdio_mode { XGBE_MDIO_MODE_NONE = 0, XGBE_MDIO_MODE_CL22, XGBE_MDIO_MODE_CL45, }; struct xgbe_phy { uint32_t supported; uint32_t advertising; uint32_t lp_advertising; int address; int autoneg; int speed; int duplex; int link; int pause_autoneg; int tx_pause; int rx_pause; int pause; int asym_pause; }; enum xgbe_i2c_cmd { XGBE_I2C_CMD_READ = 0, XGBE_I2C_CMD_WRITE, }; struct xgbe_i2c_op { enum xgbe_i2c_cmd cmd; unsigned int target; void *buf; unsigned int len; }; struct xgbe_i2c_op_state { struct xgbe_i2c_op *op; unsigned int tx_len; unsigned char *tx_buf; unsigned int rx_len; unsigned char *rx_buf; unsigned int tx_abort_source; int ret; }; struct xgbe_i2c { unsigned int started; unsigned int max_speed_mode; unsigned int rx_fifo_size; unsigned int tx_fifo_size; struct xgbe_i2c_op_state op_state; }; struct xgbe_mmc_stats { /* Tx Stats */ uint64_t txoctetcount_gb; uint64_t txframecount_gb; uint64_t txbroadcastframes_g; uint64_t txmulticastframes_g; uint64_t tx64octets_gb; uint64_t tx65to127octets_gb; uint64_t tx128to255octets_gb; uint64_t tx256to511octets_gb; uint64_t tx512to1023octets_gb; uint64_t tx1024tomaxoctets_gb; uint64_t txunicastframes_gb; uint64_t txmulticastframes_gb; uint64_t txbroadcastframes_gb; uint64_t txunderflowerror; uint64_t txoctetcount_g; uint64_t txframecount_g; uint64_t txpauseframes; uint64_t txvlanframes_g; /* Rx Stats */ uint64_t rxframecount_gb; uint64_t rxoctetcount_gb; uint64_t rxoctetcount_g; uint64_t rxbroadcastframes_g; uint64_t rxmulticastframes_g; uint64_t rxcrcerror; uint64_t rxrunterror; uint64_t rxjabbererror; uint64_t rxundersize_g; uint64_t rxoversize_g; uint64_t rx64octets_gb; uint64_t rx65to127octets_gb; uint64_t rx128to255octets_gb; uint64_t rx256to511octets_gb; uint64_t rx512to1023octets_gb; uint64_t 
struct xgbe_ext_stats {
	uint64_t tx_tso_packets;
	uint64_t rx_split_header_packets;
	uint64_t rx_buffer_unavailable;

	uint64_t txq_packets[XGBE_MAX_DMA_CHANNELS];
	uint64_t txq_bytes[XGBE_MAX_DMA_CHANNELS];
	uint64_t rxq_packets[XGBE_MAX_DMA_CHANNELS];
	uint64_t rxq_bytes[XGBE_MAX_DMA_CHANNELS];

	uint64_t tx_vxlan_packets;
	uint64_t rx_vxlan_packets;
	uint64_t rx_csum_errors;
	uint64_t rx_vxlan_csum_errors;
};

struct xgbe_hw_if {
	int (*tx_complete)(struct xgbe_ring_desc *);

	int (*set_mac_address)(struct xgbe_prv_data *, uint8_t *addr);
	int (*config_rx_mode)(struct xgbe_prv_data *);

	int (*enable_rx_csum)(struct xgbe_prv_data *);
	int (*disable_rx_csum)(struct xgbe_prv_data *);

	int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
	int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
	int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *);
	int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *);
	int (*update_vlan_hash_table)(struct xgbe_prv_data *);

	int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
	void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
	int (*set_speed)(struct xgbe_prv_data *, int);

	int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int,
	    enum xgbe_mdio_mode);
	int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int);
	int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, uint16_t);

	int (*set_gpio)(struct xgbe_prv_data *, unsigned int);
	int (*clr_gpio)(struct xgbe_prv_data *, unsigned int);

	void (*enable_tx)(struct xgbe_prv_data *);
	void (*disable_tx)(struct xgbe_prv_data *);
	void (*enable_rx)(struct xgbe_prv_data *);
	void (*disable_rx)(struct xgbe_prv_data *);

	void (*powerup_tx)(struct xgbe_prv_data *);
	void (*powerdown_tx)(struct xgbe_prv_data *);
	void (*powerup_rx)(struct xgbe_prv_data *);
	void (*powerdown_rx)(struct xgbe_prv_data *);

	int (*init)(struct xgbe_prv_data *);
	int (*exit)(struct xgbe_prv_data *);

	int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
	int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
	int (*dev_read)(struct xgbe_channel *);
	void (*tx_desc_init)(struct xgbe_channel *);
	void (*rx_desc_init)(struct xgbe_channel *);
	void (*tx_desc_reset)(struct xgbe_ring_data *);
	int (*is_last_desc)(struct xgbe_ring_desc *);
	int (*is_context_desc)(struct xgbe_ring_desc *);

	/* For FLOW ctrl */
	int (*config_tx_flow_control)(struct xgbe_prv_data *);
	int (*config_rx_flow_control)(struct xgbe_prv_data *);

	/* For RX coalescing */
	int (*config_rx_coalesce)(struct xgbe_prv_data *);
	int (*config_tx_coalesce)(struct xgbe_prv_data *);
	unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
	unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);

	/* For RX and TX threshold config */
	int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
	int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);

	/* For RX and TX Store and Forward Mode config */
	int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
	int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);

	/* For TX DMA Operate on Second Frame config */
	int (*config_osp_mode)(struct xgbe_prv_data *);

	/* For MMC statistics */
	void (*rx_mmc_int)(struct xgbe_prv_data *);
	void (*tx_mmc_int)(struct xgbe_prv_data *);
	void (*read_mmc_stats)(struct xgbe_prv_data *);

	/* For Receive Side Scaling */
	int (*enable_rss)(struct xgbe_prv_data *);
	int (*disable_rss)(struct xgbe_prv_data *);
	int (*set_rss_hash_key)(struct xgbe_prv_data *, const uint8_t *);
	int (*set_rss_lookup_table)(struct xgbe_prv_data *, const uint32_t *);
};
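/*
 * Illustrative sketch only: hardware access goes through the hw_if ops
 * table rather than direct register writes, so callers look roughly like
 * the hypothetical helper below, which re-enables the combined Tx/Rx
 * interrupt on every channel. It references channel[] and channel_count
 * from struct xgbe_prv_data (defined further down), so in a real source
 * file it would live in a .c after the full struct definitions.
 */
static inline void
xgbe_enable_channel_ints(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++)
		pdata->hw_if.enable_int(pdata->channel[i],
		    XGMAC_INT_DMA_CH_SR_TI_RI);
}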
/* This structure represents implementation specific routines for an
 * implementation of a PHY. All routines are required unless noted below.
 *   Optional routines:
 *     an_pre, an_post
 *     kr_training_pre, kr_training_post
 *     module_info, module_eeprom
 */
struct xgbe_phy_impl_if {
	/* Perform Setup/teardown actions */
	int (*init)(struct xgbe_prv_data *);
	void (*exit)(struct xgbe_prv_data *);

	/* Perform start/stop specific actions */
	int (*reset)(struct xgbe_prv_data *);
	int (*start)(struct xgbe_prv_data *);
	void (*stop)(struct xgbe_prv_data *);

	/* Return the link status */
	int (*link_status)(struct xgbe_prv_data *, int *);

	/* Indicate if a particular speed is valid */
	bool (*valid_speed)(struct xgbe_prv_data *, int);

	/* Check if the specified mode can/should be used */
	bool (*use_mode)(struct xgbe_prv_data *, enum xgbe_mode);
	/* Switch the PHY into various modes */
	void (*set_mode)(struct xgbe_prv_data *, enum xgbe_mode);
	/* Retrieve mode needed for a specific speed */
	enum xgbe_mode (*get_mode)(struct xgbe_prv_data *, int);
	/* Retrieve new/next mode when trying to auto-negotiate */
	enum xgbe_mode (*switch_mode)(struct xgbe_prv_data *);
	/* Retrieve current mode */
	enum xgbe_mode (*cur_mode)(struct xgbe_prv_data *);
	/* Retrieve interface sub-type */
	void (*get_type)(struct xgbe_prv_data *, struct ifmediareq *);

	/* Retrieve current auto-negotiation mode */
	enum xgbe_an_mode (*an_mode)(struct xgbe_prv_data *);

	/* Configure auto-negotiation settings */
	int (*an_config)(struct xgbe_prv_data *);

	/* Set/override auto-negotiation advertisement settings */
	void (*an_advertising)(struct xgbe_prv_data *, struct xgbe_phy *);

	/* Process results of auto-negotiation */
	enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);

	/* Pre/Post auto-negotiation support */
	void (*an_pre)(struct xgbe_prv_data *);
	void (*an_post)(struct xgbe_prv_data *);

	/* Pre/Post KR training enablement support */
	void (*kr_training_pre)(struct xgbe_prv_data *);
	void (*kr_training_post)(struct xgbe_prv_data *);

	/* SFP module related info */
	int (*module_info)(struct xgbe_prv_data *pdata);
	int (*module_eeprom)(struct xgbe_prv_data *pdata);
};

struct xgbe_phy_if {
	/* For PHY setup/teardown */
	int (*phy_init)(struct xgbe_prv_data *);
	void (*phy_exit)(struct xgbe_prv_data *);

	/* For PHY support when setting device up/down */
	int (*phy_reset)(struct xgbe_prv_data *);
	int (*phy_start)(struct xgbe_prv_data *);
	void (*phy_stop)(struct xgbe_prv_data *);

	/* For PHY support while device is up */
	void (*phy_status)(struct xgbe_prv_data *);
	int (*phy_config_aneg)(struct xgbe_prv_data *);

	/* For PHY settings validation */
	bool (*phy_valid_speed)(struct xgbe_prv_data *, int);

	/* For single interrupt support */
	void (*an_isr)(struct xgbe_prv_data *);

	/* PHY implementation specific services */
	struct xgbe_phy_impl_if phy_impl;
};

struct xgbe_i2c_if {
	/* For initial I2C setup */
	int (*i2c_init)(struct xgbe_prv_data *);

	/* For I2C support when setting device up/down */
	int (*i2c_start)(struct xgbe_prv_data *);
	void (*i2c_stop)(struct xgbe_prv_data *);

	/* For performing I2C operations */
	int (*i2c_xfer)(struct xgbe_prv_data *, struct xgbe_i2c_op *);

	/* For single interrupt support */
	void (*i2c_isr)(struct xgbe_prv_data *);
};

struct xgbe_desc_if {
	int (*alloc_ring_resources)(struct xgbe_prv_data *);
	void (*free_ring_resources)(struct xgbe_prv_data *);
	int (*map_tx_skb)(struct xgbe_channel *, struct mbuf *);
	int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
	    struct xgbe_ring_data *);
	void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
	void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
	void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
};
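/*
 * Illustrative sketch only: an I2C transaction is issued by filling in a
 * struct xgbe_i2c_op and handing it to the i2c_if.i2c_xfer hook. The
 * hypothetical helper below reads 'len' bytes from a target device (e.g.
 * an SFP EEPROM address); like the sketch above, it dereferences
 * struct xgbe_prv_data and so belongs after the full struct definitions
 * in real code.
 */
static inline int
xgbe_i2c_read_sketch(struct xgbe_prv_data *pdata, unsigned int target,
    void *buf, unsigned int len)
{
	struct xgbe_i2c_op op;

	op.cmd = XGBE_I2C_CMD_READ;
	op.target = target;
	op.buf = buf;
	op.len = len;

	/* Result reporting is reduced to the i2c_xfer return value. */
	return (pdata->i2c_if.i2c_xfer(pdata, &op));
}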
/* This structure contains flags that indicate what hardware features
 * or configurations are present in the device.
 */
struct xgbe_hw_features {
	/* HW Version */
	unsigned int version;

	/* HW Feature Register0 */
	unsigned int gmii;		/* 1000 Mbps support */
	unsigned int vlhash;		/* VLAN Hash Filter */
	unsigned int sma;		/* SMA(MDIO) Interface */
	unsigned int rwk;		/* PMT remote wake-up packet */
	unsigned int mgk;		/* PMT magic packet */
	unsigned int mmc;		/* RMON module */
	unsigned int aoe;		/* ARP Offload */
	unsigned int ts;		/* IEEE 1588-2008 Advanced Timestamp */
	unsigned int eee;		/* Energy Efficient Ethernet */
	unsigned int tx_coe;		/* Tx Checksum Offload */
	unsigned int rx_coe;		/* Rx Checksum Offload */
	unsigned int addn_mac;		/* Additional MAC Addresses */
	unsigned int ts_src;		/* Timestamp Source */
	unsigned int sa_vlan_ins;	/* Source Address or VLAN Insertion */
	unsigned int vxn;		/* VXLAN/NVGRE */

	/* HW Feature Register1 */
	unsigned int rx_fifo_size;	/* MTL Receive FIFO Size */
	unsigned int tx_fifo_size;	/* MTL Transmit FIFO Size */
	unsigned int adv_ts_hi;		/* Advance Timestamping High Word */
	unsigned int dma_width;		/* DMA width */
	unsigned int dcb;		/* DCB Feature */
	unsigned int sph;		/* Split Header Feature */
	unsigned int tso;		/* TCP Segmentation Offload */
	unsigned int dma_debug;		/* DMA Debug Registers */
	unsigned int rss;		/* Receive Side Scaling */
	unsigned int tc_cnt;		/* Number of Traffic Classes */
	unsigned int hash_table_size;	/* Hash Table Size */
	unsigned int l3l4_filter_num;	/* Number of L3-L4 Filters */

	/* HW Feature Register2 */
	unsigned int rx_q_cnt;		/* Number of MTL Receive Queues */
	unsigned int tx_q_cnt;		/* Number of MTL Transmit Queues */
	unsigned int rx_ch_cnt;		/* Number of DMA Receive Channels */
	unsigned int tx_ch_cnt;		/* Number of DMA Transmit Channels */
	unsigned int pps_out_num;	/* Number of PPS outputs */
	unsigned int aux_snap_num;	/* Number of Aux snapshot inputs */
};

struct xgbe_version_data {
	void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *);
	enum xgbe_xpcs_access xpcs_access;
	unsigned int mmc_64bit;
	unsigned int tx_max_fifo_size;
	unsigned int rx_max_fifo_size;
	unsigned int tx_tstamp_workaround;
	unsigned int ecc_support;
	unsigned int i2c_support;
	unsigned int irq_reissue_support;
	unsigned int tx_desc_prefetch;
	unsigned int rx_desc_prefetch;
	unsigned int an_cdr_workaround;
};
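/*
 * Illustrative sketch only: the counts reported in xgbe_hw_features bound
 * what the driver may configure. The hypothetical helper below clamps a
 * requested ring count to what both the Tx and Rx DMA engines support, in
 * the way attach-time sizing code would. MIN() is assumed available from
 * <sys/param.h>.
 */
static inline unsigned int
xgbe_clamp_ring_count(const struct xgbe_hw_features *hw_feat,
    unsigned int requested)
{
	unsigned int max;

	/* A usable channel needs both a Tx and an Rx DMA engine behind it. */
	max = MIN(hw_feat->tx_ch_cnt, hw_feat->rx_ch_cnt);
	return (MIN(requested, max));
}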
struct xgbe_prv_data {
	struct ifnet *netdev;
	struct platform_device *pdev;
	struct acpi_device *adev;
	device_t dev;

	/* Version related data */
	struct xgbe_version_data *vdata;

	/* ACPI or DT flag */
	unsigned int use_acpi;

	/* XGMAC/XPCS related mmio registers */
	struct resource *xgmac_res;	/* XGMAC CSRs */
	struct resource *xpcs_res;	/* XPCS MMD registers */
	struct resource *rxtx_res;	/* SerDes Rx/Tx CSRs */
	struct resource *sir0_res;	/* SerDes integration registers (1/2) */
	struct resource *sir1_res;	/* SerDes integration registers (2/2) */

	/* Port property registers */
	unsigned int pp0;
	unsigned int pp1;
	unsigned int pp2;
	unsigned int pp3;
	unsigned int pp4;

	/* DMA tag */
	bus_dma_tag_t dmat;

	/* XPCS indirect addressing lock */
	spinlock_t xpcs_lock;
	unsigned int xpcs_window_def_reg;
	unsigned int xpcs_window_sel_reg;
	unsigned int xpcs_window;
	unsigned int xpcs_window_size;
	unsigned int xpcs_window_mask;

	/* RSS addressing mutex */
	struct mtx rss_mutex;

	/* Flags representing xgbe_state */
	unsigned long dev_state;

	/* ECC support */
	unsigned long tx_sec_period;
	unsigned long tx_ded_period;
	unsigned long rx_sec_period;
	unsigned long rx_ded_period;
	unsigned long desc_sec_period;
	unsigned long desc_ded_period;

	unsigned int tx_sec_count;
	unsigned int tx_ded_count;
	unsigned int rx_sec_count;
	unsigned int rx_ded_count;
	unsigned int desc_ded_count;
	unsigned int desc_sec_count;

	struct if_irq dev_irq;
	struct resource *dev_irq_res;
	struct resource *ecc_irq_res;
	struct resource *i2c_irq_res;
	struct resource *an_irq_res;

	int ecc_rid;
	int i2c_rid;
	int an_rid;

	void *dev_irq_tag;
	void *ecc_irq_tag;
	void *i2c_irq_tag;
	void *an_irq_tag;

	struct resource *chan_irq_res[XGBE_MAX_DMA_CHANNELS];

	unsigned int per_channel_irq;
	unsigned int irq_count;
	unsigned int channel_irq_count;
	unsigned int channel_irq_mode;
	char ecc_name[IFNAMSIZ + 32];

	unsigned int isr_as_tasklet;

	struct xgbe_hw_if hw_if;
	struct xgbe_phy_if phy_if;
	struct xgbe_desc_if desc_if;
	struct xgbe_i2c_if i2c_if;

	/* AXI DMA settings */
	unsigned int coherent;
	unsigned int arcr;
	unsigned int awcr;
	unsigned int awarcr;

	/* Service routine support */
	struct taskqueue *dev_workqueue;
	struct task service_work;
	struct callout service_timer;
	struct mtx timer_mutex;

	/* Rings for Tx/Rx on a DMA channel */
	struct xgbe_channel *channel[XGBE_MAX_DMA_CHANNELS];
	unsigned int tx_max_channel_count;
	unsigned int rx_max_channel_count;
	unsigned int total_channel_count;
	unsigned int channel_count;
	unsigned int tx_ring_count;
	unsigned int tx_desc_count;
	unsigned int rx_ring_count;
	unsigned int rx_desc_count;

	unsigned int new_tx_ring_count;
	unsigned int new_rx_ring_count;

	unsigned int tx_max_q_count;
	unsigned int rx_max_q_count;
	unsigned int tx_q_count;
	unsigned int rx_q_count;

	/* Tx/Rx common settings */
	unsigned int blen;
	unsigned int pbl;
	unsigned int aal;
	unsigned int rd_osr_limit;
	unsigned int wr_osr_limit;

	/* Tx settings */
	unsigned int tx_sf_mode;
	unsigned int tx_threshold;
	unsigned int tx_osp_mode;
	unsigned int tx_max_fifo_size;

	/* Rx settings */
	unsigned int rx_sf_mode;
	unsigned int rx_threshold;
	unsigned int rx_max_fifo_size;

	/* Tx coalescing settings */
	unsigned int tx_usecs;
	unsigned int tx_frames;

	/* Rx coalescing settings */
	unsigned int rx_riwt;
	unsigned int rx_usecs;
	unsigned int rx_frames;

	/* Current Rx buffer size */
	unsigned int rx_buf_size;

	/* Flow control settings */
	unsigned int pause_autoneg;
	unsigned int tx_pause;
	unsigned int rx_pause;
	unsigned int rx_rfa[XGBE_MAX_QUEUES];
	unsigned int rx_rfd[XGBE_MAX_QUEUES];

	/* Receive Side Scaling settings */
	uint8_t rss_key[XGBE_RSS_HASH_KEY_SIZE];
	uint32_t rss_table[XGBE_RSS_MAX_TABLE_SIZE];
	uint32_t rss_options;
	unsigned int enable_rss;

	/* VXLAN settings */
	unsigned int vxlan_port_set;
	unsigned int vxlan_offloads_set;
	unsigned int vxlan_force_disable;
	unsigned int vxlan_port_count;
	uint16_t vxlan_port;
	uint64_t vxlan_features;

	/* Netdev related settings */
	unsigned char mac_addr[ETH_ALEN];
	uint64_t netdev_features;
	struct xgbe_mmc_stats mmc_stats;
	struct xgbe_ext_stats ext_stats;

	/* Filtering support */
	bitstr_t *active_vlans;
	unsigned int num_active_vlans;

	/* Device clocks */
	struct clk *sysclk;
	unsigned long sysclk_rate;
	struct clk *ptpclk;
	unsigned long ptpclk_rate;

	/* DCB support */
	unsigned int q2tc_map[XGBE_MAX_QUEUES];
	unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];

	/* Hardware features of the device */
	struct xgbe_hw_features hw_feat;

	/* Device work structure */
	struct task restart_work;
	struct task stopdev_work;

	/* Keeps track of power mode */
	unsigned int power_down;

	/* Network interface message level setting */
	uint32_t msg_enable;

	/* Current PHY settings */
	int phy_link;
	int phy_speed;

	/* MDIO/PHY related settings */
	unsigned int phy_started;
	void *phy_data;
	struct xgbe_phy phy;
	int mdio_mmd;
	unsigned long link_check;
	struct mtx mdio_mutex;
	unsigned int mdio_addr;

	unsigned int kr_redrv;

	char an_name[IFNAMSIZ + 32];
	struct taskqueue *an_workqueue;
	struct task an_irq_work;

	unsigned int speed_set;

	/* SerDes UEFI configurable settings.
	 *   Switching between modes/speeds requires new values for some
	 *   SerDes settings. The values can be supplied as device
	 *   properties in array format. The first array entry is for
	 *   1GbE, second for 2.5GbE and third for 10GbE
	 */
	uint32_t serdes_blwc[XGBE_SPEEDS];
	uint32_t serdes_cdr_rate[XGBE_SPEEDS];
	uint32_t serdes_pq_skew[XGBE_SPEEDS];
	uint32_t serdes_tx_amp[XGBE_SPEEDS];
	uint32_t serdes_dfe_tap_cfg[XGBE_SPEEDS];
	uint32_t serdes_dfe_tap_ena[XGBE_SPEEDS];

	/* Auto-negotiation state machine support */
	unsigned int an_int;
	unsigned int an_status;
	struct sx an_mutex;
	enum xgbe_an an_result;
	enum xgbe_an an_state;
	enum xgbe_rx kr_state;
	enum xgbe_rx kx_state;
	struct task an_work;
	unsigned int an_again;
	unsigned int an_supported;
	unsigned int parallel_detect;
	unsigned int fec_ability;
	unsigned long an_start;
	enum xgbe_an_mode an_mode;

	/* I2C support */
	struct xgbe_i2c i2c;
	struct mtx i2c_mutex;
	bool i2c_complete;

	unsigned int lpm_ctrl;		/* CTRL1 for resume */

	unsigned int an_cdr_track_early;

	uint64_t features;

	device_t axgbe_miibus;

	unsigned int sysctl_xgmac_reg;
	unsigned int sysctl_xpcs_mmd;
	unsigned int sysctl_xpcs_reg;
	unsigned int sysctl_xprop_reg;
	unsigned int sysctl_xi2c_reg;

	bool sysctl_an_cdr_workaround;
	bool sysctl_an_cdr_track_early;

	int pcie_bus;		/* PCIe bus number */
	int pcie_device;	/* PCIe device/slot number */
	int pcie_func;		/* PCIe function number */

	void *sys_op;
	uint64_t use_adaptive_rx_coalesce;
	uint64_t use_adaptive_tx_coalesce;
	uint64_t rx_coalesce_usecs;

	unsigned int debug_level;
+
+	/*
+	 * Toggles the split header feature.
+	 * This requires a complete restart.
+	 */
+	unsigned int sph_enable;
};
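/*
 * Illustrative sketch only: the new sph_enable field added by this change
 * is meant to gate the split header feature, which additionally requires
 * hardware support (hw_feat.sph). A hypothetical predicate for Rx setup
 * code might look like the following; the actual enable/disable sequencing,
 * and the complete restart the comment above calls for, live in the driver
 * proper.
 */
static inline bool
xgbe_sph_active(const struct xgbe_prv_data *pdata)
{
	return (pdata->sph_enable != 0 && pdata->hw_feat.sph != 0);
}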
struct axgbe_if_softc {
	struct xgbe_prv_data pdata;
	if_softc_ctx_t scctx;
	if_shared_ctx_t sctx;
	if_ctx_t ctx;
	struct ifnet *ifp;
	struct ifmedia *media;
	unsigned int link_status;
};

/* Function prototypes */
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *);
void xgbe_get_all_hw_features(struct xgbe_prv_data *);
void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
int xgbe_calc_rx_buf_size(struct ifnet *netdev, unsigned int mtu);

void axgbe_sysctl_init(struct xgbe_prv_data *pdata);
void axgbe_sysctl_exit(struct xgbe_prv_data *pdata);

int xgbe_phy_mii_write(struct xgbe_prv_data *pdata, int addr, int reg,
    uint16_t val);
int xgbe_phy_mii_read(struct xgbe_prv_data *pdata, int addr, int reg);

void xgbe_dump_i2c_registers(struct xgbe_prv_data *);

uint32_t bitrev32(uint32_t);
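/*
 * Illustrative sketch only: bitrev32() is typically used when computing
 * MAC hash filter indices, where the CRC must be bit-reversed before use.
 * One common swap-and-fold implementation (an assumption here, not
 * necessarily the one the driver ships) looks like this:
 */
static inline uint32_t
bitrev32_sketch(uint32_t x)
{
	/* Swap adjacent bits, pairs, nibbles, bytes, then half-words. */
	x = ((x & 0x55555555) << 1) | ((x & 0xaaaaaaaa) >> 1);
	x = ((x & 0x33333333) << 2) | ((x & 0xcccccccc) >> 2);
	x = ((x & 0x0f0f0f0f) << 4) | ((x & 0xf0f0f0f0) >> 4);
	x = ((x & 0x00ff00ff) << 8) | ((x & 0xff00ff00) >> 8);
	return ((x << 16) | (x >> 16));
}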
/* For debug prints */
#ifdef YDEBUG
#define DBGPR(x...)		device_printf(pdata->dev, x)
#else
#define DBGPR(x...)		do { } while (0)
#endif

#ifdef YDEBUG_MDIO
#define DBGPR_MDIO(x...)	device_printf(pdata->dev, x)
#else
#define DBGPR_MDIO(x...)	do { } while (0)
#endif

#define axgbe_printf(lvl, ...) do {					\
	if (lvl <= pdata->debug_level)					\
		device_printf(pdata->dev, __VA_ARGS__);			\
} while (0)

#define axgbe_error(...) do {						\
	device_printf(pdata->dev, __VA_ARGS__);				\
} while (0)

#endif /* __XGBE_H__ */
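/*
 * Usage note: the macros above expand to references to 'pdata', so they
 * can only be used in functions with a 'struct xgbe_prv_data *pdata' in
 * scope, e.g.:
 *
 *	axgbe_printf(2, "%s: channel %u enabled\n", __func__, i);
 *	axgbe_error("%s: hardware init failed\n", __func__);
 *
 * axgbe_printf() is always compiled in and filtered at run time against
 * pdata->debug_level, whereas DBGPR()/DBGPR_MDIO() are compiled out
 * entirely unless YDEBUG/YDEBUG_MDIO are defined.
 */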